#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from heat.common import exception
from heat.common.i18n import _
from heat.engine import constraints
from heat.engine import properties
from heat.engine.resources.openstack.neutron import neutron
from heat.engine import support
from heat.engine import translation
class SecurityGroupRule(neutron.NeutronResource):
"""A resource for managing Neutron security group rules.
This resource manages individual rules that control the traffic allowed through a Neutron security group.
"""
required_service_extension = 'security-group'
entity = 'security_group_rule'
support_status = support.SupportStatus(version='7.0.0')
PROPERTIES = (
SECURITY_GROUP, DESCRIPTION, DIRECTION, ETHERTYPE,
PORT_RANGE_MIN, PORT_RANGE_MAX, PROTOCOL, REMOTE_GROUP,
REMOTE_IP_PREFIX
) = (
'security_group', 'description', 'direction', 'ethertype',
'port_range_min', 'port_range_max', 'protocol', 'remote_group',
'remote_ip_prefix'
)
_allowed_protocols = list(range(256)) + [
'ah', 'dccp', 'egp', 'esp', 'gre', 'icmp', 'icmpv6', 'igmp',
'ipv6-encap', 'ipv6-frag', 'ipv6-icmp', 'ipv6-nonxt', 'ipv6-opts',
'ipv6-route', 'ospf', 'pgm', 'rsvp', 'sctp', 'tcp', 'udp', 'udplite',
'vrrp'
]
properties_schema = {
SECURITY_GROUP: properties.Schema(
properties.Schema.STRING,
_('Security group name or ID to add rule.'),
required=True,
constraints=[
constraints.CustomConstraint('neutron.security_group')
]
),
DESCRIPTION: properties.Schema(
properties.Schema.STRING,
_('Description of the security group rule.')
),
DIRECTION: properties.Schema(
properties.Schema.STRING,
_('The direction in which the security group rule is applied. '
'For a compute instance, an ingress security group rule '
'matches traffic that is incoming (ingress) for that '
'instance. An egress rule is applied to traffic leaving '
'the instance.'),
default='ingress',
constraints=[
constraints.AllowedValues(['ingress', 'egress']),
]
),
ETHERTYPE: properties.Schema(
properties.Schema.STRING,
_('Ethertype of the traffic.'),
default='IPv4',
constraints=[
constraints.AllowedValues(['IPv4', 'IPv6']),
]
),
PORT_RANGE_MIN: properties.Schema(
properties.Schema.INTEGER,
_('The minimum port number in the range that is matched by the '
'security group rule. If the protocol is TCP or UDP, this '
'value must be less than or equal to the value of the '
'port_range_max attribute. If the protocol is ICMP, this '
'value must be an ICMP type.'),
constraints=[
constraints.Range(0, 65535)
]
),
PORT_RANGE_MAX: properties.Schema(
properties.Schema.INTEGER,
_('The maximum port number in the range that is matched by the '
'security group rule. The port_range_min attribute constrains '
'the port_range_max attribute. If the protocol is ICMP, this '
'value must be an ICMP code.'),
constraints=[
constraints.Range(0, 65535)
]
),
PROTOCOL: properties.Schema(
properties.Schema.STRING,
_('The protocol that is matched by the security group rule. '
'Allowed values are ah, dccp, egp, esp, gre, icmp, icmpv6, '
'igmp, ipv6-encap, ipv6-frag, ipv6-icmp, ipv6-nonxt, ipv6-opts, '
'ipv6-route, ospf, pgm, rsvp, sctp, tcp, udp, udplite, vrrp '
'and integer representations [0-255].'),
default='tcp',
constraints=[constraints.AllowedValues(_allowed_protocols)]
),
REMOTE_GROUP: properties.Schema(
properties.Schema.STRING,
_('The remote group name or ID to be associated with this '
'security group rule.'),
constraints=[
constraints.CustomConstraint('neutron.security_group')
]
),
REMOTE_IP_PREFIX: properties.Schema(
properties.Schema.STRING,
_('The remote IP prefix (CIDR) to be associated with this '
'security group rule.'),
constraints=[
constraints.CustomConstraint('net_cidr')
]
)
}
def translation_rules(self, props):
client_plugin = self.client_plugin()
return [
translation.TranslationRule(
props,
translation.TranslationRule.RESOLVE,
[self.SECURITY_GROUP],
client_plugin=client_plugin,
finder='find_resourceid_by_name_or_id',
entity=client_plugin.RES_TYPE_SECURITY_GROUP
),
translation.TranslationRule(
props,
translation.TranslationRule.RESOLVE,
[self.REMOTE_GROUP],
client_plugin=client_plugin,
finder='find_resourceid_by_name_or_id',
entity=client_plugin.RES_TYPE_SECURITY_GROUP
),
]
def validate(self):
super(SecurityGroupRule, self).validate()
if (self.properties[self.REMOTE_GROUP] is not None and
self.properties[self.REMOTE_IP_PREFIX] is not None):
raise exception.ResourcePropertyConflict(
self.REMOTE_GROUP, self.REMOTE_IP_PREFIX)
port_max = self.properties[self.PORT_RANGE_MAX]
port_min = self.properties[self.PORT_RANGE_MIN]
protocol = self.properties[self.PROTOCOL]
if (port_max is not None and port_min is not None and
protocol not in ('icmp', 'icmpv6', 'ipv6-icmp') and
port_max < port_min):
msg = _('The minimum port number must be less than or equal to '
'the maximum port number.')
raise exception.StackValidationFailed(message=msg)
def handle_create(self):
props = self.prepare_properties(
self.properties,
self.physical_resource_name())
props['security_group_id'] = props.pop(self.SECURITY_GROUP)
if self.REMOTE_GROUP in props:
props['remote_group_id'] = props.pop(self.REMOTE_GROUP)
for key in (self.PORT_RANGE_MIN, self.PORT_RANGE_MAX):
if props.get(key) is not None:
props[key] = str(props[key])
rule = self.client().create_security_group_rule(
{'security_group_rule': props})['security_group_rule']
self.resource_id_set(rule['id'])
def handle_delete(self):
if self.resource_id is None:
return
with self.client_plugin().ignore_not_found:
self.client().delete_security_group_rule(self.resource_id)
def resource_mapping():
return {
'OS::Neutron::SecurityGroupRule': SecurityGroupRule
}
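# Illustrative usage (not part of this module): a minimal HOT template snippet that
# exercises this resource type. The resource name and property values below are
# assumptions chosen for the example.
#
#   resources:
#     web_sg_rule:
#       type: OS::Neutron::SecurityGroupRule
#       properties:
#         security_group: {get_resource: web_sg}
#         direction: ingress
#         protocol: tcp
#         port_range_min: 443
#         port_range_max: 443
#         remote_ip_prefix: 0.0.0.0/0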
|
# -*- coding: utf-8 -*-
# cython: language_level=3
# BSD 3-Clause License
#
# Copyright (c) 2020-2021, Faster Speeding
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from __future__ import annotations
import pathlib
import tempfile
import nox
nox.options.sessions = ["reformat", "lint", "spell-check", "type-check", "test"] # type: ignore
GENERAL_TARGETS = ["./examples", "./noxfile.py", "./tanjun", "./tests"]
PYTHON_VERSIONS = ["3.9", "3.10"] # TODO: @nox.session(python=["3.6", "3.7", "3.8"])?
def install_requirements(session: nox.Session, *other_requirements: str) -> None:
session.install("--upgrade", "wheel")
session.install("--upgrade", *other_requirements)
def _try_find_option(session: nox.Session, name: str, *other_names: str, when_empty: str | None = None) -> str | None:
args_iter = iter(session.posargs)
names = {name, *other_names}
for arg in args_iter:
if arg in names:
return next(args_iter, when_empty)
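# A quick illustration of _try_find_option's behaviour (the posargs shown are hypothetical):
#   session.posargs == ["--required-version", "1.2.3"]  -> returns "1.2.3"
#   session.posargs == ["-r"]                            -> returns when_empty (None by default)
#   session.posargs == []                                 -> returns None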
@nox.session(name="check-versions")
def check_versions(session: nox.Session) -> None:
import httpx
# Note: this can be linked to a specific hash by adding it between raw and {file.name} as another route segment.
with httpx.Client() as client:
requirements = client.get(
"https://gist.githubusercontent.com/FasterSpeeding/139801789f00d15b4aa8ed2598fb524e/raw/requirements.json"
).json()
# Note: this can be linked to a specific hash by adding it between raw and {file.name} as another route segment.
code = client.get(
"https://gist.githubusercontent.com/FasterSpeeding/139801789f00d15b4aa8ed2598fb524e/raw/check_versions.py"
).read()
session.install(".")
session.install(*requirements)
# This is saved to a temporary file to avoid the source showing up in any of the output.
# A try, finally is used to delete the file rather than relying on delete=True behaviour
# as on Windows the file cannot be accessed by other processes if delete is True.
file = tempfile.NamedTemporaryFile(delete=False)
try:
with file:
file.write(code)
required_version = _try_find_option(session, "--required-version", "-r")
args = ["--required-version", required_version] if required_version else []
session.run("python", file.name, "-r", "tanjun", *args)
finally:
pathlib.Path(file.name).unlink(missing_ok=False)
@nox.session(venv_backend="none")
def cleanup(session: nox.Session) -> None:
import shutil
# Remove directories
for raw_path in ["./dist", "./docs", "./.nox", "./.pytest_cache", "./hikari_tanjun.egg-info", "./coverage_html"]:
path = pathlib.Path(raw_path)
try:
shutil.rmtree(str(path.absolute()))
except Exception as exc:
session.warn(f"[ FAIL ] Failed to remove '{raw_path}': {exc!s}")
else:
session.log(f"[ OK ] Removed '{raw_path}'")
# Remove individual files
for raw_path in ["./.coverage", "./coverage_html.xml"]:
path = pathlib.Path(raw_path)
try:
path.unlink()
except Exception as exc:
session.warn(f"[ FAIL ] Failed to remove '{raw_path}': {exc!s}")
else:
session.log(f"[ OK ] Removed '{raw_path}'")
@nox.session(name="generate-docs", reuse_venv=True)
def generate_docs(session: nox.Session) -> None:
install_requirements(session, ".[docs]")
session.log("Building docs into ./docs")
output_directory = _try_find_option(session, "-o", "--output") or "./docs"
session.run("pdoc", "--docformat", "numpy", "-o", output_directory, "./tanjun", "-t", "./templates")
session.log("Docs generated: %s", pathlib.Path("./docs/index.html").absolute())
if not _try_find_option(session, "-j", "--json", when_empty="true"):
return
import httpx
# Note: this can be linked to a specific hash by adding it between raw and {file.name} as another route segment.
code = httpx.get(
"https://gist.githubusercontent.com/FasterSpeeding/19a6d3f44cdd0a1f3b2437a8c5eef07a/raw/json_index_docs.py"
).read()
# This is saved to a temporary file to avoid the source showing up in any of the output.
# A try, finally is used to delete the file rather than relying on delete=True behaviour
# as on Windows the file cannot be accessed by other processes if delete is True.
file = tempfile.NamedTemporaryFile(delete=False)
try:
with file:
file.write(code)
session.run("python", file.name, "tanjun", "-o", str(pathlib.Path(output_directory) / "search.json"))
finally:
pathlib.Path(file.name).unlink(missing_ok=False)
@nox.session(reuse_venv=True)
def lint(session: nox.Session) -> None:
install_requirements(session, ".[flake8]")
session.run("flake8", *GENERAL_TARGETS)
@nox.session(reuse_venv=True, name="spell-check")
def spell_check(session: nox.Session) -> None:
install_requirements(session, ".[lint]") # include_standard_requirements=False
session.run(
"codespell",
*GENERAL_TARGETS,
".flake8",
".gitignore",
"LICENSE",
"pyproject.toml",
"CHANGELOG.md",
"CODE_OF_CONDUCT.md",
"CONTRIBUTING.md",
"README.md",
"./github",
)
@nox.session(reuse_venv=True)
def build(session: nox.Session) -> None:
session.install("flit")
session.log("Starting build")
session.run("flit", "build")
@nox.session(reuse_venv=True)
def publish(session: nox.Session, test: bool = False) -> None:
if not _try_find_option(session, "--skip-version-check", when_empty="true"):
check_versions(session)
session.install("flit")
env: dict[str, str] = {}
if username := _try_find_option(session, "-u", "--username"):
env["FLIT_USERNAME"] = username
if password := _try_find_option(session, "-p", "--password"):
env["FLIT_PASSWORD"] = password
if index_url := _try_find_option(session, "-i", "--index-url"):
env["FLIT_INDEX_URL"] = index_url
elif test:
env["FLIT_INDEX_URL"] = "https://test.pypi.org/legacy/"
else:
env["FLIT_INDEX_URL"] = "https://upload.pypi.org/legacy/"
session.log("Initiating TestPYPI upload" if test else "Initiating PYPI upload")
session.run("flit", "publish", env=env)
@nox.session(name="test-publish", reuse_venv=True)
def test_publish(session: nox.Session) -> None:
publish(session, test=True)
@nox.session(reuse_venv=True)
def reformat(session: nox.Session) -> None:
install_requirements(session, ".[reformat]") # include_standard_requirements=False
session.run("black", *GENERAL_TARGETS)
session.run("isort", *GENERAL_TARGETS)
@nox.session(reuse_venv=True)
def test(session: nox.Session) -> None:
install_requirements(session, ".[tests]")
# TODO: can import-mode be specified in the config?
session.run("pytest", "--import-mode", "importlib")
@nox.session(name="test-coverage", reuse_venv=True)
def test_coverage(session: nox.Session) -> None:
install_requirements(session, ".[tests]")
# TODO: can import-mode be specified in the config?
# https://github.com/nedbat/coveragepy/issues/1002
session.run("pytest", "--cov=tanjun", "--cov-report", "html:coverage_html", "--cov-report", "xml:coverage.xml")
@nox.session(name="type-check", reuse_venv=True)
def type_check(session: nox.Session) -> None:
install_requirements(session, ".[tests, type_checking]", "-r", "nox-requirements.txt")
if _try_find_option(session, "--force-env", when_empty="True"):
session.env["PYRIGHT_PYTHON_GLOBAL_NODE"] = "off"
if version := _try_find_option(session, "--pyright-version"):
session.env["PYRIGHT_PYTHON_FORCE_VERSION"] = version
session.run("python", "-m", "pyright", "--version")
session.run("python", "-m", "pyright")
@nox.session(name="check-dependencies")
def check_dependencies(session: nox.Session) -> None:
import httpx
# Note: this can be linked to a specific hash by adding it between raw and {file.name} as another route segment.
with httpx.Client() as client:
requirements = client.get(
"https://gist.githubusercontent.com/FasterSpeeding/13e3d871f872fa09cf7bdc4144d62b2b/raw/requirements.json"
).json()
# Note: this can be linked to a specific hash by adding it between raw and {file.name} as another route segment.
code = client.get(
"https://gist.githubusercontent.com/FasterSpeeding/13e3d871f872fa09cf7bdc4144d62b2b/raw/check_dependency.py"
).read()
session.install(*requirements)
# This is saved to a temporary file to avoid the source showing up in any of the output.
# A try, finally is used to delete the file rather than relying on delete=True behaviour
# as on Windows the file cannot be accessed by other processes if delete is True.
file = tempfile.NamedTemporaryFile(delete=False)
try:
with file:
file.write(code)
session.run("python", file.name)
finally:
pathlib.Path(file.name).unlink(missing_ok=False)
|
import os
os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
import pytest
import jax.numpy as jnp
from jaxga.mv import MultiVector
from jaxga.signatures import positive_signature
def _jaxga_mul(a, b):
return a * b
def _mv_ones(num_elements, num_bases):
return MultiVector(
values=jnp.ones([num_bases, num_elements], dtype=jnp.float32),
indices=tuple((i,) for i in range(num_bases)),
signature=positive_signature
)
@pytest.mark.parametrize("num_bases", list(range(1, 10)))
def test_jaxga_mul_mv_mv(num_bases, benchmark):
a = _mv_ones(100, num_bases)
b = _mv_ones(100, num_bases)
_jaxga_mul(a, b)  # warm-up call so any one-time JAX tracing/compilation is excluded from the timed runs
benchmark(_jaxga_mul, a, b)
if __name__ == '__main__':
pytest.main()
|
import sys
import numpy as np
rawalgo, rawimg = sys.stdin.read().strip().split('\n\n')
algo = np.array([1 if c == '#' else 0 for c in rawalgo], dtype=np.int8)
img = np.array([[1 if c == '#' else 0 for c in line] for line in rawimg.split('\n')], dtype=np.int8)
def enhance(img, algo):
# Pad with the edge value so the cells just outside the known image keep tracking the
# infinite background (which can flip every step when algo[0] == 1).
img = np.pad(img, 2, 'edge')
new = np.copy(img)
for i in range(1, img.shape[0] - 1):
for j in range(1, img.shape[1] - 1):
# Read the 3x3 neighbourhood as a 9-bit binary number (top-left pixel is the most
# significant bit) and look up the resulting pixel in the algorithm table.
values = img[i-1:i+2,j-1:j+2].flatten()
index = (values * 2**np.arange(9)[::-1]).sum()
new[i,j] = algo[index]
# Trim one layer of padding: the image grows by one pixel on each side per step.
return new[1:-1,1:-1]
img = np.pad(img, 1)
for _ in range(2):
img = enhance(img, algo)
print("Result:", img.sum())
|
# Copyright 2015: Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from rally.common import logging
from rally import consts
from rally.plugins.openstack import scenario
from rally.plugins.openstack.scenarios.nova import utils
from rally.task import types
from rally.task import validation
"""Scenarios for Nova keypairs."""
@validation.add("required_services", services=[consts.Service.NOVA])
@validation.add("required_platform", platform="openstack", users=True)
@scenario.configure(context={"cleanup@openstack": ["nova"]},
name="NovaKeypair.create_and_list_keypairs",
platform="openstack")
class CreateAndListKeypairs(utils.NovaScenario):
def run(self, **kwargs):
"""Create a keypair with random name and list keypairs.
This scenario creates a keypair and then lists all keypairs.
:param kwargs: Optional additional arguments for keypair creation
"""
keypair_name = self._create_keypair(**kwargs)
self.assertTrue(keypair_name, "Keypair isn't created")
list_keypairs = self._list_keypairs()
self.assertIn(keypair_name, [i.id for i in list_keypairs])
@validation.add("required_services", services=[consts.Service.NOVA])
@validation.add("required_platform", platform="openstack", users=True)
@scenario.configure(context={"cleanup@openstack": ["nova"]},
name="NovaKeypair.create_and_delete_keypair",
platform="openstack")
class CreateAndDeleteKeypair(utils.NovaScenario):
def run(self, **kwargs):
"""Create a keypair with random name and delete keypair.
This scenario creates a keypair and then deletes that keypair.
:param kwargs: Optional additional arguments for keypair creation
"""
keypair = self._create_keypair(**kwargs)
self._delete_keypair(keypair)
@types.convert(image={"type": "glance_image"},
flavor={"type": "nova_flavor"})
@validation.add("image_valid_on_flavor", flavor_param="flavor",
image_param="image")
@validation.add("required_services", services=[consts.Service.NOVA])
@validation.add("required_platform", platform="openstack", users=True)
@scenario.configure(context={"cleanup@openstack": ["nova"]},
name="NovaKeypair.boot_and_delete_server_with_keypair",
platform="openstack")
class BootAndDeleteServerWithKeypair(utils.NovaScenario):
@logging.log_deprecated_args(
"'server_kwargs' has been renamed 'boot_server_kwargs'",
"0.3.2", ["server_kwargs"], once=True)
def run(self, image, flavor, boot_server_kwargs=None,
server_kwargs=None, **kwargs):
"""Boot and delete server with keypair.
Plan of this scenario:
- create a keypair
- boot a VM with the created keypair
- delete the server
- delete the keypair
:param image: ID of the image to be used for server creation
:param flavor: ID of the flavor to be used for server creation
:param boot_server_kwargs: Optional additional arguments for VM
creation
:param server_kwargs: Deprecated alias for boot_server_kwargs
:param kwargs: Optional additional arguments for keypair creation
"""
boot_server_kwargs = boot_server_kwargs or server_kwargs or {}
keypair = self._create_keypair(**kwargs)
server = self._boot_server(image, flavor,
key_name=keypair,
**boot_server_kwargs)
self._delete_server(server)
self._delete_keypair(keypair)
@validation.add("required_services", services=[consts.Service.NOVA])
@validation.add("required_platform", platform="openstack", users=True)
@scenario.configure(context={"cleanup@openstack": ["nova"]},
name="NovaKeypair.create_and_get_keypair",
platform="openstack")
class CreateAndGetKeypair(utils.NovaScenario):
def run(self, **kwargs):
"""Create a keypair and get the keypair details.
:param kwargs: Optional additional arguments for keypair creation
"""
keypair = self._create_keypair(**kwargs)
self._get_keypair(keypair)
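# For reference, a hedged sketch of a Rally task entry that would drive one of the
# scenarios above (the runner and context values are illustrative only):
#
#   {
#       "NovaKeypair.create_and_list_keypairs": [{
#           "runner": {"type": "constant", "times": 10, "concurrency": 2},
#           "context": {"users": {"tenants": 2, "users_per_tenant": 2}}
#       }]
#   }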
|
import mysql.connector
import pandas as pd
mydb = mysql.connector.connect(
host="135.148.9.103",
user="admin",
password="rod@2021",
database="rod_input",
)
mycursor1 = mydb.cursor()
mycursor1.execute("TRUNCATE TABLE `data_input_test`")
mydb.commit()
print("Bien")
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import sys
from teimedlib.textentities import TextEntities
from teimedlib.textentities_log import *
from teimedlib.ualog import Log
def do_main(path_src, path_tags):
path_err = path_src.replace(".txt", "_words.ERR.log")
log_err = Log("w").open(path_err, 1).log
te = TextEntities(path_src, path_tags, log_err)
lst = te.get_rows_entities()
path_log = path_src.replace(".txt", "_words.log")
print(path_log)
word_entities_log(lst, path_log)
if __name__ == "__main__":
path_text = sys.argv[1]
# path_tags = sys.argv[2]
path_tags = "teimcfg/teimtags.csv"
do_main(path_text, path_tags)
|
# Necessary imports. Provides library functions to ease writing tests.
from lib import prebuild, testcase, SUBMITTY_TUTORIAL_DIR
import subprocess
import os
import glob
############################################################################
# COPY THE ASSIGNMENT FROM THE SAMPLE ASSIGNMENTS DIRECTORIES
SAMPLE_ASSIGNMENT_CONFIG = SUBMITTY_TUTORIAL_DIR + "/examples/05_cpp_static_analysis/config"
SAMPLE_SUBMISSIONS = SUBMITTY_TUTORIAL_DIR + "/examples/05_cpp_static_analysis/submissions/"
@prebuild
def initialize(test):
try:
os.mkdir(os.path.join(test.testcase_path, "assignment_config"))
except OSError:
pass
try:
os.mkdir(os.path.join(test.testcase_path, "data"))
except OSError:
pass
subprocess.call(["cp",
os.path.join(SAMPLE_ASSIGNMENT_CONFIG, "config.json"),
os.path.join(test.testcase_path, "assignment_config")])
def cleanup(test):
subprocess.call(["rm"] + ["-f"] +
glob.glob(os.path.join(test.testcase_path, "data", "*cpp")))
@testcase
def solution(test):
cleanup(test)
subprocess.call(["cp",
os.path.join(SAMPLE_SUBMISSIONS, "solution.cpp"),
os.path.join(test.testcase_path, "data", "solution.cpp")])
test.run_compile()
test.run_run()
test.run_validator()
test.diff("grade.txt", "grade.txt_solution", "-b")
@testcase
def buggy(test):
cleanup(test)
subprocess.call(["cp",
os.path.join(SAMPLE_SUBMISSIONS, "buggy.cpp"),
os.path.join(test.testcase_path, "data", "buggy.cpp")])
test.run_compile()
test.run_run()
test.run_validator()
test.diff("grade.txt", "grade.txt_buggy", "-b")
@testcase
def buggy2(test):
cleanup(test)
subprocess.call(["cp",
os.path.join(SAMPLE_SUBMISSIONS, "buggy2.cpp"),
os.path.join(test.testcase_path, "data", "buggy2.cpp")])
test.run_compile()
test.run_run()
test.run_validator()
test.diff("grade.txt", "grade.txt_buggy2", "-b")
@testcase
def buggy3(test):
cleanup(test)
subprocess.call(["cp",
os.path.join(SAMPLE_SUBMISSIONS, "buggy3.cpp"),
os.path.join(test.testcase_path, "data", "buggy3.cpp")])
test.run_compile()
test.run_run()
test.run_validator()
test.diff("grade.txt", "grade.txt_buggy3", "-b")
|
from typing import Any, Type, Union, List
import torch
import torch.nn as nn
from torch import Tensor
from torch.quantization import fuse_modules
from torchvision.models.resnet import Bottleneck, BasicBlock, ResNet, model_urls
from ..._internally_replaced_utils import load_state_dict_from_url
from .utils import _replace_relu, quantize_model
__all__ = ["QuantizableResNet", "resnet18", "resnet50", "resnext101_32x8d"]
quant_model_urls = {
"resnet18_fbgemm": "https://download.pytorch.org/models/quantized/resnet18_fbgemm_16fa66dd.pth",
"resnet50_fbgemm": "https://download.pytorch.org/models/quantized/resnet50_fbgemm_bf931d71.pth",
"resnext101_32x8d_fbgemm": "https://download.pytorch.org/models/quantized/resnext101_32x8_fbgemm_09835ccf.pth",
}
class QuantizableBasicBlock(BasicBlock):
def __init__(self, *args: Any, **kwargs: Any) -> None:
super(QuantizableBasicBlock, self).__init__(*args, **kwargs)
self.add_relu = torch.nn.quantized.FloatFunctional()
def forward(self, x: Tensor) -> Tensor:
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
identity = self.downsample(x)
out = self.add_relu.add_relu(out, identity)
return out
def fuse_model(self) -> None:
torch.quantization.fuse_modules(self, [["conv1", "bn1", "relu"], ["conv2", "bn2"]], inplace=True)
if self.downsample:
torch.quantization.fuse_modules(self.downsample, ["0", "1"], inplace=True)
class QuantizableBottleneck(Bottleneck):
def __init__(self, *args: Any, **kwargs: Any) -> None:
super(QuantizableBottleneck, self).__init__(*args, **kwargs)
self.skip_add_relu = nn.quantized.FloatFunctional()
self.relu1 = nn.ReLU(inplace=False)
self.relu2 = nn.ReLU(inplace=False)
def forward(self, x: Tensor) -> Tensor:
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu1(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu2(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
identity = self.downsample(x)
out = self.skip_add_relu.add_relu(out, identity)
return out
def fuse_model(self) -> None:
fuse_modules(self, [["conv1", "bn1", "relu1"], ["conv2", "bn2", "relu2"], ["conv3", "bn3"]], inplace=True)
if self.downsample:
torch.quantization.fuse_modules(self.downsample, ["0", "1"], inplace=True)
class QuantizableResNet(ResNet):
def __init__(self, *args: Any, **kwargs: Any) -> None:
super(QuantizableResNet, self).__init__(*args, **kwargs)
self.quant = torch.quantization.QuantStub()
self.dequant = torch.quantization.DeQuantStub()
def forward(self, x: Tensor) -> Tensor:
x = self.quant(x)
# Ensure scriptability
# super(QuantizableResNet,self).forward(x)
# is not scriptable
x = self._forward_impl(x)
x = self.dequant(x)
return x
def fuse_model(self) -> None:
r"""Fuse conv/bn/relu modules in resnet models
Fuse conv+bn+relu/ Conv+relu/conv+Bn modules to prepare for quantization.
Model is modified in place. Note that this operation does not change numerics
and the model after modification is in floating point
"""
fuse_modules(self, ["conv1", "bn1", "relu"], inplace=True)
for m in self.modules():
if type(m) == QuantizableBottleneck or type(m) == QuantizableBasicBlock:
m.fuse_model()
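# A hedged sketch of the eager-mode post-training quantization flow these classes are
# built for (the block/layer arguments mirror resnet18; the calibration step is elided):
#
#   model = QuantizableResNet(QuantizableBasicBlock, [2, 2, 2, 2]).eval()
#   model.fuse_model()
#   model.qconfig = torch.quantization.get_default_qconfig("fbgemm")
#   torch.quantization.prepare(model, inplace=True)
#   # ...run a few representative batches through the model to calibrate observers...
#   torch.quantization.convert(model, inplace=True)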
def _resnet(
arch: str,
block: Type[Union[BasicBlock, Bottleneck]],
layers: List[int],
pretrained: bool,
progress: bool,
quantize: bool,
**kwargs: Any,
) -> QuantizableResNet:
model = QuantizableResNet(block, layers, **kwargs)
_replace_relu(model)
if quantize:
# TODO use pretrained as a string to specify the backend
backend = "fbgemm"
quantize_model(model, backend)
else:
assert pretrained in [True, False]
if pretrained:
if quantize:
model_url = quant_model_urls[arch + "_" + backend]
else:
model_url = model_urls[arch]
state_dict = load_state_dict_from_url(model_url, progress=progress)
model.load_state_dict(state_dict)
return model
def resnet18(
pretrained: bool = False,
progress: bool = True,
quantize: bool = False,
**kwargs: Any,
) -> QuantizableResNet:
r"""ResNet-18 model from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
quantize (bool): If True, return a quantized version of the model
"""
return _resnet("resnet18", QuantizableBasicBlock, [2, 2, 2, 2], pretrained, progress, quantize, **kwargs)
def resnet50(
pretrained: bool = False,
progress: bool = True,
quantize: bool = False,
**kwargs: Any,
) -> QuantizableResNet:
r"""ResNet-50 model from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
quantize (bool): If True, return a quantized version of the model
"""
return _resnet("resnet50", QuantizableBottleneck, [3, 4, 6, 3], pretrained, progress, quantize, **kwargs)
def resnext101_32x8d(
pretrained: bool = False,
progress: bool = True,
quantize: bool = False,
**kwargs: Any,
) -> QuantizableResNet:
r"""ResNeXt-101 32x8d model from
`"Aggregated Residual Transformation for Deep Neural Networks" <https://arxiv.org/pdf/1611.05431.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
quantize (bool): If True, return a quantized version of the model
"""
kwargs["groups"] = 32
kwargs["width_per_group"] = 8
return _resnet("resnext101_32x8d", QuantizableBottleneck, [3, 4, 23, 3], pretrained, progress, quantize, **kwargs)
|
# -*- coding: utf-8 -*-
"""
Created on Mon Jul 6 18:03:09 2020
@author: Jonathan A. Yepez M.
"""
# Task Description
"""
The file auto.csv contains the following data for a number of different automobiles:
* Cylinders
* Displacement
* Horsepower
* Weight
* Acceleration
* Model year
* Origin
* Fuel consumption (mpg)
The units of the features are not in the International System (SI).
The 'origin' variable is a code that identifies the country of origin.
TASK: Build a model that estimates a vehicle's fuel consumption (mpg) from the rest of the variables.
"""
#Import the libraries that will be used in this case
import pandas as pd
from pandas.plotting import scatter_matrix #for a specific stage in EDA
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error
import seaborn as sns #visualisation
import matplotlib.pyplot as plt #visualisation
#Read the data and create a dataframe
df = pd.read_csv("auto.csv")
print("--------------------")
print(df.info()) #a quick view of the dataframe structure
print("--------------------")
print(df.describe()) #a more in-depth description of the information contained in the df
print("--------------------")
print(df.head()) #show the first 5 entries of the dataframe
print("the columns for this dataframe are:")
print(df.columns)
#we check for missing values (NAs)
#print(df.isnull().sum())
if(df.isnull().sum().sum()== 0):
print("\nThere are NO missing values.\n")
else:
print("\nThere are",df.isnull().sum().sum(),"missing values in the dataframe!\n")
#EDA => Exploratory Data Analysis
df = df.drop_duplicates() #remove duplicates (if applicable)
#Scatter matrix for the whole data
scatter_matrix(df, figsize = (12, 12), diagonal = 'kde');
plt.figure(figsize=(10, 6)) #size configuration for plotting
sns.distplot(df['mpg'], color='b', hist_kws={'alpha': 0.4}); #we first generate a distribution plot for 'mpg'
# The dataset shows that fuel consumption tends to cluster between 20 and 30 mpg.
df_cont = df.select_dtypes(include = ['float64']) #we select the continuous variables
print("The continuous variables for this case are: \n")
print(df_cont.head())
#Analysing the continuous variables -> scatter plots
for i in range(len(df_cont.columns)-1):
sns.pairplot(data=df_cont, x_vars=df_cont.columns[i], y_vars=['mpg'], height = 5, aspect=2) #scatter plot vars vs 'mpg'
"""
En este caso, notamos una relación inversamente proporcional entre el consumo (mpg) y las
variables displacement, horsepower, y weight. Esto nos indica que a mayor potencia y
desplazamiento (en términos del motor), el consumo de gasolina será mayor y por ende se
tendrá un menor 'rendimiento' de mpg.
En el caso de la variable acceleration, se nota un grafico de dispersión sin una tendencia
clara. Esto puede deberse a que esta característica varía entre modelos y tipos de carro
casi intependientemente del consumo de gasolina.
"""
df_cat = df.select_dtypes(include = ['int64']) #we select the 'categorical' variables.
print("\nThe categorical variables for this case are: \n")
print(df_cat.head())
for i in range(len(df_cat.columns)):
sns.catplot(x=df_cat.columns[i], y="mpg", data=df, alpha=0.5) #generate a catplot
ax = sns.boxplot(x=df_cat.columns[i], y="mpg", data=df) #add a boxplot on top of the catplot
plt.setp(ax.artists, alpha=0.6, edgecolor="k")
"""
Tras haber presentado las gráficas para las variables categóricas, se nota que el número
de cilindros muestra cierta tendencia en términos generales. A grosso modo, se puede asumir
que cuando un vehículo tiene 8 cilindros, el rendimiento en mpg tiende a ser notablemente
menor a uno que tenga 4 cilindros.
Asi mismo, el origen del vehículo indica que, si bien es cierto no hay una variación extrema,
se puede asumir que aquellos provenientes del país '3', tienen un mejor consumo de
gasolina.
En el caso del año de fabricación (model_year), se observa una tendencia general a la mejora
de mpg conforme se avanza en el tiempo. Esto puede deberse a los avances en el área de la
mecánica automotriz y la implementación de mejores diseños a nivel de componentes mecánicos
como aerodinámicos.
"""
#Implementing the Regression Model
indep_vars = df.iloc[:,:-1] #selecting 'independent variables'
dep_var = df[["mpg"]] #selecting the 'dependent variable'
#separating the data into train and test sets
x_train, x_test, y_train, y_test = train_test_split(indep_vars,dep_var)
model = LinearRegression() #constructor that initializes a LinearRegression object
model.fit(x_train, y_train) #fit the model using the training set
pred_train = model.predict(x_train) #prediction based on the training set
pred_test = model.predict(x_test) #prediction based on the test set
#Checking results using R^2 and MSE
print("====================\n")
print('\nR2 on the training set: ', round(model.score(x_train, y_train), 4))
print('MSE on the training set: ', round(mean_squared_error(y_train, pred_train), 2))
print("-----------------------")
print('R2 on the validation set: ', round(model.score(x_test, y_test), 4))
print('MSE on the validation set: ', round(mean_squared_error(y_test, pred_test), 2))
"""
Se ha obtenido resultados aceptables en terminos de precisión. Dado que los valores de MSE
y R^2 en los test y train sets son similares se puede determinar que no se presenta
overfitting.
Los parametros de la regresión se muestran a continuación
"""
print("====================\n")
print("Los parametros de la regresion son: ")
print(model.coef_)
print("El termino independiente de la regresión es: ", model.intercept_)
|
from django.contrib import admin
from ballots.models import Poll, Category, CategoryItem, Ballot, Vote, Answer, AnswerItem
# class ItemInline(admin.TabularInline):
# model = CategoryItem
#
#
# class CategoryAdmin(admin.ModelAdmin):
# inlines = [ItemInline,
# ]
class CategoryInline(admin.TabularInline):
model = Category
class PollAdmin(admin.ModelAdmin):
inlines = [CategoryInline,
]
class VoteInline(admin.TabularInline):
model = Vote
class BallotAdmin(admin.ModelAdmin):
inlines = [VoteInline,
]
admin.site.register(Poll, PollAdmin)
admin.site.register(Category)
admin.site.register(CategoryItem)
admin.site.register(Ballot, BallotAdmin)
admin.site.register(Vote)
admin.site.register(Answer)
admin.site.register(AnswerItem)
|
"""The tests for numeric state automation."""
from datetime import timedelta
import unittest
from unittest.mock import patch
import homeassistant.components.automation as automation
from homeassistant.core import Context, callback
from homeassistant.setup import setup_component
import homeassistant.util.dt as dt_util
from tests.common import (
get_test_home_assistant, mock_component, fire_time_changed,
assert_setup_component)
from tests.components.automation import common
# pylint: disable=invalid-name
class TestAutomationNumericState(unittest.TestCase):
"""Test the event automation."""
def setUp(self):
"""Set up things to be run when tests are started."""
self.hass = get_test_home_assistant()
mock_component(self.hass, 'group')
self.calls = []
@callback
def record_call(service):
"""Record calls."""
self.calls.append(service)
self.hass.services.register('test', 'automation', record_call)
def tearDown(self): # pylint: disable=invalid-name
"""Stop everything that was started."""
self.hass.stop()
def test_if_fires_on_entity_change_below(self):
"""Test the firing with changed entity."""
context = Context()
assert setup_component(self.hass, automation.DOMAIN, {
automation.DOMAIN: {
'trigger': {
'platform': 'numeric_state',
'entity_id': 'test.entity',
'below': 10,
},
'action': {
'service': 'test.automation'
}
}
})
# 9 is below 10
self.hass.states.set('test.entity', 9, context=context)
self.hass.block_till_done()
assert 1 == len(self.calls)
assert self.calls[0].context is context
# Raise the state above the threshold so a later drop below 10 could re-trigger,
# then turn the automation off and verify that dropping to 9 no longer fires
self.hass.states.set('test.entity', 12)
common.turn_off(self.hass)
self.hass.block_till_done()
self.hass.states.set('test.entity', 9)
self.hass.block_till_done()
assert 1 == len(self.calls)
def test_if_fires_on_entity_change_over_to_below(self):
"""Test the firing with changed entity."""
self.hass.states.set('test.entity', 11)
self.hass.block_till_done()
assert setup_component(self.hass, automation.DOMAIN, {
automation.DOMAIN: {
'trigger': {
'platform': 'numeric_state',
'entity_id': 'test.entity',
'below': 10,
},
'action': {
'service': 'test.automation'
}
}
})
# 9 is below 10
self.hass.states.set('test.entity', 9)
self.hass.block_till_done()
assert 1 == len(self.calls)
def test_if_fires_on_entities_change_over_to_below(self):
"""Test the firing with changed entities."""
self.hass.states.set('test.entity_1', 11)
self.hass.states.set('test.entity_2', 11)
self.hass.block_till_done()
assert setup_component(self.hass, automation.DOMAIN, {
automation.DOMAIN: {
'trigger': {
'platform': 'numeric_state',
'entity_id': [
'test.entity_1',
'test.entity_2',
],
'below': 10,
},
'action': {
'service': 'test.automation'
}
}
})
# 9 is below 10
self.hass.states.set('test.entity_1', 9)
self.hass.block_till_done()
assert 1 == len(self.calls)
self.hass.states.set('test.entity_2', 9)
self.hass.block_till_done()
assert 2 == len(self.calls)
def test_if_not_fires_on_entity_change_below_to_below(self):
"""Test the firing with changed entity."""
context = Context()
self.hass.states.set('test.entity', 11)
self.hass.block_till_done()
assert setup_component(self.hass, automation.DOMAIN, {
automation.DOMAIN: {
'trigger': {
'platform': 'numeric_state',
'entity_id': 'test.entity',
'below': 10,
},
'action': {
'service': 'test.automation'
}
}
})
# 9 is below 10 so this should fire
self.hass.states.set('test.entity', 9, context=context)
self.hass.block_till_done()
assert 1 == len(self.calls)
assert self.calls[0].context is context
# already below so should not fire again
self.hass.states.set('test.entity', 5)
self.hass.block_till_done()
assert 1 == len(self.calls)
# still below so should not fire again
self.hass.states.set('test.entity', 3)
self.hass.block_till_done()
assert 1 == len(self.calls)
def test_if_not_below_fires_on_entity_change_to_equal(self):
"""Test the firing with changed entity."""
self.hass.states.set('test.entity', 11)
self.hass.block_till_done()
assert setup_component(self.hass, automation.DOMAIN, {
automation.DOMAIN: {
'trigger': {
'platform': 'numeric_state',
'entity_id': 'test.entity',
'below': 10,
},
'action': {
'service': 'test.automation'
}
}
})
# 10 is not below 10, so this should not fire
self.hass.states.set('test.entity', 10)
self.hass.block_till_done()
assert 0 == len(self.calls)
def test_if_fires_on_initial_entity_below(self):
"""Test the firing when starting with a match."""
self.hass.states.set('test.entity', 9)
self.hass.block_till_done()
assert setup_component(self.hass, automation.DOMAIN, {
automation.DOMAIN: {
'trigger': {
'platform': 'numeric_state',
'entity_id': 'test.entity',
'below': 10,
},
'action': {
'service': 'test.automation'
}
}
})
# Fire on first update even if initial state was already below
self.hass.states.set('test.entity', 8)
self.hass.block_till_done()
assert 1 == len(self.calls)
def test_if_fires_on_initial_entity_above(self):
"""Test the firing when starting with a match."""
self.hass.states.set('test.entity', 11)
self.hass.block_till_done()
assert setup_component(self.hass, automation.DOMAIN, {
automation.DOMAIN: {
'trigger': {
'platform': 'numeric_state',
'entity_id': 'test.entity',
'above': 10,
},
'action': {
'service': 'test.automation'
}
}
})
# Fire on first update even if initial state was already above
self.hass.states.set('test.entity', 12)
self.hass.block_till_done()
assert 1 == len(self.calls)
def test_if_fires_on_entity_change_above(self):
"""Test the firing with changed entity."""
assert setup_component(self.hass, automation.DOMAIN, {
automation.DOMAIN: {
'trigger': {
'platform': 'numeric_state',
'entity_id': 'test.entity',
'above': 10,
},
'action': {
'service': 'test.automation'
}
}
})
# 11 is above 10
self.hass.states.set('test.entity', 11)
self.hass.block_till_done()
assert 1 == len(self.calls)
def test_if_fires_on_entity_change_below_to_above(self):
"""Test the firing with changed entity."""
# set initial state
self.hass.states.set('test.entity', 9)
self.hass.block_till_done()
assert setup_component(self.hass, automation.DOMAIN, {
automation.DOMAIN: {
'trigger': {
'platform': 'numeric_state',
'entity_id': 'test.entity',
'above': 10,
},
'action': {
'service': 'test.automation'
}
}
})
# 11 is above 10 and 9 is below
self.hass.states.set('test.entity', 11)
self.hass.block_till_done()
assert 1 == len(self.calls)
def test_if_not_fires_on_entity_change_above_to_above(self):
"""Test the firing with changed entity."""
# set initial state
self.hass.states.set('test.entity', 9)
self.hass.block_till_done()
assert setup_component(self.hass, automation.DOMAIN, {
automation.DOMAIN: {
'trigger': {
'platform': 'numeric_state',
'entity_id': 'test.entity',
'above': 10,
},
'action': {
'service': 'test.automation'
}
}
})
# 12 is above 10 so this should fire
self.hass.states.set('test.entity', 12)
self.hass.block_till_done()
assert 1 == len(self.calls)
# already above, should not fire again
self.hass.states.set('test.entity', 15)
self.hass.block_till_done()
assert 1 == len(self.calls)
def test_if_not_above_fires_on_entity_change_to_equal(self):
"""Test the firing with changed entity."""
# set initial state
self.hass.states.set('test.entity', 9)
self.hass.block_till_done()
assert setup_component(self.hass, automation.DOMAIN, {
automation.DOMAIN: {
'trigger': {
'platform': 'numeric_state',
'entity_id': 'test.entity',
'above': 10,
},
'action': {
'service': 'test.automation'
}
}
})
# 10 is not above 10, so this should not fire
self.hass.states.set('test.entity', 10)
self.hass.block_till_done()
assert 0 == len(self.calls)
def test_if_fires_on_entity_change_below_range(self):
"""Test the firing with changed entity."""
assert setup_component(self.hass, automation.DOMAIN, {
automation.DOMAIN: {
'trigger': {
'platform': 'numeric_state',
'entity_id': 'test.entity',
'below': 10,
'above': 5,
},
'action': {
'service': 'test.automation'
}
}
})
# 9 is below 10
self.hass.states.set('test.entity', 9)
self.hass.block_till_done()
assert 1 == len(self.calls)
def test_if_fires_on_entity_change_below_above_range(self):
"""Test the firing with changed entity."""
assert setup_component(self.hass, automation.DOMAIN, {
automation.DOMAIN: {
'trigger': {
'platform': 'numeric_state',
'entity_id': 'test.entity',
'below': 10,
'above': 5,
},
'action': {
'service': 'test.automation'
}
}
})
# 4 is below the lower bound of 5, so it is outside the range and should not fire
self.hass.states.set('test.entity', 4)
self.hass.block_till_done()
assert 0 == len(self.calls)
def test_if_fires_on_entity_change_over_to_below_range(self):
"""Test the firing with changed entity."""
self.hass.states.set('test.entity', 11)
self.hass.block_till_done()
assert setup_component(self.hass, automation.DOMAIN, {
automation.DOMAIN: {
'trigger': {
'platform': 'numeric_state',
'entity_id': 'test.entity',
'below': 10,
'above': 5,
},
'action': {
'service': 'test.automation'
}
}
})
# 9 is below 10
self.hass.states.set('test.entity', 9)
self.hass.block_till_done()
assert 1 == len(self.calls)
def test_if_fires_on_entity_change_over_to_below_above_range(self):
"""Test the firing with changed entity."""
self.hass.states.set('test.entity', 11)
self.hass.block_till_done()
assert setup_component(self.hass, automation.DOMAIN, {
automation.DOMAIN: {
'trigger': {
'platform': 'numeric_state',
'entity_id': 'test.entity',
'below': 10,
'above': 5,
},
'action': {
'service': 'test.automation'
}
}
})
# 4 is below 5 so it should not fire
self.hass.states.set('test.entity', 4)
self.hass.block_till_done()
assert 0 == len(self.calls)
def test_if_not_fires_if_entity_not_match(self):
"""Test if not fired with non matching entity."""
assert setup_component(self.hass, automation.DOMAIN, {
automation.DOMAIN: {
'trigger': {
'platform': 'numeric_state',
'entity_id': 'test.another_entity',
'below': 100,
},
'action': {
'service': 'test.automation'
}
}
})
self.hass.states.set('test.entity', 11)
self.hass.block_till_done()
assert 0 == len(self.calls)
def test_if_fires_on_entity_change_below_with_attribute(self):
"""Test attributes change."""
assert setup_component(self.hass, automation.DOMAIN, {
automation.DOMAIN: {
'trigger': {
'platform': 'numeric_state',
'entity_id': 'test.entity',
'below': 10,
},
'action': {
'service': 'test.automation'
}
}
})
# 9 is below 10
self.hass.states.set('test.entity', 9, {'test_attribute': 11})
self.hass.block_till_done()
assert 1 == len(self.calls)
def test_if_not_fires_on_entity_change_not_below_with_attribute(self):
"""Test attributes."""
assert setup_component(self.hass, automation.DOMAIN, {
automation.DOMAIN: {
'trigger': {
'platform': 'numeric_state',
'entity_id': 'test.entity',
'below': 10,
},
'action': {
'service': 'test.automation'
}
}
})
# 11 is not below 10
self.hass.states.set('test.entity', 11, {'test_attribute': 9})
self.hass.block_till_done()
assert 0 == len(self.calls)
def test_if_fires_on_attribute_change_with_attribute_below(self):
"""Test attributes change."""
assert setup_component(self.hass, automation.DOMAIN, {
automation.DOMAIN: {
'trigger': {
'platform': 'numeric_state',
'entity_id': 'test.entity',
'value_template': '{{ state.attributes.test_attribute }}',
'below': 10,
},
'action': {
'service': 'test.automation'
}
}
})
# 9 is below 10
self.hass.states.set('test.entity', 'entity', {'test_attribute': 9})
self.hass.block_till_done()
assert 1 == len(self.calls)
def test_if_not_fires_on_attribute_change_with_attribute_not_below(self):
"""Test attributes change."""
assert setup_component(self.hass, automation.DOMAIN, {
automation.DOMAIN: {
'trigger': {
'platform': 'numeric_state',
'entity_id': 'test.entity',
'value_template': '{{ state.attributes.test_attribute }}',
'below': 10,
},
'action': {
'service': 'test.automation'
}
}
})
# 11 is not below 10
self.hass.states.set('test.entity', 'entity', {'test_attribute': 11})
self.hass.block_till_done()
assert 0 == len(self.calls)
def test_if_not_fires_on_entity_change_with_attribute_below(self):
"""Test attributes change."""
assert setup_component(self.hass, automation.DOMAIN, {
automation.DOMAIN: {
'trigger': {
'platform': 'numeric_state',
'entity_id': 'test.entity',
'value_template': '{{ state.attributes.test_attribute }}',
'below': 10,
},
'action': {
'service': 'test.automation'
}
}
})
# 11 is not below 10, entity state value should not be tested
self.hass.states.set('test.entity', '9', {'test_attribute': 11})
self.hass.block_till_done()
assert 0 == len(self.calls)
def test_if_not_fires_on_entity_change_with_not_attribute_below(self):
"""Test attributes change."""
assert setup_component(self.hass, automation.DOMAIN, {
automation.DOMAIN: {
'trigger': {
'platform': 'numeric_state',
'entity_id': 'test.entity',
'value_template': '{{ state.attributes.test_attribute }}',
'below': 10,
},
'action': {
'service': 'test.automation'
}
}
})
# no test_attribute is set, so the value template cannot produce a number and this should not fire
self.hass.states.set('test.entity', 'entity')
self.hass.block_till_done()
assert 0 == len(self.calls)
def test_fires_on_attr_change_with_attribute_below_and_multiple_attr(self):
"""Test attributes change."""
assert setup_component(self.hass, automation.DOMAIN, {
automation.DOMAIN: {
'trigger': {
'platform': 'numeric_state',
'entity_id': 'test.entity',
'value_template': '{{ state.attributes.test_attribute }}',
'below': 10,
},
'action': {
'service': 'test.automation'
}
}
})
# 9 is below 10, so this should fire even with the extra attribute present
self.hass.states.set('test.entity', 'entity',
{'test_attribute': 9, 'not_test_attribute': 11})
self.hass.block_till_done()
assert 1 == len(self.calls)
def test_template_list(self):
"""Test template list."""
assert setup_component(self.hass, automation.DOMAIN, {
automation.DOMAIN: {
'trigger': {
'platform': 'numeric_state',
'entity_id': 'test.entity',
'value_template':
'{{ state.attributes.test_attribute[2] }}',
'below': 10,
},
'action': {
'service': 'test.automation'
}
}
})
# 3 is below 10
self.hass.states.set('test.entity', 'entity',
{'test_attribute': [11, 15, 3]})
self.hass.block_till_done()
assert 1 == len(self.calls)
def test_template_string(self):
"""Test template string."""
assert setup_component(self.hass, automation.DOMAIN, {
automation.DOMAIN: {
'trigger': {
'platform': 'numeric_state',
'entity_id': 'test.entity',
'value_template':
'{{ state.attributes.test_attribute | multiply(10) }}',
'below': 10,
},
'action': {
'service': 'test.automation',
'data_template': {
'some': '{{ trigger.%s }}' % '}} - {{ trigger.'.join((
'platform', 'entity_id', 'below', 'above',
'from_state.state', 'to_state.state'))
},
}
}
})
self.hass.states.set('test.entity', 'test state 1',
{'test_attribute': '1.2'})
self.hass.block_till_done()
self.hass.states.set('test.entity', 'test state 2',
{'test_attribute': '0.9'})
self.hass.block_till_done()
assert 1 == len(self.calls)
assert 'numeric_state - test.entity - 10.0 - None - test state 1 - ' \
'test state 2' == \
self.calls[0].data['some']
def test_not_fires_on_attr_change_with_attr_not_below_multiple_attr(self):
"""Test if not fired changed attributes."""
assert setup_component(self.hass, automation.DOMAIN, {
automation.DOMAIN: {
'trigger': {
'platform': 'numeric_state',
'entity_id': 'test.entity',
'value_template': '{{ state.attributes.test_attribute }}',
'below': 10,
},
'action': {
'service': 'test.automation'
}
}
})
# 11 is not below 10
self.hass.states.set('test.entity', 'entity',
{'test_attribute': 11, 'not_test_attribute': 9})
self.hass.block_till_done()
assert 0 == len(self.calls)
def test_if_action(self):
"""Test if action."""
entity_id = 'domain.test_entity'
assert setup_component(self.hass, automation.DOMAIN, {
automation.DOMAIN: {
'trigger': {
'platform': 'event',
'event_type': 'test_event',
},
'condition': {
'condition': 'numeric_state',
'entity_id': entity_id,
'above': 8,
'below': 12,
},
'action': {
'service': 'test.automation'
}
}
})
self.hass.states.set(entity_id, 10)
self.hass.bus.fire('test_event')
self.hass.block_till_done()
assert 1 == len(self.calls)
self.hass.states.set(entity_id, 8)
self.hass.bus.fire('test_event')
self.hass.block_till_done()
assert 1 == len(self.calls)
self.hass.states.set(entity_id, 9)
self.hass.bus.fire('test_event')
self.hass.block_till_done()
assert 2 == len(self.calls)
def test_if_fails_setup_bad_for(self):
"""Test for setup failure for bad for."""
with assert_setup_component(0):
assert setup_component(self.hass, automation.DOMAIN, {
automation.DOMAIN: {
'trigger': {
'platform': 'numeric_state',
'entity_id': 'test.entity',
'above': 8,
'below': 12,
'for': {
'invalid': 5
},
},
'action': {
'service': 'homeassistant.turn_on',
}
}})
def test_if_fails_setup_for_without_above_below(self):
"""Test for setup failures for missing above or below."""
with assert_setup_component(0):
assert setup_component(self.hass, automation.DOMAIN, {
automation.DOMAIN: {
'trigger': {
'platform': 'numeric_state',
'entity_id': 'test.entity',
'for': {
'seconds': 5
},
},
'action': {
'service': 'homeassistant.turn_on',
}
}})
def test_if_not_fires_on_entity_change_with_for(self):
"""Test for not firing on entity change with for."""
assert setup_component(self.hass, automation.DOMAIN, {
automation.DOMAIN: {
'trigger': {
'platform': 'numeric_state',
'entity_id': 'test.entity',
'above': 8,
'below': 12,
'for': {
'seconds': 5
},
},
'action': {
'service': 'test.automation'
}
}
})
self.hass.states.set('test.entity', 9)
self.hass.block_till_done()
self.hass.states.set('test.entity', 15)
self.hass.block_till_done()
fire_time_changed(self.hass, dt_util.utcnow() + timedelta(seconds=10))
self.hass.block_till_done()
assert 0 == len(self.calls)
def test_if_not_fires_on_entities_change_with_for_after_stop(self):
"""Test for not firing on entities change with for after stop."""
assert setup_component(self.hass, automation.DOMAIN, {
automation.DOMAIN: {
'trigger': {
'platform': 'numeric_state',
'entity_id': [
'test.entity_1',
'test.entity_2',
],
'above': 8,
'below': 12,
'for': {
'seconds': 5
},
},
'action': {
'service': 'test.automation'
}
}
})
self.hass.states.set('test.entity_1', 9)
self.hass.states.set('test.entity_2', 9)
self.hass.block_till_done()
fire_time_changed(self.hass, dt_util.utcnow() + timedelta(seconds=10))
self.hass.block_till_done()
assert 2 == len(self.calls)
self.hass.states.set('test.entity_1', 15)
self.hass.states.set('test.entity_2', 15)
self.hass.block_till_done()
self.hass.states.set('test.entity_1', 9)
self.hass.states.set('test.entity_2', 9)
self.hass.block_till_done()
common.turn_off(self.hass)
self.hass.block_till_done()
fire_time_changed(self.hass, dt_util.utcnow() + timedelta(seconds=10))
self.hass.block_till_done()
assert 2 == len(self.calls)
def test_if_fires_on_entity_change_with_for_attribute_change(self):
"""Test for firing on entity change with for and attribute change."""
assert setup_component(self.hass, automation.DOMAIN, {
automation.DOMAIN: {
'trigger': {
'platform': 'numeric_state',
'entity_id': 'test.entity',
'above': 8,
'below': 12,
'for': {
'seconds': 5
},
},
'action': {
'service': 'test.automation'
}
}
})
utcnow = dt_util.utcnow()
with patch('homeassistant.core.dt_util.utcnow') as mock_utcnow:
mock_utcnow.return_value = utcnow
self.hass.states.set('test.entity', 9)
self.hass.block_till_done()
mock_utcnow.return_value += timedelta(seconds=4)
fire_time_changed(self.hass, mock_utcnow.return_value)
self.hass.states.set('test.entity', 9,
attributes={"mock_attr": "attr_change"})
self.hass.block_till_done()
assert 0 == len(self.calls)
mock_utcnow.return_value += timedelta(seconds=4)
fire_time_changed(self.hass, mock_utcnow.return_value)
self.hass.block_till_done()
assert 1 == len(self.calls)
def test_if_fires_on_entity_change_with_for(self):
"""Test for firing on entity change with for."""
assert setup_component(self.hass, automation.DOMAIN, {
automation.DOMAIN: {
'trigger': {
'platform': 'numeric_state',
'entity_id': 'test.entity',
'above': 8,
'below': 12,
'for': {
'seconds': 5
},
},
'action': {
'service': 'test.automation'
}
}
})
self.hass.states.set('test.entity', 9)
self.hass.block_till_done()
fire_time_changed(self.hass, dt_util.utcnow() + timedelta(seconds=10))
self.hass.block_till_done()
assert 1 == len(self.calls)
def test_wait_template_with_trigger(self):
"""Test using wait template with 'trigger.entity_id'."""
assert setup_component(self.hass, automation.DOMAIN, {
automation.DOMAIN: {
'trigger': {
'platform': 'numeric_state',
'entity_id': 'test.entity',
'above': 10,
},
'action': [
{'wait_template':
"{{ states(trigger.entity_id) | int < 10 }}"},
{'service': 'test.automation',
'data_template': {
'some':
'{{ trigger.%s }}' % '}} - {{ trigger.'.join((
'platform', 'entity_id', 'to_state.state'))
}}
],
}
})
self.hass.block_till_done()
self.calls = []
self.hass.states.set('test.entity', '12')
self.hass.block_till_done()
self.hass.states.set('test.entity', '8')
self.hass.block_till_done()
assert 1 == len(self.calls)
assert 'numeric_state - test.entity - 12' == \
self.calls[0].data['some']
|
# Generated by Django 3.2.10 on 2022-03-30 10:18
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
import materials.models
class Migration(migrations.Migration):
initial = True
dependencies = [
('auth', '0012_alter_user_first_name_max_length'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('distributions', '0001_initial'),
('bibliography', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='MaterialProperty',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(db_index=True, default=django.utils.timezone.now, verbose_name='Created at')),
('lastmodified_at', models.DateTimeField(db_index=True, default=django.utils.timezone.now, verbose_name='Last modified at')),
('name', models.CharField(max_length=255)),
('description', models.TextField(blank=True, null=True)),
('unit', models.CharField(max_length=63)),
('created_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='materials_materialproperty_created', to=settings.AUTH_USER_MODEL, verbose_name='Created by')),
('lastmodified_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='materials_materialproperty_lastmodified', to=settings.AUTH_USER_MODEL, verbose_name='Last modified by')),
('owner', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to=settings.AUTH_USER_MODEL)),
('visible_to_groups', models.ManyToManyField(to='auth.Group')),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='MaterialPropertyValue',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(db_index=True, default=django.utils.timezone.now, verbose_name='Created at')),
('lastmodified_at', models.DateTimeField(db_index=True, default=django.utils.timezone.now, verbose_name='Last modified at')),
('name', models.CharField(max_length=255)),
('description', models.TextField(blank=True, null=True)),
('average', models.FloatField()),
('standard_deviation', models.FloatField()),
('created_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='materials_materialpropertyvalue_created', to=settings.AUTH_USER_MODEL, verbose_name='Created by')),
('lastmodified_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='materials_materialpropertyvalue_lastmodified', to=settings.AUTH_USER_MODEL, verbose_name='Last modified by')),
('owner', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to=settings.AUTH_USER_MODEL)),
('property', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='materials.materialproperty')),
('visible_to_groups', models.ManyToManyField(to='auth.Group')),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='SampleSeries',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(db_index=True, default=django.utils.timezone.now, verbose_name='Created at')),
('lastmodified_at', models.DateTimeField(db_index=True, default=django.utils.timezone.now, verbose_name='Last modified at')),
('name', models.CharField(max_length=255)),
('description', models.TextField(blank=True, null=True)),
('preview', models.ImageField(default='materials/img/generic_material.jpg', upload_to='')),
('publish', models.BooleanField(default=False)),
('standard', models.BooleanField(default=True)),
('created_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='materials_sampleseries_created', to=settings.AUTH_USER_MODEL, verbose_name='Created by')),
('lastmodified_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='materials_sampleseries_lastmodified', to=settings.AUTH_USER_MODEL, verbose_name='Last modified by')),
('owner', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to=settings.AUTH_USER_MODEL)),
('temporal_distributions', models.ManyToManyField(to='distributions.TemporalDistribution')),
('visible_to_groups', models.ManyToManyField(to='auth.Group')),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='Sample',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(db_index=True, default=django.utils.timezone.now, verbose_name='Created at')),
('lastmodified_at', models.DateTimeField(db_index=True, default=django.utils.timezone.now, verbose_name='Last modified at')),
('name', models.CharField(max_length=255)),
('description', models.TextField(blank=True, null=True)),
('taken_at', models.DateTimeField(blank=True, null=True)),
('preview', models.ImageField(blank=True, null=True, upload_to='')),
('created_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='materials_sample_created', to=settings.AUTH_USER_MODEL, verbose_name='Created by')),
('lastmodified_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='materials_sample_lastmodified', to=settings.AUTH_USER_MODEL, verbose_name='Last modified by')),
('owner', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to=settings.AUTH_USER_MODEL)),
('properties', models.ManyToManyField(to='materials.MaterialPropertyValue')),
('series', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='samples', to='materials.sampleseries')),
('sources', models.ManyToManyField(to='bibliography.Source')),
('timestep', models.ForeignKey(null=True, on_delete=django.db.models.deletion.PROTECT, related_name='samples', to='distributions.timestep')),
('visible_to_groups', models.ManyToManyField(to='auth.Group')),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='MaterialComponentGroup',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(db_index=True, default=django.utils.timezone.now, verbose_name='Created at')),
('lastmodified_at', models.DateTimeField(db_index=True, default=django.utils.timezone.now, verbose_name='Last modified at')),
('name', models.CharField(max_length=255)),
('description', models.TextField(blank=True, null=True)),
('created_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='materials_materialcomponentgroup_created', to=settings.AUTH_USER_MODEL, verbose_name='Created by')),
('lastmodified_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='materials_materialcomponentgroup_lastmodified', to=settings.AUTH_USER_MODEL, verbose_name='Last modified by')),
('owner', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to=settings.AUTH_USER_MODEL)),
('visible_to_groups', models.ManyToManyField(to='auth.Group')),
],
options={
'verbose_name': 'material_component_group',
'verbose_name_plural': 'groups',
'unique_together': {('name', 'owner')},
},
),
migrations.CreateModel(
name='MaterialCategory',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(db_index=True, default=django.utils.timezone.now, verbose_name='Created at')),
('lastmodified_at', models.DateTimeField(db_index=True, default=django.utils.timezone.now, verbose_name='Last modified at')),
('name', models.CharField(max_length=255)),
('description', models.TextField(blank=True, null=True)),
('created_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='materials_materialcategory_created', to=settings.AUTH_USER_MODEL, verbose_name='Created by')),
('lastmodified_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='materials_materialcategory_lastmodified', to=settings.AUTH_USER_MODEL, verbose_name='Last modified by')),
('owner', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to=settings.AUTH_USER_MODEL)),
('visible_to_groups', models.ManyToManyField(to='auth.Group')),
],
options={
'verbose_name': 'Material Group',
},
),
migrations.CreateModel(
name='Composition',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(db_index=True, default=django.utils.timezone.now, verbose_name='Created at')),
('lastmodified_at', models.DateTimeField(db_index=True, default=django.utils.timezone.now, verbose_name='Last modified at')),
('name', models.CharField(max_length=255)),
('description', models.TextField(blank=True, null=True)),
('created_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='materials_composition_created', to=settings.AUTH_USER_MODEL, verbose_name='Created by')),
('group', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='compositions', to='materials.materialcomponentgroup')),
('lastmodified_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='materials_composition_lastmodified', to=settings.AUTH_USER_MODEL, verbose_name='Last modified by')),
('owner', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to=settings.AUTH_USER_MODEL)),
('sample', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='compositions', to='materials.sample')),
('visible_to_groups', models.ManyToManyField(to='auth.Group')),
('order', models.IntegerField(default=90)),
],
options={
'abstract': False,
'ordering': ['order']
},
),
migrations.CreateModel(
name='BaseMaterial',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(db_index=True, default=django.utils.timezone.now, verbose_name='Created at')),
('lastmodified_at', models.DateTimeField(db_index=True, default=django.utils.timezone.now, verbose_name='Last modified at')),
('name', models.CharField(max_length=255)),
('description', models.TextField(blank=True, null=True)),
('type', models.CharField(default='material', max_length=127)),
('categories', models.ManyToManyField(blank=True, to='materials.MaterialCategory')),
('created_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='materials_basematerial_created', to=settings.AUTH_USER_MODEL, verbose_name='Created by')),
('lastmodified_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='materials_basematerial_lastmodified', to=settings.AUTH_USER_MODEL, verbose_name='Last modified by')),
('owner', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to=settings.AUTH_USER_MODEL)),
('visible_to_groups', models.ManyToManyField(to='auth.Group')),
],
options={
'verbose_name': 'Material',
'unique_together': {('name', 'owner')},
},
),
migrations.CreateModel(
name='Material',
fields=[
],
options={
'verbose_name': 'Material',
'proxy': True,
'indexes': [],
'constraints': [],
},
bases=('materials.basematerial',),
),
migrations.CreateModel(
name='MaterialComponent',
fields=[
],
options={
'verbose_name': 'component',
'proxy': True,
'indexes': [],
'constraints': [],
},
bases=('materials.basematerial',),
),
migrations.CreateModel(
name='WeightShare',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(db_index=True, default=django.utils.timezone.now, verbose_name='Created at')),
('lastmodified_at', models.DateTimeField(db_index=True, default=django.utils.timezone.now, verbose_name='Last modified at')),
('name', models.CharField(max_length=255)),
('description', models.TextField(blank=True, null=True)),
('average', models.FloatField(default=0.0)),
('standard_deviation', models.FloatField(default=0.0)),
('composition', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='shares', to='materials.composition')),
('created_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='materials_weightshare_created', to=settings.AUTH_USER_MODEL, verbose_name='Created by')),
('lastmodified_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='materials_weightshare_lastmodified', to=settings.AUTH_USER_MODEL, verbose_name='Last modified by')),
('owner', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to=settings.AUTH_USER_MODEL)),
('visible_to_groups', models.ManyToManyField(to='auth.Group')),
('component', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='shares', to='materials.materialcomponent')),
],
options={
'abstract': False,
'ordering': ['-average']
},
),
migrations.AddField(
model_name='sampleseries',
name='material',
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='materials.material'),
),
migrations.AddField(
model_name='composition',
name='fractions_of',
field=models.ForeignKey(default=materials.models.get_default_component_pk, on_delete=django.db.models.deletion.PROTECT, to='materials.materialcomponent'),
),
]
|
# Generated manually on 2022-01-24 17:40
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('deployments', '0042_personnel_country_to'),
]
operations = [
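        # Data backfill: copy each personnel's country_to from the country attached to the
        # event they were deployed to, keeping only real countries (record_type = 1).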
migrations.RunSQL(
sql=[("update deployments_personnel " +
"set country_to_id=d.country_id " +
"from deployments_personnel a " +
"join deployments_personneldeployment b on a.deployment_id = b.id " +
"join api_event c on b.event_deployed_to_id = c.id " +
"join api_event_countries d on c.id = d.event_id " +
"join api_country e on d.country_id = e.id " +
"where deployments_personnel.deployedperson_ptr_id = a.deployedperson_ptr_id " +
"and e.record_type = 1")], # important – otherwise regions also can appear
reverse_sql=[("update deployments_personnel set country_to_id=NULL")],
)
]
|
import math
import numpy
from PIL import Image
from the_ark.selenium_helpers import SeleniumHelperExceptions, ElementNotVisibleError, ElementError
from StringIO import StringIO
import time
import traceback
DEFAULT_SCROLL_PADDING = 100
SCREENSHOT_FILE_EXTENSION = "png"
DEFAULT_PIXEL_MATCH_OFFSET = 100
FIREFOX_HEAD_HEIGHT = 75
MAX_IMAGE_HEIGHT = 32768.0
class Screenshot:
"""
A helper class for taking screenshots using a Selenium Helper instance
"""
def __init__(self, selenium_helper, paginated=False, header_ids=None, footer_ids=None,
scroll_padding=DEFAULT_SCROLL_PADDING, pixel_match_offset=DEFAULT_PIXEL_MATCH_OFFSET,
file_extenson=SCREENSHOT_FILE_EXTENSION, resize_delay=0, content_container_selector="html"):
"""
        Initializes the Screenshot class. These variables will be used throughout to help determine how to capture pages
for this website.
:param
- selenium_helper: SeleniumHelper() - The Selenium Helper object whose browser you are capturing
- paginated: bool - if True, all full page screenshots captured by this class will be a sequence of
viewport sized images
- header_ids: list - A list of css_selectors for elements that "stick" to the top of the screen when
                                 scrolling. These are hidden and shown while capturing the screen so that they display
only at the top of the page, and do not cover any content
- footer_ids: list - A list of css_selectors for elements that "stick" to the bottom of the screen
                                 when scrolling. These are hidden and shown while capturing the screen so that they
display only at the bottom of the page, and do not cover any content
- scroll_padding: int - The height, in pixels, of the overlap between paginated captures. This is also
used when scrolling elements. the element is scrolled its height minus the padding
to create an overlapping of content shown on both images to not cut any text in half
- file_extenson: string - If provided, this extension will be used while creating the image. This must
be an extension that is usable with PIL
"""
# Set parameters as class variables
self.sh = selenium_helper
self.paginated = paginated
self.headers = header_ids
self.footers = footer_ids
self.content_container_selector = content_container_selector
self.scroll_padding = scroll_padding
self.pixel_match_offset = pixel_match_offset
        self.file_extenson = file_extenson
self.headless = self.sh.desired_capabilities.get("headless", False)
        self.head_padding = FIREFOX_HEAD_HEIGHT if self.sh.desired_capabilities["browserName"] == "firefox" else 0
self.scale_factor = self.sh.desired_capabilities.get("scale_factor", 1)
self.max_height = MAX_IMAGE_HEIGHT / self.scale_factor
self.resize_delay = resize_delay
def capture_page(self, viewport_only=False, padding=None):
"""
Entry point for a screenshot of the whole page. This will send the screenshot off to the correct methods
depending on whether you need paginated screenshots, just the current viewport area, or the whole page in
one large shot.
:param
- viewport_only: bool - Whether to capture just the viewport's visible area or not
:return
            - StringIO: A StringIO object containing the captured image(s)
"""
try:
if self.headless:
return self._capture_headless_page(viewport_only)
elif viewport_only:
return self._capture_single_viewport()
elif self.paginated:
return self._capture_paginated_page(padding)
else:
return self._capture_full_page()
except SeleniumHelperExceptions as selenium_error:
message = "A selenium issue arose while taking the screenshot".format()
error = SeleniumError(message, selenium_error)
raise error
except Exception as e:
message = "Unhandled exception while taking the screenshot | {0}".format(e)
raise ScreenshotException(message, stacktrace=traceback.format_exc())
def capture_scrolling_element(self, css_selector, viewport_only=True, scroll_padding=None):
"""
This method will scroll an element one height (with padding) and take a screenshot each scroll until the element
has been scrolled to the bottom. You can choose to capture the whole page (helpful when the scrollable element
is taller than the viewport) or just the viewport area
:param
- css_selector: string - The css selector for the element that you plan to scroll
- viewport_only: bool - Whether to capture just the viewport's visible area or not (each screenshot
after scrolling)
- scroll_padding: int - Overwrites the default scroll padding for the class. This can be used when the
element, or site, have greatly different scroll padding numbers
:return
- StringIO: list - A list containing multiple StringIO image objects
"""
padding = scroll_padding if scroll_padding else self.scroll_padding
try:
image_list = []
# Scroll the element to the top
self.sh.scroll_an_element(css_selector, scroll_top=True)
while True:
if self.headless:
image_list.append(self._capture_headless_page(viewport_only))
elif viewport_only:
image_list.append(self._capture_single_viewport())
else:
image_list.append(self._capture_full_page())
if self.sh.get_is_element_scroll_position_at_bottom(css_selector):
# Stop capturing once you're at the bottom
break
else:
# Scroll down for the next one!
self.sh.scroll_an_element(css_selector, scroll_padding=padding)
return image_list
except SeleniumHelperExceptions as selenium_error:
message = "A selenium issue arose while trying to capture the scrolling element"
error = SeleniumError(message, selenium_error)
raise error
except Exception as e:
message = "Unhandled exception while taking the scrolling screenshot " \
"of the element '{0}' | {1}".format(css_selector, e)
raise ScreenshotException(message,
stacktrace=traceback.format_exc(),
details={"css_selector": css_selector})
def capture_horizontal_scrolling_element(self, css_selector, viewport_only=True, scroll_padding=None):
"""
This method will scroll an element horizontally one width (with padding) and take a screenshot each scroll until
the element has been scrolled to the right. You can choose to capture the whole page (helpful when the
scrollable element is taller than the viewport) or just the viewport area.
:param
- css_selector: string - The css selector for the element that you plan to scroll
- viewport_only: bool - Whether to capture just the viewport's visible area or not (each screenshot
after scrolling)
- scroll_padding: int - Overwrites the default scroll padding for the class. This can be used when the
element, or site, have greatly different scroll padding numbers
:return
- StringIO: list - A list containing multiple StringIO image objects
"""
padding = scroll_padding if scroll_padding else self.scroll_padding
try:
image_list = []
# Scroll the element to the top
self.sh.scroll_an_element(css_selector, scroll_left=True)
while True:
# - Capture the image
if viewport_only:
image_list.append(self._capture_single_viewport())
else:
image_list.append(self._capture_full_page())
if self.sh.get_is_element_scroll_position_at_most_right(css_selector):
# - Stop capturing once you're at the most right
break
else:
# - Scroll right for the next one!
self.sh.scroll_an_element(css_selector, scroll_padding=padding, scroll_horizontal=True)
return image_list
except SeleniumHelperExceptions as selenium_error:
message = "A selenium issue arose while trying to capture the scrolling element"
error = SeleniumError(message, selenium_error)
raise error
except Exception as e:
message = "Unhandled exception while taking the scrolling screenshot " \
"of the element '{0}' | {1}".format(css_selector, e)
raise ScreenshotException(message,
stacktrace=traceback.format_exc(),
details={"css_selector": css_selector})
def _capture_single_viewport(self):
"""
        Grabs an image of the page and then crops it to just the visible / viewport area
        :return
            - StringIO: A StringIO object containing the captured image
"""
cropped_image = self._get_image_data(viewport_only=True)
return self._create_image_file(cropped_image)
def _capture_full_page(self):
"""
        Captures an image of the whole page. If there are sticky elements, as specified by the footers and headers
        class variables, the code will capture them only where appropriate, i.e. headers on top, footers on the
        bottom. Otherwise the whole screen is sent back as it is currently set up.
        :return
            - StringIO: A StringIO object containing the captured image
"""
if self.headers and self.footers:
# Capture viewport size window of the headers
self.sh.scroll_window_to_position(0)
self._hide_elements(self.footers)
header_image = self._get_image_data(True)
# - Capture the page from the bottom without headers
self._show_elements(self.footers)
#TODO: Update when scroll position updates to have a scroll to bottom option
self.sh.scroll_window_to_position(40000)
self._hide_elements(self.headers)
footer_image = self._get_image_data()
# Show all header elements again
self._show_elements(self.headers)
# Send the two images off to get merged into one
image_data = self._crop_and_stitch_image(header_image, footer_image)
elif self.headers:
# Scroll to the top so that the headers are not covering content
self.sh.scroll_window_to_position(0)
time.sleep(0.5)
image_data = self._get_image_data()
elif self.footers:
# Scroll to the bottom so that the footer items are not covering content
self.sh.scroll_window_to_position(40000)
time.sleep(0.5)
image_data = self._get_image_data()
else:
image_data = self._get_image_data()
return self._create_image_file(image_data)
def _hide_elements(self, css_selectors):
"""
Hides all elements in the given list
:param
- css_selectors: list - A list of the elements you would like to hide
"""
for selector in css_selectors:
try:
self.sh.hide_element(selector)
            # Continue to the next item if this one did not exist or was already not visible
except ElementNotVisibleError:
pass
except ElementError:
pass
def _show_elements(self, css_selectors):
"""
Shows all elements in the given list
:param
- css_selectors: list - A list of the elements you would like to make visible
"""
# Show footer items again
for selector in css_selectors:
try:
self.sh.show_element(selector)
            # Continue to the next item if this one did not exist
except ElementError:
pass
def _capture_headless_page(self, viewport_only):
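        # In headless mode the browser window itself is resized to the full content height so a
        # single screenshot covers the page; content taller than max_height is captured in
        # chunks and stitched back together by _combine_vertical_images.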
if self.paginated and not viewport_only:
return self._capture_headless_paginated_page()
# Store the current size and scroll position of the browser
width, height = self.sh.get_window_size()
current_scroll_position = self.sh.get_window_current_scroll_position()
if not viewport_only:
content_height = self.sh.get_content_height(self.content_container_selector)
if content_height > self.max_height:
self.sh.resize_browser(width, self.max_height + self.head_padding)
time.sleep(self.resize_delay)
elif height < content_height:
self.sh.resize_browser(width, content_height + self.head_padding)
time.sleep(self.resize_delay)
self.sh.scroll_window_to_position(scroll_bottom=True)
time.sleep(self.resize_delay)
if content_height > self.max_height:
images_list = []
number_of_loops = int(math.ceil(content_height / self.max_height))
# Loop through, starting at one for multiplication purposes
for i in range(1, number_of_loops + 1):
image_data = self.sh.get_screenshot_base64()
image = Image.open(StringIO(image_data.decode('base64')))
images_list.append(image)
self.sh.scroll_window_to_position(self.max_height * i)
                # Combine all of the images into one capture
image = self._combine_vertical_images(images_list, content_height)
else:
# Gather image byte data
image_data = self.sh.get_screenshot_base64()
# Create an image canvas and write the byte data to it
image = Image.open(StringIO(image_data.decode('base64')))
else:
# Gather image byte data
image_data = self.sh.get_screenshot_base64()
# Create an image canvas and write the byte data to it
image = Image.open(StringIO(image_data.decode('base64')))
# - Return the browser to its previous size and scroll position
if not viewport_only:
self.sh.resize_browser(width, height)
self.sh.scroll_window_to_position(current_scroll_position)
time.sleep(self.resize_delay)
return self._create_image_file(image)
def _combine_vertical_images(self, images_list, content_height):
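        # Crop the final chunk down to the content that was not already covered by the previous
        # full-height captures, then paste all chunks vertically onto a single canvas.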
height_of_full_images = 0
total_height = 0
total_width = 0
# Make the last image the height of the remaining content
for image in images_list[:-1]:
height_of_full_images += image.size[1]
remaining_height = (content_height * self.scale_factor) - height_of_full_images
images_list[-1] = images_list[-1].crop((0,
images_list[-1].size[1] - remaining_height,
images_list[-1].size[0],
images_list[-1].size[1]))
for image in images_list:
total_width = image.size[0] if image.size[0] > total_width else total_width
total_height += image.size[1]
resulting_image = Image.new('RGB', (total_width, total_height))
current_height = 0
for i, image in enumerate(images_list):
resulting_image.paste(im=image, box=(0, current_height))
current_height += image.size[1]
return resulting_image
def _capture_paginated_page(self, padding=None):
"""
Captures the page viewport by viewport, leaving an overlap of pixels the height of the self.padding variable
between each image
"""
image_list = []
scroll_padding = padding if padding else self.scroll_padding
# Scroll page to the top
self.sh.scroll_window_to_position(0)
current_scroll_position = 0
viewport_height = self.sh.driver.execute_script("return document.documentElement.clientHeight")
while True:
# Capture the image
image_list.append(self._capture_single_viewport())
# Scroll for the next one!
self.sh.scroll_window_to_position(current_scroll_position + viewport_height - scroll_padding)
time.sleep(0.25)
new_scroll_position = self.sh.get_window_current_scroll_position()
# Break if the scroll position did not change (because it was at the bottom)
if new_scroll_position == current_scroll_position:
break
else:
current_scroll_position = new_scroll_position
return image_list
def _capture_headless_paginated_page(self, padding=None):
"""
Captures the page viewport by viewport, leaving an overlap of pixels the height of the self.padding variable
between each image
"""
image_list = []
scroll_padding = padding if padding else self.scroll_padding
# Scroll page to the top
self.sh.scroll_window_to_position(0)
current_scroll_position = 0
viewport_height = self.sh.driver.execute_script("return document.documentElement.clientHeight")
while True:
# Capture the image
image_data = self.sh.get_screenshot_base64()
image_file = self._create_image_file(Image.open(StringIO(image_data.decode('base64'))))
image_list.append(image_file)
# Scroll for the next one!
self.sh.scroll_window_to_position(current_scroll_position + viewport_height - scroll_padding)
time.sleep(0.25)
new_scroll_position = self.sh.get_window_current_scroll_position()
# Break if the scroll position did not change (because it was at the bottom)
if new_scroll_position == current_scroll_position:
break
else:
current_scroll_position = new_scroll_position
return image_list
def _get_image_data(self, viewport_only=False):
"""
Creates an Image() canvas of the page. The image is cropped to be only the viewport area if specified.
:param
- viewport_only: bool - Captures only the visible /viewport area if true
:return
- image: Image() - The image canvas of the captured data
"""
# - Capture the image
# Gather image byte data
image_data = self.sh.get_screenshot_base64()
# Create an image canvas and write the byte data to it
image = Image.open(StringIO(image_data.decode('base64')))
# - Crop the image to just the visible area
# Top of the viewport
current_scroll_position = self.sh.get_window_current_scroll_position()
# Viewport Dimensions
viewport_width, viewport_height = self.sh.get_viewport_size()
# Image size of data returned by Selenium
        image_width, image_height = image.size
        if viewport_only:
            # Calculate the visible area
            crop_box = (0, current_scroll_position, viewport_width, current_scroll_position + viewport_height)
            # Crop everything of the image but the visible area
            cropped_image = image.crop(crop_box)
            return cropped_image
        else:
            # Crop to the full height of the captured image
            crop_box = (0, 0, viewport_width, image_height)
# Crop everything of the image but the visible area
cropped_image = image.crop(crop_box)
return cropped_image
def _crop_and_stitch_image(self, header_image, footer_image):
"""
        This method takes in a header and footer image. It then searches for a block of matching pixel rows
        (pixel_match_offset rows, 100 by default) between the two images. Once it finds this point the footer image
        is cropped above the "match" point. A new canvas is then created that is the total height of both images.
        The two images are then copied onto the new canvas to create the final image, headers on top, footers on
        the bottom.
:param
- header_image: Image() - The top of the page, usually displays all of the headers elements
- footer_image: Image() - The bottom of the page, usually displays all of the footer elements
:return
- stitched_image: Image() - The resulting image of the crop and stitching of the header and footer images
"""
try:
# Create Pixel Row arrays from each image
header_array = numpy.asarray(header_image)
footer_array = numpy.asarray(footer_image)
# - Find a place in both images that match then crop and stitch them at that location
crop_row = 0
header_image_height = header_image.height
# Set the offset to the height of the image if the height is less than the offset
if self.pixel_match_offset > header_image_height:
self.pixel_match_offset = header_image_height
# - Find the pixel row in the footer image that matches the bottom row in the header image
# Grab the last 100 rows of header_image
header_last_hundred_rows = header_array[header_image_height - self.pixel_match_offset: header_image_height]
            # Iterate over the footer image rows; i is the index of the row currently being checked.
for i, footer_row in enumerate(footer_array):
# Jump out if the crop row has been set
if crop_row != 0:
break
# Check if the current row being inspected matches the header row 100 pixels above the bottom
if numpy.array_equal(footer_row, header_last_hundred_rows[0]):
# It is a match!
for y, row in enumerate(header_last_hundred_rows):
# Check that the 100 footer rows above the matching row also match the bottom 100 of
# the header image we grabbed at the start of this check
if numpy.array_equal(footer_array[i + y], header_last_hundred_rows[y]):
# Check whether we've found 100 matching rows or not
if y == self.pixel_match_offset - 1:
# Yes! All 100 matched. Set the crop row to this row
crop_row = i + self.pixel_match_offset
break
# If no rows matched, crop at height of header image
if crop_row == 0:
crop_row = header_image_height
# - Crop the top of the footer image off above the line that matches the header image's bottom row
# Create the crop box that outlines what to remove from the footer image
footer_image_width = footer_image.size[0]
footer_image_height = footer_image.size[1]
crop_box = (0, crop_row, footer_image_width, footer_image_height)
# Perform the crop
cropped_footer_image = footer_image.crop(crop_box)
# Grab the new height of the footer image
cropped_footer_image_height = cropped_footer_image.size[1]
# Create a blank image canvas that is as tall the footer and header images combined
total_height = header_image_height + cropped_footer_image_height
stitched_image = Image.new("RGB", (footer_image_width, total_height))
# - Paste the header and footer images onto the canvas
# Paste the header image at the top
stitched_image.paste(header_image, (0, 0))
# Paste the footer image directly below the header image
stitched_image.paste(cropped_footer_image, (0, header_image_height))
return stitched_image
except Exception as e:
message = "Error while cropping and stitching a full page screenshot | {0}".format(e)
raise ScreenshotException(message, stacktrace=traceback.format_exc())
def _create_image_file(self, image):
"""
This method takes an Image() variable and saves it into a StringIO "file".
:param
            - image: Image() - The image to be saved into the StringIO object
        :return
            - image_file: StringIO() - The StringIO object containing the saved image
"""
# Instantiate the file object
image_file = StringIO()
# Save the image canvas to the file as the given file type
image.save(image_file, self.file_extenson.upper())
# Set the file marker back to the beginning
image_file.seek(0)
return image_file
class ScreenshotException(Exception):
def __init__(self, msg, stacktrace=None, details=None):
self.msg = msg
self.details = {} if details is None else details
self.details["stracktrace"] = stacktrace
super(ScreenshotException, self).__init__()
def __str__(self):
exception_msg = "Screenshot Exception: \n"
detail_string = "Exception Details:\n"
for key, value in self.details.items():
detail_string += "{0}: {1}\n".format(key, value)
exception_msg += detail_string
exception_msg += "Message: {0}".format(self.msg)
return exception_msg
class SeleniumError(ScreenshotException):
def __init__(self, message, selenium_helper_exception):
new_message = "{0} | {1}".format(message, selenium_helper_exception.msg)
super(SeleniumError, self).__init__(msg=new_message,
stacktrace=selenium_helper_exception.stacktrace,
details=selenium_helper_exception.details)
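# A minimal usage sketch, assuming an already-constructed and connected the_ark SeleniumHelper
# instance (its construction is site specific and not shown here). The helper function name and
# its defaults below are illustrative only; it relies solely on the Screenshot API defined above.
def example_capture_page(selenium_helper, output_path="page.png"):
    """Capture the full current page and write it to output_path as a PNG."""
    screenshot = Screenshot(selenium_helper, paginated=False, scroll_padding=150)
    # capture_page() returns a StringIO object holding the encoded image bytes
    image_file = screenshot.capture_page(viewport_only=False)
    with open(output_path, "wb") as out:
        out.write(image_file.getvalue())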
|
from __future__ import division
def get_x_distribution_series(x_values, probabilities):
    distr_series = {}
    for i, x in enumerate(x_values):
        prob_sum = 0
        for prob in probabilities[i]:
            prob_sum += prob
        distr_series[x] = prob_sum
    return distr_series
def get_x_distribution_function(distribution_series):
"""
:type distribution_series:dict
"""
distrib_func = {}
prob = 0
for x in sorted(distribution_series.keys()):
distrib_func[x] = prob
prob += distribution_series[x]
return distrib_func
def get_conditional_distribution_series(probabilities, x_values, y_values, x_distr_series):
series = {}
for j, y in enumerate(y_values):
series[y] = {}
for i, x in enumerate(x_values):
series[y][x] = probabilities[i][j] / x_distr_series[x]
return series
def get_y_distribution_function(x_values, y_values, conditional_distribution_series):
distrib_func = {}
for x in x_values:
distrib_func[x] = {}
prob = 0
for y in sorted(y_values):
distrib_func[x][y] = prob
prob += conditional_distribution_series[y][x]
return distrib_func
def two_dim_value_generator(x_values, y_values, x_distribution_func, y_distrib_func, uniform_generator):
"""
:type x_distribution_func:dict
:type y_distrib_func:dict
"""
while True:
        xi = next(uniform_generator)
for i in range(len(x_values) - 1):
if x_distribution_func[x_values[i]] < xi <= x_distribution_func[x_values[i + 1]]:
x = x_values[i]
break
else:
x = x_values[-1]
        xi = next(uniform_generator)
for i in range(len(y_values) - 1):
if y_distrib_func[x][y_values[i]] < xi <= y_distrib_func[x][y_values[i + 1]]:
y = y_values[i]
break
else:
y = y_values[-1]
yield x, y
def get_expected_value_x(probabilities, x_values):
exp_value_x = 0
for i, x in enumerate(x_values):
for prob in probabilities[i]:
exp_value_x += x * prob
return exp_value_x
def get_expected_value_y(probabilities, y_values):
exp_value_y = 0
for j, y in enumerate(y_values):
for i in range(len(probabilities)):
exp_value_y += y * probabilities[i][j]
return exp_value_y
def get_dispersion_x(probabilities, x_values, exp_value_x):
disp_x = 0
for i, x in enumerate(x_values):
for prob in probabilities[i]:
disp_x += prob * (x - exp_value_x) ** 2
return disp_x
def get_dispersion_y(probabilities, y_values, exp_value_y):
    disp_y = 0
    for j, y in enumerate(y_values):
        for i in range(len(probabilities)):
            disp_y += probabilities[i][j] * (y - exp_value_y) ** 2
    return disp_y
def get_correlation_coefficient(probabilities, x_values, y_values):
exp_value_x = get_expected_value_x(probabilities, x_values)
exp_value_y = get_expected_value_y(probabilities, y_values)
disp_x = get_dispersion_x(probabilities, x_values, exp_value_x)
disp_y = get_dispersion_y(probabilities, y_values, exp_value_y)
covariance_coefficient = 0
for i, x in enumerate(x_values):
for j, y in enumerate(y_values):
covariance_coefficient += (x - exp_value_x) * (y - exp_value_y) * probabilities[i][j]
return covariance_coefficient / (disp_x ** (1 / 2) * disp_y ** (1 / 2))
def get_bar_chart(k, a, b):
"""
:type values:list
:type a,b:float
:type k:int
"""
def func(values):
h = (b - a) / k
b_i = a
values.sort()
barchart = []
value_quantity = len(values)
for i in range(k):
a_i = b_i
b_i = a_i + h
v = 0
for x in values:
if x > b_i:
break
elif a_i < x <= b_i:
v += 1
barchart.append((a_i, b_i, v / value_quantity))
return barchart
return func
def cleared_values(values):
clr_values = []
for x in values:
if x not in clr_values:
clr_values.append(x)
clr_values.sort()
return clr_values
def get_conditional_distribution(x_values_inp, y_values_inp, gener_values):
    cond_distrib = {}
    pair_quantity = len(gener_values)
    for x, y in gener_values:
        if x not in cond_distrib:
            cond_distrib[x] = {}
            for y_in in y_values_inp:
                cond_distrib[x][y_in] = 0
        cond_distrib[x][y] += 1 / pair_quantity
    distrib_list = []
for x in x_values_inp:
distrib_list.append([])
for y in y_values_inp:
distrib_list[-1].append(cond_distrib[x][y])
return distrib_list
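# A minimal usage sketch, assuming the convention used throughout this module:
# probabilities[i][j] = P(X = x_values[i], Y = y_values[j]). It only exercises functions
# defined above and prints the marginal series, its distribution function and the
# correlation coefficient for a small 2x2 joint distribution.
if __name__ == "__main__":
    x_values = [0, 1]
    y_values = [0, 1]
    probabilities = [[0.1, 0.4],
                     [0.2, 0.3]]
    x_series = get_x_distribution_series(x_values, probabilities)
    print(x_series)                               # {0: 0.5, 1: 0.5}
    print(get_x_distribution_function(x_series))  # {0: 0, 1: 0.5}
    print(get_correlation_coefficient(probabilities, x_values, y_values))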
|
from openprocurement.agreement.core.adapters.configurator import BaseAgreementConfigurator
class CFAgreementUAConfigurator(BaseAgreementConfigurator):
name = "CFA configurator"
model = None # TODO:
|
ano = int(input('Digite o ano do seu nascimento: '))
idade = 2020-ano
if idade <= 10:
print('Mirim')
elif idade <= 15:
print('Infantil')
elif idade <= 19:
print('Junior')
elif idade <= 20:
print('Sênior')
else:
print('Master')
print(idade)
|
"""Snake Game Python Tutorial
youtube video: https://www.youtube.com/watch?v=CD4qAhfFuLo
current time: 33:00
"""
import sys
import math
import random
import pygame
from pygame.locals import *
import tkinter as tk
from tkinter import messagebox
WHITE = (255,255,255)
BLACK = (0,0,0)
RED = (255,0,0)
GREEN = (0,255,0)
BLUE = (0,0,255)
class cube(object):
"""
"""
w = 500 #set pixel screen width
rows = 20 #set number of rows
def __init__(self,start,dirnx=1,dirny=0,color=RED):
self.pos = start
self.dirnx = 1
self.dirny = 0
self.color = color
def move(self,dirnx,dirny):
"""
move cube, by adding new direction to previous position
"""
self.dirnx = dirnx
self.dirny = dirny
self.pos = (self.pos[0]+self.dirnx, self.pos[1]+self.dirny)
def draw(self,surface,eyes=False):
"""
drawing: convert x,y grid position to pixel position
"""
dis = self.w // self.rows #distance between x and y values
#variables for easy coding
i = self.pos[0] # row
j = self.pos[1] # column
        #draw just a little bit less, so we draw inside of the square and don't cover the grid
pygame.draw.rect(surface, self.color, (i*dis+1,j*dis+1,dis-2,dis-2 ))
#draw eyes
if eyes:
centre = dis//2
radius = 3
circleMiddle = (i*dis+centre-radius, j*dis+8 ) #eye 1
circleMiddle2 = (i*dis+dis-radius*2, j*dis+8 ) #eye 2
pygame.draw.circle(surface, BLACK, circleMiddle, radius)
pygame.draw.circle(surface, BLACK, circleMiddle2, radius)
def drawGrid(w,rows,surface):
"""
This function draws a square grid on main display
"""
#distance between grid lines
sizeBtwn = w // rows
x = 0
y = 0
#create grid by drawing lines
for l in range(rows):
x = x + sizeBtwn
y = y + sizeBtwn
#vertical lines
pygame.draw.line(surface, WHITE, (x,0), (x,w))
#horizontal lines
pygame.draw.line(surface, WHITE, (0,y), (w,y))
def redrawWindow(surface):
global rows, width, s
#background
surface.fill(BLACK)
    #draw grid
    drawGrid(width, rows, surface)
    #draw the cube
    s.draw(surface)
    #update display
    pygame.display.update()
def main():
    global width, rows, s
    #create game display
    width = 500
    rows = 20
    win = pygame.display.set_mode((width, width)) #square display
    #single cube to move around until the snake class from the tutorial is added
    s = cube((rows // 2, rows // 2))
    clock = pygame.time.Clock()
    flag = True
    FPScount = 0
    print(FPScount)
    while flag:
        #pygame.time.delay(50)
        clock.tick(10) #game max speed 10 FPS
        #close the window cleanly when the user quits
        for event in pygame.event.get():
            if event.type == QUIT:
                flag = False
        s.move(s.dirnx, s.dirny)
        redrawWindow(win)
        FPScount += 1
        print(f'FPScount:{FPScount}')
#rows = 0
#w = 0
#h = 0
#cube.rows = rows
#cube.w = w
main()
pygame.quit()
sys.exit()
|
#!/usr/bin/env python
import logging
import sys
import traceback
import katcp
import readline
import codecs
import re
from optparse import OptionParser
from cmd2 import Cmd
from katcp import DeviceClient
logging.basicConfig(level=logging.INFO,
stream=sys.stderr,
format="%(asctime)s - %(name)s - %(filename)s:"
"%(lineno)s - %(levelname)s - %(message)s")
log = logging.getLogger("r2rm.basic_cli")
ESCAPE_SEQUENCE_RE = re.compile(r'''
( \\U........ # 8-digit hex escapes
| \\u.... # 4-digit hex escapes
| \\x.. # 2-digit hex escapes
| \\[0-7]{1,3} # Octal escapes
| \\N\{[^}]+\} # Unicode characters by name
| \\[\\'"abfnrtv] # Single-character escapes
)''', re.UNICODE | re.VERBOSE)
def unescape_string(s):
def decode_match(match):
return codecs.decode(match.group(0), 'unicode-escape')
return ESCAPE_SEQUENCE_RE.sub(decode_match, s)
def decode_katcp_message(s):
"""
@brief Render a katcp message human readable
@params s A string katcp message
"""
return unescape_string(s).replace("\_", " ")
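# Illustrative example: a katcp argument such as "device\_is\_ok" (katcp escapes spaces
# as "\_") is rendered as "device is ok" by decode_katcp_message.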
class StreamClient(DeviceClient):
def __init__(self, server_host, server_port, stream=sys.stdout):
self.stream = stream
super(StreamClient, self).__init__(server_host, server_port)
self.commands = set()
def inform_help(self, msg):
""" Handls inform"""
self.unhandled_inform(msg)
self.commands.add(msg.arguments[0])
def to_stream(self, prefix, msg):
self.stream.write("%s:\n%s\n" %
(prefix, decode_katcp_message(msg.__str__())))
def unhandled_reply(self, msg):
"""Deal with unhandled replies"""
self.to_stream("Unhandled reply", msg)
def unhandled_inform(self, msg):
"""Deal with unhandled replies"""
self.to_stream("Unhandled inform", msg)
def unhandled_request(self, msg):
"""Deal with unhandled replies"""
self.to_stream("Unhandled request", msg)
class KatcpCli(Cmd):
"""
@brief Basic command line interface to KATCP device
@detail This class provides a command line interface to
to any katcp.DeviceClient subclass. Behaviour of the
interface is determined by the client object passed
at instantiation.
"""
Cmd.shortcuts.update({'?': 'katcp'})
Cmd.allow_cli_args = False
def __init__(self,host,port,*args,**kwargs):
"""
@brief Instantiate new KatcpCli instance
@params client A DeviceClient instance
"""
self.host = host
self.port = port
self.katcp_parser = katcp.MessageParser()
self.start_client()
Cmd.__init__(self, *args, persistent_history_file='~/.katcp_cli_history', **kwargs)
def start_client(self):
log.info("Client connecting to port {self.host}:{self.port}".format(**locals()))
self.client = StreamClient(self.host, self.port)
self.client.start()
self.prompt = "(katcp CLI {self.host}:{self.port}): ".format(**locals())
def stop_client(self):
self.client.stop()
self.client.join()
def do_katcp(self, arg, opts=None):
"""
@brief Send a request message to a katcp server
@param arg The katcp formatted request
"""
request = "?" + "".join(arg)
log.info("Request: %s"%request)
try:
msg = self.katcp_parser.parse(request)
self.client.ioloop.add_callback(self.client.send_message, msg)
        except Exception as e:
e_type, e_value, trace = sys.exc_info()
reason = "\n".join(traceback.format_exception(
e_type, e_value, trace, 20))
log.exception(reason)
def complete_katcp(self, text, line, begidx, endidx):
"""
Tab completion for katcp requests.
"""
if not self.client.commands:
# ToDo: this request should be silent and not be printed on the cli
# ...
self.client.ioloop.add_callback(self.client.send_message, "?help")
return self.basic_complete(text, line, begidx, endidx, self.client.commands)
def do_connect(self, arg, opts=None):
"""
@brief Connect to different KATCP server
@param arg Target server address in form "host:port"
"""
try:
host,port = arg.split(":")
except Exception:
print "Usage: connect <host>:<port>"
return
try:
app = KatcpCli(host,port)
app.cmdloop()
except Exception as error:
log.exception("Error from CLI")
finally:
app.stop_client()
if __name__ == "__main__":
usage = "usage: %prog [options]"
parser = OptionParser(usage=usage)
parser.add_option('-a', '--host', dest='host', type="string", default="", metavar='HOST',
help='attach to server HOST (default="" - localhost)')
parser.add_option('-p', '--port', dest='port', type=int, default=1235, metavar='N',
help='attach to server port N (default=1235)')
(opts, args) = parser.parse_args()
sys.argv = sys.argv[:1]
log.info("Ctrl-C to terminate.")
try:
app = KatcpCli(opts.host,opts.port)
app.cmdloop()
except Exception as error:
log.exception("Error from CLI")
finally:
app.stop_client()
|
"""opentamilweb URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.10/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
import sys
from django.shortcuts import redirect
from django.conf.urls import url
from .views import *
PYTHON26 = sys.version.find('2.6') >= 0
urlpatterns = [
url(r'^$', index,name="home"),
url(r'^apidoc/$',lambda r: redirect("/static/sphinx_doc/_build/html/index.html")),
url(r'^tts_demo/$',tts_demo,name="tts_demo"),
url(r'^translite/$', trans,name="translite"),
url(r'^tsci/$', uni,name="tsci"),
url(r'^keechu/$', keechu,name="keechu"),
url(r'^sandhi-check/$', sandhi_check,name="sandhi"),
url(r'^spell/$', spl,name="spell"),
url(r'^revers/$', rev,name="rever"),
url(r'^number/$', num,name="number"),
url(r'^anagram/$', anag,name="anagram"),
url(r'^unigram/$', unig,name="unigram"),
url(r'^vaypaadu/$',vaypaadu,name="multiplication"),
url(r'^number/(?P<num>\d+)/$', numstr,name="numstr"),
url(r'^tsci/(?P<tsci>.+?)/$', unicod,name="unicod"),
url(r'^keechu/(?P<k1>.+?)/$', keech,name="keech"),
url(r'^sandhi-checker/$', call_sandhi_check,name="sandhi_check"),
url(r'^translite/(?P<tan>.+?)/$', translite,name="phonetic"),
url(r'^spell/(?P<k1>.+?)/$', spell_check,name="spell_check"),
url(r'^ngram/$', ngra,name="ta_ngram"),
url(r'^ngram/(?P<ng>.+?)/$', test_ngram,name="ngram"),
url(r'^anagram/(?P<word>.+?)/$', anagram,name="ta_anagram"),
url(r'^unigram/(?P<word>.+?)/$', test_basic,name="ta_unigram"),
url(r'^revers/(?P<word>.+?)/$', revers,name="ta_revers"),
url(r'^xword/$',xword,name='ta_xword'),
url(r'^summarizer/',summarizer,name='ta_summarizer'),
url(r'^morse/(?P<direction>.+)/(?P<word>.+)/$',morse,name='ta_morse'),
url(r'^morse/$', morse_code,name="morse_code"),
url(r'^minnal/$',minnal,name="ta_minnal"),
url(r'^minnal/(?P<word>.+?)/$',test_minnal,name="minnal"),
url(r'^textrandomizer_auth/(?P<key>.+)/$',textrandomizer,name="ta_textrandomizer"),
url(r'^textrandomizer/(?P<level>.+)/$',test_textrandomizer,name="textrandomizer"),
url(r'^stemmer/$',tastemmer,name="stemmer"),
url(r'^stemmer/json/$',lambda x: tastemmer(x,use_json=True),name="json_stemmer"),
]
if not PYTHON26:
urlpatterns.extend( [
url(r'^classify-word/$', classify_word,name="classify_word"),
url(r'^get-classify/$', get_classify,name="classifier")] )
|
# Generated by Django 2.2.2 on 2019-06-24 14:07
import django.contrib.gis.db.models.fields
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('main', '0004_auto_20190624_1355'),
]
operations = [
migrations.AlterField(
model_name='photo',
name='position',
field=django.contrib.gis.db.models.fields.PointField(blank=True, null=True, srid=4326),
),
]
|
"""Script to train the Hamiltonian Generative Network
"""
import ast
import argparse
import copy
import pprint
import os
import warnings
import yaml
import numpy as np
import torch
import tqdm
from utilities.integrator import Integrator
from utilities.training_logger import TrainingLogger
from utilities import loader
from utilities.loader import load_hgn, get_online_dataloaders, get_offline_dataloaders
from utilities.losses import reconstruction_loss, kld_loss, geco_constraint
from utilities.statistics import mean_confidence_interval
def _avoid_overwriting(experiment_id):
# This function throws an error if the given experiment data already exists in runs/
logdir = os.path.join('runs', experiment_id)
if os.path.exists(logdir):
        assert len(os.listdir(logdir)) == 0,\
            f'Experiment id {experiment_id} already exists in runs/. Remove it or change the ' \
            f'name in the yaml file.'
class HgnTrainer:
def __init__(self, params, resume=False):
"""Instantiate and train the Hamiltonian Generative Network.
Args:
params (dict): Experiment parameters (see experiment_params folder).
"""
self.params = params
self.resume = resume
        if not resume:  # Fail if experiment_id already exists in runs/
_avoid_overwriting(params["experiment_id"])
# Set device
self.device = params["device"]
if "cuda" in self.device and not torch.cuda.is_available():
warnings.warn(
"Warning! Set to train in GPU but cuda is not available. Device is set to CPU.")
self.device = "cpu"
# Get dtype, will raise a 'module 'torch' has no attribute' if there is a typo
self.dtype = torch.__getattribute__(params["networks"]["dtype"])
        # Load HGN from parameters onto the device
self.hgn = load_hgn(params=self.params,
device=self.device,
dtype=self.dtype)
if 'load_path' in self.params:
self.load_and_reset(self.params, self.device, self.dtype)
# Either generate data on-the-fly or load the data from disk
if "train_data" in self.params["dataset"]:
print("Training with OFFLINE data...")
self.train_data_loader, self.test_data_loader = get_offline_dataloaders(self.params)
else:
print("Training with ONLINE data...")
self.train_data_loader, self.test_data_loader = get_online_dataloaders(self.params)
# Initialize training logger
self.training_logger = TrainingLogger(
hyper_params=self.params,
loss_freq=100,
rollout_freq=1000,
model_freq=10000
)
# Initialize tensorboard writer
self.model_save_file = os.path.join(
self.params["model_save_dir"],
self.params["experiment_id"]
)
# Define optimization modules
optim_params = [
{
'params': self.hgn.encoder.parameters(),
'lr': params["optimization"]["encoder_lr"]
},
{
'params': self.hgn.transformer.parameters(),
'lr': params["optimization"]["transformer_lr"]
},
{
'params': self.hgn.hnn.parameters(),
'lr': params["optimization"]["hnn_lr"]
},
{
'params': self.hgn.decoder.parameters(),
'lr': params["optimization"]["decoder_lr"]
},
]
self.optimizer = torch.optim.Adam(optim_params)
def load_and_reset(self, params, device, dtype):
"""Load the HGN from the path specified in params['load_path'] and reset the networks in
params['reset'].
Args:
params (dict): Dictionary with all the necessary parameters to load the networks.
device (str): 'gpu:N' or 'cpu'
dtype (torch.dtype): Data type to be used in computations.
"""
self.hgn.load(params['load_path'])
if 'reset' in params:
if isinstance(params['reset'], list):
for net in params['reset']:
assert net in ['encoder', 'decoder', 'hamiltonian', 'transformer']
else:
assert params['reset'] in ['encoder', 'decoder', 'hamiltonian', 'transformer']
if 'encoder' in params['reset']:
self.hgn.encoder = loader.instantiate_encoder(params, device, dtype)
if 'decoder' in params['reset']:
self.hgn.decoder = loader.instantiate_decoder(params, device, dtype)
if 'transformer' in params['reset']:
self.hgn.transformer = loader.instantiate_transformer(params, device, dtype)
if 'hamiltonian' in params['reset']:
self.hgn.hnn = loader.instantiate_hamiltonian(params, device, dtype)
def training_step(self, rollouts):
"""Perform a training step with the given rollouts batch.
Args:
rollouts (torch.Tensor): Tensor of shape (batch_size, seq_len, channels, height, width)
corresponding to a batch of sampled rollouts.
Returns:
A dictionary of losses and the model's prediction of the rollout. The reconstruction loss and
KL divergence are floats and prediction is the HGNResult object with data of the forward pass.
"""
self.optimizer.zero_grad()
rollout_len = rollouts.shape[1]
input_frames = self.params['optimization']['input_frames']
assert(input_frames <= rollout_len) # optimization.use_steps must be smaller (or equal) to rollout.sequence_length
roll = rollouts[:, :input_frames]
hgn_output = self.hgn.forward(rollout_batch=roll, n_steps=rollout_len - input_frames)
target = rollouts[:, input_frames-1:] # Fit first input_frames and try to predict the last + the next (rollout_len - input_frames)
prediction = hgn_output.reconstructed_rollout
if self.params["networks"]["variational"]:
tol = self.params["geco"]["tol"]
alpha = self.params["geco"]["alpha"]
lagrange_mult_param = self.params["geco"]["lagrange_multiplier_param"]
C, rec_loss = geco_constraint(target, prediction, tol) # C has gradient
# Compute moving average of constraint C (without gradient)
if self.C_ma is None:
self.C_ma = C.detach()
else:
self.C_ma = alpha * self.C_ma + (1 - alpha) * C.detach()
C_curr = C.detach().item() # keep track for logging
C = C + (self.C_ma - C.detach()) # Move C without affecting its gradient
# Compute KL divergence
mu = hgn_output.z_mean
logvar = hgn_output.z_logvar
kld = kld_loss(mu=mu, logvar=logvar)
# normalize by number of frames, channels and pixels per frame
kld_normalizer = prediction.flatten(1).size(1)
kld = kld / kld_normalizer
# Compute losses
train_loss = kld + self.langrange_multiplier * C
# clamping the langrange multiplier to avoid inf values
self.langrange_multiplier = self.langrange_multiplier * torch.exp(
lagrange_mult_param * C.detach())
self.langrange_multiplier = torch.clamp(self.langrange_multiplier, 1e-10, 1e10)
losses = {
'loss/train': train_loss.item(),
'loss/kld': kld.item(),
'loss/C': C_curr,
'loss/C_ma': self.C_ma.item(),
'loss/rec': rec_loss.item(),
'other/langrange_mult': self.langrange_multiplier.item()
}
else: # not variational
# Compute frame reconstruction error
train_loss = reconstruction_loss(
target=target,
prediction=prediction)
losses = {'loss/train': train_loss.item()}
train_loss.backward()
self.optimizer.step()
return losses, hgn_output
def fit(self):
"""The trainer fits an HGN.
Returns:
(HGN) An HGN model that has been fitted to the data
"""
# Initial values for geco algorithm
if self.params["networks"]["variational"]:
self.langrange_multiplier = self.params["geco"]["initial_lagrange_multiplier"]
self.C_ma = None
# TRAIN
for ep in range(self.params["optimization"]["epochs"]):
print("Epoch %s / %s" % (str(ep + 1), str(self.params["optimization"]["epochs"])))
pbar = tqdm.tqdm(self.train_data_loader)
for batch_idx, rollout_batch in enumerate(pbar):
# Move to device and change dtype
rollout_batch = rollout_batch.to(self.device).type(self.dtype)
# Do an optimization step
losses, prediction = self.training_step(rollouts=rollout_batch)
# Log progress
self.training_logger.step(losses=losses,
rollout_batch=rollout_batch,
prediction=prediction,
model=self.hgn)
# Progress-bar msg
msg = ", ".join([
f"{k}: {v:.2e}" for k, v in losses.items() if v is not None
])
pbar.set_description(msg)
# Save model
self.hgn.save(self.model_save_file)
self.test()
return self.hgn
def compute_reconst_kld_errors(self, dataloader):
"""Computes reconstruction error and KL divergence.
Args:
dataloader (torch.utils.data.DataLoader): DataLoader to retrieve errors from.
Returns:
(reconst_error_mean, reconst_error_h), (kld_mean, kld_h): Tuples containing the mean and the
95% confidence interval; the KLD tuple is None for non-variational models.
"""
first = True
pbar = tqdm.tqdm(dataloader)
for _, rollout_batch in enumerate(pbar):
# Move to device and change dtype
rollout_batch = rollout_batch.to(self.device).type(self.dtype)
rollout_len = rollout_batch.shape[1]
input_frames = self.params['optimization']['input_frames']
assert input_frames <= rollout_len  # optimization.input_frames must be <= the rollout sequence length
roll = rollout_batch[:, :input_frames]
hgn_output = self.hgn.forward(rollout_batch=roll, n_steps=rollout_len - input_frames)
target = rollout_batch[:, input_frames-1:] # Fit first input_frames and try to predict the last + the next (rollout_len - input_frames)
prediction = hgn_output.reconstructed_rollout
error = reconstruction_loss(
target=target,
prediction=prediction, mean_reduction=False).detach().cpu(
).numpy()
if self.params["networks"]["variational"]:
kld = kld_loss(mu=hgn_output.z_mean, logvar=hgn_output.z_logvar, mean_reduction=False).detach().cpu(
).numpy()
# normalize by number of frames, channels and pixels per frame
kld_normalizer = prediction.flatten(1).size(1)
kld = kld / kld_normalizer
if first:
first = False
set_errors = error
if self.params["networks"]["variational"]:
set_klds = kld
else:
set_errors = np.concatenate((set_errors, error))
if self.params["networks"]["variational"]:
set_klds = np.concatenate((set_klds, kld))
err_mean, err_h = mean_confidence_interval(set_errors)
if self.params["networks"]["variational"]:
kld_mean, kld_h = mean_confidence_interval(set_klds)
return (err_mean, err_h), (kld_mean, kld_h)
else:
return (err_mean, err_h), None
def test(self):
"""Test after the training is finished and logs result to tensorboard.
"""
print("Calculating final training error...")
(err_mean, err_h), kld = self.compute_reconst_kld_errors(self.train_data_loader)
self.training_logger.log_error("Train reconstruction error", err_mean, err_h)
if kld is not None:
kld_mean, kld_h = kld
self.training_logger.log_error("Train KL divergence", kld_mean, kld_h)
print("Calculating final test error...")
(err_mean, err_h), kld = self.compute_reconst_kld_errors(self.test_data_loader)
self.training_logger.log_error("Test reconstruction error", err_mean, err_h)
if kld is not None:
kld_mean, kld_h = kld
self.training_logger.log_error("Test KL divergence", kld_mean, kld_h)
def _overwrite_config_with_cmd_arguments(config, args):
if args.name is not None:
config['experiment_id'] = args.name[0]
if args.epochs is not None:
config['optimization']['epochs'] = args.epochs[0]
if args.dataset_path is not None:
# Read the parameters.yaml file in the given dataset path
dataset_config = _read_config(os.path.join(args.dataset_path[0], 'parameters.yaml'))
for key, value in dataset_config.items():
config[key] = value
if args.env is not None:
if 'train_data' in config['dataset']:
raise ValueError(
f'--env was given but configuration is set for offline training: '
f'train_data={config["dataset"]["train_data"]}'
)
env_params = _read_config(DEFAULT_ENVIRONMENTS_PATH + args.env[0] + '.yaml')
config['environment'] = env_params['environment']
if args.params is not None:
for p in args.params:
key, value = p.split('=')
ptr = config
keys = key.split('.')
for i, k in enumerate(keys):
if i == len(keys) - 1:
ptr[k] = ast.literal_eval(value)
else:
ptr = ptr[k]
if args.load is not None:
config['load_path'] = args.load[0]
if args.reset is not None:
config['reset'] = args.reset
def _read_config(config_file):
with open(config_file, 'r') as f:
config = yaml.load(f, Loader=yaml.FullLoader)
return config
def _merge_configs(train_config, dataset_config):
config = copy.deepcopy(train_config)
for key, value in dataset_config.items():
config[key] = value
# If the config specifies a dataset path, we take the rollout from the configuration file
# in the given dataset
if 'dataset' in config and 'train_data' in config['dataset']:
dataset_config = _read_config( # Read parameters.yaml in root of given dataset
os.path.join(os.path.dirname(config['dataset']['train_data']), 'parameters.yaml'))
config['dataset']['rollout'] = dataset_config['dataset']['rollout']
return config
def _ask_confirmation(config):
printer = pprint.PrettyPrinter(indent=4)
print('The training will be run with the following configuration:')
printed_config = copy.deepcopy(config)
printed_config.pop('networks')
printer.pprint(printed_config)
print('Proceed? (y/n):')
if input() != 'y':
print('Abort.')
exit()
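# Hedged usage example (the script name and exact paths below are assumptions; adjust to your checkout):
#   python train.py --train-config experiment_params/train_config_default.yaml \
#       --env pendulum --params optimization.epochs=5 -y
# --env picks a YAML from experiment_params/default_environments/, and --params applies
# dotted-path overrides on top of the merged configuration.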
if __name__ == "__main__":
DEFAULT_TRAIN_CONFIG_FILE = "experiment_params/train_config_default.yaml"
DEFAULT_DATASET_CONFIG_FILE = "experiment_params/dataset_online_default.yaml"
DEFAULT_ENVIRONMENTS_PATH = "experiment_params/default_environments/"
DEFAULT_SAVE_MODELS_DIR = "saved_models/"
parser = argparse.ArgumentParser()
parser.add_argument(
'--train-config', action='store', nargs=1, type=str, required=True,
help=f'Path to the training configuration yaml file.'
)
parser.add_argument(
'--dataset-config', action='store', nargs=1, type=str, required=False,
help=f'Path to the dataset configuration yaml file.'
)
parser.add_argument(
'--name', action='store', nargs=1, required=False,
help='If specified, this name will be used instead of experiment_id of the yaml file.'
)
parser.add_argument(
'--epochs', action='store', nargs=1, type=int, required=False,
help='The number of training epochs. If not specified, optimization.epochs of the '
'training configuration will be used.'
)
parser.add_argument(
'--env', action='store', nargs=1, type=str, required=False,
help='The environment to use (for online training only). Possible values are '
'\'pendulum\', \'spring\', \'two_bodies\', \'three_bodies\', corresponding to '
'environment configurations in experiment_params/default_environments/. If not '
'specified, the environment specified in the given --dataset-config will be used.'
)
parser.add_argument(
'--dataset-path', action='store', nargs=1, type=str, required=False,
help='Path to a stored dataset to use for training. For offline training only. In this '
'case no dataset configuration file will be loaded.'
)
parser.add_argument(
'--params', action='store', nargs='+', required=False,
help='Override one or more parameters in the config. The format of an argument is '
'param_name=param_value. Nested parameters are accessible by using a dot, '
'e.g. --params dataset.img_size=32. IMPORTANT: lists must be enclosed in double '
'quotes, e.g. --params environment.mass="[0.5, 0.5]".'
)
parser.add_argument(
'-y', action='store_true', default=False, required=False,
help='Whether to skip asking for user confirmation before starting the training.'
)
parser.add_argument(
'--resume', action='store', required=False, nargs='?', default=None,
help='NOT IMPLEMENTED YET. Resume the training from a saved model. If a path is provided, '
'the training will be resumed from the given checkpoint. Otherwise, the last '
'checkpoint will be taken from saved_models/<experiment_id>.'
)
parser.add_argument(
'--load', action='store', type=str, required=False, nargs=1,
help='Path from which to load the HGN.'
)
parser.add_argument(
'--reset', action='store', nargs='+', required=False,
help='Use only in combination with --load; tells the trainer to reinstantiate the given '
'networks. Values: \'encoder\', \'transformer\', \'decoder\', \'hamiltonian\'.'
)
_args = parser.parse_args()
# Read configurations
_train_config = _read_config(_args.train_config[0])
if _args.dataset_path is None: # Will use the dataset config file (or default if not given)
_dataset_config_file = DEFAULT_DATASET_CONFIG_FILE if _args.dataset_config is None else \
_args.dataset_config[0]
_dataset_config = _read_config(_dataset_config_file)
_config = _merge_configs(_train_config, _dataset_config)
else: # Will use the dataset given in the command line arguments
assert _args.dataset_config is None, 'Both --dataset-path and --dataset-config were given.'
_config = _train_config
# Overwrite configuration with command line arguments
_overwrite_config_with_cmd_arguments(_config, _args)
# Show configuration and ask user for confirmation
if not _args.y:
_ask_confirmation(_config)
# Train HGN network
trainer = HgnTrainer(_config)
hgn = trainer.fit()
|
# Copyright (C) 2019-2020, Therapixel SA.
# All rights reserved.
# This file is subject to the terms and conditions described in the
# LICENSE file distributed in this package.
"""Test the C-Find message are correctly sent
and can provide useful results.
"""
import os
from datetime import datetime, timedelta
from pathlib import Path
from typing import Generator
import pytest
from pynetdicom import debug_logger
from pytest import CaptureFixture
from pacsanini.config import PacsaniniConfig
from pacsanini.models import QueryLevel
from pacsanini.net import c_find
def validate_results(sys_capture, results, query_level, query_fields):
"""Validate that c-find results are ok."""
assert isinstance(results, Generator)
results_list = list(results)
out = sys_capture.readouterr()
assert query_level in out.err
for result in results_list:
for field in query_fields:
assert hasattr(result, field)
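# Note: the tests below are marked with @pytest.mark.net and assume a reachable test PACS
# node (the one described by the pacsanini_orthanc_config fixture); they can be selected
# with e.g. `pytest -m net`.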
@pytest.mark.net
def test_find_single_date(capsys: CaptureFixture, pacsanini_orthanc_config: str):
"""Test that the find method functions correctly.
Check that the patient- and study-level query levels are
correctly set by looking at the output logs.
"""
config = PacsaniniConfig.from_yaml(pacsanini_orthanc_config)
query_fields = ["SOPClassUID", "PatientBirthDate"]
debug_logger()
patient_level_results = c_find.patient_find(
config.net.local_node.dict(),
config.net.called_node.dict(),
dicom_fields=query_fields,
start_date=datetime(2016, 6, 22),
)
validate_results(
capsys,
patient_level_results,
"Patient Root Query/Retrieve Information Model - FIND",
query_fields,
)
study_level_results = c_find.study_find(
config.net.local_node,
config.net.called_node,
dicom_fields=query_fields,
start_date=datetime(2010, 12, 1),
)
validate_results(
capsys,
study_level_results,
"Study Root Query/Retrieve Information Model - FIND",
query_fields,
)
@pytest.mark.net
def test_find_multiple_dates(
capsys: CaptureFixture, pacsanini_orthanc_config: str, tmpdir: Path
):
"""Test that the find method functions correctly.
Check that results are written to CSV files and that the patient
and study query levels appear in the output logs.
"""
config = PacsaniniConfig.from_yaml(pacsanini_orthanc_config)
query_fields = ["Modality", "PatientBirthDate"]
debug_logger()
patient_out_path = os.path.join(tmpdir, "patient_results.csv")
c_find.patient_find2csv(
config.net.local_node,
config.net.called_node,
patient_out_path,
dicom_fields=query_fields,
start_date=datetime.now() - timedelta(days=23),
end_date=datetime.now(),
)
assert os.path.exists(patient_out_path)
out = capsys.readouterr()
assert "Patient Root Query/Retrieve Information Model - FIND" in out.err
study_out_path = os.path.join(tmpdir, "study_results.csv")
c_find.study_find2csv(
config.net.local_node,
config.net.called_node,
study_out_path,
dicom_fields=query_fields,
start_date=datetime.now() - timedelta(days=23),
end_date=datetime.now(),
)
assert os.path.exists(study_out_path)
out = capsys.readouterr()
assert "Study Root Query/Retrieve Information Model - FIND" in out.err
@pytest.mark.net
def test_find_invalid_parameters(pacsanini_orthanc_config: str):
"""Test that with invalid parameters, the find functionality
responds with expected errors.
"""
config = PacsaniniConfig.from_yaml(pacsanini_orthanc_config)
invalid_dates = c_find.find(
config.net.local_node,
config.net.called_node,
query_level=QueryLevel.PATIENT,
dicom_fields=["PatientID"],
start_date=datetime.now(),
end_date=datetime.now() - timedelta(days=1),
)
with pytest.raises(ValueError):
list(invalid_dates)
called_node = config.net.called_node.dict()
called_node.pop("ip", None)
invalid_dest_node = c_find.find(
config.net.local_node,
called_node,
query_level=QueryLevel.PATIENT,
dicom_fields=["PatientID"],
start_date=datetime.now(),
)
with pytest.raises(ValueError):
list(invalid_dest_node)
|
import yaml
import os
import shutil
import re
import toml
from typing import List, Set, Tuple, Pattern, Match
from urllib.parse import urlparse
from pathlib import Path
CHECKOUT_DIR = "checkouts"
GIT_CLONE_CMD = "git clone {{}} ./{}/{{}}/{{}}".format(CHECKOUT_DIR)
RE_EXTRACT_TITLE: Pattern[str] = re.compile(r"([#\s]*)(?P<title>.*)")
RE_EXTRACT_IMAGES: Pattern[str] = re.compile(r"\!\[(?P<alt>.*)\]\((?P<url>.*)\)")
RE_EXTRACT_LINKS: Pattern[str] = re.compile(
r"\[(?P<alt>[^\]]*)\]\((?P<rel>[\.\/]*)(?P<url>(?P<domain>https?:\/\/[a-zA-Z\.0-9-]+)?(?!#)\S+)\)"
)
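# Illustration (assumed sample link, not taken from any pulled repo): for the markdown link
# "[docs](./guides/intro.md)", RE_EXTRACT_LINKS captures alt='docs', rel='./',
# url='guides/intro.md' and domain=None; repl_links in _process_content below rewrites it to
# an absolute GitHub URL or, if the target file was also pulled in, to its /docs/latest/... path.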
# holds the git URL and the new path for links between pulled in files
internal_links: dict = {}
config: dict = toml.load("config.toml")
def main():
os.system("rm -rf ./{}/".format(CHECKOUT_DIR))
yaml_external = _read_yaml("external.yaml")
repos_to_clone: Set[str] = set()
directories_to_create: List[str] = []
content: dict
for directory, content in yaml_external.items():
directories_to_create.append(directory)
repo = _get_repo_url_from_pull_url(content.get("source"))
repos_to_clone.add(repo)
_clone_repos(repos_to_clone)
# pull_directories(yaml_external)
generated_files = _pull_files(yaml_external)
_generate_gitignore(generated_files)
def _read_yaml(file_name: str) -> dict:
with open(file_name, "r", encoding="utf-8") as stream:
yaml_file = yaml.safe_load(stream)
return yaml_file
def _get_repo_url_from_pull_url(url: str) -> str:
parsed = urlparse(url)
repo_owner, repo_name = _get_canonical_repo_from_url(url)
return "https://{}/{}/{}".format(parsed.netloc, repo_owner, repo_name)
def _get_canonical_repo_from_url(url: str) -> Tuple[str, str]:
parsed = urlparse(url)
repo_owner, repo_name = parsed.path[1:].split("/")[:2]
return repo_owner, repo_name
def _get_file_content(filename: str, remove_heading=False) -> Tuple[str, str]:
with open(filename, "r") as f:
raw = f.readlines()
if not remove_heading:
return "".join(raw), None
heading = None
for i in range(len(raw)):
if raw[i].startswith("#"):
heading = RE_EXTRACT_TITLE.match(raw[i]).group("title")
heading = '"' + heading.replace('"', '\\"') + '"'
continue
if not raw[i].startswith("#") and not raw[i].strip() == "":
return "".join(raw[i:]), heading
# Fall back to an empty body if the file only contains headings and blank lines
return "", heading
def _generate_yaml_front_matter(front_matter: dict = {}) -> List[str]:
fm = ["---"]
for key, value in front_matter.items():
if type(value) == dict:
fm.append(yaml.dump({key: value}).strip())
else:
fm.append("{}: {}".format(key, value))
fm.append("---")
fm = [l + "\n" for l in fm]
return fm
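# Example (illustrative input): _generate_yaml_front_matter({"title": '"Intro"', "weight": 2})
# returns the newline-terminated lines
#   ---
#   title: "Intro"
#   weight: 2
#   ---
# i.e. a minimal Hugo-style YAML front matter block that _copy_file prepends to pulled pages.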
def _clone_repos(repos: List[str]):
for repo_url in repos:
repo_owner, repo_name = _get_canonical_repo_from_url(repo_url)
cmd = GIT_CLONE_CMD.format(repo_url, repo_owner, repo_name)
os.system(cmd)
# TODO: This is currently not being used
# def pull_directories(yaml_external: dict):
# content: dict
# for target_dir, content in yaml_external.items():
# pull_dir = content.get("pullDir", None)
# if not pull_dir:
# continue
# # abs_target_path = get_abs_dir_path(target_dir)
# repo_owner, repo_name = get_canonical_repo_from_url(content.get("source"))
# repo_checkout_base_path = os.path.join(CHECKOUT_DIR, repo_owner, repo_name)
# repo_checkout_pull_path = os.path.join(repo_checkout_base_path, pull_dir)
# for root, _, files in os.walk(repo_checkout_pull_path):
# for file in files:
# relative_path = os.path.join(
# root[len(repo_checkout_pull_path) + 1 :], file
# )
# copy_file(
# base_src_path=repo_checkout_base_path,
# pull_dir=pull_dir,
# rel_file_path=relative_path,
# target_dir=target_dir,
# transform_file=content.get("transform", {}).get(file, None),
# remove_heading=True,
# )
def _pull_files(yaml_external: dict) -> List[str]:
generated_files: List[str] = []
content: dict
# collects all the URLs and new file paths for the pulled in files.
# we need to do this as a prep step before processing each file
# so we can redirect internally
for target_dir, content in yaml_external.items():
for rel_file in content.get("pullFiles", []):
full_url = "{}/blob/master/{}".format(content.get("source"), rel_file)
file_name = os.path.basename(rel_file)
file_path = os.path.join(target_dir, file_name)
rel_path_to_target_file = ("".join(file_path.split(".")[:-1])).lower()
internal_links[full_url] = "/docs/latest/{}/".format(
rel_path_to_target_file
)
for target_dir, content in yaml_external.items():
pull_files: List[str] = content.get("pullFiles", [])
repo_owner, repo_name = _get_canonical_repo_from_url(content.get("source"))
# processes and copies content from the git checkout to the desired location
for rel_file in pull_files:
filename = os.path.basename(rel_file)
abs_path_to_source_file = os.path.abspath(
os.path.join(CHECKOUT_DIR, repo_owner, repo_name)
)
abs_path_to_target_file = _copy_file(
abs_path_to_repo_checkout_dir=abs_path_to_source_file,
rel_path_to_source_file=rel_file,
target_dir=target_dir,
transform_file=content.get("transform", {}).get(filename, None),
remove_heading=True,
repo_owner=repo_owner,
repo_name=repo_name,
)
generated_files.append(abs_path_to_target_file)
return generated_files
def _generate_gitignore(paths_to_include: List[str]):
with open("content/.gitignore", "w") as gitignore:
gitignore.write("# THIS FILE IS AUTO-GENERATED. DO NOT MODIFY BY HAND\n\n")
for f in paths_to_include:
gitignore.write(os.path.relpath(f, "content") + "\n")
def _copy_file(
abs_path_to_repo_checkout_dir: str,
rel_path_to_source_file: str,
target_dir: str,
transform_file: dict = {},
remove_heading: bool = True,
repo_owner: str = "",
repo_name: str = "",
) -> str:
file_name = os.path.basename(rel_path_to_source_file)
rel_path_to_target_file = os.path.join(target_dir, file_name)
abs_path_to_target_file = os.path.abspath(
os.path.join("content/docs/latest", rel_path_to_target_file)
)
path_to_target_file = Path(os.path.dirname(abs_path_to_target_file))
path_to_target_file.mkdir(parents=True, exist_ok=True)
# we just copy files that aren't markdown
abs_path_to_source_file = os.path.join(
abs_path_to_repo_checkout_dir, rel_path_to_source_file
)
if os.path.splitext(abs_path_to_target_file)[1] != ".md":
shutil.copyfile(abs_path_to_source_file, abs_path_to_target_file)
return abs_path_to_target_file  # keep non-markdown copies in the generated-files list too
# copy file content
with open(abs_path_to_target_file, "w") as target_file:
content, heading = _get_file_content(abs_path_to_source_file, remove_heading)
front_matter = None
if heading:
front_matter = {"title": heading}
if transform_file:
front_matter = {**front_matter, **transform_file.get("frontMatter", {})}
if front_matter:
target_file.writelines(_generate_yaml_front_matter(front_matter))
final_content = _process_content(
content=content,
abs_path_to_source_dir=abs_path_to_repo_checkout_dir,
rel_path_to_source_file=rel_path_to_source_file,
repo_owner=repo_owner,
repo_name=repo_name,
)
target_file.write(final_content)
return abs_path_to_target_file
def _process_content(
content: str,
abs_path_to_source_dir: str,
rel_path_to_source_file: str,
repo_owner: str,
repo_name: str,
):
def repl_images(m: Match[str]):
url = m.group("url")
alt = m.group("alt")
new_url = _copy_asset(
url_path=url,
abs_path_to_source_dir=abs_path_to_source_dir,
rel_path_to_source_file=rel_path_to_source_file,
repo_owner=repo_owner,
repo_name=repo_name,
)
figure = '{{{{< figure src="{}" caption="{}" width="100" >}}}}'.format(
new_url, alt
)
return figure
def repl_links(m: Match[str]):
alt = m.group("alt")
rel = m.group("rel")
url = m.group("url")
domain = m.group("domain")
rel_url = url
# this is an external url
if domain is not None:
# that points to this site
if domain == config.get("publicUrl"):
new_url = url[len(config.get("publicUrl")) :]
else:
new_url = url
# this is an internal relative url
else:
if rel == "/":
rel_url = url
elif rel == "./" or rel == "":
rel_url = os.path.join(os.path.dirname(rel_path_to_source_file), url)
new_url = "https://github.com/{}/{}/blob/master/{}".format(
repo_owner, repo_name, rel_url
)
# if this file has been already pulled in, we can use the new internal URL
# instead of pointing to the original github location
if new_url in internal_links:
new_url = internal_links[new_url]
new_link = "[{}]({})".format(alt, new_url)
return new_link
parsed_content = RE_EXTRACT_IMAGES.sub(repl_images, content)
parsed_content = RE_EXTRACT_LINKS.sub(repl_links, parsed_content)
return parsed_content
def _copy_asset(
url_path: str,
abs_path_to_source_dir: str,
rel_path_to_source_file: str,
repo_owner: str,
repo_name: str,
) -> str:
if url_path.startswith(os.path.sep):
rel_path_to_asset = url_path[1:]
else:
rel_path_to_source_dir = os.path.dirname(rel_path_to_source_file)
rel_path_to_asset = os.path.join(rel_path_to_source_dir, url_path)
path_to_source_asset = os.path.join(abs_path_to_source_dir, rel_path_to_asset)
path_to_target_asset = Path(
os.path.join("./static/img/checkouts", repo_owner, repo_name, rel_path_to_asset)
)
path_to_target_asset.parent.mkdir(parents=True, exist_ok=True)
shutil.copyfile(path_to_source_asset, path_to_target_asset.absolute())
rel_target_url_path = os.path.join(
"/img/checkouts", repo_owner, repo_name, rel_path_to_asset
)
return rel_target_url_path
if __name__ == "__main__":
main()
|
from __future__ import unicode_literals
import io
import itertools
import os
import subprocess
import time
import re
import json
from .common import AudioConversionError, PostProcessor
from ..compat import compat_str
from ..utils import (
dfxp2srt,
encodeArgument,
encodeFilename,
float_or_none,
_get_exe_version_output,
detect_exe_version,
is_outdated_version,
ISO639Utils,
orderedSet,
Popen,
PostProcessingError,
prepend_extension,
replace_extension,
shell_quote,
traverse_obj,
variadic,
write_json_file,
)
EXT_TO_OUT_FORMATS = {
'aac': 'adts',
'flac': 'flac',
'm4a': 'ipod',
'mka': 'matroska',
'mkv': 'matroska',
'mpg': 'mpeg',
'ogv': 'ogg',
'ts': 'mpegts',
'wma': 'asf',
'wmv': 'asf',
'vtt': 'webvtt',
}
ACODECS = {
'mp3': 'libmp3lame',
'aac': 'aac',
'flac': 'flac',
'm4a': 'aac',
'opus': 'libopus',
'vorbis': 'libvorbis',
'wav': None,
'alac': None,
}
class FFmpegPostProcessorError(PostProcessingError):
pass
class FFmpegPostProcessor(PostProcessor):
def __init__(self, downloader=None):
PostProcessor.__init__(self, downloader)
self._determine_executables()
def check_version(self):
if not self.available:
raise FFmpegPostProcessorError('ffmpeg not found. Please install or provide the path using --ffmpeg-location')
required_version = '10-0' if self.basename == 'avconv' else '1.0'
if is_outdated_version(
self._versions[self.basename], required_version):
warning = 'Your copy of %s is outdated, update %s to version %s or newer if you encounter any errors.' % (
self.basename, self.basename, required_version)
self.report_warning(warning)
@staticmethod
def get_versions_and_features(downloader=None):
pp = FFmpegPostProcessor(downloader)
return pp._versions, pp._features
@staticmethod
def get_versions(downloader=None):
return FFmpegPostProcessor.get_versions_and_features(downloader)[0]
def _determine_executables(self):
programs = ['avprobe', 'avconv', 'ffmpeg', 'ffprobe']
def get_ffmpeg_version(path, prog):
out = _get_exe_version_output(path, ['-bsfs'])
ver = detect_exe_version(out) if out else False
if ver:
regexs = [
r'(?:\d+:)?([0-9.]+)-[0-9]+ubuntu[0-9.]+$', # Ubuntu, see [1]
r'n([0-9.]+)$', # Arch Linux
# 1. http://www.ducea.com/2006/06/17/ubuntu-package-version-naming-explanation/
]
for regex in regexs:
mobj = re.match(regex, ver)
if mobj:
ver = mobj.group(1)
self._versions[prog] = ver
if prog != 'ffmpeg' or not out:
return
mobj = re.search(r'(?m)^\s+libavformat\s+(?:[0-9. ]+)\s+/\s+(?P<runtime>[0-9. ]+)', out)
lavf_runtime_version = mobj.group('runtime').replace(' ', '') if mobj else None
self._features = {
'fdk': '--enable-libfdk-aac' in out,
'setts': 'setts' in out.splitlines(),
'needs_adtstoasc': is_outdated_version(lavf_runtime_version, '57.56.100', False),
}
self.basename = None
self.probe_basename = None
self._paths = None
self._versions = None
self._features = {}
prefer_ffmpeg = self.get_param('prefer_ffmpeg', True)
location = self.get_param('ffmpeg_location')
if location is None:
self._paths = {p: p for p in programs}
else:
if not os.path.exists(location):
self.report_warning(
'ffmpeg-location %s does not exist! '
'Continuing without ffmpeg.' % (location))
self._versions = {}
return
elif os.path.isdir(location):
dirname, basename = location, None
else:
basename = os.path.splitext(os.path.basename(location))[0]
basename = next((p for p in programs if basename.startswith(p)), 'ffmpeg')
dirname = os.path.dirname(os.path.abspath(location))
if basename in ('ffmpeg', 'ffprobe'):
prefer_ffmpeg = True
self._paths = dict(
(p, os.path.join(dirname, p)) for p in programs)
if basename:
self._paths[basename] = location
self._versions = {}
for p in programs:
get_ffmpeg_version(self._paths[p], p)
if prefer_ffmpeg is False:
prefs = ('avconv', 'ffmpeg')
else:
prefs = ('ffmpeg', 'avconv')
for p in prefs:
if self._versions[p]:
self.basename = p
break
if prefer_ffmpeg is False:
prefs = ('avprobe', 'ffprobe')
else:
prefs = ('ffprobe', 'avprobe')
for p in prefs:
if self._versions[p]:
self.probe_basename = p
break
@property
def available(self):
return self.basename is not None
@property
def executable(self):
return self._paths[self.basename]
@property
def probe_available(self):
return self.probe_basename is not None
@property
def probe_executable(self):
return self._paths[self.probe_basename]
def get_audio_codec(self, path):
if not self.probe_available and not self.available:
raise PostProcessingError('ffprobe and ffmpeg not found. Please install or provide the path using --ffmpeg-location')
try:
if self.probe_available:
cmd = [
encodeFilename(self.probe_executable, True),
encodeArgument('-show_streams')]
else:
cmd = [
encodeFilename(self.executable, True),
encodeArgument('-i')]
cmd.append(encodeFilename(self._ffmpeg_filename_argument(path), True))
self.write_debug('%s command line: %s' % (self.basename, shell_quote(cmd)))
handle = Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout_data, stderr_data = handle.communicate_or_kill()
expected_ret = 0 if self.probe_available else 1
if handle.wait() != expected_ret:
return None
except (IOError, OSError):
return None
output = (stdout_data if self.probe_available else stderr_data).decode('ascii', 'ignore')
if self.probe_available:
audio_codec = None
for line in output.split('\n'):
if line.startswith('codec_name='):
audio_codec = line.split('=')[1].strip()
elif line.strip() == 'codec_type=audio' and audio_codec is not None:
return audio_codec
else:
# Stream #FILE_INDEX:STREAM_INDEX[STREAM_ID](LANGUAGE): CODEC_TYPE: CODEC_NAME
mobj = re.search(
r'Stream\s*#\d+:\d+(?:\[0x[0-9a-f]+\])?(?:\([a-z]{3}\))?:\s*Audio:\s*([0-9a-z]+)',
output)
if mobj:
return mobj.group(1)
return None
def get_metadata_object(self, path, opts=[]):
if self.probe_basename != 'ffprobe':
if self.probe_available:
self.report_warning('Only ffprobe is supported for metadata extraction')
raise PostProcessingError('ffprobe not found. Please install or provide the path using --ffmpeg-location')
self.check_version()
cmd = [
encodeFilename(self.probe_executable, True),
encodeArgument('-hide_banner'),
encodeArgument('-show_format'),
encodeArgument('-show_streams'),
encodeArgument('-print_format'),
encodeArgument('json'),
]
cmd += opts
cmd.append(encodeFilename(self._ffmpeg_filename_argument(path), True))
self.write_debug('ffprobe command line: %s' % shell_quote(cmd))
p = Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
stdout, stderr = p.communicate()
return json.loads(stdout.decode('utf-8', 'replace'))
def get_stream_number(self, path, keys, value):
streams = self.get_metadata_object(path)['streams']
num = next(
(i for i, stream in enumerate(streams) if traverse_obj(stream, keys, casesense=False) == value),
None)
return num, len(streams)
def _get_real_video_duration(self, filepath, fatal=True):
try:
duration = float_or_none(
traverse_obj(self.get_metadata_object(filepath), ('format', 'duration')))
if not duration:
raise PostProcessingError('ffprobe returned empty duration')
return duration
except PostProcessingError as e:
if fatal:
raise PostProcessingError(f'Unable to determine video duration: {e.msg}')
def _duration_mismatch(self, d1, d2):
if not d1 or not d2:
return None
# The duration is often only known to nearest second. So there can be <1sec disparity naturally.
# Further excuse an additional <1sec difference.
return abs(d1 - d2) > 2
def run_ffmpeg_multiple_files(self, input_paths, out_path, opts, **kwargs):
return self.real_run_ffmpeg(
[(path, []) for path in input_paths],
[(out_path, opts)], **kwargs)
def real_run_ffmpeg(self, input_path_opts, output_path_opts, *, expected_retcodes=(0,)):
self.check_version()
oldest_mtime = min(
os.stat(encodeFilename(path)).st_mtime for path, _ in input_path_opts if path)
cmd = [encodeFilename(self.executable, True), encodeArgument('-y')]
# avconv does not have repeat option
if self.basename == 'ffmpeg':
cmd += [encodeArgument('-loglevel'), encodeArgument('repeat+info')]
def make_args(file, args, name, number):
keys = ['_%s%d' % (name, number), '_%s' % name]
if name == 'o' and number == 1:
keys.append('')
args += self._configuration_args(self.basename, keys)
if name == 'i':
args.append('-i')
return (
[encodeArgument(arg) for arg in args]
+ [encodeFilename(self._ffmpeg_filename_argument(file), True)])
for arg_type, path_opts in (('i', input_path_opts), ('o', output_path_opts)):
cmd += itertools.chain.from_iterable(
make_args(path, list(opts), arg_type, i + 1)
for i, (path, opts) in enumerate(path_opts) if path)
self.write_debug('ffmpeg command line: %s' % shell_quote(cmd))
p = Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
stdout, stderr = p.communicate_or_kill()
if p.returncode not in variadic(expected_retcodes):
stderr = stderr.decode('utf-8', 'replace').strip()
self.write_debug(stderr)
raise FFmpegPostProcessorError(stderr.split('\n')[-1])
for out_path, _ in output_path_opts:
if out_path:
self.try_utime(out_path, oldest_mtime, oldest_mtime)
return stderr.decode('utf-8', 'replace')
def run_ffmpeg(self, path, out_path, opts, **kwargs):
return self.run_ffmpeg_multiple_files([path], out_path, opts, **kwargs)
@staticmethod
def _ffmpeg_filename_argument(fn):
# Always use 'file:' because the filename may contain ':' (ffmpeg
# interprets that as a protocol) or can start with '-' (-- is broken in
# ffmpeg, see https://ffmpeg.org/trac/ffmpeg/ticket/2127 for details)
# Also leave '-' intact in order not to break streaming to stdout.
if fn.startswith(('http://', 'https://')):
return fn
return 'file:' + fn if fn != '-' else fn
@staticmethod
def _quote_for_ffmpeg(string):
# See https://ffmpeg.org/ffmpeg-utils.html#toc-Quoting-and-escaping
# A sequence of '' produces '\'''\'';
# final replace removes the empty '' between \' \'.
string = string.replace("'", r"'\''").replace("'''", "'")
# Handle potential ' at string boundaries.
string = string[1:] if string[0] == "'" else "'" + string
return string[:-1] if string[-1] == "'" else string + "'"
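# Example (illustrative): _quote_for_ffmpeg("it's") returns 'it'\''s' (including the outer
# single quotes), which is how ffmpeg expects embedded quotes to be escaped in concat specs.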
def force_keyframes(self, filename, timestamps):
timestamps = orderedSet(timestamps)
if timestamps[0] == 0:
timestamps = timestamps[1:]
keyframe_file = prepend_extension(filename, 'keyframes.temp')
self.to_screen(f'Re-encoding "{filename}" with appropriate keyframes')
self.run_ffmpeg(filename, keyframe_file, ['-force_key_frames', ','.join(
f'{t:.6f}' for t in timestamps)])
return keyframe_file
def concat_files(self, in_files, out_file, concat_opts=None):
"""
Use concat demuxer to concatenate multiple files having identical streams.
Only inpoint, outpoint, and duration concat options are supported.
See https://ffmpeg.org/ffmpeg-formats.html#concat-1 for details
"""
concat_file = f'{out_file}.concat'
self.write_debug(f'Writing concat spec to {concat_file}')
with open(concat_file, 'wt', encoding='utf-8') as f:
f.writelines(self._concat_spec(in_files, concat_opts))
out_flags = ['-c', 'copy']
if out_file.rpartition('.')[-1] in ('mp4', 'mov'):
# For some reason, '-c copy' is not enough to copy subtitles
out_flags.extend(['-c:s', 'mov_text', '-movflags', '+faststart'])
try:
self.real_run_ffmpeg(
[(concat_file, ['-hide_banner', '-nostdin', '-f', 'concat', '-safe', '0'])],
[(out_file, out_flags)])
finally:
os.remove(concat_file)
@classmethod
def _concat_spec(cls, in_files, concat_opts=None):
if concat_opts is None:
concat_opts = [{}] * len(in_files)
yield 'ffconcat version 1.0\n'
for file, opts in zip(in_files, concat_opts):
yield f'file {cls._quote_for_ffmpeg(cls._ffmpeg_filename_argument(file))}\n'
# Iterate explicitly to yield the following directives in order, ignoring the rest.
for directive in 'inpoint', 'outpoint', 'duration':
if directive in opts:
yield f'{directive} {opts[directive]}\n'
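# Illustration (assumed filenames): joining cls._concat_spec(['a.mp4', 'b.mp4'], [{'inpoint': 0}, {}])
# produces:
#   ffconcat version 1.0
#   file 'file:a.mp4'
#   inpoint 0
#   file 'file:b.mp4'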
class FFmpegExtractAudioPP(FFmpegPostProcessor):
COMMON_AUDIO_EXTS = ('wav', 'flac', 'm4a', 'aiff', 'mp3', 'ogg', 'mka', 'opus', 'wma')
SUPPORTED_EXTS = ('best', 'aac', 'flac', 'mp3', 'm4a', 'opus', 'vorbis', 'wav', 'alac')
def __init__(self, downloader=None, preferredcodec=None, preferredquality=None, nopostoverwrites=False):
FFmpegPostProcessor.__init__(self, downloader)
self._preferredcodec = preferredcodec or 'best'
self._preferredquality = float_or_none(preferredquality)
self._nopostoverwrites = nopostoverwrites
def _quality_args(self, codec):
if self._preferredquality is None:
return []
elif self._preferredquality > 10:
return ['-b:a', f'{self._preferredquality}k']
limits = {
'libmp3lame': (10, 0),
'libvorbis': (0, 10),
# FFmpeg's AAC encoder does not have an upper limit for the value of -q:a.
# Experimentally, with values over 4, bitrate changes were minimal or non-existent
'aac': (0.1, 4),
'libfdk_aac': (1, 5),
}.get(codec)
if not limits:
return []
q = limits[1] + (limits[0] - limits[1]) * (self._preferredquality / 10)
if codec == 'libfdk_aac':
return ['-vbr', f'{int(q)}']
return ['-q:a', f'{q}']
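# Example (illustrative): with a preferred quality of 5 and the libmp3lame codec, the
# limits (10, 0) above give q = 0 + (10 - 0) * (5 / 10) = 5.0, i.e. ['-q:a', '5.0'];
# values above 10 are instead passed through as a bitrate via '-b:a'.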
def run_ffmpeg(self, path, out_path, codec, more_opts):
if codec is None:
acodec_opts = []
else:
acodec_opts = ['-acodec', codec]
opts = ['-vn'] + acodec_opts + more_opts
try:
FFmpegPostProcessor.run_ffmpeg(self, path, out_path, opts)
except FFmpegPostProcessorError as err:
raise AudioConversionError(err.msg)
@PostProcessor._restrict_to(images=False)
def run(self, information):
orig_path = path = information['filepath']
orig_ext = information['ext']
if self._preferredcodec == 'best' and orig_ext in self.COMMON_AUDIO_EXTS:
self.to_screen('Skipping audio extraction since the file is already in a common audio format')
return [], information
filecodec = self.get_audio_codec(path)
if filecodec is None:
raise PostProcessingError('WARNING: unable to obtain file audio codec with ffprobe')
more_opts = []
if self._preferredcodec == 'best' or self._preferredcodec == filecodec or (self._preferredcodec == 'm4a' and filecodec == 'aac'):
if filecodec == 'aac' and self._preferredcodec in ['m4a', 'best']:
# Lossless, but in another container
acodec = 'copy'
extension = 'm4a'
more_opts = ['-bsf:a', 'aac_adtstoasc']
elif filecodec in ['aac', 'flac', 'mp3', 'vorbis', 'opus']:
# Lossless if possible
acodec = 'copy'
extension = filecodec
if filecodec == 'aac':
more_opts = ['-f', 'adts']
if filecodec == 'vorbis':
extension = 'ogg'
elif filecodec == 'alac':
acodec = None
extension = 'm4a'
more_opts += ['-acodec', 'alac']
else:
# MP3 otherwise.
acodec = 'libmp3lame'
extension = 'mp3'
more_opts = self._quality_args(acodec)
else:
# We convert the audio (lossy if codec is lossy)
acodec = ACODECS[self._preferredcodec]
if acodec == 'aac' and self._features.get('fdk'):
acodec = 'libfdk_aac'
extension = self._preferredcodec
more_opts = self._quality_args(acodec)
if self._preferredcodec == 'aac':
more_opts += ['-f', 'adts']
elif self._preferredcodec == 'm4a':
more_opts += ['-bsf:a', 'aac_adtstoasc']
elif self._preferredcodec == 'vorbis':
extension = 'ogg'
elif self._preferredcodec == 'wav':
extension = 'wav'
more_opts += ['-f', 'wav']
elif self._preferredcodec == 'alac':
extension = 'm4a'
more_opts += ['-acodec', 'alac']
prefix, sep, ext = path.rpartition('.') # not os.path.splitext, since the latter does not work on unicode in all setups
temp_path = new_path = prefix + sep + extension
if new_path == path:
orig_path = prepend_extension(path, 'orig')
temp_path = prepend_extension(path, 'temp')
if (self._nopostoverwrites and os.path.exists(encodeFilename(new_path))
and os.path.exists(encodeFilename(orig_path))):
self.to_screen('Post-process file %s exists, skipping' % new_path)
return [], information
try:
self.to_screen(f'Destination: {new_path}')
self.run_ffmpeg(path, temp_path, acodec, more_opts)
except AudioConversionError as e:
raise PostProcessingError(
'audio conversion failed: ' + e.msg)
except Exception:
raise PostProcessingError('error running ' + self.basename)
os.replace(path, orig_path)
os.replace(temp_path, new_path)
information['filepath'] = new_path
information['ext'] = extension
# Try to update the date time for extracted audio file.
if information.get('filetime') is not None:
self.try_utime(
new_path, time.time(), information['filetime'],
errnote='Cannot update utime of audio file')
return [orig_path], information
class FFmpegVideoConvertorPP(FFmpegPostProcessor):
SUPPORTED_EXTS = ('mp4', 'mkv', 'flv', 'webm', 'mov', 'avi', 'mp3', 'mka', 'm4a', 'ogg', 'opus')
FORMAT_RE = re.compile(r'{0}(?:/{0})*$'.format(r'(?:\w+>)?(?:%s)' % '|'.join(SUPPORTED_EXTS)))
_ACTION = 'converting'
def __init__(self, downloader=None, preferedformat=None):
super(FFmpegVideoConvertorPP, self).__init__(downloader)
self._preferedformats = preferedformat.lower().split('/')
def _target_ext(self, source_ext):
for pair in self._preferedformats:
kv = pair.split('>')
if len(kv) == 1 or kv[0].strip() == source_ext:
return kv[-1].strip()
@staticmethod
def _options(target_ext):
if target_ext == 'avi':
return ['-c:v', 'libxvid', '-vtag', 'XVID']
return []
@PostProcessor._restrict_to(images=False)
def run(self, info):
filename, source_ext = info['filepath'], info['ext'].lower()
target_ext = self._target_ext(source_ext)
_skip_msg = (
f'could not find a mapping for {source_ext}' if not target_ext
else f'already is in target format {source_ext}' if source_ext == target_ext
else None)
if _skip_msg:
self.to_screen(f'Not {self._ACTION} media file {filename!r}; {_skip_msg}')
return [], info
outpath = replace_extension(filename, target_ext, source_ext)
self.to_screen(f'{self._ACTION.title()} video from {source_ext} to {target_ext}; Destination: {outpath}')
self.run_ffmpeg(filename, outpath, self._options(target_ext))
info['filepath'] = outpath
info['format'] = info['ext'] = target_ext
return [filename], info
class FFmpegVideoRemuxerPP(FFmpegVideoConvertorPP):
_ACTION = 'remuxing'
@staticmethod
def _options(target_ext):
options = ['-c', 'copy', '-map', '0', '-dn']
if target_ext in ['mp4', 'm4a', 'mov']:
options.extend(['-movflags', '+faststart'])
return options
class FFmpegEmbedSubtitlePP(FFmpegPostProcessor):
def __init__(self, downloader=None, already_have_subtitle=False):
super(FFmpegEmbedSubtitlePP, self).__init__(downloader)
self._already_have_subtitle = already_have_subtitle
@PostProcessor._restrict_to(images=False)
def run(self, info):
if info['ext'] not in ('mp4', 'webm', 'mkv'):
self.to_screen('Subtitles can only be embedded in mp4, webm or mkv files')
return [], info
subtitles = info.get('requested_subtitles')
if not subtitles:
self.to_screen('There aren\'t any subtitles to embed')
return [], info
filename = info['filepath']
if info.get('duration') and not info.get('__real_download') and self._duration_mismatch(
self._get_real_video_duration(filename, False), info['duration']):
self.to_screen(f'Skipping {self.pp_key()} since the real and expected durations mismatch')
return [], info
ext = info['ext']
sub_langs, sub_names, sub_filenames = [], [], []
webm_vtt_warn = False
mp4_ass_warn = False
for lang, sub_info in subtitles.items():
if not os.path.exists(sub_info.get('filepath', '')):
self.report_warning(f'Skipping embedding {lang} subtitle because the file is missing')
continue
sub_ext = sub_info['ext']
if sub_ext == 'json':
self.report_warning('JSON subtitles cannot be embedded')
elif ext != 'webm' or ext == 'webm' and sub_ext == 'vtt':
sub_langs.append(lang)
sub_names.append(sub_info.get('name'))
sub_filenames.append(sub_info['filepath'])
else:
if not webm_vtt_warn and ext == 'webm' and sub_ext != 'vtt':
webm_vtt_warn = True
self.report_warning('Only WebVTT subtitles can be embedded in webm files')
if not mp4_ass_warn and ext == 'mp4' and sub_ext == 'ass':
mp4_ass_warn = True
self.report_warning('ASS subtitles cannot be properly embedded in mp4 files; expect issues')
if not sub_langs:
return [], info
input_files = [filename] + sub_filenames
opts = [
'-c', 'copy', '-map', '0', '-dn',
# Don't copy the existing subtitles, we may be running the
# postprocessor a second time
'-map', '-0:s',
# Don't copy Apple TV chapters track, bin_data (see #19042, #19024,
# https://trac.ffmpeg.org/ticket/6016)
'-map', '-0:d',
]
if info['ext'] == 'mp4':
opts += ['-c:s', 'mov_text']
for i, (lang, name) in enumerate(zip(sub_langs, sub_names)):
opts.extend(['-map', '%d:0' % (i + 1)])
lang_code = ISO639Utils.short2long(lang) or lang
opts.extend(['-metadata:s:s:%d' % i, 'language=%s' % lang_code])
if name:
opts.extend(['-metadata:s:s:%d' % i, 'handler_name=%s' % name,
'-metadata:s:s:%d' % i, 'title=%s' % name])
temp_filename = prepend_extension(filename, 'temp')
self.to_screen('Embedding subtitles in "%s"' % filename)
self.run_ffmpeg_multiple_files(input_files, temp_filename, opts)
os.replace(temp_filename, filename)
files_to_delete = [] if self._already_have_subtitle else sub_filenames
return files_to_delete, info
class FFmpegMetadataPP(FFmpegPostProcessor):
def __init__(self, downloader, add_metadata=True, add_chapters=True, add_infojson='if_exists'):
FFmpegPostProcessor.__init__(self, downloader)
self._add_metadata = add_metadata
self._add_chapters = add_chapters
self._add_infojson = add_infojson
@staticmethod
def _options(target_ext):
yield from ('-map', '0', '-dn')
if target_ext == 'm4a':
yield from ('-vn', '-acodec', 'copy')
else:
yield from ('-c', 'copy')
@PostProcessor._restrict_to(images=False)
def run(self, info):
filename, metadata_filename = info['filepath'], None
files_to_delete, options = [], []
if self._add_chapters and info.get('chapters'):
metadata_filename = replace_extension(filename, 'meta')
options.extend(self._get_chapter_opts(info['chapters'], metadata_filename))
files_to_delete.append(metadata_filename)
if self._add_metadata:
options.extend(self._get_metadata_opts(info))
if self._add_infojson:
if info['ext'] in ('mkv', 'mka'):
infojson_filename = info.get('infojson_filename')
options.extend(self._get_infojson_opts(info, infojson_filename))
if not infojson_filename:
files_to_delete.append(info.get('infojson_filename'))
elif self._add_infojson is True:
self.to_screen('The info-json can only be attached to mkv/mka files')
if not options:
self.to_screen('There isn\'t any metadata to add')
return [], info
temp_filename = prepend_extension(filename, 'temp')
self.to_screen('Adding metadata to "%s"' % filename)
self.run_ffmpeg_multiple_files(
(filename, metadata_filename), temp_filename,
itertools.chain(self._options(info['ext']), *options))
for file in filter(None, files_to_delete):
os.remove(file) # Don't obey --keep-files
os.replace(temp_filename, filename)
return [], info
@staticmethod
def _get_chapter_opts(chapters, metadata_filename):
with io.open(metadata_filename, 'wt', encoding='utf-8') as f:
def ffmpeg_escape(text):
return re.sub(r'([\\=;#\n])', r'\\\1', text)
metadata_file_content = ';FFMETADATA1\n'
for chapter in chapters:
metadata_file_content += '[CHAPTER]\nTIMEBASE=1/1000\n'
metadata_file_content += 'START=%d\n' % (chapter['start_time'] * 1000)
metadata_file_content += 'END=%d\n' % (chapter['end_time'] * 1000)
chapter_title = chapter.get('title')
if chapter_title:
metadata_file_content += 'title=%s\n' % ffmpeg_escape(chapter_title)
f.write(metadata_file_content)
yield ('-map_metadata', '1')
def _get_metadata_opts(self, info):
metadata = {}
meta_prefix = 'meta_'
def add(meta_list, info_list=None):
value = next((
str(info[key]) for key in [meta_prefix] + list(variadic(info_list or meta_list))
if info.get(key) is not None), None)
if value not in ('', None):
metadata.update({meta_f: value for meta_f in variadic(meta_list)})
# See [1-4] for some info on media metadata/metadata supported
# by ffmpeg.
# 1. https://kdenlive.org/en/project/adding-meta-data-to-mp4-video/
# 2. https://wiki.multimedia.cx/index.php/FFmpeg_Metadata
# 3. https://kodi.wiki/view/Video_file_tagging
add('title', ('track', 'title'))
add('date', 'upload_date')
add(('description', 'synopsis'), 'description')
add(('purl', 'comment'), 'webpage_url')
add('track', 'track_number')
add('artist', ('artist', 'creator', 'uploader', 'uploader_id'))
add('genre')
add('album')
add('album_artist')
add('disc', 'disc_number')
add('show', 'series')
add('season_number')
add('episode_id', ('episode', 'episode_id'))
add('episode_sort', 'episode_number')
if 'embed-metadata' in self.get_param('compat_opts', []):
add('comment', 'description')
metadata.pop('synopsis', None)
for key, value in info.items():
if value is not None and key != meta_prefix and key.startswith(meta_prefix):
metadata[key[len(meta_prefix):]] = value
for name, value in metadata.items():
yield ('-metadata', f'{name}={value}')
stream_idx = 0
for fmt in info.get('requested_formats') or []:
stream_count = 2 if 'none' not in (fmt.get('vcodec'), fmt.get('acodec')) else 1
if fmt.get('language'):
lang = ISO639Utils.short2long(fmt['language']) or fmt['language']
for i in range(stream_count):
yield ('-metadata:s:%d' % (stream_idx + i), 'language=%s' % lang)
stream_idx += stream_count
def _get_infojson_opts(self, info, infofn):
if not infofn or not os.path.exists(infofn):
if self._add_infojson is not True:
return
infofn = infofn or '%s.temp' % (
self._downloader.prepare_filename(info, 'infojson')
or replace_extension(self._downloader.prepare_filename(info), 'info.json', info['ext']))
if not self._downloader._ensure_dir_exists(infofn):
return
self.write_debug(f'Writing info-json to: {infofn}')
write_json_file(self._downloader.sanitize_info(info, self.get_param('clean_infojson', True)), infofn)
info['infojson_filename'] = infofn
old_stream, new_stream = self.get_stream_number(info['filepath'], ('tags', 'mimetype'), 'application/json')
if old_stream is not None:
yield ('-map', '-0:%d' % old_stream)
new_stream -= 1
yield ('-attach', infofn,
'-metadata:s:%d' % new_stream, 'mimetype=application/json')
class FFmpegMergerPP(FFmpegPostProcessor):
@PostProcessor._restrict_to(images=False)
def run(self, info):
filename = info['filepath']
temp_filename = prepend_extension(filename, 'temp')
args = ['-c', 'copy']
audio_streams = 0
for (i, fmt) in enumerate(info['requested_formats']):
if fmt.get('acodec') != 'none':
args.extend(['-map', f'{i}:a:0'])
aac_fixup = fmt['protocol'].startswith('m3u8') and self.get_audio_codec(fmt['filepath']) == 'aac'
if aac_fixup:
args.extend([f'-bsf:a:{audio_streams}', 'aac_adtstoasc'])
audio_streams += 1
if fmt.get('vcodec') != 'none':
args.extend(['-map', '%u:v:0' % (i)])
self.to_screen('Merging formats into "%s"' % filename)
self.run_ffmpeg_multiple_files(info['__files_to_merge'], temp_filename, args)
os.rename(encodeFilename(temp_filename), encodeFilename(filename))
return info['__files_to_merge'], info
def can_merge(self):
# TODO: figure out merge-capable ffmpeg version
if self.basename != 'avconv':
return True
required_version = '10-0'
if is_outdated_version(
self._versions[self.basename], required_version):
warning = ('Your copy of %s is outdated and unable to properly mux separate video and audio files, '
'yt-dlp will download single file media. '
'Update %s to version %s or newer to fix this.') % (
self.basename, self.basename, required_version)
self.report_warning(warning)
return False
return True
class FFmpegFixupPostProcessor(FFmpegPostProcessor):
def _fixup(self, msg, filename, options):
temp_filename = prepend_extension(filename, 'temp')
self.to_screen(f'{msg} of "{filename}"')
self.run_ffmpeg(filename, temp_filename, options)
os.replace(temp_filename, filename)
class FFmpegFixupStretchedPP(FFmpegFixupPostProcessor):
@PostProcessor._restrict_to(images=False, audio=False)
def run(self, info):
stretched_ratio = info.get('stretched_ratio')
if stretched_ratio not in (None, 1):
self._fixup('Fixing aspect ratio', info['filepath'], [
'-c', 'copy', '-map', '0', '-dn', '-aspect', '%f' % stretched_ratio])
return [], info
class FFmpegFixupM4aPP(FFmpegFixupPostProcessor):
@PostProcessor._restrict_to(images=False, video=False)
def run(self, info):
if info.get('container') == 'm4a_dash':
self._fixup('Correcting container', info['filepath'], [
'-c', 'copy', '-map', '0', '-dn', '-f', 'mp4'])
return [], info
class FFmpegFixupM3u8PP(FFmpegFixupPostProcessor):
def _needs_fixup(self, info):
yield info['ext'] in ('mp4', 'm4a')
yield info['protocol'].startswith('m3u8')
try:
metadata = self.get_metadata_object(info['filepath'])
except PostProcessingError as e:
self.report_warning(f'Unable to extract metadata: {e.msg}')
yield True
else:
yield traverse_obj(metadata, ('format', 'format_name'), casesense=False) == 'mpegts'
@PostProcessor._restrict_to(images=False)
def run(self, info):
if all(self._needs_fixup(info)):
self._fixup('Fixing MPEG-TS in MP4 container', info['filepath'], [
'-c', 'copy', '-map', '0', '-dn', '-f', 'mp4', '-bsf:a', 'aac_adtstoasc'])
return [], info
class FFmpegFixupTimestampPP(FFmpegFixupPostProcessor):
def __init__(self, downloader=None, trim=0.001):
# "trim" should be used when the video contains unintended packets
super(FFmpegFixupTimestampPP, self).__init__(downloader)
assert isinstance(trim, (int, float))
self.trim = str(trim)
@PostProcessor._restrict_to(images=False)
def run(self, info):
if not self._features.get('setts'):
self.report_warning(
'A re-encode is needed to fix timestamps in older versions of ffmpeg. '
'Please install ffmpeg 4.4 or later to fixup without re-encoding')
opts = ['-vf', 'setpts=PTS-STARTPTS']
else:
opts = ['-c', 'copy', '-bsf', 'setts=ts=TS-STARTPTS']
self._fixup('Fixing frame timestamp', info['filepath'], opts + ['-map', '0', '-dn', '-ss', self.trim])
return [], info
class FFmpegFixupDurationPP(FFmpegFixupPostProcessor):
@PostProcessor._restrict_to(images=False)
def run(self, info):
self._fixup('Fixing video duration', info['filepath'], ['-c', 'copy', '-map', '0', '-dn'])
return [], info
class FFmpegSubtitlesConvertorPP(FFmpegPostProcessor):
SUPPORTED_EXTS = ('srt', 'vtt', 'ass', 'lrc')
def __init__(self, downloader=None, format=None):
super(FFmpegSubtitlesConvertorPP, self).__init__(downloader)
self.format = format
def run(self, info):
subs = info.get('requested_subtitles')
new_ext = self.format
new_format = new_ext
if new_format == 'vtt':
new_format = 'webvtt'
if subs is None:
self.to_screen('There aren\'t any subtitles to convert')
return [], info
self.to_screen('Converting subtitles')
sub_filenames = []
for lang, sub in subs.items():
if not os.path.exists(sub.get('filepath', '')):
self.report_warning(f'Skipping embedding {lang} subtitle because the file is missing')
continue
ext = sub['ext']
if ext == new_ext:
self.to_screen('Subtitle file for %s is already in the requested format' % new_ext)
continue
elif ext == 'json':
self.to_screen(
'You have requested to convert json subtitles into another format, '
'which is currently not possible')
continue
old_file = sub['filepath']
sub_filenames.append(old_file)
new_file = replace_extension(old_file, new_ext)
if ext in ('dfxp', 'ttml', 'tt'):
self.report_warning(
'You have requested to convert dfxp (TTML) subtitles into another format, '
'which results in style information loss')
dfxp_file = old_file
srt_file = replace_extension(old_file, 'srt')
with open(dfxp_file, 'rb') as f:
srt_data = dfxp2srt(f.read())
with io.open(srt_file, 'wt', encoding='utf-8') as f:
f.write(srt_data)
old_file = srt_file
subs[lang] = {
'ext': 'srt',
'data': srt_data,
'filepath': srt_file,
}
if new_ext == 'srt':
continue
else:
sub_filenames.append(srt_file)
self.run_ffmpeg(old_file, new_file, ['-f', new_format])
with io.open(new_file, 'rt', encoding='utf-8') as f:
subs[lang] = {
'ext': new_ext,
'data': f.read(),
'filepath': new_file,
}
info['__files_to_move'][new_file] = replace_extension(
info['__files_to_move'][sub['filepath']], new_ext)
return sub_filenames, info
class FFmpegSplitChaptersPP(FFmpegPostProcessor):
def __init__(self, downloader, force_keyframes=False):
FFmpegPostProcessor.__init__(self, downloader)
self._force_keyframes = force_keyframes
def _prepare_filename(self, number, chapter, info):
info = info.copy()
info.update({
'section_number': number,
'section_title': chapter.get('title'),
'section_start': chapter.get('start_time'),
'section_end': chapter.get('end_time'),
})
return self._downloader.prepare_filename(info, 'chapter')
def _ffmpeg_args_for_chapter(self, number, chapter, info):
destination = self._prepare_filename(number, chapter, info)
if not self._downloader._ensure_dir_exists(encodeFilename(destination)):
return
chapter['filepath'] = destination
self.to_screen('Chapter %03d; Destination: %s' % (number, destination))
return (
destination,
['-ss', compat_str(chapter['start_time']),
'-t', compat_str(chapter['end_time'] - chapter['start_time'])])
@PostProcessor._restrict_to(images=False)
def run(self, info):
chapters = info.get('chapters') or []
if not chapters:
self.to_screen('Chapter information is unavailable')
return [], info
in_file = info['filepath']
if self._force_keyframes and len(chapters) > 1:
in_file = self.force_keyframes(in_file, (c['start_time'] for c in chapters))
self.to_screen('Splitting video by chapters; %d chapters found' % len(chapters))
for idx, chapter in enumerate(chapters):
destination, opts = self._ffmpeg_args_for_chapter(idx + 1, chapter, info)
self.real_run_ffmpeg([(in_file, opts)], [(destination, ['-c', 'copy'])])
if in_file != info['filepath']:
os.remove(in_file)
return [], info
class FFmpegThumbnailsConvertorPP(FFmpegPostProcessor):
SUPPORTED_EXTS = ('jpg', 'png')
def __init__(self, downloader=None, format=None):
super(FFmpegThumbnailsConvertorPP, self).__init__(downloader)
self.format = format
@staticmethod
def is_webp(path):
with open(encodeFilename(path), 'rb') as f:
b = f.read(12)
return b[0:4] == b'RIFF' and b[8:] == b'WEBP'
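# Note: this checks the RIFF container signature used by WebP: bytes 0-3 are b'RIFF',
# bytes 4-7 hold the chunk size, and bytes 8-11 are b'WEBP'.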
def fixup_webp(self, info, idx=-1):
thumbnail_filename = info['thumbnails'][idx]['filepath']
_, thumbnail_ext = os.path.splitext(thumbnail_filename)
if thumbnail_ext:
thumbnail_ext = thumbnail_ext[1:].lower()
if thumbnail_ext != 'webp' and self.is_webp(thumbnail_filename):
self.to_screen('Correcting thumbnail "%s" extension to webp' % thumbnail_filename)
webp_filename = replace_extension(thumbnail_filename, 'webp')
os.replace(thumbnail_filename, webp_filename)
info['thumbnails'][idx]['filepath'] = webp_filename
info['__files_to_move'][webp_filename] = replace_extension(
info['__files_to_move'].pop(thumbnail_filename), 'webp')
@staticmethod
def _options(target_ext):
if target_ext == 'jpg':
return ['-bsf:v', 'mjpeg2jpeg']
return []
def convert_thumbnail(self, thumbnail_filename, target_ext):
thumbnail_conv_filename = replace_extension(thumbnail_filename, target_ext)
self.to_screen('Converting thumbnail "%s" to %s' % (thumbnail_filename, target_ext))
self.real_run_ffmpeg(
[(thumbnail_filename, ['-f', 'image2', '-pattern_type', 'none'])],
[(thumbnail_conv_filename.replace('%', '%%'), self._options(target_ext))])
return thumbnail_conv_filename
def run(self, info):
files_to_delete = []
has_thumbnail = False
for idx, thumbnail_dict in enumerate(info['thumbnails']):
if 'filepath' not in thumbnail_dict:
continue
has_thumbnail = True
self.fixup_webp(info, idx)
original_thumbnail = thumbnail_dict['filepath']
_, thumbnail_ext = os.path.splitext(original_thumbnail)
if thumbnail_ext:
thumbnail_ext = thumbnail_ext[1:].lower()
if thumbnail_ext == 'jpeg':
thumbnail_ext = 'jpg'
if thumbnail_ext == self.format:
self.to_screen('Thumbnail "%s" is already in the requested format' % original_thumbnail)
continue
thumbnail_dict['filepath'] = self.convert_thumbnail(original_thumbnail, self.format)
files_to_delete.append(original_thumbnail)
info['__files_to_move'][thumbnail_dict['filepath']] = replace_extension(
info['__files_to_move'][original_thumbnail], self.format)
if not has_thumbnail:
self.to_screen('There aren\'t any thumbnails to convert')
return files_to_delete, info
|
'''
Function:
setup
Author:
Charles
WeChat Official Account:
Charles的皮卡丘
GitHub:
https://github.com/CharlesPikachu
Last updated:
2020-02-20
'''
import DecryptLogin
from setuptools import setup, find_packages
'''readme'''
with open('README.md', 'r', encoding='utf-8') as f:
long_description = f.read()
'''setup'''
setup(
name='DecryptLogin',
version=DecryptLogin.__version__,
    description='Log in to some websites using requests.',
long_description=long_description,
long_description_content_type='text/markdown',
classifiers=[
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3',
'Intended Audience :: Developers',
'Operating System :: OS Independent'],
author='Charles',
url='https://github.com/CharlesPikachu/DecryptLogin',
author_email='charlesjzc@qq.com',
license='MIT',
include_package_data=True,
install_requires=['requests >= 2.22.0', 'pycryptodome >= 3.8.1', 'rsa >= 4.0', 'PyExecJS >= 1.5.1'],
zip_safe=True,
packages=find_packages()
)
|
# Copyright (c) 2021 NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import pandas as pd
import os
from joblib import Parallel, delayed
import glob
import argparse
import tqdm
import subprocess
def process_file(f, dst):
label = '_c0'
dense_columns = [f'_c{i}' for i in range(1, 14)]
categorical_columns = [f'_c{i}' for i in range(14, 40)]
all_columns_sorted = [f'_c{i}' for i in range(0, 40)]
data = pd.read_parquet(f)
data = data[all_columns_sorted]
data[label] = data[label].astype(np.int32)
data[dense_columns] = data[dense_columns].astype(np.float32)
data[categorical_columns] = data[categorical_columns].astype(np.int32)
data = data.to_records(index=False)
data = data.tobytes()
dst_file = dst + '/' + f.split('/')[-1] + '.bin'
with open(dst_file, 'wb') as dst_fd:
dst_fd.write(data)
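# A minimal reading sketch (not part of the original pipeline), assuming the record
# layout produced by process_file above: one int32 label, 13 float32 dense features
# and 26 int32 categorical features, i.e. 160 bytes per sample.
def read_binary_samples(path, n_samples=1):
    record_dtype = np.dtype(
        [('_c0', np.int32)]
        + [(f'_c{i}', np.float32) for i in range(1, 14)]
        + [(f'_c{i}', np.int32) for i in range(14, 40)])
    with open(path, 'rb') as fd:
        buf = fd.read(record_dtype.itemsize * n_samples)
    # Returns a structured array with one entry per sample
    return np.frombuffer(buf, dtype=record_dtype)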
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--src_dir', type=str)
parser.add_argument('--intermediate_dir', type=str)
parser.add_argument('--dst_dir', type=str)
parser.add_argument('--parallel_jobs', default=40, type=int)
args = parser.parse_args()
print('Processing train files...')
train_src_files = glob.glob(args.src_dir + '/train/*.parquet')
train_intermediate_dir = os.path.join(args.intermediate_dir, 'train')
os.makedirs(train_intermediate_dir, exist_ok=True)
Parallel(n_jobs=args.parallel_jobs)(delayed(process_file)(f, train_intermediate_dir) for f in tqdm.tqdm(train_src_files))
print('Train files conversion done')
print('Processing test files...')
test_src_files = glob.glob(args.src_dir + '/test/*.parquet')
test_intermediate_dir = os.path.join(args.intermediate_dir, 'test')
os.makedirs(test_intermediate_dir, exist_ok=True)
Parallel(n_jobs=args.parallel_jobs)(delayed(process_file)(f, test_intermediate_dir) for f in tqdm.tqdm(test_src_files))
print('Test files conversion done')
print('Processing validation files...')
valid_src_files = glob.glob(args.src_dir + '/validation/*.parquet')
valid_intermediate_dir = os.path.join(args.intermediate_dir, 'validation')
os.makedirs(valid_intermediate_dir, exist_ok=True)
Parallel(n_jobs=args.parallel_jobs)(delayed(process_file)(f, valid_intermediate_dir) for f in tqdm.tqdm(valid_src_files))
print('Validation files conversion done')
os.makedirs(args.dst_dir, exist_ok=True)
print('Concatenating train files')
os.system(f'cat {train_intermediate_dir}/*.bin > {args.dst_dir}/train_data.bin')
print('Concatenating test files')
os.system(f'cat {test_intermediate_dir}/*.bin > {args.dst_dir}/test_data.bin')
print('Concatenating validation files')
os.system(f'cat {valid_intermediate_dir}/*.bin > {args.dst_dir}/validation_data.bin')
print('Done')
if __name__ == '__main__':
main()
|
#!/usr/bin/env python
'''
Input: An array of integers.
Output: The single integer that occurs most often.
'''
from __future__ import print_function
from collections import Counter
given_array = [2,2,3,7,5,7,7,7,4,7,2,7,4,5,6,7,7,8,6,7,7,8,10,12,29,30,19,10,7,7,7,7,7,7,7,7,7]
def find_major(array):
counted = Counter(array)
return counted.most_common(1)[0][0]
print(find_major(given_array))
|
"""Demo unanimous voting: multiparty matching without embarrassments.
Unanimous voting between parties P[0],...,P[t] is implemented by securely
evaluating the product of their votes (using 1s and 0s to encode "yes"
and "no" votes, respectively) and revealing only whether the product
equals 1 (unanimous agreement) or 0 (someone disagrees). The MPyC method
mpc.all() modeled after Python's built-in function all() can be used
for this purpose.
The secure computation is designed to be maximally private, meaning
that any t parties colluding against the remaining party should be
unsuccessful: the colluding parties should not be able to find out
the vote of the remaining party in case they do not agree (and no
unanimous agreement will be reached anyway; if the colluding parties
all vote "yes", then there remains nothing to hide).
To achieve maximal privacy, we add t parties P[t+1],...,P[2t] to the
secure computation. These additional parties provide no input to the
secure computation and are trusted not to collude with any of the
parties P[0],...,P[t]. This way we have m=2t+1 parties in total, and we
can tolerate the desired maximum of t corrupt parties among P[0],...,P[t].
Moreover, the trusted parties do not receive any output, so they do
not learn anything at all, as their number does not exceed t.
Unanimous voting is a generalization of "matching without embarrassments",
where Alice and Bob like to find out if they want to go on a second date.
They might simply tell each other if they are interested, and in case they
are both interested, this simple solution is satisfactory. However, if for
example Alice is not interested in Bob but Bob is interested in Alice, Bob
may feel embarrassed afterwards because he expressed interest in her; if he
would have known beforehand that Alice was not interested anyway, he might
have told Alice that he was not interested either. See also this YouTube
video https://youtu.be/JnmESTrsQbg by TNO.
Matching without embarrassments corresponds to unanimous voting with t=1.
Alice acts as party P[0], Bob as party P[1], and P[2] is the trusted (third)
party. We run a 3-party secure computation tolerating 1 corrupt party.
Alice and Bob provide bits x and y as their respective private inputs;
the trusted party P[2] provides no input. Of course, Alice and Bob do not
collude with each other, and P[2] is assumed to collude with neither of them.
Therefore, Alice and Bob do not learn anything beyond what they can deduce
from their output bit x*y and their respective input bits x and y; the
trusted party P[2] receives no output, hence learns nothing at all.
"""
import sys
from mpyc.runtime import mpc
m = len(mpc.parties)
if m%2 == 0:
print('Odd number of parties required.')
sys.exit()
t = m//2
voters = list(range(t+1)) # parties P[0],...,P[t]
if mpc.pid in voters:
vote = int(sys.argv[1]) if sys.argv[1:] else 1 # default "yes"
else:
vote = None # no input
secbit = mpc.SecInt(1) # 1-bit integers suffice
mpc.run(mpc.start())
votes = mpc.input(secbit(vote), senders=voters)
result = mpc.run(mpc.output(mpc.all(votes), receivers=voters))
mpc.run(mpc.shutdown())
if result is None: # no output
print('Thanks for serving as oblivious matchmaker;)')
elif result:
print(f'Match: unanimous agreement between {t+1} part{"ies" if t else "y"}!')
else:
print(f'No match: someone disagrees among {t+1} part{"ies" if t else "y"}?')
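# Illustrative local run for matching without embarrassments (t=1), assuming the
# standard MPyC command line options -M (number of parties) and -I (party index)
# are available and that MPyC leaves the trailing vote argument in sys.argv:
#   python <this script> -M3 -I0 1   # Alice votes "yes"
#   python <this script> -M3 -I1 0   # Bob votes "no"
#   python <this script> -M3 -I2     # trusted third party, no input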
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
"""
@file: pip_util.py
@des: Download/install pip packages built for the matching platform ("win" or "any").
# pip command reference
# https://pip.pypa.io/en/stable/user_guide/
# pip uninstall SomePackage     uninstall a package
# pip search SomePackage        search for a package
# pip show                      show information about installed packages
# pip show -f SomePackage       show detailed information for a package
# pip list                      list installed packages
# pip list -o                   list packages that can be upgraded
"""
# @ERROR: if "No module named 'pip._internal.cli.main'" is raised,
# change the import below to: from pip._internal import main
from pip._internal.cli.main import main
pip_source = {
    '官方源': 'https://pypi.org/simple/',  # the official index is recommended when downloading win packages
'阿里云': 'http://mirrors.aliyun.com/pypi/simple/',
}
local_down_dir = {
    'base': 'Z:\\var\\log\\mine\\',  # root directory for downloaded packages
'win': 'win-pages',
'any': 'any-pages',
}
def get_pip_link(page_name):
"""
    Get the package's page on the official PyPI index; useful when a download cannot be found.
    :param page_name: package name
:return:
"""
if page_name.find('==') > -1:
page_name = page_name[:page_name.rfind('==')]
return 'https://pypi.org/project/{0}/#files'.format(page_name)
def freeze_requirements():
"""
    Export installed packages (pip freeze) to compare against the requirements file.
:return:
"""
    # Note: pip freeze prints to stdout; the filename below does not appear to be
    # used as an output file (redirect stdout to actually write requirements.txt).
    main(['freeze',
          'requirements.txt'])
def down_pip_any_pages(pages=[], source='官方源'):
"""
    Download "any"-platform pip packages into the any-pages directory.
    :param pages: names of the packages to download
    :param source: key of the index (in pip_source) to download from
:return:
"""
for pn in pages:
main(['download',
'-i', pip_source[source],
'--only-binary=:all:',
# '--platform', 'win_amd64',
'--platform', 'any',
'--python-version', '37',
'--implementation', 'cp',
'--abi', 'none',
'--abi', 'cp37m',
# '-r', 'requirements.txt',
pn,
'-d', local_down_dir['any']])
print("[{1}] 对应 pip link :{0}".format(get_pip_link(pn), pn))
def down_pip_win_pages(pages=[], source='官方源'):
"""
    Download win-platform (win_amd64) pip packages into the win-pages directory.
    :param pages: names of the packages to download
    :param source: key of the index (in pip_source) to download from
:return:
"""
for pn in pages:
main(['download',
'-i', pip_source[source],
'--only-binary=:all:',
'--platform', 'win_amd64',
# '--platform', 'any',
'--python-version', '37',
'--implementation', 'cp',
# '--abi', 'none',
# '-r', 'requirements.txt',
'--abi', 'cp37m',
pn,
'-d', local_down_dir['win']])
print("[{1}] 对应 pip link :{0}".format(get_pip_link(pn), pn))
def install_pip_win_pages(pages=[]):
for pn in pages:
print("执行安装包 [{0}]".format(pn))
main(['install',
'--no-index',
'--find-links='+local_down_dir['base']+local_down_dir['win'],
pn])
def install_pip_any_pages(pages=[]):
for pn in pages:
print("执行安装包 [{0}]".format(pn))
main(['install',
'--no-index',
'--find-links='+local_down_dir['base']+local_down_dir['any'],
# '-r', 'requirements.txt',
pn])
if __name__ == '__main__':
down_pip_win_pages(pages=["APScheduler==3.6.3"], source='官方源')
# down_pip_any_pages(pages=["pymongo==3.11.3"], source='官方源')
# install_pip_any_pages(pages=[])
# install_pip_win_pages(pages=[])
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
"""Centralized catalog of paths."""
import os
class DatasetCatalog(object):
DATA_DIR = "datasets"
DATASETS = {
"coco_test-dev": (
"coco/test2017",
"coco/annotations/image_info_test-dev2017.json",
),
"coco_2017_test": (
"coco/test2017",
"coco/annotations/image_info_test2017.json",
),
"coco_2017_train": (
"coco/train2017",
"coco/annotations/instances_train2017.json",
),
"coco_2017_val": (
"coco/val2017",
"coco/annotations/instances_val2017.json",
),
"coco_2014_train": (
"coco/train2014",
"coco/annotations/instances_train2014.json",
),
"coco_2014_val": ("coco/val2014", "coco/annotations/instances_val2014.json"),
"coco_2014_minival": (
"coco/val2014",
"coco/annotations/instances_minival2014.json",
),
"coco_2014_valminusminival": (
"coco/val2014",
"coco/annotations/instances_valminusminival2014.json",
),
}
@staticmethod
def get(name):
if "coco" in name:
data_dir = DatasetCatalog.DATA_DIR
attrs = DatasetCatalog.DATASETS[name]
args = dict(
root=os.path.join(data_dir, attrs[0]),
ann_file=os.path.join(data_dir, attrs[1]),
)
return dict(
factory="COCODataset",
args=args,
)
raise RuntimeError("Dataset not available: {}".format(name))
class ModelCatalog(object):
S3_C2_DETECTRON_URL = "https://s3-us-west-2.amazonaws.com/detectron"
C2_IMAGENET_MODELS = {
"MSRA/R-50": "ImageNetPretrained/MSRA/R-50.pkl",
"MSRA/R-101": "ImageNetPretrained/MSRA/R-101.pkl",
"FAIR/20171220/X-101-32x8d": "ImageNetPretrained/20171220/X-101-32x8d.pkl",
}
C2_DETECTRON_SUFFIX = "output/train/coco_2014_train%3Acoco_2014_valminusminival/generalized_rcnn/model_final.pkl"
C2_DETECTRON_MODELS = {
"35857197/e2e_faster_rcnn_R-50-C4_1x": "01_33_49.iAX0mXvW",
"35857345/e2e_faster_rcnn_R-50-FPN_1x": "01_36_30.cUF7QR7I",
"35857890/e2e_faster_rcnn_R-101-FPN_1x": "01_38_50.sNxI7sX7",
"36761737/e2e_faster_rcnn_X-101-32x8d-FPN_1x": "06_31_39.5MIHi1fZ",
"35858791/e2e_mask_rcnn_R-50-C4_1x": "01_45_57.ZgkA7hPB",
"35858933/e2e_mask_rcnn_R-50-FPN_1x": "01_48_14.DzEQe4wC",
"35861795/e2e_mask_rcnn_R-101-FPN_1x": "02_31_37.KqyEK4tT",
"36761843/e2e_mask_rcnn_X-101-32x8d-FPN_1x": "06_35_59.RZotkLKI",
}
@staticmethod
def get(name):
if name.startswith("Caffe2Detectron/COCO"):
return ModelCatalog.get_c2_detectron_12_2017_baselines(name)
if name.startswith("ImageNetPretrained"):
return ModelCatalog.get_c2_imagenet_pretrained(name)
raise RuntimeError("model not present in the catalog {}".format(name))
@staticmethod
def get_c2_imagenet_pretrained(name):
prefix = ModelCatalog.S3_C2_DETECTRON_URL
name = name[len("ImageNetPretrained/"):]
name = ModelCatalog.C2_IMAGENET_MODELS[name]
url = "/".join([prefix, name])
return url
@staticmethod
def get_c2_detectron_12_2017_baselines(name):
# Detectron C2 models are stored following the structure
# prefix/<model_id>/2012_2017_baselines/<model_name>.yaml.<signature>/suffix
# we use as identifiers in the catalog Caffe2Detectron/COCO/<model_id>/<model_name>
prefix = ModelCatalog.S3_C2_DETECTRON_URL
suffix = ModelCatalog.C2_DETECTRON_SUFFIX
# remove identification prefix
name = name[len("Caffe2Detectron/COCO/"):]
# split in <model_id> and <model_name>
model_id, model_name = name.split("/")
# parsing to make it match the url address from the Caffe2 models
model_name = "{}.yaml".format(model_name)
signature = ModelCatalog.C2_DETECTRON_MODELS[name]
unique_name = ".".join([model_name, signature])
url = "/".join([prefix, model_id, "12_2017_baselines", unique_name, suffix])
return url
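# Example derived from the catalog entries above:
#   ModelCatalog.get("Caffe2Detectron/COCO/35857345/e2e_faster_rcnn_R-50-FPN_1x")
# composes the following URL (one line):
#   https://s3-us-west-2.amazonaws.com/detectron/35857345/12_2017_baselines/
#     e2e_faster_rcnn_R-50-FPN_1x.yaml.01_36_30.cUF7QR7I/output/train/
#     coco_2014_train%3Acoco_2014_valminusminival/generalized_rcnn/model_final.pkl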
|
# Copyright 2022 Paul Rogers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .util import dict_get, split_host_url, service_url
from . import consts
from .extensions import load_extensions
class ServiceMapper:
"""
Maps cluster services to local URLs.
This is a base class for Docker, AWS or other mappings. This
implementation is a "null mapping": addresses map to themselves.
"""
def url_for(self, host, http_port, tls_port, prefer_tls):
"""
Create a URL for a service given the remote host and ports.
Parameters
----------
host : str
The remote host.
http_port : int
The remote plain-text (HTTP) port, or -1 if not available.
tls_port : int
The remote TLS (HTTPS) port, or -1 if not available.
prefer_tls : bool
If True, prefer to map the TLS port before the HTTP port.
Returns
-------
A triple with the protocol (AKA schema): http or https, the
local host name and the local port. The combination, when converted
by the caller to a URL, must be accessible to the machine on which
the druid-client library is running.
"""
if (prefer_tls and tls_port != -1) or http_port == -1:
return (consts.TLS_PROTOCOL, host, tls_port)
else:
return (consts.PLAIN_TEXT_PROTOCOL, host, http_port)
class DockerMapper(ServiceMapper):
"""
Maps cluster services to local URLs for Druid running in Docker.
The default configuration assumes the Druid cluster runs on
localhost, with no port changes. Provide an alternative Docker host
name, or a set of port mappings, to change the defaults.
"""
def __init__(self, docker_host="localhost", port_map=[]):
"""
Constructor.
Parameters
----------
docker_host : str, Default = "localhost"
The IP address or name of the host on which Docker is running.
port_map : [], Default = []
An array of Docker-style port mappings, where each entry is
a two-element array or tuple of the form
(external-port, internal_port).
"""
self.docker_host = docker_host
self.port_map = {}
for pair in port_map:
self.port_map[pair[1]] = pair[0]
def url_for(self, host, http_port, tls_port, prefer_tls):
local_http_port = dict_get(self.port_map, http_port)
if prefer_tls or local_http_port is None:
port = dict_get(self.port_map, tls_port)
if port is not None:
return (consts.TLS_PROTOCOL, self.docker_host, port)
if local_http_port is not None:
return (consts.PLAIN_TEXT_PROTOCOL, self.docker_host, local_http_port)
return ServiceMapper.url_for(self, self.docker_host, http_port, tls_port, prefer_tls)
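# Illustrative use of DockerMapper, assuming consts.PLAIN_TEXT_PROTOCOL == 'http'
# and that dict_get returns None for missing keys:
#   mapper = DockerMapper(docker_host='localhost', port_map=[(18888, 8888)])
#   mapper.url_for('router', 8888, -1, prefer_tls=False)
#   # -> ('http', 'localhost', 18888): internal port 8888 is reached via local port 18888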
class ClusterConfig:
def __init__(self, config):
self.options = config
self.cluster = None
self.service_mapper = dict_get(config, 'mapper', ServiceMapper())
self.tls_cert = dict_get(config, 'tls_cert')
self.prefer_tls = dict_get(config, 'prefer_tls', False)
self.extensions = load_extensions()
# Enable this option to see the URLs as they are sent.
# Handy for debugging.
self.trace = False
def map_endpoint(self, remote_url):
"""
Maps a remote endpoint URL to the local equivalent.
"""
scheme, host, port = split_host_url(remote_url)
http_port = port if scheme == consts.PLAIN_TEXT_PROTOCOL else None
tls_port = port if scheme == consts.TLS_PROTOCOL else None
scheme, host, port = self.service_mapper.url_for(host, http_port, tls_port, False)
return service_url(scheme, host, port)
def client_for(self, client, extn):
return self.extensions.client_for(client, extn)
def extension_names(self):
return self.extensions.names()
def extension_list(self):
return self.extensions.list()
def register_services(self, service_map):
self.extensions.register_services(service_map)
|
from setuptools import setup
# "import" __version__
__version__ = 'unknown'
for line in open('src/splines/__init__.py'):
if line.startswith('__version__'):
exec(line)
break
setup(
name='splines',
packages=['splines'],
package_dir={'': 'src'},
version=__version__,
author='Matthias Geier',
author_email='Matthias.Geier@gmail.com',
description='Splines in Euclidean Space and Beyond',
long_description=open('README.rst').read(),
install_requires=['NumPy'],
python_requires='>=3.6',
license='MIT',
keywords='splines curves interpolation quaternions'.split(),
project_urls={
'Documentation': 'https://splines.readthedocs.io/',
'Source Code': 'https://github.com/AudioSceneDescriptionFormat/splines/',
'Bug Tracker': 'https://github.com/AudioSceneDescriptionFormat/splines/issues/',
},
platforms='any',
classifiers=[
'Intended Audience :: Education',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
],
zip_safe=True,
)
|
from dataclasses import dataclass
from typing import List
import click
import desert
import marshmallow
import requests
API_URL = "https://www.biobiochile.cl/lista/api/get-todo?limit={limit}"
@dataclass
class New:
post_hour: str
post_title: str
post_content: str
schema = desert.schema(New, meta={"unknown": marshmallow.EXCLUDE}, many=True)
def last_news(limit: int = 10) -> List[New]:
url = API_URL.format(limit=limit)
try:
with requests.get(url) as response:
response.raise_for_status()
data = response.json()
data.reverse()
return schema.load(data)
except (requests.RequestException, marshmallow.ValidationError) as error:
message = str(error)
raise click.ClickException(message)
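# Minimal illustrative usage (requires network access to the API above):
#   for item in last_news(limit=3):
#       click.echo(f"{item.post_hour} {item.post_title}")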
|
import numpy as np
import xarray as xr
from .. import time as tmlib
import warnings
from os.path import getsize
from ._read_bin import bin_reader
from .base import _find_userdata, _create_dataset, _abspath
from ..rotate.rdi import _calc_beam_orientmat, _calc_orientmat
from ..rotate.base import _set_coords
from ..rotate.api import set_declination
def read_rdi(fname, userdata=None, nens=None, debug=0):
"""Read a TRDI binary data file.
Parameters
----------
    fname : string
Filename of TRDI file to read.
userdata : True, False, or string of userdata.json filename (default ``True``)
Whether to read the '<base-filename>.userdata.json' file.
nens : None (default: read entire file), int, or 2-element tuple (start, stop)
Number of pings to read from the file
Returns
-------
ds : xarray.Dataset
An xarray dataset from the binary instrument data
"""
# Reads into a dictionary of dictionaries using netcdf naming conventions
# Should be easier to debug
with _RdiReader(fname, debug_level=debug) as ldr:
dat = ldr.load_data(nens=nens)
# Read in userdata
userdata = _find_userdata(fname, userdata)
for nm in userdata:
dat['attrs'][nm] = userdata[nm]
if 'time_gps' in dat['coords']:
# GPS data not necessarily sampling at the same rate as ADCP DAQ.
dat = _remove_gps_duplicates(dat)
# Create xarray dataset from upper level dictionary
ds = _create_dataset(dat)
ds = _set_coords(ds, ref_frame=ds.coord_sys)
# Create orientation matrices
if 'beam2inst_orientmat' not in ds:
ds['beam2inst_orientmat'] = xr.DataArray(_calc_beam_orientmat(
ds.beam_angle,
ds.beam_pattern == 'convex'),
coords={'x': [1, 2, 3, 4],
'x*': [1, 2, 3, 4]},
dims=['x', 'x*'])
if 'orientmat' not in ds:
ds['orientmat'] = xr.DataArray(_calc_orientmat(ds),
coords={'earth': ['E', 'N', 'U'],
'inst': ['X', 'Y', 'Z'],
'time': ds['time']},
dims=['earth', 'inst', 'time'])
# Check magnetic declination if provided via software and/or userdata
_set_rdi_declination(ds, fname, inplace=True)
# VMDAS applies gps correction on velocity in .ENX files only
if fname.rsplit('.')[-1] == 'ENX':
ds.attrs['vel_gps_corrected'] = 1
else: # (not ENR or ENS) or WinRiver files
ds.attrs['vel_gps_corrected'] = 0
# Convert time coords to dt64
t_coords = [t for t in ds.coords if 'time' in t]
for ky in t_coords:
dt = tmlib.epoch2dt64(ds[ky])
ds = ds.assign_coords({ky: dt})
# Convert time vars to dt64
t_data = [t for t in ds.data_vars if 'time' in t]
for ky in t_data:
dt = tmlib.epoch2dt64(ds[ky])
ds[ky].data = dt
return ds
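# Minimal illustrative usage, assuming a TRDI binary file 'adcp.000' on disk:
#   ds = read_rdi('adcp.000', nens=(0, 1000))  # read the first 1000 pings
#   print(ds['vel'].shape)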
def _remove_gps_duplicates(dat):
"""
Removes duplicate and nan timestamp values in 'time_gps' coordinate, and
    adds the hardware (ADCP DAQ) timestamp corresponding to GPS acquisition
(in addition to the GPS unit's timestamp).
"""
dat['data_vars']['hdwtime_gps'] = dat['coords']['time']
dat['units']['hdwtime'] = 'seconds since 1970-01-01 00:00:00'
# Remove duplicate timestamp values, if applicable
dat['coords']['time_gps'], idx = np.unique(dat['coords']['time_gps'],
return_index=True)
# Remove nan values, if applicable
nan = np.zeros(dat['coords']['time'].shape, dtype=bool)
if any(np.isnan(dat['coords']['time_gps'])):
nan = np.isnan(dat['coords']['time_gps'])
dat['coords']['time_gps'] = dat['coords']['time_gps'][~nan]
for key in dat['data_vars']:
if 'gps' in key:
dat['data_vars'][key] = dat['data_vars'][key][idx]
if sum(nan) > 0:
dat['data_vars'][key] = dat['data_vars'][key][~nan]
return dat
def _set_rdi_declination(dat, fname, inplace):
# If magnetic_var_deg is set, this means that the declination is already
# included in the heading and in the velocity data.
declin = dat.attrs.pop('declination', None) # userdata declination
if dat.attrs['magnetic_var_deg'] != 0: # from TRDI software if set
dat.attrs['declination'] = dat.attrs['magnetic_var_deg']
dat.attrs['declination_in_orientmat'] = 1 # logical
if dat.attrs['magnetic_var_deg'] != 0 and declin is not None:
warnings.warn(
"'magnetic_var_deg' is set to {:.2f} degrees in the binary "
"file '{}', AND 'declination' is set in the 'userdata.json' "
"file. DOLfYN WILL USE THE VALUE of {:.2f} degrees in "
"userdata.json. If you want to use the value in "
"'magnetic_var_deg', delete the value from userdata.json and "
"re-read the file."
.format(dat.attrs['magnetic_var_deg'], fname, declin))
dat.attrs['declination'] = declin
if declin is not None:
set_declination(dat, declin, inplace)
century = 2000
data_defs = {'number': ([], 'data_vars', 'uint32', ''),
'rtc': ([7], 'sys', 'uint16', ''),
'builtin_test_fail': ([], 'data_vars', 'bool', ''),
'c_sound': ([], 'data_vars', 'float32', 'm/s'),
'depth': ([], 'data_vars', 'float32', 'm'),
'pitch': ([], 'data_vars', 'float32', 'deg'),
'roll': ([], 'data_vars', 'float32', 'deg'),
'heading': ([], 'data_vars', 'float32', 'deg'),
'temp': ([], 'data_vars', 'float32', 'C'),
'salinity': ([], 'data_vars', 'float32', 'psu'),
'min_preping_wait': ([], 'data_vars', 'float32', 's'),
'heading_std': ([], 'data_vars', 'float32', 'deg'),
'pitch_std': ([], 'data_vars', 'float32', 'deg'),
'roll_std': ([], 'data_vars', 'float32', 'deg'),
'adc': ([8], 'sys', 'uint8', ''),
'error_status_wd': ([], 'attrs', 'float32', ''),
'pressure': ([], 'data_vars', 'float32', 'dbar'),
'pressure_std': ([], 'data_vars', 'float32', 'dbar'),
'vel': (['nc', 4], 'data_vars', 'float32', 'm/s'),
'amp': (['nc', 4], 'data_vars', 'uint8', 'counts'),
'corr': (['nc', 4], 'data_vars', 'uint8', 'counts'),
'prcnt_gd': (['nc', 4], 'data_vars', 'uint8', '%'),
'status': (['nc', 4], 'data_vars', 'float32', ''),
'dist_bt': ([4], 'data_vars', 'float32', 'm'),
'vel_bt': ([4], 'data_vars', 'float32', 'm/s'),
'corr_bt': ([4], 'data_vars', 'uint8', 'counts'),
'amp_bt': ([4], 'data_vars', 'uint8', 'counts'),
'prcnt_gd_bt': ([4], 'data_vars', 'uint8', '%'),
'time': ([], 'coords', 'float64', ''),
'etime_gps': ([], 'coords', 'float64', ''),
'elatitude_gps': ([], 'data_vars', 'float64', 'deg'),
'elongitude_gps': ([], 'data_vars', 'float64', 'deg'),
'time_gps': ([], 'coords', 'float64', ''),
'latitude_gps': ([], 'data_vars', 'float64', 'deg'),
'longitude_gps': ([], 'data_vars', 'float64', 'deg'),
'ntime': ([], 'coords', 'float64', ''),
'flags': ([], 'data_vars', 'float32', ''),
}
def _get(dat, nm):
grp = data_defs[nm][1]
if grp is None:
return dat[nm]
else:
return dat[grp][nm]
def _in_group(dat, nm):
grp = data_defs[nm][1]
if grp is None:
return nm in dat
else:
return nm in dat[grp]
def _pop(dat, nm):
grp = data_defs[nm][1]
if grp is None:
dat.pop(nm)
else:
dat[grp].pop(nm)
def _setd(dat, nm, val):
grp = data_defs[nm][1]
if grp is None:
dat[nm] = val
else:
dat[grp][nm] = val
def _idata(dat, nm, sz):
group = data_defs[nm][1]
dtype = data_defs[nm][2]
units = data_defs[nm][3]
arr = np.empty(sz, dtype=dtype)
if dtype.startswith('float'):
arr[:] = np.NaN
dat[group][nm] = arr
dat['units'][nm] = units
return dat
def _get_size(name, n=None, ncell=0):
sz = list(data_defs[name][0]) # create a copy!
if 'nc' in sz:
sz.insert(sz.index('nc'), ncell)
sz.remove('nc')
if n is None:
return tuple(sz)
return tuple(sz + [n])
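# For example, _get_size('vel', n=10, ncell=25) returns (25, 4, 10): 25 cells,
# 4 beams and 10 ensembles, per the (['nc', 4], ...) entry for 'vel' in data_defs.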
class _variable_setlist(set):
def __iadd__(self, vals):
if vals[0] not in self:
self |= set(vals)
return self
class _ensemble():
n_avg = 1
k = -1 # This is the counter for filling the ensemble object
def __getitem__(self, nm):
return getattr(self, nm)
def __init__(self, navg, n_cells):
if navg is None or navg == 0:
navg = 1
self.n_avg = navg
for nm in data_defs:
setattr(self, nm,
np.zeros(_get_size(nm, n=navg, ncell=n_cells),
dtype=data_defs[nm][2]))
def clean_data(self,):
self['vel'][self['vel'] == -32.768] = np.NaN
class _RdiReader():
_n_beams = 4 # Placeholder for 5-beam adcp, not currently used.
_pos = 0
progress = 0
_cfgnames = dict.fromkeys([4, 5], 'bb-adcp')
_cfgnames.update(dict.fromkeys([8, 9, 16], 'wh-adcp'))
_cfgnames.update(dict.fromkeys([14, 23], 'os-adcp'))
_cfac = 180 / 2 ** 31
_source = 0
_fixoffset = 0
_nbyte = 0
_winrivprob = False
    _search_num = 30000  # Maximum number of bytes to search for the next valid header
_debug7f79 = None
extrabytes = 0
def __init__(self, fname, navg=1, debug_level=0):
self.fname = _abspath(fname)
print('\nReading file {} ...'.format(fname))
self._debug_level = debug_level
self.cfg = {}
self.cfg['name'] = 'wh-adcp'
self.cfg['sourceprog'] = 'instrument'
self.cfg['prog_ver'] = 0
self.hdr = {}
self.f = bin_reader(self.fname)
self.read_hdr()
self.read_cfg()
self.f.seek(self._pos, 0)
self.n_avg = navg
self.ensemble = _ensemble(self.n_avg, self.cfg['n_cells'])
self._filesize = getsize(self.fname)
self._npings = int(self._filesize / (self.hdr['nbyte'] + 2 +
self.extrabytes))
self.vars_read = _variable_setlist(['time'])
if self._debug_level > 0:
print(' %d pings estimated in this file' % self._npings)
def read_hdr(self,):
fd = self.f
cfgid = list(fd.read_ui8(2))
nread = 0
if self._debug_level > 2:
print(self.f.pos)
print(' cfgid0: [{:x}, {:x}]'.format(*cfgid))
while (cfgid[0] != 127 or cfgid[1] != 127) or not self.checkheader():
nextbyte = fd.read_ui8(1)
pos = fd.tell()
nread += 1
cfgid[1] = cfgid[0]
cfgid[0] = nextbyte
if not pos % 1000:
print(' Still looking for valid cfgid at file '
'position %d ...' % pos)
self._pos = self.f.tell() - 2
if self._debug_level > 0:
print(fd.tell())
self.read_hdrseg()
def read_cfg(self,):
cfgid = self.f.read_ui16(1)
self.read_cfgseg()
def init_data(self,):
outd = {'data_vars': {}, 'coords': {},
'attrs': {}, 'units': {}, 'sys': {}}
outd['attrs']['inst_make'] = 'TRDI'
outd['attrs']['inst_model'] = 'Workhorse'
outd['attrs']['inst_type'] = 'ADCP'
outd['attrs']['rotate_vars'] = ['vel', ]
# Currently RDI doesn't use IMUs
outd['attrs']['has_imu'] = 0
for nm in data_defs:
outd = _idata(outd, nm,
sz=_get_size(nm, self._nens, self.cfg['n_cells']))
self.outd = outd
def mean(self, dat):
if self.n_avg == 1:
return dat[..., 0]
return np.nanmean(dat, axis=-1)
def load_data(self, nens=None):
if nens is None:
self._nens = int(self._npings / self.n_avg)
self._ens_range = (0, self._nens)
elif (nens.__class__ is tuple or nens.__class__ is list) and \
len(nens) == 2:
nens = list(nens)
if nens[1] == -1:
nens[1] = self._npings
self._nens = int((nens[1] - nens[0]) / self.n_avg)
self._ens_range = nens
self.f.seek((self.hdr['nbyte'] + 2 + self.extrabytes) *
self._ens_range[0], 1)
else:
self._nens = nens
self._ens_range = (0, nens)
if self._debug_level > 0:
print(' taking data from pings %d - %d' % tuple(self._ens_range))
print(' %d ensembles will be produced.' % self._nens)
self.init_data()
dat = self.outd
dat['coords']['range'] = (self.cfg['bin1_dist_m'] +
np.arange(self.cfg['n_cells']) *
self.cfg['cell_size'])
for nm in self.cfg:
dat['attrs'][nm] = self.cfg[nm]
for iens in range(self._nens):
try:
self.read_buffer()
            except Exception:
self.remove_end(iens)
break
self.ensemble.clean_data()
# Fix the 'real-time-clock' century
clock = self.ensemble.rtc[:, :]
if clock[0, 0] < 100:
clock[0, :] += century
# Copy the ensemble to the dataset.
for nm in self.vars_read:
_get(dat, nm)[..., iens] = self.mean(self.ensemble[nm])
try:
dats = tmlib.date2epoch(
tmlib.datetime(*clock[:6, 0],
microsecond=clock[6, 0] * 10000))[0]
except ValueError:
warnings.warn("Invalid time stamp in ping {}.".format(
int(self.ensemble.number[0])))
dat['coords']['time'][iens] = np.NaN
else:
dat['coords']['time'][iens] = np.median(dats)
self.finalize()
if 'vel_bt' in dat['data_vars']:
dat['attrs']['rotate_vars'].append('vel_bt')
return dat
def read_buffer(self,):
fd = self.f
self.ensemble.k = -1 # so that k+=1 gives 0 on the first loop.
self.print_progress()
hdr = self.hdr
while self.ensemble.k < self.ensemble.n_avg - 1:
self.search_buffer()
startpos = fd.tell() - 2
self.read_hdrseg()
byte_offset = self._nbyte + 2
for n in range(len(hdr['dat_offsets'])):
id = fd.read_ui16(1)
self._winrivprob = False
self.print_pos()
retval = self.read_dat(id)
if retval == 'FAIL':
break
byte_offset += self._nbyte
if n < (len(hdr['dat_offsets']) - 1):
oset = hdr['dat_offsets'][n + 1] - byte_offset
if oset != 0:
if self._debug_level > 0:
print(' %s: Adjust location by %d\n' % (id, oset))
fd.seek(oset, 1)
byte_offset = hdr['dat_offsets'][n + 1]
else:
if hdr['nbyte'] - 2 != byte_offset:
if not self._winrivprob:
if self._debug_level > 0:
print(' {:d}: Adjust location by {:d}\n'
.format(id, hdr['nbyte'] - 2 - byte_offset))
self.f.seek(hdr['nbyte'] - 2 - byte_offset, 1)
byte_offset = hdr['nbyte'] - 2
readbytes = fd.tell() - startpos
offset = hdr['nbyte'] + 2 - byte_offset
self.check_offset(offset, readbytes)
self.print_pos(byte_offset=byte_offset)
def search_buffer(self):
"""
Check to see if the next bytes indicate the beginning of a
data block. If not, search for the next data block, up to
_search_num times.
"""
id1 = list(self.f.read_ui8(2))
search_cnt = 0
fd = self.f
if self._debug_level > 3:
print(' -->In search_buffer...')
while (search_cnt < self._search_num and
((id1[0] != 127 or id1[1] != 127) or
not self.checkheader())):
search_cnt += 1
nextbyte = fd.read_ui8(1)
id1[1] = id1[0]
id1[0] = nextbyte
if search_cnt == self._search_num:
raise Exception(
'Searched {} entries... Bad data encountered. -> {}'
.format(search_cnt, id1))
elif search_cnt > 0:
if self._debug_level > 0:
print(' WARNING: Searched {} bytes to find next '
'valid ensemble start [{:x}, {:x}]'.format(search_cnt,
*id1))
def checkheader(self,):
if self._debug_level > 1:
print(" ###In checkheader.")
fd = self.f
valid = 0
# print(self.f.pos)
numbytes = fd.read_i16(1)
if numbytes > 0:
fd.seek(numbytes - 2, 1)
cfgid = fd.read_ui8(2)
if len(cfgid) == 2:
fd.seek(-numbytes - 2, 1)
if cfgid[0] == 127 and cfgid[1] in [127, 121]:
if cfgid[1] == 121 and self._debug7f79 is None:
self._debug7f79 = True
valid = 1
else:
fd.seek(-2, 1)
if self._debug_level > 1:
print(" ###Leaving checkheader.")
return valid
def read_hdrseg(self,):
fd = self.f
hdr = self.hdr
hdr['nbyte'] = fd.read_i16(1)
if self._debug_level > 2:
print(fd.tell())
fd.seek(1, 1)
ndat = fd.read_i8(1)
hdr['dat_offsets'] = fd.read_i16(ndat)
self._nbyte = 4 + ndat * 2
def print_progress(self,):
self.progress = self.f.tell()
if self._debug_level > 1:
print(' pos %0.0fmb/%0.0fmb\n' %
(self.f.tell() / 1048576., self._filesize / 1048576.))
if (self.f.tell() - self.progress) < 1048576:
return
def print_pos(self, byte_offset=-1):
"""Print the position in the file, used for debugging.
"""
if self._debug_level > 3:
if hasattr(self, 'ensemble'):
k = self.ensemble.k
else:
k = 0
print(' pos: %d, pos_: %d, nbyte: %d, k: %d, byte_offset: %d' %
(self.f.tell(), self._pos, self._nbyte, k, byte_offset))
def check_offset(self, offset, readbytes):
fd = self.f
if offset != 4 and self._fixoffset == 0:
if self._debug_level >= 1:
print('\n ********************************************\n')
if fd.tell() == self._filesize:
print(' EOF reached unexpectedly - discarding this last ensemble\n')
else:
print(" Adjust location by {:d} (readbytes={:d},hdr['nbyte']={:d}\n"
.format(offset, readbytes, self.hdr['nbyte']))
print("""
NOTE - If this appears at the beginning of the file, it may be
a dolfyn problem. Please report this message, with details here:
https://github.com/lkilcher/dolfyn/issues/8
- If this appears at the end of the file it means
The file is corrupted and only a partial record
has been read\n
""")
print('\n ********************************************\n')
self._fixoffset = offset - 4
fd.seek(4 + self._fixoffset, 1)
def read_dat(self, id):
function_map = {0: (self.read_fixed, []), # 0000
128: (self.read_var, []), # 0080
256: (self.read_vel, []), # 0100
512: (self.read_corr, []), # 0200
768: (self.read_amp, []), # 0300
1024: (self.read_prcnt_gd, []), # 0400
1280: (self.read_status, []), # 0500
1536: (self.read_bottom, []), # 0600
8192: (self.read_vmdas, []), # 2000
8226: (self.read_winriver2, []), # 2022
8448: (self.read_winriver, [38]), # 2100
8449: (self.read_winriver, [97]), # 2101
8450: (self.read_winriver, [45]), # 2102
8451: (self.read_winriver, [60]), # 2103
8452: (self.read_winriver, [38]), # 2104
# Loading of these data is currently not implemented:
1793: (self.skip_Ncol, [4]), # 0701 number of pings
1794: (self.skip_Ncol, [4]), # 0702 sum of squared vel
1795: (self.skip_Ncol, [4]), # 0703 sum of velocities
2560: (self.skip_Ncol, []), # 0A00 Beam 5 velocity
# 0301 Beam 5 Number of good pings
769: (self.skip_Ncol, []),
# 0302 Beam 5 Sum of squared velocities
770: (self.skip_Ncol, []),
# 0303 Beam 5 Sum of velocities
771: (self.skip_Ncol, []),
# 020C Ambient sound profile
524: (self.skip_Nbyte, [4]),
12288: (self.skip_Nbyte, [32]),
# 3000 Fixed attitude data format for OS-ADCPs
}
# Call the correct function:
if id in function_map:
if self._debug_level >= 2:
print(' Reading code {}...'.format(hex(id)), end='')
retval = function_map.get(id)[0](*function_map[id][1])
if retval:
return retval
if self._debug_level >= 2:
print(' success!')
else:
self.read_nocode(id)
def read_fixed(self,):
if hasattr(self, 'configsize'):
self.f.seek(self.configsize, 1)
self._nbyte = self.configsize
else:
self.read_cfgseg()
if self._debug_level >= 1:
print(self._pos)
self._nbyte += 2
def read_cfgseg(self,):
cfgstart = self.f.tell()
cfg = self.cfg
fd = self.f
tmp = fd.read_ui8(5)
prog_ver0 = tmp[0]
cfg['prog_ver'] = tmp[0] + tmp[1] / 100.
cfg['name'] = self._cfgnames.get(tmp[0],
'unrecognized firmware version')
config = tmp[2:4]
cfg['beam_angle'] = [15, 20, 30][(config[1] & 3)]
#cfg['numbeams'] = [4, 5][int((config[1] & 16) == 16)]
cfg['freq'] = ([75, 150, 300, 600, 1200, 2400, 38][(config[0] & 7)])
cfg['beam_pattern'] = (['concave',
'convex'][int((config[0] & 8) == 8)])
cfg['orientation'] = ['down', 'up'][int((config[0] & 128) == 128)]
#cfg['simflag'] = ['real', 'simulated'][tmp[4]]
fd.seek(1, 1)
cfg['n_beams'] = fd.read_ui8(1)
cfg['n_cells'] = fd.read_ui8(1)
cfg['pings_per_ensemble'] = fd.read_ui16(1)
cfg['cell_size'] = fd.read_ui16(1) * .01
cfg['blank'] = fd.read_ui16(1) * .01
cfg['prof_mode'] = fd.read_ui8(1)
cfg['corr_threshold'] = fd.read_ui8(1)
cfg['prof_codereps'] = fd.read_ui8(1)
cfg['min_pgood'] = fd.read_ui8(1)
cfg['evel_threshold'] = fd.read_ui16(1)
cfg['sec_between_ping_groups'] = (
np.sum(np.array(fd.read_ui8(3)) *
np.array([60., 1., .01])))
coord_sys = fd.read_ui8(1)
cfg['coord_sys'] = (['beam', 'inst',
'ship', 'earth'][((coord_sys >> 3) & 3)])
cfg['use_pitchroll'] = ['no', 'yes'][(coord_sys & 4) == 4]
cfg['use_3beam'] = ['no', 'yes'][(coord_sys & 2) == 2]
cfg['bin_mapping'] = ['no', 'yes'][(coord_sys & 1) == 1]
cfg['xducer_misalign_deg'] = fd.read_i16(1) * .01
cfg['magnetic_var_deg'] = fd.read_i16(1) * .01
cfg['sensors_src'] = np.binary_repr(fd.read_ui8(1), 8)
cfg['sensors_avail'] = np.binary_repr(fd.read_ui8(1), 8)
cfg['bin1_dist_m'] = fd.read_ui16(1) * .01
cfg['xmit_pulse'] = fd.read_ui16(1) * .01
cfg['water_ref_cells'] = list(fd.read_ui8(2)) # list for attrs
cfg['fls_target_threshold'] = fd.read_ui8(1)
fd.seek(1, 1)
cfg['xmit_lag_m'] = fd.read_ui16(1) * .01
self._nbyte = 40
self.configsize = self.f.tell() - cfgstart
def read_var(self,):
""" Read variable leader """
fd = self.f
self.ensemble.k += 1
ens = self.ensemble
k = ens.k
        self.vars_read += ['number',
                           'rtc',
'builtin_test_fail',
'c_sound',
'depth',
'heading',
'pitch',
'roll',
'salinity',
'temp',
'min_preping_wait',
'heading_std',
'pitch_std',
'roll_std',
'adc']
ens.number[k] = fd.read_ui16(1)
ens.rtc[:, k] = fd.read_ui8(7)
ens.number[k] += 65535 * fd.read_ui8(1)
ens.builtin_test_fail[k] = fd.read_ui16(1)
ens.c_sound[k] = fd.read_ui16(1)
ens.depth[k] = fd.read_ui16(1) * 0.1
ens.heading[k] = fd.read_ui16(1) * 0.01
ens.pitch[k] = fd.read_i16(1) * 0.01
ens.roll[k] = fd.read_i16(1) * 0.01
ens.salinity[k] = fd.read_i16(1)
ens.temp[k] = fd.read_i16(1) * 0.01
ens.min_preping_wait[k] = (fd.read_ui8(
3) * np.array([60, 1, .01])).sum()
ens.heading_std[k] = fd.read_ui8(1)
ens.pitch_std[k] = fd.read_ui8(1) * 0.1
ens.roll_std[k] = fd.read_ui8(1) * 0.1
ens.adc[:, k] = fd.read_i8(8)
self._nbyte = 2 + 40
def read_vel(self,):
ens = self.ensemble
self.vars_read += ['vel']
k = ens.k
ens['vel'][:, :, k] = np.array(
self.f.read_i16(4 * self.cfg['n_cells'])
).reshape((self.cfg['n_cells'], 4)) * .001
self._nbyte = 2 + 4 * self.cfg['n_cells'] * 2
def read_corr(self,):
k = self.ensemble.k
self.vars_read += ['corr']
self.ensemble.corr[:, :, k] = np.array(
self.f.read_ui8(4 * self.cfg['n_cells'])
).reshape((self.cfg['n_cells'], 4))
self._nbyte = 2 + 4 * self.cfg['n_cells']
def read_amp(self,):
k = self.ensemble.k
self.vars_read += ['amp']
self.ensemble.amp[:, :, k] = np.array(
self.f.read_ui8(4 * self.cfg['n_cells'])
).reshape((self.cfg['n_cells'], 4))
self._nbyte = 2 + 4 * self.cfg['n_cells']
def read_prcnt_gd(self,):
self.vars_read += ['prcnt_gd']
self.ensemble.prcnt_gd[:, :, self.ensemble.k] = np.array(
self.f.read_ui8(4 * self.cfg['n_cells'])
).reshape((self.cfg['n_cells'], 4))
self._nbyte = 2 + 4 * self.cfg['n_cells']
def read_status(self,):
self.vars_read += ['status']
self.ensemble.status[:, :, self.ensemble.k] = np.array(
self.f.read_ui8(4 * self.cfg['n_cells'])
).reshape((self.cfg['n_cells'], 4))
self._nbyte = 2 + 4 * self.cfg['n_cells']
def read_bottom(self,):
self.vars_read += ['dist_bt', 'vel_bt', 'corr_bt', 'amp_bt',
'prcnt_gd_bt']
fd = self.f
ens = self.ensemble
k = ens.k
cfg = self.cfg
if self._source == 2:
self.vars_read += ['latitude_gps', 'longitude_gps']
fd.seek(2, 1)
long1 = fd.read_ui16(1)
fd.seek(6, 1)
ens.latitude_gps[k] = fd.read_i32(1) * self._cfac
if ens.latitude_gps[k] == 0:
ens.latitude_gps[k] = np.NaN
else:
fd.seek(14, 1)
ens.dist_bt[:, k] = fd.read_ui16(4) * 0.01
ens.vel_bt[:, k] = fd.read_i16(4) * 0.001
ens.corr_bt[:, k] = fd.read_ui8(4)
ens.amp_bt[:, k] = fd.read_ui8(4)
ens.prcnt_gd_bt[:, k] = fd.read_ui8(4)
if self._source == 2:
fd.seek(2, 1)
ens.longitude_gps[k] = (
long1 + 65536 * fd.read_ui16(1)) * self._cfac
if ens.longitude_gps[k] > 180:
ens.longitude_gps[k] = ens.longitude_gps[k] - 360
if ens.longitude_gps[k] == 0:
ens.longitude_gps[k] = np.NaN
fd.seek(16, 1)
qual = fd.read_ui8(1)
if qual == 0:
print(' qual==%d,%f %f' % (qual,
ens.latitude_gps[k],
ens.longitude_gps[k]))
ens.latitude_gps[k] = np.NaN
ens.longitude_gps[k] = np.NaN
fd.seek(71 - 45 - 16 - 17, 1)
self._nbyte = 2 + 68
else:
fd.seek(71 - 45, 1)
self._nbyte = 2 + 68
if cfg['prog_ver'] >= 5.3:
fd.seek(78 - 71, 1)
ens.dist_bt[:, k] = ens.dist_bt[:, k] + fd.read_ui8(4) * 655.36
self._nbyte += 11
if cfg['name'] == 'wh-adcp':
if cfg['prog_ver'] >= 16.20:
fd.seek(4, 1)
self._nbyte += 4
def read_vmdas(self,):
""" Read something from VMDAS """
fd = self.f
# The raw files produced by VMDAS contain a binary navigation data
# block.
self.cfg['sourceprog'] = 'VMDAS'
ens = self.ensemble
k = ens.k
if self._source != 1 and self._debug_level >= 1:
print(' \n***** Apparently a VMDAS file \n\n')
self._source = 1
self.vars_read += ['time_gps',
'latitude_gps',
'longitude_gps',
'etime_gps',
'elatitude_gps',
'elongitude_gps',
'flags',
'ntime', ]
utim = fd.read_ui8(4)
date = tmlib.datetime(utim[2] + utim[3] * 256, utim[1], utim[0])
# This byte is in hundredths of seconds (10s of milliseconds):
time = tmlib.timedelta(milliseconds=(int(fd.read_ui32(1) / 10)))
fd.seek(4, 1) # "PC clock offset from UTC" - clock drift in ms?
ens.time_gps[k] = tmlib.date2epoch(date + time)[0]
ens.latitude_gps[k] = fd.read_i32(1) * self._cfac
ens.longitude_gps[k] = fd.read_i32(1) * self._cfac
ens.etime_gps[k] = tmlib.date2epoch(date + tmlib.timedelta(
milliseconds=int(fd.read_ui32(1) * 10)))[0]
ens.elatitude_gps[k] = fd.read_i32(1) * self._cfac
ens.elongitude_gps[k] = fd.read_i32(1) * self._cfac
fd.seek(12, 1)
ens.flags[k] = fd.read_ui16(1)
fd.seek(6, 1)
utim = fd.read_ui8(4)
date = tmlib.datetime(utim[0] + utim[1] * 256, utim[3], utim[2])
ens.ntime[k] = tmlib.date2epoch(date + tmlib.timedelta(
milliseconds=int(fd.read_ui32(1) / 10)))[0]
fd.seek(16, 1)
self._nbyte = 2 + 76
def read_winriver2(self, ):
startpos = self.f.tell()
self._winrivprob = True
self.cfg['sourceprog'] = 'WINRIVER'
ens = self.ensemble
k = ens.k
if self._source != 3 and self._debug_level >= 1:
warnings.warn(' \n***** Apparently a WINRIVER2 file\n'
'***** WARNING: Raw NMEA data '
'handler not yet fully implemented\n\n')
self._source = 3
spid = self.f.read_ui16(1)
if spid == 104:
sz = self.f.read_ui16(1)
dtime = self.f.read_f64(1)
start_string = self.f.reads(6)
_ = self.f.reads(1)
if start_string != '$GPGGA':
if self._debug_level > 1:
warnings.warn(f'Invalid GPGGA string found in ensemble {k},'
' skipping...')
return 'FAIL'
gga_time = str(self.f.reads(9))
time = tmlib.timedelta(hours=int(gga_time[0:2]),
minutes=int(gga_time[2:4]),
seconds=int(gga_time[4:6]),
milliseconds=int(gga_time[7:])*100)
clock = self.ensemble.rtc[:, :]
if clock[0, 0] < 100:
clock[0, :] += century
ens.time_gps[k] = tmlib.date2epoch(tmlib.datetime(
*clock[:3, 0]) + time)[0]
self.f.seek(1, 1)
ens.latitude_gps[k] = self.f.read_f64(1)
tcNS = self.f.reads(1)
if tcNS == 'S':
ens.latitude_gps[k] *= -1
elif tcNS != 'N':
if self._debug_level > 1:
warnings.warn(f'Invalid GPGGA string found in ensemble {k},'
' skipping...')
return 'FAIL'
ens.longitude_gps[k] = self.f.read_f64(1)
tcEW = self.f.reads(1)
if tcEW == 'W':
ens.longitude_gps[k] *= -1
elif tcEW != 'E':
if self._debug_level > 1:
warnings.warn(f'Invalid GPGGA string found in ensemble {k},'
' skipping...')
return 'FAIL'
ucqual, n_sat = self.f.read_ui8(2)
tmp = self.f.read_float(2)
ens.hdop, ens.altitude = tmp
if self.f.reads(1) != 'M':
if self._debug_level > 1:
warnings.warn(f'Invalid GPGGA string found in ensemble {k},'
' skipping...')
return 'FAIL'
ggeoid_sep = self.f.read_float(1)
if self.f.reads(1) != 'M':
if self._debug_level > 1:
warnings.warn(f'Invalid GPGGA string found in ensemble {k},'
' skipping...')
return 'FAIL'
gage = self.f.read_float(1)
gstation_id = self.f.read_ui16(1)
# 4 unknown bytes (2 reserved+2 checksum?)
# 78 bytes for GPGGA string (including \r\n)
# 2 reserved + 2 checksum
self.vars_read += ['longitude_gps', 'latitude_gps', 'time_gps']
self._nbyte = self.f.tell() - startpos + 2
if self._debug_level >= 5:
print('')
print(sz, ens.longitude_gps[k])
def read_winriver(self, nbt):
self._winrivprob = True
self.cfg['sourceprog'] = 'WINRIVER'
if self._source not in [2, 3]:
if self._debug_level >= 1:
warnings.warn('\n ***** Apparently a WINRIVER file - '
'Raw NMEA data handler not yet implemented\n\n')
self._source = 2
startpos = self.f.tell()
sz = self.f.read_ui16(1)
tmp = self.f.reads(sz)
self._nbyte = self.f.tell() - startpos + 2
def skip_Ncol(self, n_skip=1):
self.f.seek(n_skip * self.cfg['n_cells'], 1)
self._nbyte = 2 + n_skip * self.cfg['n_cells']
def skip_Nbyte(self, n_skip):
self.f.seek(n_skip, 1)
        self._nbyte = 2 + n_skip
def read_nocode(self, id):
# Skipping bytes from codes 0340-30FC, commented if needed
# hxid = hex(id)
# if hxid[2:4] == '30':
# raise Exception("")
# # I want to count the number of 1s in the middle 4 bits
# # of the 2nd two bytes.
# # 60 is a 0b00111100 mask
# nflds = (bin(int(hxid[3]) & 60).count('1') +
# bin(int(hxid[4]) & 60).count('1'))
# # I want to count the number of 1s in the highest
# # 2 bits of byte 3
# # 3 is a 0b00000011 mask:
# dfac = bin(int(hxid[3], 0) & 3).count('1')
# self.skip_Nbyte(12 * nflds * dfac)
# else:
print(' Unrecognized ID code: %0.4X\n' % id)
def remove_end(self, iens):
dat = self.outd
print(' Encountered end of file. Cleaning up data.')
for nm in self.vars_read:
_setd(dat, nm, _get(dat, nm)[..., :iens])
def finalize(self, ):
"""Remove the attributes from the data that were never loaded.
"""
dat = self.outd
for nm in set(data_defs.keys()) - self.vars_read:
_pop(dat, nm)
for nm in self.cfg:
dat['attrs'][nm] = self.cfg[nm]
dat['attrs']['fs'] = (dat['attrs']['sec_between_ping_groups'] *
dat['attrs']['pings_per_ensemble']) ** (-1)
for nm in data_defs:
shp = data_defs[nm][0]
if len(shp) and shp[0] == 'nc' and _in_group(dat, nm):
_setd(dat, nm, np.swapaxes(_get(dat, nm), 0, 1))
def __exit__(self, type, value, traceback):
self.f.close()
def __enter__(self,):
return self
|
import django_filters
import netaddr
from django.core.exceptions import ValidationError
from django.db.models import Q
from netaddr.core import AddrFormatError
from dcim.models import Site, Device, Interface
from extras.filters import CustomFieldFilterSet
from tenancy.models import Tenant
from utilities.filters import NameSlugSearchFilterSet, NumericInFilter, TagFilter
from virtualization.models import VirtualMachine
from .constants import IPADDRESS_ROLE_CHOICES, IPADDRESS_STATUS_CHOICES, PREFIX_STATUS_CHOICES, VLAN_STATUS_CHOICES
from .models import Aggregate, IPAddress, Prefix, RIR, Role, Service, VLAN, VLANGroup, VRF
class VRFFilter(CustomFieldFilterSet, django_filters.FilterSet):
id__in = NumericInFilter(
field_name='id',
lookup_expr='in'
)
q = django_filters.CharFilter(
method='search',
label='Search',
)
tenant_id = django_filters.ModelMultipleChoiceFilter(
queryset=Tenant.objects.all(),
label='Tenant (ID)',
)
tenant = django_filters.ModelMultipleChoiceFilter(
field_name='tenant__slug',
queryset=Tenant.objects.all(),
to_field_name='slug',
label='Tenant (slug)',
)
tag = TagFilter()
def search(self, queryset, name, value):
if not value.strip():
return queryset
return queryset.filter(
Q(name__icontains=value) |
Q(rd__icontains=value) |
Q(description__icontains=value)
)
class Meta:
model = VRF
fields = ['name', 'rd', 'enforce_unique']
class RIRFilter(NameSlugSearchFilterSet):
id__in = NumericInFilter(
field_name='id',
lookup_expr='in'
)
class Meta:
model = RIR
fields = ['name', 'slug', 'is_private']
class AggregateFilter(CustomFieldFilterSet, django_filters.FilterSet):
id__in = NumericInFilter(
field_name='id',
lookup_expr='in'
)
q = django_filters.CharFilter(
method='search',
label='Search',
)
rir_id = django_filters.ModelMultipleChoiceFilter(
queryset=RIR.objects.all(),
label='RIR (ID)',
)
rir = django_filters.ModelMultipleChoiceFilter(
field_name='rir__slug',
queryset=RIR.objects.all(),
to_field_name='slug',
label='RIR (slug)',
)
tag = TagFilter()
class Meta:
model = Aggregate
fields = ['family', 'date_added']
def search(self, queryset, name, value):
if not value.strip():
return queryset
qs_filter = Q(description__icontains=value)
try:
prefix = str(netaddr.IPNetwork(value.strip()).cidr)
qs_filter |= Q(prefix__net_contains_or_equals=prefix)
except (AddrFormatError, ValueError):
pass
return queryset.filter(qs_filter)
class RoleFilter(NameSlugSearchFilterSet):
q = django_filters.CharFilter(
method='search',
label='Search',
)
class Meta:
model = Role
fields = ['name', 'slug']
class PrefixFilter(CustomFieldFilterSet, django_filters.FilterSet):
id__in = NumericInFilter(
field_name='id',
lookup_expr='in'
)
q = django_filters.CharFilter(
method='search',
label='Search',
)
prefix = django_filters.CharFilter(
method='filter_prefix',
label='Prefix',
)
within = django_filters.CharFilter(
method='search_within',
label='Within prefix',
)
within_include = django_filters.CharFilter(
method='search_within_include',
label='Within and including prefix',
)
contains = django_filters.CharFilter(
method='search_contains',
label='Prefixes which contain this prefix or IP',
)
mask_length = django_filters.NumberFilter(
method='filter_mask_length',
label='Mask length',
)
vrf_id = django_filters.ModelMultipleChoiceFilter(
queryset=VRF.objects.all(),
label='VRF',
)
vrf = django_filters.ModelMultipleChoiceFilter(
field_name='vrf__rd',
queryset=VRF.objects.all(),
to_field_name='rd',
label='VRF (RD)',
)
tenant_id = django_filters.ModelMultipleChoiceFilter(
queryset=Tenant.objects.all(),
label='Tenant (ID)',
)
tenant = django_filters.ModelMultipleChoiceFilter(
field_name='tenant__slug',
queryset=Tenant.objects.all(),
to_field_name='slug',
label='Tenant (slug)',
)
site_id = django_filters.ModelMultipleChoiceFilter(
queryset=Site.objects.all(),
label='Site (ID)',
)
site = django_filters.ModelMultipleChoiceFilter(
field_name='site__slug',
queryset=Site.objects.all(),
to_field_name='slug',
label='Site (slug)',
)
vlan_id = django_filters.ModelMultipleChoiceFilter(
queryset=VLAN.objects.all(),
label='VLAN (ID)',
)
vlan_vid = django_filters.NumberFilter(
field_name='vlan__vid',
label='VLAN number (1-4095)',
)
role_id = django_filters.ModelMultipleChoiceFilter(
queryset=Role.objects.all(),
label='Role (ID)',
)
role = django_filters.ModelMultipleChoiceFilter(
field_name='role__slug',
queryset=Role.objects.all(),
to_field_name='slug',
label='Role (slug)',
)
status = django_filters.MultipleChoiceFilter(
choices=PREFIX_STATUS_CHOICES,
null_value=None
)
tag = TagFilter()
class Meta:
model = Prefix
fields = ['family', 'is_pool']
def search(self, queryset, name, value):
if not value.strip():
return queryset
qs_filter = Q(description__icontains=value)
try:
prefix = str(netaddr.IPNetwork(value.strip()).cidr)
qs_filter |= Q(prefix__net_contains_or_equals=prefix)
except (AddrFormatError, ValueError):
pass
return queryset.filter(qs_filter)
def filter_prefix(self, queryset, name, value):
if not value.strip():
return queryset
try:
query = str(netaddr.IPNetwork(value).cidr)
return queryset.filter(prefix=query)
except ValidationError:
return queryset.none()
def search_within(self, queryset, name, value):
value = value.strip()
if not value:
return queryset
try:
query = str(netaddr.IPNetwork(value).cidr)
return queryset.filter(prefix__net_contained=query)
except (AddrFormatError, ValueError):
return queryset.none()
def search_within_include(self, queryset, name, value):
value = value.strip()
if not value:
return queryset
try:
query = str(netaddr.IPNetwork(value).cidr)
return queryset.filter(prefix__net_contained_or_equal=query)
except (AddrFormatError, ValueError):
return queryset.none()
def search_contains(self, queryset, name, value):
value = value.strip()
if not value:
return queryset
try:
# Searching by prefix
if '/' in value:
return queryset.filter(prefix__net_contains_or_equals=str(netaddr.IPNetwork(value).cidr))
# Searching by IP address
else:
return queryset.filter(prefix__net_contains=str(netaddr.IPAddress(value)))
except (AddrFormatError, ValueError):
return queryset.none()
def filter_mask_length(self, queryset, name, value):
if not value:
return queryset
return queryset.filter(prefix__net_mask_length=value)
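# Illustrative use of the filters above via the standard django-filter API
# (shown as an assumption, not project-specific code):
#   PrefixFilter(data={'contains': '10.0.0.1'}, queryset=Prefix.objects.all()).qs
# returns the prefixes whose network contains the address 10.0.0.1.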
class IPAddressFilter(CustomFieldFilterSet, django_filters.FilterSet):
id__in = NumericInFilter(
field_name='id',
lookup_expr='in'
)
q = django_filters.CharFilter(
method='search',
label='Search',
)
parent = django_filters.CharFilter(
method='search_by_parent',
label='Parent prefix',
)
address = django_filters.CharFilter(
method='filter_address',
label='Address',
)
mask_length = django_filters.NumberFilter(
method='filter_mask_length',
label='Mask length',
)
vrf_id = django_filters.ModelMultipleChoiceFilter(
queryset=VRF.objects.all(),
label='VRF',
)
vrf = django_filters.ModelMultipleChoiceFilter(
field_name='vrf__rd',
queryset=VRF.objects.all(),
to_field_name='rd',
label='VRF (RD)',
)
tenant_id = django_filters.ModelMultipleChoiceFilter(
queryset=Tenant.objects.all(),
label='Tenant (ID)',
)
tenant = django_filters.ModelMultipleChoiceFilter(
field_name='tenant__slug',
queryset=Tenant.objects.all(),
to_field_name='slug',
label='Tenant (slug)',
)
device = django_filters.CharFilter(
method='filter_device',
field_name='name',
label='Device',
)
device_id = django_filters.NumberFilter(
method='filter_device',
field_name='pk',
label='Device (ID)',
)
virtual_machine_id = django_filters.ModelMultipleChoiceFilter(
field_name='interface__virtual_machine',
queryset=VirtualMachine.objects.all(),
label='Virtual machine (ID)',
)
virtual_machine = django_filters.ModelMultipleChoiceFilter(
field_name='interface__virtual_machine__name',
queryset=VirtualMachine.objects.all(),
to_field_name='name',
label='Virtual machine (name)',
)
interface_id = django_filters.ModelMultipleChoiceFilter(
queryset=Interface.objects.all(),
label='Interface (ID)',
)
status = django_filters.MultipleChoiceFilter(
choices=IPADDRESS_STATUS_CHOICES,
null_value=None
)
role = django_filters.MultipleChoiceFilter(
choices=IPADDRESS_ROLE_CHOICES
)
tag = TagFilter()
class Meta:
model = IPAddress
fields = ['family']
def search(self, queryset, name, value):
if not value.strip():
return queryset
qs_filter = (
Q(description__icontains=value) |
Q(address__istartswith=value)
)
return queryset.filter(qs_filter)
def search_by_parent(self, queryset, name, value):
value = value.strip()
if not value:
return queryset
try:
query = str(netaddr.IPNetwork(value.strip()).cidr)
return queryset.filter(address__net_host_contained=query)
except (AddrFormatError, ValueError):
return queryset.none()
def filter_address(self, queryset, name, value):
if not value.strip():
return queryset
try:
# Match address and subnet mask
if '/' in value:
return queryset.filter(address=value)
return queryset.filter(address__net_host=value)
except ValidationError:
return queryset.none()
def filter_mask_length(self, queryset, name, value):
if not value:
return queryset
return queryset.filter(address__net_mask_length=value)
def filter_device(self, queryset, name, value):
try:
device = Device.objects.select_related('device_type').get(**{name: value})
vc_interface_ids = [i['id'] for i in device.vc_interfaces.values('id')]
return queryset.filter(interface_id__in=vc_interface_ids)
except Device.DoesNotExist:
return queryset.none()
class VLANGroupFilter(NameSlugSearchFilterSet):
site_id = django_filters.ModelMultipleChoiceFilter(
queryset=Site.objects.all(),
label='Site (ID)',
)
site = django_filters.ModelMultipleChoiceFilter(
field_name='site__slug',
queryset=Site.objects.all(),
to_field_name='slug',
label='Site (slug)',
)
class Meta:
model = VLANGroup
fields = ['name', 'slug']
class VLANFilter(CustomFieldFilterSet, django_filters.FilterSet):
id__in = NumericInFilter(
field_name='id',
lookup_expr='in'
)
q = django_filters.CharFilter(
method='search',
label='Search',
)
site_id = django_filters.ModelMultipleChoiceFilter(
queryset=Site.objects.all(),
label='Site (ID)',
)
site = django_filters.ModelMultipleChoiceFilter(
field_name='site__slug',
queryset=Site.objects.all(),
to_field_name='slug',
label='Site (slug)',
)
group_id = django_filters.ModelMultipleChoiceFilter(
queryset=VLANGroup.objects.all(),
label='Group (ID)',
)
group = django_filters.ModelMultipleChoiceFilter(
field_name='group__slug',
queryset=VLANGroup.objects.all(),
to_field_name='slug',
label='Group',
)
tenant_id = django_filters.ModelMultipleChoiceFilter(
queryset=Tenant.objects.all(),
label='Tenant (ID)',
)
tenant = django_filters.ModelMultipleChoiceFilter(
field_name='tenant__slug',
queryset=Tenant.objects.all(),
to_field_name='slug',
label='Tenant (slug)',
)
role_id = django_filters.ModelMultipleChoiceFilter(
queryset=Role.objects.all(),
label='Role (ID)',
)
role = django_filters.ModelMultipleChoiceFilter(
field_name='role__slug',
queryset=Role.objects.all(),
to_field_name='slug',
label='Role (slug)',
)
status = django_filters.MultipleChoiceFilter(
choices=VLAN_STATUS_CHOICES,
null_value=None
)
tag = TagFilter()
class Meta:
model = VLAN
fields = ['vid', 'name']
def search(self, queryset, name, value):
if not value.strip():
return queryset
qs_filter = Q(name__icontains=value) | Q(description__icontains=value)
try:
qs_filter |= Q(vid=int(value.strip()))
except ValueError:
pass
return queryset.filter(qs_filter)
class ServiceFilter(django_filters.FilterSet):
q = django_filters.CharFilter(
method='search',
label='Search',
)
device_id = django_filters.ModelMultipleChoiceFilter(
queryset=Device.objects.all(),
label='Device (ID)',
)
device = django_filters.ModelMultipleChoiceFilter(
field_name='device__name',
queryset=Device.objects.all(),
to_field_name='name',
label='Device (name)',
)
virtual_machine_id = django_filters.ModelMultipleChoiceFilter(
queryset=VirtualMachine.objects.all(),
label='Virtual machine (ID)',
)
virtual_machine = django_filters.ModelMultipleChoiceFilter(
field_name='virtual_machine__name',
queryset=VirtualMachine.objects.all(),
to_field_name='name',
label='Virtual machine (name)',
)
tag = TagFilter()
class Meta:
model = Service
fields = ['name', 'protocol', 'port']
def search(self, queryset, name, value):
if not value.strip():
return queryset
qs_filter = Q(name__icontains=value) | Q(description__icontains=value)
return queryset.filter(qs_filter)
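# Illustrative sketch (not part of the original module): driving IPAddressFilter
# from a querystring-style dict, the way the API/UI layers do. Assumes a
# configured Django/NetBox environment; the parameter values are hypothetical.
def _example_ipaddress_filtering():
    params = {
        'parent': '10.0.0.0/16',  # resolved by search_by_parent via netaddr
        'mask_length': 24,        # resolved by filter_mask_length
    }
    fs = IPAddressFilter(params, queryset=IPAddress.objects.all())
    return fs.qs  # the lazily evaluated, filtered queryset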
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .metrics_operations import MetricsOperations
from .events_operations import EventsOperations
from .query_operations import QueryOperations
__all__ = [
'MetricsOperations',
'EventsOperations',
'QueryOperations',
]
|
__author__ = 'frederico'
from tabuleiro import tabuleiro
# Ship type                  | Size | Key
# Aircraft carrier              5     ac
# Battleship                    4     b
# Submarine                     3     s
# Destroyer (or Cruiser)        3     d
# Patrol boat (or destroyer)    2     pb
barcos = {'ac': 5, 'b': 4, 's': 3, 'd': 3, 'pb': 2}
x = int(input('enter the number of rows: '))
y = int(input('enter the number of columns: '))
z = 'M'
t = tabuleiro(x, y, z)
print(t.M)
print(t.M[0])
print('board created successfully. you may place 5 ships (one of each type)')
m = 0
while m < 5:
    a = input('enter the start coordinates (e.g. "1, 2"): ')
    b = input('enter the end coordinates (e.g. "3, 2"): ')
    c = input('ship type: ')
    # parse the start coordinate pair
    a_in = int(a[0:1])
    a_fi = int(a[3:4])
    # parse the end coordinate pair
    b_in = int(b[0:1])
    b_fi = int(b[3:4])
    t.posicao(a_in, a_fi, b_in, b_fi)
    m = m + 1
    # the ship type could also be marked here, e.g. tabuleiro.m(a, b, 'x')
|
import torch
import random
import collections
import networkx as nx
from rdkit.Chem import AllChem
import numpy as np
from loader import graph_data_obj_to_nx_simple, nx_to_graph_data_obj_simple
from loader import MoleculeDataset
def get_filtered_fingerprint(smiles):
""" Get filtered PubChem fingerprint. The digits related to elements other than C,
H, O, N, S, F, Cl, and Br are discarded.
Args:
smiles (str): SMILES string.
    Return:
        fp (np.ndarray): The filtered PubChem fingerprint as a vector.
"""
from PyFingerprint.All_Fingerprint import get_fingerprint
fp = get_fingerprint(smiles, fp_type="pubchem", output="vector")
del_pos = (
[
26,
27,
28,
29,
30,
31,
32,
41,
42,
46,
47,
48,
295,
296,
298,
303,
304,
348,
354,
369,
407,
411,
415,
456,
525,
627,
]
+ list(range(49, 115))
+ list(range(263, 283))
+ list(range(288, 293))
+ list(range(310, 317))
+ list(range(318, 327))
+ list(range(327, 332))
+ list(range(424, 427))
)
fp = np.delete(fp, del_pos)
return fp
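# Hedged usage sketch: filtered PubChem fingerprint for one molecule. Requires
# the optional PyFingerprint dependency imported inside the function above;
# the SMILES string is an arbitrary example.
def _example_filtered_fingerprint():
    fp = get_filtered_fingerprint("CCO")  # ethanol
    # fp is a 1-D numpy array with the element-specific bit positions removed
    return fp.shape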
def check_same_molecules(s1, s2):
mol1 = AllChem.MolFromSmiles(s1)
mol2 = AllChem.MolFromSmiles(s2)
return AllChem.MolToInchi(mol1) == AllChem.MolToInchi(mol2)
class NegativeEdge:
def __init__(self):
"""
Randomly sample negative edges
"""
pass
def __call__(self, data):
num_nodes = data.num_nodes
num_edges = data.num_edges
edge_set = set(
[
str(data.edge_index[0, i].cpu().item())
+ ","
+ str(data.edge_index[1, i].cpu().item())
for i in range(data.edge_index.shape[1])
]
)
        redundant_sample = torch.randint(0, num_nodes, (2, 5 * num_edges))
sampled_ind = []
sampled_edge_set = set([])
for i in range(5 * num_edges):
            node1 = redundant_sample[0, i].cpu().item()
            node2 = redundant_sample[1, i].cpu().item()
edge_str = str(node1) + "," + str(node2)
if not any(
[edge_str in edge_set, edge_str in sampled_edge_set, node1 == node2]
):
sampled_edge_set.add(edge_str)
sampled_ind.append(i)
if len(sampled_ind) == num_edges / 2:
break
        data.negative_edge_index = redundant_sample[:, sampled_ind]
return data
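# Minimal sketch of applying NegativeEdge to a single graph, mirroring the
# __main__ block at the bottom of this file; the dataset path is assumed.
def _example_negative_edge_sampling():
    dataset = MoleculeDataset("dataset/tox21", dataset="tox21")
    data = NegativeEdge()(dataset[0])
    # negative_edge_index holds sampled node pairs that are not real edges
    return data.negative_edge_index.shape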
class ExtractSubstructureContextPair:
def __init__(self, k, l1, l2):
"""
Randomly selects a node from the data object, and adds attributes
that contain the substructure that corresponds to k hop neighbours
rooted at the node, and the context substructures that corresponds to
the subgraph that is between l1 and l2 hops away from the
root node.
:param k:
:param l1:
:param l2:
"""
self.k = k
self.l1 = l1
self.l2 = l2
# for the special case of 0, addresses the quirk with
# single_source_shortest_path_length
if self.k == 0:
self.k = -1
if self.l1 == 0:
self.l1 = -1
if self.l2 == 0:
self.l2 = -1
def __call__(self, data, root_idx=None):
"""
:param data: pytorch geometric data object
:param root_idx: If None, then randomly samples an atom idx.
Otherwise sets atom idx of root (for debugging only)
:return: None. Creates new attributes in original data object:
data.center_substruct_idx
data.x_substruct
data.edge_attr_substruct
data.edge_index_substruct
data.x_context
data.edge_attr_context
data.edge_index_context
data.overlap_context_substruct_idx
"""
num_atoms = data.x.size(0)
if root_idx is None:
root_idx = random.sample(range(num_atoms), 1)[0]
G = graph_data_obj_to_nx_simple(data) # same ordering as input data obj
# Get k-hop subgraph rooted at specified atom idx
substruct_node_idxes = nx.single_source_shortest_path_length(
G, root_idx, self.k
).keys()
if len(substruct_node_idxes) > 0:
substruct_G = G.subgraph(substruct_node_idxes)
substruct_G, substruct_node_map = reset_idxes(substruct_G) # need
# to reset node idx to 0 -> num_nodes - 1, otherwise data obj does not
# make sense, since the node indices in data obj must start at 0
substruct_data = nx_to_graph_data_obj_simple(substruct_G)
data.x_substruct = substruct_data.x
data.edge_attr_substruct = substruct_data.edge_attr
data.edge_index_substruct = substruct_data.edge_index
data.center_substruct_idx = torch.tensor(
[substruct_node_map[root_idx]]
) # need
# to convert center idx from original graph node ordering to the
# new substruct node ordering
# Get subgraphs that is between l1 and l2 hops away from the root node
l1_node_idxes = nx.single_source_shortest_path_length(
G, root_idx, self.l1
).keys()
l2_node_idxes = nx.single_source_shortest_path_length(
G, root_idx, self.l2
).keys()
context_node_idxes = set(l1_node_idxes).symmetric_difference(set(l2_node_idxes))
if len(context_node_idxes) > 0:
context_G = G.subgraph(context_node_idxes)
context_G, context_node_map = reset_idxes(context_G) # need to
# reset node idx to 0 -> num_nodes - 1, otherwise data obj does not
# make sense, since the node indices in data obj must start at 0
context_data = nx_to_graph_data_obj_simple(context_G)
data.x_context = context_data.x
data.edge_attr_context = context_data.edge_attr
data.edge_index_context = context_data.edge_index
# Get indices of overlapping nodes between substruct and context,
# WRT context ordering
context_substruct_overlap_idxes = list(
set(context_node_idxes).intersection(set(substruct_node_idxes))
)
if len(context_substruct_overlap_idxes) > 0:
context_substruct_overlap_idxes_reorder = [
context_node_map[old_idx] for old_idx in context_substruct_overlap_idxes
]
# need to convert the overlap node idxes, which is from the
# original graph node ordering to the new context node ordering
data.overlap_context_substruct_idx = torch.tensor(
context_substruct_overlap_idxes_reorder
)
return data
# ### For debugging ###
# if len(substruct_node_idxes) > 0:
# substruct_mol = graph_data_obj_to_mol_simple(data.x_substruct,
# data.edge_index_substruct,
# data.edge_attr_substruct)
# print(AllChem.MolToSmiles(substruct_mol))
# if len(context_node_idxes) > 0:
# context_mol = graph_data_obj_to_mol_simple(data.x_context,
# data.edge_index_context,
# data.edge_attr_context)
# print(AllChem.MolToSmiles(context_mol))
#
# print(list(context_node_idxes))
# print(list(substruct_node_idxes))
# print(context_substruct_overlap_idxes)
# ### End debugging ###
def __repr__(self):
return "{}(k={},l1={}, l2={})".format(
self.__class__.__name__, self.k, self.l1, self.l2
)
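# Minimal sketch of the substructure/context extraction on one molecule graph,
# assuming the same dataset layout as the __main__ block below; k, l1 and l2
# are arbitrary example values.
def _example_substructure_context_pair():
    dataset = MoleculeDataset("dataset/tox21", dataset="tox21")
    transform = ExtractSubstructureContextPair(k=2, l1=1, l2=3)
    data = transform(dataset[0])
    # the transform attaches x_substruct / x_context plus their edge tensors
    return data.x_substruct.shape, data.x_context.shape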
def reset_idxes(G):
"""
Resets node indices such that they are numbered from 0 to num_nodes - 1
:param G:
:return: copy of G with relabelled node indices, mapping
"""
mapping = {}
for new_idx, old_idx in enumerate(G.nodes()):
mapping[old_idx] = new_idx
new_G = nx.relabel_nodes(G, mapping, copy=True)
return new_G, mapping
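# Quick illustration of reset_idxes on a tiny graph with non-contiguous labels:
# node indices are renumbered to 0..n-1 and the old->new mapping is returned.
def _example_reset_idxes():
    g = nx.relabel_nodes(nx.path_graph(3), {0: 10, 1: 20, 2: 30})
    new_g, mapping = reset_idxes(g)
    return sorted(new_g.nodes()), mapping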
# TODO(Bowen): more unittests
class MaskAtom:
def __init__(self, num_atom_features, num_edge_type, mask_rate, mask_edge=True):
"""
Randomly masks an atom, and optionally masks edges connecting to it.
        The mask atom type index is num_possible_atom_type.
        The mask edge type index is num_possible_edge_type.
        :param num_atom_features: number of atom features in data.x
        :param num_edge_type:
:param mask_rate: % of atoms to be masked
:param mask_edge: If True, also mask the edges that connect to the
masked atoms
"""
self.num_atom_features = num_atom_features
self.num_edge_type = num_edge_type
self.mask_rate = mask_rate
self.mask_edge = mask_edge
def __call__(self, data, masked_atom_indices=None):
"""
:param data: pytorch geometric data object. Assume that the edge
ordering is the default pytorch geometric ordering, where the two
directions of a single edge occur in pairs.
Eg. data.edge_index = tensor([[0, 1, 1, 2, 2, 3],
[1, 0, 2, 1, 3, 2]])
:param masked_atom_indices: If None, then randomly samples num_atoms
* mask rate number of atom indices
Otherwise a list of atom idx that sets the atoms to be masked (for
debugging only)
:return: None, Creates new attributes in original data object:
data.mask_node_idx
data.mask_node_label
data.mask_edge_idx
data.mask_edge_label
"""
if masked_atom_indices is None:
# sample x distinct atoms to be masked, based on mask rate. But
# will sample at least 1 atom
num_atoms = data.x.size()[0]
sample_size = int(round(num_atoms * self.mask_rate))
if sample_size == 0:
sample_size = 1
masked_atom_indices = random.sample(range(num_atoms), sample_size)
# create mask node label by copying atom feature of mask atom
mask_node_labels_list = []
for atom_idx in masked_atom_indices:
mask_node_labels_list.append(data.x[atom_idx].view(1, -1))
data.mask_node_label = torch.cat(mask_node_labels_list, dim=0)
data.masked_atom_indices = torch.tensor(masked_atom_indices)
# modify the original node feature of the masked node
for atom_idx in masked_atom_indices:
data.x[atom_idx] = torch.tensor([0] * self.num_atom_features)
if self.mask_edge:
# create mask edge labels by copying edge features of edges that are bonded
# to mask atoms
connected_edge_indices = []
for bond_idx, (u, v) in enumerate(data.edge_index.cpu().numpy().T):
for atom_idx in masked_atom_indices:
if (
atom_idx in set((u, v))
and bond_idx not in connected_edge_indices
):
connected_edge_indices.append(bond_idx)
if len(connected_edge_indices) > 0:
# create mask edge labels by copying bond features of the bonds
# connected to the mask atoms
mask_edge_labels_list = []
for bond_idx in connected_edge_indices[::2]: # because the
# edge ordering is such that two directions of a single
# edge occur in pairs, so to get the unique undirected
# edge indices, we take every 2nd edge index from list
mask_edge_labels_list.append(data.edge_attr[bond_idx].view(1, -1))
data.mask_edge_label = torch.cat(mask_edge_labels_list, dim=0)
# modify the original bond features of the bonds connected to the mask
# atoms
for bond_idx in connected_edge_indices:
data.edge_attr[bond_idx] = torch.tensor([self.num_edge_type, 0])
data.connected_edge_indices = torch.tensor(connected_edge_indices[::2])
else:
data.mask_edge_label = torch.empty((0, 2)).to(torch.int64)
data.connected_edge_indices = torch.tensor(connected_edge_indices).to(
torch.int64
)
# data.x = data.x[2:]
return data
def __repr__(self):
reprs = "{}(num_atom_features={}, num_edge_type={}, mask_rate={}, mask_edge={})"
return reprs.format(
self.__class__.__name__,
self.num_atom_features,
self.num_edge_type,
self.mask_rate,
self.mask_edge,
)
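# Hedged sketch of MaskAtom on one molecule, mirroring the commented unit test
# at the bottom of this file; the feature sizes passed here are assumptions.
def _example_mask_atom():
    from loader import mol_to_graph_data_obj_simple
    mol = AllChem.MolFromSmiles("C#Cc1c(O)c(Cl)cc(/C=C/N)c1S")
    data = mol_to_graph_data_obj_simple(mol)
    transform = MaskAtom(num_atom_features=2, num_edge_type=5,
                         mask_rate=0.15, mask_edge=True)
    data = transform(data)
    # masked rows of data.x are zeroed; the originals live in mask_node_label
    return data.masked_atom_indices, data.mask_node_label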
class ONEHOT_ContextPair(object):
ONEHOTENCODING_CODEBOOKS = {
"atom_type": list(range(119)),
"degree": list(range(11)),
"formal_charge": list(range(11)),
"hybridization_type": list(range(7)),
"aromatic": [0, 1],
"chirality_type": [0, 1, 2, 3],
}
def __init__(self, dataset, k, l1, l2):
self.dataset = dataset
self.k = k
self.l1 = l1
self.l2 = l2
# for the special case of 0, addresses the quirk with
# single_source_shortest_path_length
if self.k == 0:
self.k = -1
if self.l1 == 0:
self.l1 = -1
if self.l2 == 0:
self.l2 = -1
self.FEATURE_NAMES = [
"atom_type",
"degree",
"formal_charge",
"hybridization_type",
"aromatic",
"chirality_type",
]
self.ONEHOTENCODING = [0, 1, 2, 3, 4, 5]
def get_CODEBOOKS(self):
if self.ONEHOTENCODING_CODEBOOKS:
# print("ONEHOTENCODING_CODEBOOKS is available already, do not need to
# regenerate ONEHOTENCODING_CODEBOOKS")
# print(ONEHOTENCODING_CODEBOOKS)
return
# print(f"generating ONEHOTENCODING_CODEBOOKS......")
features_all = [data.x.numpy() for data in self.dataset]
features = np.vstack(features_all)
node_attributes_cnt = {}
for j, col in enumerate(zip(*features)):
node_attributes_cnt[self.FEATURE_NAMES[j]] = collections.Counter(col)
self.ONEHOTENCODING_CODEBOOKS.update(
{
feature_name: sorted(node_attributes_cnt[feature_name].keys())
for feature_name in self.FEATURE_NAMES
}
)
def get_onehot_features(self, features):
feature_one_hot = []
# print(f'input features{features}')
for row in features.tolist():
this_row = []
for j, feature_val_before_onehot in enumerate(row):
onehot_code = self.ONEHOTENCODING_CODEBOOKS[self.FEATURE_NAMES[j]]
onehot_val = [0.0] * len(onehot_code)
assert feature_val_before_onehot in onehot_code
onehot_val[onehot_code.index(feature_val_before_onehot)] = 1.0
this_row += onehot_val
feature_one_hot.append(this_row)
return torch.Tensor(feature_one_hot)
def __call__(self, data, root_idx=None):
self.get_CODEBOOKS()
# print(f'before onehot data {data.x.numpy()}')
num_atoms = data.x.size(0)
if root_idx is None:
root_idx = random.sample(range(num_atoms), 1)[0]
G = graph_data_obj_to_nx_simple(data) # same ordering as input data obj
# Get k-hop subgraph rooted at specified atom idx
substruct_node_idxes = nx.single_source_shortest_path_length(
G, root_idx, self.k
).keys()
if len(substruct_node_idxes) > 0:
substruct_G = G.subgraph(substruct_node_idxes)
substruct_G, substruct_node_map = reset_idxes(substruct_G) # need
# to reset node idx to 0 -> num_nodes - 1, otherwise data obj does not
# make sense, since the node indices in data obj must start at 0
substruct_data = nx_to_graph_data_obj_simple(substruct_G)
data.x_substruct = substruct_data.x
data.edge_attr_substruct = substruct_data.edge_attr
data.edge_index_substruct = substruct_data.edge_index
data.center_substruct_idx = torch.tensor(
[substruct_node_map[root_idx]]
) # need
# to convert center idx from original graph node ordering to the
# new substruct node ordering
data.x_substruct = self.get_onehot_features(data.x_substruct.numpy())
# Get subgraphs that is between l1 and l2 hops away from the root node
l1_node_idxes = nx.single_source_shortest_path_length(
G, root_idx, self.l1
).keys()
l2_node_idxes = nx.single_source_shortest_path_length(
G, root_idx, self.l2
).keys()
context_node_idxes = set(l1_node_idxes).symmetric_difference(set(l2_node_idxes))
if len(context_node_idxes) > 0:
context_G = G.subgraph(context_node_idxes)
context_G, context_node_map = reset_idxes(context_G) # need to
# reset node idx to 0 -> num_nodes - 1, otherwise data obj does not
# make sense, since the node indices in data obj must start at 0
context_data = nx_to_graph_data_obj_simple(context_G)
data.x_context = context_data.x
data.edge_attr_context = context_data.edge_attr
data.edge_index_context = context_data.edge_index
data.x_context = self.get_onehot_features(data.x_context.numpy())
# Get indices of overlapping nodes between substruct and context,
# WRT context ordering
context_substruct_overlap_idxes = list(
set(context_node_idxes).intersection(set(substruct_node_idxes))
)
if len(context_substruct_overlap_idxes) > 0:
context_substruct_overlap_idxes_reorder = [
context_node_map[old_idx] for old_idx in context_substruct_overlap_idxes
]
# need to convert the overlap node idxes, which is from the
# original graph node ordering to the new context node ordering
data.overlap_context_substruct_idx = torch.tensor(
context_substruct_overlap_idxes_reorder
)
# print(f'after onehot data{onehot_features.size()}')
# print()
# print ( data )
return data
def __repr__(self):
return "{}(k={},l1={}, l2={})".format(
self.__class__.__name__, self.k, self.l1, self.l2
)
# def __repr__(self):
# return f'{self.__class__.__name__}'
class ONEHOT_ENCODING(object):
ONEHOTENCODING_CODEBOOKS = {
"atom_type": list(range(119)),
"degree": list(range(11)),
"formal_charge": list(range(11)),
"hybridization_type": list(range(7)),
"aromatic": [0, 1],
"chirality_type": [0, 1, 2, 3],
}
def __init__(self, dataset):
self.dataset = dataset
self.FEATURE_NAMES = [
"atom_type",
"degree",
"formal_charge",
"hybridization_type",
"aromatic",
"chirality_type",
]
self.ONEHOTENCODING = [0, 1, 2, 3, 4, 5]
def get_CODEBOOKS(self):
if self.ONEHOTENCODING_CODEBOOKS:
# print("ONEHOTENCODING_CODEBOOKS is available already, do not need to
# regenerate ONEHOTENCODING_CODEBOOKS")
# print(ONEHOTENCODING_CODEBOOKS)
return
features_all = [data.x.numpy() for data in self.dataset]
features = np.vstack(features_all)
node_attributes_cnt = {}
for j, col in enumerate(zip(*features)):
node_attributes_cnt[self.FEATURE_NAMES[j]] = collections.Counter(col)
        self.ONEHOTENCODING_CODEBOOKS.update(
            {
                feature_name: sorted(node_attributes_cnt[feature_name].keys())
                for feature_name in self.FEATURE_NAMES
            }
        )
        # print(f"generating ONEHOTENCODING_CODEBOOKS......")
    def get_onehot_features(self, features):
        feature_one_hot = []
        # print(f'input features{features}')
for row in features.tolist():
this_row = []
for j, feature_val_before_onehot in enumerate(row):
onehot_code = self.ONEHOTENCODING_CODEBOOKS[self.FEATURE_NAMES[j]]
onehot_val = [0.0] * len(onehot_code)
assert feature_val_before_onehot in onehot_code
onehot_val[onehot_code.index(feature_val_before_onehot)] = 1.0
this_row += onehot_val
feature_one_hot.append(this_row)
return torch.Tensor(feature_one_hot)
def __call__(self, data):
self.get_CODEBOOKS()
#print(f'before onehot data {data.x.numpy()}')
onehot_features = self.get_onehot_features(data.x.numpy())
#print(f'after onehot data{onehot_features.size()}')
data.x = onehot_features
#print()
#print ( data )
return data
def __repr__(self):
return f'{self.__class__.__name__}'
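# Sketch of the plain one-hot transform on a dataset sample; the dataset path
# is hypothetical and the class-level codebooks above are used unchanged.
def _example_onehot_encoding():
    dataset = MoleculeDataset("dataset/tox21", dataset="tox21")
    data = ONEHOT_ENCODING(dataset)(dataset[0])
    # each raw feature column is expanded to the width of its codebook
    return data.x.shape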
if __name__ == "__main__":
transform = NegativeEdge()
dataset = MoleculeDataset("dataset/tox21", dataset="tox21")
transform(dataset[0])
"""
# TODO(Bowen): more unit tests
# test ExtractSubstructureContextPair
smiles = 'C#Cc1c(O)c(Cl)cc(/C=C/N)c1S'
m = AllChem.MolFromSmiles(smiles)
data = mol_to_graph_data_obj_simple(m)
root_idx = 13
# 0 hops: no substructure or context. We just test the absence of x attr
transform = ExtractSubstructureContextPair(0, 0, 0)
transform(data, root_idx)
assert not hasattr(data, 'x_substruct')
assert not hasattr(data, 'x_context')
# k > n_nodes, l1 = 0 and l2 > n_nodes: substructure and context same as
# molecule
data = mol_to_graph_data_obj_simple(m)
transform = ExtractSubstructureContextPair(100000, 0, 100000)
transform(data, root_idx)
substruct_mol = graph_data_obj_to_mol_simple(data.x_substruct,
data.edge_index_substruct,
data.edge_attr_substruct)
context_mol = graph_data_obj_to_mol_simple(data.x_context,
data.edge_index_context,
data.edge_attr_context)
assert check_same_molecules(AllChem.MolToSmiles(substruct_mol),
AllChem.MolToSmiles(context_mol))
transform = ExtractSubstructureContextPair(1, 1, 10000)
transform(data, root_idx)
# increase k from 0, and increase l1 from 1 while keeping l2 > n_nodes: the
# total number of atoms should be n_atoms
for i in range(len(m.GetAtoms())):
data = mol_to_graph_data_obj_simple(m)
print('i: {}'.format(i))
transform = ExtractSubstructureContextPair(i, i, 100000)
transform(data, root_idx)
if hasattr(data, 'x_substruct'):
n_substruct_atoms = data.x_substruct.size()[0]
else:
n_substruct_atoms = 0
print('n_substruct_atoms: {}'.format(n_substruct_atoms))
if hasattr(data, 'x_context'):
n_context_atoms = data.x_context.size()[0]
else:
n_context_atoms = 0
print('n_context_atoms: {}'.format(n_context_atoms))
assert n_substruct_atoms + n_context_atoms == len(m.GetAtoms())
# l1 < k and l2 >= k, so an overlap exists between context and substruct
data = mol_to_graph_data_obj_simple(m)
transform = ExtractSubstructureContextPair(2, 1, 3)
transform(data, root_idx)
assert hasattr(data, 'center_substruct_idx')
# check correct overlap atoms between context and substruct
# m = AllChem.MolFromSmiles('COC1=CC2=C(NC(=N2)[S@@](=O)CC2=NC=C(C)C(OC)=C2C)C=C1')
# data = mol_to_graph_data_obj_simple(m)
# root_idx = 9
# k = 1
# l1 = 1
# l2 = 2
    # transform = ExtractSubstructureContextPair(k, l1, l2)
# transform(data, root_idx)
pass
# TODO(Bowen): more unit tests
# test MaskAtom
from loader import mol_to_graph_data_obj_simple, \
graph_data_obj_to_mol_simple
smiles = 'C#Cc1c(O)c(Cl)cc(/C=C/N)c1S'
m = AllChem.MolFromSmiles(smiles)
original_data = mol_to_graph_data_obj_simple(m)
num_atom_type = 118
num_edge_type = 5
# manually specify masked atom indices, don't mask edge
masked_atom_indices = [13, 12]
data = mol_to_graph_data_obj_simple(m)
transform = MaskAtom(num_atom_type, num_edge_type, 0.1, mask_edge=False)
transform(data, masked_atom_indices)
assert data.mask_node_label.size() == torch.Size(
(len(masked_atom_indices), 2))
assert not hasattr(data, 'mask_edge_label')
# check that the correct rows in x have been modified to be mask atom type
assert (data.x[masked_atom_indices] == torch.tensor(([num_atom_type,
0]))).all()
assert (data.mask_node_label == original_data.x[masked_atom_indices]).all()
# manually specify masked atom indices, mask edge
masked_atom_indices = [13, 12]
data = mol_to_graph_data_obj_simple(m)
transform = MaskAtom(num_atom_type, num_edge_type, 0.1, mask_edge=True)
transform(data, masked_atom_indices)
assert data.mask_node_label.size() == torch.Size(
(len(masked_atom_indices), 2))
# check that the correct rows in x have been modified to be mask atom type
assert (data.x[masked_atom_indices] == torch.tensor(([num_atom_type,
0]))).all()
assert (data.mask_node_label == original_data.x[masked_atom_indices]).all()
# check that the correct rows in edge_attr have been modified to be mask edge
# type, and the mask_edge_label are correct
rdkit_bonds = []
for atom_idx in masked_atom_indices:
bond_indices = list(AllChem.FindAtomEnvironmentOfRadiusN(m, radius=1,
rootedAtAtom=atom_idx))
for bond_idx in bond_indices:
rdkit_bonds.append(
(m.GetBonds()[bond_idx].GetBeginAtomIdx(), m.GetBonds()[
bond_idx].GetEndAtomIdx()))
rdkit_bonds.append(
(m.GetBonds()[bond_idx].GetEndAtomIdx(), m.GetBonds()[
bond_idx].GetBeginAtomIdx()))
rdkit_bonds = set(rdkit_bonds)
connected_edge_indices = []
for i in range(data.edge_index.size()[1]):
if tuple(data.edge_index.numpy().T[i].tolist()) in rdkit_bonds:
connected_edge_indices.append(i)
assert (data.edge_attr[connected_edge_indices] ==
torch.tensor(([num_edge_type, 0]))).all()
assert (data.mask_edge_label == original_data.edge_attr[
connected_edge_indices[::2]]).all() # data.mask_edge_label contains
# the unique edges (ignoring direction). The data obj has edge ordering
# such that two directions of a single edge occur in pairs, so to get the
# unique undirected edge indices, we take every 2nd edge index from list
"""
|
from __future__ import absolute_import
import posixpath
from rest_framework import serializers
from rest_framework.exceptions import PermissionDenied
from rest_framework.response import Response
from sentry.api.base import DocSection
from sentry.api.bases.organization import OrganizationReleasesBaseEndpoint
from sentry.api.exceptions import ResourceDoesNotExist
from sentry.api.serializers import serialize
from sentry.models import Release, ReleaseFile
try:
from django.http import (
CompatibleStreamingHttpResponse as StreamingHttpResponse
)
except ImportError:
from django.http import StreamingHttpResponse
class ReleaseFileSerializer(serializers.Serializer):
name = serializers.CharField(max_length=200, required=True)
class OrganizationReleaseFileDetailsEndpoint(OrganizationReleasesBaseEndpoint):
doc_section = DocSection.RELEASES
def download(self, releasefile):
file = releasefile.file
fp = file.getfile()
response = StreamingHttpResponse(
iter(lambda: fp.read(4096), b''),
content_type=file.headers.get('content-type', 'application/octet-stream'),
)
response['Content-Length'] = file.size
response['Content-Disposition'] = 'attachment; filename="%s"' % posixpath.basename(" ".join(releasefile.name.split()))
return response
def get(self, request, organization, version, file_id):
"""
Retrieve an Organization Release's File
```````````````````````````````````````
Return details on an individual file within a release. This does
not actually return the contents of the file, just the associated
metadata.
:pparam string organization_slug: the slug of the organization the
release belongs to.
:pparam string version: the version identifier of the release.
:pparam string file_id: the ID of the file to retrieve.
:auth: required
"""
try:
release = Release.objects.get(
organization_id=organization.id,
version=version,
)
except Release.DoesNotExist:
raise ResourceDoesNotExist
if not self.has_release_permission(request, organization, release):
raise PermissionDenied
try:
releasefile = ReleaseFile.objects.get(
release=release,
id=file_id,
)
except ReleaseFile.DoesNotExist:
raise ResourceDoesNotExist
download_requested = request.GET.get('download') is not None
if download_requested and (
request.access.has_scope('project:write')):
return self.download(releasefile)
elif download_requested:
return Response(status=403)
return Response(serialize(releasefile, request.user))
def put(self, request, organization, version, file_id):
"""
Update an Organization Release's File
`````````````````````````````````````
Update metadata of an existing file. Currently only the name of
the file can be changed.
:pparam string organization_slug: the slug of the organization the
release belongs to.
:pparam string version: the version identifier of the release.
:pparam string file_id: the ID of the file to update.
:param string name: the new name of the file.
:auth: required
"""
try:
release = Release.objects.get(
organization_id=organization.id,
version=version,
)
except Release.DoesNotExist:
raise ResourceDoesNotExist
if not self.has_release_permission(request, organization, release):
raise PermissionDenied
try:
releasefile = ReleaseFile.objects.get(
release=release,
id=file_id,
)
except ReleaseFile.DoesNotExist:
raise ResourceDoesNotExist
serializer = ReleaseFileSerializer(data=request.DATA)
if not serializer.is_valid():
return Response(serializer.errors, status=400)
result = serializer.object
releasefile.update(
name=result['name'],
)
return Response(serialize(releasefile, request.user))
def delete(self, request, organization, version, file_id):
"""
Delete an Organization Release's File
`````````````````````````````````````
Permanently remove a file from a release.
This will also remove the physical file from storage.
:pparam string organization_slug: the slug of the organization the
release belongs to.
:pparam string version: the version identifier of the release.
:pparam string file_id: the ID of the file to delete.
:auth: required
"""
try:
release = Release.objects.get(
organization_id=organization.id,
version=version,
)
except Release.DoesNotExist:
raise ResourceDoesNotExist
if not self.has_release_permission(request, organization, release):
raise PermissionDenied
try:
releasefile = ReleaseFile.objects.get(
release=release,
id=file_id,
)
except ReleaseFile.DoesNotExist:
raise ResourceDoesNotExist
file = releasefile.file
# TODO(dcramer): this doesnt handle a failure from file.deletefile() to
# the actual deletion of the db row
releasefile.delete()
file.delete()
return Response(status=204)
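# Small illustration (not part of the endpoint): the Content-Disposition header
# built in download() collapses whitespace in the stored name and keeps only
# the basename. The sample name below is hypothetical.
def _example_download_filename(name='dist/js/app  bundle.js'):
    return posixpath.basename(" ".join(name.split()))  # -> 'app bundle.js'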
|
import pytest
from stone_burner.config import parse_project_config
from stone_burner.config import TFAttributes
from stone_burner.config import get_component_paths
from .utils import SAMPLE_CONFIG
def test_parse_project_config_1():
e = {
'c1': {'component_type': 'c1', 'validate': {}},
'c2': {'component_type': 'c2', 'validate': {}},
'c3': {'component_type': 'c3', 'validate': {'check-variables': False}},
'mg1': {'component_type': 'my-generic-component', 'validate': {}},
'mg2': {'component_type': 'my-generic-component', 'validate': {}},
'mg3': {'component_type': 'my-generic-component', 'validate': {'check-variables': False}},
'oc1': {'component_type': 'other-generic-component', 'validate': {}},
'oc2': {'component_type': 'other-generic-component', 'validate': {}},
}
r = parse_project_config(SAMPLE_CONFIG, 'p1')
assert r == e
def test_parse_project_config_2():
with pytest.raises(Exception):
parse_project_config(SAMPLE_CONFIG, 'p2')
def check_variables(*args, **kwargs):
component_config = kwargs['component_config']
validate_config = component_config.get('validate', {})
check_variables = validate_config.get('check-variables', True)
return ['true'] if check_variables else ['false']
def test_TFAttributes_check_variables_1():
tf_attrs = TFAttributes()
r = tf_attrs.check_variables(
component_config={'component_type': 'c3', 'validate': {'check-variables': True}})
assert r == ['true']
def test_TFAttributes_check_variables_2():
tf_attrs = TFAttributes()
r = tf_attrs.check_variables(
component_config={'component_type': 'c3', 'validate': {'check-variables': False}})
assert r == ['false']
def test_TFAttributes_check_variables_3():
tf_attrs = TFAttributes()
r1 = tf_attrs.check_variables(
component_config={'component_type': 'c3'})
assert r1 == ['true']
r2 = tf_attrs.check_variables(
component_config={'component_type': 'c3', 'validate': {}})
assert r2 == ['true']
r3 = tf_attrs.check_variables(
component_config={'component_type': 'c3', 'validate': None})
assert r3 == ['true']
def test_get_component_paths_1():
r = get_component_paths(
'p1', 'c1', {'component_type': 'gc'}, 'e1', '/tmp/states', '/tmp/projects', '/tmp/vars')
e = {
'config_dir': '/tmp/projects/p1/gc',
'vars_file': '/tmp/vars/e1/p1/c1.tfvars',
'state_dir': '/tmp/states/e1/p1/c1',
}
assert r == e
|
import os
import sys
import inspect
current_dir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parent_dir = os.path.dirname(current_dir)
sys.path.insert(0, parent_dir)
import sum
def test_sum():
assert sum.sum(3, 4) == 7
|
# -*- coding: utf-8 -*-
#
# Copyright @ 0x6c78.
#
# 16-10-20 1:27 PM 0x6c78@gmail.com
#
# Distributed under terms of the MIT License
from functools import reduce
from operator import mul
from itertools import combinations
class Score(object):
    def __init__(self):
        """
        Per-position mismatch specificity weights measured experimentally by
        the Zhang lab; see http://crispr.mit.edu/about for details.
        """
        self.m = (0, 0, 0.014, 0, 0, 0.395, 0.317, 0, 0.389, 0.079, 0.445,
                  0.508, 0.613, 0.851, 0.732, 0.828, 0.615, 0.804, 0.685, 0.583)
    def _t1(self, locs):
        """
        :param locs: mismatch positions
        :return: value of the first term of the formula
        """
        return reduce(mul, [1 - self.m[loc] for loc in locs])
    @staticmethod
    def _t2(locs):
        """
        :param locs: mismatch positions; without a mismatch there is no mean
            pairwise distance, so locs must contain at least one position
        :return: value of the second term of the formula
        """
        if len(locs) == 1:
            return 1.000
        else:
            locs = sorted(locs)
            length = len(locs)
            mpd = (locs[-1] - locs[0]) / (length - 1)  # mean pairwise distance
            return 1 / (((19 - mpd) / 19) * 4 + 1)
    @staticmethod
    def _t3(m):
        """
        :param m: number of mismatched bases
        :return: value of the third term of the formula
        """
        return 1 / (m ** 2)
    def get(self, locs):
        if len(locs) == 0:
            return 100.000
        elif len(locs) == 1:
            return round(100 * self._t1(locs), 3)
        else:
            return round(100 * self._t1(locs) * self._t2(locs) * self._t3(len(locs)), 3)
    @classmethod
    def to_dict(cls):
        """
        Precompute the score for every possible set of mismatch positions and
        store the results in a dictionary to speed up later lookups.
        :return: dict mapping an underscore-joined string of mismatch
            positions to its score
        """
        mm2score = {}
        pos_list = range(20)
        score = cls()
        for mm_cnt in range(5):
            for mm_pos_list in combinations(pos_list, mm_cnt):
                mm2score['_'.join(str(_) for _ in mm_pos_list)] = score.get(mm_pos_list)
        return mm2score
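# Hedged usage sketch: score an off-target site from its 0-based mismatch
# positions within the 20 nt protospacer, and build the precomputed table.
def _example_score():
    score = Score()
    single = score.get([2])      # one mismatch at a weakly penalized position
    double = score.get([5, 18])  # two mismatches
    table = Score.to_dict()      # scores keyed by "pos_pos_..." strings
    return single, double, len(table)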
|
def cast_kwargs(kwargs):
kwargs_copy = kwargs.copy()
for arg, value in kwargs_copy.items():
if isinstance(value, dict):
kwargs_copy[arg] = cast_kwargs(value)
else:
kwargs_copy[arg] = cast_string(kwargs_copy[arg])
return kwargs_copy
def cast_string(s):
if s == "True":
return True
elif s == "False":
return False
elif s == "None":
return None
elif is_int(s):
return int(s)
elif is_float(s):
return float(s)
else:
return s
def is_int(s):
try:
int(s)
return True
except (ValueError, TypeError):
return False
def is_float(s):
try:
float(s)
return True
except (ValueError, TypeError):
return False
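# Example of the casting helpers on a nested kwargs dict of strings; the
# sample values are arbitrary.
if __name__ == "__main__":
    raw = {"retries": "3", "ratio": "0.5", "debug": "False",
           "nested": {"token": "None", "name": "abc"}}
    print(cast_kwargs(raw))
    # {'retries': 3, 'ratio': 0.5, 'debug': False,
    #  'nested': {'token': None, 'name': 'abc'}}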
|
from .analysis import *
from .plot import *
from .generate import *
from .utils import *
__author__ = 'Konstantinos Kavvadias'
__license__ = 'BSD-3-Clause'
__version__ = "0.1.dev11"
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class ClustersOperations(object):
"""ClustersOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.hdinsight.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def _create_initial(
self,
resource_group_name, # type: str
cluster_name, # type: str
parameters, # type: "_models.ClusterCreateParametersExtended"
**kwargs # type: Any
):
# type: (...) -> "_models.Cluster"
cls = kwargs.pop('cls', None) # type: ClsType["_models.Cluster"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-06-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'clusterName': self._serialize.url("cluster_name", cluster_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'ClusterCreateParametersExtended')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('Cluster', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.HDInsight/clusters/{clusterName}'} # type: ignore
def begin_create(
self,
resource_group_name, # type: str
cluster_name, # type: str
parameters, # type: "_models.ClusterCreateParametersExtended"
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.Cluster"]
"""Creates a new HDInsight cluster with the specified parameters.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param cluster_name: The name of the cluster.
:type cluster_name: str
:param parameters: The cluster create request.
:type parameters: ~azure.mgmt.hdinsight.models.ClusterCreateParametersExtended
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either Cluster or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.hdinsight.models.Cluster]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.Cluster"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._create_initial(
resource_group_name=resource_group_name,
cluster_name=cluster_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('Cluster', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'clusterName': self._serialize.url("cluster_name", cluster_name, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.HDInsight/clusters/{clusterName}'} # type: ignore
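    # Hedged usage sketch (comments only, names assumed from the generated SDK
    # surface): with an HDInsightManagementClient instance `client`, a create
    # is typically driven as a long-running operation:
    #     poller = client.clusters.begin_create(resource_group_name,
    #                                           cluster_name, parameters)
    #     cluster = poller.result()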
def update(
self,
resource_group_name, # type: str
cluster_name, # type: str
parameters, # type: "_models.ClusterPatchParameters"
**kwargs # type: Any
):
# type: (...) -> "_models.Cluster"
"""Patch HDInsight cluster with the specified parameters.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param cluster_name: The name of the cluster.
:type cluster_name: str
:param parameters: The cluster patch request.
:type parameters: ~azure.mgmt.hdinsight.models.ClusterPatchParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Cluster, or the result of cls(response)
:rtype: ~azure.mgmt.hdinsight.models.Cluster
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.Cluster"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-06-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.update.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'clusterName': self._serialize.url("cluster_name", cluster_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'ClusterPatchParameters')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('Cluster', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.HDInsight/clusters/{clusterName}'} # type: ignore
def _delete_initial(
self,
resource_group_name, # type: str
cluster_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-06-01"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'clusterName': self._serialize.url("cluster_name", cluster_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.HDInsight/clusters/{clusterName}'} # type: ignore
def begin_delete(
self,
resource_group_name, # type: str
cluster_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Deletes the specified HDInsight cluster.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param cluster_name: The name of the cluster.
:type cluster_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
cluster_name=cluster_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'clusterName': self._serialize.url("cluster_name", cluster_name, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.HDInsight/clusters/{clusterName}'} # type: ignore
def get(
self,
resource_group_name, # type: str
cluster_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.Cluster"
"""Gets the specified cluster.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param cluster_name: The name of the cluster.
:type cluster_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Cluster, or the result of cls(response)
:rtype: ~azure.mgmt.hdinsight.models.Cluster
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.Cluster"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-06-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'clusterName': self._serialize.url("cluster_name", cluster_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('Cluster', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.HDInsight/clusters/{clusterName}'} # type: ignore
def list_by_resource_group(
self,
resource_group_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.ClusterListResult"]
"""Lists the HDInsight clusters in a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ClusterListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.hdinsight.models.ClusterListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ClusterListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-06-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_resource_group.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('ClusterListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.HDInsight/clusters'} # type: ignore
def _resize_initial(
self,
resource_group_name, # type: str
cluster_name, # type: str
role_name, # type: Union[str, "_models.RoleName"]
parameters, # type: "_models.ClusterResizeParameters"
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-06-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._resize_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'clusterName': self._serialize.url("cluster_name", cluster_name, 'str'),
'roleName': self._serialize.url("role_name", role_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'ClusterResizeParameters')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_resize_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.HDInsight/clusters/{clusterName}/roles/{roleName}/resize'} # type: ignore
def begin_resize(
self,
resource_group_name, # type: str
cluster_name, # type: str
role_name, # type: Union[str, "_models.RoleName"]
parameters, # type: "_models.ClusterResizeParameters"
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Resizes the specified HDInsight cluster to the specified size.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param cluster_name: The name of the cluster.
:type cluster_name: str
:param role_name: The constant value for the roleName.
:type role_name: str or ~azure.mgmt.hdinsight.models.RoleName
:param parameters: The parameters for the resize operation.
:type parameters: ~azure.mgmt.hdinsight.models.ClusterResizeParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._resize_initial(
resource_group_name=resource_group_name,
cluster_name=cluster_name,
role_name=role_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'clusterName': self._serialize.url("cluster_name", cluster_name, 'str'),
'roleName': self._serialize.url("role_name", role_name, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_resize.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.HDInsight/clusters/{clusterName}/roles/{roleName}/resize'} # type: ignore
def _update_auto_scale_configuration_initial(
self,
resource_group_name, # type: str
cluster_name, # type: str
role_name, # type: Union[str, "_models.RoleName"]
parameters, # type: "_models.AutoscaleConfigurationUpdateParameter"
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-06-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._update_auto_scale_configuration_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'clusterName': self._serialize.url("cluster_name", cluster_name, 'str'),
'roleName': self._serialize.url("role_name", role_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'AutoscaleConfigurationUpdateParameter')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_update_auto_scale_configuration_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.HDInsight/clusters/{clusterName}/roles/{roleName}/autoscale'} # type: ignore
def begin_update_auto_scale_configuration(
self,
resource_group_name, # type: str
cluster_name, # type: str
role_name, # type: Union[str, "_models.RoleName"]
parameters, # type: "_models.AutoscaleConfigurationUpdateParameter"
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Updates the Autoscale Configuration for HDInsight cluster.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param cluster_name: The name of the cluster.
:type cluster_name: str
:param role_name: The constant value for the roleName.
:type role_name: str or ~azure.mgmt.hdinsight.models.RoleName
:param parameters: The parameters for the update autoscale configuration operation.
:type parameters: ~azure.mgmt.hdinsight.models.AutoscaleConfigurationUpdateParameter
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._update_auto_scale_configuration_initial(
resource_group_name=resource_group_name,
cluster_name=cluster_name,
role_name=role_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'clusterName': self._serialize.url("cluster_name", cluster_name, 'str'),
'roleName': self._serialize.url("role_name", role_name, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_update_auto_scale_configuration.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.HDInsight/clusters/{clusterName}/roles/{roleName}/autoscale'} # type: ignore
def list(
self,
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.ClusterListResult"]
"""Lists all the HDInsight clusters under the subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ClusterListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.hdinsight.models.ClusterListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ClusterListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-06-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('ClusterListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.HDInsight/clusters'} # type: ignore
def _rotate_disk_encryption_key_initial(
self,
resource_group_name, # type: str
cluster_name, # type: str
parameters, # type: "_models.ClusterDiskEncryptionParameters"
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-06-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._rotate_disk_encryption_key_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'clusterName': self._serialize.url("cluster_name", cluster_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'ClusterDiskEncryptionParameters')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_rotate_disk_encryption_key_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.HDInsight/clusters/{clusterName}/rotatediskencryptionkey'} # type: ignore
def begin_rotate_disk_encryption_key(
self,
resource_group_name, # type: str
cluster_name, # type: str
parameters, # type: "_models.ClusterDiskEncryptionParameters"
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Rotate disk encryption key of the specified HDInsight cluster.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param cluster_name: The name of the cluster.
:type cluster_name: str
:param parameters: The parameters for the disk encryption operation.
:type parameters: ~azure.mgmt.hdinsight.models.ClusterDiskEncryptionParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._rotate_disk_encryption_key_initial(
resource_group_name=resource_group_name,
cluster_name=cluster_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'clusterName': self._serialize.url("cluster_name", cluster_name, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_rotate_disk_encryption_key.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.HDInsight/clusters/{clusterName}/rotatediskencryptionkey'} # type: ignore
def get_gateway_settings(
self,
resource_group_name, # type: str
cluster_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.GatewaySettings"
"""Gets the gateway settings for the specified cluster.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param cluster_name: The name of the cluster.
:type cluster_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: GatewaySettings, or the result of cls(response)
:rtype: ~azure.mgmt.hdinsight.models.GatewaySettings
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.GatewaySettings"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-06-01"
accept = "application/json"
# Construct URL
url = self.get_gateway_settings.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'clusterName': self._serialize.url("cluster_name", cluster_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('GatewaySettings', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_gateway_settings.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.HDInsight/clusters/{clusterName}/getGatewaySettings'} # type: ignore
def _update_gateway_settings_initial(
self,
resource_group_name, # type: str
cluster_name, # type: str
parameters, # type: "_models.UpdateGatewaySettingsParameters"
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-06-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._update_gateway_settings_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'clusterName': self._serialize.url("cluster_name", cluster_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'UpdateGatewaySettingsParameters')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_update_gateway_settings_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.HDInsight/clusters/{clusterName}/updateGatewaySettings'} # type: ignore
def begin_update_gateway_settings(
self,
resource_group_name, # type: str
cluster_name, # type: str
parameters, # type: "_models.UpdateGatewaySettingsParameters"
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Configures the gateway settings on the specified cluster.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param cluster_name: The name of the cluster.
:type cluster_name: str
:param parameters: The cluster configurations.
:type parameters: ~azure.mgmt.hdinsight.models.UpdateGatewaySettingsParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._update_gateway_settings_initial(
resource_group_name=resource_group_name,
cluster_name=cluster_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'clusterName': self._serialize.url("cluster_name", cluster_name, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_update_gateway_settings.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.HDInsight/clusters/{clusterName}/updateGatewaySettings'} # type: ignore
def get_azure_async_operation_status(
self,
resource_group_name, # type: str
cluster_name, # type: str
operation_id, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.AsyncOperationResult"
"""The the async operation status.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param cluster_name: The name of the cluster.
:type cluster_name: str
:param operation_id: The long running operation id.
:type operation_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: AsyncOperationResult, or the result of cls(response)
:rtype: ~azure.mgmt.hdinsight.models.AsyncOperationResult
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.AsyncOperationResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-06-01"
accept = "application/json"
# Construct URL
url = self.get_azure_async_operation_status.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'clusterName': self._serialize.url("cluster_name", cluster_name, 'str'),
'operationId': self._serialize.url("operation_id", operation_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('AsyncOperationResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_azure_async_operation_status.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.HDInsight/clusters/{clusterName}/azureasyncoperations/{operationId}'} # type: ignore
def _update_identity_certificate_initial(
self,
resource_group_name, # type: str
cluster_name, # type: str
parameters, # type: "_models.UpdateClusterIdentityCertificateParameters"
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-06-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._update_identity_certificate_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'clusterName': self._serialize.url("cluster_name", cluster_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'UpdateClusterIdentityCertificateParameters')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_update_identity_certificate_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.HDInsight/clusters/{clusterName}/updateClusterIdentityCertificate'} # type: ignore
def begin_update_identity_certificate(
self,
resource_group_name, # type: str
cluster_name, # type: str
parameters, # type: "_models.UpdateClusterIdentityCertificateParameters"
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Updates the cluster identity certificate.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param cluster_name: The name of the cluster.
:type cluster_name: str
:param parameters: The cluster configurations.
:type parameters: ~azure.mgmt.hdinsight.models.UpdateClusterIdentityCertificateParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._update_identity_certificate_initial(
resource_group_name=resource_group_name,
cluster_name=cluster_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'clusterName': self._serialize.url("cluster_name", cluster_name, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_update_identity_certificate.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.HDInsight/clusters/{clusterName}/updateClusterIdentityCertificate'} # type: ignore
def _execute_script_actions_initial(
self,
resource_group_name, # type: str
cluster_name, # type: str
parameters, # type: "_models.ExecuteScriptActionParameters"
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-06-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._execute_script_actions_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'clusterName': self._serialize.url("cluster_name", cluster_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'ExecuteScriptActionParameters')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_execute_script_actions_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.HDInsight/clusters/{clusterName}/executeScriptActions'} # type: ignore
def begin_execute_script_actions(
self,
resource_group_name, # type: str
cluster_name, # type: str
parameters, # type: "_models.ExecuteScriptActionParameters"
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Executes script actions on the specified HDInsight cluster.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param cluster_name: The name of the cluster.
:type cluster_name: str
:param parameters: The parameters for executing script actions.
:type parameters: ~azure.mgmt.hdinsight.models.ExecuteScriptActionParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._execute_script_actions_initial(
resource_group_name=resource_group_name,
cluster_name=cluster_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'clusterName': self._serialize.url("cluster_name", cluster_name, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_execute_script_actions.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.HDInsight/clusters/{clusterName}/executeScriptActions'} # type: ignore
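# Usage sketch (not part of the generated SDK module): how the long-running
# operations above are typically driven from the public client. It assumes the
# azure-identity package and the HDInsightManagementClient entry point of
# azure-mgmt-hdinsight; the subscription id, resource group, cluster name and
# target instance count below are placeholders.
if __name__ == "__main__":
    from azure.identity import DefaultAzureCredential
    from azure.mgmt.hdinsight import HDInsightManagementClient
    from azure.mgmt.hdinsight.models import ClusterResizeParameters

    client = HDInsightManagementClient(DefaultAzureCredential(), "<subscription-id>")
    # begin_resize returns an LROPoller[None]; result() blocks until the
    # resize operation reaches a terminal state.
    poller = client.clusters.begin_resize(
        resource_group_name="my-rg",
        cluster_name="my-cluster",
        role_name="workernode",
        parameters=ClusterResizeParameters(target_instance_count=4),
    )
    poller.result()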
|
from pathlib import Path
from fhir.resources.valueset import ValueSet as _ValueSet
from oops_fhir.utils import ValueSet
from oops_fhir.r4.code_system.structure_map_group_type_mode import (
StructureMapGroupTypeMode as StructureMapGroupTypeMode_,
)
__all__ = ["StructureMapGroupTypeMode"]
_resource = _ValueSet.parse_file(Path(__file__).with_suffix(".json"))
class StructureMapGroupTypeMode(StructureMapGroupTypeMode_):
"""
StructureMapGroupTypeMode
If this is the default rule set to apply for the source type, or this
combination of types.
Status: draft - Version: 4.0.1
http://hl7.org/fhir/ValueSet/map-group-type-mode
"""
class Meta:
resource = _resource
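# Minimal check (illustrative; assumes the packaged JSON file referenced by
# parse_file above is present next to this module): the wrapper exposes the
# parsed ValueSet resource through its Meta class.
if __name__ == "__main__":
    value_set = StructureMapGroupTypeMode.Meta.resource
    print(value_set.url)  # expected: http://hl7.org/fhir/ValueSet/map-group-type-mode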
|
from django.contrib import admin
from .models import SensorNode
from .models import SensorData
admin.site.register(SensorNode)
admin.site.register(SensorData)
|
"""
This file is part of web2py Web Framework (Copyrighted, 2007-2009).
Developed by Massimo Di Pierro <mdipierro@cs.depaul.edu>.
License: GPL v2
"""
import datetime
from storage import Storage
from html import *
import contrib.simplejson as simplejson
import contrib.rss2 as rss2
def xml_rec(value, key):
if isinstance(value, (dict, Storage)):
return TAG[key](*[TAG[k](xml_rec(v, '')) for k, v in value.items()])
elif isinstance(value, list):
return TAG[key](*[TAG.item(xml_rec(item, '')) for item in value])
    elif value is None:
return 'None'
else:
return str(value)
def xml(value, encoding='UTF-8', key='document'):
return ('<?xml version="1.0" encoding="%s"?>' % encoding) + str(xml_rec(value,key))
def json(value):
return simplejson.dumps(value)
def csv(value):
    # CSV serialization is not implemented in this module; callers always get an empty string.
    return ''
def rss(feed):
    if 'entries' not in feed and 'items' in feed:
        feed['entries'] = feed['items']
    now = datetime.datetime.now()
rss = rss2.RSS2(title = feed['title'],
link = feed['link'],
description = feed['description'],
lastBuildDate = feed.get('created_on', now),
items = [rss2.RSSItem(\
title=entry['title'],
link=entry['link'],
description=entry['description'],
pubDate=entry.get('created_on', now)
)\
for entry in feed['entries']
]
)
return rss2.dumps(rss)
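# Usage sketch (illustrative): these serializers are normally called by
# web2py's generic views, but they can be exercised directly from inside a
# web2py checkout. The feed dictionary mirrors the keys read by rss(); all
# values are placeholders.
if __name__ == '__main__':
    print(json({'a': 1, 'b': [1, 2, 3]}))
    print(xml({'name': 'Ada', 'age': 36}))
    print(rss({'title': 'Example feed',
               'link': 'http://example.com',
               'description': 'A sample feed',
               'entries': [{'title': 'First post',
                            'link': 'http://example.com/1',
                            'description': 'Hello'}]}))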
|
class TreeNode:
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution:
def lowestCommonAncestor(self, root: 'TreeNode', p: 'TreeNode', q: 'TreeNode') -> 'TreeNode':
        # root.val lies between p.val and q.val, so root is the LCA
if p.val <= root.val <= q.val or q.val <= root.val <= p.val:
return root
if root.val < p.val and root.val < q.val:
            # root is smaller than both, so the LCA is in the right subtree
return self.lowestCommonAncestor(root.right, p, q)
if root.val > p.val and root.val > q.val:
            # root is larger than both, so the LCA is in the left subtree
return self.lowestCommonAncestor(root.left, p, q)
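# Small driver (not part of the original solution): build the BST
#       6
#      / \
#     2   8
#    / \
#   0   4
# and query the lowest common ancestor of nodes 2 and 4, which is 2.
if __name__ == '__main__':
    root = TreeNode(6)
    root.left = TreeNode(2)
    root.right = TreeNode(8)
    root.left.left = TreeNode(0)
    root.left.right = TreeNode(4)
    p, q = root.left, root.left.right  # nodes 2 and 4
    print(Solution().lowestCommonAncestor(root, p, q).val)  # expected: 2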
|
import tensorflow as tf
from baselines.ppo2 import ppo2
from baselines.common.models import build_impala_cnn
from baselines.common.mpi_util import setup_mpi_gpus
from procgen import ProcgenEnv
from baselines.common.vec_env import (
VecExtractDictObs,
VecMonitor,
VecFrameStack,
VecNormalize
)
from baselines import logger
from mpi4py import MPI
import argparse
from .alternate_ppo2 import alt_ppo2
import os
from baselines.common import set_global_seeds
from baselines.common.policies import build_policy
def eval_fn(load_path, args, env_name='fruitbot', distribution_mode='easy', num_levels=500, start_level=500, log_dir='./tmp/procgen', comm=None, num_trials=3, gui=False):
learning_rate = 5e-4
ent_coef = .01
gamma = .999
lam = .95
nsteps = 256
nminibatches = 8
ppo_epochs = 3
clip_range = .2
use_vf_clipping = True
vf_coef = 0.5
max_grad_norm = 0.5
mpi_rank_weight = 1
log_interval = 1
seed=None
log_comm = comm.Split(0, 0)
format_strs = ['csv', 'stdout'] if log_comm.Get_rank() == 0 else []
logger.configure(comm=log_comm, dir=log_dir, format_strs=format_strs)
logger.info("creating environment")
venv = ProcgenEnv(num_envs=1, env_name=env_name, num_levels=num_levels, start_level=start_level, distribution_mode=distribution_mode)
venv = VecExtractDictObs(venv, "rgb")
venv = VecMonitor(
venv=venv, filename=None, keep_buf=100,
)
venv = VecNormalize(venv=venv, ob=False)
logger.info("creating tf session")
setup_mpi_gpus()
config = tf.ConfigProto()
config.gpu_options.allow_growth = True #pylint: disable=E1101
sess = tf.Session(config=config)
sess.__enter__()
conv_fn = lambda x: build_impala_cnn(x, depths=[16,32,32], emb_size=256)
logger.info(f"evaluating")
set_global_seeds(seed)
policy = build_policy(venv, conv_fn)
# Get the nb of env
nenvs = venv.num_envs
# Get state_space and action_space
ob_space = venv.observation_space
ac_space = venv.action_space
# Calculate the batch_size
nbatch = nenvs * nsteps
nbatch_train = nbatch // nminibatches
# Instantiate the model object (that creates act_model and train_model)
from .alternate_ppo2.model import Model
model_fn = Model
model = model_fn(policy=policy, ob_space=ob_space, ac_space=ac_space, nbatch_act=nenvs, nbatch_train=nbatch_train,
nsteps=nsteps, ent_coef=ent_coef, vf_coef=vf_coef,
max_grad_norm=max_grad_norm, comm=comm, mpi_rank_weight=mpi_rank_weight)
if os.path.isfile(load_path):
alt_ppo2.eval(
network=conv_fn,
nsteps=nsteps,
ent_coef=ent_coef,
vf_coef=vf_coef,
max_grad_norm=max_grad_norm,
gamma=gamma,
lam=lam,
log_interval=log_interval,
nminibatches=nminibatches,
noptepochs=ppo_epochs,
load_path=load_path,
mpi_rank_weight=mpi_rank_weight,
comm=comm,
clip_vf=use_vf_clipping,
lr=learning_rate,
cliprange=clip_range,
policy=policy,
nenvs=nenvs,
ob_space=ob_space,
ac_space=ac_space,
nbatch=nbatch,
nbatch_train=nbatch_train,
model_fn=model_fn,
model=model,
num_trials=num_trials,
num_levels=num_levels,
start_level=start_level,
gui=gui,
args=args
)
elif os.path.isdir(load_path):
for file in os.listdir(load_path):
log_comm = comm.Split(0, 0)
format_strs = ['csv', 'stdout'] if log_comm.Get_rank() == 0 else []
logger.configure(comm=log_comm, dir=log_dir+'/'+file, format_strs=format_strs)
alt_ppo2.eval(
network=conv_fn,
nsteps=nsteps,
ent_coef=ent_coef,
vf_coef=vf_coef,
max_grad_norm=max_grad_norm,
gamma=gamma,
lam=lam,
log_interval=log_interval,
nminibatches=nminibatches,
noptepochs=ppo_epochs,
load_path=load_path+'/'+file,
mpi_rank_weight=mpi_rank_weight,
comm=comm,
clip_vf=use_vf_clipping,
lr=learning_rate,
cliprange=clip_range,
policy=policy,
nenvs=nenvs,
ob_space=ob_space,
ac_space=ac_space,
nbatch=nbatch,
nbatch_train=nbatch_train,
model_fn=model_fn,
model=model,
num_trials=num_trials,
num_levels=num_levels,
start_level=start_level,
gui=gui,
args=args
)
else:
print('Model path does not exist.')
return
def main():
parser = argparse.ArgumentParser(description='Process procgen evaluation arguments.')
parser.add_argument('--load_model', type=str, required=True)
parser.add_argument('--log_dir', type=str, default='./logs/eval')
parser.add_argument('--env_name', type=str, default='fruitbot')
parser.add_argument('--distribution_mode', type=str, default='easy', choices=["easy", "hard", "exploration", "memory", "extreme"])
parser.add_argument('--num_levels', type=int, default=500)
parser.add_argument('--start_level', type=int, default=0)
parser.add_argument('--num_trials', type=int, default=3)
parser.add_argument('--gui', action='store_true')
args = parser.parse_args()
comm = MPI.COMM_WORLD
eval_fn(args.load_model,
log_dir=args.log_dir,
env_name=args.env_name,
distribution_mode=args.distribution_mode,
num_levels=args.num_levels,
start_level=args.start_level,
num_trials=args.num_trials,
comm=comm,
gui=args.gui,
args=args
)
if __name__ == '__main__':
main()
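# Example invocation (sketch): because of the relative imports above, this
# module must be run with -m from its parent package; "train_procgen" below is
# a guess at that package name, and the checkpoint path is a placeholder.
#
#   python -m train_procgen.evaluate \
#       --load_model ./checkpoints/00100 \
#       --env_name fruitbot --distribution_mode easy \
#       --num_levels 500 --start_level 0 --num_trials 3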
|
from random import randint
from time import sleep
lista = list()
jogos = list()
print('-' * 30)
print('{:^30}'.format('PLAY MEGA SENA'))
print('-' * 30)
quant = int(input('How many games do you want me to draw? '))
tot = 1
while tot <= quant:
cont = 0
while True:
num = randint(1, 60)
if num not in lista:
lista.append(num)
cont += 1
if cont >= 6:
break
lista.sort()
jogos.append(lista[:])
lista.clear()
tot += 1
print('-=' * 3, f' DRAWING {quant} GAMES ', '=-' * 3)
for i, l in enumerate(jogos):
    print(f'Game {i+1}: {l}')
sleep(0.5)
print('-=' * 5, '< GOOD LUCK! >', '=-' * 5)
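# Design note: the inner while-loop samples without replacement by rejecting
# duplicates. The standard library can do the same in one call; a minimal
# alternative sketch (same result, not used above):
#
#   from random import sample
#   jogos = [sorted(sample(range(1, 61), 6)) for _ in range(quant)]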
|
"""
WSGI config for ChamberOfSecret project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'ChamberOfSecret.settings')
application = get_wsgi_application()
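# Deployment sketch (assumes a WSGI server such as gunicorn is installed):
# any WSGI-compliant server can serve the "application" object above, e.g.
#
#   gunicorn ChamberOfSecret.wsgi:application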
|
from __future__ import print_function
import os
from importlib import import_module, reload
from pathlib import Path
from _pytest.pytester import Testdir
from nbformat import read
from pytest import ExitCode
from .helper import failing_nb, passing_nb, write_nb
pytest_plugins = "pytester"
NB_VERSION = 4
def test_import():
reload(import_module("nbmake.nb_result"))
reload(import_module("nbmake.nb_run"))
reload(import_module("nbmake.pytest_plugin"))
reload(import_module("nbmake.pytest_items"))
def test_when_nb_present_then_collected(testdir: Testdir):
write_nb(passing_nb, Path(testdir.tmpdir) / "a.ipynb")
items, hook_recorder = testdir.inline_genitems("--nbmake")
assert hook_recorder.ret == ExitCode.OK
assert len(items) == 1
def test_when_ignored_none_collected(testdir: Testdir):
write_nb(passing_nb, Path(testdir.tmpdir) / "a.ipynb")
items, hook_recorder = testdir.inline_genitems(
"--nbmake", "--ignore-glob", "*.ipynb"
)
assert hook_recorder.ret == ExitCode.NO_TESTS_COLLECTED
assert len(items) == 0
def test_when_in_build_dir_none_collected(testdir: Testdir):
build_dir = Path("_build")
build_dir.mkdir()
write_nb(passing_nb, build_dir / "a.ipynb")
items, hook_recorder = testdir.inline_genitems("--nbmake")
assert hook_recorder.ret == ExitCode.NO_TESTS_COLLECTED
assert len(items) == 0
def test_when_parallel_passing_nbs_then_ok(testdir: Testdir):
[write_nb(passing_nb, Path(f"{i}.ipynb")) for i in range(20)]
hook_recorder = testdir.inline_run("--nbmake", "-n=auto")
assert hook_recorder.ret == ExitCode.OK
def test_when_passing_nbs_then_ok(testdir: Testdir):
nbs = [
Path(testdir.tmpdir) / "a.ipynb",
Path(testdir.tmpdir) / "b nb.ipynb",
Path(testdir.tmpdir) / Path("sub dir/a.ipynb"),
Path(testdir.tmpdir) / Path("sub dir/b.ipynb"),
]
testdir.mkdir("sub dir")
[write_nb(passing_nb, path) for path in nbs]
hook_recorder = testdir.inline_run("--nbmake", "-vvv")
for path in nbs:
nb = read(str(path), NB_VERSION)
for cell in nb.cells:
if "outputs" in cell:
assert cell.outputs == []
assert hook_recorder.ret == ExitCode.OK
def test_when_overwrite_then_overwritten(testdir: Testdir):
nbs = [
Path(testdir.tmpdir) / Path("sub dir/b.ipynb"),
]
testdir.mkdir("sub dir")
[write_nb(["1+1"], path) for path in nbs]
hook_recorder = testdir.inline_run("--nbmake", "--overwrite")
for path in nbs:
nb = read(str(path), NB_VERSION)
for cell in nb.cells:
if "outputs" in cell:
assert cell.outputs != []
assert hook_recorder.ret == ExitCode.OK
def test_when_run_from_non_root_then_ok(testdir: Testdir):
nbs = [
Path(testdir.tmpdir) / Path("sub dir/b.ipynb"),
]
testdir.mkdir("sub dir")
Path("pytest.ini").write_text("")
os.chdir("sub dir")
[write_nb(passing_nb, path) for path in nbs]
hook_recorder = testdir.inline_run("--nbmake", "-vvv")
assert hook_recorder.ret == ExitCode.OK
def test_when_failing_nb_then_fail(testdir: Testdir):
write_nb(failing_nb, Path(testdir.tmpdir) / "a.ipynb")
hook_recorder = testdir.inline_run("--nbmake", "-v")
assert hook_recorder.ret == ExitCode.TESTS_FAILED
def test_when_no_html_flag_then_no_build_dir(testdir: Testdir):
write_nb(failing_nb, Path(testdir.tmpdir) / "a.ipynb")
hook_recorder = testdir.inline_run("--nbmake")
assert hook_recorder.ret == ExitCode.TESTS_FAILED
assert not Path("_build").exists()
def test_when_init_then_passes(testdir: Testdir):
example_dir = Path(testdir.tmpdir) / "example"
example_dir.mkdir()
write_nb(passing_nb, example_dir / "a.ipynb")
(example_dir / "__init__.py").write_text("")
hook_recorder = testdir.inline_run("--nbmake")
assert hook_recorder.ret == ExitCode.OK
def test_when_timeout_flag_then_uses_as_default(testdir: Testdir):
example_dir = Path(testdir.tmpdir) / "example"
example_dir.mkdir()
write_nb(["import time; time.sleep(2)"], example_dir / "a.ipynb")
hook_recorder = testdir.inline_run("--nbmake", "--nbmake-timeout=3")
assert hook_recorder.ret == ExitCode.OK
hook_recorder = testdir.inline_run("--nbmake", "--nbmake-timeout=1")
assert hook_recorder.ret == ExitCode.TESTS_FAILED
def test_when_explicit_metadata_then_ignore_timeout(testdir: Testdir):
example_dir = Path(testdir.tmpdir) / "example"
example_dir.mkdir()
write_nb(
["import time; time.sleep(2)"],
example_dir / "a.ipynb",
metadata={"execution": {"timeout": 10}},
)
hook_recorder = testdir.inline_run("--nbmake", "--nbmake-timeout=1")
assert hook_recorder.ret == ExitCode.OK
def test_when_kernel_passed_then_override(testdir: Testdir):
write_nb(
passing_nb,
Path(f"x.ipynb"),
metadata={
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "blah",
}
},
)
hook_recorder = testdir.inline_run("--nbmake", "--nbmake-kernel=python3")
assert hook_recorder.ret == ExitCode.OK
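# The flags exercised by these tests map directly onto command-line usage of
# the plugin; a typical invocation against a repository of notebooks would be
# something like (values are illustrative):
#
#   pytest --nbmake --nbmake-timeout=300 --nbmake-kernel=python3 -n=auto
#
# where -n=auto requires pytest-xdist, as in test_when_parallel_passing_nbs_then_ok.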
|
"""
This is the Orchestra Project Python API.
TODO(marcua): Move this file to its own pip/github project.
"""
import json
import logging
from datetime import datetime
from time import mktime
from wsgiref.handlers import format_date_time
import requests
from django.conf import settings
from httpsig.requests_auth import HTTPSignatureAuth
from orchestra.utils.convert_key_to_int import convert_key_to_int
logger = logging.getLogger(__name__)
_httpsig_auth = HTTPSignatureAuth(key_id=settings.ORCHESTRA_PROJECT_API_KEY,
secret=settings.ORCHESTRA_PROJECT_API_SECRET,
algorithm='hmac-sha256')
_api_root_url = '{}/orchestra/api/project/'.format(settings.ORCHESTRA_URL)
class OrchestraError(Exception):
pass
def _make_api_request(method, endpoint, query_params='', *args, **kwargs):
func = getattr(requests, method)
# Adding 'date' header as per
# https://github.com/zzsnzmn/py-http-signature/blob/e2e2c753db7da45fab4b215d84e8d490bd708833/http_signature/sign.py#L155 # noqa
headers = {'date': format_date_time(mktime(datetime.now().timetuple())),
'X-Api-Version': '~6.5',
'X-Api-Key': settings.ORCHESTRA_PROJECT_API_KEY}
headers.update(kwargs.pop('headers', {}))
all_kwargs = {'auth': _httpsig_auth, 'headers': headers}
all_kwargs.update(kwargs)
url = '{}{}/{}'.format(_api_root_url, endpoint, query_params)
response = func(url, *args, **all_kwargs)
if response.status_code != 200 and response.status_code != 201:
raise OrchestraError(response.text)
return response
def get_workflow_types():
response = _make_api_request('get', 'workflow_types')
return json.loads(response.text)['workflows']
def get_project_details_url(project_id):
data = {'project_id': project_id}
response = _make_api_request('post', 'project_details_url',
data=json.dumps(data))
url = json.loads(response.text)['project_details_url']
return url
def create_orchestra_project(client,
workflow_slug,
workflow_version_slug,
description,
priority,
project_data,
task_class):
data = {
'workflow_slug': workflow_slug,
'workflow_version_slug': workflow_version_slug,
'description': description,
'priority': priority,
'project_data': project_data,
'task_class': task_class
}
response = _make_api_request('post', 'create_project',
data=json.dumps(data))
project_id = json.loads(response.text)['project_id']
return project_id
def get_project_information(project_ids):
data = {
'project_ids': project_ids
}
response = _make_api_request('post', 'project_information',
data=json.dumps(data))
return json.loads(response.text, object_hook=convert_key_to_int)
def create_todos(todos):
response = _make_api_request('post', 'todo-api',
headers={'Content-type': 'application/json'},
data=json.dumps(todos))
return json.loads(response.text)
def build_url_params(project_id, step_slug, **filters):
project_param = 'project__id={}'.format(project_id) if project_id else ''
step_slug_param = '&step__slug={}'.format(
step_slug) if step_slug is not None else ''
additional_filters = '&q={}'.format(
json.dumps(filters)) if filters else ''
query_params = '?{}{}{}'.format(
project_param, step_slug_param, additional_filters)
return query_params
def get_todos(project_id, step_slug=None, **filters):
"""
project_id: int
step_slug: str
filters: dict. Example: {'some_fk_field__slug': 'cool_slug'}
See GenericTodoViewset and QueryParamsFilterBackend
to get more info on filtering.
"""
if project_id is None and 'id__in' not in filters:
raise OrchestraError('project_id is required')
query_params = build_url_params(project_id, step_slug, **filters)
response = _make_api_request('get', 'todo-api', query_params)
return json.loads(response.text)
def update_todos(updated_todos):
response = _make_api_request('patch', 'todo-api',
headers={'Content-type': 'application/json'},
data=json.dumps(updated_todos))
return json.loads(response.text)
def delete_todos(todo_ids):
response = _make_api_request('delete', 'todo-api',
data=json.dumps(todo_ids))
return json.loads(response.text)
def assign_worker_to_task(worker_id, task_id):
data = {
'worker_id': worker_id,
'task_id': task_id,
}
response = _make_api_request('post', 'assign_worker_to_task',
data=json.dumps(data))
return json.loads(response.text)
def message_project_team(project_id, message):
data = {
'project_id': project_id,
'message': message
}
response = _make_api_request('post', 'message_project_team',
data=json.dumps(data))
return json.loads(response.text)
def get_todo_templates():
response = _make_api_request('get', 'todo_templates')
return json.loads(response.text)
def create_todos_from_template(todolist_template_slug, project_id,
step_slug, additional_data):
data = {
'todolist_template_slug': todolist_template_slug,
'project_id': project_id,
'step_slug': step_slug,
'additional_data': additional_data,
}
response = _make_api_request('post', 'create_todos_from_template',
data=json.dumps(data))
return json.loads(response.text)
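# Usage sketch (illustrative): the helpers above are thin wrappers over the
# signed HTTP endpoints, so a client script can call them directly once the
# ORCHESTRA_* Django settings are configured. The project id and step slug
# below are placeholders.
if __name__ == '__main__':
    print(get_workflow_types())
    print(get_todos(project_id=123, step_slug='review'))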
|
# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
import boto3
import os, logging, json
from botocore.retries import bucket
#from pkg_resources import Version
from crhelper import CfnResource
from botocore.exceptions import ClientError
# declare helper and logging
helper = CfnResource()
logger = logging.getLogger()
logger.setLevel(logging.INFO)
# get env variables
DrRegion = os.environ['DrRegion']
S3BucketName = os.environ['S3BucketName']
OriginalLambdaRoleName = os.environ['OriginalLambdaRoleName']
OriginalPolicyName = os.environ['OriginalPolicyName']
def lambda_handler(event, context):
helper(event, context)
@helper.create
@helper.update
def create_resources(event, context):
s3 = boto3.client('s3', region_name = DrRegion)
account_id = event['ResourceProperties']['AccountID']
    # Format the DR bucket name
    dr_bucket_name = f'disasterrecovery-{S3BucketName}'
    # Create the S3 bucket for DR
    response = create_bucket(s3, dr_bucket_name, DrRegion)
    logger.info(f'response from bucket creation: {response}')
    # Enable bucket versioning
    response = enable_bucket_versioning(s3, dr_bucket_name)
    logger.info(f'response to setting bucket versioning: {response}')
    bucket_arn = f'arn:aws:s3:::{dr_bucket_name}'
helper.Data['bucket_arn'] = bucket_arn
def create_bucket(s3, dr_bucket_name, DrRegion):
try:
response = s3.create_bucket(
Bucket = dr_bucket_name,
CreateBucketConfiguration = {
'LocationConstraint': DrRegion
}
)
return response
except ClientError as e:
logger.info(e.response)
def enable_bucket_versioning(s3, dr_bucket_name):
try:
response = s3.put_bucket_versioning(
Bucket = dr_bucket_name,
VersioningConfiguration={
'Status': 'Enabled'
}
)
return response
except ClientError as e:
logger.info(e.response)
@helper.delete
def delete_bucket(event,context):
try:
        bucket_name = f'disasterrecovery-{S3BucketName}'
s3_resource = boto3.resource('s3', region_name = DrRegion)
s3 = boto3.client('s3', region_name = DrRegion)
bucket = s3_resource.Bucket(bucket_name)
        # Delete the bucket's objects first
logger.info(f'Deleting bucket objects from bucket:{bucket_name}')
bucket.objects.all().delete()
logger.info(f'objects deleted successfully from bucket:{bucket_name}')
        # Then delete the bucket itself
logger.info(f'Deleting s3 bucket {bucket_name}')
response = s3.delete_bucket(
Bucket = bucket_name)
logger.info(response)
logger.info(f'deleted s3 bucket {bucket_name}')
return response
except ClientError as e:
logger.info(e.response)
|
# -*- coding: utf-8 -*-
"""
Created on Thu Jun 3 17:03:45 2021
@author: shangfr
"""
import joblib
import operator
from functools import reduce
from whoosh.index import open_dir
from whoosh.qparser import QueryParser
import pkg_resources
# Locate the prebuilt whoosh index shipped with the cnsyn package
INDEX_DIR = pkg_resources.resource_filename('cnsyn', 'query')
ix = open_dir(INDEX_DIR)
# Load the serialized nearest-neighbour tree and the word-id mapping
words_tree = joblib.load(INDEX_DIR + '/words_tree_mini.m')
tree_model = words_tree['model']
words_id = words_tree['words']
# Embedding matrix backing the tree, one row per word id
words_emb = tree_model.get_arrays()[0]
def search(word, topK=10, origin='all'):
'''
Parameters
----------
    word : str
        The word to look up.
    topK : int, optional
        Number of synonyms to return. The default is 10.
    origin : str, optional
        Source lexicon ('all', 'wiki', 'cndict', ...). The default is 'all'.
    Returns
    -------
    sim_words : list
        List of synonyms.
'''
word_list = []
with ix.searcher() as searcher:
query = QueryParser("words", ix.schema).parse(word)
results = searcher.search(query, limit=None)
if len(results) > 0:
for result in results:
if origin == 'all':
word_list.append(result.fields()['words'].split())
elif origin == result.fields()['origin']:
word_list.append(result.fields()['words'].split())
else:
pass
if word_list != []:
sim_words = reduce(operator.add, word_list)
sim_words = list(set(sim_words))
else:
sim_words = word_list
return sim_words[0:topK]
def anns(word, topK=10):
'''
Parameters
----------
    word : str
        The word to look up.
    topK : int, optional
        Number of nearest neighbours to return. The default is 10.
    Returns
    -------
    sim_words : list
        List of synonyms found by approximate nearest-neighbour search.
'''
word_key = [x[0] for x in words_id.items() if word == x[1]]
if word_key == []:
sim_words = []
else:
word_emb = words_emb[word_key]
ind = tree_model.query(
word_emb, k=topK, return_distance=False)
sim_words = [words_id.get(i) for i in ind.ravel()]
return sim_words
if __name__ == '__main__':
    # Query synonyms (all lexicons)
    word = '中山广场'
    search(word)
    search(word, topK=3)
    # Restrict to the wiki lexicon
    search(word, origin='wiki')
    # Restrict to the Chinese synonym dictionary lexicon
    search(word, origin='cndict')
    # Vector-based (approximate nearest-neighbour) synonym lookup
    anns(word)
    anns(word, topK=3)
    word = input("Please input the word you want to search: ")
    try:
        step_result = search(word)
    except Exception as exc:
        step_result = "Something went wrong: {}".format(exc)
    print(step_result)
|
# Copyright 2018 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the LaunchDescriptionSource class."""
from launch import LaunchDescription
from launch import LaunchDescriptionSource
import pytest
def test_launch_description_source_constructors():
"""Test the constructors for LaunchDescriptionSource class."""
LaunchDescriptionSource()
LaunchDescriptionSource(LaunchDescription())
LaunchDescriptionSource(LaunchDescription(), 'location')
LaunchDescriptionSource(LaunchDescription(), 'location', 'method description')
def test_launch_description_source_methods():
"""Test the methods of the LaunchDescriptionSource class."""
class MockLaunchContext:
def perform_substitution(self, substitution):
return substitution.perform(None)
lds = LaunchDescriptionSource()
with pytest.raises(RuntimeError):
lds.get_launch_description(MockLaunchContext())
ld = LaunchDescription()
lds = LaunchDescriptionSource(ld)
assert lds.get_launch_description(MockLaunchContext()) == ld
ld = LaunchDescription()
lds = LaunchDescriptionSource(ld, 'location')
assert lds.get_launch_description(MockLaunchContext()) == ld
assert lds.location == 'location'
ld = LaunchDescription()
lds = LaunchDescriptionSource(ld, 'location', 'method description')
assert lds.get_launch_description(MockLaunchContext()) == ld
assert lds.location == 'location'
assert lds.method == 'method description'
|
"""Support for aggregation-based AMG."""
from __future__ import absolute_import
from warnings import warn
import numpy as np
from scipy.sparse import csr_matrix, isspmatrix_csr, isspmatrix_bsr,\
SparseEfficiencyWarning
from pyamg.multilevel import multilevel_solver
from pyamg.relaxation.smoothing import change_smoothers
from pyamg.util.utils import relaxation_as_linear_operator,\
eliminate_diag_dom_nodes, blocksize,\
levelize_strength_or_aggregation, levelize_smooth_or_improve_candidates
from pyamg.strength import classical_strength_of_connection,\
symmetric_strength_of_connection, evolution_strength_of_connection,\
energy_based_strength_of_connection, distance_strength_of_connection,\
algebraic_distance, affinity_distance
from .aggregate import standard_aggregation, naive_aggregation,\
lloyd_aggregation
from .tentative import fit_candidates
from .smooth import jacobi_prolongation_smoother,\
richardson_prolongation_smoother, energy_prolongation_smoother
__all__ = ['smoothed_aggregation_solver']
def smoothed_aggregation_solver(A, B=None, BH=None,
symmetry='hermitian', strength='symmetric',
aggregate='standard',
smooth=('jacobi', {'omega': 4.0/3.0}),
presmoother=('block_gauss_seidel',
{'sweep': 'symmetric'}),
postsmoother=('block_gauss_seidel',
{'sweep': 'symmetric'}),
improve_candidates=[('block_gauss_seidel',
{'sweep': 'symmetric',
'iterations': 4}),
None],
max_levels=10, max_coarse=10,
diagonal_dominance=False,
keep=False, **kwargs):
"""Create a multilevel solver using classical-style Smoothed Aggregation (SA).
Parameters
----------
A : csr_matrix, bsr_matrix
Sparse NxN matrix in CSR or BSR format
B : None, array_like
Right near-nullspace candidates stored in the columns of an NxK array.
The default value B=None is equivalent to B=ones((N,1))
BH : None, array_like
Left near-nullspace candidates stored in the columns of an NxK array.
BH is only used if symmetry='nonsymmetric'.
        The default value BH=None is equivalent to BH=B.copy()
symmetry : string
'symmetric' refers to both real and complex symmetric
'hermitian' refers to both complex Hermitian and real Hermitian
'nonsymmetric' i.e. nonsymmetric in a hermitian sense
Note, in the strictly real case, symmetric and hermitian are the same.
Note, this flag does not denote definiteness of the operator.
strength : string or list
Method used to determine the strength of connection between unknowns of
the linear system. Method-specific parameters may be passed in using a
tuple, e.g. strength=('symmetric',{'theta' : 0.25 }). If strength=None,
all nonzero entries of the matrix are considered strong.
Choose from 'symmetric', 'classical', 'evolution', 'algebraic_distance',
'affinity', ('predefined', {'C' : csr_matrix}), None
aggregate : string or list
Method used to aggregate nodes.
Choose from 'standard', 'lloyd', 'naive',
('predefined', {'AggOp' : csr_matrix})
smooth : list
Method used to smooth the tentative prolongator. Method-specific
parameters may be passed in using a tuple, e.g. smooth=
('jacobi',{'filter' : True }).
Choose from 'jacobi', 'richardson', 'energy', None
presmoother : tuple, string, list
Defines the presmoother for the multilevel cycling. The default block
Gauss-Seidel option defaults to point-wise Gauss-Seidel, if the matrix
is CSR or is a BSR matrix with blocksize of 1.
postsmoother : tuple, string, list
Same as presmoother, except defines the postsmoother.
improve_candidates : tuple, string, list
The ith entry defines the method used to improve the candidates B on
level i. If the list is shorter than max_levels, then the last entry
will define the method for all levels lower. If tuple or string, then
this single relaxation descriptor defines improve_candidates on all
levels.
The list elements are relaxation descriptors of the form used for
presmoother and postsmoother. A value of None implies no action on B.
max_levels : integer
Maximum number of levels to be used in the multilevel solver.
max_coarse : integer
Maximum number of variables permitted on the coarse grid.
diagonal_dominance : bool, tuple
If True (or the first tuple entry is True), then avoid coarsening
diagonally dominant rows. The second tuple entry requires a
dictionary, where the key value 'theta' is used to tune the diagonal
dominance threshold.
keep : bool
Flag to indicate keeping extra operators in the hierarchy for
diagnostics. For example, if True, then strength of connection (C),
tentative prolongation (T), and aggregation (AggOp) are kept.
Other Parameters
----------------
cycle_type : ['V','W','F']
        Structure of the multigrid cycle
    coarse_solver : ['splu', 'lu', 'cholesky', 'pinv', 'gauss_seidel', ... ]
Solver used at the coarsest level of the MG hierarchy.
Optionally, may be a tuple (fn, args), where fn is a string such as
['splu', 'lu', ...] or a callable function, and args is a dictionary of
arguments to be passed to fn.
Returns
-------
ml : multilevel_solver
Multigrid hierarchy of matrices and prolongation operators
See Also
--------
multilevel_solver, classical.ruge_stuben_solver,
aggregation.smoothed_aggregation_solver
Notes
-----
- This method implements classical-style SA, not root-node style SA
(see aggregation.rootnode_solver).
- The additional parameters are passed through as arguments to
multilevel_solver. Refer to pyamg.multilevel_solver for additional
documentation.
- At each level, four steps are executed in order to define the coarser
level operator.
1. Matrix A is given and used to derive a strength matrix, C.
2. Based on the strength matrix, indices are grouped or aggregated.
3. The aggregates define coarse nodes and a tentative prolongation
operator T is defined by injection
4. The tentative prolongation operator is smoothed by a relaxation
scheme to improve the quality and extent of interpolation from the
aggregates to fine nodes.
- The parameters smooth, strength, aggregate, presmoother, postsmoother
can be varied on a per level basis. For different methods on
different levels, use a list as input so that the i-th entry defines
the method at the i-th level. If there are more levels in the
hierarchy than list entries, the last entry will define the method
for all levels lower.
Examples are:
smooth=[('jacobi', {'omega':1.0}), None, 'jacobi']
        presmoother=[('block_gauss_seidel', {'sweep': 'symmetric'}), 'sor']
aggregate=['standard', 'naive']
strength=[('symmetric', {'theta':0.25}), ('symmetric', {'theta':0.08})]
- Predefined strength of connection and aggregation schemes can be
specified. These options are best used together, but aggregation can
be predefined while strength of connection is not.
For predefined strength of connection, use a list consisting of
tuples of the form ('predefined', {'C' : C0}), where C0 is a
csr_matrix and each degree-of-freedom in C0 represents a supernode.
For instance to predefine a three-level hierarchy, use
[('predefined', {'C' : C0}), ('predefined', {'C' : C1}) ].
Similarly for predefined aggregation, use a list of tuples. For
instance to predefine a three-level hierarchy, use [('predefined',
{'AggOp' : Agg0}), ('predefined', {'AggOp' : Agg1}) ], where the
dimensions of A, Agg0 and Agg1 are compatible, i.e. Agg0.shape[1] ==
A.shape[0] and Agg1.shape[1] == Agg0.shape[0]. Each AggOp is a
csr_matrix.
Examples
--------
>>> from pyamg import smoothed_aggregation_solver
>>> from pyamg.gallery import poisson
>>> from scipy.sparse.linalg import cg
>>> import numpy as np
>>> A = poisson((100,100), format='csr') # matrix
>>> b = np.ones((A.shape[0])) # RHS
>>> ml = smoothed_aggregation_solver(A) # AMG solver
>>> M = ml.aspreconditioner(cycle='V') # preconditioner
>>> x,info = cg(A, b, tol=1e-8, maxiter=30, M=M) # solve with CG
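    The returned hierarchy can also be used as a standalone solver; a minimal
    sketch (the tolerance below is an arbitrary choice):

    >>> x = ml.solve(b, tol=1e-8)                    # standalone AMG solve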
References
----------
.. [1996VaMaBr] Vanek, P. and Mandel, J. and Brezina, M.,
"Algebraic Multigrid by Smoothed Aggregation for
Second and Fourth Order Elliptic Problems",
Computing, vol. 56, no. 3, pp. 179--196, 1996.
http://citeseer.ist.psu.edu/vanek96algebraic.html
"""
if not (isspmatrix_csr(A) or isspmatrix_bsr(A)):
try:
A = csr_matrix(A)
warn("Implicit conversion of A to CSR", SparseEfficiencyWarning)
except BaseException:
raise TypeError('Argument A must have type csr_matrix or bsr_matrix, or be convertible to csr_matrix')
A = A.asfptype()
if (symmetry != 'symmetric') and (symmetry != 'hermitian') and\
(symmetry != 'nonsymmetric'):
raise ValueError('expected \'symmetric\', \'nonsymmetric\' or \'hermitian\' for the symmetry parameter ')
A.symmetry = symmetry
if A.shape[0] != A.shape[1]:
raise ValueError('expected square matrix')
# Right near nullspace candidates use constant for each variable as default
if B is None:
B = np.kron(np.ones((int(A.shape[0]/blocksize(A)), 1), dtype=A.dtype),
np.eye(blocksize(A), dtype=A.dtype))
else:
B = np.asarray(B, dtype=A.dtype)
if len(B.shape) == 1:
B = B.reshape(-1, 1)
if B.shape[0] != A.shape[0]:
raise ValueError('The near null-space modes B have incorrect dimensions for matrix A')
if B.shape[1] < blocksize(A):
warn('Having less target vectors, B.shape[1], than blocksize of A can degrade convergence factors.')
# Left near nullspace candidates
if A.symmetry == 'nonsymmetric':
if BH is None:
BH = B.copy()
else:
BH = np.asarray(BH, dtype=A.dtype)
if len(BH.shape) == 1:
BH = BH.reshape(-1, 1)
if BH.shape[1] != B.shape[1]:
raise ValueError('The number of left and right near null-space modes B and BH, must be equal')
if BH.shape[0] != A.shape[0]:
raise ValueError('The near null-space modes BH have incorrect dimensions for matrix A')
# Levelize the user parameters, so that they become lists describing the
# desired user option on each level.
max_levels, max_coarse, strength =\
levelize_strength_or_aggregation(strength, max_levels, max_coarse)
max_levels, max_coarse, aggregate =\
levelize_strength_or_aggregation(aggregate, max_levels, max_coarse)
improve_candidates =\
levelize_smooth_or_improve_candidates(improve_candidates, max_levels)
smooth = levelize_smooth_or_improve_candidates(smooth, max_levels)
# Construct multilevel structure
levels = []
levels.append(multilevel_solver.level())
levels[-1].A = A # matrix
# Append near nullspace candidates
levels[-1].B = B # right candidates
if A.symmetry == 'nonsymmetric':
levels[-1].BH = BH # left candidates
while len(levels) < max_levels and\
int(levels[-1].A.shape[0]/blocksize(levels[-1].A)) > max_coarse:
extend_hierarchy(levels, strength, aggregate, smooth,
improve_candidates, diagonal_dominance, keep)
ml = multilevel_solver(levels, **kwargs)
change_smoothers(ml, presmoother, postsmoother)
return ml
def extend_hierarchy(levels, strength, aggregate, smooth, improve_candidates,
diagonal_dominance=False, keep=True):
"""Extend the multigrid hierarchy.
Service routine to implement the strength of connection, aggregation,
tentative prolongation construction, and prolongation smoothing. Called by
smoothed_aggregation_solver.
"""
def unpack_arg(v):
if isinstance(v, tuple):
return v[0], v[1]
else:
return v, {}
A = levels[-1].A
B = levels[-1].B
if A.symmetry == "nonsymmetric":
AH = A.H.asformat(A.format)
BH = levels[-1].BH
# Compute the strength-of-connection matrix C, where larger
# C[i,j] denote stronger couplings between i and j.
fn, kwargs = unpack_arg(strength[len(levels)-1])
if fn == 'symmetric':
C = symmetric_strength_of_connection(A, **kwargs)
elif fn == 'classical':
C = classical_strength_of_connection(A, **kwargs)
elif fn == 'distance':
C = distance_strength_of_connection(A, **kwargs)
elif (fn == 'ode') or (fn == 'evolution'):
if 'B' in kwargs:
C = evolution_strength_of_connection(A, **kwargs)
else:
C = evolution_strength_of_connection(A, B, **kwargs)
elif fn == 'energy_based':
C = energy_based_strength_of_connection(A, **kwargs)
elif fn == 'predefined':
C = kwargs['C'].tocsr()
elif fn == 'algebraic_distance':
C = algebraic_distance(A, **kwargs)
elif fn == 'affinity':
C = affinity_distance(A, **kwargs)
elif fn is None:
C = A.tocsr()
else:
raise ValueError('unrecognized strength of connection method: %s' %
str(fn))
# Avoid coarsening diagonally dominant rows
flag, kwargs = unpack_arg(diagonal_dominance)
if flag:
C = eliminate_diag_dom_nodes(A, C, **kwargs)
# Compute the aggregation matrix AggOp (i.e., the nodal coarsening of A).
# AggOp is a boolean matrix, where the sparsity pattern for the k-th column
# denotes the fine-grid nodes agglomerated into k-th coarse-grid node.
fn, kwargs = unpack_arg(aggregate[len(levels)-1])
if fn == 'standard':
AggOp = standard_aggregation(C, **kwargs)[0]
elif fn == 'naive':
AggOp = naive_aggregation(C, **kwargs)[0]
elif fn == 'lloyd':
AggOp = lloyd_aggregation(C, **kwargs)[0]
elif fn == 'predefined':
AggOp = kwargs['AggOp'].tocsr()
else:
raise ValueError('unrecognized aggregation method %s' % str(fn))
# Improve near nullspace candidates by relaxing on A B = 0
fn, kwargs = unpack_arg(improve_candidates[len(levels)-1])
if fn is not None:
b = np.zeros((A.shape[0], 1), dtype=A.dtype)
B = relaxation_as_linear_operator((fn, kwargs), A, b) * B
levels[-1].B = B
if A.symmetry == "nonsymmetric":
BH = relaxation_as_linear_operator((fn, kwargs), AH, b) * BH
levels[-1].BH = BH
# Compute the tentative prolongator, T, which is a tentative interpolation
# matrix from the coarse-grid to the fine-grid. T exactly interpolates
# B_fine = T B_coarse.
T, B = fit_candidates(AggOp, B)
if A.symmetry == "nonsymmetric":
TH, BH = fit_candidates(AggOp, BH)
    # Smooth the tentative prolongator, so that its accuracy is greatly
# improved for algebraically smooth error.
fn, kwargs = unpack_arg(smooth[len(levels)-1])
if fn == 'jacobi':
P = jacobi_prolongation_smoother(A, T, C, B, **kwargs)
elif fn == 'richardson':
P = richardson_prolongation_smoother(A, T, **kwargs)
elif fn == 'energy':
P = energy_prolongation_smoother(A, T, C, B, None, (False, {}),
**kwargs)
elif fn is None:
P = T
else:
raise ValueError('unrecognized prolongation smoother method %s' %
str(fn))
# Compute the restriction matrix, R, which interpolates from the fine-grid
# to the coarse-grid. If A is nonsymmetric, then R must be constructed
# based on A.H. Otherwise R = P.H or P.T.
symmetry = A.symmetry
if symmetry == 'hermitian':
R = P.H
elif symmetry == 'symmetric':
R = P.T
elif symmetry == 'nonsymmetric':
fn, kwargs = unpack_arg(smooth[len(levels)-1])
if fn == 'jacobi':
R = jacobi_prolongation_smoother(AH, TH, C, BH, **kwargs).H
elif fn == 'richardson':
R = richardson_prolongation_smoother(AH, TH, **kwargs).H
elif fn == 'energy':
R = energy_prolongation_smoother(AH, TH, C, BH, None, (False, {}),
**kwargs)
R = R.H
elif fn is None:
R = T.H
else:
raise ValueError('unrecognized prolongation smoother method %s' %
str(fn))
if keep:
levels[-1].C = C # strength of connection matrix
levels[-1].AggOp = AggOp # aggregation operator
levels[-1].T = T # tentative prolongator
levels[-1].P = P # smoothed prolongator
levels[-1].R = R # restriction operator
levels.append(multilevel_solver.level())
A = R * A * P # Galerkin operator
A.symmetry = symmetry
levels[-1].A = A
levels[-1].B = B # right near nullspace candidates
if A.symmetry == "nonsymmetric":
levels[-1].BH = BH # left near nullspace candidates
|
#!/usr/bin/env python3
# Copyright (c) 2014-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test RPCs related to blockchainstate.
Test the following RPCs:
- getblockchaininfo
- gettxoutsetinfo
- getdifficulty
- getbestblockhash
- getblockhash
- getblockheader
- getchaintxstats
- getnetworkhashps
- verifychain
Tests correspond to code in rpc/blockchain.cpp.
"""
from decimal import Decimal
import http.client
import subprocess
from test_framework.test_framework import SupernodeCoinTestFramework
from test_framework.util import (
assert_equal,
assert_greater_than,
assert_greater_than_or_equal,
assert_raises,
assert_raises_rpc_error,
assert_is_hex_string,
assert_is_hash_string,
)
class BlockchainTest(SupernodeCoinTestFramework):
def set_test_params(self):
self.num_nodes = 1
def run_test(self):
#self._test_getblockchaininfo()
self._test_gettxoutsetinfo()
self._test_getblockheader()
#self._test_getdifficulty()
self.nodes[0].verifychain(0)
def _test_getblockchaininfo(self):
self.log.info("Test getblockchaininfo")
keys = [
'bestblockhash',
'blocks',
'chain',
'chainwork',
'difficulty',
'headers',
'verificationprogress',
'warnings',
]
res = self.nodes[0].getblockchaininfo()
        # the result should contain exactly these keys (no pruning keys are expected here)
assert_equal(sorted(res.keys()), sorted(keys))
def _test_gettxoutsetinfo(self):
node = self.nodes[0]
res = node.gettxoutsetinfo()
assert_equal(res['total_amount'], Decimal('50000.00000000'))
assert_equal(res['transactions'], 200)
assert_equal(res['height'], 200)
assert_equal(res['txouts'], 200)
assert_equal(res['bestblock'], node.getblockhash(200))
size = res['disk_size']
assert_greater_than_or_equal(size, 6400)
assert_greater_than_or_equal(64000, size)
assert_equal(len(res['bestblock']), 64)
assert_equal(len(res['hash_serialized_2']), 64)
def _test_getblockheader(self):
node = self.nodes[0]
assert_raises_rpc_error(-5, "Block not found",
node.getblockheader, "nonsense")
besthash = node.getbestblockhash()
secondbesthash = node.getblockhash(199)
header = node.getblockheader(besthash)
assert_equal(header['hash'], besthash)
assert_equal(header['height'], 200)
assert_equal(header['confirmations'], 1)
assert_equal(header['previousblockhash'], secondbesthash)
assert_is_hex_string(header['chainwork'])
assert_is_hash_string(header['hash'])
assert_is_hash_string(header['previousblockhash'])
assert_is_hash_string(header['merkleroot'])
assert_is_hash_string(header['bits'], length=None)
assert isinstance(header['time'], int)
#assert isinstance(header['mediantime'], int)
assert isinstance(header['nonce'], int)
assert isinstance(header['version'], int)
#assert isinstance(int(header['versionHex'], 16), int)
assert isinstance(header['difficulty'], Decimal)
def _test_getdifficulty(self):
difficulty = self.nodes[0].getdifficulty()
# 1 hash in 2 should be valid, so difficulty should be 1/2**31
# binary => decimal => binary math is why we do this check
assert abs(difficulty * 2**31 - 1) < 0.0001
if __name__ == '__main__':
BlockchainTest().main()
|
import os
import sys
import ctypes
from os import listdir, path
from os.path import isfile
from PyQt4.QtGui import *
from PyQt4.QtCore import *
# Before using arnold!
# I have to load dll manually
# Problem using dlls
s_path = "C:/solidangle/arnold/Arnold-5.2.2.0-windows/bin"
if os.access(s_path, os.F_OK):
dlls = [dll for dll in listdir(s_path) if isfile(path.join(s_path, dll)) and
('.dll' in dll)]
for dll in dlls:
print "{0}/{1}".format(s_path, dll)
        ctypes.WinDLL("{0}/{1}".format(s_path, dll))
# End of loading dlls
from UI import main_dialog, bridge_function
from Core import app_PyQtArnold_core
modules = [main_dialog, bridge_function, app_PyQtArnold_core]
for xi in modules:
    reload(xi)
class MyApplication(QDialog, main_dialog.Ui_main_dialog):
def __init__(self, parent=None):
super(MyApplication, self).__init__(parent)
self.setupUi(self)
def main():
if QCoreApplication.instance() is not None:
app = QCoreApplication.instance()
else:
app = QApplication(sys.argv)
app.aboutToQuit.connect(app.quit)
window = MyApplication()
core = app_PyQtArnold_core.CoreFunctions()
bridge = bridge_function.UiActions(
win_dialog=window, core_func = core)
window.show()
    try:
        sys.exit(app.exec_())
    except:
        # swallow the SystemExit from sys.exit() so a hosting interpreter keeps running
        pass
main()
|
"""Classify changes in Ansible code."""
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import collections
import os
import re
import time
from . import types as t
from .target import (
walk_module_targets,
walk_integration_targets,
walk_units_targets,
walk_compile_targets,
walk_sanity_targets,
load_integration_prefixes,
analyze_integration_target_dependencies,
)
from .util import (
display,
is_subdir,
)
from .import_analysis import (
get_python_module_utils_imports,
get_python_module_utils_name,
)
from .csharp_import_analysis import (
get_csharp_module_utils_imports,
get_csharp_module_utils_name,
)
from .powershell_import_analysis import (
get_powershell_module_utils_imports,
get_powershell_module_utils_name,
)
from .config import (
TestConfig,
IntegrationConfig,
)
from .metadata import (
ChangeDescription,
)
from .data import (
data_context,
)
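# Sentinel key in classification result dictionaries marking a change as focused on specific targets.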
FOCUSED_TARGET = '__focused__'
def categorize_changes(args, paths, verbose_command=None):
"""
:type args: TestConfig
:type paths: list[str]
:type verbose_command: str
:rtype: ChangeDescription
"""
mapper = PathMapper(args)
commands = {
'sanity': set(),
'units': set(),
'integration': set(),
'windows-integration': set(),
'network-integration': set(),
}
focused_commands = collections.defaultdict(set)
deleted_paths = set()
original_paths = set()
additional_paths = set()
no_integration_paths = set()
for path in paths:
if not os.path.exists(path):
deleted_paths.add(path)
continue
original_paths.add(path)
dependent_paths = mapper.get_dependent_paths(path)
if not dependent_paths:
continue
display.info('Expanded "%s" to %d dependent file(s):' % (path, len(dependent_paths)), verbosity=2)
for dependent_path in dependent_paths:
display.info(dependent_path, verbosity=2)
additional_paths.add(dependent_path)
additional_paths -= set(paths) # don't count changed paths as additional paths
if additional_paths:
display.info('Expanded %d changed file(s) into %d additional dependent file(s).' % (len(paths), len(additional_paths)))
paths = sorted(set(paths) | additional_paths)
display.info('Mapping %d changed file(s) to tests.' % len(paths))
none_count = 0
for path in paths:
tests = mapper.classify(path)
if tests is None:
focused_target = False
display.info('%s -> all' % path, verbosity=1)
tests = all_tests(args) # not categorized, run all tests
display.warning('Path not categorized: %s' % path)
else:
focused_target = tests.pop(FOCUSED_TARGET, False) and path in original_paths
tests = dict((key, value) for key, value in tests.items() if value)
if focused_target and not any('integration' in command for command in tests):
no_integration_paths.add(path) # path triggers no integration tests
if verbose_command:
result = '%s: %s' % (verbose_command, tests.get(verbose_command) or 'none')
# identify targeted integration tests (those which only target a single integration command)
if 'integration' in verbose_command and tests.get(verbose_command):
if not any('integration' in command for command in tests if command != verbose_command):
if focused_target:
result += ' (focused)'
result += ' (targeted)'
else:
result = '%s' % tests
if not tests.get(verbose_command):
# minimize excessive output from potentially thousands of files which do not trigger tests
none_count += 1
verbosity = 2
else:
verbosity = 1
if args.verbosity >= verbosity:
display.info('%s -> %s' % (path, result), verbosity=1)
for command, target in tests.items():
commands[command].add(target)
if focused_target:
focused_commands[command].add(target)
if none_count > 0 and args.verbosity < 2:
display.notice('Omitted %d file(s) that triggered no tests.' % none_count)
for command in commands:
commands[command].discard('none')
if any(target == 'all' for target in commands[command]):
commands[command] = set(['all'])
commands = dict((c, sorted(commands[c])) for c in commands if commands[c])
focused_commands = dict((c, sorted(focused_commands[c])) for c in focused_commands)
for command in commands:
if commands[command] == ['all']:
commands[command] = [] # changes require testing all targets, do not filter targets
changes = ChangeDescription()
changes.command = verbose_command
changes.changed_paths = sorted(original_paths)
changes.deleted_paths = sorted(deleted_paths)
changes.regular_command_targets = commands
changes.focused_command_targets = focused_commands
changes.no_integration_paths = sorted(no_integration_paths)
return changes
class PathMapper:
"""Map file paths to test commands and targets."""
def __init__(self, args):
"""
:type args: TestConfig
"""
self.args = args
self.integration_all_target = get_integration_all_target(self.args)
self.integration_targets = list(walk_integration_targets())
self.module_targets = list(walk_module_targets())
self.compile_targets = list(walk_compile_targets())
self.units_targets = list(walk_units_targets())
self.sanity_targets = list(walk_sanity_targets())
self.powershell_targets = [target for target in self.sanity_targets if os.path.splitext(target.path)[1] in ('.ps1', '.psm1')]
self.csharp_targets = [target for target in self.sanity_targets if os.path.splitext(target.path)[1] == '.cs']
self.units_modules = set(target.module for target in self.units_targets if target.module)
self.units_paths = set(a for target in self.units_targets for a in target.aliases)
self.sanity_paths = set(target.path for target in self.sanity_targets)
self.module_names_by_path = dict((target.path, target.module) for target in self.module_targets)
self.integration_targets_by_name = dict((target.name, target) for target in self.integration_targets)
self.integration_targets_by_alias = dict((a, target) for target in self.integration_targets for a in target.aliases)
self.posix_integration_by_module = dict((m, target.name) for target in self.integration_targets
if 'posix/' in target.aliases for m in target.modules)
self.windows_integration_by_module = dict((m, target.name) for target in self.integration_targets
if 'windows/' in target.aliases for m in target.modules)
self.network_integration_by_module = dict((m, target.name) for target in self.integration_targets
if 'network/' in target.aliases for m in target.modules)
self.prefixes = load_integration_prefixes()
self.integration_dependencies = analyze_integration_target_dependencies(self.integration_targets)
self.python_module_utils_imports = {} # populated on first use to reduce overhead when not needed
self.powershell_module_utils_imports = {} # populated on first use to reduce overhead when not needed
self.csharp_module_utils_imports = {} # populated on first use to reduce overhead when not needed
self.paths_to_dependent_targets = {}
for target in self.integration_targets:
for path in target.needs_file:
if path not in self.paths_to_dependent_targets:
self.paths_to_dependent_targets[path] = set()
self.paths_to_dependent_targets[path].add(target)
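    # The methods below expand a changed path into every path that depends on it,
    # using a breadth-first worklist over the non-recursive dependency lookups.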
def get_dependent_paths(self, path):
"""
:type path: str
:rtype: list[str]
"""
unprocessed_paths = set(self.get_dependent_paths_non_recursive(path))
paths = set()
while unprocessed_paths:
queued_paths = list(unprocessed_paths)
paths |= unprocessed_paths
unprocessed_paths = set()
for queued_path in queued_paths:
new_paths = self.get_dependent_paths_non_recursive(queued_path)
for new_path in new_paths:
if new_path not in paths:
unprocessed_paths.add(new_path)
return sorted(paths)
def get_dependent_paths_non_recursive(self, path):
"""
:type path: str
:rtype: list[str]
"""
paths = self.get_dependent_paths_internal(path)
paths += [target.path + '/' for target in self.paths_to_dependent_targets.get(path, set())]
paths = sorted(set(paths))
return paths
def get_dependent_paths_internal(self, path):
"""
:type path: str
:rtype: list[str]
"""
ext = os.path.splitext(os.path.split(path)[1])[1]
if is_subdir(path, data_context().content.module_utils_path):
if ext == '.py':
return self.get_python_module_utils_usage(path)
if ext == '.psm1':
return self.get_powershell_module_utils_usage(path)
if ext == '.cs':
return self.get_csharp_module_utils_usage(path)
if is_subdir(path, data_context().content.integration_targets_path):
return self.get_integration_target_usage(path)
return []
def get_python_module_utils_usage(self, path):
"""
:type path: str
:rtype: list[str]
"""
if path == 'lib/ansible/module_utils/__init__.py':
return []
if not self.python_module_utils_imports:
display.info('Analyzing python module_utils imports...')
before = time.time()
self.python_module_utils_imports = get_python_module_utils_imports(self.compile_targets)
after = time.time()
display.info('Processed %d python module_utils in %d second(s).' % (len(self.python_module_utils_imports), after - before))
name = get_python_module_utils_name(path)
return sorted(self.python_module_utils_imports[name])
def get_powershell_module_utils_usage(self, path):
"""
:type path: str
:rtype: list[str]
"""
if not self.powershell_module_utils_imports:
display.info('Analyzing powershell module_utils imports...')
before = time.time()
self.powershell_module_utils_imports = get_powershell_module_utils_imports(self.powershell_targets)
after = time.time()
display.info('Processed %d powershell module_utils in %d second(s).' % (len(self.powershell_module_utils_imports), after - before))
name = get_powershell_module_utils_name(path)
return sorted(self.powershell_module_utils_imports[name])
def get_csharp_module_utils_usage(self, path):
"""
:type path: str
:rtype: list[str]
"""
if not self.csharp_module_utils_imports:
display.info('Analyzing C# module_utils imports...')
before = time.time()
self.csharp_module_utils_imports = get_csharp_module_utils_imports(self.powershell_targets, self.csharp_targets)
after = time.time()
display.info('Processed %d C# module_utils in %d second(s).' % (len(self.csharp_module_utils_imports), after - before))
name = get_csharp_module_utils_name(path)
return sorted(self.csharp_module_utils_imports[name])
def get_integration_target_usage(self, path):
"""
:type path: str
:rtype: list[str]
"""
target_name = path.split('/')[3]
dependents = [os.path.join(data_context().content.integration_targets_path, target) + os.path.sep
for target in sorted(self.integration_dependencies.get(target_name, set()))]
return dependents
def classify(self, path):
"""
:type path: str
:rtype: dict[str, str] | None
"""
result = self._classify(path)
# run all tests when no result given
if result is None:
return None
# run sanity on path unless result specified otherwise
if path in self.sanity_paths and 'sanity' not in result:
result['sanity'] = path
return result
def _classify(self, path): # type: (str) -> t.Optional[t.Dict[str, str]]
"""Return the classification for the given path."""
if data_context().content.is_ansible:
return self._classify_ansible(path)
if data_context().content.collection:
return self._classify_collection(path)
return None
def _classify_common(self, path): # type: (str) -> t.Optional[t.Dict[str, str]]
"""Return the classification for the given path using rules common to all layouts."""
dirname = os.path.dirname(path)
filename = os.path.basename(path)
name, ext = os.path.splitext(filename)
minimal = {}
if is_subdir(path, '.github'):
return minimal
if is_subdir(path, data_context().content.integration_targets_path):
if not os.path.exists(path):
return minimal
target = self.integration_targets_by_name.get(path.split('/')[3])
if not target:
display.warning('Unexpected non-target found: %s' % path)
return minimal
if 'hidden/' in target.aliases:
return minimal # already expanded using get_dependent_paths
return {
'integration': target.name if 'posix/' in target.aliases else None,
'windows-integration': target.name if 'windows/' in target.aliases else None,
'network-integration': target.name if 'network/' in target.aliases else None,
FOCUSED_TARGET: True,
}
if is_subdir(path, data_context().content.integration_path):
if dirname == data_context().content.integration_path:
for command in (
'integration',
'windows-integration',
'network-integration',
):
if name == command and ext == '.cfg':
return {
command: self.integration_all_target,
}
if name == command + '.requirements' and ext == '.txt':
return {
command: self.integration_all_target,
}
return {
'integration': self.integration_all_target,
'windows-integration': self.integration_all_target,
'network-integration': self.integration_all_target,
}
if is_subdir(path, data_context().content.sanity_path):
return {
'sanity': 'all', # test infrastructure, run all sanity checks
}
if is_subdir(path, data_context().content.unit_path):
if path in self.units_paths:
return {
'units': path,
}
# changes to files which are not unit tests should trigger tests from the nearest parent directory
test_path = os.path.dirname(path)
while test_path:
if test_path + '/' in self.units_paths:
return {
'units': test_path + '/',
}
test_path = os.path.dirname(test_path)
if is_subdir(path, data_context().content.module_path):
module_name = self.module_names_by_path.get(path)
if module_name:
return {
'units': module_name if module_name in self.units_modules else None,
'integration': self.posix_integration_by_module.get(module_name) if ext == '.py' else None,
'windows-integration': self.windows_integration_by_module.get(module_name) if ext in ['.cs', '.ps1'] else None,
'network-integration': self.network_integration_by_module.get(module_name),
FOCUSED_TARGET: True,
}
return minimal
if is_subdir(path, data_context().content.module_utils_path):
if ext == '.cs':
return minimal # already expanded using get_dependent_paths
if ext == '.psm1':
return minimal # already expanded using get_dependent_paths
if ext == '.py':
return minimal # already expanded using get_dependent_paths
if is_subdir(path, data_context().content.plugin_paths['action']):
if ext == '.py':
if name.startswith('net_'):
network_target = 'network/.*_%s' % name[4:]
if any(re.search(r'^%s$' % network_target, alias) for alias in self.integration_targets_by_alias):
return {
'network-integration': network_target,
'units': 'all',
}
return {
'network-integration': self.integration_all_target,
'units': 'all',
}
if self.prefixes.get(name) == 'network':
network_platform = name
elif name.endswith('_config') and self.prefixes.get(name[:-7]) == 'network':
network_platform = name[:-7]
elif name.endswith('_template') and self.prefixes.get(name[:-9]) == 'network':
network_platform = name[:-9]
else:
network_platform = None
if network_platform:
network_target = 'network/%s/' % network_platform
if network_target in self.integration_targets_by_alias:
return {
'network-integration': network_target,
'units': 'all',
}
display.warning('Integration tests for "%s" not found.' % network_target, unique=True)
return {
'units': 'all',
}
if is_subdir(path, data_context().content.plugin_paths['connection']):
if name == '__init__':
return {
'integration': self.integration_all_target,
'windows-integration': self.integration_all_target,
'network-integration': self.integration_all_target,
'units': 'test/units/plugins/connection/',
}
units_path = 'test/units/plugins/connection/test_%s.py' % name
if units_path not in self.units_paths:
units_path = None
integration_name = 'connection_%s' % name
if integration_name not in self.integration_targets_by_name:
integration_name = None
windows_integration_name = 'connection_windows_%s' % name
if windows_integration_name not in self.integration_targets_by_name:
windows_integration_name = None
# entire integration test commands depend on these connection plugins
if name in ['winrm', 'psrp']:
return {
'windows-integration': self.integration_all_target,
'units': units_path,
}
if name == 'local':
return {
'integration': self.integration_all_target,
'network-integration': self.integration_all_target,
'units': units_path,
}
if name == 'network_cli':
return {
'network-integration': self.integration_all_target,
'units': units_path,
}
if name == 'paramiko_ssh':
return {
'integration': integration_name,
'network-integration': self.integration_all_target,
'units': units_path,
}
# other connection plugins have isolated integration and unit tests
return {
'integration': integration_name,
'windows-integration': windows_integration_name,
'units': units_path,
}
if is_subdir(path, data_context().content.plugin_paths['doc_fragments']):
return {
'sanity': 'all',
}
if is_subdir(path, data_context().content.plugin_paths['inventory']):
if name == '__init__':
return all_tests(self.args) # broad impact, run all tests
# These inventory plugins are enabled by default (see INVENTORY_ENABLED).
# Without dedicated integration tests for these we must rely on the incidental coverage from other tests.
test_all = [
'host_list',
'script',
'yaml',
'ini',
'auto',
]
if name in test_all:
posix_integration_fallback = get_integration_all_target(self.args)
else:
posix_integration_fallback = None
target = self.integration_targets_by_name.get('inventory_%s' % name)
units_path = 'test/units/plugins/inventory/test_%s.py' % name
if units_path not in self.units_paths:
units_path = None
return {
'integration': target.name if target and 'posix/' in target.aliases else posix_integration_fallback,
'windows-integration': target.name if target and 'windows/' in target.aliases else None,
'network-integration': target.name if target and 'network/' in target.aliases else None,
'units': units_path,
FOCUSED_TARGET: target is not None,
}
if (is_subdir(path, data_context().content.plugin_paths['terminal']) or
is_subdir(path, data_context().content.plugin_paths['cliconf']) or
is_subdir(path, data_context().content.plugin_paths['netconf'])):
if ext == '.py':
if name in self.prefixes and self.prefixes[name] == 'network':
network_target = 'network/%s/' % name
if network_target in self.integration_targets_by_alias:
return {
'network-integration': network_target,
'units': 'all',
}
display.warning('Integration tests for "%s" not found.' % network_target, unique=True)
return {
'units': 'all',
}
return {
'network-integration': self.integration_all_target,
'units': 'all',
}
return None
def _classify_collection(self, path): # type: (str) -> t.Optional[t.Dict[str, str]]
"""Return the classification for the given path using rules specific to collections."""
result = self._classify_common(path)
if result is not None:
return result
return None
def _classify_ansible(self, path): # type: (str) -> t.Optional[t.Dict[str, str]]
"""Return the classification for the given path using rules specific to Ansible."""
if path.startswith('test/units/compat/'):
return {
'units': 'test/units/',
}
result = self._classify_common(path)
if result is not None:
return result
dirname = os.path.dirname(path)
filename = os.path.basename(path)
name, ext = os.path.splitext(filename)
minimal = {}
if path.startswith('bin/'):
return all_tests(self.args) # broad impact, run all tests
if path.startswith('changelogs/'):
return minimal
if path.startswith('contrib/'):
return {
'units': 'test/units/contrib/'
}
if path.startswith('docs/'):
return minimal
if path.startswith('examples/'):
if path == 'examples/scripts/ConfigureRemotingForAnsible.ps1':
return {
'windows-integration': 'connection_winrm',
}
return minimal
if path.startswith('hacking/'):
return minimal
if path.startswith('lib/ansible/executor/powershell/'):
units_path = 'test/units/executor/powershell/'
if units_path not in self.units_paths:
units_path = None
return {
'windows-integration': self.integration_all_target,
'units': units_path,
}
if path.startswith('lib/ansible/'):
return all_tests(self.args) # broad impact, run all tests
if path.startswith('licenses/'):
return minimal
if path.startswith('packaging/'):
if path.startswith('packaging/requirements/'):
if name.startswith('requirements-') and ext == '.txt':
component = name.split('-', 1)[1]
candidates = (
'cloud/%s/' % component,
)
for candidate in candidates:
if candidate in self.integration_targets_by_alias:
return {
'integration': candidate,
}
return all_tests(self.args) # broad impact, run all tests
return minimal
if path.startswith('test/ansible_test/'):
return minimal # these tests are not invoked from ansible-test
if path.startswith('test/legacy/'):
return minimal
if path.startswith('test/lib/ansible_test/config/'):
if name.startswith('cloud-config-'):
cloud_target = 'cloud/%s/' % name.split('-')[2].split('.')[0]
if cloud_target in self.integration_targets_by_alias:
return {
'integration': cloud_target,
}
if path.startswith('test/lib/ansible_test/_data/completion/'):
if path == 'test/lib/ansible_test/_data/completion/docker.txt':
return all_tests(self.args, force=True) # force all tests due to risk of breaking changes in new test environment
if path.startswith('test/lib/ansible_test/_internal/cloud/'):
cloud_target = 'cloud/%s/' % name
if cloud_target in self.integration_targets_by_alias:
return {
'integration': cloud_target,
}
return all_tests(self.args) # test infrastructure, run all tests
if path.startswith('test/lib/ansible_test/_internal/sanity/'):
return {
'sanity': 'all', # test infrastructure, run all sanity checks
}
if path.startswith('test/lib/ansible_test/_data/sanity/'):
return {
'sanity': 'all', # test infrastructure, run all sanity checks
}
if path.startswith('test/lib/ansible_test/_internal/units/'):
return {
'units': 'all', # test infrastructure, run all unit tests
}
if path.startswith('test/lib/ansible_test/_data/units/'):
return {
'units': 'all', # test infrastructure, run all unit tests
}
if path.startswith('test/lib/ansible_test/_data/pytest/'):
return {
'units': 'all', # test infrastructure, run all unit tests
}
if path.startswith('test/lib/ansible_test/_data/requirements/'):
if name in (
'integration',
'network-integration',
'windows-integration',
):
return {
name: self.integration_all_target,
}
if name in (
'sanity',
'units',
):
return {
name: 'all',
}
if name.startswith('integration.cloud.'):
cloud_target = 'cloud/%s/' % name.split('.')[2]
if cloud_target in self.integration_targets_by_alias:
return {
'integration': cloud_target,
}
if path.startswith('test/lib/'):
return all_tests(self.args) # test infrastructure, run all tests
if path.startswith('test/utils/shippable/tools/'):
return minimal # not used by tests
if path.startswith('test/utils/shippable/'):
if dirname == 'test/utils/shippable':
test_map = {
'cloud.sh': 'integration:cloud/',
'freebsd.sh': 'integration:all',
'linux.sh': 'integration:all',
'network.sh': 'network-integration:all',
'osx.sh': 'integration:all',
'rhel.sh': 'integration:all',
'sanity.sh': 'sanity:all',
'units.sh': 'units:all',
'windows.sh': 'windows-integration:all',
}
test_match = test_map.get(filename)
if test_match:
test_command, test_target = test_match.split(':')
return {
test_command: test_target,
}
cloud_target = 'cloud/%s/' % name
if cloud_target in self.integration_targets_by_alias:
return {
'integration': cloud_target,
}
return all_tests(self.args) # test infrastructure, run all tests
if path.startswith('test/utils/'):
return minimal
if '/' not in path:
if path in (
'.gitattributes',
'.gitignore',
'.mailmap',
'COPYING',
'Makefile',
):
return minimal
if path in (
'setup.py',
'shippable.yml',
):
return all_tests(self.args) # broad impact, run all tests
if ext in (
'.in',
'.md',
'.rst',
'.toml',
'.txt',
):
return minimal
return None # unknown, will result in fall-back to run all tests
def all_tests(args, force=False):
"""
:type args: TestConfig
:type force: bool
:rtype: dict[str, str]
"""
if force:
integration_all_target = 'all'
else:
integration_all_target = get_integration_all_target(args)
return {
'sanity': 'all',
'units': 'all',
'integration': integration_all_target,
'windows-integration': integration_all_target,
'network-integration': integration_all_target,
}
def get_integration_all_target(args):
"""
:type args: TestConfig
:rtype: str
"""
if isinstance(args, IntegrationConfig):
return args.changed_all_target
return 'all'
|
# -*- coding:utf-8 -*-
import re
import os
import time
import json
import sys
import subprocess
import requests
import hashlib
from bs4 import BeautifulSoup
"""
info:
author:CriseLYJ
github:https://github.com/CriseLYJ/
update_time:2019-3-6
"""
class Lagou_login(object):
def __init__(self):
self.session = requests.session()
self.CaptchaImagePath = os.path.split(os.path.realpath(__file__))[0] + os.sep + 'captcha.jpg'
self.HEADERS = {'Referer': 'https://passport.lagou.com/login/login.html',
'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36'
' (KHTML, like Gecko) Chrome/53.0.2785.104 Safari/537.36'
' Core/1.53.4882.400 QQBrowser/9.7.13059.400',
'X-Requested-With': 'XMLHttpRequest'}
    # Encrypt the password
    def encryptPwd(self, passwd):
        # The password goes through a double MD5 hash
        passwd = hashlib.md5(passwd.encode('utf-8')).hexdigest()
        # 'veenike' is a hard-coded salt value found in the site's JS file
        passwd = 'veenike' + passwd + 'veenike'
passwd = hashlib.md5(passwd.encode('utf-8')).hexdigest()
return passwd
    # Fetch the request token and code
def getTokenCode(self):
login_page = 'https://passport.lagou.com/login/login.html'
data = self.session.get(login_page, headers= self.HEADERS)
soup = BeautifulSoup(data.content, "lxml", from_encoding='utf-8')
'''
        Extract the token and code from the login page and add them to the
        request headers. The page embeds them like this:
        <!-- page styles --><!-- dynamic token, guards against forged / duplicate submissions -->
<script type="text/javascript">
window.X_Anti_Forge_Token = 'dde4db4a-888e-47ca-8277-0c6da6a8fc19';
window.X_Anti_Forge_Code = '61142241';
</script>
'''
anti_token = {'X-Anit-Forge-Token': 'None',
'X-Anit-Forge-Code': '0'}
anti = soup.findAll('script')[1].getText().splitlines()
anti = [str(x) for x in anti]
anti_token['X-Anit-Forge-Token'] = re.findall(r'= \'(.+?)\'', anti[1])[0]
anti_token['X-Anit-Forge-Code'] = re.findall(r'= \'(.+?)\'', anti[2])[0]
return anti_token
    # Have the user read the captcha manually and return it
def getCaptcha(self):
captchaImgUrl = 'https://passport.lagou.com/vcode/create?from=register&refresh=%s' % time.time()
        # Save the captcha image to disk
f = open(self.CaptchaImagePath, 'wb')
f.write(self.session.get(captchaImgUrl, headers=self.HEADERS).content)
f.close()
        # Open the captcha image with the platform's default viewer
if sys.platform.find('darwin') >= 0:
subprocess.call(['open', self.CaptchaImagePath])
elif sys.platform.find('linux') >= 0:
subprocess.call(['xdg-open', self.CaptchaImagePath])
else:
os.startfile(self.CaptchaImagePath)
        # Prompt for the captcha and return it
        captcha = input("Please enter the captcha for the image at %s: " % self.CaptchaImagePath)
        print('The captcha you entered is: %s' % captcha)
return captcha
    # Perform the login
def login(self, user, passwd, captchaData=None, token_code=None):
postData = {'isValidate': 'true',
'password': passwd,
                    # Include the captcha value if one is required
                    'request_form_verifyCode': (captchaData if captchaData is not None else ''),
'submit': '',
'username': user
}
login_url = 'https://passport.lagou.com/login/login.json'
        # Add the anti-forge token to the request headers
login_headers = self.HEADERS.copy()
token_code = self.getTokenCode() if token_code is None else token_code
login_headers.update(token_code)
        # Sample failure response: {"content":{"rows":[]},"message":"该帐号不存在或密码错误,请重新输入","state":400} (account does not exist or wrong password)
response = self.session.post(login_url, data=postData, headers=login_headers)
data = json.loads(response.content.decode('utf-8'))
if data['state'] == 1:
return response.content
elif data['state'] == 10010:
print(data['message'])
captchaData = self.getCaptcha()
token_code = {'X-Anit-Forge-Code' : data['submitCode'], 'X-Anit-Forge-Token' : data['submitToken']}
return self.login(user, passwd, captchaData, token_code)
else:
print(data['message'])
return False
if __name__ == "__main__":
    username = input("Please enter your phone number or email\n >>>:")
    passwd = input("Please enter your password\n >>>:")
lg = Lagou_login()
passwd = lg.encryptPwd(passwd)
data = lg.login(username, passwd)
if data:
print(data)
        print('Login successful')
else:
        print('Login failed')
|
#! /usr/bin/python3
# Define the class DVD
class DVD:
def __init__(self, title, studio, director, released):
self.__title = title
self.__studio = studio
self.__director = director
self.__released = released
def get_title(self):
return self.__title
def get_studio(self):
return self.__studio
def get_director(self):
return self.__director
def get_released(self):
return self.__released
def __str__(self):
return self.__title + ", " + self.__studio + ", " + self.__director + ", " + self.__released
# Test Code
dvd = DVD("Forbidden Planet", "Metro-Goldwyn-Mayer", "Fred M. Wilcox", "1956")
print(dvd.get_title())
print(dvd.get_studio())
print(dvd.get_director())
print(dvd.get_released())
print(dvd)
|
from scrapy.exceptions import DropItem
class PricePipeline(object):
vat_factor = 1.15
def process_item(self, item, spider):
if item.get('price'):
if item.get('price_excludes_vat'):
item['price'] = item['price'] * self.vat_factor
return item
else:
raise DropItem("Missing price in %s" % item)
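# To activate this pipeline, register it in the Scrapy project's settings.py.
# A minimal sketch, assuming the project package is named 'myproject' (the dotted
# path below is a placeholder, not something defined in this file):
#
#   ITEM_PIPELINES = {
#       'myproject.pipelines.PricePipeline': 300,
#   }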
|
# coding: utf-8
import pprint
import re
import six
class UpdateHealthMonitorOption:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'admin_state_up': 'bool',
'delay': 'int',
'domain_name': 'str',
'expected_codes': 'str',
'http_method': 'str',
'max_retries': 'int',
'max_retries_down': 'int',
'monitor_port': 'int',
'name': 'str',
'timeout': 'int',
'url_path': 'str',
'type': 'str'
}
attribute_map = {
'admin_state_up': 'admin_state_up',
'delay': 'delay',
'domain_name': 'domain_name',
'expected_codes': 'expected_codes',
'http_method': 'http_method',
'max_retries': 'max_retries',
'max_retries_down': 'max_retries_down',
'monitor_port': 'monitor_port',
'name': 'name',
'timeout': 'timeout',
'url_path': 'url_path',
'type': 'type'
}
def __init__(self, admin_state_up=None, delay=None, domain_name=None, expected_codes=None, http_method=None, max_retries=None, max_retries_down=None, monitor_port=None, name=None, timeout=None, url_path=None, type=None):
"""UpdateHealthMonitorOption - a model defined in huaweicloud sdk"""
self._admin_state_up = None
self._delay = None
self._domain_name = None
self._expected_codes = None
self._http_method = None
self._max_retries = None
self._max_retries_down = None
self._monitor_port = None
self._name = None
self._timeout = None
self._url_path = None
self._type = None
self.discriminator = None
if admin_state_up is not None:
self.admin_state_up = admin_state_up
if delay is not None:
self.delay = delay
if domain_name is not None:
self.domain_name = domain_name
if expected_codes is not None:
self.expected_codes = expected_codes
if http_method is not None:
self.http_method = http_method
if max_retries is not None:
self.max_retries = max_retries
if max_retries_down is not None:
self.max_retries_down = max_retries_down
if monitor_port is not None:
self.monitor_port = monitor_port
if name is not None:
self.name = name
if timeout is not None:
self.timeout = timeout
if url_path is not None:
self.url_path = url_path
if type is not None:
self.type = type
@property
def admin_state_up(self):
"""Gets the admin_state_up of this UpdateHealthMonitorOption.
        Function: administrative state of the health check (true/false). Usage: defaults to true; true enables the health check, false disables it.
:return: The admin_state_up of this UpdateHealthMonitorOption.
:rtype: bool
"""
return self._admin_state_up
@admin_state_up.setter
def admin_state_up(self, admin_state_up):
"""Sets the admin_state_up of this UpdateHealthMonitorOption.
        Function: administrative state of the health check (true/false). Usage: defaults to true; true enables the health check, false disables it.
:param admin_state_up: The admin_state_up of this UpdateHealthMonitorOption.
:type: bool
"""
self._admin_state_up = admin_state_up
@property
def delay(self):
"""Gets the delay of this UpdateHealthMonitorOption.
        Interval between two consecutive health checks.
:return: The delay of this UpdateHealthMonitorOption.
:rtype: int
"""
return self._delay
@delay.setter
def delay(self, delay):
"""Sets the delay of this UpdateHealthMonitorOption.
        Interval between two consecutive health checks.
:param delay: The delay of this UpdateHealthMonitorOption.
:type: int
"""
self._delay = delay
@property
def domain_name(self):
"""Gets the domain_name of this UpdateHealthMonitorOption.
        Function: domain name used in the HTTP request sent when checking a member's health; effective only when type is HTTP. Usage: empty by default, meaning the load balancer's VIP is used as the destination address of the HTTP request. Must start with a digit or letter and may contain only digits, letters, '-' and '.'.
:return: The domain_name of this UpdateHealthMonitorOption.
:rtype: str
"""
return self._domain_name
@domain_name.setter
def domain_name(self, domain_name):
"""Sets the domain_name of this UpdateHealthMonitorOption.
        Function: domain name of the HTTP request sent when checking a member's health. Takes effect only when type is HTTP. Usage: empty by default, meaning the load balancer's VIP is used as the destination address of the HTTP request. Must start with a digit or letter and may contain only digits, letters, '-' and '.'.
:param domain_name: The domain_name of this UpdateHealthMonitorOption.
:type: str
"""
self._domain_name = domain_name
@property
def expected_codes(self):
"""Gets the expected_codes of this UpdateHealthMonitorOption.
        Expected HTTP response status codes. Specify one of the following: a single value, e.g. 200; a list, e.g. 200,202; or a range, e.g. 200-204. Takes effect only when type is HTTP. This field is reserved and not yet in use.
:return: The expected_codes of this UpdateHealthMonitorOption.
:rtype: str
"""
return self._expected_codes
@expected_codes.setter
def expected_codes(self, expected_codes):
"""Sets the expected_codes of this UpdateHealthMonitorOption.
        Expected HTTP response status codes. Specify one of the following: a single value, e.g. 200; a list, e.g. 200,202; or a range, e.g. 200-204. Takes effect only when type is HTTP. This field is reserved and not yet in use.
:param expected_codes: The expected_codes of this UpdateHealthMonitorOption.
:type: str
"""
self._expected_codes = expected_codes
@property
def http_method(self):
"""Gets the http_method of this UpdateHealthMonitorOption.
        HTTP method; one of GET, HEAD, POST, PUT, DELETE, TRACE, OPTIONS, CONNECT or PATCH. Takes effect only when type is HTTP. This field is reserved and not yet in use.
:return: The http_method of this UpdateHealthMonitorOption.
:rtype: str
"""
return self._http_method
@http_method.setter
def http_method(self, http_method):
"""Sets the http_method of this UpdateHealthMonitorOption.
        HTTP method; one of GET, HEAD, POST, PUT, DELETE, TRACE, OPTIONS, CONNECT or PATCH. Takes effect only when type is HTTP. This field is reserved and not yet in use.
:param http_method: The http_method of this UpdateHealthMonitorOption.
:type: str
"""
self._http_method = http_method
@property
def max_retries(self):
"""Gets the max_retries of this UpdateHealthMonitorOption.
        Maximum number of retries.
:return: The max_retries of this UpdateHealthMonitorOption.
:rtype: int
"""
return self._max_retries
@max_retries.setter
def max_retries(self, max_retries):
"""Sets the max_retries of this UpdateHealthMonitorOption.
        Maximum number of retries.
:param max_retries: The max_retries of this UpdateHealthMonitorOption.
:type: int
"""
self._max_retries = max_retries
@property
def max_retries_down(self):
"""Gets the max_retries_down of this UpdateHealthMonitorOption.
        Number of consecutive health check failures after which the backend server's health check status changes from ONLINE to OFFLINE.
:return: The max_retries_down of this UpdateHealthMonitorOption.
:rtype: int
"""
return self._max_retries_down
@max_retries_down.setter
def max_retries_down(self, max_retries_down):
"""Sets the max_retries_down of this UpdateHealthMonitorOption.
        Number of consecutive health check failures after which the backend server's health check status changes from ONLINE to OFFLINE.
:param max_retries_down: The max_retries_down of this UpdateHealthMonitorOption.
:type: int
"""
self._max_retries_down = max_retries_down
@property
def monitor_port(self):
"""Gets the monitor_port of this UpdateHealthMonitorOption.
        Health check port. Empty by default, meaning the port of the backend server group is used.
:return: The monitor_port of this UpdateHealthMonitorOption.
:rtype: int
"""
return self._monitor_port
@monitor_port.setter
def monitor_port(self, monitor_port):
"""Sets the monitor_port of this UpdateHealthMonitorOption.
        Health check port. Empty by default, meaning the port of the backend server group is used.
:param monitor_port: The monitor_port of this UpdateHealthMonitorOption.
:type: int
"""
self._monitor_port = monitor_port
@property
def name(self):
"""Gets the name of this UpdateHealthMonitorOption.
        Health check name.
:return: The name of this UpdateHealthMonitorOption.
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this UpdateHealthMonitorOption.
        Health check name.
:param name: The name of this UpdateHealthMonitorOption.
:type: str
"""
self._name = name
@property
def timeout(self):
"""Gets the timeout of this UpdateHealthMonitorOption.
        Health check timeout. It is recommended that this value be less than the value of delay.
:return: The timeout of this UpdateHealthMonitorOption.
:rtype: int
"""
return self._timeout
@timeout.setter
def timeout(self, timeout):
"""Sets the timeout of this UpdateHealthMonitorOption.
        Health check timeout. It is recommended that this value be less than the value of delay.
:param timeout: The timeout of this UpdateHealthMonitorOption.
:type: int
"""
self._timeout = timeout
@property
def url_path(self):
"""Gets the url_path of this UpdateHealthMonitorOption.
        Function: HTTP request path used when checking a member's health. Defaults to "/". Usage: must start with "/". Takes effect only when type is HTTP.
:return: The url_path of this UpdateHealthMonitorOption.
:rtype: str
"""
return self._url_path
@url_path.setter
def url_path(self, url_path):
"""Sets the url_path of this UpdateHealthMonitorOption.
        Function: HTTP request path used when checking a member's health. Defaults to "/". Usage: must start with "/". Takes effect only when type is HTTP.
:param url_path: The url_path of this UpdateHealthMonitorOption.
:type: str
"""
self._url_path = url_path
@property
def type(self):
"""Gets the type of this UpdateHealthMonitorOption.
        Description: health check type. Values: TCP, UDP_CONNECT, HTTP, HTTPS, PING. Constraint: 1. If the pool's protocol is QUIC, type can only be UDP.
:return: The type of this UpdateHealthMonitorOption.
:rtype: str
"""
return self._type
@type.setter
def type(self, type):
"""Sets the type of this UpdateHealthMonitorOption.
        Description: health check type. Values: TCP, UDP_CONNECT, HTTP, HTTPS, PING. Constraint: 1. If the pool's protocol is QUIC, type can only be UDP.
:param type: The type of this UpdateHealthMonitorOption.
:type: str
"""
self._type = type
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, UpdateHealthMonitorOption):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
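# Illustrative usage sketch (not part of the generated SDK model): build an
# update payload with hypothetical field values and serialize it with
# to_dict(). Assumes only this class and the ``six`` package are available.
if __name__ == "__main__":
    option = UpdateHealthMonitorOption(
        delay=5,
        timeout=3,
        max_retries=3,
        type='HTTP',
        url_path='/healthz'
    )
    # Prints a plain dict suitable for inspection or JSON serialization.
    print(option.to_dict())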
|
"""empty message
Revision ID: d4c798575877
Revises: 1daa601d3ae5
Create Date: 2018-05-09 10:28:22.931442
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'd4c798575877'
down_revision = '1daa601d3ae5'
branch_labels = None
depends_on = None
def upgrade():
op.create_table('favorites',
sa.Column('updated_at', sa.DateTime(timezone=True), nullable=False),
sa.Column('created_at', sa.DateTime(timezone=True), nullable=False),
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('object_type', sa.Unicode(length=255), nullable=False),
sa.Column('object_id', sa.Integer(), nullable=False),
sa.Column('user_id', sa.Integer(), nullable=False),
sa.ForeignKeyConstraint(['user_id'], ['users.id'], ),
sa.PrimaryKeyConstraint('id')
)
def downgrade():
op.drop_table('favorites')
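# Illustrative usage note (assumes a configured alembic.ini in the project root):
#   apply this revision:   alembic upgrade d4c798575877
#   roll it back:          alembic downgrade 1daa601d3ae5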
|
from django.apps import AppConfig
class ShortnsweetConfig(AppConfig):
name = 'shortnsweet'
|
# -*- coding: utf-8 -*-
import sys, os
from chainer import cuda, optimizers, gradient_check, Variable
import numpy as np
sys.path.append(os.path.split(os.getcwd())[0])
from ddqn import *
from config import config
# Override config
config.ale_actions = [4, 3, 1, 0]
config.ale_screen_size = [210, 160]
config.ale_scaled_screen_size = [84, 84]
config.rl_replay_memory_size = 10 ** 5
config.rl_replay_start_size = 10 ** 4
config.q_conv_hidden_channels = [32, 64, 64]
config.q_conv_strides = [4, 2, 1]
config.q_conv_filter_sizes = [8, 4, 3]
config.q_fc_hidden_units = [256, 128]
config.apply_batchnorm = True
config.use_gpu = False
config.q_conv_output_projection_type = "global_average_pooling"
def backprop_check():
xp = cuda.cupy if config.use_gpu else np
duel = DDQN()
state = xp.random.uniform(-1.0, 1.0, (2, config.rl_agent_history_length * config.ale_screen_channels, config.ale_scaled_screen_size[1], config.ale_scaled_screen_size[0])).astype(xp.float32)
reward = [1, 0]
action = [3, 4]
episode_ends = [0, 0]
next_state = xp.random.uniform(-1.0, 1.0, (2, config.rl_agent_history_length * config.ale_screen_channels, config.ale_scaled_screen_size[1], config.ale_scaled_screen_size[0])).astype(xp.float32)
optimizer_conv = optimizers.Adam(alpha=config.rl_learning_rate, beta1=config.rl_gradient_momentum)
optimizer_conv.setup(duel.conv)
optimizer_fc = optimizers.Adam(alpha=config.rl_learning_rate, beta1=config.rl_gradient_momentum)
optimizer_fc.setup(duel.fc)
for i in xrange(10000):
optimizer_conv.zero_grads()
optimizer_fc.zero_grads()
loss, _ = duel.forward_one_step(state, action, reward, next_state, episode_ends)
loss.backward()
optimizer_conv.update()
optimizer_fc.update()
print loss.data,
print duel.conv.layer_2.W.data[0, 0, 0, 0],
print duel.fc.layer_2.W.data[0, 0],
backprop_check()
|
"""
Collectors have two main functions: synthesizing (or collecting) samples and computing the metric matrix (which is passed to selectors and losses).
All methods are listed below:
+-----------------------+-------------------------------------------------------------------------------+
| method | description |
+=======================+===============================================================================+
| BaseCollector | Base class. |
+-----------------------+-------------------------------------------------------------------------------+
| DefaultCollector | Do nothing. |
+-----------------------+-------------------------------------------------------------------------------+
| ProxyCollector | Maintain a set of proxies |
+-----------------------+-------------------------------------------------------------------------------+
| MoCoCollector | paper: **Momentum Contrast for Unsupervised Visual Representation Learning** |
+-----------------------+-------------------------------------------------------------------------------+
| SimSiamCollector | paper: **Exploring Simple Siamese Representation Learning** |
+-----------------------+-------------------------------------------------------------------------------+
| HDMLCollector | paper: **Hardness-Aware Deep Metric Learning** |
+-----------------------+-------------------------------------------------------------------------------+
| DAMLCollector | paper: **Deep Adversarial Metric Learning** |
+-----------------------+-------------------------------------------------------------------------------+
| DVMLCollector | paper: **Deep Variational Metric Learning** |
+-----------------------+-------------------------------------------------------------------------------+
Notes:
    ``embedders`` differ significantly from ``collectors``: ``embedders`` are also in charge of generating the embeddings that are used to compute the metrics.
Todo:
``epoch-based collector``
"""
from .iteration_collectors import (
DefaultCollector,
ProxyCollector,
MoCoCollector,
SimSiamCollector,
HDMLCollector,
DAMLCollector,
DVMLCollector
)
from .epoch_collectors import (
GlobalProxyCollector,
_DefaultGlobalCollector
)
from .base_collector import BaseCollector
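# Illustrative sketch only (hypothetical, not part of this package's API):
# a toy collector showing the role described above -- take a batch of
# embeddings, "collect" them unchanged, and return a pairwise-distance
# metric matrix that a selector or loss could consume.
#
#     import torch
#
#     class ToyCollector:
#         def __call__(self, embeddings: torch.Tensor) -> torch.Tensor:
#             # metric matrix: Euclidean distance between every pair of rows
#             return torch.cdist(embeddings, embeddings)
#
#     mat = ToyCollector()(torch.randn(8, 128))  # shape: (8, 8)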
|
# coding: utf-8
#########################################################################
# Website: <a href="http://www.crazyit.org">疯狂Java联盟</a>                #
# author yeeku.H.lee kongyeeku@163.com #
# #
# version 1.0 #
# #
# Copyright (C), 2001-2018, yeeku.H.Lee #
# #
# This program is protected by copyright laws. #
# #
# Program Name: #
# #
# <br>Date: #
#########################################################################
# Open the file for reading its content in binary mode
f = open("read_test3.py", 'rb', True)
# Read the whole file at once and use bytes.decode() to turn the bytes back into a string
print(f.read().decode('utf-8'))
f.close()
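# Equivalent, more idiomatic form (illustrative): a context manager closes
# the file automatically, even if decoding raises an exception.
with open("read_test3.py", 'rb') as f2:
    print(f2.read().decode('utf-8'))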
|
import FWCore.ParameterSet.Config as cms
nEvtLumi = 4
nEvtRun = 2*nEvtLumi
nRuns = 64
nStreams = 4
nEvt = nRuns*nEvtRun
process = cms.Process("TESTGLOBALMODULES")
import FWCore.Framework.test.cmsExceptionsFatalOption_cff
process.options = cms.untracked.PSet(
numberOfStreams = cms.untracked.uint32(nStreams),
numberOfThreads = cms.untracked.uint32(nStreams),
numberOfConcurrentLuminosityBlocks = cms.untracked.uint32(1)
)
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(nEvt)
)
process.source = cms.Source("EmptySource",
timeBetweenEvents = cms.untracked.uint64(1000),
firstTime = cms.untracked.uint64(1000000),
numberEventsInRun = cms.untracked.uint32(nEvtRun),
numberEventsInLuminosityBlock = cms.untracked.uint32(nEvtLumi)
)
process.StreamIntProd = cms.EDProducer("edmtest::global::StreamIntProducer",
transitions = cms.int32(int(nEvt+nStreams*(2*(nEvt/nEvtRun)+2*(nEvt/nEvtLumi)+2)))
,cachevalue = cms.int32(1)
)
process.RunIntProd = cms.EDProducer("edmtest::global::RunIntProducer",
transitions = cms.int32(int(2*(nEvt/nEvtRun)))
,cachevalue = cms.int32(nEvtRun)
)
process.LumiIntProd = cms.EDProducer("edmtest::global::LumiIntProducer",
transitions = cms.int32(int(2*(nEvt/nEvtLumi)))
,cachevalue = cms.int32(nEvtLumi)
)
process.RunSumIntProd = cms.EDProducer("edmtest::global::RunSummaryIntProducer",
transitions = cms.int32(int(nStreams*(nEvt/nEvtRun)+2*(nEvt/nEvtRun)))
,cachevalue = cms.int32(nEvtRun)
)
process.LumiSumIntProd = cms.EDProducer("edmtest::global::LumiSummaryIntProducer",
transitions = cms.int32(int(nStreams*(nEvt/nEvtLumi)+2*(nEvt/nEvtLumi)))
,cachevalue = cms.int32(nEvtLumi)
)
process.ProcessBlockIntProd = cms.EDProducer("edmtest::global::ProcessBlockIntProducer",
transitions = cms.int32(int(nEvt + 2)),
consumesBeginProcessBlock = cms.InputTag("TestBeginProcessBlockProd" ,"begin"),
consumesEndProcessBlock = cms.InputTag("TestEndProcessBlockProd", "end")
)
process.TestBeginProcessBlockProd = cms.EDProducer("edmtest::global::TestBeginProcessBlockProducer",
transitions = cms.int32(int(nEvt + 1)),
consumesBeginProcessBlock = cms.InputTag("")
)
process.TestBeginProcessBlockProdRead = cms.EDProducer("edmtest::global::TestBeginProcessBlockProducer",
transitions = cms.int32(int(nEvt + 1)),
consumesBeginProcessBlock = cms.InputTag("TestBeginProcessBlockProd" ,"begin")
)
process.TestEndProcessBlockProd = cms.EDProducer("edmtest::global::TestEndProcessBlockProducer",
transitions = cms.int32(int(nEvt + 1)),
consumesEndProcessBlock = cms.InputTag("")
)
process.TestEndProcessBlockProdRead = cms.EDProducer("edmtest::global::TestEndProcessBlockProducer",
transitions = cms.int32(int(nEvt + 1)),
consumesEndProcessBlock = cms.InputTag("TestEndProcessBlockProd", "end")
)
process.TestBeginRunProd = cms.EDProducer("edmtest::global::TestBeginRunProducer",
transitions = cms.int32(int(nEvt/nEvtRun))
)
process.TestEndRunProd = cms.EDProducer("edmtest::global::TestEndRunProducer",
transitions = cms.int32(int(nEvt/nEvtRun))
)
process.TestBeginLumiBlockProd = cms.EDProducer("edmtest::global::TestBeginLumiBlockProducer",
transitions = cms.int32(int(nEvt/nEvtLumi))
)
process.TestEndLumiBlockProd = cms.EDProducer("edmtest::global::TestEndLumiBlockProducer",
transitions = cms.int32(int(nEvt/nEvtLumi))
)
process.StreamIntAn = cms.EDAnalyzer("edmtest::global::StreamIntAnalyzer",
transitions = cms.int32(int(nEvt+nStreams*(2*(nEvt/nEvtRun)+2*(nEvt/nEvtLumi)+2)))
,cachevalue = cms.int32(1)
)
process.RunIntAn= cms.EDAnalyzer("edmtest::global::RunIntAnalyzer",
transitions = cms.int32(int(nEvt+2*(nEvt/nEvtRun)))
,cachevalue = cms.int32(nEvtRun)
)
process.LumiIntAn = cms.EDAnalyzer("edmtest::global::LumiIntAnalyzer",
transitions = cms.int32(int(nEvt+2*(nEvt/nEvtLumi)))
,cachevalue = cms.int32(nEvtLumi)
# needed to avoid deleting TestAccumulator1
,moduleLabel = cms.InputTag("TestAccumulator1")
)
process.RunSumIntAn = cms.EDAnalyzer("edmtest::global::RunSummaryIntAnalyzer",
transitions = cms.int32(int(nEvt+nStreams*((nEvt/nEvtRun)+1)+2*(nEvt/nEvtRun)))
,cachevalue = cms.int32(nEvtRun)
)
process.LumiSumIntAn = cms.EDAnalyzer("edmtest::global::LumiSummaryIntAnalyzer",
transitions = cms.int32(int(nEvt+nStreams*((nEvt/nEvtLumi)+1)+2*(nEvt/nEvtLumi)))
,cachevalue = cms.int32(nEvtLumi)
)
process.ProcessBlockIntAn = cms.EDAnalyzer("edmtest::global::ProcessBlockIntAnalyzer",
transitions = cms.int32(652),
consumesBeginProcessBlock = cms.InputTag("TestBeginProcessBlockProd" ,"begin"),
consumesEndProcessBlock = cms.InputTag("TestEndProcessBlockProd", "end")
)
process.StreamIntFil = cms.EDFilter("edmtest::global::StreamIntFilter",
transitions = cms.int32(int(nEvt+nStreams*(2*(nEvt/nEvtRun)+2*(nEvt/nEvtLumi)+2)))
,cachevalue = cms.int32(1)
)
process.RunIntFil = cms.EDFilter("edmtest::global::RunIntFilter",
transitions = cms.int32(int(nEvt+2*(nEvt/nEvtRun)))
,cachevalue = cms.int32(nEvtRun)
)
process.LumiIntFil = cms.EDFilter("edmtest::global::LumiIntFilter",
transitions = cms.int32(int(nEvt+2*(nEvt/nEvtLumi)))
,cachevalue = cms.int32(nEvtLumi)
)
process.RunSumIntFil = cms.EDFilter("edmtest::global::RunSummaryIntFilter",
transitions = cms.int32(int(nEvt+nStreams*((nEvt/nEvtRun)+1)+2*(nEvt/nEvtRun)))
,cachevalue = cms.int32(nEvtRun)
)
process.LumiSumIntFil = cms.EDFilter("edmtest::global::LumiSummaryIntFilter",
transitions = cms.int32(int(nEvt+nStreams*((nEvt/nEvtLumi)+1)+2*(nEvt/nEvtLumi)))
,cachevalue = cms.int32(nEvtLumi)
)
process.ProcessBlockIntFil = cms.EDFilter("edmtest::global::ProcessBlockIntFilter",
transitions = cms.int32(int(nEvt + 2)),
consumesBeginProcessBlock = cms.InputTag("TestBeginProcessBlockFil" ,"begin"),
consumesEndProcessBlock = cms.InputTag("TestEndProcessBlockFil", "end")
)
process.TestBeginProcessBlockFil = cms.EDFilter("edmtest::global::TestBeginProcessBlockFilter",
transitions = cms.int32(int(nEvt + 1)),
consumesBeginProcessBlock = cms.InputTag("")
)
process.TestBeginProcessBlockFilRead = cms.EDFilter("edmtest::global::TestBeginProcessBlockFilter",
transitions = cms.int32(int(nEvt + 1)),
consumesBeginProcessBlock = cms.InputTag("TestBeginProcessBlockFil" ,"begin")
)
process.TestEndProcessBlockFil = cms.EDFilter("edmtest::global::TestEndProcessBlockFilter",
transitions = cms.int32(int(nEvt + 1)),
consumesEndProcessBlock = cms.InputTag("")
)
process.TestEndProcessBlockFilRead = cms.EDFilter("edmtest::global::TestEndProcessBlockFilter",
transitions = cms.int32(int(nEvt + 1)),
consumesEndProcessBlock = cms.InputTag("TestEndProcessBlockFil", "end")
)
process.TestBeginRunFil = cms.EDFilter("edmtest::global::TestBeginRunFilter",
transitions = cms.int32(int(nEvt/nEvtRun))
)
process.TestEndRunFil = cms.EDFilter("edmtest::global::TestEndRunFilter",
transitions = cms.int32(int(nEvt/nEvtRun))
)
process.TestBeginLumiBlockFil = cms.EDFilter("edmtest::global::TestBeginLumiBlockFilter",
transitions = cms.int32(int(nEvt/nEvtLumi))
)
process.TestEndLumiBlockFil = cms.EDFilter("edmtest::global::TestEndLumiBlockFilter",
transitions = cms.int32(int(nEvt/nEvtLumi))
)
process.TestAccumulator1 = cms.EDProducer("edmtest::global::TestAccumulator",
expectedCount = cms.uint32(512)
)
process.TestAccumulator2 = cms.EDProducer("edmtest::global::TestAccumulator",
expectedCount = cms.uint32(35)
)
process.testFilterModule = cms.EDFilter("TestFilterModule",
acceptValue = cms.untracked.int32(5),
onlyOne = cms.untracked.bool(False)
)
process.task = cms.Task(process.TestAccumulator1)
process.p = cms.Path(process.StreamIntProd +
process.RunIntProd +
process.LumiIntProd +
process.RunSumIntProd +
process.LumiSumIntProd +
process.ProcessBlockIntProd +
process.TestBeginProcessBlockProdRead +
process.TestBeginProcessBlockProd +
process.TestEndProcessBlockProdRead +
process.TestEndProcessBlockProd +
process.TestBeginRunProd +
process.TestEndRunProd +
process.TestBeginLumiBlockProd +
process.TestEndLumiBlockProd +
process.StreamIntAn +
process.RunIntAn +
process.LumiIntAn +
process.RunSumIntAn +
process.LumiSumIntAn +
process.ProcessBlockIntAn +
process.StreamIntFil +
process.RunIntFil +
process.LumiIntFil +
process.RunSumIntFil +
process.LumiSumIntFil +
process.ProcessBlockIntFil +
process.TestBeginProcessBlockFilRead +
process.TestBeginProcessBlockFil +
process.TestEndProcessBlockFilRead +
process.TestEndProcessBlockFil +
process.TestBeginRunFil +
process.TestEndRunFil +
process.TestBeginLumiBlockFil +
process.TestEndLumiBlockFil +
process.testFilterModule +
process.TestAccumulator2,
process.task)
|
#!/usr/bin/env python3
import cv2
import time
import os
from server.startracker.catalog import Catalog
from server.startracker.image import ImageUtils
assets_dir = "/home/igarcia/Nextcloud/University/TFG/reports/assets"
catalogs_path = "./server/startracker/catalogs/out"
catalog = Catalog(f"{catalogs_path}/hip_2000.csv",
f"{catalogs_path}/guide_stars_2000_5.csv",
f"{catalogs_path}/guide_stars_2000_5_labels.csv")
images_path = "./server/startracker/test_images/ballon"
total = 0
found = 0
not_enough = 0
for filename in os.listdir(images_path):
print(f"processing image: {filename}")
image = cv2.imread(f"{images_path}/{filename}")
gray_img = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# Noise reduction
blurred = cv2.GaussianBlur(gray_img, (3, 3), 0)
threshold = ImageUtils.get_threshold(blurred, 170)
thresh_image = cv2.threshold(blurred, threshold, 255, cv2.THRESH_BINARY)[1]
stars = ImageUtils.get_image_stars(thresh_image, gray_img)
if len(stars) >= 4:
pattern = catalog.find_stars_pattern(stars[0:4], err=0.010)
if len(pattern) > 0:
found += 1
else:
print(f"Not enough stars for: {filename}")
not_enough += 1
total += 1
print(f"Analyzed a total of {total} stars")
print(f"Found solution for {found}")
print(f"Not enough stars {not_enough}")
|
#!/usr/bin/env python
#
# Electrum - lightweight STRAKS client
# Copyright (C) 2013 ecdsa@github
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from electrum_stak.i18n import _
from electrum_stak.mnemonic import Mnemonic
import electrum_stak.old_mnemonic
from .util import *
from .qrtextedit import ShowQRTextEdit, ScanQRTextEdit
from .completion_text_edit import CompletionTextEdit
def seed_warning_msg(seed):
return ''.join([
"<p>",
_("Please save these {0} words on paper (order is important). "),
_("This seed will allow you to recover your wallet in case "
"of computer failure."),
"</p>",
"<b>" + _("WARNING") + ":</b>",
"<ul>",
"<li>" + _("Never disclose your seed.") + "</li>",
"<li>" + _("Never type it on a website.") + "</li>",
"<li>" + _("Do not store it electronically.") + "</li>",
"</ul>"
]).format(len(seed.split()))
class SeedLayout(QVBoxLayout):
#options
is_bip39 = False
is_ext = False
def seed_options(self):
dialog = QDialog()
vbox = QVBoxLayout(dialog)
if 'ext' in self.options:
cb_ext = QCheckBox(_('Extend this seed with custom words'))
cb_ext.setChecked(self.is_ext)
vbox.addWidget(cb_ext)
if 'bip39' in self.options:
def f(b):
self.is_seed = (lambda x: bool(x)) if b else self.saved_is_seed
self.is_bip39 = b
self.on_edit()
if b:
msg = ' '.join([
'<b>' + _('Warning') + ':</b> ',
_('BIP39 seeds can be imported in Electrum, so that users can access funds locked in other wallets.'),
_('However, we do not generate BIP39 seeds, because they do not meet our safety standard.'),
_('BIP39 seeds do not include a version number, which compromises compatibility with future software.'),
_('We do not guarantee that BIP39 imports will always be supported in Electrum.'),
])
else:
msg = ''
self.seed_warning.setText(msg)
cb_bip39 = QCheckBox(_('BIP39 seed'))
cb_bip39.toggled.connect(f)
cb_bip39.setChecked(self.is_bip39)
vbox.addWidget(cb_bip39)
vbox.addLayout(Buttons(OkButton(dialog)))
if not dialog.exec_():
return None
self.is_ext = cb_ext.isChecked() if 'ext' in self.options else False
self.is_bip39 = cb_bip39.isChecked() if 'bip39' in self.options else False
def __init__(self, seed=None, title=None, icon=True, msg=None, options=None, is_seed=None, passphrase=None, parent=None):
QVBoxLayout.__init__(self)
self.parent = parent
self.options = options
if title:
self.addWidget(WWLabel(title))
self.seed_e = CompletionTextEdit()
if seed:
self.seed_e.setText(seed)
else:
self.seed_e.setTabChangesFocus(True)
self.is_seed = is_seed
self.saved_is_seed = self.is_seed
self.seed_e.textChanged.connect(self.on_edit)
self.initialize_completer()
self.seed_e.setMaximumHeight(75)
hbox = QHBoxLayout()
if icon:
logo = QLabel()
logo.setPixmap(QPixmap(":icons/seed.png").scaledToWidth(64))
logo.setMaximumWidth(60)
hbox.addWidget(logo)
hbox.addWidget(self.seed_e)
self.addLayout(hbox)
hbox = QHBoxLayout()
hbox.addStretch(1)
self.seed_type_label = QLabel('')
hbox.addWidget(self.seed_type_label)
if options:
opt_button = EnterButton(_('Options'), self.seed_options)
hbox.addWidget(opt_button)
self.addLayout(hbox)
if passphrase:
hbox = QHBoxLayout()
passphrase_e = QLineEdit()
passphrase_e.setText(passphrase)
passphrase_e.setReadOnly(True)
hbox.addWidget(QLabel(_("Your seed extension is") + ':'))
hbox.addWidget(passphrase_e)
self.addLayout(hbox)
self.addStretch(1)
self.seed_warning = WWLabel('')
if msg:
self.seed_warning.setText(seed_warning_msg(seed))
self.addWidget(self.seed_warning)
def initialize_completer(self):
english_list = Mnemonic('en').wordlist
old_list = electrum_stak.old_mnemonic.words
self.wordlist = english_list + list(set(old_list) - set(english_list)) #concat both lists
self.wordlist.sort()
self.completer = QCompleter(self.wordlist)
self.seed_e.set_completer(self.completer)
def get_seed(self):
text = self.seed_e.text()
return ' '.join(text.split())
def on_edit(self):
from electrum_stak.bitcoin import seed_type
s = self.get_seed()
b = self.is_seed(s)
if not self.is_bip39:
t = seed_type(s)
label = _('Seed Type') + ': ' + t if t else ''
else:
from electrum_stak.keystore import bip39_is_checksum_valid
is_checksum, is_wordlist = bip39_is_checksum_valid(s)
status = ('checksum: ' + ('ok' if is_checksum else 'failed')) if is_wordlist else 'unknown wordlist'
label = 'BIP39' + ' (%s)'%status
self.seed_type_label.setText(label)
self.parent.next_button.setEnabled(b)
# to account for bip39 seeds
for word in self.get_seed().split(" ")[:-1]:
if word not in self.wordlist:
self.seed_e.disable_suggestions()
return
self.seed_e.enable_suggestions()
class KeysLayout(QVBoxLayout):
def __init__(self, parent=None, title=None, is_valid=None, allow_multi=False):
QVBoxLayout.__init__(self)
self.parent = parent
self.is_valid = is_valid
self.text_e = ScanQRTextEdit(allow_multi=allow_multi)
self.text_e.textChanged.connect(self.on_edit)
self.addWidget(WWLabel(title))
self.addWidget(self.text_e)
def get_text(self):
return self.text_e.text()
def on_edit(self):
b = self.is_valid(self.get_text())
self.parent.next_button.setEnabled(b)
class SeedDialog(WindowModalDialog):
def __init__(self, parent, seed, passphrase):
WindowModalDialog.__init__(self, parent, ('Electrum-STAK - ' + _('Seed')))
self.setMinimumWidth(400)
vbox = QVBoxLayout(self)
title = _("Your wallet generation seed is:")
slayout = SeedLayout(title=title, seed=seed, msg=True, passphrase=passphrase)
vbox.addLayout(slayout)
vbox.addLayout(Buttons(CloseButton(self)))
|
"""
Module for retrieving climate data from PCBr.
The project documentation can be found on the portal
http://pclima.inpe.br/
The choices for a data download are defined through
a JSON document that can be generated with the API portal.
http://pclima.inpe.br/analise/API
Python version it was tested with: 3.6
Example of API usage
Token: see the documentation on how to generate the Token
import api as api
Client = api.Client()
data = Client.getData(
{ "formato": "CSV", "conjunto": "PR0002", "modelo": "MO0003", "experimento": "EX0003", "periodo": "PE0000", "cenario": "CE0001", "variavel": "VR0001", "frequenciaURL": "Mensal", "frequencia": "FR0003", "produto": "PDT0001", "localizacao": "Ponto", "localizacao_pontos": "-23.56/-46.62", "varCDO": "tasmax" }
)
Client.save(data,"file.csv")
"""
import os
import json
from pclima.factory import RequestFactory
class Client(object):
"""
    Class used to create a client for accessing the API.
Attributes
----------
token : str
        Defined in the ~/.pclimaAPIrc file
format : str
        Set when a download is requested
"""
def __init__(self, token=os.environ.get("API_TOKEN"),):
"""
Parameters
----------
token : str
            Access key for the API services
"""
self.token = token
self.format = None
dotrc = os.environ.get("PCLIMAAPI_RC", os.path.expanduser("~/.pclimaAPIrc"))
if token is None:
if os.path.exists(dotrc):
config = read_config(dotrc)
if token is None:
token = config.get("token")
if token is None:
print("Missing/incomplete configuration file: %s" % (dotrc))
raise SystemExit
self.token = token
def getData(self,apiJSON):
"""
Method
-------
        This method sends the JSON and returns the requested data.
Parameters
----------
apiJSON : json
            JSON with the chosen options
Returns
-------
        The return value depends on the chosen format:
        Format                Return:
NetCDF XArray
CSV DataFrame
JSON DataFrame
CSVPontos DataFrame
CSVPontosTransposta DataFrame
"""
j = json.loads(json.dumps(apiJSON))
print(j)
self.format = j["formato"]
factory = RequestFactory()
product = factory.get_order(j["formato"],self.token,j)
print(product)
return (product.download())
def save(self,content,file):
"""
Method
-------
        This method receives the retrieved data and the name of the
        output file.
Parameters
----------
        content : data in the requested format
            The retrieved data
        file : output file name
            Name of the output file, e.g. "saida.csv"
"""
factory = RequestFactory()
factory.save(self.format,content,file)
def read_config(path):
config = {}
with open(path) as f:
for l in f.readlines():
if ":" in l:
k, v = l.strip().split(":", 1)
                if k == "token":
config[k] = v.strip()
return config
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.FileItem import FileItem
from alipay.aop.api.constant.ParamConstants import *
class AnttechBlockchainQueryconditionQueryRequest(object):
def __init__(self, biz_model=None):
self._biz_model = biz_model
self._version = "1.0"
self._terminal_type = None
self._terminal_info = None
self._prod_code = None
self._notify_url = None
self._return_url = None
self._udf_params = None
self._need_encrypt = False
@property
def biz_model(self):
return self._biz_model
@biz_model.setter
def biz_model(self, value):
self._biz_model = value
@property
def version(self):
return self._version
@version.setter
def version(self, value):
self._version = value
@property
def terminal_type(self):
return self._terminal_type
@terminal_type.setter
def terminal_type(self, value):
self._terminal_type = value
@property
def terminal_info(self):
return self._terminal_info
@terminal_info.setter
def terminal_info(self, value):
self._terminal_info = value
@property
def prod_code(self):
return self._prod_code
@prod_code.setter
def prod_code(self, value):
self._prod_code = value
@property
def notify_url(self):
return self._notify_url
@notify_url.setter
def notify_url(self, value):
self._notify_url = value
@property
def return_url(self):
return self._return_url
@return_url.setter
def return_url(self, value):
self._return_url = value
@property
def udf_params(self):
return self._udf_params
@udf_params.setter
def udf_params(self, value):
if not isinstance(value, dict):
return
self._udf_params = value
@property
def need_encrypt(self):
return self._need_encrypt
@need_encrypt.setter
def need_encrypt(self, value):
self._need_encrypt = value
def add_other_text_param(self, key, value):
if not self.udf_params:
self.udf_params = dict()
self.udf_params[key] = value
def get_params(self):
params = dict()
params[P_METHOD] = 'anttech.blockchain.querycondition.query'
params[P_VERSION] = self.version
if self.biz_model:
params[P_BIZ_CONTENT] = json.dumps(obj=self.biz_model.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
if self.terminal_type:
params['terminal_type'] = self.terminal_type
if self.terminal_info:
params['terminal_info'] = self.terminal_info
if self.prod_code:
params['prod_code'] = self.prod_code
if self.notify_url:
params['notify_url'] = self.notify_url
if self.return_url:
params['return_url'] = self.return_url
if self.udf_params:
params.update(self.udf_params)
return params
def get_multipart_params(self):
multipart_params = dict()
return multipart_params
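# Illustrative usage sketch (the URL and parameter values are hypothetical):
# build a request without a biz model and inspect the flat parameter dict
# that get_params() would hand to the gateway client.
if __name__ == "__main__":
    request = AnttechBlockchainQueryconditionQueryRequest()
    request.notify_url = "https://example.com/notify"
    request.add_other_text_param("trace_id", "demo-123")
    print(request.get_params())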
|
"""
Article object definitions
"""
from collections import OrderedDict
from elifearticle import utils
class BaseObject:
"base object for shared functions"
def __str__(self):
"""
Return `str` representation of the simple object properties,
if there is a list or dict just return an empty representation
for easier viewing and test case scenario writing
"""
_dict = {}
for key in self.__dict__:
if isinstance(self.__dict__.get(key), list):
_dict[key] = []
elif isinstance(self.__dict__.get(key), dict):
_dict[key] = {}
else:
_dict[key] = str(self.__dict__.get(key))
return str(_dict)
class Article(BaseObject):
"""
    We include some boilerplate in the init, namely article_type
"""
contributors = []
def __init__(self, doi=None, title=None):
self.article_type = "research-article"
self.display_channel = None
self.doi = doi
self.id = None
self.contributors = []
self.editors = []
self.title = title
self.abstract = ""
self.abstract_json = None
self.abstract_xml = None
self.digest = None
self.research_organisms = []
self.manuscript = None
self.dates = {}
self.license = None
self.article_categories = []
self.conflict_default = None
self.ethics = []
self.author_keywords = []
self.funding_awards = []
self.ref_list = []
self.component_list = []
        # For the PubMed function, a hook to specify whether the article was ever through the PoA pipeline
self.was_ever_poa = None
self.is_poa = None
self.volume = None
self.elocation_id = None
self.pii = None
self.related_articles = []
self.version = None
self.datasets = []
self.data_availability = None
self.funding_note = None
self.journal_issn = None
self.journal_title = None
self.self_uri_list = []
self.publisher_name = None
self.issue = None
self.review_articles = []
self.clinical_trials = []
self.preprint = None
self.related_objects = []
def add_contributor(self, contributor):
self.contributors.append(contributor)
def add_research_organism(self, research_organism):
self.research_organisms.append(research_organism)
def add_date(self, date):
self.dates[date.date_type] = date
def get_date(self, date_type):
"get date by date type"
try:
return self.dates[date_type]
except (KeyError, TypeError):
return None
def get_display_channel(self):
"display-channel string partly relates to the article_type"
return self.display_channel
def add_article_category(self, article_category):
self.article_categories.append(article_category)
def has_contributor_conflict(self):
# Return True if any contributors have a conflict
for contributor in self.contributors:
if contributor.conflict:
return True
return False
def add_ethic(self, ethic):
self.ethics.append(ethic)
def add_author_keyword(self, author_keyword):
self.author_keywords.append(author_keyword)
def add_dataset(self, dataset):
self.datasets.append(dataset)
def get_datasets(self, dataset_type=None):
if dataset_type:
return [d for d in self.datasets if d.dataset_type == dataset_type]
return self.datasets
def add_funding_award(self, funding_award):
self.funding_awards.append(funding_award)
def add_self_uri(self, uri):
self.self_uri_list.append(uri)
def get_self_uri(self, content_type):
"return the first self uri with the content_type"
try:
return [
self_uri
for self_uri in self.self_uri_list
if self_uri.content_type == content_type
][0]
except IndexError:
return None
def pretty(self):
"sort values and format output for viewing and comparing in test scenarios"
pretty_obj = OrderedDict()
for key, value in sorted(self.__dict__.items()):
if value is None:
pretty_obj[key] = None
elif isinstance(value, str):
pretty_obj[key] = self.__dict__.get(key)
elif isinstance(value, list):
pretty_obj[key] = []
elif isinstance(value, dict):
pretty_obj[key] = {}
else:
pretty_obj[key] = str(value)
return pretty_obj
class ArticleDate(BaseObject):
"""
A struct_time date and a date_type
"""
date_type = None
date = None
pub_type = None
publication_format = None
day = None
month = None
year = None
def __init__(self, date_type, date):
self.date_type = date_type
# Date as a time.struct_time
self.date = date
class Contributor(BaseObject):
"""
Currently we are not sure that we can get an auth_id for
all contributors, so this attribute remains an optional attribute.
"""
corresp = False
equal_contrib = False
contrib_type = None
auth_id = None
orcid = None
surname = None
given_name = None
suffix = None
collab = None
conflict = []
group_author_key = None
def __init__(self, contrib_type, surname, given_name, collab=None):
self.contrib_type = contrib_type
self.surname = surname
self.given_name = given_name
self.affiliations = []
self.conflict = []
self.collab = collab
def set_affiliation(self, affiliation):
self.affiliations.append(affiliation)
def set_conflict(self, conflict):
self.conflict.append(conflict)
class Affiliation(BaseObject):
phone = None
fax = None
email = None
department = None
institution = None
city = None
country = None
text = None
def __init__(self):
pass
class Dataset(BaseObject):
"""
Article component representing a dataset
"""
def __init__(self):
self.dataset_type = None
self.authors = []
# source_id is the uri in PoA generation
        # todo: refactor PoA to use the uri attribute, then delete the source_id attribute here
self.source_id = None
self.year = None
self.title = None
self.license_info = None
self.accession_id = None
self.assigning_authority = None
self.doi = None
self.uri = None
self.comment = None
def add_author(self, author):
self.authors.append(author)
class FundingAward(BaseObject):
"""
An award group as part of a funding group
"""
def __init__(self):
self.award_group_id = None
self.award_ids = []
self.institution_name = None
self.institution_id = None
self.principal_award_recipients = []
def add_award_id(self, award_id):
self.award_ids.append(award_id)
def add_principal_award_recipient(self, contributor):
"Accepts an instance of Contributor"
self.principal_award_recipients.append(contributor)
def get_funder_identifier(self):
"Funder identifier is the unique id found in the institution_id DOI"
try:
return self.institution_id.split("/")[-1]
except AttributeError:
return None
def get_funder_name(self):
"Alias for institution_name parsed from the XML"
return self.institution_name
class License(BaseObject):
"""
License with some preset values by license_id
"""
license_id = None
license_type = None
copyright = False
copyright_statement = None
href = None
name = None
paragraph1 = None
paragraph2 = None
def __init__(self, license_id=None):
self.license_id = license_id
class Citation(BaseObject):
"""
A ref or citation in the article to support crossref VOR deposits initially
"""
def __init__(self):
self.publication_type = None
self.id = None
self.authors = []
# For journals
self.article_title = None
self.source = None
self.volume = None
self.issue = None
self.fpage = None
self.lpage = None
self.elocation_id = None
self.doi = None
self.uri = None
self.pmid = None
self.isbn = None
self.year = None
self.year_iso_8601_date = None
self.year_numeric = None
self.date_in_citation = None
self.publisher_loc = None
self.publisher_name = None
self.edition = None
self.version = None
self.comment = None
self.data_title = None
self.conf_name = None
# For patents
self.patent = None
self.country = None
# For books
self.volume_title = None
self.chapter_title = None
# For data
self.accession = None
def add_author(self, author):
"Author is a dict of values"
self.authors.append(author)
def get_journal_title(self):
"Alias for source"
return self.source
class Component(BaseObject):
"""
An article component with a component DOI, primarily for crossref VOR deposits
"""
def __init__(self):
self.id = None
self.type = None
self.asset = None
self.title = None
self.subtitle = None
self.mime_type = None
self.doi = None
self.doi_resource = None
self.permissions = None
class RelatedArticle(BaseObject):
"""
Related article tag data as an object
"""
def __init__(self):
self.xlink_href = None
self.related_article_type = None
self.ext_link_type = None
class Uri(BaseObject):
"A URI, initially created for holding self-uri data"
def __init__(self):
self.xlink_href = None
self.content_type = None
class RelatedObject(BaseObject):
def __init__(self):
self.id = None
self.xlink_href = None
self.link_type = None
class ClinicalTrial(BaseObject):
def __init__(self):
self.id = None
self.content_type = None
self.document_id = None
self.document_id_type = None
self.source_id = None
self.source_id_type = None
self.source_type = None
self.text = None
self.xlink_href = None
self.registry_doi = None
def get_registry_doi(self, registry_name_to_doi_map=None):
"""return the DOI for the registry"""
if self.registry_doi:
return self.registry_doi
if (
self.source_id_type
and self.source_id
and self.source_id_type == "crossref-doi"
):
return self.source_id
if (
registry_name_to_doi_map
and self.source_id_type
and self.source_id
and self.source_id_type == "registry-name"
):
# look for the DOI value in the name to DOI map
if self.source_id in registry_name_to_doi_map:
return registry_name_to_doi_map[self.source_id]
return None
class Preprint(BaseObject):
def __init__(self, uri=None, doi=None):
self.uri = uri
self.doi = doi
class ContentBlock:
def __init__(self, block_type=None, content=None, attr=None):
self.block_type = block_type
self.content = content
self.content_blocks = []
self.attr = {}
if attr:
self.attr = attr
def attr_names(self):
"""list of tag attribute names"""
return utils.attr_names(self.attr)
def attr_string(self):
"""tag attributes formatted as a string"""
return utils.attr_string(self.attr)
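# Illustrative sketch (all values are hypothetical): assemble a minimal Article
# with one Contributor and a dated event, then inspect it via pretty().
if __name__ == "__main__":
    import time
    article = Article(doi="10.7554/eLife.00000", title="An example article")
    article.add_contributor(Contributor("author", "Doe", "Jane"))
    article.add_date(ArticleDate("pub", time.strptime("2020-01-01", "%Y-%m-%d")))
    print(article.pretty())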
|
# tests.test_testing
import io
from unittest import TestCase
from unittest import mock
from bdemeta.testing import trim, run_one, RunResult, run_tests, MockRunner
def gen_value(length):
result = ''
for i in range(length):
result += chr(ord('A') + (i % 26))
return result
class TestMockRunner(TestCase):
def test_no_cases(self):
runner = MockRunner('')
assert(RunResult.NO_SUCH_CASE == runner('foo'))
assert(['foo'] == runner.commands)
def test_one_success(self):
runner = MockRunner('s')
assert(RunResult.SUCCESS == runner('foo'))
assert(RunResult.NO_SUCH_CASE == runner('bar'))
assert(['foo', 'bar'] == runner.commands)
def test_one_failure(self):
runner = MockRunner('f')
assert(RunResult.FAILURE == runner('foo'))
assert(RunResult.NO_SUCH_CASE == runner('bar'))
assert(['foo', 'bar'] == runner.commands)
def test_one_success_one_failure(self):
runner = MockRunner('sf')
assert(RunResult.SUCCESS == runner('foo'))
assert(RunResult.FAILURE == runner('bar'))
assert(RunResult.NO_SUCH_CASE == runner('bam'))
assert(['foo', 'bar', 'bam'] == runner.commands)
class TestTrim(TestCase):
def test_short_string_unmodified(self):
max_length = 20
trail = '...'
for length in range(max_length - len(trail)):
value = gen_value(length)
trimmed = trim(value, max_length)
assert(len(trimmed) < max_length)
            assert(value == trimmed)
def test_string_clipped(self):
value = gen_value(20)
trail = '...'
for max_length in range(len(value) + len(trail) + 20):
trimmed = trim(value, max_length)
assert(len(trimmed) <= max_length)
if len(value) <= max_length:
assert(value == trimmed)
elif max_length >= len(trail):
assert(trimmed.endswith(trail))
class TestRunOne(TestCase):
def test_driver_with_no_cases(self):
runner = MockRunner('')
test, errors = run_one((runner, 'foo', 'foo'))
assert('foo' == test)
assert(1 == len(runner.commands))
assert(['foo', '1'] == runner.commands[0])
assert(not errors)
def test_driver_with_four_successes(self):
runner = MockRunner('ssss')
test, errors = run_one((runner, 'foo', 'foo'))
assert('foo' == test)
assert(5 == len(runner.commands))
assert(['foo', '1'] == runner.commands[0])
assert(['foo', '2'] == runner.commands[1])
assert(['foo', '3'] == runner.commands[2])
assert(['foo', '4'] == runner.commands[3])
assert(['foo', '5'] == runner.commands[4])
assert(not errors)
def test_driver_with_one_failure(self):
runner = MockRunner('f')
test, errors = run_one((runner, 'foo', 'foo'))
assert('foo' == test)
assert(2 == len(runner.commands))
assert(['foo', '1'] == runner.commands[0])
assert(['foo', '2'] == runner.commands[1])
assert(1 == len(errors))
assert(1 in errors)
def test_driver_with_mixed_successes_failures(self):
runner = MockRunner('sfsf')
test, errors = run_one((runner, 'foo', 'foo'))
assert('foo' == test)
assert(5 == len(runner.commands))
assert(['foo', '1'] == runner.commands[0])
assert(['foo', '2'] == runner.commands[1])
assert(['foo', '3'] == runner.commands[2])
assert(['foo', '4'] == runner.commands[3])
assert(['foo', '5'] == runner.commands[4])
assert(2 == len(errors))
assert(2 in errors)
assert(4 in errors)
class TestRun(TestCase):
def test_single_success(self):
stdout = io.StringIO()
stderr = io.StringIO()
runner = MockRunner('s')
rc = run_tests(stdout, stderr, runner, lambda: 80, [["foo", "foo"]])
assert(0 == rc)
assert('foo' in stderr.getvalue())
def test_single_failure(self):
stdout = io.StringIO()
stderr = io.StringIO()
runner = MockRunner('f')
rc = run_tests(stdout, stderr, runner, lambda: 80, [["foo", "foo"]])
assert(1 == rc)
assert('foo' in stderr.getvalue())
failures = stdout.getvalue().split('\n')[:-1]
assert(1 == len(failures))
assert('FAIL TEST foo CASE 1' in failures)
def test_two_drivers_four_successes_each(self):
stdout = io.StringIO()
stderr = io.StringIO()
runner = MockRunner('ssss')
rc = run_tests(stdout,
stderr,
runner,
lambda: 80,
[["foo", "foo"], ["bar", "bar"]])
assert(0 == rc)
assert('foo' in stderr.getvalue())
assert('bar' in stderr.getvalue())
def test_two_drivers_mixed_successes(self):
stdout = io.StringIO()
stderr = io.StringIO()
runner = MockRunner('sfsf')
rc = run_tests(stdout,
stderr,
runner,
lambda: 80,
[["foo", "foo"], ["bar", "bar"]])
assert(1 == rc)
assert('foo' in stderr.getvalue())
assert('bar' in stderr.getvalue())
failures = stdout.getvalue().split('\n')[:-1]
assert(4 == len(failures))
assert('FAIL TEST foo CASE 2' in failures)
assert('FAIL TEST foo CASE 4' in failures)
assert('FAIL TEST bar CASE 2' in failures)
assert('FAIL TEST bar CASE 4' in failures)
def test_tty_has_no_newlines(self):
stdout = io.StringIO()
stderr = mock.Mock(wraps=io.StringIO())
stderr.isatty.return_value = True
runner = MockRunner('s')
rc = run_tests(stdout, stderr, runner, lambda: 80, [["foo", "foo"]])
assert(0 == rc)
assert('\n' not in stderr.getvalue()[:-1])
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import tinymce.models
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='Goods',
fields=[
('id', models.AutoField(primary_key=True, serialize=False, verbose_name='ID', auto_created=True)),
('create_time', models.DateTimeField(auto_now_add=True, verbose_name='创建时间')),
('update_time', models.DateTimeField(auto_now=True, verbose_name='更新时间')),
('is_delete', models.BooleanField(default=False, verbose_name='是否删除')),
('name', models.CharField(max_length=20, verbose_name='商品SPU名称')),
('detail', tinymce.models.HTMLField(verbose_name='商品详情', blank=True)),
],
options={
'verbose_name_plural': '商品SPU',
'verbose_name': '商品SPU',
'db_table': 'df_goods',
},
),
migrations.CreateModel(
name='GoodsImage',
fields=[
('id', models.AutoField(primary_key=True, serialize=False, verbose_name='ID', auto_created=True)),
('create_time', models.DateTimeField(auto_now_add=True, verbose_name='创建时间')),
('update_time', models.DateTimeField(auto_now=True, verbose_name='更新时间')),
('is_delete', models.BooleanField(default=False, verbose_name='是否删除')),
('image', models.ImageField(upload_to='goods', verbose_name='图片路径')),
],
options={
'verbose_name_plural': '商品图片',
'verbose_name': '商品图片',
'db_table': 'df_goods_image',
},
),
migrations.CreateModel(
name='GoodsSKU',
fields=[
('id', models.AutoField(primary_key=True, serialize=False, verbose_name='ID', auto_created=True)),
('create_time', models.DateTimeField(auto_now_add=True, verbose_name='创建时间')),
('update_time', models.DateTimeField(auto_now=True, verbose_name='更新时间')),
('is_delete', models.BooleanField(default=False, verbose_name='是否删除')),
('name', models.CharField(max_length=20, verbose_name='商品名称')),
('desc', models.CharField(max_length=256, verbose_name='商品简介')),
('price', models.DecimalField(verbose_name='商品价格', decimal_places=2, max_digits=10)),
('unite', models.CharField(max_length=20, verbose_name='商品单位')),
('image', models.ImageField(upload_to='goods', verbose_name='商品图片')),
('stock', models.IntegerField(default=1, verbose_name='商品库存')),
('sales', models.IntegerField(default=0, verbose_name='商品销量')),
('status', models.SmallIntegerField(default=1, choices=[(0, '下架'), (1, '上架')], verbose_name='商品状态')),
('goods', models.ForeignKey(to='goods.Goods', verbose_name='商品SPU')),
],
options={
'verbose_name_plural': '商品',
'verbose_name': '商品',
'db_table': 'df_goods_sku',
},
),
migrations.CreateModel(
name='GoodsType',
fields=[
('id', models.AutoField(primary_key=True, serialize=False, verbose_name='ID', auto_created=True)),
('create_time', models.DateTimeField(auto_now_add=True, verbose_name='创建时间')),
('update_time', models.DateTimeField(auto_now=True, verbose_name='更新时间')),
('is_delete', models.BooleanField(default=False, verbose_name='是否删除')),
('name', models.CharField(max_length=20, verbose_name='种类名称')),
('logo', models.CharField(max_length=20, verbose_name='标识')),
('image', models.ImageField(upload_to='type', verbose_name='商品类型图片')),
],
options={
'verbose_name_plural': '商品种类',
'verbose_name': '商品种类',
'db_table': 'df_goods_type',
},
),
migrations.CreateModel(
name='IndexGoodsBanner',
fields=[
('id', models.AutoField(primary_key=True, serialize=False, verbose_name='ID', auto_created=True)),
('create_time', models.DateTimeField(auto_now_add=True, verbose_name='创建时间')),
('update_time', models.DateTimeField(auto_now=True, verbose_name='更新时间')),
('is_delete', models.BooleanField(default=False, verbose_name='是否删除')),
('image', models.ImageField(upload_to='banner', verbose_name='图片')),
('index', models.SmallIntegerField(default=0, verbose_name='展示顺序')),
('sku', models.ForeignKey(to='goods.GoodsSKU', verbose_name='商品')),
],
options={
'verbose_name_plural': '首页轮播商品',
'verbose_name': '首页轮播商品',
'db_table': 'df_index_banner',
},
),
migrations.CreateModel(
name='IndexPromotionBanner',
fields=[
('id', models.AutoField(primary_key=True, serialize=False, verbose_name='ID', auto_created=True)),
('create_time', models.DateTimeField(auto_now_add=True, verbose_name='创建时间')),
('update_time', models.DateTimeField(auto_now=True, verbose_name='更新时间')),
('is_delete', models.BooleanField(default=False, verbose_name='是否删除')),
('name', models.CharField(max_length=20, verbose_name='活动名称')),
('url', models.CharField(max_length=256, verbose_name='活动链接')),
('image', models.ImageField(upload_to='banner', verbose_name='活动图片')),
('index', models.SmallIntegerField(default=0, verbose_name='展示顺序')),
],
options={
'verbose_name_plural': '主页促销活动',
'verbose_name': '主页促销活动',
'db_table': 'df_index_promotion',
},
),
migrations.CreateModel(
name='IndexTypeGoodsBanner',
fields=[
('id', models.AutoField(primary_key=True, serialize=False, verbose_name='ID', auto_created=True)),
('create_time', models.DateTimeField(auto_now_add=True, verbose_name='创建时间')),
('update_time', models.DateTimeField(auto_now=True, verbose_name='更新时间')),
('is_delete', models.BooleanField(default=False, verbose_name='是否删除')),
('display_type', models.SmallIntegerField(default=1, choices=[(0, '标题'), (1, '图片')], verbose_name='展示类型')),
('index', models.SmallIntegerField(default=0, verbose_name='展示顺序')),
('sku', models.ForeignKey(to='goods.GoodsSKU', verbose_name='商品SKU')),
('type', models.ForeignKey(to='goods.GoodsType', verbose_name='商品类型')),
],
options={
'verbose_name_plural': '主页分类展示商品',
'verbose_name': '主页分类展示商品',
'db_table': 'df_index_type_goods',
},
),
migrations.AddField(
model_name='goodssku',
name='type',
field=models.ForeignKey(to='goods.GoodsType', verbose_name='商品种类'),
),
migrations.AddField(
model_name='goodsimage',
name='sku',
field=models.ForeignKey(to='goods.GoodsSKU', verbose_name='商品'),
),
]
|
#!/usr/bin/env python
from __future__ import division, absolute_import, print_function
import numpy as np
from scipy.stats import t
"""
Defines common error measures.
Definition
----------
def bias(y_obs,y_mod): bias
def mae(y_obs,y_mod): mean absolute error
def mse(y_obs,y_mod): mean squared error
def rmse(y_obs,y_mod): root mean squared error
def nse(y_obs,y_mod): Nash-Sutcliffe-Efficiency
def kge(y_obs,y_mod): Kling-Gupta-Efficiency
def pear2(y_obs,y_mod): Squared Pearson correlation coefficient
def confint(y_obs, p=0.95): Confidence interval of samples
Input
-----
y_obs np.array(N) or np.ma.array(N)
y_mod np.array(N) or np.ma.array(N)
Output
------
measure float: error measure of respective error function
(see definitions for details)
Restrictions
------------
Deals with masked and unmasked arrays. When nan is found in an unmasked
array, it will be masked. All measures are applied only on values where
both, y_obs and y_mod, have valid entries (not masked and not nan)
Examples
--------
-> see respective function
License
-------
This file is part of the JAMS Python package, distributed under the MIT
License. The JAMS Python package originates from the former UFZ Python library,
Department of Computational Hydrosystems, Helmholtz Centre for Environmental
Research - UFZ, Leipzig, Germany.
Copyright (c) 2014-2017 Arndt Piayda, Stephan Thober, Matthias Cuntz - mc (at) macu (dot) de
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
History
-------
Written, AP, Jul 2014
Modified MC, Dec 2014 - use simple formulas that work with normal and masked arrays but do not deal with NaN
Modified AP, Sep 2015 - add confidence interval
Modified ST, Nov 2015 - added KGE
Modified ST, Jan 2017 - added components for KGE
"""
def bias(y_obs,y_mod):
"""
calculates bias = mean(y_obs) - mean(y_mod)
Examples
--------
>>> # Create some data
>>> y_obs = np.array([12.7867, 13.465, 14.1433, 15.3733, 16.6033])
>>> y_mod = np.array([12.8087, 13.151, 14.3741, 16.2302, 17.9433])
>>> # calculate bias
>>> print(np.round(bias(y_obs, y_mod),2))
-0.43
"""
# # check
# if (y_obs.ndim!=1) or (y_mod.ndim!=1):
# raise ValueError('bias: input must be 1D')
# elif y_obs.size!=y_mod.size:
# raise ValueError('bias: input must be of same size')
# # calc
# else:
# # check if masked or not
# try:
# temp = y_obs.mask
# temp = y_mod.mask
# except AttributeError:
# y_obs=np.ma.array(y_obs, mask=np.isnan(y_obs))
# y_mod=np.ma.array(y_mod, mask=np.isnan(y_mod))
# y_modr = np.ma.array(y_mod, mask=y_mod.mask | y_obs.mask)
# y_obsr = np.ma.array(y_obs, mask=y_mod.mask | y_obs.mask)
# return np.ma.mean(y_obsr) - np.ma.mean(y_modr)
return y_obs.mean() - y_mod.mean()
def mae(y_obs,y_mod):
"""
calculates mean absolute error = mean(abs(y_obs - y_mod))
Examples
--------
>>> # Create some data
>>> y_obs = np.array([12.7867, 13.465, 14.1433, 15.3733, 16.6033])
>>> y_mod = np.array([12.8087, 13.151, 14.3741, 16.2302, 17.9433])
>>> # calculate mean absolute error
>>> print(np.round(mae(y_obs, y_mod),2))
0.55
"""
# check
if (y_obs.ndim!=1) or (y_mod.ndim!=1):
raise ValueError('mae: input must be 1D')
elif y_obs.size!=y_mod.size:
raise ValueError('mae: input must be of same size')
# calc
else:
# check if masked or not
try:
temp = y_obs.mask
temp = y_mod.mask
except AttributeError:
y_obs=np.ma.array(y_obs, mask=np.isnan(y_obs))
y_mod=np.ma.array(y_mod, mask=np.isnan(y_mod))
y_modr = np.ma.array(y_mod, mask=y_mod.mask | y_obs.mask)
y_obsr = np.ma.array(y_obs, mask=y_mod.mask | y_obs.mask)
return np.ma.mean(np.ma.abs(y_obsr-y_modr))
def mse(y_obs,y_mod):
"""
calculates mean squared error = mean((y_obs - y_mod)**2)
Examples
--------
>>> # Create some data
>>> y_obs = np.array([12.7867, 13.465, 14.1433, 15.3733, 16.6033])
>>> y_mod = np.array([12.8087, 13.151, 14.3741, 16.2302, 17.9433])
>>> # calculate mean squared error
>>> print(np.round(mse(y_obs, y_mod),2))
0.54
"""
# # check
# if (y_obs.ndim!=1) or (y_mod.ndim!=1):
# raise ValueError('mse: input must be 1D')
# elif y_obs.size!=y_mod.size:
# raise ValueError('mse: input must be of same size')
# # calc
# else:
# # check if masked or not
# try:
# temp = y_obs.mask
# temp = y_mod.mask
# except AttributeError:
# y_obs=np.ma.array(y_obs, mask=np.isnan(y_obs))
# y_mod=np.ma.array(y_mod, mask=np.isnan(y_mod))
# y_modr = np.ma.array(y_mod, mask=y_mod.mask | y_obs.mask)
# y_obsr = np.ma.array(y_obs, mask=y_mod.mask | y_obs.mask)
# return np.ma.mean((y_obsr-y_modr)**2)
return ((y_obs-y_mod)**2).mean()
def rmse(y_obs,y_mod):
"""
calculates root mean squared error = sqrt(mean((y_obs - y_mod)**2))
Examples
--------
>>> # Create some data
>>> y_obs = np.array([12.7867, 13.465, 14.1433, 15.3733, 16.6033])
>>> y_mod = np.array([12.8087, 13.151, 14.3741, 16.2302, 17.9433])
>>> # calculate root mean squared error
>>> print(np.round(rmse(y_obs, y_mod),2))
0.73
"""
# # check
# if (y_obs.ndim!=1) or (y_mod.ndim!=1):
# raise ValueError('rmse: input must be 1D')
# elif y_obs.size!=y_mod.size:
# raise ValueError('rmse: input must be of same size')
# # calc
# else:
# # check if masked or not
# try:
# temp = y_obs.mask
# temp = y_mod.mask
# except AttributeError:
# y_obs=np.ma.array(y_obs, mask=np.isnan(y_obs))
# y_mod=np.ma.array(y_mod, mask=np.isnan(y_mod))
# y_modr = np.ma.array(y_mod, mask=y_mod.mask | y_obs.mask)
# y_obsr = np.ma.array(y_obs, mask=y_mod.mask | y_obs.mask)
# return np.ma.sqrt(np.ma.mean((y_obsr-y_modr)**2))
return ((y_obs-y_mod)**2).mean()**0.5
def nse(y_obs,y_mod):
"""
calculates Nash-Sutcliffe-Efficiency = 1 - (sum((y_obs - y_mod)**2) / sum((y_obs - mean(y_obs))**2))
Examples
--------
>>> # Create some data
>>> y_obs = np.array([12.7867, 13.465, 14.1433, 15.3733, 16.6033])
>>> y_mod = np.array([12.8087, 13.151, 14.3741, 16.2302, 17.9433])
>>> # calculate Nash-Sutcliffe-Efficiency
>>> print(np.round(nse(y_obs, y_mod),2))
0.71
"""
# # check
# if (y_obs.ndim!=1) or (y_mod.ndim!=1):
# raise ValueError('r2: input must be 1D')
# elif y_obs.size!=y_mod.size:
# raise ValueError('r2: input must be of same size')
# # calc
# else:
# # check if masked or not
# try:
# temp = y_obs.mask
# temp = y_mod.mask
# except AttributeError:
# y_obs=np.ma.array(y_obs, mask=np.isnan(y_obs))
# y_mod=np.ma.array(y_mod, mask=np.isnan(y_mod))
# y_modr = np.ma.array(y_mod, mask=y_mod.mask | y_obs.mask)
# y_obsr = np.ma.array(y_obs, mask=y_mod.mask | y_obs.mask)
# a = np.ma.sum((y_obsr - y_modr)**2)
# b = np.ma.sum((y_obsr - np.ma.mean(y_obsr))**2)
# return 1. - (a / b)
return 1. - ((y_obs-y_mod)**2).sum()/((y_obs-y_obs.mean())**2).sum()
def kge(y_obs,y_mod,components=False):
"""
    calculates Kling-Gupta-Efficiency = 1 - sqrt((1-r)**2 + (1-alpha)**2 + (1-beta)**2),
    where r is the Pearson correlation of y_obs and y_mod,
    alpha is std(y_mod) / std(y_obs), and
    beta is mean(y_mod) / mean(y_obs).
    If components is True, then r, alpha, and beta are (in this order) additionally returned with the KGE.
Examples
--------
>>> # Create some data
>>> y_obs = np.array([12.7867, 13.465, 14.1433, 15.3733, 16.6033])
>>> y_mod = np.array([12.8087, 13.151, 14.3741, 16.2302, 17.9433])
>>> # calculate Kling-Gupta-Efficiency
>>> print(np.round(kge(y_obs, y_mod),2))
0.58
"""
# # check
# if (y_obs.ndim!=1) or (y_mod.ndim!=1):
# raise ValueError('r2: input must be 1D')
# elif y_obs.size!=y_mod.size:
# raise ValueError('r2: input must be of same size')
# # calc
# else:
# # check if masked or not
# try:
# temp = y_obs.mask
# temp = y_mod.mask
# except AttributeError:
# y_obs=np.ma.array(y_obs, mask=np.isnan(y_obs))
# y_mod=np.ma.array(y_mod, mask=np.isnan(y_mod))
# y_modr = np.ma.array(y_mod, mask=y_mod.mask | y_obs.mask)
# y_obsr = np.ma.array(y_obs, mask=y_mod.mask | y_obs.mask)
# r = np.ma.corrcoef(y_obsr, y_modr)[0, 1]
# a = np.ma.mean(y_modr) / np.ma.mean(y_obsr)
# b = np.ma.std(y_modr) / np.ma.std(y_obsr)
# return 1. - np.sqrt((1 - r)**2 + (1 - a)**2 + (1 - b)**2)
r = np.corrcoef(y_obs, y_mod)[0, 1]
alpha = np.std(y_mod) / np.std(y_obs)
beta = np.mean(y_mod) / np.mean(y_obs)
if components:
return 1. - np.sqrt((1 - r)**2 + (1 - beta)**2 + (1 - alpha)**2), r, alpha, beta
else:
return 1. - np.sqrt((1 - r)**2 + (1 - beta)**2 + (1 - alpha)**2)
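def _kge_components_sketch():
    """
    Illustrative helper (not part of the original module): shows how to unpack
    the optional component return values of kge(). Uses the same example data
    as the doctests above; numpy is assumed to be imported as np at module level.
    """
    y_obs = np.array([12.7867, 13.465, 14.1433, 15.3733, 16.6033])
    y_mod = np.array([12.8087, 13.151, 14.3741, 16.2302, 17.9433])
    # With components=True, kge() returns (kge, r, alpha, beta): r is the Pearson
    # correlation, alpha the ratio of standard deviations and beta the ratio of
    # means (model over observations).
    kge_val, r, alpha, beta = kge(y_obs, y_mod, components=True)
    return kge_val, r, alpha, beta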
def pear2(y_obs,y_mod):
"""
    calculates the squared Pearson correlation coefficient
Examples
--------
>>> # Create some data
>>> y_obs = np.array([12.7867, 13.465, 14.1433, 15.3733, 16.6033])
>>> y_mod = np.array([12.8087, 13.151, 14.3741, 16.2302, 17.9433])
>>> # calculate Squared Pearson correlation coefficient
>>> print(np.round(pear2(y_obs, y_mod),2))
0.99
"""
# # check
# if (y_obs.ndim!=1) or (y_mod.ndim!=1):
# raise ValueError('pear2: input must be 1D')
# elif y_obs.size!=y_mod.size:
# raise ValueError('pear2: input must be of same size')
# # calc
# else:
# # check if masked or not
# try:
# temp = y_obs.mask
# temp = y_mod.mask
# except AttributeError:
# y_obs=np.ma.array(y_obs, mask=np.isnan(y_obs))
# y_mod=np.ma.array(y_mod, mask=np.isnan(y_mod))
# y_modr = np.ma.array(y_mod, mask=y_mod.mask | y_obs.mask)
# y_obsr = np.ma.array(y_obs, mask=y_mod.mask | y_obs.mask)
# return np.corrcoef(y_obsr.compressed(), y_modr.compressed())[0,1]**2
    # square of the Pearson correlation coefficient (cf. the docstring and the
    # commented reference implementation above)
    return (((y_obs-y_obs.mean())*(y_mod-y_mod.mean())).mean()/(y_obs.std()*y_mod.std()))**2
def confint(y_obs, p=0.95):
"""
    calculates the confidence interval of the sample mean using a
    Student t-distribution at a given confidence level p (default p=0.95)
Examples
--------
>>> # Create some data
>>> y_obs = np.array([12.7867, 13.465, 14.1433, 15.3733, 16.6033])
    >>> # calculate confidence interval
>>> print(np.round(confint(y_obs)))
[13. 16.]
"""
s = y_obs.size
return np.array(t.interval(p, s-1., loc=y_obs.mean(), scale=y_obs.std()/np.sqrt(s)))
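def _missing_value_sketch():
    """
    Illustrative helper (not part of the original module): mae() masks NaNs in
    both inputs and joins the two masks, whereas the measures that use the
    simple formulas (e.g. bias, mse, rmse, nse; see the History note above)
    propagate NaN. numpy is assumed to be imported as np at module level.
    """
    y_obs = np.array([1.0, 2.0, np.nan, 4.0])
    y_mod = np.array([1.1, np.nan, 3.0, 4.2])
    # Only the first and last pairs enter the mean absolute error; bias()
    # would simply return NaN for the same inputs.
    return mae(y_obs, y_mod)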
if __name__ == '__main__':
import doctest
doctest.testmod()
|
# -*- coding: utf-8 -*-
"""Finitely Presented Groups and its algorithms. """
from __future__ import print_function, division
from sympy.core.basic import Basic
from sympy.core import Symbol, Mod
from sympy.printing.defaults import DefaultPrinting
from sympy.utilities import public
from sympy.utilities.iterables import flatten
from sympy.combinatorics.free_groups import (FreeGroup, FreeGroupElement,
free_group, zero_mul_simp)
from sympy.combinatorics.rewritingsystem import RewritingSystem
from sympy.combinatorics.coset_table import (CosetTable,
coset_enumeration_r,
coset_enumeration_c)
from sympy.combinatorics import PermutationGroup
from itertools import product
@public
def fp_group(fr_grp, relators=[]):
_fp_group = FpGroup(fr_grp, relators)
    return (_fp_group,) + tuple(_fp_group.generators)
@public
def xfp_group(fr_grp, relators=[]):
_fp_group = FpGroup(fr_grp, relators)
    return (_fp_group, _fp_group.generators)
@public
def vfp_group(fr_grpm, relators):
    _fp_group = FpGroup(fr_grpm, relators)
pollute([sym.name for sym in _fp_group.symbols], _fp_group.generators)
return _fp_group
def _parse_relators(rels):
"""Parse the passed relators."""
return rels
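# Illustrative sketch (not part of the module): the convenience constructor
# fp_group() above returns the group followed by its generators, mirroring
# free_group() from sympy.combinatorics.free_groups.
def _fp_group_constructor_sketch():
    F, x, y = free_group("x, y")
    G, a, b = fp_group(F, [x**2, y**3, (x*y)**4])   # a, b are the free-group generators x, y
    return G, a, b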
###############################################################################
# FINITELY PRESENTED GROUPS #
###############################################################################
class FpGroup(DefaultPrinting):
"""
    An FpGroup is constructed from a FreeGroup and a list/tuple of relators.
    The relators are specified in such a way that each of them equals the
    identity of the provided free group.
"""
is_group = True
is_FpGroup = True
is_PermutationGroup = False
def __init__(self, fr_grp, relators):
relators = _parse_relators(relators)
self.free_group = fr_grp
self.relators = relators
self.generators = self._generators()
self.dtype = type("FpGroupElement", (FpGroupElement,), {"group": self})
# CosetTable instance on identity subgroup
self._coset_table = None
# returns whether coset table on identity subgroup
# has been standardized
self._is_standardized = False
self._order = None
self._center = None
self._rewriting_system = RewritingSystem(self)
self._perm_isomorphism = None
return
def _generators(self):
return self.free_group.generators
def make_confluent(self):
'''
Try to make the group's rewriting system confluent
'''
self._rewriting_system.make_confluent()
return
def reduce(self, word):
'''
Return the reduced form of `word` in `self` according to the group's
rewriting system. If it's confluent, the reduced form is the unique normal
form of the word in the group.
'''
return self._rewriting_system.reduce(word)
def equals(self, word1, word2):
'''
Compare `word1` and `word2` for equality in the group
using the group's rewriting system. If the system is
confluent, the returned answer is necessarily correct.
(If it isn't, `False` could be returned in some cases
where in fact `word1 == word2`)
'''
if self.reduce(word1*word2**-1) == self.identity:
return True
elif self._rewriting_system.is_confluent:
return False
return None
@property
def identity(self):
return self.free_group.identity
def __contains__(self, g):
return g in self.free_group
def subgroup(self, gens, C=None, homomorphism=False):
'''
Return the subgroup generated by `gens` using the
Reidemeister-Schreier algorithm
homomorphism -- When set to True, return a dictionary containing the images
of the presentation generators in the original group.
Examples
========
>>> from sympy.combinatorics.fp_groups import (FpGroup, FpSubgroup)
>>> from sympy.combinatorics.free_groups import free_group
>>> F, x, y = free_group("x, y")
>>> f = FpGroup(F, [x**3, y**5, (x*y)**2])
>>> H = [x*y, x**-1*y**-1*x*y*x]
>>> K, T = f.subgroup(H, homomorphism=True)
>>> T(K.generators)
[x*y, x**-1*y**2*x**-1]
'''
if not all([isinstance(g, FreeGroupElement) for g in gens]):
raise ValueError("Generators must be `FreeGroupElement`s")
if not all([g.group == self.free_group for g in gens]):
raise ValueError("Given generators are not members of the group")
if homomorphism:
g, rels, _gens = reidemeister_presentation(self, gens, C=C, homomorphism=True)
else:
g, rels = reidemeister_presentation(self, gens, C=C)
if g:
g = FpGroup(g[0].group, rels)
else:
g = FpGroup(free_group('')[0], [])
if homomorphism:
from sympy.combinatorics.homomorphisms import homomorphism
return g, homomorphism(g, self, g.generators, _gens, check=False)
return g
def coset_enumeration(self, H, strategy="relator_based", max_cosets=None,
draft=None, incomplete=False):
"""
        Return an instance of ``CosetTable`` obtained by running the
        Todd-Coxeter algorithm on ``self`` with ``H`` as the subgroup, using
        the given ``strategy``. The returned coset table is compressed but not
        standardized.
An instance of `CosetTable` for `fp_grp` can be passed as the keyword
argument `draft` in which case the coset enumeration will start with
that instance and attempt to complete it.
When `incomplete` is `True` and the function is unable to complete for
some reason, the partially complete table will be returned.
"""
if not max_cosets:
max_cosets = CosetTable.coset_table_max_limit
if strategy == 'relator_based':
C = coset_enumeration_r(self, H, max_cosets=max_cosets,
draft=draft, incomplete=incomplete)
else:
C = coset_enumeration_c(self, H, max_cosets=max_cosets,
draft=draft, incomplete=incomplete)
if C.is_complete():
C.compress()
return C
def standardize_coset_table(self):
"""
        Standardizes the coset table of ``self`` and sets the internal variable
        ``_is_standardized`` to ``True``.
"""
self._coset_table.standardize()
self._is_standardized = True
def coset_table(self, H, strategy="relator_based", max_cosets=None,
draft=None, incomplete=False):
"""
Return the mathematical coset table of ``self`` in ``H``.
"""
if not H:
if self._coset_table != None:
if not self._is_standardized:
self.standardize_coset_table()
else:
C = self.coset_enumeration([], strategy, max_cosets=max_cosets,
draft=draft, incomplete=incomplete)
self._coset_table = C
self.standardize_coset_table()
return self._coset_table.table
else:
C = self.coset_enumeration(H, strategy, max_cosets=max_cosets,
draft=draft, incomplete=incomplete)
C.standardize()
return C.table
def order(self, strategy="relator_based"):
"""
        Returns the order of the finitely presented group ``self``. It uses
        coset enumeration with the identity subgroup, i.e. ``H=[]``.
Examples
========
>>> from sympy.combinatorics.free_groups import free_group
>>> from sympy.combinatorics.fp_groups import FpGroup
>>> F, x, y = free_group("x, y")
>>> f = FpGroup(F, [x, y**2])
>>> f.order(strategy="coset_table_based")
2
"""
from sympy import S, gcd
if self._order != None:
return self._order
if self._coset_table != None:
self._order = len(self._coset_table.table)
elif len(self.relators) == 0:
self._order = self.free_group.order()
elif len(self.generators) == 1:
self._order = abs(gcd([r.array_form[0][1] for r in self.relators]))
elif self._is_infinite():
self._order = S.Infinity
else:
gens, C = self._finite_index_subgroup()
if C:
ind = len(C.table)
self._order = ind*self.subgroup(gens, C=C).order()
else:
self._order = self.index([])
return self._order
def _is_infinite(self):
'''
Test if the group is infinite. Return `True` if the test succeeds
and `None` otherwise
'''
used_gens = set()
for r in self.relators:
used_gens.update(r.contains_generators())
if any([g not in used_gens for g in self.generators]):
return True
        # Abelianisation test: check if the abelianisation is infinite
abelian_rels = []
from sympy.polys.solvers import RawMatrix as Matrix
from sympy.polys.domains import ZZ
from sympy.matrices.normalforms import invariant_factors
for rel in self.relators:
abelian_rels.append([rel.exponent_sum(g) for g in self.generators])
m = Matrix(abelian_rels)
setattr(m, "ring", ZZ)
if 0 in invariant_factors(m):
return True
else:
return None
def _finite_index_subgroup(self, s=[]):
'''
Find the elements of `self` that generate a finite index subgroup
and, if found, return the list of elements and the coset table of `self` by
the subgroup, otherwise return `(None, None)`
'''
gen = self.most_frequent_generator()
rels = list(self.generators)
rels.extend(self.relators)
if not s:
if len(self.generators) == 2:
s = [gen] + [g for g in self.generators if g != gen]
else:
rand = self.free_group.identity
i = 0
while ((rand in rels or rand**-1 in rels or rand.is_identity)
and i<10):
rand = self.random()
i += 1
s = [gen, rand] + [g for g in self.generators if g != gen]
mid = (len(s)+1)//2
half1 = s[:mid]
half2 = s[mid:]
draft1 = None
draft2 = None
m = 200
C = None
while not C and (m/2 < CosetTable.coset_table_max_limit):
m = min(m, CosetTable.coset_table_max_limit)
draft1 = self.coset_enumeration(half1, max_cosets=m,
draft=draft1, incomplete=True)
if draft1.is_complete():
C = draft1
half = half1
else:
draft2 = self.coset_enumeration(half2, max_cosets=m,
draft=draft2, incomplete=True)
if draft2.is_complete():
C = draft2
half = half2
if not C:
m *= 2
if not C:
return None, None
C.compress()
return half, C
def most_frequent_generator(self):
gens = self.generators
rels = self.relators
freqs = [sum([r.generator_count(g) for r in rels]) for g in gens]
return gens[freqs.index(max(freqs))]
def random(self):
import random
r = self.free_group.identity
for i in range(random.randint(2,3)):
r = r*random.choice(self.generators)**random.choice([1,-1])
return r
def index(self, H, strategy="relator_based"):
"""
Return the index of subgroup ``H`` in group ``self``.
Examples
========
>>> from sympy.combinatorics.free_groups import free_group
>>> from sympy.combinatorics.fp_groups import FpGroup
>>> F, x, y = free_group("x, y")
>>> f = FpGroup(F, [x**5, y**4, y*x*y**3*x**3])
>>> f.index([x])
4
"""
# TODO: use |G:H| = |G|/|H| (currently H can't be made into a group)
# when we know |G| and |H|
if H == []:
return self.order()
else:
C = self.coset_enumeration(H, strategy)
return len(C.table)
def __str__(self):
if self.free_group.rank > 30:
str_form = "<fp group with %s generators>" % self.free_group.rank
else:
str_form = "<fp group on the generators %s>" % str(self.generators)
return str_form
__repr__ = __str__
#==============================================================================
# PERMUTATION GROUP METHODS
#==============================================================================
def _to_perm_group(self):
'''
Return an isomorphic permutation group and the isomorphism.
The implementation is dependent on coset enumeration so
will only terminate for finite groups.
'''
from sympy.combinatorics import Permutation, PermutationGroup
from sympy.combinatorics.homomorphisms import homomorphism
from sympy import S
if self.order() == S.Infinity:
raise NotImplementedError("Permutation presentation of infinite "
"groups is not implemented")
if self._perm_isomorphism:
T = self._perm_isomorphism
P = T.image()
else:
C = self.coset_table([])
gens = self.generators
images = [[C[i][2*gens.index(g)] for i in range(len(C))] for g in gens]
images = [Permutation(i) for i in images]
P = PermutationGroup(images)
T = homomorphism(self, P, gens, images, check=False)
self._perm_isomorphism = T
return P, T
def _perm_group_list(self, method_name, *args):
'''
Given the name of a `PermutationGroup` method (returning a subgroup
or a list of subgroups) and (optionally) additional arguments it takes,
return a list or a list of lists containing the generators of this (or
these) subgroups in terms of the generators of `self`.
'''
P, T = self._to_perm_group()
perm_result = getattr(P, method_name)(*args)
single = False
if isinstance(perm_result, PermutationGroup):
perm_result, single = [perm_result], True
result = []
for group in perm_result:
gens = group.generators
result.append(T.invert(gens))
return result[0] if single else result
def derived_series(self):
'''
Return the list of lists containing the generators
of the subgroups in the derived series of `self`.
'''
return self._perm_group_list('derived_series')
def lower_central_series(self):
'''
Return the list of lists containing the generators
of the subgroups in the lower central series of `self`.
'''
return self._perm_group_list('lower_central_series')
def center(self):
'''
Return the list of generators of the center of `self`.
'''
return self._perm_group_list('center')
def derived_subgroup(self):
'''
Return the list of generators of the derived subgroup of `self`.
'''
return self._perm_group_list('derived_subgroup')
def centralizer(self, other):
'''
Return the list of generators of the centralizer of `other`
(a list of elements of `self`) in `self`.
'''
T = self._to_perm_group()[1]
other = T(other)
return self._perm_group_list('centralizer', other)
def normal_closure(self, other):
'''
Return the list of generators of the normal closure of `other`
(a list of elements of `self`) in `self`.
'''
T = self._to_perm_group()[1]
other = T(other)
return self._perm_group_list('normal_closure', other)
def _perm_property(self, attr):
'''
Given an attribute of a `PermutationGroup`, return
its value for a permutation group isomorphic to `self`.
'''
P = self._to_perm_group()[0]
return getattr(P, attr)
@property
def is_abelian(self):
'''
Check if `self` is abelian.
'''
return self._perm_property("is_abelian")
@property
def is_nilpotent(self):
'''
Check if `self` is nilpotent.
'''
return self._perm_property("is_nilpotent")
@property
def is_solvable(self):
'''
Check if `self` is solvable.
'''
return self._perm_property("is_solvable")
@property
def elements(self):
'''
List the elements of `self`.
'''
P, T = self._to_perm_group()
return T.invert(P._elements)
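# Illustrative usage sketch for the FpGroup API above (not part of the module);
# it mirrors the doctests of order(), index() and subgroup().
def _fp_group_api_sketch():
    F, x, y = free_group("x, y")
    f = FpGroup(F, [x**3, y**5, (x*y)**2])
    size = f.order()                              # via coset enumeration over the trivial subgroup
    idx = f.index([x])                            # index of the subgroup generated by x
    K = f.subgroup([x*y, x**-1*y**-1*x*y*x])      # Reidemeister-Schreier presentation of the subgroup
    return size, idx, K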
class FpSubgroup(DefaultPrinting):
'''
The class implementing a subgroup of an FpGroup or a FreeGroup
(only finite index subgroups are supported at this point). This
is to be used if one wishes to check if an element of the original
group belongs to the subgroup
'''
def __init__(self, G, gens, normal=False):
super(FpSubgroup,self).__init__()
self.parent = G
self.generators = list(set([g for g in gens if g != G.identity]))
self._min_words = None #for use in __contains__
self.C = None
self.normal = normal
def __contains__(self, g):
if isinstance(self.parent, FreeGroup):
if self._min_words is None:
# make _min_words - a list of subwords such that
# g is in the subgroup if and only if it can be
# partitioned into these subwords. Infinite families of
                # subwords are represented by tuples, e.g. (r, w)
# stands for the family of subwords r*w**n*r**-1
def _process(w):
# this is to be used before adding new words
# into _min_words; if the word w is not cyclically
# reduced, it will generate an infinite family of
# subwords so should be written as a tuple;
# if it is, w**-1 should be added to the list
# as well
p, r = w.cyclic_reduction(removed=True)
if not r.is_identity:
return [(r, p)]
else:
return [w, w**-1]
# make the initial list
gens = []
for w in self.generators:
if self.normal:
w = w.cyclic_reduction()
gens.extend(_process(w))
for w1 in gens:
for w2 in gens:
# if w1 and w2 are equal or are inverses, continue
if w1 == w2 or (not isinstance(w1, tuple)
and w1**-1 == w2):
continue
# if the start of one word is the inverse of the
# end of the other, their multiple should be added
# to _min_words because of cancellation
if isinstance(w1, tuple):
# start, end
s1, s2 = w1[0][0], w1[0][0]**-1
else:
s1, s2 = w1[0], w1[len(w1)-1]
if isinstance(w2, tuple):
# start, end
r1, r2 = w2[0][0], w2[0][0]**-1
else:
                            r1, r2 = w2[0], w2[len(w2)-1]
# p1 and p2 are w1 and w2 or, in case when
# w1 or w2 is an infinite family, a representative
p1, p2 = w1, w2
if isinstance(w1, tuple):
p1 = w1[0]*w1[1]*w1[0]**-1
if isinstance(w2, tuple):
p2 = w2[0]*w2[1]*w2[0]**-1
                        # add the product of the words to the list if necessary
if r1**-1 == s2 and not (p1*p2).is_identity:
new = _process(p1*p2)
if not new in gens:
gens.extend(new)
if r2**-1 == s1 and not (p2*p1).is_identity:
new = _process(p2*p1)
if not new in gens:
gens.extend(new)
self._min_words = gens
min_words = self._min_words
def _is_subword(w):
# check if w is a word in _min_words or one of
# the infinite families in it
w, r = w.cyclic_reduction(removed=True)
if r.is_identity or self.normal:
return w in min_words
else:
t = [s[1] for s in min_words if isinstance(s, tuple)
and s[0] == r]
return [s for s in t if w.power_of(s)] != []
# store the solution of words for which the result of
# _word_break (below) is known
known = {}
def _word_break(w):
# check if w can be written as a product of words
# in min_words
if len(w) == 0:
return True
i = 0
while i < len(w):
i += 1
prefix = w.subword(0, i)
if not _is_subword(prefix):
continue
rest = w.subword(i, len(w))
if rest not in known:
known[rest] = _word_break(rest)
if known[rest]:
return True
return False
if self.normal:
g = g.cyclic_reduction()
return _word_break(g)
else:
if self.C is None:
C = self.parent.coset_enumeration(self.generators)
self.C = C
i = 0
C = self.C
for j in range(len(g)):
i = C.table[i][C.A_dict[g[j]]]
return i == 0
def order(self):
if not self.generators:
return 1
if isinstance(self.parent, FreeGroup):
return S.Infinity
if self.C is None:
C = self.parent.coset_enumeration(self.generators)
self.C = C
# This is valid because `len(self.C.table)` (the index of the subgroup)
# will always be finite - otherwise coset enumeration doesn't terminate
return self.parent.order()/len(self.C.table)
def to_FpGroup(self):
if isinstance(self.parent, FreeGroup):
gen_syms = [('x_%d'%i) for i in range(len(self.generators))]
return free_group(', '.join(gen_syms))[0]
return self.parent.subgroup(C=self.C)
def __str__(self):
if len(self.generators) > 30:
str_form = "<fp subgroup with %s generators>" % len(self.generators)
else:
str_form = "<fp subgroup on the generators %s>" % str(self.generators)
return str_form
__repr__ = __str__
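# Illustrative membership-test sketch for FpSubgroup (not part of the module).
# For a FreeGroup parent the test uses the word-break procedure above; for an
# FpGroup parent it falls back on coset enumeration.
def _fp_subgroup_sketch():
    F, x, y = free_group("x, y")
    S = FpSubgroup(F, [x*y])
    return (x*y)**2 in S    # powers of the generator lie in the subgroup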
###############################################################################
# LOW INDEX SUBGROUPS #
###############################################################################
def low_index_subgroups(G, N, Y=[]):
"""
    Implements the Low Index Subgroups algorithm, i.e. find all subgroups of
    ``G`` up to a given index ``N``. This implements the method described in
    [Sim94]. This procedure involves a backtrack search over incomplete Coset
    Tables, rather than over forced coincidences.
    G: An FpGroup < X|R >
    N: positive integer, representing the maximum index value for subgroups
    Y: (an optional argument) specifying a list of subgroup generators, such
    that each of the resulting subgroups contains the subgroup generated by Y.
References
==========
[1] Holt, D., Eick, B., O'Brien, E.
"Handbook of Computational Group Theory"
Section 5.4
[2] Marston Conder and Peter Dobcsanyi
"Applications and Adaptions of the Low Index Subgroups Procedure"
Examples
========
>>> from sympy.combinatorics.free_groups import free_group
>>> from sympy.combinatorics.fp_groups import FpGroup, low_index_subgroups
>>> F, x, y = free_group("x, y")
>>> f = FpGroup(F, [x**2, y**3, (x*y)**4])
>>> L = low_index_subgroups(f, 4)
>>> for coset_table in L:
... print(coset_table.table)
[[0, 0, 0, 0]]
[[0, 0, 1, 2], [1, 1, 2, 0], [3, 3, 0, 1], [2, 2, 3, 3]]
[[0, 0, 1, 2], [2, 2, 2, 0], [1, 1, 0, 1]]
[[1, 1, 0, 0], [0, 0, 1, 1]]
"""
C = CosetTable(G, [])
R = G.relators
# length chosen for the length of the short relators
len_short_rel = 5
# elements of R2 only checked at the last step for complete
# coset tables
R2 = set([rel for rel in R if len(rel) > len_short_rel])
# elements of R1 are used in inner parts of the process to prune
# branches of the search tree,
R1 = set([rel.identity_cyclic_reduction() for rel in set(R) - R2])
R1_c_list = C.conjugates(R1)
S = []
descendant_subgroups(S, C, R1_c_list, C.A[0], R2, N, Y)
return S
def descendant_subgroups(S, C, R1_c_list, x, R2, N, Y):
A_dict = C.A_dict
A_dict_inv = C.A_dict_inv
if C.is_complete():
# if C is complete then it only needs to test
# whether the relators in R2 are satisfied
for w, alpha in product(R2, C.omega):
if not C.scan_check(alpha, w):
return
# relators in R2 are satisfied, append the table to list
S.append(C)
else:
# find the first undefined entry in Coset Table
for alpha, x in product(range(len(C.table)), C.A):
if C.table[alpha][A_dict[x]] is None:
# this is "x" in pseudo-code (using "y" makes it clear)
undefined_coset, undefined_gen = alpha, x
break
        # for filling up the undefined entry we try all possible values
# of β ∈ Ω or β = n where β^(undefined_gen^-1) is undefined
reach = C.omega + [C.n]
for beta in reach:
if beta < N:
if beta == C.n or C.table[beta][A_dict_inv[undefined_gen]] is None:
try_descendant(S, C, R1_c_list, R2, N, undefined_coset, \
undefined_gen, beta, Y)
def try_descendant(S, C, R1_c_list, R2, N, alpha, x, beta, Y):
r"""
    Solves the problem of trying out each individual possibility
    for `\alpha^x`.
"""
D = C.copy()
A_dict = D.A_dict
if beta == D.n and beta < N:
D.table.append([None]*len(D.A))
D.p.append(beta)
D.table[alpha][D.A_dict[x]] = beta
D.table[beta][D.A_dict_inv[x]] = alpha
D.deduction_stack.append((alpha, x))
if not D.process_deductions_check(R1_c_list[D.A_dict[x]], \
R1_c_list[D.A_dict_inv[x]]):
return
for w in Y:
if not D.scan_check(0, w):
return
if first_in_class(D, Y):
descendant_subgroups(S, D, R1_c_list, x, R2, N, Y)
def first_in_class(C, Y=[]):
"""
Checks whether the subgroup ``H=G1`` corresponding to the Coset Table
could possibly be the canonical representative of its conjugacy class.
Parameters
==========
C: CosetTable
Returns
=======
bool: True/False
If this returns False, then no descendant of C can have that property, and
so we can abandon C. If it returns True, then we need to process further
the node of the search tree corresponding to C, and so we call
``descendant_subgroups`` recursively on C.
Examples
========
>>> from sympy.combinatorics.free_groups import free_group
>>> from sympy.combinatorics.fp_groups import FpGroup, CosetTable, first_in_class
>>> F, x, y = free_group("x, y")
>>> f = FpGroup(F, [x**2, y**3, (x*y)**4])
>>> C = CosetTable(f, [])
>>> C.table = [[0, 0, None, None]]
>>> first_in_class(C)
True
>>> C.table = [[1, 1, 1, None], [0, 0, None, 1]]; C.p = [0, 1]
>>> first_in_class(C)
True
>>> C.table = [[1, 1, 2, 1], [0, 0, 0, None], [None, None, None, 0]]
>>> C.p = [0, 1, 2]
>>> first_in_class(C)
False
>>> C.table = [[1, 1, 1, 2], [0, 0, 2, 0], [2, None, 0, 1]]
>>> first_in_class(C)
False
    # TODO: Sims points out in [Sim94] that performance can be improved by
    # remembering some of the information computed by ``first_in_class``. If
    # the ``continue α`` statement is executed at line 14, then the same thing
    # will happen for that value of α in any descendant of the table C, and so
    # the values of α for which this occurs could profitably be stored and
    # passed through to the descendants of C. Of course this would make the
    # code more complicated.
    # The code below is taken directly from the function on page 208 of [Sim94].
"""
n = C.n
# lamda is the largest numbered point in Ω_c_α which is currently defined
lamda = -1
# for α ∈ Ω_c, ν[α] is the point in Ω_c_α corresponding to α
nu = [None]*n
# for α ∈ Ω_c_α, μ[α] is the point in Ω_c corresponding to α
mu = [None]*n
    # ν and μ are the mutually-inverse equivalence maps between
    # Ω_c_α and Ω_c
next_alpha = False
# For each 0≠α ∈ [0 .. nc-1], we start by constructing the equivalent
# standardized coset table C_α corresponding to H_α
for alpha in range(1, n):
# reset ν to "None" after previous value of α
for beta in range(lamda+1):
nu[mu[beta]] = None
# we only want to reject our current table in favour of a preceding
# table in the ordering in which 1 is replaced by α, if the subgroup
# G_α corresponding to this preceding table definitely contains the
# given subgroup
for w in Y:
# TODO: this should support input of a list of general words
# not just the words which are in "A" (i.e gen and gen^-1)
if C.table[alpha][C.A_dict[w]] != alpha:
# continue with α
next_alpha = True
break
if next_alpha:
next_alpha = False
continue
# try α as the new point 0 in Ω_C_α
mu[0] = alpha
nu[alpha] = 0
# compare corresponding entries in C and C_α
lamda = 0
for beta in range(n):
for x in C.A:
gamma = C.table[beta][C.A_dict[x]]
delta = C.table[mu[beta]][C.A_dict[x]]
# if either of the entries is undefined,
# we move with next α
if gamma is None or delta is None:
# continue with α
next_alpha = True
break
if nu[delta] is None:
# delta becomes the next point in Ω_C_α
lamda += 1
nu[delta] = lamda
mu[lamda] = delta
if nu[delta] < gamma:
return False
if nu[delta] > gamma:
# continue with α
next_alpha = True
break
if next_alpha:
next_alpha = False
break
return True
#========================================================================
# Simplifying Presentation
#========================================================================
def simplify_presentation(*args, **kwargs):
'''
For an instance of `FpGroup`, return a simplified isomorphic copy of
the group (e.g. remove redundant generators or relators). Alternatively,
a list of generators and relators can be passed in which case the
simplified lists will be returned.
By default, the generators of the group are unchanged. If you would
like to remove redundant generators, set the keyword argument
`change_gens = True`.
'''
change_gens = kwargs.get("change_gens", False)
if len(args) == 1:
if not isinstance(args[0], FpGroup):
raise TypeError("The argument must be an instance of FpGroup")
G = args[0]
gens, rels = simplify_presentation(G.generators, G.relators,
change_gens=change_gens)
if gens:
return FpGroup(gens[0].group, rels)
return FpGroup([])
elif len(args) == 2:
gens, rels = args[0][:], args[1][:]
identity = gens[0].group.identity
else:
if len(args) == 0:
m = "Not enough arguments"
else:
m = "Too many arguments"
raise RuntimeError(m)
prev_gens = []
prev_rels = []
while not set(prev_rels) == set(rels):
prev_rels = rels
while change_gens and not set(prev_gens) == set(gens):
prev_gens = gens
gens, rels = elimination_technique_1(gens, rels, identity)
rels = _simplify_relators(rels, identity)
if change_gens:
syms = [g.array_form[0][0] for g in gens]
F = free_group(syms)[0]
identity = F.identity
gens = F.generators
subs = dict(zip(syms, gens))
for j, r in enumerate(rels):
a = r.array_form
rel = identity
for sym, p in a:
rel = rel*subs[sym]**p
rels[j] = rel
return gens, rels
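# Illustrative sketch (not part of the module): simplify_presentation() can
# also be called directly on generator/relator lists; here the redundant
# relator x**3*y**2 is reduced away. Passing change_gens=True would
# additionally eliminate redundant generators, as described in the docstring above.
def _simplify_presentation_sketch():
    F, x, y = free_group("x, y")
    return simplify_presentation([x, y], [x**3, y**2, x**3*y**2])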
def _simplify_relators(rels, identity):
"""Relies upon ``_simplification_technique_1`` for its functioning. """
rels = rels[:]
rels = list(set(_simplification_technique_1(rels)))
rels.sort()
rels = [r.identity_cyclic_reduction() for r in rels]
try:
rels.remove(identity)
except ValueError:
pass
return rels
# Pg 350, section 2.5.1 from [2]
def elimination_technique_1(gens, rels, identity):
rels = rels[:]
    # the shorter relators are examined first so that generators selected for
    # elimination will have shorter equivalent strings
rels.sort()
gens = gens[:]
redundant_gens = {}
redundant_rels = []
used_gens = set()
# examine each relator in relator list for any generator occurring exactly
# once
for rel in rels:
# don't look for a redundant generator in a relator which
# depends on previously found ones
contained_gens = rel.contains_generators()
if any([g in contained_gens for g in redundant_gens]):
continue
contained_gens = list(contained_gens)
contained_gens.sort(reverse = True)
for gen in contained_gens:
if rel.generator_count(gen) == 1 and gen not in used_gens:
k = rel.exponent_sum(gen)
gen_index = rel.index(gen**k)
bk = rel.subword(gen_index + 1, len(rel))
fw = rel.subword(0, gen_index)
chi = bk*fw
redundant_gens[gen] = chi**(-1*k)
used_gens.update(chi.contains_generators())
redundant_rels.append(rel)
break
rels = [r for r in rels if r not in redundant_rels]
# eliminate the redundant generators from remaining relators
rels = [r.eliminate_words(redundant_gens, _all = True).identity_cyclic_reduction() for r in rels]
rels = list(set(rels))
try:
rels.remove(identity)
except ValueError:
pass
gens = [g for g in gens if g not in redundant_gens]
return gens, rels
def _simplification_technique_1(rels):
"""
    All relators are checked to see if they are of the form `gen^n`. If any
    such relators are found then all other relators are reduced using the
    known order of `gen`.
Examples
========
>>> from sympy.combinatorics.free_groups import free_group
>>> from sympy.combinatorics.fp_groups import _simplification_technique_1
>>> F, x, y = free_group("x, y")
>>> w1 = [x**2*y**4, x**3]
>>> _simplification_technique_1(w1)
[x**-1*y**4, x**3]
>>> w2 = [x**2*y**-4*x**5, x**3, x**2*y**8, y**5]
>>> _simplification_technique_1(w2)
[x**-1*y*x**-1, x**3, x**-1*y**-2, y**5]
>>> w3 = [x**6*y**4, x**4]
>>> _simplification_technique_1(w3)
[x**2*y**4, x**4]
"""
from sympy import gcd
rels = rels[:]
# dictionary with "gen: n" where gen^n is one of the relators
exps = {}
for i in range(len(rels)):
rel = rels[i]
if rel.number_syllables() == 1:
g = rel[0]
exp = abs(rel.array_form[0][1])
if rel.array_form[0][1] < 0:
rels[i] = rels[i]**-1
g = g**-1
if g in exps:
exp = gcd(exp, exps[g].array_form[0][1])
exps[g] = g**exp
one_syllables_words = exps.values()
# decrease some of the exponents in relators, making use of the single
# syllable relators
for i in range(len(rels)):
rel = rels[i]
if rel in one_syllables_words:
continue
rel = rel.eliminate_words(one_syllables_words, _all = True)
# if rels[i] contains g**n where abs(n) is greater than half of the power p
# of g in exps, g**n can be replaced by g**(n-p) (or g**(p-n) if n<0)
for g in rel.contains_generators():
if g in exps:
exp = exps[g].array_form[0][1]
max_exp = (exp + 1)//2
rel = rel.eliminate_word(g**(max_exp), g**(max_exp-exp), _all = True)
rel = rel.eliminate_word(g**(-max_exp), g**(-(max_exp-exp)), _all = True)
rels[i] = rel
rels = [r.identity_cyclic_reduction() for r in rels]
return rels
###############################################################################
# SUBGROUP PRESENTATIONS #
###############################################################################
# Pg 175 [1]
def define_schreier_generators(C, homomorphism=False):
'''
Arguments:
C -- Coset table.
homomorphism -- When set to True, return a dictionary containing the images
of the presentation generators in the original group.
'''
y = []
gamma = 1
f = C.fp_group
X = f.generators
if homomorphism:
        # `_gens` stores the elements of the parent group to which
        # the Schreier generators correspond.
_gens = {}
# compute the schreier Traversal
tau = {}
tau[0] = f.identity
C.P = [[None]*len(C.A) for i in range(C.n)]
for alpha, x in product(C.omega, C.A):
beta = C.table[alpha][C.A_dict[x]]
if beta == gamma:
C.P[alpha][C.A_dict[x]] = "<identity>"
C.P[beta][C.A_dict_inv[x]] = "<identity>"
gamma += 1
if homomorphism:
tau[beta] = tau[alpha]*x
elif x in X and C.P[alpha][C.A_dict[x]] is None:
y_alpha_x = '%s_%s' % (x, alpha)
y.append(y_alpha_x)
C.P[alpha][C.A_dict[x]] = y_alpha_x
if homomorphism:
_gens[y_alpha_x] = tau[alpha]*x*tau[beta]**-1
grp_gens = list(free_group(', '.join(y)))
C._schreier_free_group = grp_gens.pop(0)
C._schreier_generators = grp_gens
if homomorphism:
C._schreier_gen_elem = _gens
    # replace all elements of P by free group elements
for i, j in product(range(len(C.P)), range(len(C.A))):
# if equals "<identity>", replace by identity element
if C.P[i][j] == "<identity>":
C.P[i][j] = C._schreier_free_group.identity
elif isinstance(C.P[i][j], str):
r = C._schreier_generators[y.index(C.P[i][j])]
C.P[i][j] = r
beta = C.table[i][j]
C.P[beta][j + 1] = r**-1
def reidemeister_relators(C):
R = C.fp_group.relators
rels = [rewrite(C, coset, word) for word in R for coset in range(C.n)]
identity = C._schreier_free_group.identity
order_1_gens = set([i for i in rels if len(i) == 1])
# remove all the order 1 generators from relators
rels = list(filter(lambda rel: rel not in order_1_gens, rels))
# replace order 1 generators by identity element in reidemeister relators
for i in range(len(rels)):
w = rels[i]
w = w.eliminate_words(order_1_gens, _all=True)
rels[i] = w
C._schreier_generators = [i for i in C._schreier_generators
if not (i in order_1_gens or i**-1 in order_1_gens)]
# Tietze transformation 1 i.e TT_1
# remove cyclic conjugate elements from relators
i = 0
while i < len(rels):
w = rels[i]
j = i + 1
while j < len(rels):
if w.is_cyclic_conjugate(rels[j]):
del rels[j]
else:
j += 1
i += 1
C._reidemeister_relators = rels
def rewrite(C, alpha, w):
"""
Parameters
----------
C: CosetTable
α: A live coset
w: A word in `A*`
Returns
-------
ρ(τ(α), w)
Examples
========
>>> from sympy.combinatorics.fp_groups import FpGroup, CosetTable, define_schreier_generators, rewrite
>>> from sympy.combinatorics.free_groups import free_group
>>> F, x, y = free_group("x ,y")
>>> f = FpGroup(F, [x**2, y**3, (x*y)**6])
>>> C = CosetTable(f, [])
>>> C.table = [[1, 1, 2, 3], [0, 0, 4, 5], [4, 4, 3, 0], [5, 5, 0, 2], [2, 2, 5, 1], [3, 3, 1, 4]]
>>> C.p = [0, 1, 2, 3, 4, 5]
>>> define_schreier_generators(C)
>>> rewrite(C, 0, (x*y)**6)
x_4*y_2*x_3*x_1*x_2*y_4*x_5
"""
v = C._schreier_free_group.identity
for i in range(len(w)):
x_i = w[i]
v = v*C.P[alpha][C.A_dict[x_i]]
alpha = C.table[alpha][C.A_dict[x_i]]
return v
# Pg 350, section 2.5.2 from [2]
def elimination_technique_2(C):
"""
This technique eliminates one generator at a time. Heuristically this
seems superior in that we may select for elimination the generator with
shortest equivalent string at each stage.
>>> from sympy.combinatorics.free_groups import free_group
>>> from sympy.combinatorics.fp_groups import FpGroup, coset_enumeration_r, \
reidemeister_relators, define_schreier_generators, elimination_technique_2
>>> F, x, y = free_group("x, y")
>>> f = FpGroup(F, [x**3, y**5, (x*y)**2]); H = [x*y, x**-1*y**-1*x*y*x]
>>> C = coset_enumeration_r(f, H)
>>> C.compress(); C.standardize()
>>> define_schreier_generators(C)
>>> reidemeister_relators(C)
>>> elimination_technique_2(C)
([y_1, y_2], [y_2**-3, y_2*y_1*y_2*y_1*y_2*y_1, y_1**2])
"""
rels = C._reidemeister_relators
rels.sort(reverse=True)
gens = C._schreier_generators
for i in range(len(gens) - 1, -1, -1):
rel = rels[i]
for j in range(len(gens) - 1, -1, -1):
gen = gens[j]
if rel.generator_count(gen) == 1:
k = rel.exponent_sum(gen)
gen_index = rel.index(gen**k)
bk = rel.subword(gen_index + 1, len(rel))
fw = rel.subword(0, gen_index)
rep_by = (bk*fw)**(-1*k)
del rels[i]; del gens[j]
for l in range(len(rels)):
rels[l] = rels[l].eliminate_word(gen, rep_by)
break
C._reidemeister_relators = rels
C._schreier_generators = gens
return C._schreier_generators, C._reidemeister_relators
def reidemeister_presentation(fp_grp, H, C=None, homomorphism=False):
"""
    fp_grp: A finitely presented group, an instance of FpGroup
H: A subgroup whose presentation is to be found, given as a list
of words in generators of `fp_grp`
homomorphism: When set to True, return a homomorphism from the subgroup
to the parent group
Examples
========
>>> from sympy.combinatorics.free_groups import free_group
>>> from sympy.combinatorics.fp_groups import FpGroup, reidemeister_presentation
>>> F, x, y = free_group("x, y")
Example 5.6 Pg. 177 from [1]
>>> f = FpGroup(F, [x**3, y**5, (x*y)**2])
>>> H = [x*y, x**-1*y**-1*x*y*x]
>>> reidemeister_presentation(f, H)
((y_1, y_2), (y_1**2, y_2**3, y_2*y_1*y_2*y_1*y_2*y_1))
Example 5.8 Pg. 183 from [1]
>>> f = FpGroup(F, [x**3, y**3, (x*y)**3])
>>> H = [x*y, x*y**-1]
>>> reidemeister_presentation(f, H)
((x_0, y_0), (x_0**3, y_0**3, x_0*y_0*x_0*y_0*x_0*y_0))
Exercises Q2. Pg 187 from [1]
>>> f = FpGroup(F, [x**2*y**2, y**-1*x*y*x**-3])
>>> H = [x]
>>> reidemeister_presentation(f, H)
((x_0,), (x_0**4,))
Example 5.9 Pg. 183 from [1]
>>> f = FpGroup(F, [x**3*y**-3, (x*y)**3, (x*y**-1)**2])
>>> H = [x]
>>> reidemeister_presentation(f, H)
((x_0,), (x_0**6,))
"""
if not C:
C = coset_enumeration_r(fp_grp, H)
C.compress(); C.standardize()
define_schreier_generators(C, homomorphism=homomorphism)
reidemeister_relators(C)
gens, rels = C._schreier_generators, C._reidemeister_relators
gens, rels = simplify_presentation(gens, rels, change_gens=True)
C.schreier_generators = tuple(gens)
C.reidemeister_relators = tuple(rels)
if homomorphism:
_gens = []
for gen in gens:
_gens.append(C._schreier_gen_elem[str(gen)])
return C.schreier_generators, C.reidemeister_relators, _gens
return C.schreier_generators, C.reidemeister_relators
FpGroupElement = FreeGroupElement
|
"""Example serializing custom types"""
from datetime import datetime
from event_two import event
import json
# Serialize
def default(obj):
"""Encode datetime to string in YYYY-MM-DDTHH:MM:SS format (RFC3339)"""
if isinstance(obj, datetime):
return obj.isoformat()
return obj
def pairs_hook(pairs):
"""Convert the "time" key to datetime"""
    # The object decoded from the JSON pairs is stored here
obj = {}
for key, value in pairs:
if key == 'time':
            # Build a datetime from the ISO string
value = datetime.fromisoformat(value) # Python >= 3.7
        # Put it into the Python dict
obj[key] = value
return obj
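# Alternative sketch: the same datetime handling can also be expressed as a
# json.JSONEncoder subclass and passed to json.dumps via the `cls` argument,
# e.g. json.dumps(event, cls=DateTimeEncoder).
class DateTimeEncoder(json.JSONEncoder):
    """Encode datetime objects as ISO 8601 / RFC3339 strings."""
    def default(self, obj):
        if isinstance(obj, datetime):
            return obj.isoformat()
        # Delegate to the base class, which raises TypeError for unknown types
        return super().default(obj)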
if __name__ =='__main__':
    # Passing the `default` function via the `default` argument
    # serializes the custom datetime value
data = json.dumps(event, default=default)
print(data)
# ----------------------------------------------------------------
    # Deserialize
data_dict = json.loads(data, object_pairs_hook=pairs_hook)
print(data_dict)
print(type(data_dict))
|
from utils import stringifySong
from datetime import datetime
import logging
logger = logging.getLogger(__name__)
class Migrater(object):
"""Migrater"""
def __init__(self, migrateFrom, migrateTo, mock=False):
# Store clients
self.source = migrateFrom
self.target = migrateTo
self.isMocked = mock
logger.info(f"MOCK MIGRATION? {mock}")
def migratePlaylists(self, interactive=True):
# Wrapper to gather input from the user
def should(text):
value = input(text)
return value.strip().lower() == 'y'
# Core to migrate each playlist
def migratePlaylist(playlist):
# Create a new playlist in the target server with a unique name
if not self.isMocked:
targetPlaylist = self.target.createPlaylist(f"{playlist['name']} at {int(datetime.now().timestamp())}")
logger.info(f"Target playlist: '{targetPlaylist['name']}'")
# Add each song in this playlist to the new targetPlaylist
playlist = self.source.getPlaylist(playlist["id"])
for song in playlist['entry']:
try:
matchSong = self.target.findClosestMatchToSong(song)
if matchSong:
logger.info(f"Migrating {stringifySong(song)} as {stringifySong(matchSong)}")
if not self.isMocked:
self.target.addSongToPlaylist(targetPlaylist['id'], matchSong['id'])
else:
logger.warning(f"No match to {stringifySong(song)}")
except Exception as e:
logger.exception(f"Unable to migrate song {stringifySong(song)}")
        # Perform playlists migration
try:
# Get all playlists
playlists = self.source.getPlaylists()
for playlist in playlists:
# Ask if this playlist should be migrated only if running on interactive mode
if not interactive or should(f"Migrate '{playlist['name']}' with {playlist['songCount']} songs Y/[N]? "):
logger.info(f"Migrating playlist '{playlist['name']} ({playlist['songCount']})'")
try:
migratePlaylist(playlist)
except Exception as e:
logger.exception(f"Unable to migrate playlist '{playlist['name']} ({playlist['songCount']})'")
except Exception as e:
logger.exception("Unable to migrate playlists")
def migrateStarred(self):
# TODO: support albums and artists too
try:
songs = self.source.getStarredSongs()
# Migrate each song
logger.info(f"{len(songs)} starred songs to migrate")
for song in songs:
try:
matchSong = self.target.findClosestMatchToSong(song)
if matchSong:
logger.info(f"Migrating {stringifySong(song)} as {stringifySong(matchSong)}")
if not self.isMocked:
self.target.starSong(matchSong['id'])
else:
logger.warning(f"No match to {stringifySong(song)}")
except Exception as e:
logger.exception(f"Unable to star song {stringifySong(song)}")
except Exception as e:
logger.exception("Unable to migrate starred songs")
|
"""
The MIT License (MIT)
Copyright (c) 2015-present Rapptz
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
from __future__ import annotations
import logging
import asyncio
import json
import re
from urllib.parse import quote as urlquote
from typing import Any, Dict, List, Literal, Optional, TYPE_CHECKING, Tuple, Union, TypeVar, Type, overload
from contextvars import ContextVar
import weakref
import aiohttp
from .. import utils
from ..errors import HTTPException, Forbidden, NotFound, DiscordServerError
from ..message import Message
from ..enums import try_enum, WebhookType
from ..user import BaseUser, User
from ..flags import MessageFlags
from ..asset import Asset
from ..http import Route, handle_message_parameters, MultipartParameters, HTTPClient
from ..mixins import Hashable
from ..channel import PartialMessageable
from ..file import File
__all__ = (
'Webhook',
'WebhookMessage',
'PartialWebhookChannel',
'PartialWebhookGuild',
)
_log = logging.getLogger(__name__)
if TYPE_CHECKING:
from typing_extensions import Self
from types import TracebackType
from ..embeds import Embed
from ..mentions import AllowedMentions
from ..message import Attachment
from ..state import ConnectionState
from ..http import Response
from ..guild import Guild
from ..channel import TextChannel
from ..abc import Snowflake
from ..ui.view import View
import datetime
from ..types.webhook import (
Webhook as WebhookPayload,
SourceGuild as SourceGuildPayload,
)
from ..types.message import (
Message as MessagePayload,
)
from ..types.user import (
User as UserPayload,
PartialUser as PartialUserPayload,
)
from ..types.channel import (
PartialChannel as PartialChannelPayload,
)
BE = TypeVar('BE', bound=BaseException)
_State = Union[ConnectionState, '_WebhookState']
MISSING: Any = utils.MISSING
class AsyncDeferredLock:
def __init__(self, lock: asyncio.Lock):
self.lock = lock
self.delta: Optional[float] = None
async def __aenter__(self) -> Self:
await self.lock.acquire()
return self
def delay_by(self, delta: float) -> None:
self.delta = delta
async def __aexit__(
self,
exc_type: Optional[Type[BE]],
exc: Optional[BE],
traceback: Optional[TracebackType],
) -> None:
if self.delta:
await asyncio.sleep(self.delta)
self.lock.release()
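# Illustrative sketch (not part of the library): AsyncDeferredLock releases its
# underlying lock only after sleeping for any delay recorded via delay_by(),
# which is how request() below spaces out webhook calls that were pre-emptively
# rate limited.
async def _deferred_lock_sketch() -> None:
    lock = asyncio.Lock()
    async with AsyncDeferredLock(lock) as guard:
        ...  # perform the rate-limited HTTP call here
        guard.delay_by(0.5)  # keep the lock held an extra 0.5 seconds on exit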
class AsyncWebhookAdapter:
def __init__(self):
self._locks: weakref.WeakValueDictionary[Any, asyncio.Lock] = weakref.WeakValueDictionary()
async def request(
self,
route: Route,
session: aiohttp.ClientSession,
*,
payload: Optional[Dict[str, Any]] = None,
multipart: Optional[List[Dict[str, Any]]] = None,
files: Optional[List[File]] = None,
reason: Optional[str] = None,
auth_token: Optional[str] = None,
params: Optional[Dict[str, Any]] = None,
) -> Any:
headers: Dict[str, str] = {}
files = files or []
to_send: Optional[Union[str, aiohttp.FormData]] = None
bucket = (route.webhook_id, route.webhook_token)
try:
lock = self._locks[bucket]
except KeyError:
self._locks[bucket] = lock = asyncio.Lock()
if payload is not None:
headers['Content-Type'] = 'application/json'
to_send = utils._to_json(payload)
if auth_token is not None:
headers['Authorization'] = f'Bot {auth_token}'
if reason is not None:
headers['X-Audit-Log-Reason'] = urlquote(reason, safe='/ ')
response: Optional[aiohttp.ClientResponse] = None
data: Optional[Union[Dict[str, Any], str]] = None
method = route.method
url = route.url
webhook_id = route.webhook_id
async with AsyncDeferredLock(lock) as lock:
for attempt in range(5):
for file in files:
file.reset(seek=attempt)
if multipart:
form_data = aiohttp.FormData(quote_fields=False)
for p in multipart:
form_data.add_field(**p)
to_send = form_data
try:
async with session.request(method, url, data=to_send, headers=headers, params=params) as response:
_log.debug(
'Webhook ID %s with %s %s has returned status code %s',
webhook_id,
method,
url,
response.status,
)
data = (await response.text(encoding='utf-8')) or None
if data and response.headers['Content-Type'] == 'application/json':
data = json.loads(data)
remaining = response.headers.get('X-Ratelimit-Remaining')
if remaining == '0' and response.status != 429:
delta = utils._parse_ratelimit_header(response)
_log.debug(
'Webhook ID %s has been pre-emptively rate limited, waiting %.2f seconds', webhook_id, delta
)
lock.delay_by(delta)
if 300 > response.status >= 200:
return data
if response.status == 429:
if not response.headers.get('Via'):
raise HTTPException(response, data)
retry_after: float = data['retry_after'] # type: ignore
_log.warning('Webhook ID %s is rate limited. Retrying in %.2f seconds', webhook_id, retry_after)
await asyncio.sleep(retry_after)
continue
if response.status >= 500:
await asyncio.sleep(1 + attempt * 2)
continue
if response.status == 403:
raise Forbidden(response, data)
elif response.status == 404:
raise NotFound(response, data)
else:
raise HTTPException(response, data)
except OSError as e:
if attempt < 4 and e.errno in (54, 10054):
await asyncio.sleep(1 + attempt * 2)
continue
raise
if response:
if response.status >= 500:
raise DiscordServerError(response, data)
raise HTTPException(response, data)
raise RuntimeError('Unreachable code in HTTP handling.')
def delete_webhook(
self,
webhook_id: int,
*,
token: Optional[str] = None,
session: aiohttp.ClientSession,
reason: Optional[str] = None,
) -> Response[None]:
route = Route('DELETE', '/webhooks/{webhook_id}', webhook_id=webhook_id)
return self.request(route, session, reason=reason, auth_token=token)
def delete_webhook_with_token(
self,
webhook_id: int,
token: str,
*,
session: aiohttp.ClientSession,
reason: Optional[str] = None,
) -> Response[None]:
route = Route('DELETE', '/webhooks/{webhook_id}/{webhook_token}', webhook_id=webhook_id, webhook_token=token)
return self.request(route, session, reason=reason)
def edit_webhook(
self,
webhook_id: int,
token: str,
payload: Dict[str, Any],
*,
session: aiohttp.ClientSession,
reason: Optional[str] = None,
) -> Response[WebhookPayload]:
route = Route('PATCH', '/webhooks/{webhook_id}', webhook_id=webhook_id)
return self.request(route, session, reason=reason, payload=payload, auth_token=token)
def edit_webhook_with_token(
self,
webhook_id: int,
token: str,
payload: Dict[str, Any],
*,
session: aiohttp.ClientSession,
reason: Optional[str] = None,
) -> Response[WebhookPayload]:
route = Route('PATCH', '/webhooks/{webhook_id}/{webhook_token}', webhook_id=webhook_id, webhook_token=token)
return self.request(route, session, reason=reason, payload=payload)
def execute_webhook(
self,
webhook_id: int,
token: str,
*,
session: aiohttp.ClientSession,
payload: Optional[Dict[str, Any]] = None,
multipart: Optional[List[Dict[str, Any]]] = None,
files: Optional[List[File]] = None,
thread_id: Optional[int] = None,
wait: bool = False,
) -> Response[Optional[MessagePayload]]:
params = {'wait': int(wait)}
if thread_id:
params['thread_id'] = thread_id
route = Route('POST', '/webhooks/{webhook_id}/{webhook_token}', webhook_id=webhook_id, webhook_token=token)
return self.request(route, session, payload=payload, multipart=multipart, files=files, params=params)
def get_webhook_message(
self,
webhook_id: int,
token: str,
message_id: int,
*,
session: aiohttp.ClientSession,
) -> Response[MessagePayload]:
route = Route(
'GET',
'/webhooks/{webhook_id}/{webhook_token}/messages/{message_id}',
webhook_id=webhook_id,
webhook_token=token,
message_id=message_id,
)
return self.request(route, session)
def edit_webhook_message(
self,
webhook_id: int,
token: str,
message_id: int,
*,
session: aiohttp.ClientSession,
payload: Optional[Dict[str, Any]] = None,
multipart: Optional[List[Dict[str, Any]]] = None,
files: Optional[List[File]] = None,
) -> Response[Message]:
route = Route(
'PATCH',
'/webhooks/{webhook_id}/{webhook_token}/messages/{message_id}',
webhook_id=webhook_id,
webhook_token=token,
message_id=message_id,
)
return self.request(route, session, payload=payload, multipart=multipart, files=files)
def delete_webhook_message(
self,
webhook_id: int,
token: str,
message_id: int,
*,
session: aiohttp.ClientSession,
) -> Response[None]:
route = Route(
'DELETE',
'/webhooks/{webhook_id}/{webhook_token}/messages/{message_id}',
webhook_id=webhook_id,
webhook_token=token,
message_id=message_id,
)
return self.request(route, session)
def fetch_webhook(
self,
webhook_id: int,
token: str,
*,
session: aiohttp.ClientSession,
) -> Response[WebhookPayload]:
route = Route('GET', '/webhooks/{webhook_id}', webhook_id=webhook_id)
return self.request(route, session=session, auth_token=token)
def fetch_webhook_with_token(
self,
webhook_id: int,
token: str,
*,
session: aiohttp.ClientSession,
) -> Response[WebhookPayload]:
route = Route('GET', '/webhooks/{webhook_id}/{webhook_token}', webhook_id=webhook_id, webhook_token=token)
return self.request(route, session=session)
def create_interaction_response(
self,
interaction_id: int,
token: str,
*,
session: aiohttp.ClientSession,
params: MultipartParameters,
) -> Response[None]:
route = Route(
'POST',
'/interactions/{webhook_id}/{webhook_token}/callback',
webhook_id=interaction_id,
webhook_token=token,
)
if params.files:
return self.request(route, session=session, files=params.files, multipart=params.multipart)
else:
return self.request(route, session=session, payload=params.payload)
def get_original_interaction_response(
self,
application_id: int,
token: str,
*,
session: aiohttp.ClientSession,
) -> Response[MessagePayload]:
r = Route(
'GET',
'/webhooks/{webhook_id}/{webhook_token}/messages/@original',
webhook_id=application_id,
webhook_token=token,
)
return self.request(r, session=session)
def edit_original_interaction_response(
self,
application_id: int,
token: str,
*,
session: aiohttp.ClientSession,
payload: Optional[Dict[str, Any]] = None,
multipart: Optional[List[Dict[str, Any]]] = None,
files: Optional[List[File]] = None,
) -> Response[MessagePayload]:
r = Route(
'PATCH',
'/webhooks/{webhook_id}/{webhook_token}/messages/@original',
webhook_id=application_id,
webhook_token=token,
)
return self.request(r, session, payload=payload, multipart=multipart, files=files)
def delete_original_interaction_response(
self,
application_id: int,
token: str,
*,
session: aiohttp.ClientSession,
) -> Response[None]:
r = Route(
'DELETE',
'/webhooks/{webhook_id}/{webhook_token}/messages/@original',
webhook_id=application_id,
webhook_token=token,
)
return self.request(r, session=session)
def interaction_response_params(type: int, data: Optional[Dict[str, Any]] = None) -> MultipartParameters:
payload: Dict[str, Any] = {
'type': type,
}
if data is not None:
payload['data'] = data
return MultipartParameters(payload=payload, multipart=None, files=None)
# This is a subset of handle_message_parameters
def interaction_message_response_params(
*,
type: int,
content: Optional[str] = MISSING,
tts: bool = False,
flags: MessageFlags = MISSING,
file: File = MISSING,
files: List[File] = MISSING,
embed: Optional[Embed] = MISSING,
embeds: List[Embed] = MISSING,
attachments: List[Union[Attachment, File]] = MISSING,
view: Optional[View] = MISSING,
allowed_mentions: Optional[AllowedMentions] = MISSING,
previous_allowed_mentions: Optional[AllowedMentions] = None,
) -> MultipartParameters:
if files is not MISSING and file is not MISSING:
raise TypeError('Cannot mix file and files keyword arguments.')
if embeds is not MISSING and embed is not MISSING:
raise TypeError('Cannot mix embed and embeds keyword arguments.')
if file is not MISSING:
files = [file]
if attachments is not MISSING and files is not MISSING:
raise TypeError('Cannot mix attachments and files keyword arguments.')
data: Optional[Dict[str, Any]] = {
'tts': tts,
}
if embeds is not MISSING:
if len(embeds) > 10:
raise ValueError('embeds has a maximum of 10 elements.')
data['embeds'] = [e.to_dict() for e in embeds]
if embed is not MISSING:
if embed is None:
data['embeds'] = []
else:
data['embeds'] = [embed.to_dict()]
if content is not MISSING:
if content is not None:
data['content'] = str(content)
else:
data['content'] = None
if view is not MISSING:
if view is not None:
data['components'] = view.to_components()
else:
data['components'] = []
if flags is not MISSING:
data['flags'] = flags.value
if allowed_mentions:
if previous_allowed_mentions is not None:
data['allowed_mentions'] = previous_allowed_mentions.merge(allowed_mentions).to_dict()
else:
data['allowed_mentions'] = allowed_mentions.to_dict()
elif previous_allowed_mentions is not None:
data['allowed_mentions'] = previous_allowed_mentions.to_dict()
if attachments is MISSING:
attachments = files # type: ignore
else:
files = [a for a in attachments if isinstance(a, File)]
if attachments is not MISSING:
file_index = 0
attachments_payload = []
for attachment in attachments:
if isinstance(attachment, File):
attachments_payload.append(attachment.to_dict(file_index))
file_index += 1
else:
attachments_payload.append(attachment.to_dict())
data['attachments'] = attachments_payload
multipart = []
if files:
data = {'type': type, 'data': data}
multipart.append({'name': 'payload_json', 'value': utils._to_json(data)})
data = None
for index, file in enumerate(files):
multipart.append(
{
'name': f'files[{index}]',
'value': file.fp,
'filename': file.filename,
'content_type': 'application/octet-stream',
}
)
else:
data = {'type': type, 'data': data}
return MultipartParameters(payload=data, multipart=multipart, files=files)
async_context: ContextVar[AsyncWebhookAdapter] = ContextVar('async_webhook_context', default=AsyncWebhookAdapter())
class PartialWebhookChannel(Hashable):
"""Represents a partial channel for webhooks.
These are typically given for channel follower webhooks.
.. versionadded:: 2.0
Attributes
-----------
id: :class:`int`
The partial channel's ID.
name: :class:`str`
The partial channel's name.
"""
__slots__ = ('id', 'name')
def __init__(self, *, data: PartialChannelPayload) -> None:
self.id: int = int(data['id'])
self.name: str = data['name']
def __repr__(self) -> str:
return f'<PartialWebhookChannel name={self.name!r} id={self.id}>'
class PartialWebhookGuild(Hashable):
"""Represents a partial guild for webhooks.
These are typically given for channel follower webhooks.
.. versionadded:: 2.0
Attributes
-----------
id: :class:`int`
The partial guild's ID.
name: :class:`str`
The partial guild's name.
"""
__slots__ = ('id', 'name', '_icon', '_state')
def __init__(self, *, data: SourceGuildPayload, state: _State) -> None:
self._state: _State = state
self.id: int = int(data['id'])
self.name: str = data['name']
self._icon: str = data['icon']
def __repr__(self) -> str:
return f'<PartialWebhookGuild name={self.name!r} id={self.id}>'
@property
def icon(self) -> Optional[Asset]:
"""Optional[:class:`Asset`]: Returns the guild's icon asset, if available."""
if self._icon is None:
return None
return Asset._from_guild_icon(self._state, self.id, self._icon)
class _FriendlyHttpAttributeErrorHelper:
__slots__ = ()
def __getattr__(self, attr: str) -> Any:
raise AttributeError('PartialWebhookState does not support http methods.')
class _WebhookState:
__slots__ = ('_parent', '_webhook')
def __init__(self, webhook: Any, parent: Optional[_State]):
self._webhook: Any = webhook
self._parent: Optional[ConnectionState]
if isinstance(parent, _WebhookState):
self._parent = None
else:
self._parent = parent
def _get_guild(self, guild_id: Optional[int]) -> Optional[Guild]:
if self._parent is not None:
return self._parent._get_guild(guild_id)
return None
def store_user(self, data: Union[UserPayload, PartialUserPayload]) -> BaseUser:
if self._parent is not None:
return self._parent.store_user(data)
# state parameter is artificial
return BaseUser(state=self, data=data) # type: ignore
def create_user(self, data: Union[UserPayload, PartialUserPayload]) -> BaseUser:
# state parameter is artificial
return BaseUser(state=self, data=data) # type: ignore
@property
def http(self) -> Union[HTTPClient, _FriendlyHttpAttributeErrorHelper]:
if self._parent is not None:
return self._parent.http
# Some data classes assign state.http and that should be kosher
# however, using it should result in a late-binding error.
return _FriendlyHttpAttributeErrorHelper()
def __getattr__(self, attr: str) -> Any:
if self._parent is not None:
return getattr(self._parent, attr)
raise AttributeError(f'PartialWebhookState does not support {attr!r}.')
class WebhookMessage(Message):
"""Represents a message sent from your webhook.
This allows you to edit or delete a message sent by your
webhook.
This inherits from :class:`discord.Message` with changes to
:meth:`edit` and :meth:`delete` to work.
.. versionadded:: 1.6
"""
_state: _WebhookState
async def edit(
self,
content: Optional[str] = MISSING,
embeds: List[Embed] = MISSING,
embed: Optional[Embed] = MISSING,
attachments: List[Union[Attachment, File]] = MISSING,
view: Optional[View] = MISSING,
allowed_mentions: Optional[AllowedMentions] = None,
) -> WebhookMessage:
"""|coro|
Edits the message.
.. versionadded:: 1.6
.. versionchanged:: 2.0
The edit is no longer in-place, instead the newly edited message is returned.
.. versionchanged:: 2.0
This function will now raise :exc:`ValueError` instead of
``InvalidArgument``.
Parameters
------------
content: Optional[:class:`str`]
The content to edit the message with or ``None`` to clear it.
embeds: List[:class:`Embed`]
A list of embeds to edit the message with.
embed: Optional[:class:`Embed`]
The embed to edit the message with. ``None`` suppresses the embeds.
This should not be mixed with the ``embeds`` parameter.
attachments: List[Union[:class:`Attachment`, :class:`File`]]
A list of attachments to keep in the message as well as new files to upload. If ``[]`` is passed
then all attachments are removed.
.. note::
New files will always appear after current attachments.
.. versionadded:: 2.0
allowed_mentions: :class:`AllowedMentions`
Controls the mentions being processed in this message.
See :meth:`.abc.Messageable.send` for more information.
view: Optional[:class:`~discord.ui.View`]
The updated view to update this message with. If ``None`` is passed then
the view is removed.
.. versionadded:: 2.0
Raises
-------
HTTPException
Editing the message failed.
Forbidden
Edited a message that is not yours.
TypeError
You specified both ``embed`` and ``embeds``
ValueError
The length of ``embeds`` was invalid or
there was no token associated with this webhook.
Returns
--------
:class:`WebhookMessage`
The newly edited message.
"""
return await self._state._webhook.edit_message(
self.id,
content=content,
embeds=embeds,
embed=embed,
attachments=attachments,
view=view,
allowed_mentions=allowed_mentions,
)
async def add_files(self, *files: File) -> WebhookMessage:
r"""|coro|
Adds new files to the end of the message attachments.
.. versionadded:: 2.0
Parameters
-----------
\*files: :class:`File`
New files to add to the message.
Raises
-------
HTTPException
Editing the message failed.
Forbidden
Tried to edit a message that isn't yours.
Returns
--------
:class:`WebhookMessage`
The newly edited message.
"""
return await self.edit(attachments=[*self.attachments, *files])
async def remove_attachments(self, *attachments: Attachment) -> WebhookMessage:
r"""|coro|
Removes attachments from the message.
.. versionadded:: 2.0
Parameters
-----------
\*attachments: :class:`Attachment`
Attachments to remove from the message.
Raises
-------
HTTPException
Editing the message failed.
Forbidden
Tried to edit a message that isn't yours.
Returns
--------
:class:`WebhookMessage`
The newly edited message.
"""
return await self.edit(attachments=[a for a in self.attachments if a not in attachments])
async def delete(self, *, delay: Optional[float] = None) -> None:
"""|coro|
Deletes the message.
Parameters
-----------
delay: Optional[:class:`float`]
If provided, the number of seconds to wait before deleting the message.
The waiting is done in the background and deletion failures are ignored.
Raises
------
Forbidden
You do not have proper permissions to delete the message.
NotFound
The message was deleted already.
HTTPException
Deleting the message failed.
"""
if delay is not None:
async def inner_call(delay: float = delay):
await asyncio.sleep(delay)
try:
await self._state._webhook.delete_message(self.id)
except HTTPException:
pass
asyncio.create_task(inner_call())
else:
await self._state._webhook.delete_message(self.id)
class BaseWebhook(Hashable):
__slots__: Tuple[str, ...] = (
'id',
'type',
'guild_id',
'channel_id',
'token',
'auth_token',
'user',
'name',
'_avatar',
'source_channel',
'source_guild',
'_state',
)
def __init__(
self,
data: WebhookPayload,
token: Optional[str] = None,
state: Optional[_State] = None,
) -> None:
self.auth_token: Optional[str] = token
self._state: _State = state or _WebhookState(self, parent=state)
self._update(data)
def _update(self, data: WebhookPayload) -> None:
self.id: int = int(data['id'])
self.type: WebhookType = try_enum(WebhookType, int(data['type']))
self.channel_id: Optional[int] = utils._get_as_snowflake(data, 'channel_id')
self.guild_id: Optional[int] = utils._get_as_snowflake(data, 'guild_id')
self.name: Optional[str] = data.get('name')
self._avatar: Optional[str] = data.get('avatar')
self.token: Optional[str] = data.get('token')
user = data.get('user')
self.user: Optional[Union[BaseUser, User]] = None
if user is not None:
# state parameter may be _WebhookState
self.user = User(state=self._state, data=user) # type: ignore
source_channel = data.get('source_channel')
if source_channel:
source_channel = PartialWebhookChannel(data=source_channel)
self.source_channel: Optional[PartialWebhookChannel] = source_channel
source_guild = data.get('source_guild')
if source_guild:
source_guild = PartialWebhookGuild(data=source_guild, state=self._state)
self.source_guild: Optional[PartialWebhookGuild] = source_guild
def is_partial(self) -> bool:
""":class:`bool`: Whether the webhook is a "partial" webhook.
.. versionadded:: 2.0"""
return self.channel_id is None
def is_authenticated(self) -> bool:
""":class:`bool`: Whether the webhook is authenticated with a bot token.
.. versionadded:: 2.0
"""
return self.auth_token is not None
@property
def guild(self) -> Optional[Guild]:
"""Optional[:class:`Guild`]: The guild this webhook belongs to.
If this is a partial webhook, then this will always return ``None``.
"""
return self._state and self._state._get_guild(self.guild_id)
@property
def channel(self) -> Optional[TextChannel]:
"""Optional[:class:`TextChannel`]: The text channel this webhook belongs to.
If this is a partial webhook, then this will always return ``None``.
"""
guild = self.guild
return guild and guild.get_channel(self.channel_id) # type: ignore
@property
def created_at(self) -> datetime.datetime:
""":class:`datetime.datetime`: Returns the webhook's creation time in UTC."""
return utils.snowflake_time(self.id)
@property
def avatar(self) -> Optional[Asset]:
"""Optional[:class:`Asset`]: Returns an :class:`Asset` for the avatar the webhook has.
If the webhook does not have a traditional avatar, ``None`` is returned.
If you want the avatar that a webhook has displayed, consider :attr:`display_avatar`.
"""
if self._avatar is not None:
return Asset._from_avatar(self._state, self.id, self._avatar)
return None
@property
def default_avatar(self) -> Asset:
"""
:class:`Asset`: Returns the default avatar. This is always the blurple avatar.
.. versionadded:: 2.0
"""
# Default is always blurple apparently
return Asset._from_default_avatar(self._state, 0)
@property
def display_avatar(self) -> Asset:
""":class:`Asset`: Returns the webhook's display avatar.
        This is the webhook's uploaded avatar if present, otherwise its default avatar.
.. versionadded:: 2.0
"""
return self.avatar or self.default_avatar
class Webhook(BaseWebhook):
"""Represents an asynchronous Discord webhook.
    Webhooks are a way to send messages to channels in Discord without a
    bot user or authentication.
There are two main ways to use Webhooks. The first is through the ones
received by the library such as :meth:`.Guild.webhooks` and
:meth:`.TextChannel.webhooks`. The ones received by the library will
automatically be bound using the library's internal HTTP session.
The second form involves creating a webhook object manually using the
:meth:`~.Webhook.from_url` or :meth:`~.Webhook.partial` classmethods.
For example, creating a webhook from a URL and using :doc:`aiohttp <aio:index>`:
.. code-block:: python3
from discord import Webhook
import aiohttp
async def foo():
async with aiohttp.ClientSession() as session:
webhook = Webhook.from_url('url-here', session=session)
await webhook.send('Hello World', username='Foo')
For a synchronous counterpart, see :class:`SyncWebhook`.
.. container:: operations
.. describe:: x == y
Checks if two webhooks are equal.
.. describe:: x != y
Checks if two webhooks are not equal.
.. describe:: hash(x)
            Returns the webhook's hash.
.. versionchanged:: 1.4
Webhooks are now comparable and hashable.
Attributes
------------
id: :class:`int`
The webhook's ID
type: :class:`WebhookType`
The type of the webhook.
.. versionadded:: 1.3
token: Optional[:class:`str`]
The authentication token of the webhook. If this is ``None``
then the webhook cannot be used to make requests.
guild_id: Optional[:class:`int`]
The guild ID this webhook is for.
channel_id: Optional[:class:`int`]
The channel ID this webhook is for.
user: Optional[:class:`abc.User`]
The user this webhook was created by. If the webhook was
received without authentication then this will be ``None``.
name: Optional[:class:`str`]
The default name of the webhook.
source_guild: Optional[:class:`PartialWebhookGuild`]
The guild of the channel that this webhook is following.
Only given if :attr:`type` is :attr:`WebhookType.channel_follower`.
.. versionadded:: 2.0
source_channel: Optional[:class:`PartialWebhookChannel`]
The channel that this webhook is following.
Only given if :attr:`type` is :attr:`WebhookType.channel_follower`.
.. versionadded:: 2.0
"""
__slots__: Tuple[str, ...] = ('session',)
def __init__(
self,
data: WebhookPayload,
session: aiohttp.ClientSession,
token: Optional[str] = None,
state: Optional[_State] = None,
) -> None:
super().__init__(data, token, state)
self.session: aiohttp.ClientSession = session
def __repr__(self) -> str:
return f'<Webhook id={self.id!r}>'
@property
def url(self) -> str:
        """:class:`str`: Returns the webhook's URL."""
return f'https://discord.com/api/webhooks/{self.id}/{self.token}'
@classmethod
def partial(cls, id: int, token: str, *, session: aiohttp.ClientSession, bot_token: Optional[str] = None) -> Self:
"""Creates a partial :class:`Webhook`.
Parameters
-----------
id: :class:`int`
The ID of the webhook.
token: :class:`str`
The authentication token of the webhook.
session: :class:`aiohttp.ClientSession`
The session to use to send requests with. Note
that the library does not manage the session and
will not close it.
.. versionadded:: 2.0
bot_token: Optional[:class:`str`]
The bot authentication token for authenticated requests
involving the webhook.
.. versionadded:: 2.0
Returns
--------
:class:`Webhook`
A partial :class:`Webhook`.
A partial webhook is just a webhook object with an ID and a token.
"""
data: WebhookPayload = {
'id': id,
'type': 1,
'token': token,
}
return cls(data, session, token=bot_token)
@classmethod
def from_url(cls, url: str, *, session: aiohttp.ClientSession, bot_token: Optional[str] = None) -> Self:
"""Creates a partial :class:`Webhook` from a webhook URL.
.. versionchanged:: 2.0
This function will now raise :exc:`ValueError` instead of
``InvalidArgument``.
Parameters
------------
url: :class:`str`
The URL of the webhook.
session: :class:`aiohttp.ClientSession`
The session to use to send requests with. Note
that the library does not manage the session and
will not close it.
.. versionadded:: 2.0
bot_token: Optional[:class:`str`]
The bot authentication token for authenticated requests
involving the webhook.
.. versionadded:: 2.0
Raises
-------
ValueError
The URL is invalid.
Returns
--------
:class:`Webhook`
A partial :class:`Webhook`.
A partial webhook is just a webhook object with an ID and a token.
"""
        m = re.search(r'discord(?:app)?\.com/api/webhooks/(?P<id>[0-9]{17,20})/(?P<token>[A-Za-z0-9\.\-\_]{60,68})', url)
if m is None:
raise ValueError('Invalid webhook URL given.')
data: Dict[str, Any] = m.groupdict()
data['type'] = 1
return cls(data, session, token=bot_token) # type: ignore
@classmethod
def _as_follower(cls, data, *, channel, user) -> Self:
name = f"{channel.guild} #{channel}"
feed: WebhookPayload = {
'id': data['webhook_id'],
'type': 2,
'name': name,
'channel_id': channel.id,
'guild_id': channel.guild.id,
'user': {'username': user.name, 'discriminator': user.discriminator, 'id': user.id, 'avatar': user._avatar},
}
state = channel._state
session = channel._state.http._HTTPClient__session
return cls(feed, session=session, state=state, token=state.http.token)
@classmethod
def from_state(cls, data: WebhookPayload, state: ConnectionState) -> Self:
session = state.http._HTTPClient__session # type: ignore
return cls(data, session=session, state=state, token=state.http.token)
async def fetch(self, *, prefer_auth: bool = True) -> Webhook:
"""|coro|
Fetches the current webhook.
This could be used to get a full webhook from a partial webhook.
.. versionadded:: 2.0
.. note::
When fetching with an unauthenticated webhook, i.e.
:meth:`is_authenticated` returns ``False``, then the
returned webhook does not contain any user information.
Parameters
-----------
prefer_auth: :class:`bool`
Whether to use the bot token over the webhook token
if available. Defaults to ``True``.
Raises
-------
HTTPException
Could not fetch the webhook
NotFound
Could not find the webhook by this ID
ValueError
This webhook does not have a token associated with it.
Returns
--------
:class:`Webhook`
The fetched webhook.
"""
adapter = async_context.get()
if prefer_auth and self.auth_token:
data = await adapter.fetch_webhook(self.id, self.auth_token, session=self.session)
elif self.token:
data = await adapter.fetch_webhook_with_token(self.id, self.token, session=self.session)
else:
raise ValueError('This webhook does not have a token associated with it')
return Webhook(data, self.session, token=self.auth_token, state=self._state)
async def delete(self, *, reason: Optional[str] = None, prefer_auth: bool = True) -> None:
"""|coro|
Deletes this Webhook.
Parameters
------------
reason: Optional[:class:`str`]
The reason for deleting this webhook. Shows up on the audit log.
.. versionadded:: 1.4
prefer_auth: :class:`bool`
Whether to use the bot token over the webhook token
if available. Defaults to ``True``.
.. versionadded:: 2.0
Raises
-------
HTTPException
Deleting the webhook failed.
NotFound
This webhook does not exist.
Forbidden
You do not have permissions to delete this webhook.
ValueError
This webhook does not have a token associated with it.
"""
if self.token is None and self.auth_token is None:
raise ValueError('This webhook does not have a token associated with it')
adapter = async_context.get()
if prefer_auth and self.auth_token:
await adapter.delete_webhook(self.id, token=self.auth_token, session=self.session, reason=reason)
elif self.token:
await adapter.delete_webhook_with_token(self.id, self.token, session=self.session, reason=reason)
async def edit(
self,
*,
reason: Optional[str] = None,
name: Optional[str] = MISSING,
avatar: Optional[bytes] = MISSING,
channel: Optional[Snowflake] = None,
prefer_auth: bool = True,
) -> Webhook:
"""|coro|
Edits this Webhook.
.. versionchanged:: 2.0
This function will now raise :exc:`ValueError` instead of
            ``InvalidArgument``.
Parameters
------------
name: Optional[:class:`str`]
The webhook's new default name.
avatar: Optional[:class:`bytes`]
A :term:`py:bytes-like object` representing the webhook's new default avatar.
channel: Optional[:class:`abc.Snowflake`]
The webhook's new channel. This requires an authenticated webhook.
.. versionadded:: 2.0
reason: Optional[:class:`str`]
The reason for editing this webhook. Shows up on the audit log.
.. versionadded:: 1.4
prefer_auth: :class:`bool`
Whether to use the bot token over the webhook token
if available. Defaults to ``True``.
.. versionadded:: 2.0
Raises
-------
HTTPException
Editing the webhook failed.
NotFound
This webhook does not exist.
ValueError
This webhook does not have a token associated with it
or it tried editing a channel without authentication.
"""
if self.token is None and self.auth_token is None:
raise ValueError('This webhook does not have a token associated with it')
payload = {}
if name is not MISSING:
payload['name'] = str(name) if name is not None else None
if avatar is not MISSING:
payload['avatar'] = utils._bytes_to_base64_data(avatar) if avatar is not None else None
adapter = async_context.get()
data: Optional[WebhookPayload] = None
# If a channel is given, always use the authenticated endpoint
if channel is not None:
if self.auth_token is None:
raise ValueError('Editing channel requires authenticated webhook')
payload['channel_id'] = channel.id
data = await adapter.edit_webhook(self.id, self.auth_token, payload=payload, session=self.session, reason=reason)
        elif prefer_auth and self.auth_token:
data = await adapter.edit_webhook(self.id, self.auth_token, payload=payload, session=self.session, reason=reason)
elif self.token:
data = await adapter.edit_webhook_with_token(
self.id, self.token, payload=payload, session=self.session, reason=reason
)
if data is None:
raise RuntimeError('Unreachable code hit: data was not assigned')
return Webhook(data=data, session=self.session, token=self.auth_token, state=self._state)
def _create_message(self, data):
state = _WebhookState(self, parent=self._state)
# state may be artificial (unlikely at this point...)
channel = self.channel or PartialMessageable(state=self._state, id=int(data['channel_id'])) # type: ignore
# state is artificial
return WebhookMessage(data=data, state=state, channel=channel) # type: ignore
@overload
async def send(
self,
content: str = MISSING,
*,
username: str = MISSING,
avatar_url: Any = MISSING,
tts: bool = MISSING,
ephemeral: bool = MISSING,
file: File = MISSING,
files: List[File] = MISSING,
embed: Embed = MISSING,
embeds: List[Embed] = MISSING,
allowed_mentions: AllowedMentions = MISSING,
view: View = MISSING,
thread: Snowflake = MISSING,
wait: Literal[True],
suppress_embeds: bool = MISSING,
) -> WebhookMessage:
...
@overload
async def send(
self,
content: str = MISSING,
*,
username: str = MISSING,
avatar_url: Any = MISSING,
tts: bool = MISSING,
ephemeral: bool = MISSING,
file: File = MISSING,
files: List[File] = MISSING,
embed: Embed = MISSING,
embeds: List[Embed] = MISSING,
allowed_mentions: AllowedMentions = MISSING,
view: View = MISSING,
thread: Snowflake = MISSING,
wait: Literal[False] = ...,
suppress_embeds: bool = MISSING,
) -> None:
...
async def send(
self,
content: str = MISSING,
*,
username: str = MISSING,
avatar_url: Any = MISSING,
tts: bool = False,
ephemeral: bool = False,
file: File = MISSING,
files: List[File] = MISSING,
embed: Embed = MISSING,
embeds: List[Embed] = MISSING,
allowed_mentions: AllowedMentions = MISSING,
view: View = MISSING,
thread: Snowflake = MISSING,
wait: bool = False,
suppress_embeds: bool = False,
) -> Optional[WebhookMessage]:
"""|coro|
Sends a message using the webhook.
The content must be a type that can convert to a string through ``str(content)``.
To upload a single file, the ``file`` parameter should be used with a
single :class:`File` object.
If the ``embed`` parameter is provided, it must be of type :class:`Embed` and
it must be a rich embed type. You cannot mix the ``embed`` parameter with the
``embeds`` parameter, which must be a :class:`list` of :class:`Embed` objects to send.
.. versionchanged:: 2.0
This function will now raise :exc:`ValueError` instead of
``InvalidArgument``.
Parameters
------------
content: :class:`str`
The content of the message to send.
wait: :class:`bool`
Whether the server should wait before sending a response. This essentially
means that the return type of this function changes from ``None`` to
a :class:`WebhookMessage` if set to ``True``. If the type of webhook
is :attr:`WebhookType.application` then this is always set to ``True``.
username: :class:`str`
The username to send with this message. If no username is provided
then the default username for the webhook is used.
avatar_url: :class:`str`
The avatar URL to send with this message. If no avatar URL is provided
then the default avatar for the webhook is used. If this is not a
string then it is explicitly cast using ``str``.
tts: :class:`bool`
Indicates if the message should be sent using text-to-speech.
ephemeral: :class:`bool`
Indicates if the message should only be visible to the user.
This is only available to :attr:`WebhookType.application` webhooks.
If a view is sent with an ephemeral message and it has no timeout set
then the timeout is set to 15 minutes.
.. versionadded:: 2.0
file: :class:`File`
The file to upload. This cannot be mixed with ``files`` parameter.
files: List[:class:`File`]
A list of files to send with the content. This cannot be mixed with the
``file`` parameter.
embed: :class:`Embed`
The rich embed for the content to send. This cannot be mixed with
``embeds`` parameter.
embeds: List[:class:`Embed`]
A list of embeds to send with the content. Maximum of 10. This cannot
be mixed with the ``embed`` parameter.
allowed_mentions: :class:`AllowedMentions`
Controls the mentions being processed in this message.
.. versionadded:: 1.4
view: :class:`discord.ui.View`
The view to send with the message. You can only send a view
if this webhook is not partial and has state attached. A
webhook has state attached if the webhook is managed by the
library.
.. versionadded:: 2.0
thread: :class:`~discord.abc.Snowflake`
The thread to send this webhook to.
.. versionadded:: 2.0
suppress_embeds: :class:`bool`
Whether to suppress embeds for the message. This sends the message without any embeds if set to ``True``.
.. versionadded:: 2.0
Raises
--------
HTTPException
Sending the message failed.
NotFound
This webhook was not found.
Forbidden
The authorization token for the webhook is incorrect.
TypeError
You specified both ``embed`` and ``embeds`` or ``file`` and ``files``.
ValueError
The length of ``embeds`` was invalid, there was no token
associated with this webhook or ``ephemeral`` was passed
with the improper webhook type or there was no state
attached with this webhook when giving it a view.
Returns
---------
Optional[:class:`WebhookMessage`]
If ``wait`` is ``True`` then the message that was sent, otherwise ``None``.
"""
if self.token is None:
raise ValueError('This webhook does not have a token associated with it')
previous_mentions: Optional[AllowedMentions] = getattr(self._state, 'allowed_mentions', None)
if content is None:
content = MISSING
if ephemeral or suppress_embeds:
flags = MessageFlags._from_value(0)
flags.ephemeral = ephemeral
flags.suppress_embeds = suppress_embeds
else:
flags = MISSING
application_webhook = self.type is WebhookType.application
if ephemeral and not application_webhook:
raise ValueError('ephemeral messages can only be sent from application webhooks')
if application_webhook:
wait = True
if view is not MISSING:
if isinstance(self._state, _WebhookState):
raise ValueError('Webhook views require an associated state with the webhook')
if not hasattr(view, '__discord_ui_view__'):
raise TypeError(f'expected view parameter to be of type View not {view.__class__!r}')
if ephemeral is True and view.timeout is None:
view.timeout = 15 * 60.0
params = handle_message_parameters(
content=content,
username=username,
avatar_url=avatar_url,
tts=tts,
file=file,
files=files,
embed=embed,
embeds=embeds,
flags=flags,
view=view,
allowed_mentions=allowed_mentions,
previous_allowed_mentions=previous_mentions,
)
adapter = async_context.get()
thread_id: Optional[int] = None
if thread is not MISSING:
thread_id = thread.id
data = await adapter.execute_webhook(
self.id,
self.token,
session=self.session,
payload=params.payload,
multipart=params.multipart,
files=params.files,
thread_id=thread_id,
wait=wait,
)
msg = None
if wait:
msg = self._create_message(data)
if view is not MISSING and not view.is_finished():
message_id = None if msg is None else msg.id
self._state.store_view(view, message_id)
return msg
async def fetch_message(self, id: int, /) -> WebhookMessage:
"""|coro|
Retrieves a single :class:`~discord.WebhookMessage` owned by this webhook.
.. versionadded:: 2.0
Parameters
------------
id: :class:`int`
The message ID to look for.
Raises
--------
~discord.NotFound
The specified message was not found.
~discord.Forbidden
You do not have the permissions required to get a message.
~discord.HTTPException
Retrieving the message failed.
ValueError
There was no token associated with this webhook.
Returns
--------
:class:`~discord.WebhookMessage`
The message asked for.
"""
if self.token is None:
raise ValueError('This webhook does not have a token associated with it')
adapter = async_context.get()
data = await adapter.get_webhook_message(
self.id,
self.token,
id,
session=self.session,
)
return self._create_message(data)
async def edit_message(
self,
message_id: int,
*,
content: Optional[str] = MISSING,
embeds: List[Embed] = MISSING,
embed: Optional[Embed] = MISSING,
attachments: List[Union[Attachment, File]] = MISSING,
view: Optional[View] = MISSING,
allowed_mentions: Optional[AllowedMentions] = None,
) -> WebhookMessage:
"""|coro|
Edits a message owned by this webhook.
This is a lower level interface to :meth:`WebhookMessage.edit` in case
you only have an ID.
.. versionadded:: 1.6
.. versionchanged:: 2.0
The edit is no longer in-place, instead the newly edited message is returned.
.. versionchanged:: 2.0
This function will now raise :exc:`ValueError` instead of
``InvalidArgument``.
Parameters
------------
message_id: :class:`int`
The message ID to edit.
content: Optional[:class:`str`]
The content to edit the message with or ``None`` to clear it.
embeds: List[:class:`Embed`]
A list of embeds to edit the message with.
embed: Optional[:class:`Embed`]
The embed to edit the message with. ``None`` suppresses the embeds.
This should not be mixed with the ``embeds`` parameter.
attachments: List[Union[:class:`Attachment`, :class:`File`]]
A list of attachments to keep in the message as well as new files to upload. If ``[]`` is passed
then all attachments are removed.
.. versionadded:: 2.0
allowed_mentions: :class:`AllowedMentions`
Controls the mentions being processed in this message.
See :meth:`.abc.Messageable.send` for more information.
view: Optional[:class:`~discord.ui.View`]
The updated view to update this message with. If ``None`` is passed then
the view is removed. The webhook must have state attached, similar to
:meth:`send`.
.. versionadded:: 2.0
Raises
-------
HTTPException
Editing the message failed.
Forbidden
Edited a message that is not yours.
TypeError
You specified both ``embed`` and ``embeds``
ValueError
The length of ``embeds`` was invalid,
there was no token associated with this webhook or the webhook had
no state.
Returns
--------
:class:`WebhookMessage`
The newly edited webhook message.
"""
if self.token is None:
raise ValueError('This webhook does not have a token associated with it')
if view is not MISSING:
if isinstance(self._state, _WebhookState):
raise ValueError('This webhook does not have state associated with it')
self._state.prevent_view_updates_for(message_id)
previous_mentions: Optional[AllowedMentions] = getattr(self._state, 'allowed_mentions', None)
params = handle_message_parameters(
content=content,
attachments=attachments,
embed=embed,
embeds=embeds,
view=view,
allowed_mentions=allowed_mentions,
previous_allowed_mentions=previous_mentions,
)
adapter = async_context.get()
data = await adapter.edit_webhook_message(
self.id,
self.token,
message_id,
session=self.session,
payload=params.payload,
multipart=params.multipart,
files=params.files,
)
message = self._create_message(data)
if view and not view.is_finished():
self._state.store_view(view, message_id)
return message
async def delete_message(self, message_id: int, /) -> None:
"""|coro|
Deletes a message owned by this webhook.
This is a lower level interface to :meth:`WebhookMessage.delete` in case
you only have an ID.
.. versionadded:: 1.6
.. versionchanged:: 2.0
``message_id`` parameter is now positional-only.
.. versionchanged:: 2.0
This function will now raise :exc:`ValueError` instead of
``InvalidArgument``.
Parameters
------------
message_id: :class:`int`
The message ID to delete.
Raises
-------
HTTPException
Deleting the message failed.
Forbidden
Deleted a message that is not yours.
ValueError
This webhook does not have a token associated with it.
"""
if self.token is None:
raise ValueError('This webhook does not have a token associated with it')
adapter = async_context.get()
await adapter.delete_webhook_message(
self.id,
self.token,
message_id,
session=self.session,
)
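# Usage sketch (not part of the library): the webhook ID, token, and message content
# below are placeholders. It simply exercises the public API defined above:
# Webhook.partial(), Webhook.send(wait=True), WebhookMessage.edit() and .delete().
#
#   async def example():
#       async with aiohttp.ClientSession() as session:
#           webhook = Webhook.partial(123456789012345678, 'webhook-token', session=session)
#           msg = await webhook.send('Hello from a partial webhook', wait=True)
#           msg = await msg.edit(content='edited content')
#           await msg.delete()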
|
from flask_ember.util.string import dasherize
class ResourceGenerator:
def __init__(self, ember, resource_class):
self.ember = ember
self.resource_class = resource_class
def generate(self, app):
# TODO generation of api endpoints etc
resource = self.resource_class
name = resource.__qualname__
app.add_url_rule('/' + dasherize(name), name, lambda: name)
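# Minimal usage sketch (the Flask app and resource class are assumptions made for
# illustration; only generate() is exercised here):
#
#   from flask import Flask
#   class BlogPost:
#       pass
#   app = Flask(__name__)
#   ResourceGenerator(ember=None, resource_class=BlogPost).generate(app)
#   # registers an endpoint at '/blog-post' (assuming dasherize turns 'BlogPost'
#   # into 'blog-post') whose placeholder view returns the string 'BlogPost'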
|
SECURITY_CONFIG_ACTIONS = [
'DeleteAccountPublicAccessBlock',
'DeleteDeliveryChannel',
'DeleteDetector',
'DeleteFlowLogs',
'DeleteRule',
'DeleteTrail',
'DisableEbsEncryptionByDefault',
'DisableRule',
'StopConfigurationRecorder',
'StopLogging',
]
def rule(event):
if event['eventName'] == 'UpdateDetector':
return not event['requestParameters'].get('enable', True)
return event['eventName'] in SECURITY_CONFIG_ACTIONS
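# Minimal self-check of rule() against sample CloudTrail-style events; the event
# shapes below are illustrative assumptions, not captured log data.
if __name__ == '__main__':
    assert rule({'eventName': 'UpdateDetector', 'requestParameters': {'enable': False}})
    assert not rule({'eventName': 'UpdateDetector', 'requestParameters': {'enable': True}})
    assert rule({'eventName': 'StopLogging', 'requestParameters': {}})
    assert not rule({'eventName': 'CreateTrail', 'requestParameters': {}})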
|
from GLOBAL_VARIABLES import PLAYER_NAME
import ast
def opponents(parent_folder_ts: str, ts_filename: str) -> list:
"""
Extracts the names of the players in a tournament
Parameters:
parent_folder_ts (str): the parent folder where the tournament summary file is located
ts_filename (str): the name of the tournament summary file
Returns:
opp_no_duplicates (list): a list of players names
"""
if ts_filename is None:
return []
    # open the tournament summary file and parse its contents
    with open(parent_folder_ts + ts_filename, 'r') as f:
        data = f.read()
    # ast.literal_eval is a safer alternative to eval() for parsing the file's Python-literal contents
    ts = ast.literal_eval(data)
opponents = []
for opp in ts['tournament_finishes_and_winnings']:
opponents.append(opp['player_name'])
opp_no_duplicates = list(dict.fromkeys(opponents))
    if PLAYER_NAME in opp_no_duplicates:
        opp_no_duplicates.remove(PLAYER_NAME)
return opp_no_duplicates
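# Illustrative call (the folder and file names are placeholders, not real paths):
#   names = opponents('tournament_summaries/', 'ts_example.txt')
#   # -> a de-duplicated list of opponent names with PLAYER_NAME removed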
|
from keras import backend as K
from keras.models import *
from keras.layers import *
import os
from datetime import datetime
import tensorflow as tf
import numpy as np
class AgedModel:
def __init__(self, model=None, age=None):
self.graph = tf.Graph()
with self.graph.as_default():
self.session = tf.Session()
with self.session.as_default():
                if model is None:
                    n_sensors, t_periods = 4, 60
                    # The Sequential object creates a linear stack of layers
                    model = Sequential()
                    # As the first layer, add a 1D convolution layer with the following arguments:
                    # 1. Filters: the number of filters to apply (= width of the output)
                    # 2. Kernel_size: how many samples are convolved at a time (input length - kernel size + 1 gives the output length)
                    # 3. activation: the activation function of the neurons
                    # 4. input_shape: the "shape" of the input
                    model.add(Conv1D(100, 6, activation='relu', input_shape=(t_periods, n_sensors)))
                    # Another layer like the one above
                    model.add(Conv1D(100, 6, activation='relu'))
                    # Pooling layer for 1D convolutions: takes 3 inputs at a time and keeps only the maximum value, to limit overfitting
                    model.add(MaxPooling1D(3))
                    # Another 1D convolution layer
                    model.add(Conv1D(160, 6, activation='relu'))
                    # Last 1D convolution layer
                    model.add(Conv1D(160, 6, activation='relu'))
                    # Pooling layer that computes the average value of each feature map
                    model.add(GlobalAveragePooling1D())
                    # Not really a layer: sets half (0.5) of the input values to 0 to reduce overfitting
                    model.add(Dropout(0.5))
                    # Final layer of 3 nodes with softmax activation, which:
                    # assigns each output a value between 0 and 1, with the values summing to 1
                    model.add(Dense(3, activation='softmax'))
                    # Configures how learning is performed on the data, using:
                    # 1. loss: the function to minimise
                    # 2. optimizer: the function used to update the weights (adam is an improvement over SGD)
                    # 3. metrics: the list of metrics to monitor during training
                    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
self.model = model
else:
self.model = load_model(model)
if age != None:
self.age = age
else:
self.age = datetime.timestamp(datetime.now())
def train(self,data):
with self.graph.as_default():
with self.session.as_default():
x_train, y_train = data
                # Trains the model, finally returning a History object with various
                # fields showing how performance evolved during training. Arguments:
                # 1. numpy array, or list of numpy arrays (matching the expected dimensionality)
                # 2. same as above, for the labels
                # 3. number of samples to process before the weights are updated
                # 4. number of passes over the input data
                # 5. (optionally) the fraction of the training data to hold out for validation
self.model.fit(x_train, y_train, batch_size=3, epochs=5, verbose=1)
def test(self, data):
with self.graph.as_default():
with self.session.as_default():
x_test, y_test = data
return self.model.evaluate(x_test, y_test, verbose=1)
def predict(self,data):
with self.graph.as_default():
with self.session.as_default():
return self.model.predict(data)
def get_weights(self):
with self.graph.as_default():
with self.session.as_default():
return self.model.get_weights()
def set_weights(self, weights):
with self.graph.as_default():
with self.session.as_default():
return self.model.set_weights(weights)
def export(self):
with self.graph.as_default():
with self.session.as_default():
file_name = 'my_model' + str(datetime.timestamp(datetime.now())) + '.h5'
file_path = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), file_name)
                # model.save() creates/overwrites the file itself, so no manual open() is needed
                self.model.save(file_path)
return open(file_path, 'rb'), file_path
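# Usage sketch (assumes the default architecture above: windows of 60 time steps by
# 4 sensors and 3 output classes; the random arrays are stand-ins for real data):
#
#   m = AgedModel()
#   x = np.random.rand(6, 60, 4)
#   y = np.eye(3)[np.random.randint(0, 3, size=6)]
#   m.train((x, y))
#   print(m.predict(np.random.rand(1, 60, 4)))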
|
import logging
import os
from datetime import date
from typing import Optional
import pandas as pd
from . import lahman
from .datasources import fangraphs
_DATA_FILENAME = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'data', 'fangraphs_teams.csv')
def team_ids(season: Optional[int] = None, league: str = 'ALL') -> pd.DataFrame:
if not os.path.exists(_DATA_FILENAME):
_generate_teams()
fg_team_data = pd.read_csv(_DATA_FILENAME, index_col=0)
if season is not None:
fg_team_data = fg_team_data.query(f"yearID == {season}")
if league is not None and league.upper() != "ALL":
fg_team_data = fg_team_data.query(f"lgID == '{league.upper()}'")
return fg_team_data
_known_cities = ['Altoona', 'Anaheim', 'Arizona', 'Atlanta', 'Baltimore', 'Boston', 'Brooklyn', 'Buffalo',
'California', 'Chicago', 'Cincinnati', 'Cleveland', 'Colorado', 'Detroit', 'Elizabeth', 'Florida',
'Fort Wayne', 'Hartford', 'Houston', 'Indianapolis', 'Kansas City', 'Los Angeles', 'Milwaukee',
'Minnesota', 'Montreal', 'New York', 'Newark', 'Oakland', 'Philadelphia', 'Pittsburg',
'Pittsburgh', 'Richmond', 'San Diego', 'San Francisco', 'Seattle', 'St. Louis', 'St. Paul',
'Syracuse', 'Tampa Bay', 'Texas', 'Toronto', 'Troy', 'Washington', 'Washington', 'Wilmington']
_manual_matches = {'CPI': 'Browns/Stogies', 'ANA': 'Angels'}
def _estimate_name(team_row: pd.DataFrame, column: str) -> str:
if team_row['franchID'] in _manual_matches:
return _manual_matches[team_row['franchID']]
estimate = str(team_row[column])
for city in _known_cities + [str(team_row['city'])]:
estimate = estimate.replace(f'{city} ', '') if estimate.startswith(city) else estimate
return estimate
def _generate_teams() -> pd.DataFrame:
"""
    Creates a data file mapping Fangraphs team IDs to Lahman data, for use by fangraphs_teams.
Should only need to be run when a team is added, removed, or moves to a new city.
"""
start_season = 1871
end_season = date.today().year
# Only getting AB to make payload small, and you have to specify at least one column
team_data = fangraphs.fg_team_batting_data(start_season, end_season, "ALL", stat_columns=['AB'])
# Join the lahman data
teams_franchises = lahman.teams().merge(lahman.teams_franchises(), how='left', on='franchID', suffixes=['', '.fr'])
teams_franchises = teams_franchises.merge(lahman.parks(), how='left', left_on='park', right_on='park.name',
suffixes=['', '.p'])
# Drop lahman data down to just what we need
teams_franchises = teams_franchises[
['yearID', 'lgID', 'teamID', 'franchID', 'divID', 'name', 'park', 'teamIDBR', 'teamIDlahman45', 'teamIDretro',
'franchName', 'city', 'state']
]
# Try to guess the name Fangraphs would use
teams_franchises['possibleName'] = teams_franchises.apply(lambda row: _estimate_name(row, 'name'), axis=1)
teams_franchises['possibleFranchName'] = teams_franchises.apply(lambda row: _estimate_name(row, 'franchName'),
axis=1)
# Join up the data by team name, and look for what is still without a match
outer_joined = teams_franchises.merge(team_data, how='outer', left_on=['yearID', 'possibleName'],
right_on=['Season', 'Team'])
unjoined_teams_franchises = outer_joined.query('Season.isnull()').drop(team_data.columns, axis=1)
unjoined_team_data = outer_joined.query('yearID.isnull()').drop(teams_franchises.columns, axis=1)
# Take all the unmatched data and try to join off franchise name, instead of team name
inner_joined = teams_franchises.merge(team_data, how='inner', left_on=['yearID', 'possibleName'],
right_on=['Season', 'Team'])
franch_inner_joined = unjoined_teams_franchises.merge(unjoined_team_data, how='inner',
left_on=['yearID', 'possibleFranchName'],
right_on=['Season', 'Team'])
# Clean up the data
joined = pd.concat([inner_joined, franch_inner_joined])
outer_joined = joined.merge(team_data, how='outer', left_on=['yearID', 'teamIDfg'],
right_on=['Season', 'teamIDfg'], suffixes=['', '_y'])
unjoined_teams_franchises = outer_joined.query('Season_y.isnull()').drop(team_data.columns, axis=1,
errors='ignore')
if not unjoined_teams_franchises.empty:
logging.warning('When trying to join FG data to lahman, found the following extraneous lahman data',
extra=unjoined_teams_franchises)
unjoined_team_data = outer_joined.query('yearID.isnull()').drop(teams_franchises.columns, axis=1, errors='ignore')
if not unjoined_team_data.empty:
logging.warning('When trying to join Fangraphs data to lahman, found the following extraneous Fangraphs data',
extra=unjoined_team_data)
joined = joined[['yearID', 'lgID', 'teamID', 'franchID', 'teamIDfg', 'teamIDBR', 'teamIDretro']]
joined = joined.assign(teamIDfg=joined['teamIDfg'].apply(int))
joined = joined.assign(yearID=joined['yearID'].apply(int))
joined = joined.sort_values(['yearID', 'lgID', 'teamID', 'franchID']).drop_duplicates()
joined = joined.reset_index(drop=True)
joined.to_csv(_DATA_FILENAME)
return joined
# For backwards API compatibility
fangraphs_teams = team_ids
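# Example lookups (purely illustrative; they assume the fangraphs_teams.csv data file
# exists or can be regenerated by _generate_teams()):
#
#   team_ids(2019)         # all teams for the 2019 season
#   team_ids(2019, 'AL')   # American League teams only
#   team_ids()             # the full ID map across all seasons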
|
##############################
# Project: Monopoly game main program
# Version: 0.1
# Date: 2021/07/15
# Author: Antallen
# Content: uses the Player object
##############################
# import the randrange() function from the random module
from random import randrange
# import the Player object
import Player
# import the Chance object
import Chance
import Destiny
# import the Stores object
import Stores
# import the playMap object
import playMap
import Messages
# common helper functions and game settings
## total number of squares on the board
areas = 24
## wrap a player's position around the board when passing "Start"
def playerPo(steps):
if (steps >= areas):
nums = (steps % areas)
return nums
else:
return steps
## clear data left over from the previous game
def clearOldData():
files = open('players.csv','w',encoding='utf-8')
files.truncate()
titles = "id,name,money,po,status\n"
files.writelines(titles)
files.close()
files = open('messages.txt','w',encoding='utf-8')
files.truncate()
titles = "遊戲即將開始...."
files.writelines(titles)
files.close()
# main program flow
# guard with if __name__ so this runs only as a script
if __name__ == "__main__":
    # ask for the number of players
    players_num = int(input("請輸入玩家人數:"))
    # create the player objects
    players = []
    # for each player, use the Player class to set up the player's name, ID,
    # starting game money, starting position and other object state
clearOldData()
for i in range(players_num):
players.append(Player.Player())
        # ask the player to enter a name
players[i].setName(input("請輸入玩家名稱:"),i)
    # initialise the player position values
players_po = []
for i in range(players_num):
players_po.append('0')
    # set the index of the current player
    i = 0
    myMap = playMap.playMap()
    # object used to store game messages
news = Messages.Messages()
news.inputData("請按下《ENTER》進行遊戲")
myMap.printMap(players_po)
input()
    # start the game loop
    while True:
        ##### a.) print the board
        news.inputData(myMap.transferNo(str(i+1)) + "號玩家按下《ENTER》進行遊戲")
        myMap.printMap(players_po)
        input()
        ##### b.) roll the die
        oldpo = players[i].getPo()
        newstep = randrange(1,7)  # randrange's upper bound is exclusive, so this rolls 1-6
        news.inputData(myMap.transferNo(str(i+1)) + "號玩家擲骰子:" + myMap.transferNo(str(newstep)) + "點")
        myMap.printMap(players_po) # print the board
        # set the player's new position
        players[i].setPo(newstep)
        ##### c.) move to the square given by the die roll
        newpo = players[i].getPo()
        # I. the player may have passed the "Start" square
if ((int(newpo/areas) > int(oldpo/areas)) and ((newpo % areas) != 0)):
news.inputData(myMap.transferNo(str(i+1)) + "號玩家越過「開始」位置《ENTER》")
players[i].setMoney(2000,i)
myMap.printMap(players_po)
input()
news.inputData(myMap.transferNo(str(i+1)) + "號玩家得2000《ENTER》")
newpo = playerPo(newpo)
players_po[i] = str(newpo)
myMap.printMap(players_po)
input()
news.inputData(myMap.transferNo(str(i+1)) + "號玩家在新位置:" + myMap.transferNo(str(newpo)) + "《ENTER》")
myMap.printMap(players_po)
input()
        # II. the player may land on a corner square
if (newpo == 0):
news.inputData(myMap.transferNo(str(i+1)) + "號玩家回到「開始」位置《ENTER》")
myMap.printMap(players_po)
input()
elif (newpo == 6):
#print(" 休息一天")
news.inputData(myMap.transferNo(str(i+1)) + "號玩家,無事休息一天《ENTER》")
myMap.printMap(players_po)
input()
elif (newpo == 18):
#print(" 再玩一次")
news.inputData(myMap.transferNo(str(i+1)) + "號玩家,再玩一次《ENTER》")
myMap.printMap(players_po)
input()
continue
elif (newpo == 12):
news.inputData(myMap.transferNo(str(i+1)) + "號玩家,休息三次《ENTER》")
myMap.printMap(players_po)
input()
        # III. the player may be on a Chance or Destiny square
        ## the Chance squares are board positions 3 and 15
elif ((newpo == 3) or (newpo == 15)):
myChance = Chance.Chance()
chances = myChance.choice()
players[i].setMoney(int(chances[1]),i)
news.inputData(myMap.transferNo(str(i+1)) + "號玩家中機會:" + chances[0])
myMap.printMap(players_po)
input()
        ## the Destiny squares are board positions 9 and 21
elif ((newpo == 9) or (newpo == 21)):
myDestiny = Destiny.Destiny()
destines = myDestiny.choice()
players[i].setMoney(int(destines[1]),i)
news.inputData(myMap.transferNo(str(i+1)) + "號玩家中命運:" + destines[0])
myMap.printMap(players_po)
input()
        # IV. otherwise the player is on a property square
else:
playerStore = Stores.Stores()
store = playerStore.getStoreData(str(newpo))
            ## check whether another player already owns this property
if store[2] == '-1':
#print("該地產無人所有!")
news.inputData("該地產無人所有!是否買進?(Y|N)")
myMap.printMap(players_po)
results = input()
if ((results == 'Y') or (results == 'y')):
store[2] = str(i+1)
playerStore.setStoreData(store)
players[i].setMoney(0-int(store[3]),i)
news.inputData(myMap.transferNo(str(i+1)) + "號玩家買進地產:" + store[1])
myMap.printMap(players_po)
input()
else:
news.inputData(myMap.transferNo(str(i+1)) + "號玩家放棄買進")
myMap.printMap(players_po)
input()
else:
print("該地產為:" + str(players[int(store[2])-1].getName()) + "所有")
playerStore = None
        ##### e.)
        # move on to the next player
i = i + 1
if (i >= players_num):
i = i - players_num
        ##### f.) end-of-game condition
ends = input("是否結束遊戲?Y:是 N:繼續")
if ((ends == "Y") or (ends == "y")):
break
|
import json
# a Python object (dict):
x = {
"name": "John",
"age": 30,
"city": "New York"
}
# convert into JSON:
y = json.dumps(x)
# the result is a JSON string:
print(y)
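# For the reverse direction, json.loads() parses the JSON string back into an
# equivalent Python dict:
z = json.loads(y)
print(z["age"])  # 30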
|
# This file is licensed under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
"""Repository rules to configure the terminfo used by LLVM.
Most users should pick one of the explicit rules to configure their use of terminfo
with LLVM:
- `llvm_terminfo_system` will detect and link against a terminfo-implementing
system library (non-hermetically).
- 'llvm_terminfo_disable` will disable terminfo completely.
If you would like to make your build configurable, you can use
`llvm_terminfo_from_env`. By default, this will disable terminfo, but will
inspect the environment variable (most easily set with a `--repo_env` flag to
the Bazel invocation) `BAZEL_LLVM_TERMINFO_STRATEGY`. If it is set to
`system` then it will behave the same as `llvm_terminfo_system`. Any other
setting will disable terminfo the same as not setting it at all.
"""
def _llvm_terminfo_disable_impl(repository_ctx):
repository_ctx.template(
"BUILD",
repository_ctx.attr._disable_build_template,
executable = False,
)
_terminfo_disable_attrs = {
"_disable_build_template": attr.label(
default = Label("//deps_impl:terminfo_disable.BUILD"),
allow_single_file = True,
),
}
llvm_terminfo_disable = repository_rule(
implementation = _llvm_terminfo_disable_impl,
attrs = _terminfo_disable_attrs,
)
def _find_c_compiler(repository_ctx):
"""Returns the path to a plausible C compiler.
This routine will only reliably work on roughly POSIX-y systems as it
ultimately falls back on the `cc` binary. Fortunately, the thing we are
trying to use it for (detecting if a trivial source file can compile and
link against a particular library) requires very little.
"""
cc_env = repository_ctx.os.environ.get("CC")
cc = None
if cc_env:
if "/" in cc_env:
return repository_ctx.path(cc_env)
else:
return repository_ctx.which(cc_env)
# Look for Clang, GCC, and the POSIX / UNIX specified C compiler
# binaries.
for compiler in ["clang", "gcc", "c99", "c89", "cc"]:
cc = repository_ctx.which(compiler)
if cc:
return cc
return None
def _try_link(repository_ctx, cc, source, linker_flags):
"""Returns `True` if able to link the source with the linker flag.
Given a source file that contains references to library routines, this
will check that when linked with the provided linker flag, those
references are successfully resolved. This routine assumes a generally
POSIX-y and GCC-ish compiler and environment and shouldn't be expected to
work outside of that.
"""
cmd = [
cc,
# Force discard the linked executable.
"-o",
"/dev/null",
# Leave language detection to the compiler.
source,
]
# The linker flag must be valid for a compiler invocation of the link step,
# so just append them to the command.
cmd += linker_flags
exec_result = repository_ctx.execute(cmd, timeout = 20)
return exec_result.return_code == 0
def _llvm_terminfo_system_impl(repository_ctx):
# LLVM doesn't need terminfo support on Windows, so just disable it.
if repository_ctx.os.name.lower().find("windows") != -1:
_llvm_terminfo_disable_impl(repository_ctx)
return
if len(repository_ctx.attr.system_linkopts) > 0:
linkopts = repository_ctx.attr.system_linkopts
else:
required = repository_ctx.attr.system_required
# Find a C compiler we can use to detect viable linkopts on this system.
cc = _find_c_compiler(repository_ctx)
if not cc:
if required:
fail("Failed to find a C compiler executable")
else:
_llvm_terminfo_disable_impl(repository_ctx)
return
# Get the source file we use to detect successful linking of terminfo.
source = repository_ctx.path(repository_ctx.attr._terminfo_test_source)
# Collect the candidate linkopts and wrap them into a list. Ideally,
# these would be provided as lists, but Bazel doesn't currently
# support that. See: https://github.com/bazelbuild/bazel/issues/12178
linkopts_candidates = [[x] for x in repository_ctx.attr.candidate_system_linkopts]
        # For each candidate, try to use it to link our test source file.
        # Start with no linkopts so the check below works even if no candidate links.
        linkopts = None
        for linkopts_candidate in linkopts_candidates:
if _try_link(repository_ctx, cc, source, linkopts_candidate):
linkopts = linkopts_candidate
break
# If we never found a viable linkopts candidate, either error or disable
# terminfo for LLVM.
if not linkopts:
if required:
fail("Failed to detect which linkopt would successfully provide the " +
"necessary terminfo functionality")
else:
_llvm_terminfo_disable_impl(repository_ctx)
return
repository_ctx.template(
"BUILD",
repository_ctx.attr._system_build_template,
substitutions = {
"{TERMINFO_LINKOPTS}": str(linkopts),
},
executable = False,
)
def _merge_attrs(attrs_list):
attrs = {}
for input_attrs in attrs_list:
attrs.update(input_attrs)
return attrs
_terminfo_system_attrs = _merge_attrs([_terminfo_disable_attrs, {
"_system_build_template": attr.label(
default = Label("//deps_impl:terminfo_system.BUILD"),
allow_single_file = True,
),
"_terminfo_test_source": attr.label(
default = Label("//deps_impl:terminfo_test.c"),
allow_single_file = True,
),
"candidate_system_linkopts": attr.string_list(
default = [
"-lterminfo",
"-ltinfo",
"-lcurses",
"-lncurses",
"-lncursesw",
],
doc = "Candidate linkopts to test and see if they can link " +
"successfully.",
),
"system_required": attr.bool(
default = False,
doc = "Require that one of the candidates is detected successfully on POSIX platforms where it is needed.",
),
"system_linkopts": attr.string_list(
default = [],
doc = "If non-empty, a specific array of linkopts to use to " +
"successfully link against the terminfo library. No " +
"detection is performed if this option is provided, it " +
"directly forces the use of these link options. No test is " +
"run to determine if they are valid or work correctly either.",
),
}])
llvm_terminfo_system = repository_rule(
implementation = _llvm_terminfo_system_impl,
configure = True,
local = True,
attrs = _terminfo_system_attrs,
)
def _llvm_terminfo_from_env_impl(repository_ctx):
terminfo_strategy = repository_ctx.os.environ.get("BAZEL_LLVM_TERMINFO_STRATEGY")
if terminfo_strategy == "system":
_llvm_terminfo_system_impl(repository_ctx)
else:
_llvm_terminfo_disable_impl(repository_ctx)
llvm_terminfo_from_env = repository_rule(
implementation = _llvm_terminfo_from_env_impl,
configure = True,
local = True,
attrs = _merge_attrs([_terminfo_disable_attrs, _terminfo_system_attrs]),
environ = ["BAZEL_LLVM_TERMINFO_STRATEGY", "CC"],
)
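# Illustrative WORKSPACE wiring (the load path and repository name are assumptions;
# adjust them to wherever this .bzl file lives in your workspace):
#
#   load("//deps_impl:terminfo.bzl", "llvm_terminfo_from_env")
#   llvm_terminfo_from_env(name = "llvm_terminfo")
#
# The strategy can then be selected per build, for example:
#   bazel build --repo_env=BAZEL_LLVM_TERMINFO_STRATEGY=system //...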
|
# dialogs.folder
"""A collection of dialogs to do things to all fonts in a given folder."""
# import
from actions import actionsFolderDialog
from ufo2otf import UFOsToOTFsDialog
from otf2ufo import OTFsToUFOsDialog
from woff2ufo import WOFFsToUFOsDialog
# export
__all__ = [
'actionsFolderDialog',
'OTFsToUFOsDialog',
'UFOsToOTFsDialog',
'WOFFsToUFOsDialog',
]
|
import sys,os
sys.path.append(os.path.dirname(os.getcwd()))
from app.models import User
from app import db
u = User(username=sys.argv[1])
u.set_password(sys.argv[2])
db.session.add(u)
db.session.commit()
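# Intended invocation (the script name is a placeholder):
#   python create_user.py <username> <password>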
|
# SIKULI INTERFACE FOR TAGUI FRAMEWORK ~ TEBEL.ORG #
# timeout in seconds for finding a web element
setAutoWaitTimeout(10)
# delay in seconds between scanning for inputs
scan_period = 0.5
# counter to track current tagui sikuli step
tagui_count = '0'
# prevent premature exit on unhandled exception
setThrowException(False)
# enable OCR (optical character recognition)
Settings.OcrTextRead = True
Settings.OcrTextSearch = True
# function for tap / click step
def tap_intent ( raw_intent ):
params = (raw_intent + ' ')[1+(raw_intent + ' ').find(' '):].strip()
print '[tagui] ACTION - click ' + params
if exists(params):
return click(params)
else:
return 0
# function for rtap / rclick step
def rtap_intent ( raw_intent ):
params = (raw_intent + ' ')[1+(raw_intent + ' ').find(' '):].strip()
print '[tagui] ACTION - rclick ' + params
if exists(params):
return rightClick(params)
else:
return 0
# function for dtap / dclick step
def dtap_intent ( raw_intent ):
params = (raw_intent + ' ')[1+(raw_intent + ' ').find(' '):].strip()
print '[tagui] ACTION - dclick ' + params
if exists(params):
return doubleClick(params)
else:
return 0
# function for hover / move step
def hover_intent ( raw_intent ):
params = (raw_intent + ' ')[1+(raw_intent + ' ').find(' '):].strip()
print '[tagui] ACTION - hover ' + params
if exists(params):
return hover(params)
else:
return 0
# function for type / enter step
def type_intent ( raw_intent ):
params = (raw_intent + ' ')[1+(raw_intent + ' ').find(' '):].strip()
param1 = params[:params.find(' as ')].strip()
param2 = params[4+params.find(' as '):].strip()
print '[tagui] ACTION - type ' + param1 + ' as ' + param2
if exists(param1):
return type(param1,param2)
else:
return 0
# function for select / choose step
def select_intent ( raw_intent ):
params = (raw_intent + ' ')[1+(raw_intent + ' ').find(' '):].strip()
param1 = params[:params.find(' as ')].strip()
param2 = params[4+params.find(' as '):].strip()
print '[tagui] ACTION - click ' + param1
if exists(param1):
if click(param1) == 0:
return 0
else:
print '[tagui] ACTION - click ' + param2
if exists(param2):
return click(param2)
else:
return 0
else:
return 0
# function for reading OCR text
def read_intent ( raw_intent ):
return text_read(raw_intent)
# function for showing OCR text
def show_intent ( raw_intent ):
return text_read(raw_intent)
# function for saving OCR text
def save_intent ( raw_intent ):
return text_read(raw_intent)
# function for reading text using Tesseract OCR
def text_read ( raw_intent ):
params = (raw_intent + ' ')[1+(raw_intent + ' ').find(' '):].strip()
param1 = params[:(params + ' ').find(' ')].strip()
print '[tagui] ACTION - ' + raw_intent
if param1.endswith('page.png') or param1.endswith('page.bmp'):
fullscreen_layer = Screen()
temp_text = fullscreen_layer.text()
import codecs
tagui_text = codecs.open('tagui.sikuli/tagui_sikuli.txt','w',encoding='utf8')
tagui_text.write(temp_text)
tagui_text.close()
return 1
elif exists(param1):
matched_element = find(param1)
temp_text = matched_element.text()
import codecs
tagui_text = codecs.open('tagui.sikuli/tagui_sikuli.txt','w',encoding='utf8')
tagui_text.write(temp_text)
tagui_text.close()
return 1
else:
return 0
# function for capturing screenshot
def snap_intent ( raw_intent ):
params = (raw_intent + ' ')[1+(raw_intent + ' ').find(' '):].strip()
param1 = params[:params.find(' to ')].strip()
param2 = params[4+params.find(' to '):].strip()
print '[tagui] ACTION - snap ' + param1 + ' to ' + param2
if param1.endswith('page.png') or param1.endswith('page.bmp'):
fullscreen_layer = Screen()
temp_snap_file = fullscreen_layer.capture(fullscreen_layer.getBounds()).getFile()
import shutil
shutil.copy(temp_snap_file,param2)
return 1
elif exists(param1):
fullscreen_layer = Screen()
matched_element = find(param1)
temp_snap_file = fullscreen_layer.capture(matched_element).getFile()
import shutil
shutil.copy(temp_snap_file,param2)
return 1
else:
return 0
# function for sending custom commands
def vision_intent ( raw_intent ):
params = (raw_intent + ' ')[1+(raw_intent + ' ').find(' '):].strip()
print '[tagui] ACTION - ' + params
exec(params,globals())
return 1
# function to interpret input intent
def get_intent ( raw_intent ):
if raw_intent[:4].lower() == 'tap ' or raw_intent[:6].lower() == 'click ':
return 'tap'
if raw_intent[:5].lower() == 'rtap ' or raw_intent[:7].lower() == 'rclick ':
return 'rtap'
if raw_intent[:5].lower() == 'dtap ' or raw_intent[:7].lower() == 'dclick ':
return 'dtap'
if raw_intent[:6].lower() == 'hover ' or raw_intent[:5].lower() == 'move ':
return 'hover'
if raw_intent[:5].lower() == 'type ' or raw_intent[:6].lower() == 'enter ':
return 'type'
if raw_intent[:7].lower() == 'select ' or raw_intent[:7].lower() == 'choose ':
return 'select'
if raw_intent[:5].lower() == 'read ' or raw_intent[:6].lower() == 'fetch ':
return 'read'
if raw_intent[:5].lower() == 'show ' or raw_intent[:6].lower() == 'print ':
return 'show'
if raw_intent[:5].lower() == 'save ':
return 'save'
if raw_intent[:5].lower() == 'snap ':
return 'snap'
if raw_intent[:7].lower() == 'vision ':
return 'vision'
return 'error'
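# illustrative mappings for get_intent (assumed examples, not from the
# original script):
#   get_intent('click button.png')          -> 'tap'
#   get_intent('type field.png as hello')   -> 'type'
#   get_intent('snap page.png to shot.png') -> 'snap'
#   get_intent('scroll down')               -> 'error' (unrecognised step)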
# function to parse and act on intent
def parse_intent ( script_line ):
intent_type = get_intent(script_line)
if intent_type == 'tap':
return tap_intent(script_line)
elif intent_type == 'rtap':
return rtap_intent(script_line)
elif intent_type == 'dtap':
return dtap_intent(script_line)
elif intent_type == 'hover':
return hover_intent(script_line)
elif intent_type == 'type':
return type_intent(script_line)
elif intent_type == 'select':
return select_intent(script_line)
elif intent_type == 'read':
return read_intent(script_line)
elif intent_type == 'show':
return show_intent(script_line)
elif intent_type == 'save':
return save_intent(script_line)
elif intent_type == 'snap':
return snap_intent(script_line)
elif intent_type == 'vision':
return vision_intent(script_line)
else:
return 0
# write to interface out-file to signal ready for inputs
tagui_output = open('tagui.sikuli/tagui_sikuli.out','w')
tagui_output.write('[0] START')
tagui_output.close()
# initialise interface in-file before starting main loop
tagui_input = open('tagui.sikuli/tagui_sikuli.in','w')
tagui_input.write('')
tagui_input.close()
# main loop to scan inputs from automation flow
print '[tagui] START - listening for inputs'; print
while True:
# scan input from run-time interface in-file
try:
tagui_input = open('tagui.sikuli/tagui_sikuli.in','r')
    except (IOError, OSError):
print '[tagui] ERROR - cannot open tagui.sikuli/tagui_sikuli.in'; print
break
tagui_intent = tagui_input.read().strip()
tagui_input.close()
# quit if finish signal received, initialise and repeat loop if blank
if tagui_intent == 'finish':
break
elif not tagui_intent:
tagui_count = '0'
tagui_output = open('tagui.sikuli/tagui_sikuli.out','w')
tagui_output.write('[0] START')
tagui_output.close()
wait(scan_period)
continue
# get count and repeat loop if same count as last iteration
temp_count = tagui_intent[1:tagui_intent.find('] ')].strip()
tagui_intent = tagui_intent[2+tagui_intent.find('] '):].strip()
if tagui_count == temp_count:
wait(scan_period)
continue
else:
tagui_count = temp_count
# otherwise parse and act on input intent
print '[tagui] INPUT - ' + '[' + tagui_count + '] ' + tagui_intent
intent_result_value = parse_intent(tagui_intent)
if intent_result_value == 1:
intent_result_string = 'SUCCESS'
else:
intent_result_string = 'ERROR'
# save intent_result to interface out-file
print '[tagui] OUTPUT - ' + '[' + tagui_count + '] ' + intent_result_string; print
tagui_output = open('tagui.sikuli/tagui_sikuli.out','w')
tagui_output.write('[' + tagui_count + '] ' + intent_result_string)
tagui_output.close()
wait(scan_period)
# write to interface out-file to signal finish listening
print '[tagui] FINISH - stopped listening'
tagui_output = open('tagui.sikuli/tagui_sikuli.out','w')
tagui_output.write('[0] FINISH')
tagui_output.close()
|
from os import path
from setuptools import find_packages, setup
# requirements from requirements.txt
root_dir = path.dirname(path.abspath(__file__))
with open(path.join(root_dir, "requirements.txt"), "r") as f:
requirements = f.read().splitlines()
# long description from README
with open(path.join(root_dir, "README.md"), encoding="utf-8") as f:
long_description = f.read()
setup(
name="kf-cavatica-utils",
use_scm_version={
"local_scheme": "dirty-tag",
"version_scheme": "post-release",
},
setup_requires=["setuptools_scm"],
description="Kids First Cavatica Utilities",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/kids-first/kf-cavatica-python-tools",
packages=find_packages(),
python_requires=">=3.6, <4",
install_requires=requirements,
)
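# Example local install from a source checkout (a sketch, not part of the
# original file); setuptools_scm derives the version from the git tags:
#   pip install .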
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Helper functions for manipulating collections of variables during training.
"""
import re

import tensorflow as tf
from tensorflow import logging
from tensorflow.python.ops import variables as tf_variables
slim = tf.contrib.slim
# TODO(derekjchow): Consider replacing with tf.contrib.filter_variables in
# tensorflow/contrib/framework/python/ops/variables.py
def filter_variables(variables, filter_regex_list, invert=False):
"""Filters out the variables matching the filter_regex.
  Filter out the variables whose name matches any of the regular
  expressions in filter_regex_list and return the remaining variables.
Optionally, if invert=True, the complement set is returned.
Args:
variables: a list of tensorflow variables.
filter_regex_list: a list of string regular expressions.
invert: (boolean). If True, returns the complement of the filter set; that
is, all variables matching filter_regex are kept and all others discarded.
Returns:
a list of filtered variables.
"""
kept_vars = []
variables_to_ignore_patterns = list(filter(None, filter_regex_list))
for var in variables:
add = True
for pattern in variables_to_ignore_patterns:
if re.match(pattern, var.op.name):
add = False
break
if add != invert:
kept_vars.append(var)
return kept_vars
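# Illustrative usage (not part of the original module; the scope name is an
# assumption):
#   trainable = tf.trainable_variables()
#   outside_extractor = filter_variables(trainable, ['FeatureExtractor/.*'])
#   inside_extractor = filter_variables(
#       trainable, ['FeatureExtractor/.*'], invert=True)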
def multiply_gradients_matching_regex(grads_and_vars, regex_list, multiplier):
"""Multiply gradients whose variable names match a regular expression.
Args:
grads_and_vars: A list of gradient to variable pairs (tuples).
regex_list: A list of string regular expressions.
multiplier: A (float) multiplier to apply to each gradient matching the
regular expression.
Returns:
grads_and_vars: A list of gradient to variable pairs (tuples).
"""
variables = [pair[1] for pair in grads_and_vars]
matching_vars = filter_variables(variables, regex_list, invert=True)
for var in matching_vars:
logging.info('Applying multiplier %f to variable [%s]',
multiplier, var.op.name)
grad_multipliers = {var: float(multiplier) for var in matching_vars}
return slim.learning.multiply_gradients(grads_and_vars,
grad_multipliers)
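# Illustrative usage (assumed names, not from the original module): scale
# the gradients of bias variables after computing them with an optimizer.
#   grads_and_vars = optimizer.compute_gradients(loss)
#   grads_and_vars = multiply_gradients_matching_regex(
#       grads_and_vars, ['.*/biases'], multiplier=2.0)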
def freeze_gradients_matching_regex(grads_and_vars, regex_list):
"""Freeze gradients whose variable names match a regular expression.
Args:
grads_and_vars: A list of gradient to variable pairs (tuples).
regex_list: A list of string regular expressions.
Returns:
grads_and_vars: A list of gradient to variable pairs (tuples) that do not
contain the variables and gradients matching the regex.
"""
variables = [pair[1] for pair in grads_and_vars]
matching_vars = filter_variables(variables, regex_list, invert=True)
kept_grads_and_vars = [pair for pair in grads_and_vars
if pair[1] not in matching_vars]
for var in matching_vars:
logging.info('Freezing variable [%s]', var.op.name)
return kept_grads_and_vars
def get_variables_available_in_checkpoint(variables,
checkpoint_path,
include_global_step=True):
"""Returns the subset of variables available in the checkpoint.
  Inspects the given checkpoint and returns the subset of variables that are
available in it.
TODO(rathodv): force input and output to be a dictionary.
Args:
variables: a list or dictionary of variables to find in checkpoint.
checkpoint_path: path to the checkpoint to restore variables from.
include_global_step: whether to include `global_step` variable, if it
exists. Default True.
Returns:
A list or dictionary of variables.
Raises:
ValueError: if `variables` is not a list or dict.
"""
if isinstance(variables, list):
variable_names_map = {}
for variable in variables:
if isinstance(variable, tf_variables.PartitionedVariable):
name = variable.name
else:
name = variable.op.name
variable_names_map[name] = variable
elif isinstance(variables, dict):
variable_names_map = variables
else:
raise ValueError('`variables` is expected to be a list or dict.')
ckpt_reader = tf.train.NewCheckpointReader(checkpoint_path)
ckpt_vars_to_shape_map = ckpt_reader.get_variable_to_shape_map()
if not include_global_step:
ckpt_vars_to_shape_map.pop(tf.GraphKeys.GLOBAL_STEP, None)
vars_in_ckpt = {}
for variable_name, variable in sorted(variable_names_map.items()):
if variable_name in ckpt_vars_to_shape_map:
if ckpt_vars_to_shape_map[variable_name] == variable.shape.as_list():
vars_in_ckpt[variable_name] = variable
else:
logging.warning('Variable [%s] is available in checkpoint, but has an '
'incompatible shape with model variable. Checkpoint '
'shape: [%s], model variable shape: [%s]. This '
'variable will not be initialized from the checkpoint.',
variable_name, ckpt_vars_to_shape_map[variable_name],
variable.shape.as_list())
else:
logging.warning('Variable [%s] is not available in checkpoint',
variable_name)
if isinstance(variables, list):
return vars_in_ckpt.values()
return vars_in_ckpt
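# Illustrative usage (the checkpoint path and `sess` are placeholders, not
# from the original module):
#   available = get_variables_available_in_checkpoint(
#       tf.global_variables(), '/path/to/model.ckpt')
#   saver = tf.train.Saver(list(available))
#   saver.restore(sess, '/path/to/model.ckpt')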
|
class Frame:
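    """Cycle through a list of images, drawing one per tick onto a surface."""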
def __init__(self, surface, imgs):
self.idx = 0
self.imgs = imgs
self.num = len(imgs)
self.surface = surface
@property
def img(self):
return self.imgs[self.idx]
@property
def is_last_frame(self):
return self.idx == self.num - 1
def move(self):
self.idx += 1
self.idx %= self.num
def draw(self, x=0, y=0):
self.surface.blit(self.img, (x, y))
def tick(self, x=0, y=0):
self.draw(x, y)
self.move()
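# A minimal usage sketch follows; it assumes pygame (which is not imported
# by the original snippet) and is illustrative only.
if __name__ == "__main__":
    import pygame

    pygame.init()
    screen = pygame.display.set_mode((64, 64))
    frames = Frame(screen, [pygame.Surface((64, 64)) for _ in range(4)])
    frames.tick()  # blit the current image at (0, 0), then advance the index
    pygame.display.flip()
    pygame.quit()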
|
""" This is a little GUI to launch the playlist importer.
It launches the import script in a command prompt window.
"""
import os
import urllib2
import subprocess
import sys
# This requires wxPython 3.0.2.0.
# For compatibility with virtualenv you can use the wheels from
# https://www.lfd.uci.edu/~gohlke/pythonlibs/#wxpython
# (Download the "win32" or "amd64" wheel to match your python version, as well as the "common" wheel;
# pip install the common wheel first and then the architecture-specific one)
# noinspection PyUnresolvedReferences,PyPackageRequirements
import wx
import spotify_playlist_apple_music
from gui_autogen import MyFrame
from win_util import get_short_path_name
def set_enabled(obj, value):
""" Helper to enable/disable GUI control """
if value:
obj.Enable()
else:
obj.Disable()
class MyFrameImpl(MyFrame):
""" Attach code to the MyFrame GUI skeleton from the wxGlade code generator """
def __init__(self, *args, **kwargs):
super(MyFrameImpl, self).__init__(*args, **kwargs)
self.radio_btn_prefix.SetValue(True)
self.have_loaded_playlist_ok = False
self.loaded_playlist_name = None
self.update_state()
self.current_process = None
def update_state(self):
""" Update the state of controls based on other controls' entered values and
whether a playlist is already loaded """
prefix_mode = self.radio_btn_prefix.GetValue()
set_enabled(self.text_playlist_name, not prefix_mode)
set_enabled(self.text_prefix, prefix_mode)
url_ready = self.text_url.GetValue() != ""
set_enabled(self.button_load, url_ready)
set_enabled(self.button_import, self.have_loaded_playlist_ok)
if self.have_loaded_playlist_ok:
self.button_import.SetDefault()
else:
self.button_load.SetDefault()
def load_reset(self):
""" Something has been changed that invalidates the previously loaded playlist """
self.have_loaded_playlist_ok = False
self.loaded_playlist_name = None
def radio_btn_change(self, event):
self.load_reset()
self.update_state()
def text_url_change(self, event):
self.load_reset()
self.update_state()
def text_prefix_change(self, event):
self.load_reset()
self.update_state()
def text_playlist_name_change(self, event):
self.load_reset()
self.update_state()
def button_load_click(self, event):
self.load_reset()
self.update_state()
url = self.text_url.GetValue()
# Try to load the playlist, and show a message about the result.
result = False
try:
it = spotify_playlist_apple_music.get_track_iterator_for_url(url)
it.next()
except KeyError:
message = "Don't know how to load a playlist for this site. Double-check the URL."
except urllib2.HTTPError:
message = "Can't load the playlist at that address. Double-check the URL."
else:
playlist_name = self.get_playlist_name()
message = u"Loaded playlist. Click Import to create '%s'" % playlist_name
result = True
self.show_message(message)
self.have_loaded_playlist_ok = result
# If the load was successful, this will enable the Import button.
self.update_state()
def get_playlist_name(self):
imported_playlist_name = spotify_playlist_apple_music.loaded_playlist_name
prefix_mode = self.radio_btn_prefix.GetValue()
if prefix_mode:
prefix = self.text_prefix.GetValue()
if prefix is None:
prefix = ""
playlist_name = prefix + imported_playlist_name
else:
playlist_name = self.text_playlist_name.GetValue()
return playlist_name
def show_message(self, message):
self.label_load_result.SetLabel(message)
def button_import_click(self, event):
if self.checkbox_separate_window.GetValue():
params = ["--pause"]
command_form = "cmd /C start %s"
else:
params = []
command_form = "cmd /C %s"
prefix_mode = self.radio_btn_prefix.GetValue()
if prefix_mode:
prefix_text = self.text_prefix.GetValue()
if prefix_text != "":
params += ["--playlist-prefix", '"%s"' % prefix_text]
else:
playlist_name_text = self.text_playlist_name.GetValue()
params += ["--playlist-name", '"%s"' % playlist_name_text]
url = self.text_url.GetValue()
self.show_message("Importing '%s' to '%s'" % (url, self.get_playlist_name()))
params.append(url)
script_path = os.path.dirname(os.path.abspath(__file__))
python_app = os.path.join(script_path, "spotify_playlist_apple_music.py")
# I'm just using this short path name business because I can't find any quoting to convince
# cmd /C to accept the full path
python_exe = os.path.join(os.path.dirname(sys.executable), "python.exe")
python_cmd = [get_short_path_name(python_exe), "-u", get_short_path_name(python_app)] + params
command = command_form % " ".join(python_cmd)
print command
self.current_process = subprocess.Popen(command, universal_newlines=True)
self.button_import.Disable()
self.monitor()
def monitor(self):
return_code = self.current_process.poll()
if return_code is None:
wx.CallLater(2000, self.monitor)
else:
if return_code == 0:
self.show_message("Import complete")
else:
self.show_message("Import process exited with return code %d" % return_code)
self.current_process = None
self.button_import.Enable()
def main():
app = wx.App()
frame = MyFrameImpl(None)
    frame.Show()
app.MainLoop()
if __name__ == "__main__":
main()
|