import datetime
from flask import jsonify
from flask import request
from flask.views import MethodView
from chainerui.database import db
from chainerui.models.log import Log
from chainerui.models.project import Project
from chainerui.models.result import Result
class LogAPI(MethodView):
def post(self, project_id=None, result_id=None):
project = db.session.query(Project).filter_by(id=project_id).first()
if project is None:
return jsonify({
'project': None,
'message': 'No interface defined for URL.'
}), 404
result = db.session.query(Result).filter_by(id=result_id).first()
if result is None:
return jsonify({
'result': None,
'message': 'No interface defined for URL.'
}), 404
data = request.get_json()
log_json = data.get('log')
modified_at = log_json.get('modifiedAt', None)
if modified_at is not None:
result.log_modified_at = datetime.datetime.fromtimestamp(
modified_at)
log_values = log_json.get('values', [])
reset = log_json.get('reset', False)
if reset:
result.logs = []
for value in log_values:
result.logs.append(Log(value))
db.session.commit()
return jsonify({
'logs': {
'resultId': result.id,
'insertedLogCount': len(log_values),
'totalLogCount': len(result.logs)
}
})
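# Illustrative wiring sketch (the URL pattern and endpoint name below are
# assumptions, not chainerui's actual routing): a MethodView like LogAPI is
# exposed through add_url_rule.
if __name__ == '__main__':
    from flask import Flask
    demo_app = Flask(__name__)
    demo_app.add_url_rule(
        '/projects/<int:project_id>/results/<int:result_id>/logs',
        view_func=LogAPI.as_view('log'), methods=['POST'])
    print(demo_app.url_map)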
|
"""
LZ4 frame format definition: https://github.com/lz4/lz4/blob/dev/doc/lz4_Frame_format.md
"""
import io
from typing import Optional
from lz4.block import decompress
from structlog import get_logger
from unblob.extractors import Command
from ...file_utils import Endian, convert_int8, convert_int32
from ...models import File, Handler, HexString, ValidChunk
logger = get_logger()
SKIPPABLE_FRAMES_MAGIC = [0x184D2A50 + i for i in range(0, 16)]
FRAME_MAGIC = 0x184D2204
LEGACY_FRAME_MAGIC = 0x184C2102
FRAME_MAGICS = SKIPPABLE_FRAMES_MAGIC + [FRAME_MAGIC] + [LEGACY_FRAME_MAGIC]
_1BIT = 0x01
_2BITS = 0x03
END_MARK = 0x00000000
CONTENT_SIZE_LEN = 8
BLOCK_SIZE_LEN = (
FRAME_SIZE_LEN
) = BLOCK_CHECKSUM_LEN = CONTENT_CHECKSUM_LEN = MAGIC_LEN = DICTID_LEN = 4
FLG_LEN = BD_LEN = HC_LEN = 1
MAX_LEGACY_BLOCK_SIZE = 8 * 1024 * 1024 # 8 MB
class FLG:
"""Represents the FLG field"""
version: int = 0
block_independence: int = 0
block_checksum: int = 0
content_size: int = 0
content_checksum: int = 0
dictid: int = 0
def __init__(self, raw_flg: int):
self.version = (raw_flg >> 6) & _2BITS
self.block_independence = (raw_flg >> 5) & _1BIT
self.block_checksum = (raw_flg >> 4) & _1BIT
self.content_size = (raw_flg >> 3) & _1BIT
self.content_checksum = (raw_flg >> 2) & _1BIT
self.dictid = raw_flg & _1BIT
def as_dict(self) -> dict:
return {
"version": self.version,
"block_independence": self.block_independence,
"block_checksum": self.block_checksum,
"content_size": self.content_size,
"content_checksum": self.content_checksum,
"dictid": self.dictid,
}
class _LZ4HandlerBase(Handler):
"""A common base for all LZ4 formats."""
def _skip_magic_bytes(self, file: File):
file.seek(MAGIC_LEN, io.SEEK_CUR)
EXTRACTOR = Command("lz4", "--decompress", "{inpath}", "{outdir}/{infile}")
class LegacyFrameHandler(_LZ4HandlerBase):
NAME = "lz4_legacy"
PATTERNS = [HexString("02 21 4C 18")]
def calculate_chunk(self, file: File, start_offset: int) -> Optional[ValidChunk]:
self._skip_magic_bytes(file)
while True:
# The last block is detected either because it is followed by the “EOF” (End of File) mark,
# or because it is followed by a known Frame Magic Number.
raw_bsize = file.read(BLOCK_SIZE_LEN)
if raw_bsize == b"": # EOF
break
block_compressed_size = convert_int32(raw_bsize, Endian.LITTLE)
if block_compressed_size in FRAME_MAGICS:
# next magic, read too far
file.seek(-4, io.SEEK_CUR)
break
compressed_block = file.read(block_compressed_size)
uncompressed_block = decompress(compressed_block, MAX_LEGACY_BLOCK_SIZE)
# See 'fixed block size' in https://android.googlesource.com/platform/external/lz4/+/HEAD/doc/lz4_Frame_format.md#legacy-frame
if len(uncompressed_block) < MAX_LEGACY_BLOCK_SIZE:
break
end_offset = file.tell()
return ValidChunk(start_offset=start_offset, end_offset=end_offset)
class SkippableFrameHandler(_LZ4HandlerBase):
"""This can be anything, basically uncompressed data."""
NAME = "lz4_skippable"
PATTERNS = [HexString("5? 2A 4D 18")]
def calculate_chunk(self, file: File, start_offset: int) -> Optional[ValidChunk]:
self._skip_magic_bytes(file)
frame_size = convert_int32(file.read(FRAME_SIZE_LEN), Endian.LITTLE)
file.seek(frame_size, io.SEEK_CUR)
end_offset = file.tell()
return ValidChunk(start_offset=start_offset, end_offset=end_offset)
class DefaultFrameHandler(_LZ4HandlerBase):
"""This is the modern version, most frequently used."""
NAME = "lz4_default"
PATTERNS = [HexString("04 22 4D 18")]
def calculate_chunk( # noqa: C901
self, file: File, start_offset: int
) -> Optional[ValidChunk]:
self._skip_magic_bytes(file)
# 2. we parse the frame descriptor of dynamic size
flg_bytes = file.read(FLG_LEN)
raw_flg = convert_int8(flg_bytes, Endian.LITTLE)
flg = FLG(raw_flg)
logger.debug("Parsed FLG", **flg.as_dict())
# skip BD (max blocksize), only useful for decoders that needs to allocate memory
file.seek(BD_LEN, io.SEEK_CUR)
if flg.content_size:
file.seek(CONTENT_SIZE_LEN, io.SEEK_CUR)
if flg.dictid:
file.seek(DICTID_LEN, io.SEEK_CUR)
header_checksum = convert_int8(file.read(HC_LEN), Endian.LITTLE)
logger.debug("Header checksum (HC) read", header_checksum=header_checksum)
# 3. we read block by block until we hit the endmarker
while True:
block_size = convert_int32(file.read(BLOCK_SIZE_LEN), Endian.LITTLE)
logger.debug("block_size", block_size=block_size)
if block_size == END_MARK:
break
file.seek(block_size, io.SEEK_CUR)
if flg.block_checksum:
file.seek(BLOCK_CHECKSUM_LEN, io.SEEK_CUR)
# 4. we reached the endmark (0x00000000)
# 5. if frame descriptor mentions CRC, we add CRC
if flg.content_checksum:
file.seek(CONTENT_CHECKSUM_LEN, io.SEEK_CUR)
end_offset = file.tell()
return ValidChunk(start_offset=start_offset, end_offset=end_offset)
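# Minimal illustration (not part of the original handlers): decoding a single
# FLG byte with the helper above. 0x64 == 0b01100100 is just an example value:
# version 1, independent blocks, content checksum flag set.
if __name__ == "__main__":
    example_flg = FLG(0x64)
    assert example_flg.version == 1
    assert example_flg.block_independence == 1
    assert example_flg.content_checksum == 1
    print(example_flg.as_dict())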
|
from PIL import Image
import os
import sys
from os import listdir
from os.path import isfile, join
def main():
folder_path = sys.argv[1]
output_folder_path = folder_path + '_png'
try:
os.mkdir(output_folder_path)
    except FileExistsError:
        if not os.listdir(output_folder_path):
            print(f'Folder {output_folder_path} already exists and is empty.')
        else:
            print(
                f'Folder {output_folder_path} already exists and is not empty.')
files = [f for f in listdir(folder_path) if isfile(join(folder_path, f))]
for file in files:
file_path = join(folder_path, file)
img = Image.open(file_path)
output_filename = file.rsplit(sep='.', maxsplit=1)[0] + '.png'
output_path = join(output_folder_path, output_filename)
img.save(output_path, format='png', quality=100)
if __name__ == "__main__":
main()
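# Example invocation (the script file name is an assumption):
#   python convert_to_png.py /path/to/images
# which writes converted copies into /path/to/images_png.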
|
# pacman imports
from pacman.model.routing_info.\
dict_based_partitioned_partition_n_keys_map import \
DictBasedPartitionedPartitionNKeysMap
# spinnMachine imports
from spinn_machine.utilities.progress_bar import ProgressBar
# front end common imports
from spinn_front_end_common.abstract_models.\
abstract_provides_incoming_partition_constraints import \
AbstractProvidesIncomingPartitionConstraints
from spinn_front_end_common.abstract_models.\
abstract_provides_n_keys_for_partition import \
AbstractProvidesNKeysForPartition
from spinn_front_end_common.abstract_models.\
abstract_provides_outgoing_partition_constraints import \
AbstractProvidesOutgoingPartitionConstraints
from spinn_front_end_common.utilities import exceptions
class FrontEndCommonEdgeToNKeysMapper(object):
""" Works out the number of keys needed for each edge
"""
def __call__(self, partitioned_graph, partitionable_graph=None,
graph_mapper=None):
# Generate an n_keys map for the graph and add constraints
n_keys_map = DictBasedPartitionedPartitionNKeysMap()
# generate progress bar
progress_bar = ProgressBar(
len(partitioned_graph.subvertices),
"Deducing edge to number of keys map")
# contains a partitionable vertex
if partitionable_graph is not None and graph_mapper is not None:
# iterate over each partition in the partitioned graph
for vertex in partitioned_graph.subvertices:
partitions = \
partitioned_graph.outgoing_edges_partitions_from_vertex(
vertex)
for partition_id in partitions:
partition = partitions[partition_id]
added_constraints = False
constraints = self._process_partitionable_partition(
partition, n_keys_map, partition_id, graph_mapper,
partitionable_graph)
if not added_constraints:
partition.add_constraints(constraints)
else:
self._check_constraints_equal(
constraints, partition.constraints)
progress_bar.update()
progress_bar.end()
else:
for vertex in partitioned_graph.subvertices:
partitions = \
partitioned_graph.outgoing_edges_partitions_from_vertex(
vertex)
for partition_id in partitions:
partition = partitions[partition_id]
added_constraints = False
constraints = self._process_partitioned_partition(
partition, n_keys_map, partition_id, partitioned_graph)
if not added_constraints:
partition.add_constraints(constraints)
else:
self._check_constraints_equal(
constraints, partition.constraints)
progress_bar.update()
progress_bar.end()
return {'n_keys_map': n_keys_map}
@staticmethod
def _check_constraints_equal(constraints, stored_constraints):
"""
:param constraints:
:param stored_constraints:
:return:
"""
for constraint in constraints:
if constraint not in stored_constraints:
raise exceptions.ConfigurationException(
"Two edges within the same partition have different "
"constraints. This is deemed an error. Please fix and "
"try again")
@staticmethod
def _process_partitionable_partition(
partition, n_keys_map, partition_id, graph_mapper,
partitionable_graph):
partitioned_edge = partition.edges[0]
vertex_slice = graph_mapper.get_subvertex_slice(
partitioned_edge.pre_subvertex)
edge = graph_mapper.get_partitionable_edge_from_partitioned_edge(
partitioned_edge)
if not isinstance(edge.pre_vertex, AbstractProvidesNKeysForPartition):
n_keys_map.set_n_keys_for_partition(
partition, vertex_slice.n_atoms)
else:
n_keys_map.set_n_keys_for_partition(
partition,
edge.pre_vertex.get_n_keys_for_partition(
partition, graph_mapper))
constraints = list()
if isinstance(edge.pre_vertex,
AbstractProvidesOutgoingPartitionConstraints):
constraints.extend(
edge.pre_vertex.get_outgoing_partition_constraints(
partition, graph_mapper))
if isinstance(edge.post_vertex,
AbstractProvidesIncomingPartitionConstraints):
constraints.extend(
edge.post_vertex.get_incoming_partition_constraints(
partition, graph_mapper))
constraints.extend(
partitionable_graph.partition_from_vertex(
edge.pre_vertex, partition_id).constraints)
return constraints
@staticmethod
def _process_partitioned_partition(
partition, n_keys_map, partition_id, partitioned_graph):
edge = partition.edges[0]
if not isinstance(edge.pre_subvertex,
AbstractProvidesNKeysForPartition):
n_keys_map.set_n_keys_for_partition(partition, 1)
else:
n_keys_map.set_n_keys_for_partition(
partition,
edge.pre_subvertex.get_n_keys_for_partition(
partition, None))
constraints = list()
if isinstance(edge.pre_subvertex,
AbstractProvidesOutgoingPartitionConstraints):
constraints.extend(
edge.pre_subvertex.get_outgoing_partition_constraints(
partition, None))
if isinstance(edge.post_subvertex,
AbstractProvidesIncomingPartitionConstraints):
constraints.extend(
edge.post_subvertex.get_incoming_partition_constraints(
partition, None))
constraints.extend(
partitioned_graph.partition_from_vertex(
edge.pre_subvertex, partition_id).constraints)
return constraints
|
from cloudant.client import Cloudant
from cloudant.error import CloudantException
from cloudant.result import Result, ResultByKey
client = Cloudant.iam("b3e03381-f624-4db8-a3da-3588bface309-bluemix", "sckyMGqNGv8CX9aIcTDbrhYZYhYBDUfEXAJuXuN8SB1D")
client.connect()
databaseName = "attendance_toqa"
myDatabase = client.create_database(databaseName)
if myDatabase.exists():
    print("'{0}' successfully created.\n".format(databaseName))
sampleData = [
[1, "Gabr", "Hazem", 100],
[2, "Adel", "Muhammad", 40],
[3, "omar", "Mekawy", 20],
[4, "mustafa", "azazy", 10],
]
# Create documents by using the sample data.
# Go through each row in the array
for document in sampleData:
# Retrieve the fields in each row.
number = document[0]
name = document[1]
description = document[2]
temperature = document[3]
# Create a JSON document that represents
# all the data in the row.
jsonDocument = {
"numberField": number,
"nameField": name,
"descriptionField": description,
"temperatureField": temperature
}
# Create a document by using the database API.
newDocument = myDatabase.create_document(jsonDocument)
# Check that the document exists in the database.
if newDocument.exists():
        print("Document '{0}' successfully created.".format(number))
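# Illustrative follow-up (not in the original snippet): the Result class
# imported above can be used to read the stored documents back, e.g. the
# first row of the primary index:
result_collection = Result(myDatabase.all_docs, include_docs=True)
print("Retrieved first document:\n{0}\n".format(result_collection[0]))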
|
#!/usr/bin/env python3
"""This code is the main access point for the majority of users of The-wiZZ. It
takes an input subselection of a survey catalog, a The-wiZZ HDF5 data file, and
matches the two together to create a resultant clustering redshift estimate
that can then be turned into a redshift PDF. This code also takes care of any
weighting of the objects with unknown redshift, redshift binning, bootstrapping
errors, and output. See input_flags.py for a list of options or use --help from
the command line.
"""
import numpy as np
from the_wizz import core_utils
from the_wizz import pdf_maker_utils
from the_wizz import input_flags
if __name__ == "__main__":
print("")
print("The-wiZZ has begun conjuring: running pdf maker...")
# First we parse the command line for arguments as usual. See
# input_flags.py for a full list of input arguments.
args = input_flags.parse_input_pdf_args()
input_flags.print_args(args)
# Load the file containing all matched pairs of spectroscopic and
# photometric objects.
print("Loading unknown data...")
unknown_data = core_utils.file_checker_loader(args.unknown_sample_file)
# Now we figure out what kind of redshift binning we would like to have.
# This will be one of the largest impacts on the signal to noise of the
# measurement. Some rules of thumb are:
# The narrower bins are in redshift the better. You are measuring a
# correlation, the narrower the bin size in comoving distance the more
# correlated things will be and thus increase the amplitude. Aka use
# Groth/Pebbles[sic] scaling to your advantage.
    # For a spectroscopic sample that is selected for a specific redshift
    # range with few galaxies outside that range (e.g. DEEP2), adaptive
    # binning is recommended. This will keep an equal number of spectra per
    # redshift bin. A good rule is to try to have about 100 spectra per
    # redshift bin for max signal to noise.
    # Linear binning is provided as a courtesy and is not necessarily
    # recommended. It will not give the best signal to noise compared to
    # adaptive, and it has the same drawback as adaptive in that the bias
    # could be changing oddly from bin to bin. It is recommended that the
    # user try adaptive and comoving spaced bins for the best results.
    # Comoving returns bins that are of equal comoving distance along the
    # line of sight. We also provide binning in equal ln(1 + z). This is for
    # people who want a comoving-like binning but without the dependence on
    # cosmology. It also has the convenient property of giving errors that
    # can be more easily compared to the usual sigma/(1 + z) error.
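    # As an illustration only (this is an assumption about what the
    # 'logspace' option does, not its actual implementation), equal
    # ln(1 + z) bin edges between z_min and z_max could be built as:
    #   log_edges = np.linspace(np.log(1 + z_min), np.log(1 + z_max),
    #                           n_bins + 1)
    #   z_bin_edge_array = np.exp(log_edges) - 1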
print("Creating bins...")
if args.z_binning_type[0] == 'linear':
z_bin_edge_array = pdf_maker_utils._create_linear_redshift_bin_edges(
args.z_min, args.z_max, args.z_n_bins)
elif args.z_binning_type[0] == 'adaptive':
z_bin_edge_array = pdf_maker_utils._create_adaptive_redshift_bin_edges(
args.z_min, args.z_max, args.z_n_bins,
pdf_maker.reference_redshift_array)
elif args.z_binning_type[0] == 'comoving':
z_bin_edge_array = pdf_maker_utils._create_comoving_redshift_bin_edges(
args.z_min, args.z_max, args.z_n_bins)
elif args.z_binning_type[0] == 'logspace':
z_bin_edge_array = pdf_maker_utils._create_logspace_redshift_bin_edges(
args.z_min, args.z_max, args.z_n_bins)
elif args.z_binning_type[0] == 'file':
z_bin_edge_array = np.loadtxt(args.z_binning_type[1])[:-1]
else:
print("Requested binning name invalid. Valid types are:")
print("\tlinear: linear binning in redshift")
print("\tadaptive: constant reference objects per redshift bin")
print("\tcomoving: linear binning in comoving distance")
        print("\tlogspace: equal ln(1 + z) binning")
        print("\tfile: file providing the bin edges")
print("Returning linear binning...")
z_bin_edge_array = pdf_maker_utils._create_linear_redshift_bin_edges(
args.z_min, args.z_max, args.z_n_bins)
# This is where the heavy lifting happens. We create our PDF maker object
# which will hold the pair file for use, calculate the over density per
# redshift bin, and also store intermediary results for later use.
# Before we can estimate the PDF, we must mask for the objects we want
    # to estimate the redshift of. These objects can be color selected,
# photo-z selected, or any other object selection you would like. The code
# line below turns the array of indices in the hdf5 pair file, into a
# single density estimate around the reference object.
print("Starting indices matcher...")
pdf_maker = pdf_maker_utils.collapse_ids_to_single_estimate(
args.input_pair_hdf5_file, args.pair_scale_name, unknown_data, args)
    # Before we calculate the PDFs, we want to know what the over densities
# are in each of the regions calculated on the area we consider.
print("Calculating region densities...")
pdf_maker.compute_region_densities(z_bin_edge_array, args.z_max)
if args.output_region_pickle_file is not None:
pdf_maker.write_region_densities(args.output_region_pickle_file, args)
# Now that we've "collapsed" the estimate around the reference object we
# need to bin up the results in redshift and create our final PDF.
print("Calculating pdf...")
if args.bootstrap_samples is None:
pdf_maker.compute_pdf_bootstrap(args.n_bootstrap)
else:
bootstrap_region_array = np.loadtxt(args.bootstrap_samples,
dtype=np.int_)
pdf_maker._compute_pdf_bootstrap(bootstrap_region_array)
# Write individual bootstraps to file.
if args.output_bootstraps_file is not None:
pdf_maker.write_bootstrap_samples_to_ascii(args.output_bootstraps_file,
args)
# Now that we have the results. We just need to write them to file and we
# are done.
print("Writing...")
output_file = core_utils.create_ascii_file(args.output_pdf_file_name,
args)
pdf_maker.write_pdf_to_ascii(output_file)
output_file.close()
print("Done!")
|
# -*- coding: utf-8 -*-
import uuid
from django.contrib.contenttypes.fields import GenericForeignKey
from django.contrib.contenttypes.models import ContentType
from django.core.validators import MinValueValidator
from django.db import models
from rest_framework.exceptions import NotAcceptable
from apps.authentication.models import OnlineUser as User
class Order(models.Model):
content_type = models.ForeignKey(ContentType, on_delete=models.CASCADE)
object_id = models.PositiveIntegerField()
content_object = GenericForeignKey("content_type", "object_id")
order_line = models.ForeignKey(
"OrderLine", related_name="orders", on_delete=models.CASCADE
)
# Price of product when paid
price = models.DecimalField(max_digits=10, decimal_places=2, blank=True)
# Quantity of products ordered
quantity = models.PositiveIntegerField(validators=[MinValueValidator(1)])
def total_price(self):
return self.content_object.price * self.quantity
def reduce_stock(self):
self.content_object.reduce_stock(self.quantity)
def __str__(self):
return str(self.content_object)
class Meta:
default_permissions = ("add", "change", "delete")
class OrderLine(models.Model):
user = models.ForeignKey(User, related_name="u", on_delete=models.CASCADE)
datetime = models.DateTimeField(auto_now_add=True)
paid = models.BooleanField(default=False)
def count_orders(self):
return sum((order.quantity for order in self.orders.all()))
def subtotal(self):
return sum((order.total_price() for order in self.orders.all()))
def pay(self):
if self.paid:
return
if self.subtotal() > self.user.saldo:
self.delete()
raise NotAcceptable("Insufficient funds")
# Setting price for orders in case product price changes later
for order in self.orders.all():
order.price = order.total_price()
order.save()
order.reduce_stock()
self.user.saldo = self.user.saldo - self.subtotal()
self.user.save()
self.paid = True
self.save()
def __str__(self):
return str(self.pk)
class Meta:
default_permissions = ("add", "change", "delete")
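# Illustrative flow (model and variable names here are assumptions): an
# OrderLine collects Orders pointing at a product through the generic
# foreign key; pay() then snapshots prices, reduces stock and deducts the
# user's saldo, e.g.:
#
#   line = OrderLine.objects.create(user=some_user)
#   Order.objects.create(order_line=line, content_object=some_product,
#                        price=some_product.price, quantity=2)
#   line.pay()  # raises NotAcceptable("Insufficient funds") if saldo is low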
class MagicToken(models.Model):
user = models.ForeignKey(User, on_delete=models.CASCADE)
token = models.CharField("token", default=uuid.uuid4, max_length=36)
data = models.TextField("data")
created = models.DateTimeField("created", editable=False, auto_now_add=True)
class Meta:
default_permissions = ("add", "change", "delete")
|
#!/usr/bin/env python3
# Copyright Istio Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import logging
from runner import cluster, config as cfg, consts, entrypoint, mesh, pipeline
def main(args: argparse.Namespace) -> None:
log_level = getattr(logging, args.log_level)
logging.basicConfig(level=log_level, format='%(levelname)s\t> %(message)s')
config = cfg.from_toml_file(args.config_path)
cluster.set_up_if_not_exists(
config.cluster_project_id, config.cluster_name, config.cluster_zones,
config.cluster_version, config.server_machine_type,
config.server_disk_size_gb, config.server_num_nodes,
config.client_machine_type, config.client_disk_size_gb)
for topology_path in config.topology_paths:
for env_name in config.environments:
entrypoint_service_name = entrypoint.extract_name(topology_path)
mesh_environment = mesh.for_state(
env_name, entrypoint_service_name,
consts.SERVICE_GRAPH_NAMESPACE, config, args.helm_values)
pipeline.run(topology_path, mesh_environment, config.server_image,
config.client_image, config.istio_archive_url,
config.client_qps, config.client_duration,
config.client_num_conc_conns, config.labels())
def parse_args() -> argparse.Namespace:
parser = argparse.ArgumentParser()
parser.add_argument('config_path', type=str)
parser.add_argument('helm_values', type=str)
parser.add_argument(
'--log_level',
type=str,
choices=['CRITICAL', 'ERROR', 'WARNING', 'INFO', 'DEBUG'],
default='DEBUG')
return parser.parse_args()
if __name__ == '__main__':
args = parse_args()
main(args)
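# Example invocation (the entry-point file name is an assumption):
#   python runner/main.py config.toml values.yaml --log_level INFO
# where config.toml is parsed by runner.config and values.yaml supplies the
# Helm values forwarded to mesh.for_state().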
|
from django.contrib.sites.models import Site, get_current_site
from django.core import urlresolvers, paginator
from django.core.exceptions import ImproperlyConfigured
import urllib
PING_URL = "http://www.google.com/webmasters/tools/ping"
class SitemapNotFound(Exception):
pass
def ping_google(sitemap_url=None, ping_url=PING_URL):
"""
Alerts Google that the sitemap for the current site has been updated.
If sitemap_url is provided, it should be an absolute path to the sitemap
for this site -- e.g., '/sitemap.xml'. If sitemap_url is not provided, this
function will attempt to deduce it by using urlresolvers.reverse().
"""
if sitemap_url is None:
try:
# First, try to get the "index" sitemap URL.
sitemap_url = urlresolvers.reverse('django.contrib.sitemaps.views.index')
except urlresolvers.NoReverseMatch:
try:
# Next, try for the "global" sitemap URL.
sitemap_url = urlresolvers.reverse('django.contrib.sitemaps.views.sitemap')
except urlresolvers.NoReverseMatch:
pass
if sitemap_url is None:
raise SitemapNotFound("You didn't provide a sitemap_url, and the sitemap URL couldn't be auto-detected.")
from django.contrib.sites.models import Site
current_site = Site.objects.get_current()
url = "http://%s%s" % (current_site.domain, sitemap_url)
params = urllib.urlencode({'sitemap':url})
urllib.urlopen("%s?%s" % (ping_url, params))
class Sitemap(object):
# This limit is defined by Google. See the index documentation at
# http://sitemaps.org/protocol.php#index.
limit = 50000
def __get(self, name, obj, default=None):
try:
attr = getattr(self, name)
except AttributeError:
return default
if callable(attr):
return attr(obj)
return attr
def items(self):
return []
def location(self, obj):
return obj.get_absolute_url()
def _get_paginator(self):
if not hasattr(self, "_paginator"):
self._paginator = paginator.Paginator(self.items(), self.limit)
return self._paginator
paginator = property(_get_paginator)
def get_urls(self, page=1, site=None):
if site is None:
if Site._meta.installed:
try:
site = Site.objects.get_current()
except Site.DoesNotExist:
pass
if site is None:
raise ImproperlyConfigured("In order to use Sitemaps you must either use the sites framework or pass in a Site or RequestSite object in your view code.")
urls = []
for item in self.paginator.page(page).object_list:
loc = "http://%s%s" % (site.domain, self.__get('location', item))
priority = self.__get('priority', item, None)
url_info = {
'location': loc,
'lastmod': self.__get('lastmod', item, None),
'changefreq': self.__get('changefreq', item, None),
'priority': str(priority is not None and priority or '')
}
urls.append(url_info)
return urls
class FlatPageSitemap(Sitemap):
def items(self):
current_site = Site.objects.get_current()
return current_site.flatpage_set.filter(registration_required=False)
class GenericSitemap(Sitemap):
priority = None
changefreq = None
def __init__(self, info_dict, priority=None, changefreq=None):
self.queryset = info_dict['queryset']
self.date_field = info_dict.get('date_field', None)
self.priority = priority
self.changefreq = changefreq
def items(self):
# Make sure to return a clone; we don't want premature evaluation.
return self.queryset.filter()
def lastmod(self, item):
if self.date_field is not None:
return getattr(item, self.date_field)
return None
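# Illustrative usage (not part of this module; `Entry` is a stand-in model):
# GenericSitemap is driven by an info_dict holding a queryset and an optional
# date_field, e.g.:
#
#   info_dict = {'queryset': Entry.objects.all(), 'date_field': 'pub_date'}
#   sitemaps = {'blog': GenericSitemap(info_dict, priority=0.6)}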
|
# Copyright (c) 2006-2013 Regents of the University of Minnesota.
# For licensing terms, see the file LICENSE.
import os
import sys
# Pyserver's conf.py wants the pyserver directory to be the current directory.
# And for importing pyserver sub-modules to work, we need the pyserver
# directory to be the current directory. We also need to add the dir to
# sys.path.
# SYNC_ME: Search pyserver module name.
dirname_pyserver = 'pyserver'
# Check that INSTANCE is set.
# FIXME: Should INSTANCE (and other env vars) be commonly prefixed?
# E.g., CCP_INSTANCE, CCP_PYSERVER_HOME, etc.?
# And just what are all the env vars that Cyclopath uses?
try:
from mod_python import apache
# This means we're running from apache. And under apache, INSTANCE isn't
# set. But we can tell what instance is specifed in httpd.conf by reading
# its PythonInterpreter value. Note that every Cyclopath installation on
# the server has a unique name that goes [instance]___[my_ccp_dev_dir],
# e.g., minnesota___ccpv3_trunk
instance_raw = apache.interpreter
# See /var/log/apache2/error.log, or maybe /ccp/var/log/apache2/error.log.
error_log = apache.log_error
except ImportError:
# We have yet to set up logging; log to, e.g., /var/log/apache2/error.log.
error_log = sys.stderr.write
try:
instance_raw = os.environ['INSTANCE']
except KeyError:
instance_raw = ''
#
uniquely_starts = instance_raw.find('___')
if uniquely_starts != -1:
instance_name = instance_raw[:uniquely_starts]
else:
instance_name = instance_raw
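# E.g., an instance_raw of 'minnesota___ccpv3_trunk' yields the instance_name
# 'minnesota', while a bare 'minnesota' passes through unchanged.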
# We used to set an env. var. but let's avoid a race condition with other
# Apache forks, since the env. seems to be shared among our processes.
# No: os.environ['INSTANCE'] = instance_name
#
if not instance_name:
error_log('ERROR: Please set the INSTANCE environment variable (py_glue).')
sys.exit(1)
# We hard-code the path separator, so make sure it's what we think 'tis.
assert(os.path.sep == '/') # We only run on Linux.
# If $PYSERVER_HOME is set, but to the wrong path, you'll get weird errors,
# e.g., ./ccp.py -i ==> ConfigParser.NoSectionError: No section: 'gwis'.
# because $PYSERVER_HOME set to a V1 path.
try:
# See if the user or script supplied the directory as an environment var.
# SYNC_ME: Search environment variable: PYSERVER_HOME.
pyserver_home = os.environ['PYSERVER_HOME']
except KeyError:
# Otherwise, if the user or script is running this script from somewhere
# within the Cyclopath source directory, we can deduce pyserver's home.
# NOTE: sys.path[0] is the absolute path to the script, which we need to
# use in case the calling script was invoked from a directory other
# than its own.
# NOTE: If you run py interactively, sys.path[0] is '', and abspath('')
# resolves to the current directory...
walk_path = os.path.abspath(sys.path[0])
depth = 1
pyserver_home = ''
while not pyserver_home:
# EXPLAIN: Why doesn't this use os.path.join?
test_this_path = os.path.abspath('%s/%s' % (walk_path,
dirname_pyserver,))
#print 'test_this_path: %s' % (test_this_path,)
# See if the test path is really a path.
if os.path.isdir(test_this_path):
      # Ooh, this is good news. See if we can find ourselves a VERSION.py.
      # Note that this will have required that flashclient has been built.
if os.path.isfile('%s/VERSION.py' % (test_this_path)):
# Whooptie-doo! We have ourselves a pyserver_home.
pyserver_home = test_this_path
break
# If we didn't find pyserver_home, try the next directory in the
# ancestry, and do some error checking.
assert(not pyserver_home) # There's a 'break' above...
new_walk_path = os.path.dirname(walk_path)
assert(new_walk_path != walk_path)
walk_path = new_walk_path
# If we hit rock bottom...
if walk_path == '/':
sys.stderr.write('Got to root. Something is wrong. Buh-bye!\n')
sys.exit(1)
   # Increase your loop confidence.
# MAGIC NUMBER: Just guessing that 32 is very unlikely path depth.
depth += 1
if depth > 32:
sys.stderr.write('Tired of looping. Giving up!\n')
sys.exit(1)
# Set the PYSERVER_HOME env var for the rest of the app.
# NO: Race condition with our Cyclopath server installations:
# The next URL request -- even on a different server install --
# will inherit the environment variables for this process. So
# don't set environment variables inside the app.
# No: os.environ['PYSERVER_HOME'] = pyserver_home
# 2013.09.03: Let's add mapserver/ to the path, too, so we can always skin.
mapserver_home = '%s/mapserver' % (os.path.dirname(pyserver_home),)
sys.path.insert(0, mapserver_home)
# 2013.10.24: Let's add services/ to the path, too.
services_home = '%s/services' % (os.path.dirname(pyserver_home),)
sys.path.insert(0, services_home)
sys.path.insert(0, pyserver_home)
os.chdir(pyserver_home)
if __name__ == '__main__':
import conf
print 'Seems OK...'
|
# Copyright 2020-2021, The Autoware Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Launch Modules for Milestone 3 of the AVP 2020 Demo."""
from ament_index_python import get_package_share_directory
from launch.launch_description_sources import PythonLaunchDescriptionSource
from launch.actions import IncludeLaunchDescription
from launch import LaunchDescription
from launch.actions import DeclareLaunchArgument
from launch.conditions import IfCondition
from launch.substitutions import LaunchConfiguration
from launch_ros.actions import Node
import os
def generate_launch_description():
"""
Launch all nodes defined in the architecture for Milestone 3 of the AVP 2020 Demo.
More details about what is included can
be found at https://gitlab.com/autowarefoundation/autoware.auto/AutowareAuto/-/milestones/25.
"""
avp_demo_pkg_prefix = get_package_share_directory('autoware_auto_avp_demo')
euclidean_cluster_param_file = os.path.join(
avp_demo_pkg_prefix, 'param/euclidean_cluster.param.yaml')
ray_ground_classifier_param_file = os.path.join(
avp_demo_pkg_prefix, 'param/ray_ground_classifier.param.yaml')
scan_downsampler_param_file = os.path.join(
avp_demo_pkg_prefix, 'param/scan_downsampler_ms3.param.yaml')
lanelet2_map_provider_param_file = os.path.join(
avp_demo_pkg_prefix, 'param/lanelet2_map_provider.param.yaml')
lane_planner_param_file = os.path.join(
avp_demo_pkg_prefix, 'param/lane_planner.param.yaml')
parking_planner_param_file = os.path.join(
avp_demo_pkg_prefix, 'param/parking_planner.param.yaml')
object_collision_estimator_param_file = os.path.join(
avp_demo_pkg_prefix, 'param/object_collision_estimator.param.yaml')
behavior_planner_param_file = os.path.join(
avp_demo_pkg_prefix, 'param/behavior_planner.param.yaml')
off_map_obstacles_filter_param_file = os.path.join(
avp_demo_pkg_prefix, 'param/off_map_obstacles_filter.param.yaml')
point_cloud_fusion_node_pkg_prefix = get_package_share_directory(
'point_cloud_fusion_nodes')
# Arguments
euclidean_cluster_param = DeclareLaunchArgument(
'euclidean_cluster_param_file',
default_value=euclidean_cluster_param_file,
description='Path to config file for Euclidean Clustering'
)
ray_ground_classifier_param = DeclareLaunchArgument(
'ray_ground_classifier_param_file',
default_value=ray_ground_classifier_param_file,
description='Path to config file for Ray Ground Classifier'
)
with_obstacles_param = DeclareLaunchArgument(
'with_obstacles',
default_value='True',
description='Enable obstacle detection'
)
scan_downsampler_param = DeclareLaunchArgument(
'scan_downsampler_param_file',
default_value=scan_downsampler_param_file,
description='Path to config file for lidar scan downsampler'
)
lanelet2_map_provider_param = DeclareLaunchArgument(
'lanelet2_map_provider_param_file',
default_value=lanelet2_map_provider_param_file,
description='Path to parameter file for Lanelet2 Map Provider'
)
lane_planner_param = DeclareLaunchArgument(
'lane_planner_param_file',
default_value=lane_planner_param_file,
description='Path to parameter file for lane planner'
)
parking_planner_param = DeclareLaunchArgument(
'parking_planner_param_file',
default_value=parking_planner_param_file,
description='Path to parameter file for parking planner'
)
object_collision_estimator_param = DeclareLaunchArgument(
'object_collision_estimator_param_file',
default_value=object_collision_estimator_param_file,
description='Path to parameter file for object collision estimator'
)
behavior_planner_param = DeclareLaunchArgument(
'behavior_planner_param_file',
default_value=behavior_planner_param_file,
description='Path to parameter file for behavior planner'
)
off_map_obstacles_filter_param = DeclareLaunchArgument(
'off_map_obstacles_filter_param_file',
default_value=off_map_obstacles_filter_param_file,
description='Path to parameter file for off-map obstacle filter'
)
# Nodes
euclidean_clustering = Node(
package='euclidean_cluster_nodes',
executable='euclidean_cluster_node_exe',
namespace='perception',
condition=IfCondition(LaunchConfiguration('with_obstacles')),
parameters=[LaunchConfiguration('euclidean_cluster_param_file')],
remappings=[
("points_in", "points_nonground")
]
)
# point cloud fusion runner to fuse front and rear lidar
point_cloud_fusion_node = IncludeLaunchDescription(
PythonLaunchDescriptionSource(
os.path.join(point_cloud_fusion_node_pkg_prefix,
'launch/vlp16_sim_lexus_pc_fusion.launch.py'))
)
ray_ground_classifier = Node(
package='ray_ground_classifier_nodes',
executable='ray_ground_classifier_cloud_node_exe',
namespace='perception',
condition=IfCondition(LaunchConfiguration('with_obstacles')),
parameters=[LaunchConfiguration('ray_ground_classifier_param_file')],
remappings=[("points_in", "/lidars/points_fused")]
)
scan_downsampler = Node(
package='voxel_grid_nodes',
executable='voxel_grid_node_exe',
namespace='lidars',
name='voxel_grid_cloud_node',
parameters=[LaunchConfiguration('scan_downsampler_param_file')],
remappings=[
("points_in", "points_fused"),
("points_downsampled", "points_fused_downsampled")
]
)
lanelet2_map_provider = Node(
package='lanelet2_map_provider',
executable='lanelet2_map_provider_exe',
namespace='had_maps',
name='lanelet2_map_provider_node',
parameters=[LaunchConfiguration('lanelet2_map_provider_param_file')]
)
lanelet2_map_visualizer = Node(
package='lanelet2_map_provider',
executable='lanelet2_map_visualizer_exe',
name='lanelet2_map_visualizer_node',
namespace='had_maps'
)
global_planner = Node(
package='lanelet2_global_planner_nodes',
name='lanelet2_global_planner_node',
namespace='planning',
executable='lanelet2_global_planner_node_exe',
remappings=[('HAD_Map_Client', '/had_maps/HAD_Map_Service'),
('vehicle_kinematic_state', '/vehicle/vehicle_kinematic_state')]
)
lane_planner = Node(
package='lane_planner_nodes',
name='lane_planner_node',
namespace='planning',
executable='lane_planner_node_exe',
parameters=[LaunchConfiguration('lane_planner_param_file')],
remappings=[('HAD_Map_Service', '/had_maps/HAD_Map_Service')]
)
parking_planner = Node(
package='parking_planner_nodes',
name='parking_planner_node',
namespace='planning',
executable='parking_planner_node_exe',
parameters=[LaunchConfiguration('parking_planner_param_file')],
remappings=[('HAD_Map_Service', '/had_maps/HAD_Map_Service')]
)
object_collision_estimator = Node(
package='object_collision_estimator_nodes',
name='object_collision_estimator_node',
namespace='planning',
executable='object_collision_estimator_node_exe',
condition=IfCondition(LaunchConfiguration('with_obstacles')),
parameters=[LaunchConfiguration('object_collision_estimator_param_file')],
remappings=[
('obstacle_topic', '/perception/lidar_bounding_boxes_filtered'),
]
)
behavior_planner = Node(
package='behavior_planner_nodes',
name='behavior_planner_node',
namespace='planning',
executable='behavior_planner_node_exe',
parameters=[
LaunchConfiguration('behavior_planner_param_file'),
{'enable_object_collision_estimator': LaunchConfiguration('with_obstacles')}
],
output='screen',
remappings=[
('HAD_Map_Service', '/had_maps/HAD_Map_Service'),
('vehicle_state', '/vehicle/vehicle_kinematic_state'),
('route', 'global_path'),
('vehicle_state_report', '/vehicle/state_report'),
('vehicle_state_command', '/vehicle/state_command')
]
)
off_map_obstacles_filter = Node(
package='off_map_obstacles_filter_nodes',
name='off_map_obstacles_filter_node',
namespace='perception',
executable='off_map_obstacles_filter_nodes_exe',
parameters=[LaunchConfiguration('off_map_obstacles_filter_param_file')],
output='screen',
remappings=[
('bounding_boxes_in', 'lidar_bounding_boxes'),
('bounding_boxes_out', 'lidar_bounding_boxes_filtered'),
('HAD_Map_Service', '/had_maps/HAD_Map_Service'),
]
)
return LaunchDescription([
euclidean_cluster_param,
ray_ground_classifier_param,
scan_downsampler_param,
with_obstacles_param,
lanelet2_map_provider_param,
lane_planner_param,
parking_planner_param,
object_collision_estimator_param,
behavior_planner_param,
off_map_obstacles_filter_param,
euclidean_clustering,
ray_ground_classifier,
scan_downsampler,
point_cloud_fusion_node,
lanelet2_map_provider,
lanelet2_map_visualizer,
global_planner,
lane_planner,
parking_planner,
object_collision_estimator,
behavior_planner,
off_map_obstacles_filter,
])
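# Example invocation (the installed launch file name is an assumption and
# depends on how this package exports the script):
#   ros2 launch autoware_auto_avp_demo avp_core.launch.py with_obstacles:=False
# `with_obstacles:=False` skips the nodes guarded by IfCondition above.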
|
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
import sys
import time
import unittest
from telemetry.page import page_test_results
class GTestTestResults(page_test_results.PageTestResults):
def __init__(self, output_stream):
super(GTestTestResults, self).__init__(output_stream)
self._timestamp = None
def _GetMs(self):
return (time.time() - self._timestamp) * 1000
@property
def num_errors(self):
return len(self.errors) + len(self.failures)
@staticmethod
def _formatTestname(test):
if isinstance(test, unittest.TestCase):
chunks = test.id().split('.')[-2:]
return '.'.join(chunks)
else:
return str(test)
def _emitFailure(self, test, err):
print >> self._output_stream, self._exc_info_to_string(err, test)
test_name = GTestTestResults._formatTestname(test)
print >> self._output_stream, '[ FAILED ]', test_name, (
'(%0.f ms)' % self._GetMs())
sys.stdout.flush()
def addError(self, test, err):
super(GTestTestResults, self).addError(test, err)
self._emitFailure(test, err)
def addFailure(self, test, err):
super(GTestTestResults, self).addFailure(test, err)
self._emitFailure(test, err)
def startTest(self, test):
super(GTestTestResults, self).startTest(test)
print >> self._output_stream, '[ RUN ]', (
GTestTestResults._formatTestname(test))
sys.stdout.flush()
self._timestamp = time.time()
def addSuccess(self, test):
super(GTestTestResults, self).addSuccess(test)
test_name = GTestTestResults._formatTestname(test)
print >> self._output_stream, '[ OK ]', test_name, (
'(%0.f ms)' % self._GetMs())
sys.stdout.flush()
def addSkip(self, test, reason):
super(GTestTestResults, self).addSkip(test, reason)
test_name = GTestTestResults._formatTestname(test)
logging.warning('===== SKIPPING TEST %s: %s =====', test_name, reason)
if self._timestamp == None:
self._timestamp = time.time()
print >> self._output_stream, '[ OK ]', test_name, (
'(%0.f ms)' % self._GetMs())
sys.stdout.flush()
def PrintSummary(self):
unit = 'test' if len(self.successes) == 1 else 'tests'
print >> self._output_stream, '[ PASSED ]', (
'%d %s.' % (len(self.successes), unit))
if self.errors or self.failures:
all_errors = self.errors[:]
all_errors.extend(self.failures)
unit = 'test' if len(all_errors) == 1 else 'tests'
print >> self._output_stream, '[ FAILED ]', (
'%d %s, listed below:' % (len(all_errors), unit))
for test, _ in all_errors:
print >> self._output_stream, '[ FAILED ] ', (
GTestTestResults._formatTestname(test))
if not self.wasSuccessful():
print >> self._output_stream
count = len(self.errors) + len(self.failures)
unit = 'TEST' if count == 1 else 'TESTS'
print >> self._output_stream, '%d FAILED %s' % (count, unit)
print >> self._output_stream
sys.stdout.flush()
|
#! /usr/bin/python
from flask import Flask, request, jsonify
import boto3
import os
from queue import Queue
from threading import Thread
import time
s3 = boto3.client('s3')
s3_raw = boto3.resource('s3').Bucket('isitanime-data-raw')
s3_dest = boto3.resource('s3').Bucket('isitanime-data-clean')
app = Flask(__name__)
@app.route('/')
def main():
with open('main.html', 'r') as in_f:
html = in_f.read()
return html
@app.route('/keys')
def keys():
prefix = request.args.get('prefix', 'safebooru')
keys = get_keys(prefix, 100)
return jsonify(keys)
classify_queue = Queue()
@app.route('/classify')
def classify():
key = request.args.get('key')
clss = request.args.get('class')
assert clss in {'anime', 'notanime', 'delete'}
classify_queue.put((key, clss))
return '', 200
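# Example request (values are illustrative; the app listens on 127.0.0.1:8080
# as configured below):
#   GET /classify?key=safebooru/1234.jpg&class=anime
# queues the object for copying into isitanime-data-clean and removal from
# isitanime-data-raw.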
def classify_thread():
while True:
try:
key, clss = classify_queue.get()
classify_back(key, clss)
except Exception:
pass
def classify_back(name, clss):
copy_source = {
'Bucket': 'isitanime-data-raw',
'Key': name,
}
if clss != 'delete':
s3_dest.copy(copy_source, clss + '-' + name)
s3_raw.delete_objects(
Delete={
'Objects': [{
'Key': name,
}],
'Quiet': True,
}
)
print('S3 cleaned ' + name + ' == ' + clss)
s3_key_cache = {}
s3_marker_next = {}
def get_keys(prefix, count):
if prefix not in s3_key_cache:
s3_key_cache[prefix] = []
if prefix not in s3_marker_next:
if s3_key_cache[prefix]:
s3_marker_next[prefix] = s3_key_cache[prefix][-1]
else:
s3_marker_next[prefix] = None
key_cache = s3_key_cache[prefix]
marker_next = s3_marker_next[prefix]
while count > len(key_cache):
if marker_next:
resp = s3.list_objects(
Bucket='isitanime-data-raw',
Prefix=prefix,
Marker=marker_next,
)
else:
resp = s3.list_objects(
Bucket='isitanime-data-raw',
Prefix=prefix,
)
if 'Contents' not in resp:
count = len(key_cache)
print(resp)
break
key_cache.extend([obj['Key'] for obj in resp['Contents']])
s3_marker_next[prefix] = key_cache[-1]
if not resp['IsTruncated']:
count = len(key_cache)
break
print(key_cache)
s3_key_cache[prefix] = key_cache[count:]
return key_cache[:count]
if __name__ == '__main__':
boto_threadpool = []
for _ in range(5):
t = Thread(target=classify_thread)
boto_threadpool.append(t)
t.start()
app.run('127.0.0.1', port=8080)
|
from wagtail.core import blocks
from wagtail.images.blocks import ImageChooserBlock
class StandoutItemsBlock(blocks.StructBlock):
class LinkBlock(blocks.StreamBlock):
internal = blocks.PageChooserBlock()
external = blocks.URLBlock()
class Meta:
required = False
max_num = 1
subtitle = blocks.CharBlock()
title = blocks.CharBlock()
description = blocks.TextBlock()
image = ImageChooserBlock()
link = LinkBlock()
class Meta:
icon = "pick"
@staticmethod
def get_link(value):
"""The link could be internal or external."""
try:
link = value[0]
except IndexError:
return ""
else:
return (
link.value.url
if link.block_type == "internal" and link.value
else link.value
)
class InstagramEmbedBlock(blocks.StructBlock):
image = ImageChooserBlock()
link = blocks.URLBlock(
required=False,
help_text="Link to a specific post here or leave blank for it to link to https://www.instagram.com/torchboxltd/",
)
class Meta:
icon = "fa-instagram"
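# Illustrative usage (page model and field names are assumptions): these
# blocks would typically be mounted on a StreamField, e.g.:
#
#   from wagtail.core.fields import StreamField
#   from wagtail.core.models import Page
#
#   class HomePage(Page):
#       body = StreamField([
#           ("standout_items", StandoutItemsBlock()),
#           ("instagram", InstagramEmbedBlock()),
#       ])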
|
import pytest
# integration tests requires nomad Vagrant VM or Binary running
def test_get_nodes(nomad_setup):
    assert isinstance(nomad_setup.nodes.get_nodes(), list)
def test_get_nodes_prefix(nomad_setup):
nodes = nomad_setup.nodes.get_nodes()
prefix = nodes[0]["ID"][:4]
nomad_setup.nodes.get_nodes(prefix=prefix)
def test_dunder_getitem_exist(nomad_setup):
n = nomad_setup.nodes["pynomad1"]
assert isinstance(n, dict)
def test_dunder_getitem_not_exist(nomad_setup):
with pytest.raises(KeyError):
j = nomad_setup.nodes["pynomad2"]
def test_dunder_contain_exists(nomad_setup):
assert "pynomad1" in nomad_setup.nodes
def test_dunder_contain_not_exist(nomad_setup):
assert "real.localdomain" not in nomad_setup.nodes
def test_dunder_str(nomad_setup):
assert isinstance(str(nomad_setup.nodes), str)
def test_dunder_repr(nomad_setup):
assert isinstance(repr(nomad_setup.nodes), str)
def test_dunder_getattr(nomad_setup):
with pytest.raises(AttributeError):
d = nomad_setup.nodes.does_not_exist
def test_dunder_iter(nomad_setup):
assert hasattr(nomad_setup.nodes, '__iter__')
for j in nomad_setup.nodes:
pass
def test_dunder_len(nomad_setup):
assert len(nomad_setup.nodes) >= 0
|
################################################################################
from subprocess import Popen, PIPE, STDOUT
from threading import Thread
import bz2, json, click
from newsroom import jsonl
from . import readiter
from tqdm import tqdm
################################################################################
def _writer(process, dataset_file, keys):
for article in dataset_file:
subset = {k: article[k] for k in keys if k in article}
encoded = json.dumps(subset).encode("utf-8")
process.stdin.write(encoded + b"\n")
process.stdin.close()
################################################################################
articles_file = click.Path(
exists = True,
dir_okay = False,
readable = True,
resolve_path = True,
)
summaries_file = click.Path(
exists = False,
dir_okay = False,
writable = True,
resolve_path = True,
)
################################################################################
@click.command()
@click.option(
"--system",
type = str,
required = True,
help = "Name of docker image."
)
@click.option(
"--dataset",
type = articles_file,
required = True,
help = "Input path to full dataset."
)
@click.option(
"--summaries",
type = summaries_file,
required = True,
help = "Output path for system generated summaries."
)
@click.option(
"--keys",
type = str,
default = "text",
help = "List of dataset keys to pass to system. [default = text]"
)
################################################################################
def main(system, dataset, summaries, keys):
print("Starting", system, "Docker image.")
process = Popen(
[
"docker", "run", "--rm",
"-a", "stdin", "-a", "stdout",
"-i", system
],
stdin = PIPE,
stdout = PIPE,
)
dataset_file = jsonl.open(dataset, gzip = True)
# Check the size of the dataset.
# As a sanity check and for the progress bar.
print("Loading articles... ", end = "", flush = True)
dataset_length = len(dataset_file)
print("found", dataset_length, "articles.\n")
# Start new thread to feed summaries into container.
Thread(
target = _writer,
args = (process, dataset_file, keys.split(","))
).start()
# Start progress bar.
progress = tqdm(
readiter(process.stdout),
total = dataset_length,
desc = "Running " + system,
)
# Prepare to decode summaries.
is_json = True
with jsonl.open(summaries, gzip = True) as summaries_file:
summaries_file.delete()
with progress as output:
for line in output:
summaries_file.appendline({"system": line})
print("\nRun complete. Next, evaluate with newsroom-score.")
################################################################################
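# Example invocation (the console-script name is an assumption based on the
# closing message above):
#   newsroom-run --system my-summarizer --dataset dev.dataset \
#       --summaries dev.summaries --keys text,title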
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
"""Created with Pycharm IDEA
@Create on 2015/9/12 16:31
@my_story models.py
@author : OmegaMiao"""
from app import db, loginManager
from datetime import datetime
from werkzeug.security import generate_password_hash, check_password_hash
from flask_login import UserMixin
class Story(db.Model):
__tablename__ = 'story'
id = db.Column(db.Integer, primary_key=True, autoincrement=True)
title = db.Column(db.String(30), nullable=False)
content = db.Column(db.String(500), nullable=False)
    create_time = db.Column(db.DateTime, default=datetime.now)
category_id = db.Column(db.Integer, db.ForeignKey('category.id'))
author_id = db.Column(db.Integer, db.ForeignKey('author.id'))
def __init__(self, title, content):
self.title = title
self.content = content
def __repr__(self):
return "<Story %r title %r>" % (self.id, self.title)
def to_json(self):
return {
"id": self.id,
"title": self.title,
"content": self.content,
"create_time": self.create_time.strftime('%Y-%m-%d %H:%M:%S')
}
class Category(db.Model):
__tablename__ = 'category'
id = db.Column(db.Integer, primary_key=True, autoincrement=True)
name = db.Column(db.String(20), nullable=False, unique=True)
storys = db.relationship('Story', backref='category', lazy='joined')
def __init__(self, name):
self.name = name
def __repr__(self):
return "<Category %r name %r>" % (self.id, self.name)
class Author(db.Model):
__tablename__ = 'author'
id = db.Column(db.Integer, primary_key=True, autoincrement=True)
name = db.Column(db.String(20), nullable=False)
nick_name = db.Column(db.String(20), nullable=False, unique=True)
storys = db.relationship('Story', backref='author', lazy='joined')
def __init__(self, name, nick_name):
self.name = name
self.nick_name = nick_name
def __repr__(self):
return "<Author id: %r Name: %r nickName:%r>" % (self.id, self.name, self.nick_name)
class User(UserMixin, db.Model):
__tablename__ = 'users'
id = db.Column(db.Integer, primary_key=True)
email = db.Column(db.String(64), unique=True, index=True)
username = db.Column(db.String(64), unique=True, index=True)
password_hash = db.Column(db.String(128))
role_id = db.Column(db.Integer, db.ForeignKey('roles.id'))
@property
def password(self):
raise AttributeError('password is not a readable attribute')
@password.setter
def password(self, password):
self.password_hash = generate_password_hash(password)
def verify_password(self, password):
return check_password_hash(self.password_hash, password)
def __repr__(self):
return '<User %r>' % self.username
class Role(db.Model):
__tablename__ = 'roles'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(64), unique=True)
def __repr__(self):
return '<Role %r>' % self.name
@loginManager.user_loader
def load_user(user_id):
return User.query.get(int(user_id))
|
''' The application's Globals object '''
import logging
import time
from threading import Lock
import re
from paste.deploy.converters import asbool
from pylons import config
import ckan
import ckan.model as model
import ckan.logic as logic
log = logging.getLogger(__name__)
# mappings translate between config settings and globals because our naming
# conventions are not well defined and/or implemented
mappings = {
# 'config_key': 'globals_key',
}
# This mapping is only used to define the configuration options (from the
# `config` object) that should be copied to the `app_globals` (`g`) object.
app_globals_from_config_details = {
'ckan.site_title': {},
'ckan.site_logo': {},
'ckan.site_url': {},
'ckan.site_description': {},
'ckan.site_about': {},
'ckan.site_intro_text': {},
'ckan.site_custom_css': {},
'ckan.favicon': {}, # default gets set in config.environment.py
'ckan.template_head_end': {},
'ckan.template_footer_end': {},
# has been setup in load_environment():
'ckan.site_id': {},
'ckan.recaptcha.publickey': {'name': 'recaptcha_publickey'},
'ckan.recaptcha.version': {'name': 'recaptcha_version', 'default': '1'},
'ckan.template_title_deliminater': {'default': '-'},
'ckan.template_head_end': {},
'ckan.template_footer_end': {},
'ckan.dumps_url': {},
'ckan.dumps_format': {},
'ofs.impl': {'name': 'ofs_impl'},
'ckan.homepage_style': {'default': '1'},
# split string
'search.facets': {'default': 'organization groups tags res_format license_id',
'type': 'split',
'name': 'facets'},
'package_hide_extras': {'type': 'split'},
'ckan.plugins': {'type': 'split'},
# bool
'debug': {'default': 'false', 'type' : 'bool'},
'ckan.debug_supress_header' : {'default': 'false', 'type' : 'bool'},
'ckan.legacy_templates' : {'default': 'false', 'type' : 'bool'},
'ckan.tracking_enabled' : {'default': 'false', 'type' : 'bool'},
# int
'ckan.datasets_per_page': {'default': '20', 'type': 'int'},
'ckan.activity_list_limit': {'default': '30', 'type': 'int'},
'search.facets.default': {'default': '10', 'type': 'int',
'name': 'facets_default_number'},
}
# A place to store the original config options if we override them
_CONFIG_CACHE = {}
def set_main_css(css_file):
''' Sets the main_css. The css_file must be of the form file.css '''
assert css_file.endswith('.css')
new_css = css_file
# FIXME we should check the css file exists
app_globals.main_css = str(new_css)
def set_app_global(key, value):
'''
Set a new key on the app_globals (g) object
It will process the value according to the options on
app_globals_from_config_details (if any)
'''
key, value = process_app_global(key, value)
setattr(app_globals, key, value)
def process_app_global(key, value):
'''
Tweak a key, value pair meant to be set on the app_globals (g) object
According to the options on app_globals_from_config_details (if any)
'''
options = app_globals_from_config_details.get(key)
key = get_globals_key(key)
if options:
if 'name' in options:
key = options['name']
value = value or options.get('default', '')
data_type = options.get('type')
if data_type == 'bool':
value = asbool(value)
elif data_type == 'int':
value = int(value)
elif data_type == 'split':
value = value.split()
return key, value
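# For example (illustrative inputs): process_app_global('ckan.plugins',
# 'stats text_view') gives ('plugins', ['stats', 'text_view']): the 'ckan.'
# prefix is stripped and the 'split' type turns the string into a list.
# Likewise, process_app_global('ckan.tracking_enabled', '') gives
# ('tracking_enabled', False) via the 'false' default and 'bool' type.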
def get_globals_key(key):
# create our globals key
    # these can be specified in mappings or else we remove
    # the `ckan.` part; this is to keep the existing naming
# set the value
if key in mappings:
return mappings[key]
elif key.startswith('ckan.'):
return key[5:]
else:
return key
def reset():
''' set updatable values from config '''
def get_config_value(key, default=''):
if model.meta.engine.has_table('system_info'):
value = model.get_system_info(key)
else:
value = None
config_value = config.get(key)
        # sort out encodings if needed
if isinstance(config_value, str):
try:
config_value = config_value.decode('utf-8')
except UnicodeDecodeError:
config_value = config_value.decode('latin-1')
# we want to store the config the first time we get here so we can
# reset them if needed
if key not in _CONFIG_CACHE:
_CONFIG_CACHE[key] = config_value
if value is not None:
log.debug('config `%s` set to `%s` from db' % (key, value))
else:
value = _CONFIG_CACHE[key]
if value:
log.debug('config `%s` set to `%s` from config' % (key, value))
else:
value = default
set_app_global(key, value)
# update the config
config[key] = value
return value
# update the config settings in auto update
schema = logic.schema.update_configuration_schema()
for key in schema.keys():
get_config_value(key)
    # custom styling
main_css = get_config_value('ckan.main_css', '/base/css/main.css')
set_main_css(main_css)
# site_url_nice
site_url_nice = app_globals.site_url.replace('http://', '')
site_url_nice = site_url_nice.replace('www.', '')
app_globals.site_url_nice = site_url_nice
if app_globals.site_logo:
app_globals.header_class = 'header-image'
elif not app_globals.site_description:
app_globals.header_class = 'header-text-logo'
else:
app_globals.header_class = 'header-text-logo-tagline'
class _Globals(object):
''' Globals acts as a container for objects available throughout the
life of the application. '''
def __init__(self):
'''One instance of Globals is created during application
initialization and is available during requests via the
'app_globals' variable
'''
self._init()
self._config_update = None
self._mutex = Lock()
def _check_uptodate(self):
        ''' Check that the config is up to date; needed when several
        instances are running. '''
value = model.get_system_info('ckan.config_update')
if self._config_update != value:
if self._mutex.acquire(False):
reset()
self._config_update = value
self._mutex.release()
def _init(self):
self.ckan_version = ckan.__version__
        self.ckan_base_version = re.sub(r'[^0-9\.]', '', self.ckan_version)
if self.ckan_base_version == self.ckan_version:
self.ckan_doc_version = 'ckan-{0}'.format(self.ckan_version)
else:
self.ckan_doc_version = 'latest'
# process the config details to set globals
for key in app_globals_from_config_details.keys():
new_key, value = process_app_global(key, config.get(key) or '')
setattr(self, new_key, value)
app_globals = _Globals()
del _Globals
|
import configparser
class BotConfig:
def __init__(self, path):
parser = configparser.ConfigParser()
        # Open the file explicitly because parser.read() silently ignores
        # files it cannot read.
        with open(path) as file:
            parser.read_file(file)
if 'Bot' not in parser.sections():
            raise Exception("All parameters must reside in section 'Bot'")
bot_section = parser['Bot']
self.address = bot_section.get('address', 'localhost')
port_string = bot_section.get('port', '9091')
try:
self.port = int(port_string)
except ValueError:
            raise ValueError("Port '%s' is invalid" % port_string)
        # SectionProxy.get() returns None for missing options, so no KeyError
        # handling is needed here.
        self.user = bot_section.get('user')
        self.password = bot_section.get('password')
if self.password and not self.user:
raise Exception('Password with no user name is meaningless')
self.token = bot_section.get('token', '')
if not self.token:
raise Exception('Telegram token is required')
self.secret = bot_section.get('secret', '')
if not self.secret:
raise Exception('Secret is required')
        self.persistence_file = bot_section.get('persistence_file')
def __str__(self):
result = '{address:<%s> ' \
'port:<%d> ' % (self.address, self.port)
if not self.user:
result += 'user:None '
else:
result += 'user:<%s> ' % self.user
if not self.password:
            result += 'password:None '
else:
result += 'password:present '
result += 'token:present '
result += 'secret:present '
result += 'persistence_file:<%s>}' % self.persistence_file
return result
def __repr__(self):
        return "{address:'%s' port:%d user:'%s' password:'%s' " \
               "token:'%s' secret:'%s' persistence_file:'%s'}" \
               % (self.address, self.port, self.user, self.password,
                  self.token, self.secret, self.persistence_file)
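# Illustrative usage sketch (not part of the original module), assuming a minimal
# INI file that only sets the required `token` and `secret` options in a [Bot]
# section; the file name below is generated just for the example.
if __name__ == '__main__':
    import tempfile
    with tempfile.NamedTemporaryFile('w', suffix='.ini', delete=False) as f:
        f.write('[Bot]\ntoken = 123456:ABCDEF\nsecret = example-secret\n')
        example_path = f.name
    # address/port fall back to localhost:9091; user/password/persistence_file stay None
    print(BotConfig(example_path))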
|
# -*- coding: utf-8 -*-
import os
from setuptools import find_packages, setup
with open('README.rst') as f:
readme = f.read()
# with open('LICENSE.txt') as f:
# licenses = f.read()
setup(
name='dbestclient',
version='2.0',
description='Model-based Approximate Query Processing (AQP) engine.',
classifiers=[
'Development Status :: 2.0',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.7',
'Topic :: Approximate Query Processing :: AQP :: Data Warehouse',
],
keywords='Approximate Query Processing AQP',
url='https://github.com/qingzma/DBEstClient',
author='Qingzhi Ma',
author_email='Q.Ma.2@warwick.ac.uk',
long_description=readme,
# license=licenses,
# packages=['dbestclient'],
packages=find_packages(exclude=('experiments', 'tests', 'docs')),
entry_points={
'console_scripts': ['dbestclient=dbestclient.main:main', 'dbestslave=dbestclient.main:slave', 'dbestmaster=dbestclient.main:master'],
},
zip_safe=False,
install_requires=[
'numpy', 'sqlparse', 'pandas', 'scikit-learn', 'qregpy', 'scipy', 'dill', 'matplotlib', 'torch', 'category_encoders', 'tox', 'sphinx', 'gensim',
],
test_suite='nose.collector',
tests_require=['nose'],
)
|
import sympy as sp
import cyllene.f_aux as fa
import cyllene.f_functionclass as ff
import cyllene.f_compare as fc
def function(expr):
"""
Defines a function based on a syntax check
and a Function object, using lambda operator.
Returns a pure function.
"""
func = ff.Function(expr)
if func.is_defined:
return lambda x: func.eval_at(x)
else:
issues_report=''.join(['\t' + str(i+1) + '. ' + func.issues[i]+'\n' \
for i in range(len(func.issues))])
print('Problems encountered:\n' + issues_report)
return None
# raise ValueError('Problems encountered:\n'+issues_report)
# def function(expr):
# [func, issues] = fd.define_function(expr)
# if issues:
# print("Invalid format")
# return None
# else:
# return func
def random_function(arg='random'):
"""
Pick a function at random.
    One of the following types can be specified:
'const', 'linear', 'quadratic', 'cubic', 'squareroot',
'cubicroot', 'rational', 'exp', 'tri', 'log', 'comp',
'random'
"""
if arg in ff.FUNCTION_LIST:
func = ff.Function(arg)
else:
func = ff.Function('random')
return lambda x: func.eval_at(x)
def expression(expr):
"""
Defines a function based on a syntax check
and a Function object.
Returns a Sympy object.
"""
func = ff.Function(expr)
if func.is_defined:
return func.sym_form
else:
issues_report=''.join(['\t' + str(i+1) + '. ' + func.issues[i]+'\n' \
for i in range(len(func.issues))])
print('Problems encountered:\n' + issues_report)
return None
def compare(expr1, expr2):
return fc.compare_functions(expr1, expr2)
def graph(expr):
    """ Try to find a good plotting range and plot the graph """
    var = fa.get_variables(expr)
    try:
        # NOTE: `fpl` (providing find_plot_range) is not imported in this
        # module; if it is unavailable, fall back to sympy's default range.
        [xran, yran] = fpl.find_plot_range(expr)
        sp.plot(expr, (var[0], xran[0], xran[1]), axis_center=(0, 0),
                ylim=(yran[0], yran[1]))
    except Exception:
        sp.plot(expr)
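# Example usage (illustrative; the accepted expression syntax is whatever
# ff.Function understands):
#   f = function('x**2 + 1')    # callable, e.g. f(3) == 10
#   e = expression('x**2 + 1')  # sympy expression
#   graph(e)                    # plot with an automatically chosen range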
|
from Commander import Commander
from gui.UserInterface import UserInterface
from log.Replay import Replay
|
import ast
from bluesky.plans import scan, grid_scan
import bluesky.preprocessors as bpp
import bluesky.plan_stubs as bps
from bluesky.preprocessors import SupplementalData
from bluesky.callbacks.best_effort import BestEffortCallback
def test_hints(RE, hw):
motor = hw.motor
expected_hint = {'fields': [motor.name]}
assert motor.hints == expected_hint
collector = []
def collect(*args):
collector.append(args)
RE(scan([], motor, 1, 2, 2), {'descriptor': collect})
name, doc = collector.pop()
assert doc['hints'][motor.name] == expected_hint
def test_simple(RE, hw):
bec = BestEffortCallback()
RE.subscribe(bec)
RE(scan([hw.ab_det], hw.motor, 1, 5, 5))
def test_disable(RE, hw):
det, motor = hw.ab_det, hw.motor
bec = BestEffortCallback()
RE.subscribe(bec)
bec.disable_table()
RE(scan([det], motor, 1, 5, 5))
assert bec._table is None
bec.enable_table()
RE(scan([det], motor, 1, 5, 5))
assert bec._table is not None
bec.peaks.com
bec.peaks['com']
assert ast.literal_eval(repr(bec.peaks)) == vars(bec.peaks)
bec.clear()
assert bec._table is None
# smoke test
bec.disable_plots()
bec.enable_plots()
bec.disable_baseline()
bec.enable_baseline()
bec.disable_heading()
bec.enable_heading()
def test_blank_hints(RE, hw):
bec = BestEffortCallback()
RE.subscribe(bec)
RE(scan([hw.ab_det], hw.motor, 1, 5, 5, md={'hints': {}}))
def test_with_baseline(RE, hw):
bec = BestEffortCallback()
RE.subscribe(bec)
sd = SupplementalData(baseline=[hw.det])
RE.preprocessors.append(sd)
RE(scan([hw.ab_det], hw.motor, 1, 5, 5))
def test_underhinted_plan(RE, hw):
bec = BestEffortCallback()
RE.subscribe(bec)
@bpp.run_decorator()
def broken_plan(dets):
yield from bps.trigger_and_read(dets)
RE(broken_plan([hw.det]))
def test_live_grid(RE, hw):
bec = BestEffortCallback()
RE.subscribe(bec)
RE(grid_scan([hw.det4], hw.motor1, 0, 1, 1, hw.motor2, 0, 1, 2, True))
|
"""Actor-Critic Algorithm."""
from rllib.util.neural_networks.utilities import broadcast_to_tensor
from .abstract_algorithm import AbstractAlgorithm
class ActorCritic(AbstractAlgorithm):
r"""Implementation of Policy Gradient algorithm.
Policy-Gradient is an on-policy model-free control algorithm.
Policy-Gradient computes the policy gradient using a critic to estimate the returns
(sum of discounted rewards).
The Policy-Gradient algorithm is a policy gradient algorithm that estimates the
gradient:
.. math:: \grad J = \int_{\tau} \grad \log \pi(s_t) Q(s_t, a_t),
where the previous integral is computed through samples (s_t, a_t) samples.
Parameters
----------
policy: AbstractPolicy
Policy to optimize.
critic: AbstractQFunction
Critic that evaluates the current policy.
criterion: _Loss
Criterion to optimize the baseline.
gamma: float
Discount factor.
References
----------
Sutton, R. S., McAllester, D. A., Singh, S. P., & Mansour, Y. (2000).
Policy gradient methods for reinforcement learning with function approximation.
NeurIPS.
Konda, V. R., & Tsitsiklis, J. N. (2000).
Actor-critic algorithms. NeurIPS.
Degris, T., White, M., & Sutton, R. S. (2012).
Off-policy actor-critic. ICML
"""
def __init__(
self, num_policy_samples=15, standardize_returns=True, *args, **kwargs
):
super().__init__(num_policy_samples=num_policy_samples, *args, **kwargs)
self.standardize_returns = standardize_returns
def returns(self, trajectory):
"""Estimate the returns of a trajectory."""
state, action = trajectory.state, trajectory.action
weight = self.get_ope_weight(state, action, trajectory.log_prob_action)
advantage = self.critic(state, action)
weight = broadcast_to_tensor(input_tensor=weight, target_tensor=advantage)
return weight * advantage
def actor_loss(self, observation):
"""Get Actor loss."""
return self.score_actor_loss(observation, linearized=False).reduce(
self.criterion.reduction
)
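# Illustrative sketch (not part of rllib): a toy score-function estimate of the
# policy gradient \nabla log \pi(a_t|s_t) Q(s_t, a_t) described in the docstring,
# written with plain torch; all values below are made up for the example.
if __name__ == "__main__":
    import torch

    logits = torch.zeros(3, requires_grad=True)  # parameters of a 3-action categorical policy
    action, q_value = torch.tensor(1), torch.tensor(2.5)  # sampled action and critic estimate
    log_prob = torch.log_softmax(logits, dim=-1)[action]
    (-log_prob * q_value).backward()  # minimizing -log_prob * Q follows the policy gradient
    print(logits.grad)  # per-parameter gradient estimate for this single sample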
|
#!/usr/bin/env python
#
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Compile Android resources into an intermediate APK.
This can also generate an R.txt, and an .srcjar file containing the proper
final R.java class for all resource packages the APK depends on.
This will crunch images with aapt2.
"""
import argparse
import collections
import multiprocessing.pool
import os
import re
import shutil
import subprocess
import sys
import zipfile
from xml.etree import ElementTree
from util import build_utils
from util import resource_utils
_SOURCE_ROOT = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(
__file__))))
# Import jinja2 from third_party/jinja2
sys.path.insert(1, os.path.join(_SOURCE_ROOT, 'third_party'))
from jinja2 import Template # pylint: disable=F0401
# Pngs that we shouldn't convert to webp. Please add rationale when updating.
_PNG_WEBP_BLACKLIST_PATTERN = re.compile('|'.join([
# Crashes on Galaxy S5 running L (https://crbug.com/807059).
r'.*star_gray\.png',
# Android requires pngs for 9-patch images.
r'.*\.9\.png',
# Daydream requires pngs for icon files.
r'.*daydream_icon_.*\.png']))
# Regular expression for package declaration in 'aapt dump resources' output.
_RE_PACKAGE_DECLARATION = re.compile(
r'^Package Group ([0-9]+) id=0x([0-9a-fA-F]+)')
def _PackageIdArgument(x):
"""Convert a string into a package ID while checking its range.
Args:
x: argument string.
Returns:
the package ID as an int, or -1 in case of error.
"""
try:
x = int(x, 0)
if x < 0 or x > 127:
x = -1
except ValueError:
x = -1
return x
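# For example: _PackageIdArgument('0x7f') == 127, while both
# _PackageIdArgument('0x80') and _PackageIdArgument('foo') return -1.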
def _ParseArgs(args):
"""Parses command line options.
Returns:
An options object as from argparse.ArgumentParser.parse_args()
"""
parser, input_opts, output_opts = resource_utils.ResourceArgsParser()
input_opts.add_argument('--android-manifest', required=True,
help='AndroidManifest.xml path')
input_opts.add_argument(
'--shared-resources',
action='store_true',
help='Make all resources in R.java non-final and allow the resource IDs '
'to be reset to a different package index when the apk is loaded by '
'another application at runtime.')
input_opts.add_argument(
'--app-as-shared-lib',
action='store_true',
help='Same as --shared-resources, but also ensures all resource IDs are '
'directly usable from the APK loaded as an application.')
input_opts.add_argument(
'--shared-resources-whitelist',
help='An R.txt file acting as a whitelist for resources that should be '
'non-final and have their package ID changed at runtime in R.java. '
'Implies and overrides --shared-resources.')
input_opts.add_argument('--proto-format', action='store_true',
help='Compile resources to protocol buffer format.')
input_opts.add_argument('--support-zh-hk', action='store_true',
help='Use zh-rTW resources for zh-rHK.')
input_opts.add_argument('--debuggable',
action='store_true',
help='Whether to add android:debuggable="true"')
input_opts.add_argument('--version-code', help='Version code for apk.')
input_opts.add_argument('--version-name', help='Version name for apk.')
input_opts.add_argument(
'--no-compress',
help='disables compression for the given comma-separated list of '
'extensions')
input_opts.add_argument(
'--locale-whitelist',
default='[]',
help='GN list of languages to include. All other language configs will '
'be stripped out. List may include a combination of Android locales '
'or Chrome locales.')
input_opts.add_argument('--exclude-xxxhdpi', action='store_true',
help='Do not include xxxhdpi drawables.')
input_opts.add_argument(
'--xxxhdpi-whitelist',
default='[]',
help='GN list of globs that say which xxxhdpi images to include even '
'when --exclude-xxxhdpi is set.')
input_opts.add_argument('--png-to-webp', action='store_true',
help='Convert png files to webp format.')
input_opts.add_argument('--webp-binary', default='',
help='Path to the cwebp binary.')
input_opts.add_argument('--no-xml-namespaces',
action='store_true',
help='Whether to strip xml namespaces from processed '
'xml resources')
input_opts.add_argument(
'--check-resources-pkg-id', type=_PackageIdArgument,
help='Check the package ID of the generated resources table. '
'Value must be integer in [0..127] range.')
output_opts.add_argument('--apk-path', required=True,
help='Path to output (partial) apk.')
output_opts.add_argument('--apk-info-path', required=True,
help='Path to output info file for the partial apk.')
output_opts.add_argument('--srcjar-out',
help='Path to srcjar to contain generated R.java.')
output_opts.add_argument('--r-text-out',
help='Path to store the generated R.txt file.')
output_opts.add_argument('--proguard-file',
help='Path to proguard.txt generated file')
output_opts.add_argument(
'--proguard-file-main-dex',
help='Path to proguard.txt generated file for main dex')
options = parser.parse_args(args)
resource_utils.HandleCommonOptions(options)
options.locale_whitelist = build_utils.ParseGnList(options.locale_whitelist)
options.xxxhdpi_whitelist = build_utils.ParseGnList(options.xxxhdpi_whitelist)
if options.check_resources_pkg_id is not None:
if options.check_resources_pkg_id < 0:
raise Exception(
'Package resource id should be integer in [0..127] range.')
if options.shared_resources and options.app_as_shared_lib:
raise Exception('Only one of --app-as-shared-lib or --shared-resources '
'can be used.')
return options
def _ExtractPackageIdFromApk(apk_path, aapt_path):
"""Extract the package ID of a given APK (even intermediate ones).
Args:
apk_path: Input apk path.
aapt_path: Path to aapt tool.
Returns:
An integer corresponding to the APK's package id.
Raises:
Exception if there is no resources table in the input file.
"""
cmd_args = [ aapt_path, 'dump', 'resources', apk_path ]
output = build_utils.CheckOutput(cmd_args)
for line in output.splitlines():
m = _RE_PACKAGE_DECLARATION.match(line)
if m:
return int(m.group(2), 16)
raise Exception("No resources in this APK!")
def _SortZip(original_path, sorted_path):
"""Generate new zip archive by sorting all files in the original by name."""
with zipfile.ZipFile(sorted_path, 'w') as sorted_zip, \
zipfile.ZipFile(original_path, 'r') as original_zip:
for info in sorted(original_zip.infolist(), key=lambda i: i.filename):
sorted_zip.writestr(info, original_zip.read(info))
def _DuplicateZhResources(resource_dirs):
"""Duplicate Taiwanese resources into Hong-Kong specific directory."""
renamed_paths = dict()
for resource_dir in resource_dirs:
# We use zh-TW resources for zh-HK (if we have zh-TW resources).
for path in build_utils.IterFiles(resource_dir):
if 'zh-rTW' in path:
hk_path = path.replace('zh-rTW', 'zh-rHK')
build_utils.MakeDirectory(os.path.dirname(hk_path))
shutil.copyfile(path, hk_path)
renamed_paths[os.path.relpath(hk_path, resource_dir)] = os.path.relpath(
path, resource_dir)
return renamed_paths
def _ToAaptLocales(locale_whitelist, support_zh_hk):
"""Converts the list of Chrome locales to aapt config locales."""
ret = set()
for locale in locale_whitelist:
locale = resource_utils.CHROME_TO_ANDROID_LOCALE_MAP.get(locale, locale)
if locale is None or ('-' in locale and '-r' not in locale):
raise Exception('CHROME_TO_ANDROID_LOCALE_MAP needs updating.'
' Found: %s' % locale)
ret.add(locale)
# Always keep non-regional fall-backs.
language = locale.split('-')[0]
ret.add(language)
# We don't actually support zh-HK in Chrome on Android, but we mimic the
# native side behavior where we use zh-TW resources when the locale is set to
# zh-HK. See https://crbug.com/780847.
if support_zh_hk:
assert not any('HK' in l for l in locale_whitelist), (
'Remove special logic if zh-HK is now supported (crbug.com/780847).')
ret.add('zh-rHK')
return sorted(ret)
def _MoveImagesToNonMdpiFolders(res_root):
"""Move images from drawable-*-mdpi-* folders to drawable-* folders.
Why? http://crbug.com/289843
"""
renamed_paths = dict()
for src_dir_name in os.listdir(res_root):
src_components = src_dir_name.split('-')
if src_components[0] != 'drawable' or 'mdpi' not in src_components:
continue
src_dir = os.path.join(res_root, src_dir_name)
if not os.path.isdir(src_dir):
continue
dst_components = [c for c in src_components if c != 'mdpi']
assert dst_components != src_components
dst_dir_name = '-'.join(dst_components)
dst_dir = os.path.join(res_root, dst_dir_name)
build_utils.MakeDirectory(dst_dir)
for src_file_name in os.listdir(src_dir):
if not os.path.splitext(src_file_name)[1] in ('.png', '.webp'):
continue
src_file = os.path.join(src_dir, src_file_name)
dst_file = os.path.join(dst_dir, src_file_name)
assert not os.path.lexists(dst_file)
shutil.move(src_file, dst_file)
renamed_paths[os.path.relpath(dst_file, res_root)] = os.path.relpath(
src_file, res_root)
return renamed_paths
def _CreateLinkApkArgs(options):
"""Create command-line arguments list to invoke 'aapt2 link'.
Args:
options: The command-line options tuple.
Returns:
    A list of strings corresponding to the command-line invocation for
the command, matching the arguments from |options|.
"""
link_command = [
options.aapt2_path,
'link',
'--version-code', options.version_code,
'--version-name', options.version_name,
'--auto-add-overlay',
'--no-version-vectors',
'-o', options.apk_path,
]
for j in options.android_sdk_jars:
link_command += ['-I', j]
if options.proguard_file:
link_command += ['--proguard', options.proguard_file]
if options.proguard_file_main_dex:
link_command += ['--proguard-main-dex', options.proguard_file_main_dex]
if options.no_compress:
for ext in options.no_compress.split(','):
link_command += ['-0', ext]
# Note: only one of --proto-format, --shared-lib or --app-as-shared-lib
# can be used with recent versions of aapt2.
if options.proto_format:
link_command.append('--proto-format')
elif options.shared_resources:
link_command.append('--shared-lib')
if options.locale_whitelist:
aapt_locales = _ToAaptLocales(
options.locale_whitelist, options.support_zh_hk)
link_command += ['-c', ','.join(aapt_locales)]
if options.no_xml_namespaces:
link_command.append('--no-xml-namespaces')
return link_command
def _ExtractVersionFromSdk(aapt_path, sdk_path):
"""Extract version code and name from Android SDK .jar file.
Args:
aapt_path: Path to 'aapt' build tool.
sdk_path: Path to SDK-specific android.jar file.
Returns:
A (version_code, version_name) pair of strings.
"""
output = build_utils.CheckOutput(
[aapt_path, 'dump', 'badging', sdk_path],
print_stdout=False, print_stderr=False)
version_code = re.search(r"versionCode='(.*?)'", output).group(1)
version_name = re.search(r"versionName='(.*?)'", output).group(1)
return version_code, version_name,
def _FixManifest(options, temp_dir):
"""Fix the APK's AndroidManifest.xml.
This adds any missing namespaces for 'android' and 'tools', and
  sets certain elements like 'platformBuildVersionCode' or
'android:debuggable' depending on the content of |options|.
Args:
options: The command-line arguments tuple.
temp_dir: A temporary directory where the fixed manifest will be written to.
Returns:
Path to the fixed manifest within |temp_dir|.
"""
debug_manifest_path = os.path.join(temp_dir, 'AndroidManifest.xml')
_ANDROID_NAMESPACE = 'http://schemas.android.com/apk/res/android'
_TOOLS_NAMESPACE = 'http://schemas.android.com/tools'
ElementTree.register_namespace('android', _ANDROID_NAMESPACE)
ElementTree.register_namespace('tools', _TOOLS_NAMESPACE)
original_manifest = ElementTree.parse(options.android_manifest)
def maybe_extract_version(j):
try:
return _ExtractVersionFromSdk(options.aapt_path, j)
except build_utils.CalledProcessError:
return None
extract_all = [maybe_extract_version(j) for j in options.android_sdk_jars]
successful_extractions = [x for x in extract_all if x]
if len(successful_extractions) == 0:
raise Exception(
'Unable to find android SDK jar among candidates: %s'
% ', '.join(options.android_sdk_jars))
elif len(successful_extractions) > 1:
raise Exception(
'Found multiple android SDK jars among candidates: %s'
% ', '.join(options.android_sdk_jars))
version_code, version_name = successful_extractions.pop()
# ElementTree.find does not work if the required tag is the root.
if original_manifest.getroot().tag == 'manifest':
manifest_node = original_manifest.getroot()
else:
manifest_node = original_manifest.find('manifest')
manifest_node.set('platformBuildVersionCode', version_code)
manifest_node.set('platformBuildVersionName', version_name)
if options.debuggable:
app_node = original_manifest.find('application')
app_node.set('{%s}%s' % (_ANDROID_NAMESPACE, 'debuggable'), 'true')
with open(debug_manifest_path, 'w') as debug_manifest:
debug_manifest.write(ElementTree.tostring(
original_manifest.getroot(), encoding='UTF-8'))
return debug_manifest_path
def _ResourceNameFromPath(path):
return os.path.splitext(os.path.basename(path))[0]
def _CreateKeepPredicate(resource_dirs, exclude_xxxhdpi, xxxhdpi_whitelist):
"""Return a predicate lambda to determine which resource files to keep."""
if not exclude_xxxhdpi:
# Do not extract dotfiles (e.g. ".gitkeep"). aapt ignores them anyways.
return lambda path: os.path.basename(path)[0] != '.'
# Returns False only for xxxhdpi non-mipmap, non-whitelisted drawables.
naive_predicate = lambda path: (
not re.search(r'[/-]xxxhdpi[/-]', path) or
re.search(r'[/-]mipmap[/-]', path) or
build_utils.MatchesGlob(path, xxxhdpi_whitelist))
# Build a set of all non-xxxhdpi drawables to ensure that we never exclude any
# xxxhdpi drawable that does not exist in other densities.
non_xxxhdpi_drawables = set()
for resource_dir in resource_dirs:
for path in build_utils.IterFiles(resource_dir):
if re.search(r'[/-]drawable[/-]', path) and naive_predicate(path):
non_xxxhdpi_drawables.add(_ResourceNameFromPath(path))
return lambda path: (naive_predicate(path) or
_ResourceNameFromPath(path) not in non_xxxhdpi_drawables)
def _ConvertToWebP(webp_binary, png_files):
renamed_paths = dict()
pool = multiprocessing.pool.ThreadPool(10)
def convert_image(png_path_tuple):
png_path, original_dir = png_path_tuple
root = os.path.splitext(png_path)[0]
webp_path = root + '.webp'
args = [webp_binary, png_path, '-mt', '-quiet', '-m', '6', '-q', '100',
'-lossless', '-o', webp_path]
subprocess.check_call(args)
os.remove(png_path)
renamed_paths[os.path.relpath(webp_path, original_dir)] = os.path.relpath(
png_path, original_dir)
pool.map(convert_image, [f for f in png_files
if not _PNG_WEBP_BLACKLIST_PATTERN.match(f[0])])
pool.close()
pool.join()
return renamed_paths
def _CompileDeps(aapt2_path, dep_subdirs, temp_dir):
partials_dir = os.path.join(temp_dir, 'partials')
build_utils.MakeDirectory(partials_dir)
partial_compile_command = [
aapt2_path,
'compile',
# TODO(wnwen): Turn this on once aapt2 forces 9-patch to be crunched.
# '--no-crunch',
]
pool = multiprocessing.pool.ThreadPool(10)
def compile_partial(directory):
dirname = os.path.basename(directory)
partial_path = os.path.join(partials_dir, dirname + '.zip')
compile_command = (partial_compile_command +
['--dir', directory, '-o', partial_path])
build_utils.CheckOutput(compile_command)
# Sorting the files in the partial ensures deterministic output from the
# aapt2 link step which uses order of files in the partial.
sorted_partial_path = os.path.join(partials_dir, dirname + '.sorted.zip')
_SortZip(partial_path, sorted_partial_path)
return sorted_partial_path
partials = pool.map(compile_partial, dep_subdirs)
pool.close()
pool.join()
return partials
def _CreateResourceInfoFile(
renamed_paths, apk_info_path, dependencies_res_zips):
lines = set()
for zip_file in dependencies_res_zips:
zip_info_file_path = zip_file + '.info'
if os.path.exists(zip_info_file_path):
with open(zip_info_file_path, 'r') as zip_info_file:
lines.update(zip_info_file.readlines())
for dest, source in renamed_paths.iteritems():
lines.add('Rename:{},{}\n'.format(dest, source))
with open(apk_info_path, 'w') as info_file:
info_file.writelines(sorted(lines))
def _PackageApk(options, dep_subdirs, temp_dir, gen_dir, r_txt_path):
"""Compile resources with aapt2 and generate intermediate .ap_ file.
Args:
options: The command-line options tuple. E.g. the generated apk
will be written to |options.apk_path|.
dep_subdirs: The list of directories where dependency resource zips
were extracted (its content will be altered by this function).
temp_dir: A temporary directory.
gen_dir: Another temp directory where some intermediate files are
generated.
    r_txt_path: The path where the R.txt file will be written to.
"""
renamed_paths = dict()
renamed_paths.update(_DuplicateZhResources(dep_subdirs))
keep_predicate = _CreateKeepPredicate(
dep_subdirs, options.exclude_xxxhdpi, options.xxxhdpi_whitelist)
png_paths = []
for directory in dep_subdirs:
for f in build_utils.IterFiles(directory):
if not keep_predicate(f):
os.remove(f)
elif f.endswith('.png'):
png_paths.append((f, directory))
if png_paths and options.png_to_webp:
renamed_paths.update(_ConvertToWebP(options.webp_binary, png_paths))
for directory in dep_subdirs:
renamed_paths.update(_MoveImagesToNonMdpiFolders(directory))
link_command = _CreateLinkApkArgs(options)
link_command += ['--output-text-symbols', r_txt_path]
# TODO(digit): Is this below actually required for R.txt generation?
link_command += ['--java', gen_dir]
fixed_manifest = _FixManifest(options, temp_dir)
link_command += ['--manifest', fixed_manifest]
partials = _CompileDeps(options.aapt2_path, dep_subdirs, temp_dir)
for partial in partials:
link_command += ['-R', partial]
# Creates a .zip with AndroidManifest.xml, resources.arsc, res/*
# Also creates R.txt
build_utils.CheckOutput(
link_command, print_stdout=False, print_stderr=False)
_CreateResourceInfoFile(
renamed_paths, options.apk_info_path, options.dependencies_res_zips)
def _WriteFinalRTxtFile(options, aapt_r_txt_path):
"""Determine final R.txt and return its location.
This handles --r-text-in and --r-text-out options at the same time.
Args:
options: The command-line options tuple.
aapt_r_txt_path: The path to the R.txt generated by aapt.
Returns:
Path to the final R.txt file.
"""
if options.r_text_in:
r_txt_file = options.r_text_in
else:
# When an empty res/ directory is passed, aapt does not write an R.txt.
r_txt_file = aapt_r_txt_path
if not os.path.exists(r_txt_file):
build_utils.Touch(r_txt_file)
if options.r_text_out:
shutil.copyfile(r_txt_file, options.r_text_out)
return r_txt_file
def _OnStaleMd5(options):
with resource_utils.BuildContext() as build:
dep_subdirs = resource_utils.ExtractDeps(options.dependencies_res_zips,
build.deps_dir)
_PackageApk(options, dep_subdirs, build.temp_dir, build.gen_dir,
build.r_txt_path)
r_txt_path = _WriteFinalRTxtFile(options, build.r_txt_path)
package = resource_utils.ExtractPackageFromManifest(
options.android_manifest)
    # If --shared-resources-whitelist is used, all resources listed in
    # the corresponding R.txt file will be non-final, and an onResourcesLoaded()
    # method will be generated to adjust them at runtime.
    #
    # Otherwise, if --shared-resources is used, all resources will be
    # non-final, and an onResourcesLoaded() method will be generated too.
#
# Otherwise, all resources will be final, and no method will be generated.
#
rjava_build_options = resource_utils.RJavaBuildOptions()
if options.shared_resources_whitelist:
rjava_build_options.ExportSomeResources(
options.shared_resources_whitelist)
rjava_build_options.GenerateOnResourcesLoaded()
elif options.shared_resources or options.app_as_shared_lib:
rjava_build_options.ExportAllResources()
rjava_build_options.GenerateOnResourcesLoaded()
resource_utils.CreateRJavaFiles(
build.srcjar_dir, package, r_txt_path,
options.extra_res_packages,
options.extra_r_text_files,
rjava_build_options)
if options.srcjar_out:
build_utils.ZipDir(options.srcjar_out, build.srcjar_dir)
if options.check_resources_pkg_id is not None:
expected_id = options.check_resources_pkg_id
package_id = _ExtractPackageIdFromApk(options.apk_path,
options.aapt_path)
if package_id != expected_id:
raise Exception('Invalid package ID 0x%x (expected 0x%x)' %
(package_id, expected_id))
def main(args):
args = build_utils.ExpandFileArgs(args)
options = _ParseArgs(args)
# Order of these must match order specified in GN so that the correct one
# appears first in the depfile.
possible_output_paths = [
options.apk_path,
options.apk_path + '.info',
options.r_text_out,
options.srcjar_out,
options.proguard_file,
options.proguard_file_main_dex,
]
output_paths = [x for x in possible_output_paths if x]
  # List python deps in input_strings rather than input_paths since their
  # contents do not change what gets written to the depfile.
input_strings = options.extra_res_packages + [
options.shared_resources,
options.exclude_xxxhdpi,
options.xxxhdpi_whitelist,
str(options.debuggable),
str(options.png_to_webp),
str(options.support_zh_hk),
str(options.no_xml_namespaces),
]
input_strings.extend(_CreateLinkApkArgs(options))
possible_input_paths = [
options.aapt_path,
options.android_manifest,
options.shared_resources_whitelist,
]
possible_input_paths += options.android_sdk_jars
input_paths = [x for x in possible_input_paths if x]
input_paths.extend(options.dependencies_res_zips)
input_paths.extend(options.extra_r_text_files)
if options.webp_binary:
input_paths.append(options.webp_binary)
build_utils.CallAndWriteDepfileIfStale(
lambda: _OnStaleMd5(options),
options,
input_paths=input_paths,
input_strings=input_strings,
output_paths=output_paths)
if __name__ == '__main__':
main(sys.argv[1:])
|
"""diarypro URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from diaryapp import views
from django.conf import settings
from django.conf.urls.static import static
urlpatterns = [
path('admin/', admin.site.urls),
path('',views.home_view, name='home'),
path('event/',views.diary_view, name='create event'),
path('update/<pk>',views.update_diary, name='update'),
path('delete/<pk>',views.delete_diary, name='delete')
] + static(settings.MEDIA_URL,document_root=settings.MEDIA_ROOT)
|
from .dagger import DAgger
|
# This source code is part of the Biotite package and is distributed
# under the 3-Clause BSD License. Please see 'LICENSE.rst' for further
# information.
__name__ = "biotite.sequence.io.gff"
__author__ = "Patrick Kunzmann"
__all__ = ["GFFFile"]
import copy
import string
from urllib.parse import quote, unquote
import warnings
from ....file import TextFile, InvalidFileError
from ...annotation import Location
# All punctuation characters except
# percent, semicolon, equals, ampersand, comma
_NOT_QUOTED = "".join(
[char for char in string.punctuation if char not in "%;=&,"]
) + " "
class GFFFile(TextFile):
"""
This class represents a file in *Generic Feature Format 3*
(`GFF3 <https://github.com/The-Sequence-Ontology/Specifications/blob/master/gff3.md>`_)
format.
Similar to GenBank files, GFF3 files contain information about
    features of a reference sequence, but in a more concise and more easily
    parsable way.
However, it does not provide additional meta information.
This class serves as low-level API for accessing GFF3 files.
It is used as a sequence of entries, where each entry is defined as
a non-comment and non-directive line.
Each entry consists of values corresponding to the 9 columns of
GFF3:
============== =============================== ==========================================================
**seqid** ``str`` The ID of the reference sequence
**source** ``str`` Source of the data (e.g. ``Genbank``)
**type** ``str`` Type of the feature (e.g. ``CDS``)
**start** ``int`` Start coordinate of feature on the reference sequence
**end** ``int`` End coordinate of feature on the reference sequence
**score** ``float`` or ``None`` Optional score (e.g. an E-value)
**strand** ``Location.Strand`` or ``None`` Strand of the feature, ``None`` if feature is not stranded
**phase** ``int`` or ``None`` Reading frame shift, ``None`` for non-CDS features
**attributes** ``dict`` Additional properties of the feature
============== =============================== ==========================================================
Note that the entry index may not be equal to the line index,
because GFF3 files can contain comment and directive lines.
Notes
-----
Although the GFF3 specification allows mixing in reference sequence
data in FASTA format via the ``##FASTA`` directive, this class does
not support extracting the sequence information.
The content after the ``##FASTA`` directive is simply ignored.
Please provide the sequence via a separate file or read the FASTA
data directly via the :attr:`lines` attribute:
>>> import os.path
>>> from io import StringIO
>>> gff_file = GFFFile.read(os.path.join(path_to_sequences, "indexing_test.gff3"))
>>> fasta_start_index = None
>>> for directive, line_index in gff_file.directives():
... if directive == "FASTA":
... fasta_start_index = line_index + 1
>>> fasta_data = StringIO("\\n".join(gff_file.lines[fasta_start_index:]))
>>> fasta_file = FastaFile.read(fasta_data)
>>> for seq_string in fasta_file.values():
... print(seq_string[:60] + "...")
TACGTAGCTAGCTGATCGATGTTGTGTGTATCGATCTAGCTAGCTAGCTGACTACACAAT...
Examples
--------
Reading and editing of an existing GFF3 file:
>>> import os.path
>>> gff_file = GFFFile.read(os.path.join(path_to_sequences, "gg_avidin.gff3"))
>>> # Get content of first entry
>>> seqid, source, type, start, end, score, strand, phase, attrib = gff_file[0]
>>> print(seqid)
AJ311647.1
>>> print(source)
EMBL
>>> print(type)
region
>>> print(start)
1
>>> print(end)
1224
>>> print(score)
None
>>> print(strand)
Strand.FORWARD
>>> print(phase)
None
>>> print(attrib)
{'ID': 'AJ311647.1:1..1224', 'Dbxref': 'taxon:9031', 'Name': 'Z', 'chromosome': 'Z', 'gbkey': 'Src', 'mol_type': 'genomic DNA'}
>>> # Edit the first entry: Simply add a score
>>> score = 1.0
>>> gff_file[0] = seqid, source, type, start, end, score, strand, phase, attrib
>>> # Delete first entry
>>> del gff_file[0]
Writing a new GFF3 file:
>>> gff_file = GFFFile()
>>> gff_file.append_directive("Example directive", "param1", "param2")
>>> gff_file.append(
... "SomeSeqID", "Biotite", "CDS", 1, 99,
... None, Location.Strand.FORWARD, 0,
... {"ID": "FeatureID", "product":"A protein"}
... )
>>> print(gff_file) #doctest: +NORMALIZE_WHITESPACE
##gff-version 3
##Example directive param1 param2
SomeSeqID Biotite CDS 1 99 . + 0 ID=FeatureID;product=A protein
"""
def __init__(self):
super().__init__()
# Maps entry indices to line indices
self._entries = None
# Stores the directives as (directive text, line index)-tuple
self._directives = None
# Stores whether the file has FASTA data
self._has_fasta = None
self._index_entries()
self.append_directive("gff-version", "3")
@classmethod
def read(cls, file):
"""
Read a GFF3 file.
Parameters
----------
file : file-like object or str
The file to be read.
Alternatively a file path can be supplied.
Returns
-------
file_object : GFFFile
The parsed file.
"""
file = super().read(file)
file._index_entries()
return file
def insert(self, index, seqid, source, type, start, end,
score, strand, phase, attributes=None):
"""
Insert an entry at the given index.
Parameters
----------
index : int
Index where the entry is inserted.
If the index is equal to the length of the file, the entry
is appended at the end of the file.
seqid : str
The ID of the reference sequence.
source : str
Source of the data (e.g. ``Genbank``).
type : str
Type of the feature (e.g. ``CDS``).
start : int
Start coordinate of feature on the reference sequence.
end : int
End coordinate of feature on the reference sequence.
score : float or None
Optional score (e.g. an E-value).
strand : Location.Strand or None
Strand of the feature, ``None`` if feature is not stranded.
phase : int or None
Reading frame shift, ``None`` for non-CDS features.
attributes : dict, optional
Additional properties of the feature.
"""
if index == len(self):
self.append(seqid, source, type, start, end,
score, strand, phase, attributes)
else:
line_index = self._entries[index]
line = GFFFile._create_line(
seqid, source, type, start, end,
score, strand, phase, attributes
)
self.lines.insert(line_index, line)
self._index_entries()
def append(self, seqid, source, type, start, end,
score, strand, phase, attributes=None):
"""
Append an entry to the end of the file.
Parameters
----------
seqid : str
The ID of the reference sequence.
source : str
Source of the data (e.g. ``Genbank``).
type : str
Type of the feature (e.g. ``CDS``).
start : int
Start coordinate of feature on the reference sequence.
end : int
End coordinate of feature on the reference sequence.
score : float or None
Optional score (e.g. an E-value).
strand : Location.Strand or None
Strand of the feature, ``None`` if feature is not stranded.
phase : int or None
Reading frame shift, ``None`` for non-CDS features.
attributes : dict, optional
Additional properties of the feature.
"""
if self._has_fasta:
raise NotImplementedError(
"Cannot append feature entries, "
"as this file contains additional FASTA data"
)
line = GFFFile._create_line(
seqid, source, type, start, end, score, strand, phase, attributes
)
self.lines.append(line)
# Fast update of entry index by adding last line
self._entries.append(len(self.lines) - 1)
def append_directive(self, directive, *args):
"""
Append a directive line to the end of the file.
Parameters
----------
directive : str
Name of the directive.
*args : str
Optional parameters for the directive.
Each argument is simply appended to the directive, separated
by a single space character.
Raises
------
NotImplementedError
If the ``##FASTA`` directive is used, which is not
supported.
Examples
--------
>>> gff_file = GFFFile()
>>> gff_file.append_directive("Example directive", "param1", "param2")
>>> print(gff_file)
##gff-version 3
##Example directive param1 param2
"""
if directive.startswith("FASTA"):
raise NotImplementedError(
"Adding FASTA information is not supported"
)
directive_line = "##" + directive + " " + " ".join(args)
self._directives.append((directive_line[2:], len(self.lines)))
self.lines.append(directive_line)
def directives(self):
"""
Get the directives in the file.
Returns
-------
directives : list of tuple(str, int)
A list of directives, sorted by their line order.
The first element of each tuple is the name of the
directive (without ``##``), the second element is the index
of the corresponding line.
"""
# Sort in line order
return sorted(self._directives, key=lambda directive: directive[1])
def __setitem__(self, index, item):
seqid, source, type, start, end, score, strand, phase, attrib = item
line = GFFFile._create_line(
seqid, source, type, start, end, score, strand, phase, attrib
)
line_index = self._entries[index]
self.lines[line_index] = line
def __getitem__(self, index):
if (index >= 0 and index >= len(self)) or \
(index < 0 and -index > len(self)):
raise IndexError(
f"Index {index} is out of range for GFFFile with "
f"{len(self)} entries"
)
line_index = self._entries[index]
# Columns are tab separated
s = self.lines[line_index].strip().split("\t")
if len(s) != 9:
raise InvalidFileError(f"Expected 9 columns, but got {len(s)}")
seqid, source, type, start, end, score, strand, phase, attrib = s
seqid = unquote(seqid)
source = unquote(source)
type = unquote(type)
start = int(start)
end = int(end)
score = None if score == "." else float(score)
if strand == "+":
strand = Location.Strand.FORWARD
elif strand == "-":
strand = Location.Strand.REVERSE
else:
strand = None
phase = None if phase == "." else int(phase)
attrib = GFFFile._parse_attributes(attrib)
return seqid, source, type, start, end, score, strand, phase, attrib
def __delitem__(self, index):
line_index = self._entries[index]
del self.lines[line_index]
self._index_entries()
def __len__(self):
return len(self._entries)
def _index_entries(self):
"""
Parse the file for comment and directive lines.
Count these lines cumulatively, so that entry indices can be
mapped onto line indices.
Additionally track the line index of directive lines.
"""
self._directives = []
# Worst case allocation -> all lines contain actual entries
self._entries = [None] * len(self.lines)
self._has_fasta = False
entry_counter = 0
for line_i, line in enumerate(self.lines):
if len(line) == 0 or line[0] == " ":
                # Empty line or line starting with whitespace -> do nothing
pass
elif line.startswith("#"):
# Comment or directive
if line.startswith("##"):
# Directive
# Omit the leading '##'
self._directives.append((line[2:], line_i))
if line[2:] == "FASTA":
self._has_fasta = True
# This parser does not support bundled FASTA
# data
warnings.warn(
"Biotite does not support FASTA data mixed into "
"GFF files, the FASTA data will be ignored"
)
# To ignore the following FASTA data, stop
# parsing at this point
break
else:
# Actual entry
self._entries[entry_counter] = line_i
entry_counter += 1
# Trim to correct size
self._entries = self._entries[:entry_counter]
@staticmethod
def _create_line(seqid, source, type, start, end,
score, strand, phase, attributes):
"""
Create a line for a newly created entry.
"""
seqid = quote(seqid.strip(), safe=_NOT_QUOTED) \
if seqid is not None else "."
source = quote(source.strip(), safe=_NOT_QUOTED) \
if source is not None else "."
type = type.strip()
# Perform checks
if len(seqid) == 0:
raise ValueError("'seqid' must not be empty")
if len(source) == 0:
raise ValueError("'source' must not be empty")
if len(type) == 0:
raise ValueError("'type' must not be empty")
if seqid[0] == ">":
raise ValueError("'seqid' must not start with '>'")
score = str(score) if score is not None else "."
if strand == Location.Strand.FORWARD:
strand = "+"
elif strand == Location.Strand.REVERSE:
strand = "-"
else:
strand = "."
phase = str(phase) if phase is not None else "."
attributes = ";".join(
[quote(key, safe=_NOT_QUOTED) + "=" + quote(val, safe=_NOT_QUOTED)
for key, val in attributes.items()]
) if attributes is not None and len(attributes) > 0 else "."
return "\t".join(
[seqid, source, type, str(start), str(end),
str(score), strand, phase, attributes]
)
@staticmethod
def _parse_attributes(attributes):
"""
Parse the *attributes* string into a dictionary.
"""
if attributes == ".":
return {}
attrib_dict = {}
attrib_entries = attributes.split(";")
for entry in attrib_entries:
compounds = entry.split("=")
if len(compounds) != 2:
raise InvalidFileError(
f"Attribute entry '{entry}' is invalid"
)
key, val = compounds
attrib_dict[unquote(key)] = unquote(val)
return attrib_dict
|
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from tqdm import tqdm
import numpy as np
import mindspore as ms
import mindspore.nn as nn
from mindspore.dataset import NumpySlicesDataset
from mindspore import context, Tensor
context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
class AutoEncoderTrainNetwork(nn.Cell):
def __init__(self):
super(AutoEncoderTrainNetwork, self).__init__()
self.loss_fun = nn.MSELoss()
self.net = nn.CellList([nn.Dense(2, 32), nn.Dense(32, 2)])
self.relu = nn.ReLU()
def reconstruct_sample(self, x: Tensor):
for _, layer in enumerate(self.net):
x = layer(x)
x = self.relu(x)
return x
def construct(self, x: Tensor):
recon_x = self.reconstruct_sample(x)
return self.loss_fun(recon_x, x)
def sample_2d_data(self, n_normals=2000, n_outliers=400):
z = np.random.randn(n_normals, 2)
outliers = np.random.uniform(low=-6, high=6, size=(n_outliers, 2))
centers = np.array([(2., 0), (-2., 0)])
sigma = 0.3
normal_points = sigma * z + centers[np.random.randint(len(centers), size=(n_normals,))]
return np.vstack((normal_points, outliers))
def create_synthetic_dataset(self):
transformed_dataset = self.sample_2d_data()
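        # Min-max scale each feature dimension to [0, 1] so the autoencoder
        # trains on comparable ranges (constant dimensions are handled below).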
for dim in range(transformed_dataset.shape[1]):
min_val = transformed_dataset[:, dim].min()
max_val = transformed_dataset[:, dim].max()
if min_val != max_val:
transformed_dataset[:, dim] = (transformed_dataset[:, dim] - min_val) / (max_val - min_val)
elif min_val != 1:
transformed_dataset[:, dim] = transformed_dataset[:, dim] / min_val
transformed_dataset = transformed_dataset.astype(np.float32)
return transformed_dataset
def test_auto_monad_layer():
ae_with_loss = AutoEncoderTrainNetwork()
transformed_dataset = ae_with_loss.create_synthetic_dataset()
dataloader = NumpySlicesDataset(data=(transformed_dataset,), shuffle=True)
dataloader = dataloader.batch(batch_size=16)
optim = nn.RMSProp(params=ae_with_loss.trainable_params(), learning_rate=0.002,)
train_net = nn.TrainOneStepCell(ae_with_loss, optim)
train_net.set_train()
gen_samples = dict()
num_epoch = 21
for epoch in tqdm(range(num_epoch)):
loss = []
for _, (batch,) in enumerate(dataloader):
batch = Tensor(batch, dtype=ms.float32)
loss_ = train_net(batch)
loss.append(loss_.asnumpy())
avg_loss = np.array(loss).mean()
if epoch % 10 == 0:
gen_samples[epoch] = ae_with_loss.reconstruct_sample(Tensor(transformed_dataset)).asnumpy()
print(f"epoch: {epoch}/{num_epoch}, avg loss: {avg_loss}")
|
from datetime import date
class Receipt:
def __init__(self, member_number):
# Initialize the receipt as a list for future modifications (adding and removing items)
self.member_items = []
self.member_number = member_number
self.total = 0.0
self.total_tax = 0.0
# Adds items to the member's receipt and displays the current total with tax
def add_item(self, item):
self.member_items.append(item)
item_price = item.floatPrice
self.total_tax += item.tax
self.total += item_price + item.tax
print("{0:<20} {1:>10}".format(item.name, str(item.floatPrice)))
print("TAX: %26.2f" % (self.total_tax))
print("TOTAL: %24.2f" % (self.total))
# Removes items from the receipt and displays the current total with tax
def remove_item(self, item):
if item in self.member_items:
self.member_items.remove(item)
self.total_tax -= item.tax
            self.total -= item.floatPrice + item.tax
print("REMOVED")
print("{0:<20} {1:>10}".format(item.name, str(item.floatPrice)))
print("TAX: %26.2f" % (self.total_tax))
print("TOTAL: %24.2f" % (self.total))
elif len(self.member_items) == 0:
print("No items in the receipt")
else:
print("Item does not exist in member's receipt")
# Finalizes the receipt string and returns it to the POS
def finalize_receipt(self):
# Initialize the receipt string
final_receipt = " RECEIPT\nMembership Number: " + (self.member_number + "\n")
total = 0.0
total_tax = 0.0
final_receipt += "ITEMS:\n"
for item in self.member_items:
final_receipt += ("{0:<20} {1:>10}\n".format(item.name, str(item.floatPrice)))
total_tax += item.tax
            total += item.tax + item.floatPrice
final_receipt += ("\nTAX: %26.2f\n" % (self.total_tax))
final_receipt += ("TOTAL: %24.2f\n" % (self.total))
final_receipt += str(date.today())
return final_receipt
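# Illustrative usage sketch (not part of the original module). `Item` below is a
# hypothetical stand-in: any object exposing `name`, `floatPrice` and `tax` works.
if __name__ == "__main__":
    from collections import namedtuple
    Item = namedtuple("Item", ["name", "floatPrice", "tax"])
    receipt = Receipt("M-001")
    receipt.add_item(Item("Milk", 3.49, 0.21))
    receipt.add_item(Item("Bread", 2.00, 0.12))
    print(receipt.finalize_receipt())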
|
# -*- coding: utf-8 -*-
# Copyright (c) 2020 Nekokatt
# Copyright (c) 2021-present davfsa
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import mock
import pytest
from hikari import files
from hikari.internal import routes
from tests.hikari import hikari_test_helpers
class TestCompiledRoute:
@pytest.fixture()
def compiled_route(self):
return routes.CompiledRoute(
major_param_hash="abc123", route=mock.Mock(method="GET"), compiled_path="/some/endpoint"
)
def test_method(self, compiled_route):
assert compiled_route.method == "GET"
def test_create_url(self, compiled_route):
assert compiled_route.create_url("https://some.url/api") == "https://some.url/api/some/endpoint"
def test_create_real_bucket_hash(self, compiled_route):
assert compiled_route.create_real_bucket_hash("UNKNOWN") == "UNKNOWN;abc123"
def test__str__(self, compiled_route):
assert str(compiled_route) == "GET /some/endpoint"
class TestRoute:
@pytest.mark.parametrize(
("route", "params"),
[
(routes.DELETE_CHANNEL, frozenset(("channel",))),
(routes.PATCH_GUILD, frozenset(("guild",))),
(routes.POST_WEBHOOK_WITH_TOKEN, frozenset(("webhook", "token"))),
(routes.GET_INVITE, None),
],
)
def test_major_params(self, route, params):
assert route.major_params == params
def test_compile_with_no_major_params(self):
route = routes.Route(method="GET", path_template="/some/endpoint/{baguette}")
expected = routes.CompiledRoute(route=route, compiled_path="/some/endpoint/1234", major_param_hash="-")
assert route.compile(baguette=1234) == expected
def test_compile_with_channel_major_params(self):
route = routes.Route(method="GET", path_template="/channels/{channel}")
expected = routes.CompiledRoute(route=route, compiled_path="/channels/4325", major_param_hash="4325")
assert route.compile(channel=4325) == expected
def test_compile_with_guild_major_params(self):
route = routes.Route(method="GET", path_template="/guilds/{guild}")
expected = routes.CompiledRoute(route=route, compiled_path="/guilds/5555", major_param_hash="5555")
assert route.compile(guild=5555) == expected
def test_compile_with_webhook_major_params(self):
route = routes.Route(method="GET", path_template="/webhooks/{webhook}/{token}")
expected = routes.CompiledRoute(
route=route, compiled_path="/webhooks/123/okfdkdfkdf", major_param_hash="123:okfdkdfkdf"
)
assert route.compile(webhook=123, token="okfdkdfkdf") == expected
def test__str__(self):
assert str(routes.Route(method="GET", path_template="/some/endpoint/{channel}")) == "/some/endpoint/{channel}"
class TestCDNRoute:
def test_zero_formats_results_in_error(self):
with pytest.raises(ValueError, match="/foo/bar must have at least one valid format set"):
routes.CDNRoute("/foo/bar", set())
def test_any_formats_results_in_no_error(self):
routes.CDNRoute("/foo/bar", {"do", "ray", "me"})
def test_formats_converted_to_frozenset(self):
route = routes.CDNRoute("/foo/bar", {"i", "really", "like", "cats"})
assert isinstance(route.valid_formats, frozenset)
assert route.valid_formats == {"i", "really", "like", "cats"}
def test_formats_converted_to_lower(self):
route = routes.CDNRoute("/foo/bar", {"FOO", "BaR", "bAz", "bork"})
assert route.valid_formats == {"foo", "bar", "baz", "bork"}
def test_eq_operator__considers_path_template_only(self):
route1 = routes.CDNRoute("/foo/bar", {"hello", "world"}, sizable=False)
route2 = routes.CDNRoute("/foo/bar", {"i", "said", "meow"}, sizable=True)
route3 = routes.CDNRoute("/foo/bar", {"i", "said", "meow"}, sizable=False)
route4 = routes.CDNRoute("/foo/bar/baz", {"i", "said", "meow"}, sizable=True)
assert route1 == route2
assert route1 == route3
assert route1 != route4
assert route2 == route3
assert route2 != route4
assert route3 != route4
def test_hash_operator_considers_path_template_only(self):
route1 = routes.CDNRoute("/foo/bar", {"hello", "world"}, sizable=False)
route2 = routes.CDNRoute("/foo/bar", {"i", "said", "meow"}, sizable=True)
route3 = routes.CDNRoute("/foo/bar", {"i", "said", "meow"}, sizable=False)
route4 = routes.CDNRoute("/foo/bar/baz", {"i", "said", "meow"}, sizable=True)
assert hash(route1) == hash(route2)
assert hash(route1) == hash(route3)
assert hash(route1) != hash(route4)
assert hash(route2) == hash(route3)
assert hash(route2) != hash(route4)
assert hash(route3) != hash(route4)
@pytest.mark.parametrize(
("input_file_format", "expected_file_format"),
[
("jpg", "jpg"),
("JPG", "jpg"),
("png", "png"),
("PNG", "png"),
],
)
def test_compile_uses_lowercase_file_format_always(self, input_file_format, expected_file_format):
route = routes.CDNRoute("/foo/bar", {"png", "jpg"}, sizable=False)
compiled_url = route.compile("http://example.com", file_format=input_file_format)
assert compiled_url.endswith(f".{expected_file_format}"), f"compiled_url={compiled_url}"
def test_disallowed_file_format_raises_TypeError(self):
route = routes.CDNRoute("/foo/bar", {"png", "jpg"}, sizable=False)
with pytest.raises(TypeError):
route.compile("http://example.com", file_format="gif")
def test_allowed_file_format_does_not_raise_TypeError(self):
route = routes.CDNRoute("/foo/bar", {"png", "jpg"}, sizable=False)
route.compile("http://example.com", file_format="png")
def test_requesting_gif_on_non_animated_hash_raises_TypeError(self):
route = routes.CDNRoute("/foo/bar", {"png", "jpg", "gif"}, sizable=False)
with pytest.raises(TypeError):
route.compile("http://example.com", file_format="gif", hash="boooob")
@pytest.mark.parametrize("format", ["png", "jpg", "webp"])
def test_requesting_non_gif_on_non_animated_hash_does_not_raise_TypeError(self, format):
route = routes.CDNRoute("/foo/bar", {"png", "jpg", "webp", "gif"}, sizable=False)
route.compile("http://example.com", file_format=format, hash="boooob")
@pytest.mark.parametrize("format", ["png", "jpg", "webp"])
def test_requesting_non_gif_on_animated_hash_does_not_raise_TypeError(self, format):
route = routes.CDNRoute("/foo/bar", {"png", "jpg", "webp", "gif"}, sizable=False)
route.compile("http://example.com", file_format=format, hash="a_boooob")
def test_requesting_gif_on_animated_hash_does_not_raise_TypeError(self):
route = routes.CDNRoute("/foo/bar", {"png", "jpg", "gif"}, sizable=False)
route.compile("http://example.com", file_format="gif", hash="a_boooob")
def test_requesting_gif_without_passing_hash_does_not_raise_TypeError(self):
route = routes.CDNRoute("/foo/bar", {"png", "jpg", "gif"}, sizable=False)
route.compile("http://example.com", file_format="gif")
def test_passing_size_on_non_sizable_raises_TypeError(self):
route = routes.CDNRoute("/foo/bar", {"png", "jpg", "gif"}, sizable=False)
with pytest.raises(TypeError):
route.compile("http://example.com", file_format="png", hash="boooob", size=128)
def test_passing_size_on_sizable_does_not_raise_TypeError(self):
route = routes.CDNRoute("/foo/bar", {"png", "jpg", "gif"}, sizable=True)
route.compile("http://example.com", file_format="png", hash="boooob", size=128)
def test_passing_no_size_on_non_sizable_does_not_raise_TypeError(self):
route = routes.CDNRoute("/foo/bar", {"png", "jpg", "gif"}, sizable=False)
route.compile("http://example.com", file_format="png", hash="boooob")
def test_passing_no_size_on_sizable_does_not_raise_TypeError(self):
route = routes.CDNRoute("/foo/bar", {"png", "jpg", "gif"}, sizable=True)
route.compile("http://example.com", file_format="png", hash="boooob")
@pytest.mark.parametrize("size", [*range(17, 32)])
def test_passing_non_power_of_2_sizes_to_sizable_raises_ValueError(self, size):
route = routes.CDNRoute("/foo/bar", {"png", "jpg", "gif"}, sizable=True)
with pytest.raises(ValueError, match="size must be an integer power of 2 between 16 and 4096 inclusive"):
route.compile("http://example.com", file_format="png", hash="boooob", size=size)
@pytest.mark.parametrize("size", [int(2 ** size) for size in [1, *range(17, 25)]])
def test_passing_invalid_magnitude_sizes_to_sizable_raises_ValueError(self, size):
route = routes.CDNRoute("/foo/bar", {"png", "jpg", "png"}, sizable=True)
with pytest.raises(ValueError, match="size must be an integer power of 2 between 16 and 4096 inclusive"):
route.compile("http://example.com", file_format="png", hash="boooob", size=size)
@pytest.mark.parametrize("size", [*range(-10, 0)])
def test_passing_negative_sizes_to_sizable_raises_ValueError(self, size):
route = routes.CDNRoute("/foo/bar", {"png", "jpg", "png"}, sizable=True)
with pytest.raises(ValueError, match="size must be positive"):
route.compile("http://example.com", file_format="png", hash="boooob", size=size)
@pytest.mark.parametrize("size", [int(2 ** size) for size in range(4, 13)])
def test_passing_valid_sizes_to_sizable_does_not_raise_ValueError(self, size):
route = routes.CDNRoute("/foo/bar", {"png", "jpg", "gif"}, sizable=True)
route.compile("http://example.com", file_format="png", hash="boooob", size=size)
def test_passing_size_adds_query_string(self):
route = routes.CDNRoute("/foo/bar", {"png", "jpg", "gif"}, sizable=True)
compiled_url = route.compile("http://example.com", file_format="png", hash="boooob", size=128)
assert compiled_url.endswith(".png?size=128"), f"compiled_url={compiled_url}"
def test_passing_None_size_does_not_add_query_string(self):
route = routes.CDNRoute("/foo/bar", {"png", "jpg", "gif"}, sizable=True)
compiled_url = route.compile("http://example.com", file_format="png", hash="boooob", size=None)
assert "?size=" not in compiled_url, f"compiled_url={compiled_url}"
def test_passing_no_size_does_not_add_query_string(self):
route = routes.CDNRoute("/foo/bar", {"png", "jpg", "gif"}, sizable=True)
compiled_url = route.compile("http://example.com", file_format="png", hash="boooob")
assert "?size=" not in compiled_url, f"compiled_url={compiled_url}"
@pytest.mark.parametrize(
("base_url", "template", "format", "size_kwds", "foo", "bar", "expected_url"),
[
(
"http://example.com",
"/{foo}/{bar}",
"PNG",
{"size": 128},
"baz",
"bork qux",
"http://example.com/baz/bork%20qux.png?size=128",
),
(
"http://example.com",
"/{foo}/bar",
"jpg",
{"size": 128},
"baz",
"bork qux",
"http://example.com/baz/bar.jpg?size=128",
),
(
"http://example.com",
"/{foo}/{bar}",
"WEBP",
{"size": None},
"baz",
123456,
"http://example.com/baz/123456.webp",
),
(
"http://example.com",
"/{foo}/bar",
"GIF",
{"size": None},
"baz",
"bork qux",
"http://example.com/baz/bar.gif",
),
(
"http://example.com",
"/{foo}/{bar}",
"WEBP",
{},
"baz",
"bork qux",
"http://example.com/baz/bork%20qux.webp",
),
(
"http://example.com",
"/{foo}/bar",
"GIF",
{},
"baz",
"bork qux",
"http://example.com/baz/bar.gif",
),
],
)
def test_compile_generates_expected_url(self, base_url, template, format, size_kwds, foo, bar, expected_url):
route = routes.CDNRoute(template, {"png", "gif", "jpg", "webp"}, sizable=True)
actual_url = route.compile(base_url=base_url, file_format=format, foo=foo, bar=bar, **size_kwds)
assert actual_url == expected_url
@pytest.mark.parametrize("format", ["png", "jpg"])
@pytest.mark.parametrize("size", [64, 256, 2048])
def test_compile_to_file_calls_compile(self, format, size):
with mock.patch.object(files, "URL", autospec=files.URL):
route = hikari_test_helpers.mock_class_namespace(routes.CDNRoute, slots_=False)(
"/hello/world", {"png", "jpg"}, sizable=True
)
route.compile = mock.Mock(spec_set=route.compile)
route.compile_to_file("https://blep.com", file_format=format, size=size, boop="oyy lumo", nya="weeb")
route.compile.assert_called_once_with(
"https://blep.com", file_format=format, size=size, boop="oyy lumo", nya="weeb"
)
def test_compile_to_file_passes_compile_result_to_URL_and_returns_constructed_url(self):
resultant_url_str = "http://blep.com/hello/world/weeb/oyy%20lumo"
resultant_url = files.URL("http://blep.com/hello/world/weeb/oyy%20lumo")
with mock.patch.object(files, "URL", autospec=files.URL, return_value=resultant_url) as URL:
route = hikari_test_helpers.mock_class_namespace(routes.CDNRoute, slots_=False)(
"/hello/world/{nya}/{boop}", {"png", "jpg"}, sizable=True
)
route.compile = mock.Mock(spec_set=route.compile, return_value=resultant_url_str)
result = route.compile_to_file("https://blep.com", file_format="png", size=64, boop="oyy lumo", nya="weeb")
URL.assert_called_once_with(resultant_url_str)
assert result is resultant_url
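    def test_compile_combined_usage_sketch(self):
        # Consolidated usage sketch (hypothetical template, hash and base URL), combining
        # the lowercasing, template substitution and ?size= behaviours exercised above.
        route = routes.CDNRoute("/avatars/{user_id}/{hash}", {"png", "jpg", "webp", "gif"}, sizable=True)
        url = route.compile("https://cdn.example.com", file_format="PNG", size=256, user_id=1234, hash="a_abc123")
        assert url == "https://cdn.example.com/avatars/1234/a_abc123.png?size=256"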
|
class Node:
def __init__(self, data):
self.data = data
self.next_node = None
class LinkedList:
def __init__(self):
self.head = None
self.no_of_nodes = 0
# O(1) for insertion at the start of LL
def insert_at_start(self, data):
self.no_of_nodes = self.no_of_nodes + 1
new_node = Node(data)
if self.head is None:
self.head = new_node
else:
new_node.next_node = self.head
self.head = new_node
    # O(N) for insertion at the end of LL
    def insert_at_end(self, data):
        self.no_of_nodes = self.no_of_nodes + 1
        new_node = Node(data)
        # Handle the empty-list case, otherwise self.head.next_node would fail
        if self.head is None:
            self.head = new_node
            return
        actual_node = self.head
        while actual_node.next_node is not None:
            actual_node = actual_node.next_node
        actual_node.next_node = new_node
# O(1)
def size_of_ll(self):
return self.no_of_nodes
# O(N)
def traverse_ll(self):
actual_node = self.head
while actual_node is not None:
print(actual_node.data)
actual_node = actual_node.next_node
def remove_from_ll(self, data):
if self.head is None:
return
actual_node = self.head
previous_node = None
while actual_node is not None and actual_node.data != data:
previous_node = actual_node
actual_node = actual_node.next_node
# Item not present in Linked List
if actual_node is None:
return
# Decrease node count for deletion
self.no_of_nodes = self.no_of_nodes - 1
if previous_node is None:
self.head = actual_node.next_node
else:
previous_node.next_node = actual_node.next_node
    # O(N) runtime complexity
    def find_middle_node(self):
        # Fast/slow pointer technique; guard against an empty list
        if self.head is None:
            return None
        fast_pointer = self.head
        slow_pointer = self.head
        while fast_pointer.next_node and fast_pointer.next_node.next_node:
            fast_pointer = fast_pointer.next_node.next_node
            slow_pointer = slow_pointer.next_node
        return slow_pointer.data
    # O(N) runtime complexity
def reverse_ll_in_place(self):
previous_node = None
current_node = self.head
next_node = None
while current_node is not None:
next_node = current_node.next_node
current_node.next_node = previous_node
previous_node = current_node
current_node = next_node
self.head = previous_node
return
ll = LinkedList()
ll.insert_at_start(15)
ll.insert_at_start(8)
ll.insert_at_start(5)
ll.insert_at_end(6)
ll.insert_at_end(76)
ll.insert_at_end(43)
ll.insert_at_start("Yo")
ll.traverse_ll()
print("Size : ", ll.size_of_ll())
# ll.remove_from_ll(8)
print("---------")
# ll.traverse_ll()
print("Size : ", ll.size_of_ll())
print(ll.find_middle_node())
ll.reverse_ll_in_place()
ll.traverse_ll()
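# Quick sanity check of remove_from_ll (a sketch with made-up values, not part of the
# original driver): removing an existing element shrinks the list by one, while removing
# a missing value leaves the size unchanged.
ll.remove_from_ll(76)
print("Size after removing 76:", ll.size_of_ll())
ll.remove_from_ll("not in list")
print("Size after removing a missing value:", ll.size_of_ll())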
|
from gettybase import Session
import unicodedata
import os
class Getty():
def __init__(self):
try:
self.s = Session(os.environ['getty_system_id'],
os.environ['getty_system_pass'],
os.environ['getty_user_name'],
os.environ['getty_user_pass'])
except KeyError:
exit('Missing Getty API keys')
def search(self, terms):
return self.s.search(terms, items=5, from_item=1)
    def buy(self, item):
        print(type(item))
        print(item)
        output = self.s.buy(item, 1024 * 1024)
        return output
def findAndReturn(self, terms, needed):
images = []
items = self.search(terms)
for i, item in enumerate(items):
if i >= needed:
break
url = self.buy(unicodedata.normalize('NFKD', item['image_id']).encode('ascii', 'ignore')) # noqa
images.append(url)
return images
if __name__ == '__main__':
g = Getty()
g.findAndReturn('ice, cold', 3)
|
from exceptions.exceptions import NGSIUsageError
from utils.jsondict import lookup_string_match
from flask import request
from reporter.reporter import _validate_query_params
from translators.crate import CrateTranslatorInstance
import logging
from .geo_query_handler import handle_geo_query
def query_NTNENA(id_=None, # In Query
attrs=None,
type_=None,
aggr_method=None,
aggr_period=None,
aggr_scope=None,
options=None,
from_date=None,
to_date=None,
last_n=None,
limit=10000,
offset=0,
georel=None,
geometry=None,
coords=None):
"""
See /v2/attrs in API Specification
quantumleap.yml
"""
r, c = _validate_query_params(attrs, aggr_period, aggr_method, aggr_scope,
options)
if c != 200:
return r, c
r, c, geo_query = handle_geo_query(georel, geometry, coords)
if r:
return r, c
if attrs is not None:
attrs = attrs.split(',')
fiware_s = request.headers.get('fiware-service', None)
fiware_sp = request.headers.get('fiware-servicepath', None)
entities = None
entity_ids = None
if id_:
entity_ids = [s.strip() for s in id_.split(',') if s]
try:
with CrateTranslatorInstance() as trans:
entities = trans.query(attr_names=attrs,
entity_type=type_,
entity_ids=entity_ids,
aggr_method=aggr_method,
aggr_period=aggr_period,
aggr_scope=aggr_scope,
from_date=from_date,
to_date=to_date,
last_n=last_n,
limit=limit,
offset=offset,
fiware_service=fiware_s,
fiware_servicepath=fiware_sp,
geo_query=geo_query)
except NGSIUsageError as e:
msg = "Bad Request Error: {}".format(e)
logging.getLogger().error(msg, exc_info=True)
return msg, 400
except Exception as e:
msg = "Something went wrong with QL. Error: {}".format(e)
logging.getLogger().error(msg, exc_info=True)
return msg, 500
attributes = []
entries = []
attrs_names = []
attrs_values = []
ignore = ('id', 'index', 'type')
if entities:
for e in entities:
attrs = [at for at in sorted(e.keys()) if at not in ignore]
for at in attrs:
if at not in attrs_names:
attrs_names.append(at)
for at in attrs_names:
entity_type = []
entity_types = []
entity_value = []
for e in entities:
matched_attr = lookup_string_match(e, at)
if matched_attr is not None:
index = [from_date or '', to_date or ''] if aggr_method and not aggr_period else e['index']
entity = {
'entityId': e['id'],
'index': index,
'values': matched_attr['values'] if matched_attr else [],
}
if e['type'] not in entity_types:
entity_value = []
entity_value.append(entity)
entity_ty = {
'entityType': e['type'],
'entities': entity_value
}
entity_type.append(entity_ty)
entity_types.append(e['type'])
else:
entity_value.append(entity)
entity_type.pop()
entity_ty = {
'entityType': e['type'],
'entities': entity_value
}
entity_type.append(entity_ty)
attrs_value = {
'attrName': at,
'types': entity_type
}
attrs_values.append(attrs_value)
res = {
'attrs': attrs_values
}
return res
r = {
"error": "Not Found",
"description": "No records were found for such query."
}
return r, 404
def query_NTNENA_value(*args, **kwargs):
res = query_NTNENA(*args, **kwargs)
if isinstance(res, dict):
res['values'] = res['attrs']
res.pop('attrs', None)
return res
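# Illustrative only (hypothetical entity ids, dates and values): a successful response
# assembled by query_NTNENA groups values by attribute name, then entity type, then
# entity id, roughly in this shape.
_EXAMPLE_NTNENA_RESPONSE = {
    'attrs': [
        {
            'attrName': 'temperature',
            'types': [
                {
                    'entityType': 'Room',
                    'entities': [
                        {
                            'entityId': 'Room1',
                            'index': ['2020-01-01T00:00:00', '2020-01-01T01:00:00'],
                            'values': [21.5, 22.0],
                        },
                    ],
                },
            ],
        },
    ],
}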
|
from typing import Any, Dict
import argparse
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
CONV_DIM = 64
FC_DIM = 128
WINDOW_WIDTH = 28
WINDOW_STRIDE = 28
class ConvBlock(nn.Module):
"""
Simple 3x3 conv with padding size 1 (to leave the input size unchanged), followed by a ReLU.
"""
def __init__(self, input_channels: int, output_channels: int, kernel_size: int = 3, stride: int = 1) -> None:
super().__init__()
self.conv = nn.Conv2d(input_channels, output_channels, kernel_size=kernel_size, stride=stride, padding=1)
self.relu = nn.ReLU()
def forward(self, x: torch.Tensor) -> torch.Tensor:
"""
Parameters
----------
x
of dimensions (B, C, H, W)
Returns
-------
torch.Tensor
of dimensions (B, C, H, W)
"""
c = self.conv(x)
r = self.relu(c)
return r
class LineCNN(nn.Module):
"""
Model that uses a simple CNN to process an image of a line of characters with a window, outputting a sequence of logits.
"""
def __init__(
self,
data_config: Dict[str, Any],
args: argparse.Namespace = None,
) -> None:
super().__init__()
self.data_config = data_config
self.args = vars(args) if args is not None else {}
self.num_classes = len(data_config["mapping"])
self.output_length = data_config["output_dims"][0]
self.limit_output_length = self.args.get("limit_output_length", False)
_C, H, _W = data_config["input_dims"]
conv_dim = self.args.get("conv_dim", CONV_DIM)
fc_dim = self.args.get("fc_dim", FC_DIM)
self.WW = self.args.get("window_width", WINDOW_WIDTH)
self.WS = self.args.get("window_stride", WINDOW_STRIDE)
# Input is (1, H, W)
self.conv1 = ConvBlock(1, conv_dim)
self.conv2 = ConvBlock(conv_dim, conv_dim)
self.conv3 = ConvBlock(conv_dim, conv_dim, stride=2)
# Conv math! https://pytorch.org/docs/stable/generated/torch.nn.Conv2d.html
        # With padding=1: OW = floor((W // 2 + 2 - WW // 2) / (WS // 2) + 1)
self.conv4 = ConvBlock(conv_dim, fc_dim, kernel_size=(H // 2, self.WW // 2), stride=(H // 2, self.WS // 2))
self.dropout = nn.Dropout(0.25)
self.fc1 = nn.Linear(fc_dim, fc_dim)
self.fc2 = nn.Linear(fc_dim, self.num_classes)
self._init_weights()
def _init_weights(self):
"""
A better weight initialization scheme than PyTorch default.
See https://github.com/pytorch/pytorch/issues/18182
"""
for m in self.modules():
if type(m) in {
nn.Conv2d,
nn.Conv3d,
nn.ConvTranspose2d,
nn.ConvTranspose3d,
nn.Linear,
}:
nn.init.kaiming_normal_(m.weight.data, a=0, mode="fan_out", nonlinearity="relu")
if m.bias is not None:
_fan_in, fan_out = nn.init._calculate_fan_in_and_fan_out(m.weight.data)
bound = 1 / math.sqrt(fan_out)
nn.init.normal_(m.bias, -bound, bound)
def forward(self, x: torch.Tensor) -> torch.Tensor:
"""
Parameters
----------
x
(B, 1, H, W) input image
Returns
-------
torch.Tensor
(B, C, S) logits, where S is the length of the sequence and C is the number of classes
S can be computed from W and self.window_width
C is self.num_classes
"""
_B, _C, _H, W = x.shape
x = self.conv1(x) # -> (B, CONV_DIM, H, W)
x = self.conv2(x) # -> (B, CONV_DIM, H, W)
x = self.conv3(x) # -> (B, CONV_DIM, H//2, W//2)
OW = math.floor((W // 2 + 2 - self.WW // 2) / (self.WS // 2) + 1)
x = self.conv4(x) # -> (B, FC_DIM, 1, OW)
assert x.shape[-1] == OW
        x = x.squeeze(2).permute(0, 2, 1)  # -> (B, OW, FC_DIM); squeeze only the height dim so batch size 1 is preserved
x = F.relu(self.fc1(x)) # -> (B, OW, FC_DIM)
x = self.dropout(x)
x = self.fc2(x) # -> (B, OW, self.C)
x = x.permute(0, 2, 1) # -> (B, self.C, OW)
if self.limit_output_length:
x = x[:, :, : self.output_length]
return x
@staticmethod
def add_to_argparse(parser):
parser.add_argument("--conv_dim", type=int, default=CONV_DIM)
parser.add_argument("--fc_dim", type=int, default=FC_DIM)
parser.add_argument(
"--window_width",
type=int,
default=WINDOW_WIDTH,
help="Width of the window that will slide over the input image.",
)
parser.add_argument(
"--window_stride",
type=int,
default=WINDOW_STRIDE,
help="Stride of the window that will slide over the input image.",
)
parser.add_argument("--limit_output_length", action="store_true", default=False)
return parser
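# Minimal shape check (a sketch; the data_config below is hypothetical, not taken from a
# real dataset). With H=28, W=112, window width/stride 28 and padding 1, the conv math
# above gives OW = floor((56 + 2 - 14) / 14 + 1) = 4.
if __name__ == "__main__":
    data_config = {
        "mapping": list("0123456789"),  # 10 classes
        "input_dims": (1, 28, 112),     # (C, H, W)
        "output_dims": (8,),            # maximum output sequence length
    }
    model = LineCNN(data_config)
    x = torch.randn(2, 1, 28, 112)
    with torch.no_grad():
        logits = model(x)
    print(logits.shape)  # expected: torch.Size([2, 10, 4])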
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import threading
import time
import cloudify_rest_client
import cloudify_rest_client.exceptions as cloudify_exceptions
from murano.dsl import dsl
from oslo_config import cfg as config
from yaql.language import specs
from yaql.language import yaqltypes
import cfg
CONF = config.CONF
archive_upload_lock = threading.Lock()
class CloudifyClient(object):
@specs.parameter('app', dsl.MuranoObjectParameter('io.murano.Application'))
def __init__(self, app):
cloudify_manager = self.CONF.cloudify_manager
self._client = cloudify_rest_client.CloudifyClient(cloudify_manager)
self._blueprint_id = '{0}-{1}'.format(app.type.name, app.type.version)
self._deployment_id = app.id
self._application_package = app.package
@specs.parameter('entry_point', yaqltypes.String())
def publish_blueprint(self, entry_point):
global archive_upload_lock
if self._check_blueprint_exists():
return
path = self._application_package.get_resource(entry_point)
with archive_upload_lock:
try:
self._client.blueprints.upload(
path, self._blueprint_id)
except cloudify_exceptions.CloudifyClientError as e:
if e.status_code != 409:
raise
def _check_blueprint_exists(self):
try:
self._client.blueprints.get(self._blueprint_id)
return True
except cloudify_exceptions.CloudifyClientError as e:
if e.status_code == 404:
return False
raise
@specs.parameter('parameters', dict)
def create_deployment(self, parameters=None):
self._client.deployments.create(
self._blueprint_id, self._deployment_id, parameters)
def delete_deployment(self):
self._client.deployments.delete(self._deployment_id)
def wait_deployment_ready(self):
while True:
executions = self._client.executions.list(self._deployment_id)
if any(t.status in ('pending', 'started') for t in executions):
time.sleep(3)
else:
deployment = self._client.deployments.get(self._deployment_id)
return deployment.outputs
@specs.parameter('name', yaqltypes.String())
@specs.parameter('parameters', dict)
def execute_workflow(self, name, parameters=None):
self._client.executions.start(self._deployment_id, name, parameters)
@classmethod
def init_plugin(cls):
cls.CONF = cfg.init_config(CONF)
|
from django.contrib.auth import forms as admin_forms
from django.contrib.auth import get_user_model
from django.utils.translation import gettext_lazy as _
User = get_user_model()
class UserChangeForm(admin_forms.UserChangeForm):
class Meta(admin_forms.UserChangeForm.Meta):
model = User
class UserCreationForm(admin_forms.UserCreationForm):
class Meta(admin_forms.UserCreationForm.Meta):
model = User
error_messages = {
"username": {"unique": _("This username has already been taken.")}
}
labels = {
'username': _('Name of the user'),
'password': _('User password'),
'email': _('User email'),
'avatar': _('User avatar')
}
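# Sketch of how these forms are usually wired into the admin (belongs in a separate
# admin.py; the import style and registration below are assumptions, not part of this
# project's code):
#
#   from django.contrib import admin
#   from django.contrib.auth import admin as auth_admin
#
#   @admin.register(User)
#   class UserAdmin(auth_admin.UserAdmin):
#       form = UserChangeForm
#       add_form = UserCreationForm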
|
from random import shuffle
counter=1
#index = None
index = []
#indexlist = []
decrypt_list = []
intermediate = []
words = ['B', 'A', 'L', 'K','J','I']
newwords = words.copy() # Copy words
shuffle(newwords) # Shuffle newwords
for i in range(len(words)):
    for j in range(len(newwords)):
        if words[i] == newwords[j]:
            index.append(j)
print("Original list: ",words)
#zipped_lists = zip(index, newwords)
#print(zipped_lists)
'''
sorted_zipped_lists = sorted(zipped_lists)
decrypt_list = [element for _, element in sorted_zipped_lists]
'''
print("Index: ",index)
print("New list: ",newwords)
#print("Decrypted List :", decrypt_list)
for i in range(len(newwords)):
intermediate.append((i,newwords[i]))
print(intermediate)
# Use `pair` instead of shadowing the built-in name `tuple`
res = [pair for x in index for pair in intermediate if pair[0] == x]
#print(res)
for pair in res:
    alphabet = pair[1]
    decrypt_list.append(alphabet)
print(res)
print(decrypt_list)
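# Equivalent shortcut (a sketch): because index[i] stores the position of words[i]
# inside newwords, reading newwords back in that order reproduces the original list.
decrypt_list_direct = [newwords[j] for j in index]
print(decrypt_list_direct)
assert decrypt_list_direct == words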
|
import numpy as np
from sys import argv
tobs = int(argv[1])
p0 = np.zeros(10)
p2 = np.zeros(10)
p1 = np.zeros(10)
Zab = np.zeros(10)
rate = np.zeros(10)
for i in range(10):
da = np.loadtxt('tobs%d/reweighted_hist_%d.dat'%(tobs,i))
p0[i] = np.exp(-da[-2,1])
p2[i] = np.exp(-da[-1,1])
p1[i] = np.exp(-da[-3,1])
Zab = p1/(p0+p2)
f = open('tobs%d/path_partition_function_%d.dat'%(tobs,tobs),'w')
for i in range(10):
f.write('%d %.16f\n'%(i,Zab[i]))
Zab_avg = np.sum(Zab[:])/10.
for i in range(10):
Zab[i] -= Zab_avg
Zab *= Zab
std_err = np.sqrt(np.sum(Zab[:])/10.)
f.write('%.16f %.16f\n'%(Zab_avg,std_err))
f.close()
|
from pythonforandroid.recipe import CythonRecipe
from os.path import join
class ShapelyRecipe(CythonRecipe):
version = '1.7a1'
url = 'https://github.com/Toblerity/Shapely/archive/{version}.tar.gz'
depends = ['setuptools', 'libgeos']
# Actually, this recipe seems to compile/install fine for python2, but it
# fails at runtime when importing module with:
# `[Errno 2] No such file or directory`
conflicts = ['python2']
call_hostpython_via_targetpython = False
    # Patch to avoid the libgeos check (because it fails), insert environment
    # variables for our libgeos build (includes, lib paths...) and force
    # the Cython compilation to raise an error if it fails
patches = ['setup.patch']
# Don't Force Cython
# setup_extra_args = ['sdist']
def get_recipe_env(self, arch=None, with_flags_in_cc=True):
env = super(ShapelyRecipe, self).get_recipe_env(arch)
libgeos_install = join(self.get_recipe(
'libgeos', self.ctx).get_build_dir(arch.arch), 'install_target')
        # All these `GEOS_X` variables should be strings, comma-separated
        # in case we need to pass more than one value
env['GEOS_INCLUDE_DIRS'] = join(libgeos_install, 'include')
env['GEOS_LIBRARY_DIRS'] = join(libgeos_install, 'lib')
env['GEOS_LIBRARIES'] = 'geos_c,geos'
return env
recipe = ShapelyRecipe()
|
import os
import torch
from tensorboardX import SummaryWriter
import time
import glob
import re
import datetime
import argparse
from pathlib import Path
import torch.distributed as dist
from pcdet.datasets import build_dataloader
from pcdet.models import build_network
from pcdet.utils import common_utils
from pcdet.config import cfg, cfg_from_list, cfg_from_yaml_file, log_config_to_file
from eval_utils import eval_utils
def parse_config():
parser = argparse.ArgumentParser(description='arg parser')
parser.add_argument('--cfg_file', type=str, default=None, help='specify the config for training')
parser.add_argument('--batch_size', type=int, default=16, required=False, help='batch size for training')
parser.add_argument('--epochs', type=int, default=80, required=False, help='Number of epochs to train for')
parser.add_argument('--workers', type=int, default=4, help='number of workers for dataloader')
parser.add_argument('--extra_tag', type=str, default='default', help='extra tag for this experiment')
parser.add_argument('--ckpt', type=str, default=None, help='checkpoint to start from')
parser.add_argument('--mgpus', action='store_true', default=False, help='whether to use multiple gpu')
parser.add_argument('--launcher', choices=['none', 'pytorch', 'slurm'], default='none')
    parser.add_argument('--tcp_port', type=int, default=18888, help='tcp port for distributed training')
parser.add_argument('--local_rank', type=int, default=0, help='local rank for distributed training')
parser.add_argument('--set', dest='set_cfgs', default=None, nargs=argparse.REMAINDER,
help='set extra config keys if needed')
parser.add_argument('--max_waiting_mins', type=int, default=30, help='max waiting minutes')
parser.add_argument('--start_epoch', type=int, default=0, help='')
parser.add_argument('--eval_tag', type=str, default='default', help='eval tag for this experiment')
parser.add_argument('--eval_all', action='store_true', default=False, help='whether to evaluate all checkpoints')
parser.add_argument('--ckpt_dir', type=str, default=None, help='specify a ckpt directory to be evaluated if needed')
parser.add_argument('--save_to_file', action='store_true', default=False, help='')
args = parser.parse_args()
cfg_from_yaml_file(args.cfg_file, cfg)
cfg.TAG = Path(args.cfg_file).stem
cfg.EXP_GROUP_PATH = '/'.join(args.cfg_file.split('/')[1:-1]) # remove 'cfgs' and 'xxxx.yaml'
if args.set_cfgs is not None:
cfg_from_list(args.set_cfgs, cfg)
return args, cfg
def eval_single_ckpt(model, test_loader, args, eval_output_dir, logger, epoch_id, dist_test=False):
# load checkpoint
model.load_params_from_file(filename=args.ckpt, logger=logger, to_cpu=dist_test)
model.cuda()
# start evaluation
eval_utils.eval_one_epoch(
cfg, model, test_loader, epoch_id, logger, dist_test=dist_test,
result_dir=eval_output_dir, save_to_file=args.save_to_file
)
def get_no_evaluated_ckpt(ckpt_dir, ckpt_record_file, args):
ckpt_list = glob.glob(os.path.join(ckpt_dir, '*checkpoint_epoch_*.pth'))
ckpt_list.sort(key=os.path.getmtime)
evaluated_ckpt_list = [float(x.strip()) for x in open(ckpt_record_file, 'r').readlines()]
for cur_ckpt in ckpt_list:
num_list = re.findall('checkpoint_epoch_(.*).pth', cur_ckpt)
if num_list.__len__() == 0:
continue
epoch_id = num_list[-1]
if 'optim' in epoch_id:
continue
if float(epoch_id) not in evaluated_ckpt_list and int(float(epoch_id)) >= args.start_epoch:
return epoch_id, cur_ckpt
return -1, None
def repeat_eval_ckpt(model, test_loader, args, eval_output_dir, logger, ckpt_dir, dist_test=False):
# evaluated ckpt record
ckpt_record_file = eval_output_dir / ('eval_list_%s.txt' % cfg.DATA_CONFIG.DATA_SPLIT['test'])
with open(ckpt_record_file, 'a'):
pass
# tensorboard log
if cfg.LOCAL_RANK == 0:
tb_log = SummaryWriter(log_dir=str(eval_output_dir / ('tensorboard_%s' % cfg.DATA_CONFIG.DATA_SPLIT['test'])))
total_time = 0
first_eval = True
while True:
# check whether there is checkpoint which is not evaluated
cur_epoch_id, cur_ckpt = get_no_evaluated_ckpt(ckpt_dir, ckpt_record_file, args)
if cur_epoch_id == -1 or int(float(cur_epoch_id)) < args.start_epoch:
wait_second = 30
if cfg.LOCAL_RANK == 0:
print('Wait %s seconds for next check (progress: %.1f / %d minutes): %s \r'
% (wait_second, total_time * 1.0 / 60, args.max_waiting_mins, ckpt_dir), end='', flush=True)
time.sleep(wait_second)
total_time += 30
if total_time > args.max_waiting_mins * 60 and (first_eval is False):
break
continue
total_time = 0
first_eval = False
model.load_params_from_file(filename=cur_ckpt, logger=logger, to_cpu=dist_test)
model.cuda()
# start evaluation
cur_result_dir = eval_output_dir / ('epoch_%s' % cur_epoch_id) / cfg.DATA_CONFIG.DATA_SPLIT['test']
tb_dict = eval_utils.eval_one_epoch(
cfg, model, test_loader, cur_epoch_id, logger, dist_test=dist_test,
result_dir=cur_result_dir, save_to_file=args.save_to_file
)
if cfg.LOCAL_RANK == 0:
for key, val in tb_dict.items():
tb_log.add_scalar(key, val, cur_epoch_id)
# record this epoch which has been evaluated
with open(ckpt_record_file, 'a') as f:
print('%s' % cur_epoch_id, file=f)
logger.info('Epoch %s has been evaluated' % cur_epoch_id)
def main():
args, cfg = parse_config()
if args.launcher == 'none':
dist_test = False
else:
args.batch_size, cfg.LOCAL_RANK = getattr(common_utils, 'init_dist_%s' % args.launcher)(
args.batch_size, args.tcp_port, args.local_rank, backend='nccl'
)
dist_test = True
output_dir = cfg.ROOT_DIR / 'output' / cfg.EXP_GROUP_PATH / cfg.TAG / args.extra_tag
output_dir.mkdir(parents=True, exist_ok=True)
eval_output_dir = output_dir / 'eval'
if not args.eval_all:
num_list = re.findall(r'\d+', args.ckpt) if args.ckpt is not None else []
epoch_id = num_list[-1] if num_list.__len__() > 0 else 'no_number'
eval_output_dir = eval_output_dir / ('epoch_%s' % epoch_id) / cfg.DATA_CONFIG.DATA_SPLIT['test']
else:
eval_output_dir = eval_output_dir / 'eval_all_default'
if args.eval_tag is not None:
eval_output_dir = eval_output_dir / args.eval_tag
eval_output_dir.mkdir(parents=True, exist_ok=True)
log_file = eval_output_dir / ('log_eval_%s.txt' % datetime.datetime.now().strftime('%Y%m%d-%H%M%S'))
logger = common_utils.create_logger(log_file, rank=cfg.LOCAL_RANK)
# log to file
logger.info('**********************Start logging**********************')
gpu_list = os.environ['CUDA_VISIBLE_DEVICES'] if 'CUDA_VISIBLE_DEVICES' in os.environ.keys() else 'ALL'
logger.info('CUDA_VISIBLE_DEVICES=%s' % gpu_list)
if dist_test:
total_gpus = dist.get_world_size()
logger.info('total_batch_size: %d' % (total_gpus * args.batch_size))
for key, val in vars(args).items():
logger.info('{:16} {}'.format(key, val))
log_config_to_file(cfg, logger=logger)
ckpt_dir = args.ckpt_dir if args.ckpt_dir is not None else output_dir / 'ckpt'
test_set, test_loader, sampler = build_dataloader(
dataset_cfg=cfg.DATA_CONFIG,
class_names=cfg.CLASS_NAMES,
batch_size=args.batch_size,
dist=dist_test, workers=args.workers, logger=logger, training=False
)
model = build_network(model_cfg=cfg.MODEL, num_class=len(cfg.CLASS_NAMES), dataset=test_set)
with torch.no_grad():
if args.eval_all:
repeat_eval_ckpt(model, test_loader, args, eval_output_dir, logger, ckpt_dir, dist_test=dist_test)
else:
eval_single_ckpt(model, test_loader, args, eval_output_dir, logger, epoch_id, dist_test=dist_test)
if __name__ == '__main__':
main()
|
from scripts.bilstm_tagger import bilstm_tagger
from scripts.bilstm_tagger_model import build_model
|
"""
Contains data structures designed for manipulating panel (3-dimensional) data
"""
# pylint: disable=E1103,W0231,W0212,W0621
from __future__ import division
import warnings
import numpy as np
from pandas.types.cast import (_infer_dtype_from_scalar,
_possibly_cast_item)
from pandas.types.common import (is_integer, is_list_like,
is_string_like, is_scalar)
from pandas.types.missing import notnull
import pandas.computation.expressions as expressions
import pandas.core.common as com
import pandas.core.ops as ops
import pandas.core.missing as missing
from pandas import compat
from pandas.compat import (map, zip, range, u, OrderedDict, OrderedDefaultdict)
from pandas.compat.numpy import function as nv
from pandas.core.common import PandasError, _try_sort, _default_index
from pandas.core.frame import DataFrame
from pandas.core.generic import NDFrame, _shared_docs
from pandas.core.index import (Index, MultiIndex, _ensure_index,
_get_combined_index)
from pandas.formats.printing import pprint_thing
from pandas.core.indexing import maybe_droplevels
from pandas.core.internals import (BlockManager,
create_block_manager_from_arrays,
create_block_manager_from_blocks)
from pandas.core.ops import _op_descriptions
from pandas.core.series import Series
from pandas.tools.util import cartesian_product
from pandas.util.decorators import (deprecate, Appender)
_shared_doc_kwargs = dict(
axes='items, major_axis, minor_axis',
klass="Panel",
axes_single_arg="{0, 1, 2, 'items', 'major_axis', 'minor_axis'}")
_shared_doc_kwargs['args_transpose'] = ("three positional arguments: each one "
                                        "of\n%s" %
                                        _shared_doc_kwargs['axes_single_arg'])
def _ensure_like_indices(time, panels):
"""
Makes sure that time and panels are conformable
"""
n_time = len(time)
n_panel = len(panels)
u_panels = np.unique(panels) # this sorts!
u_time = np.unique(time)
if len(u_time) == n_time:
time = np.tile(u_time, len(u_panels))
if len(u_panels) == n_panel:
panels = np.repeat(u_panels, len(u_time))
return time, panels
def panel_index(time, panels, names=None):
"""
Returns a multi-index suitable for a panel-like DataFrame
Parameters
----------
time : array-like
Time index, does not have to repeat
panels : array-like
Panel index, does not have to repeat
names : list, optional
List containing the names of the indices
Returns
-------
multi_index : MultiIndex
Time index is the first level, the panels are the second level.
Examples
--------
>>> years = range(1960,1963)
>>> panels = ['A', 'B', 'C']
>>> panel_idx = panel_index(years, panels)
>>> panel_idx
MultiIndex([(1960, 'A'), (1961, 'A'), (1962, 'A'), (1960, 'B'),
(1961, 'B'), (1962, 'B'), (1960, 'C'), (1961, 'C'),
(1962, 'C')], dtype=object)
or
>>> import numpy as np
>>> years = np.repeat(range(1960,1963), 3)
>>> panels = np.tile(['A', 'B', 'C'], 3)
>>> panel_idx = panel_index(years, panels)
>>> panel_idx
MultiIndex([(1960, 'A'), (1960, 'B'), (1960, 'C'), (1961, 'A'),
(1961, 'B'), (1961, 'C'), (1962, 'A'), (1962, 'B'),
(1962, 'C')], dtype=object)
"""
if names is None:
names = ['time', 'panel']
time, panels = _ensure_like_indices(time, panels)
return MultiIndex.from_arrays([time, panels], sortorder=None, names=names)
class Panel(NDFrame):
"""
Represents wide format panel data, stored as 3-dimensional array
Parameters
----------
data : ndarray (items x major x minor), or dict of DataFrames
items : Index or array-like
axis=0
major_axis : Index or array-like
axis=1
minor_axis : Index or array-like
axis=2
dtype : dtype, default None
Data type to force, otherwise infer
copy : boolean, default False
Copy data from inputs. Only affects DataFrame / 2d ndarray input
"""
@property
def _constructor(self):
return type(self)
_constructor_sliced = DataFrame
def __init__(self, data=None, items=None, major_axis=None, minor_axis=None,
copy=False, dtype=None):
self._init_data(data=data, items=items, major_axis=major_axis,
minor_axis=minor_axis, copy=copy, dtype=dtype)
def _init_data(self, data, copy, dtype, **kwargs):
"""
Generate ND initialization; axes are passed
as required objects to __init__
"""
if data is None:
data = {}
if dtype is not None:
dtype = self._validate_dtype(dtype)
passed_axes = [kwargs.pop(a, None) for a in self._AXIS_ORDERS]
if kwargs:
raise TypeError('_init_data() got an unexpected keyword '
'argument "{0}"'.format(list(kwargs.keys())[0]))
axes = None
if isinstance(data, BlockManager):
if any(x is not None for x in passed_axes):
axes = [x if x is not None else y
for x, y in zip(passed_axes, data.axes)]
mgr = data
elif isinstance(data, dict):
mgr = self._init_dict(data, passed_axes, dtype=dtype)
copy = False
dtype = None
elif isinstance(data, (np.ndarray, list)):
mgr = self._init_matrix(data, passed_axes, dtype=dtype, copy=copy)
copy = False
dtype = None
elif is_scalar(data) and all(x is not None for x in passed_axes):
if dtype is None:
dtype, data = _infer_dtype_from_scalar(data)
values = np.empty([len(x) for x in passed_axes], dtype=dtype)
values.fill(data)
mgr = self._init_matrix(values, passed_axes, dtype=dtype,
copy=False)
copy = False
else: # pragma: no cover
raise PandasError('Panel constructor not properly called!')
NDFrame.__init__(self, mgr, axes=axes, copy=copy, dtype=dtype)
def _init_dict(self, data, axes, dtype=None):
haxis = axes.pop(self._info_axis_number)
# prefilter if haxis passed
if haxis is not None:
haxis = _ensure_index(haxis)
data = OrderedDict((k, v)
for k, v in compat.iteritems(data)
if k in haxis)
else:
ks = list(data.keys())
if not isinstance(data, OrderedDict):
ks = _try_sort(ks)
haxis = Index(ks)
for k, v in compat.iteritems(data):
if isinstance(v, dict):
data[k] = self._constructor_sliced(v)
# extract axis for remaining axes & create the slicemap
raxes = [self._extract_axis(self, data, axis=i) if a is None else a
for i, a in enumerate(axes)]
raxes_sm = self._extract_axes_for_slice(self, raxes)
# shallow copy
arrays = []
haxis_shape = [len(a) for a in raxes]
for h in haxis:
v = values = data.get(h)
if v is None:
values = np.empty(haxis_shape, dtype=dtype)
values.fill(np.nan)
elif isinstance(v, self._constructor_sliced):
d = raxes_sm.copy()
d['copy'] = False
v = v.reindex(**d)
if dtype is not None:
v = v.astype(dtype)
values = v.values
arrays.append(values)
return self._init_arrays(arrays, haxis, [haxis] + raxes)
def _init_arrays(self, arrays, arr_names, axes):
return create_block_manager_from_arrays(arrays, arr_names, axes)
@classmethod
def from_dict(cls, data, intersect=False, orient='items', dtype=None):
"""
Construct Panel from dict of DataFrame objects
Parameters
----------
data : dict
{field : DataFrame}
intersect : boolean
Intersect indexes of input DataFrames
orient : {'items', 'minor'}, default 'items'
The "orientation" of the data. If the keys of the passed dict
should be the items of the result panel, pass 'items'
(default). Otherwise if the columns of the values of the passed
DataFrame objects should be the items (which in the case of
mixed-dtype data you should do), instead pass 'minor'
dtype : dtype, default None
Data type to force, otherwise infer
Returns
-------
Panel
"""
orient = orient.lower()
if orient == 'minor':
new_data = OrderedDefaultdict(dict)
for col, df in compat.iteritems(data):
for item, s in compat.iteritems(df):
new_data[item][col] = s
data = new_data
elif orient != 'items': # pragma: no cover
raise ValueError('Orientation must be one of {items, minor}.')
d = cls._homogenize_dict(cls, data, intersect=intersect, dtype=dtype)
ks = list(d['data'].keys())
if not isinstance(d['data'], OrderedDict):
ks = list(sorted(ks))
d[cls._info_axis_name] = Index(ks)
return cls(**d)
def __getitem__(self, key):
key = com._apply_if_callable(key, self)
if isinstance(self._info_axis, MultiIndex):
return self._getitem_multilevel(key)
if not (is_list_like(key) or isinstance(key, slice)):
return super(Panel, self).__getitem__(key)
return self.ix[key]
def _getitem_multilevel(self, key):
info = self._info_axis
loc = info.get_loc(key)
if isinstance(loc, (slice, np.ndarray)):
new_index = info[loc]
result_index = maybe_droplevels(new_index, key)
slices = [loc] + [slice(None) for x in range(self._AXIS_LEN - 1)]
new_values = self.values[slices]
d = self._construct_axes_dict(self._AXIS_ORDERS[1:])
d[self._info_axis_name] = result_index
result = self._constructor(new_values, **d)
return result
else:
return self._get_item_cache(key)
def _init_matrix(self, data, axes, dtype=None, copy=False):
values = self._prep_ndarray(self, data, copy=copy)
if dtype is not None:
try:
values = values.astype(dtype)
except Exception:
raise ValueError('failed to cast to %s' % dtype)
shape = values.shape
fixed_axes = []
for i, ax in enumerate(axes):
if ax is None:
ax = _default_index(shape[i])
else:
ax = _ensure_index(ax)
fixed_axes.append(ax)
return create_block_manager_from_blocks([values], fixed_axes)
# ----------------------------------------------------------------------
# Comparison methods
def _compare_constructor(self, other, func):
if not self._indexed_same(other):
raise Exception('Can only compare identically-labeled '
'same type objects')
new_data = {}
for col in self._info_axis:
new_data[col] = func(self[col], other[col])
d = self._construct_axes_dict(copy=False)
return self._constructor(data=new_data, **d)
# ----------------------------------------------------------------------
# Magic methods
def __unicode__(self):
"""
Return a string representation for a particular Panel
Invoked by unicode(df) in py2 only.
Yields a Unicode String in both py2/py3.
"""
class_name = str(self.__class__)
shape = self.shape
dims = u('Dimensions: %s') % ' x '.join(
["%d (%s)" % (s, a) for a, s in zip(self._AXIS_ORDERS, shape)])
def axis_pretty(a):
v = getattr(self, a)
if len(v) > 0:
return u('%s axis: %s to %s') % (a.capitalize(),
pprint_thing(v[0]),
pprint_thing(v[-1]))
else:
return u('%s axis: None') % a.capitalize()
output = '\n'.join(
[class_name, dims] + [axis_pretty(a) for a in self._AXIS_ORDERS])
return output
def _get_plane_axes_index(self, axis):
"""
Get my plane axes indexes: these are already
(as compared with higher level planes),
as we are returning a DataFrame axes indexes
"""
axis_name = self._get_axis_name(axis)
        if axis_name == 'major_axis':
            index = 'minor_axis'
            columns = 'items'
        elif axis_name == 'minor_axis':
            index = 'major_axis'
            columns = 'items'
elif axis_name == 'items':
index = 'major_axis'
columns = 'minor_axis'
return index, columns
def _get_plane_axes(self, axis):
"""
Get my plane axes indexes: these are already
(as compared with higher level planes),
as we are returning a DataFrame axes
"""
return [self._get_axis(axi)
for axi in self._get_plane_axes_index(axis)]
fromDict = from_dict
def to_sparse(self, *args, **kwargs):
"""
NOT IMPLEMENTED: do not call this method, as sparsifying is not
supported for Panel objects and will raise an error.
Convert to SparsePanel
"""
raise NotImplementedError("sparsifying is not supported "
"for Panel objects")
def to_excel(self, path, na_rep='', engine=None, **kwargs):
"""
Write each DataFrame in Panel to a separate excel sheet
Parameters
----------
path : string or ExcelWriter object
File path or existing ExcelWriter
na_rep : string, default ''
Missing data representation
engine : string, default None
write engine to use - you can also set this via the options
``io.excel.xlsx.writer``, ``io.excel.xls.writer``, and
``io.excel.xlsm.writer``.
Other Parameters
----------------
float_format : string, default None
Format string for floating point numbers
cols : sequence, optional
Columns to write
header : boolean or list of string, default True
Write out column names. If a list of string is given it is
assumed to be aliases for the column names
index : boolean, default True
Write row names (index)
index_label : string or sequence, default None
Column label for index column(s) if desired. If None is given, and
`header` and `index` are True, then the index names are used. A
sequence should be given if the DataFrame uses MultiIndex.
startrow : upper left cell row to dump data frame
startcol : upper left cell column to dump data frame
Notes
-----
Keyword arguments (and na_rep) are passed to the ``to_excel`` method
for each DataFrame written.
"""
from pandas.io.excel import ExcelWriter
if isinstance(path, compat.string_types):
writer = ExcelWriter(path, engine=engine)
else:
writer = path
kwargs['na_rep'] = na_rep
for item, df in self.iteritems():
name = str(item)
df.to_excel(writer, name, **kwargs)
writer.save()
def as_matrix(self):
self._consolidate_inplace()
return self._data.as_matrix()
# ----------------------------------------------------------------------
# Getting and setting elements
def get_value(self, *args, **kwargs):
"""
Quickly retrieve single value at (item, major, minor) location
Parameters
----------
item : item label (panel item)
major : major axis label (panel item row)
minor : minor axis label (panel item column)
takeable : interpret the passed labels as indexers, default False
Returns
-------
value : scalar value
"""
nargs = len(args)
nreq = self._AXIS_LEN
# require an arg for each axis
if nargs != nreq:
raise TypeError('There must be an argument for each axis, you gave'
' {0} args, but {1} are required'.format(nargs,
nreq))
takeable = kwargs.pop('takeable', None)
if kwargs:
raise TypeError('get_value() got an unexpected keyword '
'argument "{0}"'.format(list(kwargs.keys())[0]))
if takeable is True:
lower = self._iget_item_cache(args[0])
else:
lower = self._get_item_cache(args[0])
return lower.get_value(*args[1:], takeable=takeable)
def set_value(self, *args, **kwargs):
"""
Quickly set single value at (item, major, minor) location
Parameters
----------
item : item label (panel item)
major : major axis label (panel item row)
minor : minor axis label (panel item column)
value : scalar
takeable : interpret the passed labels as indexers, default False
Returns
-------
panel : Panel
If label combo is contained, will be reference to calling Panel,
otherwise a new object
"""
# require an arg for each axis and the value
nargs = len(args)
nreq = self._AXIS_LEN + 1
if nargs != nreq:
raise TypeError('There must be an argument for each axis plus the '
'value provided, you gave {0} args, but {1} are '
'required'.format(nargs, nreq))
takeable = kwargs.pop('takeable', None)
if kwargs:
raise TypeError('set_value() got an unexpected keyword '
'argument "{0}"'.format(list(kwargs.keys())[0]))
try:
if takeable is True:
lower = self._iget_item_cache(args[0])
else:
lower = self._get_item_cache(args[0])
lower.set_value(*args[1:], takeable=takeable)
return self
except KeyError:
axes = self._expand_axes(args)
d = self._construct_axes_dict_from(self, axes, copy=False)
result = self.reindex(**d)
args = list(args)
likely_dtype, args[-1] = _infer_dtype_from_scalar(args[-1])
made_bigger = not np.array_equal(axes[0], self._info_axis)
# how to make this logic simpler?
if made_bigger:
_possibly_cast_item(result, args[0], likely_dtype)
return result.set_value(*args)
def _box_item_values(self, key, values):
if self.ndim == values.ndim:
result = self._constructor(values)
# a dup selection will yield a full ndim
if result._get_axis(0).is_unique:
result = result[key]
return result
d = self._construct_axes_dict_for_slice(self._AXIS_ORDERS[1:])
return self._constructor_sliced(values, **d)
def __setitem__(self, key, value):
key = com._apply_if_callable(key, self)
shape = tuple(self.shape)
if isinstance(value, self._constructor_sliced):
value = value.reindex(
**self._construct_axes_dict_for_slice(self._AXIS_ORDERS[1:]))
mat = value.values
elif isinstance(value, np.ndarray):
if value.shape != shape[1:]:
raise ValueError('shape of value must be {0}, shape of given '
'object was {1}'.format(
shape[1:], tuple(map(int, value.shape))))
mat = np.asarray(value)
elif is_scalar(value):
dtype, value = _infer_dtype_from_scalar(value)
mat = np.empty(shape[1:], dtype=dtype)
mat.fill(value)
else:
raise TypeError('Cannot set item of type: %s' % str(type(value)))
mat = mat.reshape(tuple([1]) + shape[1:])
NDFrame._set_item(self, key, mat)
def _unpickle_panel_compat(self, state): # pragma: no cover
"Unpickle the panel"
_unpickle = com._unpickle_array
vals, items, major, minor = state
items = _unpickle(items)
major = _unpickle(major)
minor = _unpickle(minor)
values = _unpickle(vals)
wp = Panel(values, items, major, minor)
self._data = wp._data
def conform(self, frame, axis='items'):
"""
Conform input DataFrame to align with chosen axis pair.
Parameters
----------
frame : DataFrame
axis : {'items', 'major', 'minor'}
Axis the input corresponds to. E.g., if axis='major', then
the frame's columns would be items, and the index would be
values of the minor axis
Returns
-------
DataFrame
"""
axes = self._get_plane_axes(axis)
return frame.reindex(**self._extract_axes_for_slice(self, axes))
def head(self, n=5):
raise NotImplementedError
def tail(self, n=5):
raise NotImplementedError
def round(self, decimals=0, *args, **kwargs):
"""
Round each value in Panel to a specified number of decimal places.
.. versionadded:: 0.18.0
Parameters
----------
decimals : int
Number of decimal places to round to (default: 0).
If decimals is negative, it specifies the number of
positions to the left of the decimal point.
Returns
-------
Panel object
See Also
--------
numpy.around
"""
nv.validate_round(args, kwargs)
if is_integer(decimals):
result = np.apply_along_axis(np.round, 0, self.values)
return self._wrap_result(result, axis=0)
raise TypeError("decimals must be an integer")
def _needs_reindex_multi(self, axes, method, level):
""" don't allow a multi reindex on Panel or above ndim """
return False
def align(self, other, **kwargs):
raise NotImplementedError
def dropna(self, axis=0, how='any', inplace=False):
"""
Drop 2D from panel, holding passed axis constant
Parameters
----------
axis : int, default 0
Axis to hold constant. E.g. axis=1 will drop major_axis entries
having a certain amount of NA data
how : {'all', 'any'}, default 'any'
'any': one or more values are NA in the DataFrame along the
axis. For 'all' they all must be.
inplace : bool, default False
If True, do operation inplace and return None.
Returns
-------
dropped : Panel
"""
axis = self._get_axis_number(axis)
values = self.values
mask = notnull(values)
for ax in reversed(sorted(set(range(self._AXIS_LEN)) - set([axis]))):
mask = mask.sum(ax)
per_slice = np.prod(values.shape[:axis] + values.shape[axis + 1:])
if how == 'all':
cond = mask > 0
else:
cond = mask == per_slice
new_ax = self._get_axis(axis)[cond]
result = self.reindex_axis(new_ax, axis=axis)
if inplace:
self._update_inplace(result)
else:
return result
def _combine(self, other, func, axis=0):
if isinstance(other, Panel):
return self._combine_panel(other, func)
elif isinstance(other, DataFrame):
return self._combine_frame(other, func, axis=axis)
elif is_scalar(other):
return self._combine_const(other, func)
else:
raise NotImplementedError("%s is not supported in combine "
"operation with %s" %
(str(type(other)), str(type(self))))
def _combine_const(self, other, func):
with np.errstate(all='ignore'):
new_values = func(self.values, other)
d = self._construct_axes_dict()
return self._constructor(new_values, **d)
def _combine_frame(self, other, func, axis=0):
index, columns = self._get_plane_axes(axis)
axis = self._get_axis_number(axis)
other = other.reindex(index=index, columns=columns)
with np.errstate(all='ignore'):
if axis == 0:
new_values = func(self.values, other.values)
elif axis == 1:
new_values = func(self.values.swapaxes(0, 1), other.values.T)
new_values = new_values.swapaxes(0, 1)
elif axis == 2:
new_values = func(self.values.swapaxes(0, 2), other.values)
new_values = new_values.swapaxes(0, 2)
return self._constructor(new_values, self.items, self.major_axis,
self.minor_axis)
def _combine_panel(self, other, func):
items = self.items.union(other.items)
major = self.major_axis.union(other.major_axis)
minor = self.minor_axis.union(other.minor_axis)
# could check that everything's the same size, but forget it
this = self.reindex(items=items, major=major, minor=minor)
other = other.reindex(items=items, major=major, minor=minor)
with np.errstate(all='ignore'):
result_values = func(this.values, other.values)
return self._constructor(result_values, items, major, minor)
def major_xs(self, key):
"""
Return slice of panel along major axis
Parameters
----------
key : object
Major axis label
Returns
-------
y : DataFrame
index -> minor axis, columns -> items
Notes
-----
major_xs is only for getting, not setting values.
MultiIndex Slicers is a generic way to get/set values on any level or
levels and is a superset of major_xs functionality, see
:ref:`MultiIndex Slicers <advanced.mi_slicers>`
"""
return self.xs(key, axis=self._AXIS_LEN - 2)
def minor_xs(self, key):
"""
Return slice of panel along minor axis
Parameters
----------
key : object
Minor axis label
Returns
-------
y : DataFrame
index -> major axis, columns -> items
Notes
-----
minor_xs is only for getting, not setting values.
MultiIndex Slicers is a generic way to get/set values on any level or
levels and is a superset of minor_xs functionality, see
:ref:`MultiIndex Slicers <advanced.mi_slicers>`
"""
return self.xs(key, axis=self._AXIS_LEN - 1)
def xs(self, key, axis=1):
"""
Return slice of panel along selected axis
Parameters
----------
key : object
Label
        axis : {'items', 'major', 'minor'}, default 1/'major'
Returns
-------
y : ndim(self)-1
Notes
-----
xs is only for getting, not setting values.
MultiIndex Slicers is a generic way to get/set values on any level or
levels and is a superset of xs functionality, see
:ref:`MultiIndex Slicers <advanced.mi_slicers>`
"""
axis = self._get_axis_number(axis)
if axis == 0:
return self[key]
self._consolidate_inplace()
axis_number = self._get_axis_number(axis)
new_data = self._data.xs(key, axis=axis_number, copy=False)
result = self._construct_return_type(new_data)
copy = new_data.is_mixed_type
result._set_is_copy(self, copy=copy)
return result
_xs = xs
def _ixs(self, i, axis=0):
"""
i : int, slice, or sequence of integers
axis : int
"""
ax = self._get_axis(axis)
key = ax[i]
# xs cannot handle a non-scalar key, so just reindex here
# if we have a multi-index and a single tuple, then its a reduction
# (GH 7516)
if not (isinstance(ax, MultiIndex) and isinstance(key, tuple)):
if is_list_like(key):
indexer = {self._get_axis_name(axis): key}
return self.reindex(**indexer)
# a reduction
if axis == 0:
values = self._data.iget(i)
return self._box_item_values(key, values)
# xs by position
self._consolidate_inplace()
new_data = self._data.xs(i, axis=axis, copy=True, takeable=True)
return self._construct_return_type(new_data)
def groupby(self, function, axis='major'):
"""
Group data on given axis, returning GroupBy object
Parameters
----------
function : callable
Mapping function for chosen access
axis : {'major', 'minor', 'items'}, default 'major'
Returns
-------
grouped : PanelGroupBy
"""
from pandas.core.groupby import PanelGroupBy
axis = self._get_axis_number(axis)
return PanelGroupBy(self, function, axis=axis)
def to_frame(self, filter_observations=True):
"""
Transform wide format into long (stacked) format as DataFrame whose
columns are the Panel's items and whose index is a MultiIndex formed
of the Panel's major and minor axes.
Parameters
----------
filter_observations : boolean, default True
Drop (major, minor) pairs without a complete set of observations
across all the items
Returns
-------
y : DataFrame
"""
_, N, K = self.shape
if filter_observations:
# shaped like the return DataFrame
mask = notnull(self.values).all(axis=0)
# size = mask.sum()
selector = mask.ravel()
else:
# size = N * K
selector = slice(None, None)
data = {}
for item in self.items:
data[item] = self[item].values.ravel()[selector]
def construct_multi_parts(idx, n_repeat, n_shuffle=1):
axis_idx = idx.to_hierarchical(n_repeat, n_shuffle)
labels = [x[selector] for x in axis_idx.labels]
levels = axis_idx.levels
names = axis_idx.names
return labels, levels, names
def construct_index_parts(idx, major=True):
levels = [idx]
if major:
labels = [np.arange(N).repeat(K)[selector]]
names = idx.name or 'major'
else:
labels = np.arange(K).reshape(1, K)[np.zeros(N, dtype=int)]
labels = [labels.ravel()[selector]]
names = idx.name or 'minor'
names = [names]
return labels, levels, names
if isinstance(self.major_axis, MultiIndex):
major_labels, major_levels, major_names = construct_multi_parts(
self.major_axis, n_repeat=K)
else:
major_labels, major_levels, major_names = construct_index_parts(
self.major_axis)
if isinstance(self.minor_axis, MultiIndex):
minor_labels, minor_levels, minor_names = construct_multi_parts(
self.minor_axis, n_repeat=N, n_shuffle=K)
else:
minor_labels, minor_levels, minor_names = construct_index_parts(
self.minor_axis, major=False)
levels = major_levels + minor_levels
labels = major_labels + minor_labels
names = major_names + minor_names
index = MultiIndex(levels=levels, labels=labels, names=names,
verify_integrity=False)
return DataFrame(data, index=index, columns=self.items)
to_long = deprecate('to_long', to_frame)
toLong = deprecate('toLong', to_frame)
def apply(self, func, axis='major', **kwargs):
"""
Applies function along axis (or axes) of the Panel
Parameters
----------
func : function
Function to apply to each combination of 'other' axes
e.g. if axis = 'items', the combination of major_axis/minor_axis
will each be passed as a Series; if axis = ('items', 'major'),
DataFrames of items & major axis will be passed
axis : {'items', 'minor', 'major'}, or {0, 1, 2}, or a tuple with two
axes
Additional keyword arguments will be passed as keywords to the function
Examples
--------
Returns a Panel with the square root of each element
>>> p = pd.Panel(np.random.rand(4,3,2))
>>> p.apply(np.sqrt)
Equivalent to p.sum(1), returning a DataFrame
>>> p.apply(lambda x: x.sum(), axis=1)
Equivalent to previous:
>>> p.apply(lambda x: x.sum(), axis='minor')
Return the shapes of each DataFrame over axis 2 (i.e the shapes of
items x major), as a Series
>>> p.apply(lambda x: x.shape, axis=(0,1))
Returns
-------
result : Panel, DataFrame, or Series
"""
if kwargs and not isinstance(func, np.ufunc):
f = lambda x: func(x, **kwargs)
else:
f = func
# 2d-slabs
if isinstance(axis, (tuple, list)) and len(axis) == 2:
return self._apply_2d(f, axis=axis)
axis = self._get_axis_number(axis)
# try ufunc like
if isinstance(f, np.ufunc):
try:
with np.errstate(all='ignore'):
result = np.apply_along_axis(func, axis, self.values)
return self._wrap_result(result, axis=axis)
except (AttributeError):
pass
# 1d
return self._apply_1d(f, axis=axis)
def _apply_1d(self, func, axis):
axis_name = self._get_axis_name(axis)
ndim = self.ndim
values = self.values
# iter thru the axes
slice_axis = self._get_axis(axis)
slice_indexer = [0] * (ndim - 1)
indexer = np.zeros(ndim, 'O')
indlist = list(range(ndim))
indlist.remove(axis)
indexer[axis] = slice(None, None)
indexer.put(indlist, slice_indexer)
planes = [self._get_axis(axi) for axi in indlist]
shape = np.array(self.shape).take(indlist)
# all the iteration points
points = cartesian_product(planes)
results = []
for i in range(np.prod(shape)):
# construct the object
pts = tuple([p[i] for p in points])
indexer.put(indlist, slice_indexer)
obj = Series(values[tuple(indexer)], index=slice_axis, name=pts)
result = func(obj)
results.append(result)
# increment the indexer
slice_indexer[-1] += 1
n = -1
while (slice_indexer[n] >= shape[n]) and (n > (1 - ndim)):
slice_indexer[n - 1] += 1
slice_indexer[n] = 0
n -= 1
# empty object
if not len(results):
return self._constructor(**self._construct_axes_dict())
# same ndim as current
if isinstance(results[0], Series):
arr = np.vstack([r.values for r in results])
arr = arr.T.reshape(tuple([len(slice_axis)] + list(shape)))
tranp = np.array([axis] + indlist).argsort()
arr = arr.transpose(tuple(list(tranp)))
return self._constructor(arr, **self._construct_axes_dict())
# ndim-1 shape
results = np.array(results).reshape(shape)
if results.ndim == 2 and axis_name != self._info_axis_name:
results = results.T
planes = planes[::-1]
return self._construct_return_type(results, planes)
def _apply_2d(self, func, axis):
""" handle 2-d slices, equiv to iterating over the other axis """
ndim = self.ndim
axis = [self._get_axis_number(a) for a in axis]
# construct slabs, in 2-d this is a DataFrame result
indexer_axis = list(range(ndim))
for a in axis:
indexer_axis.remove(a)
indexer_axis = indexer_axis[0]
slicer = [slice(None, None)] * ndim
ax = self._get_axis(indexer_axis)
results = []
for i, e in enumerate(ax):
slicer[indexer_axis] = i
sliced = self.iloc[tuple(slicer)]
obj = func(sliced)
results.append((e, obj))
return self._construct_return_type(dict(results))
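    # e.g. apply(func, axis=('major', 'minor')) iterates over the items axis,
    # passing each item's major x minor DataFrame to func and collecting the
    # results into a dict keyed by item label before wrapping them back up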
def _reduce(self, op, name, axis=0, skipna=True, numeric_only=None,
filter_type=None, **kwds):
if numeric_only:
raise NotImplementedError('Panel.{0} does not implement '
'numeric_only.'.format(name))
axis_name = self._get_axis_name(axis)
axis_number = self._get_axis_number(axis_name)
f = lambda x: op(x, axis=axis_number, skipna=skipna, **kwds)
with np.errstate(all='ignore'):
result = f(self.values)
axes = self._get_plane_axes(axis_name)
if result.ndim == 2 and axis_name != self._info_axis_name:
result = result.T
return self._construct_return_type(result, axes)
def _construct_return_type(self, result, axes=None):
""" return the type for the ndim of the result """
ndim = getattr(result, 'ndim', None)
# need to assume they are the same
if ndim is None:
if isinstance(result, dict):
ndim = getattr(list(compat.itervalues(result))[0], 'ndim', 0)
# have a dict, so top-level is +1 dim
if ndim != 0:
ndim += 1
# scalar
if ndim == 0:
return Series(result)
# same as self
elif self.ndim == ndim:
# return the construction dictionary for these axes
if axes is None:
return self._constructor(result)
return self._constructor(result, **self._construct_axes_dict())
# sliced
elif self.ndim == ndim + 1:
if axes is None:
return self._constructor_sliced(result)
return self._constructor_sliced(
result, **self._extract_axes_for_slice(self, axes))
raise PandasError('invalid _construct_return_type [self->%s] '
'[result->%s]' % (self, result))
def _wrap_result(self, result, axis):
axis = self._get_axis_name(axis)
axes = self._get_plane_axes(axis)
if result.ndim == 2 and axis != self._info_axis_name:
result = result.T
return self._construct_return_type(result, axes)
@Appender(_shared_docs['reindex'] % _shared_doc_kwargs)
def reindex(self, items=None, major_axis=None, minor_axis=None, **kwargs):
major_axis = (major_axis if major_axis is not None else
kwargs.pop('major', None))
minor_axis = (minor_axis if minor_axis is not None else
kwargs.pop('minor', None))
return super(Panel, self).reindex(items=items, major_axis=major_axis,
minor_axis=minor_axis, **kwargs)
@Appender(_shared_docs['rename'] % _shared_doc_kwargs)
def rename(self, items=None, major_axis=None, minor_axis=None, **kwargs):
major_axis = (major_axis if major_axis is not None else
kwargs.pop('major', None))
minor_axis = (minor_axis if minor_axis is not None else
kwargs.pop('minor', None))
return super(Panel, self).rename(items=items, major_axis=major_axis,
minor_axis=minor_axis, **kwargs)
@Appender(_shared_docs['reindex_axis'] % _shared_doc_kwargs)
def reindex_axis(self, labels, axis=0, method=None, level=None, copy=True,
limit=None, fill_value=np.nan):
return super(Panel, self).reindex_axis(labels=labels, axis=axis,
method=method, level=level,
copy=copy, limit=limit,
fill_value=fill_value)
@Appender(_shared_docs['transpose'] % _shared_doc_kwargs)
def transpose(self, *args, **kwargs):
# check if a list of axes was passed in instead as a
# single *args element
if (len(args) == 1 and hasattr(args[0], '__iter__') and
not is_string_like(args[0])):
axes = args[0]
else:
axes = args
if 'axes' in kwargs and axes:
raise TypeError("transpose() got multiple values for "
"keyword argument 'axes'")
elif not axes:
axes = kwargs.pop('axes', ())
return super(Panel, self).transpose(*axes, **kwargs)
@Appender(_shared_docs['fillna'] % _shared_doc_kwargs)
def fillna(self, value=None, method=None, axis=None, inplace=False,
limit=None, downcast=None, **kwargs):
return super(Panel, self).fillna(value=value, method=method, axis=axis,
inplace=inplace, limit=limit,
downcast=downcast, **kwargs)
def count(self, axis='major'):
"""
Return number of observations over requested axis.
Parameters
----------
axis : {'items', 'major', 'minor'} or {0, 1, 2}
Returns
-------
count : DataFrame
"""
i = self._get_axis_number(axis)
values = self.values
mask = np.isfinite(values)
result = mask.sum(axis=i, dtype='int64')
return self._wrap_result(result, axis)
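    # Usage sketch (hypothetical panel, not part of the original docstring):
    #   >>> p = pd.Panel(np.random.rand(2, 3, 4))
    #   >>> counts = p.count(axis='major')   # DataFrame of non-NaN counts
    #                                        # computed along major_axis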
def shift(self, periods=1, freq=None, axis='major'):
"""
Shift index by desired number of periods with an optional time freq.
The shifted data will not include the dropped periods and the
shifted axis will be smaller than the original. This is different
from the behavior of DataFrame.shift()
Parameters
----------
periods : int
Number of periods to move, can be positive or negative
freq : DateOffset, timedelta, or time rule string, optional
axis : {'items', 'major', 'minor'} or {0, 1, 2}
Returns
-------
shifted : Panel
"""
if freq:
return self.tshift(periods, freq, axis=axis)
return super(Panel, self).slice_shift(periods, axis=axis)
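    # Usage sketch (hypothetical panel) showing that the shifted axis shrinks,
    # unlike DataFrame.shift() which keeps the axis length and introduces NaNs:
    #   >>> p = pd.Panel(np.random.rand(2, 5, 3))
    #   >>> p.shift(2).major_axis            # two fewer labels than p.major_axis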
def tshift(self, periods=1, freq=None, axis='major'):
return super(Panel, self).tshift(periods, freq, axis)
def join(self, other, how='left', lsuffix='', rsuffix=''):
"""
        Join items with another Panel, aligning on the major and minor axes.
Parameters
----------
other : Panel or list of Panels
Index should be similar to one of the columns in this one
how : {'left', 'right', 'outer', 'inner'}
How to handle indexes of the two objects. Default: 'left'
for joining on index, None otherwise
* left: use calling frame's index
* right: use input frame's index
* outer: form union of indexes
* inner: use intersection of indexes
lsuffix : string
Suffix to use from left frame's overlapping columns
rsuffix : string
Suffix to use from right frame's overlapping columns
Returns
-------
joined : Panel
"""
from pandas.tools.merge import concat
if isinstance(other, Panel):
join_major, join_minor = self._get_join_index(other, how)
this = self.reindex(major=join_major, minor=join_minor)
other = other.reindex(major=join_major, minor=join_minor)
merged_data = this._data.merge(other._data, lsuffix, rsuffix)
return self._constructor(merged_data)
else:
if lsuffix or rsuffix:
raise ValueError('Suffixes not supported when passing '
'multiple panels')
if how == 'left':
how = 'outer'
join_axes = [self.major_axis, self.minor_axis]
elif how == 'right':
raise ValueError('Right join not supported with multiple '
'panels')
else:
join_axes = None
return concat([self] + list(other), axis=0, join=how,
join_axes=join_axes, verify_integrity=True)
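    # Usage sketch (hypothetical panels with overlapping items):
    #   >>> joined = p1.join(p2, lsuffix='_l', rsuffix='_r')   # align on major/minor axes
    #   >>> joined = p1.join([p2, p3], how='inner')            # suffixes not allowed here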
def update(self, other, join='left', overwrite=True, filter_func=None,
raise_conflict=False):
"""
Modify Panel in place using non-NA values from passed
Panel, or object coercible to Panel. Aligns on items
Parameters
----------
other : Panel, or object coercible to Panel
join : How to join individual DataFrames
{'left', 'right', 'outer', 'inner'}, default 'left'
overwrite : boolean, default True
If True then overwrite values for common keys in the calling panel
filter_func : callable(1d-array) -> 1d-array<boolean>, default None
Can choose to replace values other than NA. Return True for values
that should be updated
raise_conflict : bool
If True, will raise an error if a DataFrame and other both
contain data in the same place.
"""
if not isinstance(other, self._constructor):
other = self._constructor(other)
axis_name = self._info_axis_name
axis_values = self._info_axis
other = other.reindex(**{axis_name: axis_values})
for frame in axis_values:
self[frame].update(other[frame], join, overwrite, filter_func,
raise_conflict)
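    # Usage sketch (hypothetical panels sharing item labels):
    #   >>> p1.update(p2)                   # overwrite with non-NA values from p2
    #   >>> p1.update(p2, overwrite=False)  # only fill NA holes in p1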
def _get_join_index(self, other, how):
if how == 'left':
join_major, join_minor = self.major_axis, self.minor_axis
elif how == 'right':
join_major, join_minor = other.major_axis, other.minor_axis
elif how == 'inner':
join_major = self.major_axis.intersection(other.major_axis)
join_minor = self.minor_axis.intersection(other.minor_axis)
elif how == 'outer':
join_major = self.major_axis.union(other.major_axis)
join_minor = self.minor_axis.union(other.minor_axis)
return join_major, join_minor
# miscellaneous data creation
@staticmethod
def _extract_axes(self, data, axes, **kwargs):
""" return a list of the axis indicies """
return [self._extract_axis(self, data, axis=i, **kwargs)
for i, a in enumerate(axes)]
@staticmethod
def _extract_axes_for_slice(self, axes):
""" return the slice dictionary for these axes """
return dict([(self._AXIS_SLICEMAP[i], a)
for i, a in zip(
self._AXIS_ORDERS[self._AXIS_LEN - len(axes):],
axes)])
@staticmethod
def _prep_ndarray(self, values, copy=True):
if not isinstance(values, np.ndarray):
values = np.asarray(values)
# NumPy strings are a pain, convert to object
if issubclass(values.dtype.type, compat.string_types):
values = np.array(values, dtype=object, copy=True)
else:
if copy:
values = values.copy()
if values.ndim != self._AXIS_LEN:
raise ValueError("The number of dimensions required is {0}, "
"but the number of dimensions of the "
"ndarray given was {1}".format(self._AXIS_LEN,
values.ndim))
return values
@staticmethod
def _homogenize_dict(self, frames, intersect=True, dtype=None):
"""
Conform set of _constructor_sliced-like objects to either
an intersection of indices / columns or a union.
Parameters
----------
frames : dict
intersect : boolean, default True
Returns
-------
        dict of aligned results & indices
"""
result = dict()
        # the caller may pass a dict or an OrderedDict; preserve that type
if isinstance(frames, OrderedDict):
result = OrderedDict()
adj_frames = OrderedDict()
for k, v in compat.iteritems(frames):
if isinstance(v, dict):
adj_frames[k] = self._constructor_sliced(v)
else:
adj_frames[k] = v
axes = self._AXIS_ORDERS[1:]
axes_dict = dict([(a, ax) for a, ax in zip(axes, self._extract_axes(
self, adj_frames, axes, intersect=intersect))])
reindex_dict = dict(
[(self._AXIS_SLICEMAP[a], axes_dict[a]) for a in axes])
reindex_dict['copy'] = False
for key, frame in compat.iteritems(adj_frames):
if frame is not None:
result[key] = frame.reindex(**reindex_dict)
else:
result[key] = None
axes_dict['data'] = result
axes_dict['dtype'] = dtype
return axes_dict
@staticmethod
def _extract_axis(self, data, axis=0, intersect=False):
index = None
if len(data) == 0:
index = Index([])
elif len(data) > 0:
raw_lengths = []
indexes = []
have_raw_arrays = False
have_frames = False
for v in data.values():
if isinstance(v, self._constructor_sliced):
have_frames = True
indexes.append(v._get_axis(axis))
elif v is not None:
have_raw_arrays = True
raw_lengths.append(v.shape[axis])
if have_frames:
index = _get_combined_index(indexes, intersect=intersect)
if have_raw_arrays:
lengths = list(set(raw_lengths))
if len(lengths) > 1:
raise ValueError('ndarrays must match shape on axis %d' % axis)
if have_frames:
if lengths[0] != len(index):
raise AssertionError('Length of data and index must match')
else:
index = Index(np.arange(lengths[0]))
if index is None:
index = Index([])
return _ensure_index(index)
@classmethod
def _add_aggregate_operations(cls, use_numexpr=True):
""" add the operations to the cls; evaluate the doc strings again """
        # doc string substitutions
_agg_doc = """
Wrapper method for %%s
Parameters
----------
other : %s or %s""" % (cls._constructor_sliced.__name__, cls.__name__) + """
axis : {""" + ', '.join(cls._AXIS_ORDERS) + "}" + """
Axis to broadcast over
Returns
-------
""" + cls.__name__ + "\n"
def _panel_arith_method(op, name, str_rep=None, default_axis=None,
fill_zeros=None, **eval_kwargs):
def na_op(x, y):
try:
result = expressions.evaluate(op, str_rep, x, y,
raise_on_error=True,
**eval_kwargs)
except TypeError:
result = op(x, y)
# handles discrepancy between numpy and numexpr on division/mod
# by 0 though, given that these are generally (always?)
# non-scalars, I'm not sure whether it's worth it at the moment
result = missing.fill_zeros(result, x, y, name, fill_zeros)
return result
if name in _op_descriptions:
op_name = name.replace('__', '')
op_desc = _op_descriptions[op_name]
if op_desc['reversed']:
equiv = 'other ' + op_desc['op'] + ' panel'
else:
equiv = 'panel ' + op_desc['op'] + ' other'
_op_doc = """
%%s of series and other, element-wise (binary operator `%%s`).
Equivalent to ``%%s``.
Parameters
----------
other : %s or %s""" % (cls._constructor_sliced.__name__,
cls.__name__) + """
axis : {""" + ', '.join(cls._AXIS_ORDERS) + "}" + """
Axis to broadcast over
Returns
-------
""" + cls.__name__ + """
See also
--------
""" + cls.__name__ + ".%s\n"
doc = _op_doc % (op_desc['desc'], op_name, equiv,
op_desc['reverse'])
else:
doc = _agg_doc % name
@Appender(doc)
def f(self, other, axis=0):
return self._combine(other, na_op, axis=axis)
f.__name__ = name
return f
# add `div`, `mul`, `pow`, etc..
ops.add_flex_arithmetic_methods(
cls, _panel_arith_method, use_numexpr=use_numexpr,
flex_comp_method=ops._comp_method_PANEL)
Panel._setup_axes(axes=['items', 'major_axis', 'minor_axis'], info_axis=0,
stat_axis=1, aliases={'major': 'major_axis',
'minor': 'minor_axis'},
slicers={'major_axis': 'index',
'minor_axis': 'columns'})
ops.add_special_arithmetic_methods(Panel, **ops.panel_special_funcs)
Panel._add_aggregate_operations()
Panel._add_numeric_operations()
# legacy
class WidePanel(Panel):
def __init__(self, *args, **kwargs):
# deprecation, #10892
warnings.warn("WidePanel is deprecated. Please use Panel",
FutureWarning, stacklevel=2)
super(WidePanel, self).__init__(*args, **kwargs)
class LongPanel(DataFrame):
def __init__(self, *args, **kwargs):
# deprecation, #10892
warnings.warn("LongPanel is deprecated. Please use DataFrame",
FutureWarning, stacklevel=2)
super(LongPanel, self).__init__(*args, **kwargs)
|
# Generated by Django 3.0.3 on 2020-02-25 19:30
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('main', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='annotationclass',
name='user',
field=models.ForeignKey(default='', on_delete=django.db.models.deletion.DO_NOTHING, to=settings.AUTH_USER_MODEL),
preserve_default=False,
),
migrations.AddField(
model_name='objectclassmodel',
name='user',
field=models.ForeignKey(default='', on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
preserve_default=False,
),
]
|
from __future__ import print_function, absolute_import
from six import iteritems, iterkeys, itervalues
from six.moves import range
from _file_reader import FileReader
from f06_table import F06Table
class _DummyTable(object):
def __init__(self):
self.header = []
self.data = []
self.line_number = -1
self.table_format = None
class TableFormat(object):
def __init__(self):
self.header_check = b'D I S P L A C E M E N T V E C T O R'
self.header_check_line = 2
self.header_lines = 5
class F06Reader(object):
def __init__(self, filename):
self.file = FileReader(filename)
self._done_reading = False
self._table_formats = [TableFormat()]
self._current_table = None
self._callback = None
def register_callback(self, callback):
assert callable(callback)
self._callback = callback
def read(self):
while not self._done_reading:
table_lines, line_number = self._read_table()
if self._done_reading:
break
table_format = F06Table.find_table(table_lines)
if table_format is None:
self._process_table(self._current_table)
self._current_table = None
continue
table = table_format()
table.set_data(table_lines)
table.line_number = line_number
for i in range(len(table.header)):
table.header[i] = table.header[i].strip()
if self._current_table is None:
self._current_table = table
else:
if self._current_table.header == table.header:
self._current_table.data.extend(table.data)
else:
self._process_table(self._current_table)
self._current_table = table
if self._current_table is not None:
self._process_table(self._current_table)
self._current_table = None
def _process_table(self, table):
if table is None:
return
pch_table = table.to_punch()
if isinstance(pch_table, (list, tuple)):
for table in pch_table:
self._callback(table)
else:
self._callback(pch_table)
def _read_table(self):
table_lines = []
first_line = self._find_next_table()
if self._done_reading:
return None, None
line_number = self.file.line_number()
while True:
if first_line is not None:
line = first_line
first_line = None
else:
line = self.file.next_line()
self._check_done_reading(line)
if self._done_reading:
break
# print(line)
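            # In printer-formatted F06 output a leading '1' is the FORTRAN
            # carriage-control code for a new page, so it is treated here as
            # the end of the current table.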
if line.startswith(b'1'):
break
table_lines.append(line)
return table_lines, line_number
def _find_next_table(self):
while True:
line = self.file.next_line()
self._check_done_reading(line)
if self._done_reading:
break
if line.startswith(b'0') and b'SUBCASE' in line:
return line
return None
def _check_done_reading(self, line):
if line is None or b'END OF JOB' in line:
self._done_reading = True
|
from __future__ import print_function #Python 2 & 3 compatibility
from __future__ import absolute_import
import numpy as np
import logging
import unittest
import os
import scipy.linalg as LA
import time
from pysnptools.snpreader import Bed,Pheno
from pysnptools.snpreader import SnpData,SnpReader
from pysnptools.kernelreader import KernelNpz
from pysnptools.kernelreader import SnpKernel
from pysnptools.kernelreader import KernelReader
from pysnptools.kernelreader import Identity as KernelIdentity
import pysnptools.util as pstutil
from pysnptools.standardizer import DiagKtoN,UnitTrained
from pysnptools.standardizer import Unit
from pysnptools.util import intersect_apply
from pysnptools.standardizer import Standardizer
from fastlmm.inference.lmm import LMM
from pysnptools.standardizer import Identity as StandardizerIdentity
from scipy.stats import multivariate_normal
from fastlmm.util.pickle_io import load, save
from pysnptools.pstreader import PstReader
from six.moves import range
class _SnpWholeTest(KernelReader):
'''
Warning: Assumes that if train and test contains the same iid, they have the same value.
'''
def __init__(self,train,test,standardizer,block_size,iid0=None):
self.train = train
self.test = test
self.standardizer = standardizer
assert standardizer.is_constant, "Expect standardizer to be constant"
self.block_size = block_size
if iid0 is not None:
            self._row = iid0
@property
def row(self):
if not hasattr(self,'_row'):
assert np.array_equal(self.train.sid,self.test.sid), "Expect train and test to have same sid in same order"
train_set = set(tuple(item) for item in self.train.iid)
test_unique = [item2 for item2 in (tuple(item) for item in self.test.iid) if item2 not in train_set]
self._row = np.r_[self.train.iid,np.array(test_unique,dtype='str').reshape(-1,2)]
return self._row
@property
def col(self):
return self.test.iid
def __getitem__(self, iid_indexer_and_snp_indexer):
if isinstance(iid_indexer_and_snp_indexer,tuple):
iid0_indexer, iid1_indexer = iid_indexer_and_snp_indexer
else:
iid0_indexer = iid_indexer_and_snp_indexer
iid1_indexer = iid0_indexer
row_index_or_none = PstReader._make_sparray_from_sparray_or_slice(self.row_count, iid0_indexer)
col_index_or_none = PstReader._make_sparray_from_sparray_or_slice(self.col_count, iid1_indexer)
if row_index_or_none is None:
row_index_or_none = list(range(self.row_count))
assert not isinstance(row_index_or_none,str), "row_index_or_none should not be a string"
iid = self.row[row_index_or_none]
if col_index_or_none is None or np.array_equal(col_index_or_none,list(range(self.col_count))):
test = self.test
else:
test = self.test[col_index_or_none]
try: #case 1: asking for train x test
train = self.train[self.train.iid_to_index(iid),:]
is_ok = True
        except Exception:
is_ok = False
if is_ok:
return _SnpTrainTest(train=train,test=test,standardizer=self.standardizer,block_size=self.block_size)
        #case 2: asking for test x test
if np.array_equal(test.iid,iid):
return SnpKernel(test,standardizer=self.standardizer,block_size=self.block_size)
#case 3: Just re-reordering the iids
if len(row_index_or_none) == self.row_count and (col_index_or_none is None or len(col_index_or_none) == self.col_count):
result = _SnpWholeTest(train=self.train,test=test,standardizer=self.standardizer,block_size=self.block_size,iid0=iid)
return result
raise Exception("When reading from a _SnpWholeTest, can only ask to reorder iids or to access from train x test or test x test")
#!!! does it make sense to read from disk in to parts?
def _read(self, row_index_or_none, col_index_or_none, order, dtype, force_python_only, view_ok):
result = self[row_index_or_none,col_index_or_none]._read(row_index_or_none, col_index_or_none, order, dtype, force_python_only, view_ok)
return result
def __repr__(self):
s = "_SnpWholeTest(train={0},test={1},standardizer={2}".format(self.train,self.test,self.standardizer)
if self.block_size is not None:
s += ",block_size={0}".format(self.block_size)
s += ")"
return s
def copyinputs(self, copier):
#Doesn't need run_once
copier.input(self.train)
copier.input(self.test)
copier.input(self.standardizer)
class _SnpTrainTest(KernelReader):
def __init__(self,train,test,standardizer,block_size):
self.train = train
self.test = test
self.standardizer = standardizer
assert standardizer.is_constant, "Expect standardizer to be constant"
self.block_size = block_size
if np.array_equal(train.iid,test.iid):
self._col = train.iid
else:
self._col = test.iid
@property
def row(self):
return self.train.iid
@property
def col(self):
return self._col
def _read(self, row_index_or_none, col_index_or_none, order, dtype, force_python_only, view_ok):
assert self.train.sid_count == self.test.sid_count, "real assert"
#case 1: asking for all of train x test
        if ((row_index_or_none is None or np.array_equal(row_index_or_none,np.arange(self.row_count)))
                and (col_index_or_none is None or np.array_equal(col_index_or_none,np.arange(self.col_count)))):
#Do all-at-once (not in blocks) if 1. No block size is given or 2. The #ofSNPs < Min(block_size,iid_count) #similar code elsewhere
if self.block_size is None or (self.train.sid_count <= self.block_size or self.train.sid_count <= self.train.iid_count+self.test.iid_count):
train_snps = self.train.read(dtype=dtype).standardize(self.standardizer)
test_snps = self.test.read(dtype=dtype).standardize(self.standardizer)
if order == 'F': #numpy's 'dot' always returns 'C' order
k_val = test_snps.val.dot(train_snps.val.T).T
else:
k_val = train_snps.val.dot(test_snps.val.T)
return k_val
else: #Do in blocks
#Set the default order to 'C' because with kernels any order is fine and the Python .dot method likes 'C' best.
if order=='A':
order = 'C'
k_val = np.zeros([self.train.iid_count,self.test.iid_count],dtype=dtype,order=order)
ct = 0
ts = time.time()
for start in range(0, self.train.sid_count, self.block_size):
ct += self.block_size
train_snps = self.train[:,start:start+self.block_size].read(dtype=dtype).standardize(self.standardizer)
test_snps = self.test [:,start:start+self.block_size].read(dtype=dtype).standardize(self.standardizer)
if order == 'F': #numpy's 'dot' always returns 'C' order
k_val += test_snps.val.dot(train_snps.val.T).T
else:
k_val += train_snps.val.dot(test_snps.val.T)
if ct % self.block_size==0:
diff = time.time()-ts
if diff > 1: logging.info("read %s SNPs in %.2f seconds" % (ct, diff))
return k_val
else:
raise Exception("_SnpTrainTest currently only has code for reading all of train x test")
def __repr__(self):
s = "_SnpTrainTest(train={0},test={1},standardizer={2}".format(self.train,self.test,self.standardizer)
if self.block_size is not None:
s += ",block_size={0}".format(self.block_size)
s += ")"
return s
def copyinputs(self, copier):
#Doesn't need run_once
copier.input(self.train)
copier.input(self.test)
copier.input(self.standardizer)
def _snps_fixup(snp_input, iid_if_none=None,count_A1=None):
from pysnptools.snpreader import _snps_fixup as pst_snps_fixup
return pst_snps_fixup(snp_input,iid_if_none,count_A1)
def _pheno_fixup(pheno_input, iid_if_none=None, missing ='NaN',count_A1=None):
try:
ret = Pheno(pheno_input, iid_if_none, missing=missing)
ret.iid #doing this just to force file load
return ret
    except Exception:
return _snps_fixup(pheno_input, iid_if_none=iid_if_none,count_A1=count_A1)
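# Dispatch helper: a '.npz' file name becomes a KernelNpz, any other string is
# opened as a Bed file, SNP readers become a SnpKernel (or a _SnpWholeTest when
# test SNPs are supplied), None yields an identity kernel over iid_if_none, and
# anything else (e.g. an existing KernelReader) is passed through unchanged.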
def _kernel_fixup(input, iid_if_none, standardizer, test=None, test_iid_if_none=None, block_size=None, train_snps=None, count_A1=None):
if test is not None and input is None:
input = test
test = None
if isinstance(input, str) and input.endswith(".npz"):
return KernelNpz(input)
if isinstance(input, str):
input = Bed(input, count_A1=count_A1) #Note that we don't return here. Processing continues
if isinstance(test, str):
test = Bed(test, count_A1=count_A1) #Note that we don't return here. Processing continues
if isinstance(input,SnpReader):
if test is not None:
return _SnpWholeTest(train=train_snps,test=test,standardizer=standardizer,block_size=block_size)
else:
return SnpKernel(input,standardizer=standardizer, block_size=block_size)
if input is None:
return KernelIdentity(iid=iid_if_none,test=test_iid_if_none)
return input
class FastLMM(object):
'''
A predictor, somewhat in the style of scikit-learn, for learning and predicting with linear mixed models.
**Constructor:**
:Parameters: * **GB_goal** (int) -- gigabytes of memory the run should use, optional. If not given, will read the test_snps in blocks the same size as the kernel, which is memory efficient with little overhead on computation time.
* **force_full_rank** (bool) -- Even if kernels are defined with fewer SNPs than IIDs, create an explicit iid_count x iid_count kernel. Cannot be True if force_low_rank is True.
* **force_low_rank** (bool) -- Even if kernels are defined with fewer IIDs than SNPs, create a low-rank iid_count x sid_count kernel. Cannot be True if force_full_rank is True.
                 * **snp_standardizer** (:class:`Standardizer`) -- The PySnpTools standardizer to apply to SNP data. Choices include :class:`Standardizer.Unit` (Default. Makes values for each SNP have mean zero and standard deviation 1.0, then fills missing with zero) and :class:`Standardizer.Identity` (Do nothing)
                 * **covariate_standardizer** (:class:`Standardizer`) -- The PySnpTools standardizer to apply to X, the covariate data. Some choices include :class:`Standardizer.Unit` (Default. Fills missing with zero) and :class:`Standardizer.Identity` (do nothing)
                 * **kernel_standardizer** (:class:`KernelStandardizer`) -- The PySnpTools kernel standardizer to apply to the kernels. Some choices include :class:`KernelStandardizer.DiagKToN` (Default. Make the diagonal sum to iid_count) and :class:`KernelStandardizer.Identity` (Do nothing)
:Example:
>>> from __future__ import print_function #Python 2 & 3 compatibility
>>> import numpy as np
>>> import logging
>>> from pysnptools.snpreader import Bed, Pheno
>>> from fastlmm.inference import FastLMM
>>> logging.basicConfig(level=logging.INFO)
>>> snpreader = Bed('../feature_selection/examples/toydata.bed',count_A1=False)
>>> cov_fn = "../feature_selection/examples/toydata.cov"
>>> pheno_fn = "../feature_selection/examples/toydata.phe"
>>> train_idx = np.r_[10:snpreader.iid_count] # iids 10 and on
>>> test_idx = np.r_[0:10] # the first 10 iids
>>> fastlmm = FastLMM(GB_goal=2)
>>> #We give it phenotype and covariate information for extra examples, but it reorders and intersects the examples, so only training examples are used.
>>> _ = fastlmm.fit(K0_train=snpreader[train_idx,:],X=cov_fn,y=pheno_fn)
>>> mean, covariance = fastlmm.predict(K0_whole_test=snpreader[test_idx,:],X=cov_fn,count_A1=False)
>>> print(list(mean.iid[0]), round(mean.val[0,0],7), round(covariance.val[0,0],7))
['per0', 'per0'] 0.1791958 0.8995209
>>> nll = fastlmm.score(K0_whole_test=snpreader[test_idx,:],X=cov_fn,y=pheno_fn,count_A1=False)
>>> print(round(nll,7))
13.4623234
'''
def __init__(self, GB_goal=None, force_full_rank=False, force_low_rank=False, snp_standardizer=Unit(), covariate_standardizer=Unit(), kernel_standardizer=DiagKtoN()):
self.GB_goal = GB_goal
self.force_full_rank = force_full_rank
self.force_low_rank = force_low_rank
self.snp_standardizer = snp_standardizer
self.covariate_standardizer = covariate_standardizer
self.kernel_standardizer = kernel_standardizer
self.is_fitted = False
#!!!update doc to explain h2raw w.r.t h2
def fit(self, X=None, y=None, K0_train=None, K1_train=None, h2raw=None, mixing=None,count_A1=None):#!!!is this h2 or h2corr????
"""
Method for training a :class:`FastLMM` predictor. If the examples in X, y, K0_train, K1_train are not the same, they will be reordered and intersected.
:param X: training covariate information, optional:
If you give a string, it should be the file name of a PLINK phenotype-formatted file.
:type X: a PySnpTools `SnpReader <http://fastlmm.github.io/PySnpTools/#snpreader-snpreader>`__
(such as `Pheno <http://fastlmm.github.io/PySnpTools/#snpreader-pheno>`__ or `SnpData <http://fastlmm.github.io/PySnpTools/#snpreader-snpdata>`__) or string.
:param y: training phenotype:
If you give a string, it should be the file name of a PLINK phenotype-formatted file.
:type y: a PySnpTools `SnpReader <http://fastlmm.github.io/PySnpTools/#snpreader-snpreader>`__
(such as `Pheno <http://fastlmm.github.io/PySnpTools/#snpreader-pheno>`__ or `SnpData <http://fastlmm.github.io/PySnpTools/#snpreader-snpdata>`__) or string.
:param K0_train: A similarity matrix or SNPs from which to construct such a similarity matrix.
Can be any `SnpReader <http://fastlmm.github.io/PySnpTools/#snpreader-snpreader>`__.
            If you give a string, can be the name of a PLINK-formatted Bed file.
Can be PySnpTools `KernelReader <http://fastlmm.github.io/PySnpTools/#kernelreader-kernelreader>`__.
If you give a string it can be the name of a `KernelNpz <http://fastlmm.github.io/PySnpTools/#kernelreader-kernelnpz>`__ file.
:type K0_train: `SnpReader <http://fastlmm.github.io/PySnpTools/#snpreader-snpreader>`__ or a string or
`KernelReader <http://fastlmm.github.io/PySnpTools/#kernelreader-kernelreader>`__
:param K1_train: A second similarity matrix or SNPs from which to construct such a second similarity matrix. (Also, see 'mixing').
            Can be any `SnpReader <http://fastlmm.github.io/PySnpTools/#snpreader-snpreader>`__. If you give a string, can be the name of a PLINK-formatted Bed file.
Can be PySnpTools `KernelReader <http://fastlmm.github.io/PySnpTools/#kernelreader-kernelreader>`__.
If you give a string it can be the name of a `KernelNpz <http://fastlmm.github.io/PySnpTools/#kernelreader-kernelnpz>`__ file.
:type K1_train: `SnpReader <http://fastlmm.github.io/PySnpTools/#snpreader-snpreader>`__ or a string or
`KernelReader <http://fastlmm.github.io/PySnpTools/#kernelreader-kernelreader>`__
:param h2raw: A parameter to LMM learning that tells how much weight to give the K's vs. the identity matrix, optional
If not given will search for best value.
If mixing is unspecified, then h2 must also be unspecified.
:type h2raw: number
:param mixing: Weight between 0.0 (inclusive, default) and 1.0 (inclusive) given to K1_train relative to K0_train.
If you give no mixing number and a K1_train is given, the best weight will be learned.
:type mixing: number
:param count_A1: If it needs to read SNP data from a BED-formatted file, tells if it should count the number of A1
alleles (the PLINK standard) or the number of A2 alleles. False is the current default, but in the future the default will change to True.
:type count_A1: bool
:rtype: self, the fitted FastLMM predictor
"""
self.is_fitted = True
# should this have a cache file like 'single_snp'?
#!!!later what happens if missing values in pheno_train?
#!!!later add code so that X, y, etc can be array-like objects without iid information. In that case, make up iid info
assert y is not None, "y must be given"
y = _pheno_fixup(y,count_A1=count_A1)
assert y.sid_count == 1, "Expect y to be just one variable"
X = _pheno_fixup(X, iid_if_none=y.iid,count_A1=count_A1)
K0_train = _kernel_fixup(K0_train, iid_if_none=y.iid, standardizer=self.snp_standardizer,count_A1=count_A1)
K1_train = _kernel_fixup(K1_train, iid_if_none=y.iid, standardizer=self.snp_standardizer,count_A1=count_A1)
K0_train, K1_train, X, y = intersect_apply([K0_train, K1_train, X, y],intersect_before_standardize=True) #!!! test this on both K's as None
from fastlmm.association.single_snp import _set_block_size
K0_train, K1_train, block_size = _set_block_size(K0_train, K1_train, mixing, self.GB_goal, self.force_full_rank, self.force_low_rank)
X = X.read()
# If possible, unit standardize train and test together. If that is not possible, unit standardize only train and later apply
# the same linear transformation to test. Unit standardization is necessary for FastLMM to work correctly.
#!!!later is the calculation of the training data's stats done twice???
X, covar_unit_trained = X.standardize(self.covariate_standardizer,block_size=block_size,return_trained=True) #This also fills missing with the mean
# add a column of 1's to cov to increase DOF of model (and accuracy) by allowing a constant offset
X = SnpData(iid=X.iid,
sid=self._new_snp_name(X),
val=np.c_[X.val,np.ones((X.iid_count,1))],
name ="covariate_train w/ 1's")
y0 = y.read().val #!!!later would view_ok=True,order='A' be ok because this code already did a fresh read to look for any missing values
from fastlmm.association.single_snp import _Mixer #!!!move _combine_the_best_way to another file (e.g. this one)
K_train, h2raw, mixer = _Mixer.combine_the_best_way(K0_train,K1_train,X.val,y0,mixing,h2raw,force_full_rank=self.force_full_rank,force_low_rank=self.force_low_rank,kernel_standardizer=self.kernel_standardizer,block_size=block_size)
# do final prediction using lmm.py
lmm = LMM()
#Special case: The K kernel is defined implicitly with SNP data
if mixer.do_g:
assert isinstance(K_train.standardizer,StandardizerIdentity), "Expect Identity standardizer"
G_train = K_train.snpreader
lmm.setG(G0=K_train.snpreader.val)
else:
lmm.setK(K0=K_train.val)
lmm.setX(X.val)
lmm.sety(y0[:,0])
# Find the best h2 and also on covariates (not given from new model)
if h2raw is None:
res = lmm.findH2() #!!!why is REML true in the return???
else:
res = lmm.nLLeval(h2=h2raw)
#We compute sigma2 instead of using res['sigma2'] because res['sigma2'] is only the pure noise.
full_sigma2 = float(sum((np.dot(X.val,res['beta']).reshape(-1,1)-y0)**2))/y.iid_count #!!! this is non REML. Is that right?
###### all references to 'fastlmm_model' should be here so that we don't forget any
self.block_size = block_size
self.beta = res['beta']
self.h2raw = res['h2']
self.sigma2 = full_sigma2
self.U = lmm.U
self.S = lmm.S
self.K = lmm.K
self.G = lmm.G
self.y = lmm.y
self.Uy = lmm.Uy
self.X = lmm.X
self.UX = lmm.UX
self.mixer = mixer
self.covar_unit_trained = covar_unit_trained
self.K_train_iid = K_train.iid
self.covar_sid = X.sid
self.pheno_sid = y.sid
self.G0_train = K0_train.snpreader if isinstance(K0_train,SnpKernel) else None #!!!later expensive?
self.G1_train = K1_train.snpreader if isinstance(K1_train,SnpKernel) else None #!!!later expensive?
return self
@staticmethod
def _new_snp_name(snpreader):
new_snp = "always1"
while True:
if not new_snp in snpreader.sid:
return np.r_[snpreader.sid,[new_snp]]
new_snp += "_"
def score(self, X=None, y=None, K0_whole_test=None, K1_whole_test=None, iid_if_none=None, return_mse_too=False, return_per_iid=False, count_A1=None):
"""
Method for calculating the negative log likelihood of testing examples.
If the examples in X,y, K0_whole_test, K1_whole_test are not the same, they will be reordered and intersected.
:param X: testing covariate information, optional:
If you give a string, it should be the file name of a PLINK phenotype-formatted file.
:type X: a PySnpTools `SnpReader <http://fastlmm.github.io/PySnpTools/#snpreader-snpreader>`__ (such as `Pheno <http://fastlmm.github.io/PySnpTools/#snpreader-pheno>`__ or `SnpData <http://fastlmm.github.io/PySnpTools/#snpreader-snpdata>`__) or string.
:param y: testing phenotype:
If you give a string, it should be the file name of a PLINK phenotype-formatted file.
:type y: a PySnpTools `SnpReader <http://fastlmm.github.io/PySnpTools/#snpreader-snpreader>`__ (such as `Pheno <http://fastlmm.github.io/PySnpTools/#snpreader-pheno>`__ or `SnpData <http://fastlmm.github.io/PySnpTools/#snpreader-snpdata>`__) or string.
:param K0_whole_test: A similarity matrix from all the examples to the test examples. Alternatively,
the test SNPs needed to construct such a similarity matrix.
            Can be any `SnpReader <http://fastlmm.github.io/PySnpTools/#snpreader-snpreader>`__. If you give a string, can be the name of a PLINK-formatted Bed file.
Can be PySnpTools `KernelReader <http://fastlmm.github.io/PySnpTools/#kernelreader-kernelreader>`__. If you give a string it can be the name of a `KernelNpz <http://fastlmm.github.io/PySnpTools/#kernelreader-kernelnpz>`__ file.
:type K0_whole_test: `SnpReader <http://fastlmm.github.io/PySnpTools/#snpreader-snpreader>`__ or a string or `KernelReader <http://fastlmm.github.io/PySnpTools/#kernelreader-kernelreader>`__
:param K1_whole_test: A second similarity matrix from all the examples to the test examples. Alternatively,
the test SNPs needed to construct such a similarity matrix.
            Can be any `SnpReader <http://fastlmm.github.io/PySnpTools/#snpreader-snpreader>`__. If you give a string, can be the name of a PLINK-formatted Bed file.
Can be PySnpTools `KernelReader <http://fastlmm.github.io/PySnpTools/#kernelreader-kernelreader>`__. If you give a string it can be the name of a `KernelNpz <http://fastlmm.github.io/PySnpTools/#kernelreader-kernelnpz>`__ file.
:type K1_whole_test: `SnpReader <http://fastlmm.github.io/PySnpTools/#snpreader-snpreader>`__ or a string or `KernelReader <http://fastlmm.github.io/PySnpTools/#kernelreader-kernelreader>`__
:param iid_if_none: Examples to predict for if no X, K0_whole_test, K1_whole_test is provided.
:type iid_if_none: an ndarray of two strings
:param return_mse_too: If true, will also return the mean squared error.
:type return_mse_too: bool
:param count_A1: If it needs to read SNP data from a BED-formatted file, tells if it should count the number of A1
alleles (the PLINK standard) or the number of A2 alleles. False is the current default, but in the future the default will change to True.
:type count_A1: bool
:rtype: a float of the negative log likelihood and, optionally, a float of the mean squared error.
"""
mean0, covar0 = self.predict(K0_whole_test=K0_whole_test,K1_whole_test=K1_whole_test,X=X,iid_if_none=iid_if_none,count_A1=count_A1)
y = _pheno_fixup(y, iid_if_none=covar0.iid,count_A1=count_A1)
mean, covar, y = intersect_apply([mean0, covar0, y])
mean = mean.read(order='A',view_ok=True).val
covar = covar.read(order='A',view_ok=True).val
y_actual = y.read().val
if not return_per_iid:
var = multivariate_normal(mean=mean.reshape(-1), cov=covar)
nll = -np.log(var.pdf(y_actual.reshape(-1)))
if not return_mse_too:
return nll
else:
mse = ((y_actual-mean)**2).sum()
return nll, mse
else:
if not return_mse_too:
result = SnpData(iid=y.iid,sid=['nLL'],val=np.empty((y.iid_count,1)),name="nLL")
for iid_index in range(y.iid_count):
var = multivariate_normal(mean=mean[iid_index], cov=covar[iid_index,iid_index])
nll = -np.log(var.pdf(y_actual[iid_index]))
result.val[iid_index,0] = nll
return result
else:
raise Exception("need code for mse_too")
def _extract_fixup(kernel):
assert kernel.iid0_count >= kernel.iid1_count, "Expect iid0 to be at least as long as iid1"
def predict(self,X=None,K0_whole_test=None,K1_whole_test=None,iid_if_none=None, count_A1=None):
"""
Method for predicting from a fitted :class:`FastLMM` predictor.
If the examples in X, K0_whole_test, K1_whole_test are not the same, they will be reordered and intersected.
:param X: testing covariate information, optional:
If you give a string, it should be the file name of a PLINK phenotype-formatted file.
:type X: a PySnpTools `SnpReader <http://fastlmm.github.io/PySnpTools/#snpreader-snpreader>`__ (such as `Pheno <http://fastlmm.github.io/PySnpTools/#snpreader-pheno>`__ or `SnpData <http://fastlmm.github.io/PySnpTools/#snpreader-snpdata>`__) or string.
:param K0_whole_test: A similarity matrix from all the examples to the test examples. Alternatively,
the test SNPs needed to construct such a similarity matrix.
            Can be any `SnpReader <http://fastlmm.github.io/PySnpTools/#snpreader-snpreader>`__. If you give a string, can be the name of a PLINK-formatted Bed file.
Can be PySnpTools `KernelReader <http://fastlmm.github.io/PySnpTools/#kernelreader-kernelreader>`__. If you give a string it can be the name of a `KernelNpz <http://fastlmm.github.io/PySnpTools/#kernelreader-kernelnpz>`__ file.
:type K0_whole_test: `SnpReader <http://fastlmm.github.io/PySnpTools/#snpreader-snpreader>`__ or a string or `KernelReader <http://fastlmm.github.io/PySnpTools/#kernelreader-kernelreader>`__
:param K1_whole_test: A second similarity matrix from all the examples to the test examples. Alternatively,
the test SNPs needed to construct such a similarity matrix.
            Can be any `SnpReader <http://fastlmm.github.io/PySnpTools/#snpreader-snpreader>`__. If you give a string, can be the name of a PLINK-formatted Bed file.
Can be PySnpTools `KernelReader <http://fastlmm.github.io/PySnpTools/#kernelreader-kernelreader>`__. If you give a string it can be the name of a `KernelNpz <http://fastlmm.github.io/PySnpTools/#kernelreader-kernelnpz>`__ file.
:type K1_whole_test: `SnpReader <http://fastlmm.github.io/PySnpTools/#snpreader-snpreader>`__ or a string or `KernelReader <http://fastlmm.github.io/PySnpTools/#kernelreader-kernelreader>`__
:param iid_if_none: Examples to predict for if no X, K0_whole_test, K1_whole_test is provided.
:type iid_if_none: an ndarray of two strings
:rtype: A `SnpData <http://fastlmm.github.io/PySnpTools/#snpreader-snpdata>`__ of the means and a :class:`KernelData` of the covariance
"""
assert self.is_fitted, "Can only predict after predictor has been fitted"
#assert K0_whole_test is not None, "K0_whole_test must be given"
#!!!later is it too wasteful to keep both G0_train, G1_train, and lmm.G when storing to disk?
#!!!later all _kernel_fixup's should use block_size input
K0_whole_test_b = _kernel_fixup(K0_whole_test, train_snps=self.G0_train, iid_if_none=iid_if_none, standardizer=self.mixer.snp_trained0, test=K0_whole_test, test_iid_if_none=None, block_size=self.block_size,count_A1=count_A1)
K1_whole_test = _kernel_fixup(K1_whole_test, train_snps=self.G1_train, iid_if_none=K0_whole_test_b.iid0, standardizer=self.mixer.snp_trained1, test=K1_whole_test, test_iid_if_none=K0_whole_test_b.iid1, block_size=self.block_size,count_A1=count_A1)
X = _pheno_fixup(X,iid_if_none=K0_whole_test_b.iid1,count_A1=count_A1)
K0_whole_test_c, K1_whole_test, X = intersect_apply([K0_whole_test_b, K1_whole_test, X],intersect_before_standardize=True,is_test=True)
X = X.read().standardize(self.covar_unit_trained)
# add a column of 1's to cov to increase DOF of model (and accuracy) by allowing a constant offset
X = SnpData(iid=X.iid,
sid=self._new_snp_name(X),
val=np.c_[X.read().val,np.ones((X.iid_count,1))])
assert np.array_equal(X.sid,self.covar_sid), "Expect covar sids to be the same in train and test."
train_idx0 = K0_whole_test_c.iid0_to_index(self.K_train_iid)
K0_train_test = K0_whole_test_c[train_idx0,:]
train_idx1 = K1_whole_test.iid0_to_index(self.K_train_iid)
K1_train_test = K1_whole_test[train_idx1,:]
test_idx0 = K0_whole_test_c.iid0_to_index(K0_whole_test_c.iid1)
K0_test_test = K0_whole_test_c[test_idx0,:]
if K0_test_test.iid0 is not K0_test_test.iid1:
raise Exception("real assert")
test_idx1 = K1_whole_test.iid0_to_index(K0_whole_test_c.iid1)
K1_test_test = K1_whole_test[test_idx1,:]
if self.mixer.do_g:
###################################################
# low rank from Rasmussen eq 2.9 + noise term added to covar
###################################################
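            # With G the standardized training SNP matrix, the implied covariance is
            # varg * G.dot(G.T) + vare * I.  The low-rank (Woodbury) form below only
            # inverts the sid_count x sid_count matrix A = (1/vare) G'G + (1/varg) I:
            #   mean  = X.dot(beta) + (1/vare) * Gstar.dot(Ainv).dot(G.T).dot(y - X.dot(beta))
            #   covar = Gstar.dot(Ainv).dot(Gstar.T) + vare * I
            # (a sketch of the algebra, not taken verbatim from the original source)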
Gstar = self.mixer.g_mix(K0_train_test,K1_train_test)
varg = self.h2raw * self.sigma2
vare = (1.-self.h2raw) * self.sigma2
Ainv = LA.inv((1./vare) * np.dot(self.G.T,self.G) + (1./varg)*np.eye(self.G.shape[1]))
testAinv = np.dot(Gstar.test.val, Ainv)
pheno_predicted = np.dot(X.val,self.beta) + (1./vare) * np.dot(np.dot(testAinv,self.G.T),self.y-np.dot(self.X,self.beta))
pheno_predicted = pheno_predicted.reshape(-1,1)
covar = np.dot(testAinv,Gstar.test.val.T) + vare * np.eye(Gstar.test.val.shape[0])
else:
lmm = LMM()
lmm.U = self.U
lmm.S = self.S
lmm.G = self.G
lmm.y = self.y
lmm.Uy = self.Uy
lmm.X = self.X
lmm.UX = self.UX
Kstar = self.mixer.k_mix(K0_train_test,K1_train_test) #!!!later do we need/want reads here? how about view_OK?
lmm.setTestData(Xstar=X.val, K0star=Kstar.val.T)
Kstar_star = self.mixer.k_mix(K0_test_test,K1_test_test) #!!!later do we need/want reads here?how about view_OK?
pheno_predicted, covar = lmm.predict_mean_and_variance(beta=self.beta, h2=self.h2raw,sigma2=self.sigma2, Kstar_star=Kstar_star.val)
#pheno_predicted = lmm.predictMean(beta=self.beta, h2=self.h2,scale=self.sigma2).reshape(-1,1)
ret0 = SnpData(iid = X.iid, sid=self.pheno_sid,val=pheno_predicted,pos=np.array([[np.nan,np.nan,np.nan]]),name="lmm Prediction")
from pysnptools.kernelreader import KernelData
ret1 = KernelData(iid=K0_test_test.iid,val=covar)
return ret0, ret1
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO)
import doctest
doctest.testmod()
|
class Node:
def __init__(self, val):
self.val = val
self.left = None
self.right = None
self.height = 1
def insert(node, val):
if not node:
return Node(val)
if val <= node.val:
node.left = insert(node.left, val)
else:
node.right = insert(node.right, val)
return node
def search(node, val):
if not node:
return False
if val == node.val:
return True
elif val < node.val:
return search(node.left, val)
else:
return search(node.right, val)
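# Deletion follows the standard BST strategy: a node with at most one child is
# replaced by that child; a node with two children takes the value of its
# in-order successor (the leftmost node of its right subtree), and that
# successor is then deleted from the right subtree.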
def delete(node, val):
if not node:
return
if val < node.val:
node.left = delete(node.left, val)
elif node.val < val:
node.right = delete(node.right, val)
else:
if not node.left:
return node.right
if not node.right:
return node.left
successor = get_successor(node.right)
node.val = successor.val
node.right = delete(node.right, successor.val)
return node
def get_successor(node):
while node.left:
node = node.left
return node
def inorder(node):
if not node:
return
inorder(node.left)
print(node.val)
inorder(node.right)
if __name__ == '__main__':
root = Node(50)
insert(root, 30)
insert(root, 20)
insert(root, 40)
insert(root, 70)
insert(root, 60)
insert(root, 80)
inorder(root)
print()
assert search(root, 40)
assert not search(root, 45)
root = delete(root, 30)
assert root.val == 50
inorder(root)
print()
root = delete(root, 50)
assert root.val == 60
inorder(root)
print()
|
"""Support for Broadlink sensors."""
import logging
import voluptuous as vol
from homeassistant.components.sensor import (
DEVICE_CLASS_HUMIDITY,
DEVICE_CLASS_ILLUMINANCE,
DEVICE_CLASS_POWER,
DEVICE_CLASS_TEMPERATURE,
PLATFORM_SCHEMA,
STATE_CLASS_MEASUREMENT,
SensorEntity,
)
from homeassistant.const import CONF_HOST, PERCENTAGE, POWER_WATT, TEMP_CELSIUS
from homeassistant.helpers import config_validation as cv
from .const import DOMAIN
from .entity import BroadlinkEntity
from .helpers import import_device
_LOGGER = logging.getLogger(__name__)
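# Each entry maps a monitored condition to a tuple of
# (name suffix, native unit of measurement, device class, state class).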
SENSOR_TYPES = {
"temperature": (
"Temperature",
TEMP_CELSIUS,
DEVICE_CLASS_TEMPERATURE,
STATE_CLASS_MEASUREMENT,
),
"air_quality": ("Air Quality", None, None, None),
"humidity": (
"Humidity",
PERCENTAGE,
DEVICE_CLASS_HUMIDITY,
STATE_CLASS_MEASUREMENT,
),
"light": ("Light", None, DEVICE_CLASS_ILLUMINANCE, None),
"noise": ("Noise", None, None, None),
"power": (
"Current power",
POWER_WATT,
DEVICE_CLASS_POWER,
STATE_CLASS_MEASUREMENT,
),
}
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{vol.Required(CONF_HOST): cv.string}, extra=vol.ALLOW_EXTRA
)
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Import the device and discontinue platform.
This is for backward compatibility.
Do not use this method.
"""
import_device(hass, config[CONF_HOST])
_LOGGER.warning(
"The sensor platform is deprecated, please remove it from your configuration"
)
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up the Broadlink sensor."""
device = hass.data[DOMAIN].devices[config_entry.entry_id]
sensor_data = device.update_manager.coordinator.data
sensors = [
BroadlinkSensor(device, monitored_condition)
for monitored_condition in sensor_data
if monitored_condition in SENSOR_TYPES
and (
# These devices have optional sensors.
# We don't create entities if the value is 0.
sensor_data[monitored_condition] != 0
or device.api.type not in {"RM4PRO", "RM4MINI"}
)
]
async_add_entities(sensors)
class BroadlinkSensor(BroadlinkEntity, SensorEntity):
"""Representation of a Broadlink sensor."""
def __init__(self, device, monitored_condition):
"""Initialize the sensor."""
super().__init__(device)
self._monitored_condition = monitored_condition
self._attr_device_class = SENSOR_TYPES[monitored_condition][2]
self._attr_name = f"{device.name} {SENSOR_TYPES[monitored_condition][0]}"
self._attr_state_class = SENSOR_TYPES[monitored_condition][3]
self._attr_native_value = self._coordinator.data[monitored_condition]
self._attr_unique_id = f"{device.unique_id}-{monitored_condition}"
self._attr_native_unit_of_measurement = SENSOR_TYPES[monitored_condition][1]
def _update_state(self, data):
"""Update the state of the entity."""
self._attr_native_value = data[self._monitored_condition]
|
#!python3
import re
from blackmamba.system import Pythonista
def _comment_line(line, hash_prefix=''):
stripped = line.strip()
if stripped.startswith('#'):
return line
if not stripped:
return hash_prefix + '# \n'
return hash_prefix + '# ' + line[len(hash_prefix):]
_UNCOMMENT_RE = re.compile(r'\A(\s*)#( ?)(.*)\Z', re.DOTALL)
def _uncomment_line(line):
if line.find('#') == -1:
return line
match = _UNCOMMENT_RE.search(line)
if match:
result = match.group(1) + match.group(3)
else:
result = line
if not result.strip():
result = '\n'
return result
_HASH_INDEX_RE = re.compile(r'\A(\s*)')
def _hash_prefix(lines):
prefix = None
for line in lines:
if not line.strip():
continue
match = _HASH_INDEX_RE.search(line)
if not match:
continue
if prefix is None or len(match.group(1)) < len(prefix):
prefix = match.group(1)
if prefix is None:
prefix = ''
return prefix
def _toggle_lines(lines):
if not lines:
return lines
if lines[0].strip().startswith('#'):
comment = False
hash_prefix = ''
else:
comment = True
hash_prefix = _hash_prefix(lines)
replacement = []
for line in lines:
if comment:
replacement.append(_comment_line(line, hash_prefix))
else:
replacement.append(_uncomment_line(line))
return replacement
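# A small example of the toggle behaviour (hypothetical input, shown here only
# as a comment so the module body is unchanged):
#   _toggle_lines(['    x = 1\n', '    y = 2\n'])
#   -> ['    # x = 1\n', '    # y = 2\n']
#   _toggle_lines(['    # x = 1\n'])
#   -> ['    x = 1\n']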
@Pythonista()
def main():
import editor
selection_range = editor.get_selection()
if not selection_range:
# No file opened in the editor
return
text = editor.get_text()
selected_lines_range = editor.get_line_selection()
selected_lines_text = text[selected_lines_range[0]:selected_lines_range[1]]
selected_lines = selected_lines_text.splitlines(True)
last_line_deleted = False
if len(selected_lines) > 1:
# Ignore the last line selection if there's just cursor at the beginning of
# this line and nothing is selected
last_line = selected_lines[-1]
if selected_lines_range[1] - len(last_line) == selection_range[1]:
last_line_deleted = True
del selected_lines[-1]
selected_lines_range = (selected_lines_range[0], selected_lines_range[1] - len(last_line) - 1)
replacement = ''.join(_toggle_lines(selected_lines))
if last_line_deleted:
replacement = replacement[:-1]
editor.replace_text(selected_lines_range[0], selected_lines_range[1], replacement)
editor.set_selection(selected_lines_range[0], selected_lines_range[0] + len(replacement))
if __name__ == '__main__':
main()
|
"""
<Program Name>
test_primary.py
<Purpose>
Unit testing for uptane/clients/primary.py
<Copyright>
See LICENSE for licensing information.
"""
from __future__ import unicode_literals
import uptane # Import before TUF modules; may change tuf.conf values.
import unittest
import os.path
import time
import copy
import shutil
import hashlib
import iso8601
from six.moves.urllib.error import URLError
import tuf
import tuf.formats
import tuf.conf
import tuf.client.updater # to test one of the fields in the Primary object
import uptane.formats
import uptane.clients.primary as primary
import uptane.common # verify sigs, create client dir structure, convert key
import uptane.encoding.asn1_codec as asn1_codec
from uptane.encoding.asn1_codec import DATATYPE_TIME_ATTESTATION
from uptane.encoding.asn1_codec import DATATYPE_ECU_MANIFEST
from uptane.encoding.asn1_codec import DATATYPE_VEHICLE_MANIFEST
# For temporary convenience:
import demo # for generate_key, import_public_key, import_private_key
import json
SAMPLE_DATA_DIR = os.path.join(uptane.WORKING_DIR, 'samples')
TEST_DATA_DIR = os.path.join(uptane.WORKING_DIR, 'tests', 'test_data')
TEST_DIRECTOR_METADATA_DIR = os.path.join(TEST_DATA_DIR, 'director_metadata')
TEST_IMAGE_REPO_METADATA_DIR = os.path.join(
TEST_DATA_DIR, 'image_repo_metadata')
TEST_DIRECTOR_ROOT_FNAME = os.path.join(
TEST_DIRECTOR_METADATA_DIR, 'root.' + tuf.conf.METADATA_FORMAT)
TEST_IMAGE_REPO_ROOT_FNAME = os.path.join(
TEST_IMAGE_REPO_METADATA_DIR, 'root.' + tuf.conf.METADATA_FORMAT)
TEST_PINNING_FNAME = os.path.join(TEST_DATA_DIR, 'pinned.json')
TEMP_CLIENT_DIR = os.path.join(TEST_DATA_DIR, 'temp_test_primary')
# Sample metadata and targets that will be copied to TEMP_CLIENT_DIR to use
# as a local repository for testing.
SAMPLE_METADATA = os.path.join(
uptane.WORKING_DIR, 'samples', 'metadata_samples_long_expiry',
'update_to_one_ecu', 'full_metadata_archive')
SAMPLE_TARGETS = os.path.join(uptane.WORKING_DIR, 'demo', 'images')
# Changing some of these values would require producing new signed sample data
# from the Timeserver or a Secondary.
NONCE = 5
VIN = 'democar'
PRIMARY_ECU_SERIAL = '00000'
def destroy_temp_dir():
# Clean up anything that may currently exist in the temp test directory.
if os.path.exists(TEMP_CLIENT_DIR):
shutil.rmtree(TEMP_CLIENT_DIR)
class TestPrimary(unittest.TestCase):
"""
"unittest"-style test class for the Primary module in the reference
implementation
Please note that these tests are NOT entirely independent of each other.
Several of them build on the results of previous tests. This is an unusual
pattern but saves code and works at least for now.
"""
# Class variables
ecu_key = None
key_timeserver_pub = None
key_timeserver_pri = None
initial_time = None
# I'll initialize instance in the first test, and use it for later tests so
# as to avoid repeated initialization.
instance = None
@classmethod
def setUpClass(cls):
"""
This is run once for the class, before all tests. Since there is only one
class, this runs once. It prepares some variables and stores them in the
class.
"""
destroy_temp_dir()
# Load the private key for this Primary ECU.
cls.ecu_key = uptane.common.canonical_key_from_pub_and_pri(
demo.import_public_key('primary'),
demo.import_private_key('primary'))
# Load the public timeserver key.
cls.key_timeserver_pub = demo.import_public_key('timeserver')
cls.key_timeserver_pri = demo.import_private_key('timeserver')
# Generate a trusted initial time for the Primary.
cls.initial_time = tuf.formats.unix_timestamp_to_datetime(
int(time.time())).isoformat() + 'Z'
tuf.formats.ISO8601_DATETIME_SCHEMA.check_match(cls.initial_time)
@classmethod
def tearDownClass(cls):
"""
This is run once for the class, after all tests. Since there is only one
class, this runs once.
"""
destroy_temp_dir()
def test_01_init(self):
"""
Note that this doesn't test the root files provided, as those aren't used
at all in the initialization; for that, we'll have to test the update cycle.
"""
# Set up a client directory first.
uptane.common.create_directory_structure_for_client(
TEMP_CLIENT_DIR,
TEST_PINNING_FNAME,
{'imagerepo': TEST_IMAGE_REPO_ROOT_FNAME,
'director': TEST_DIRECTOR_ROOT_FNAME})
# Create repository directories that will be accessed locally (using
# file:// URLs) from which to "download" test metadata and targets.
for repository in ["director", "imagerepo"]:
shutil.copytree(
os.path.join(SAMPLE_METADATA, repository),
os.path.join(TEMP_CLIENT_DIR, repository))
# Note that there may be extra targets available here.
shutil.copytree(
SAMPLE_TARGETS, os.path.join(TEMP_CLIENT_DIR, 'imagerepo', 'targets'))
# TODO: Test with invalid pinning file
# TODO: Test with pinning file lacking a Director repo.
# Now try creating a Primary with a series of bad arguments, expecting
# errors.
# TODO: Add test for my_secondaries argument.
# Invalid VIN:
with self.assertRaises(tuf.FormatError):
primary.Primary(
full_client_dir=TEMP_CLIENT_DIR,
director_repo_name=demo.DIRECTOR_REPO_NAME,
vin=5, # INVALID
ecu_serial=PRIMARY_ECU_SERIAL,
primary_key=TestPrimary.ecu_key,
time=TestPrimary.initial_time,
timeserver_public_key=TestPrimary.key_timeserver_pub,
my_secondaries=[])
# Invalid ECU Serial
with self.assertRaises(tuf.FormatError):
primary.Primary(
full_client_dir=TEMP_CLIENT_DIR,
director_repo_name=demo.DIRECTOR_REPO_NAME,
vin=VIN,
ecu_serial=500, # INVALID
primary_key=TestPrimary.ecu_key,
time=TestPrimary.initial_time,
timeserver_public_key=TestPrimary.key_timeserver_pub,
my_secondaries=[])
# Invalid ECU Key
with self.assertRaises(tuf.FormatError):
primary.Primary(
full_client_dir=TEMP_CLIENT_DIR,
director_repo_name=demo.DIRECTOR_REPO_NAME,
vin=VIN,
ecu_serial=PRIMARY_ECU_SERIAL,
primary_key={''}, # INVALID
time=TestPrimary.initial_time,
timeserver_public_key=TestPrimary.key_timeserver_pub,
my_secondaries=[])
# Invalid time:
with self.assertRaises(tuf.FormatError):
primary.Primary(
full_client_dir=TEMP_CLIENT_DIR,
director_repo_name=demo.DIRECTOR_REPO_NAME,
vin=VIN,
ecu_serial=PRIMARY_ECU_SERIAL,
primary_key=TestPrimary.ecu_key,
time='invalid because this is not a time', # INVALID
timeserver_public_key=TestPrimary.key_timeserver_pub,
my_secondaries=[])
# Invalid timeserver key
with self.assertRaises(tuf.FormatError):
primary.Primary(
full_client_dir=TEMP_CLIENT_DIR,
director_repo_name=demo.DIRECTOR_REPO_NAME,
vin=VIN,
ecu_serial=PRIMARY_ECU_SERIAL,
primary_key=TestPrimary.ecu_key, time=TestPrimary.initial_time,
timeserver_public_key=TestPrimary.initial_time, # INVALID
my_secondaries=[])
# Invalid format for Director Repository name
with self.assertRaises(tuf.FormatError):
primary.Primary(
full_client_dir=TEMP_CLIENT_DIR,
director_repo_name=5, #INVALID
vin=VIN,
ecu_serial=PRIMARY_ECU_SERIAL,
primary_key=TestPrimary.ecu_key, time=TestPrimary.initial_time,
timeserver_public_key = TestPrimary.key_timeserver_pub,
my_secondaries=[])
# Invalid name for Director repository
with self.assertRaises(uptane.Error):
primary.Primary(
full_client_dir=TEMP_CLIENT_DIR,
director_repo_name= "invalid", #INVALID
vin=VIN,
ecu_serial=PRIMARY_ECU_SERIAL,
primary_key=TestPrimary.ecu_key, time=TestPrimary.initial_time,
timeserver_public_key = TestPrimary.key_timeserver_pub,
my_secondaries=[])
# Try creating a Primary, expecting it to work.
# Initializes a Primary ECU, making a client directory and copying the root
# file from the repositories.
# Save the result for future tests, to save time and code.
TestPrimary.instance = primary.Primary(
full_client_dir=TEMP_CLIENT_DIR,
director_repo_name=demo.DIRECTOR_REPO_NAME,
vin=VIN,
ecu_serial=PRIMARY_ECU_SERIAL,
primary_key=TestPrimary.ecu_key,
time=TestPrimary.initial_time,
timeserver_public_key=TestPrimary.key_timeserver_pub)
# Check the fields initialized in the instance to make sure they're correct.
self.assertEqual([], TestPrimary.instance.nonces_to_send)
self.assertEqual([], TestPrimary.instance.nonces_sent)
self.assertEqual(VIN, TestPrimary.instance.vin)
self.assertEqual(PRIMARY_ECU_SERIAL, TestPrimary.instance.ecu_serial)
self.assertEqual(TestPrimary.ecu_key, TestPrimary.instance.primary_key)
self.assertEqual(dict(), TestPrimary.instance.ecu_manifests)
self.assertEqual(
TestPrimary.instance.full_client_dir, TEMP_CLIENT_DIR)
self.assertIsInstance(
TestPrimary.instance.updater, tuf.client.updater.Updater)
tuf.formats.ANYKEY_SCHEMA.check_match(
TestPrimary.instance.timeserver_public_key)
self.assertEqual([], TestPrimary.instance.my_secondaries)
# Now, fix the updater's pinned metadata to point it to the appropriate
# local directory, since the pinned metadata we fed in was actually for the
# live demo, connecting to localhost:30401. We instead want to use a
# local directory via file://.
# TODO: Determine if this code should be adjusted to use os.path.join(),
# or if that's not appropriate for file:// links.
image_repo_mirror = ['file://' + TEMP_CLIENT_DIR + '/imagerepo']
director_mirror = ['file://' + TEMP_CLIENT_DIR + '/director']
repository_urls = TestPrimary.instance.updater.pinned_metadata['repositories']
repository_urls['imagerepo']['mirrors'] = image_repo_mirror
repository_urls['director']['mirrors'] = director_mirror
# Also fix the copied pinned metadata in the individual repo updaters
# in the updater.
TestPrimary.instance.updater.repositories['imagerepo'].mirrors = image_repo_mirror
TestPrimary.instance.updater.repositories['director'].mirrors = director_mirror
def test_05_register_new_secondary(self):
self.assertEqual([], TestPrimary.instance.my_secondaries)
TestPrimary.instance.register_new_secondary('1352')
self.assertIn('1352', TestPrimary.instance.my_secondaries)
def test_10_register_ecu_manifest(self):
    # Throughout this function, I'll use a different nonce in each call to
# register_ecu_manifest, and check that the ones in calls expected to
# succeed have been noted and that the ones in calls expected to fail have
# not been noted.
# Starting with an empty ecu manifest dictionary.
self.assertEqual(dict(), TestPrimary.instance.ecu_manifests)
# Make sure we're starting with no nonces sent or to send.
self.assertEqual([], TestPrimary.instance.nonces_to_send)
self.assertEqual([], TestPrimary.instance.nonces_sent)
# Load the manifests we'll use in these tests.
# Note that the .json and .der manifest samples aren't identical; they're
# signed over different data, so to get the JSON version of the DER
# manifests, we'll convert them.
# We'll always need the JSON encodings for testing, and we'll load the
# ASN.1/DER manifests only if we're in DER mode.
# 1: Correctly signed ECU manifest from ECU TCUdemocar (good sample)
# 2: Correctly signed ECU manifest from ECU unknown_ecu
# 3: ECU Manifest from ECU TCUdemocar signed by the wrong key
# (demo's Image Repo timestamp key in particular, instead of demo's
# Secondary key)
# 4: Correctly signed ECU manifest from TCUdemocar w/ attack report
if tuf.conf.METADATA_FORMAT == 'json':
manifest1 = manifest1_json = json.load(open(os.path.join(SAMPLE_DATA_DIR,
'sample_ecu_manifest_TCUdemocar.json')))
manifest2 = manifest2_json = json.load(open(os.path.join(TEST_DATA_DIR,
'flawed_manifests', 'em2_unknown_ecu_manifest.json')))
manifest3 = manifest3_json = json.load(open(os.path.join(TEST_DATA_DIR,
'flawed_manifests', 'em3_ecu_manifest_signed_with_wrong_key.json')))
manifest4 = manifest4_json = json.load(open(os.path.join(TEST_DATA_DIR,
'flawed_manifests', 'em4_attack_detected_in_ecu_manifest.json')))
else:
assert tuf.conf.METADATA_FORMAT == 'der', 'Test code is flawed.'
manifest1 = open(os.path.join(SAMPLE_DATA_DIR,
'sample_ecu_manifest_TCUdemocar.der'), 'rb').read()
manifest1_json = asn1_codec.convert_signed_der_to_dersigned_json(
manifest1, DATATYPE_ECU_MANIFEST)
manifest2 = open(os.path.join(TEST_DATA_DIR, 'flawed_manifests',
'em2_unknown_ecu_manifest.der'), 'rb').read()
manifest2_json = asn1_codec.convert_signed_der_to_dersigned_json(
manifest2, DATATYPE_ECU_MANIFEST)
manifest3 = open(os.path.join(TEST_DATA_DIR, 'flawed_manifests',
'em3_ecu_manifest_signed_with_wrong_key.der'), 'rb').read()
manifest3_json = asn1_codec.convert_signed_der_to_dersigned_json(
manifest3, DATATYPE_ECU_MANIFEST)
manifest4 = open(os.path.join(TEST_DATA_DIR, 'flawed_manifests',
'em4_attack_detected_in_ecu_manifest.der'), 'rb').read()
manifest4_json = asn1_codec.convert_signed_der_to_dersigned_json(
manifest4, DATATYPE_ECU_MANIFEST)
# Register two Secondaries with the Primary.
TestPrimary.instance.register_new_secondary('TCUdemocar')
TestPrimary.instance.register_new_secondary('ecu11111')
# Start with a sequence of tests with bad arguments but an otherwise
# correct ECU Manifest, manifest1.
# Try using a VIN that is not the Primary's VIN (ECU Manifest apparently
# from another car!)
with self.assertRaises(uptane.UnknownVehicle):
TestPrimary.instance.register_ecu_manifest(
vin='13105941', # unexpected VIN
ecu_serial='TCUdemocar', nonce=1,
signed_ecu_manifest=manifest1)
# Try using the wrong ECU Serial - one that is registered, but which does
# not match the ECU Serial listed in the ECU Manifest itself.
with self.assertRaises(uptane.Spoofing):
TestPrimary.instance.register_ecu_manifest(
vin=VIN,
ecu_serial='ecu11111', # not the same ECU Serial in the manifest
nonce=2, signed_ecu_manifest=manifest1)
# Try using an ECU Serial that the Primary is not aware of.
with self.assertRaises(uptane.UnknownECU):
TestPrimary.instance.register_ecu_manifest(
          vin=VIN,
ecu_serial='an unknown secondary ecu serial', # unexpected ECU Serial
nonce=3,
signed_ecu_manifest=manifest1)
# Register the ECU Manifest correctly this time.
TestPrimary.instance.register_ecu_manifest(
vin=VIN, ecu_serial='TCUdemocar', nonce=10,
signed_ecu_manifest=manifest1)
# Make sure the provided manifest is now in the Primary's ecu manifests
# dictionary. Note that the Primary holds manifests as JSON-compatible
# Python dictionaries regardless of the format it receives them in.
self.assertIn('TCUdemocar', TestPrimary.instance.ecu_manifests)
self.assertIn(
manifest1_json, TestPrimary.instance.ecu_manifests['TCUdemocar'])
# Make sure the nonce provided was noted in the right place.
self.assertIn(10, TestPrimary.instance.nonces_to_send)
self.assertEqual([], TestPrimary.instance.nonces_sent)
# Though this is not required functionality, test register_ecu_manifest
# with JSON manifests as well, even if we're running in DER mode.
# And make sure force_pydict=True doesn't break if we're already in JSON
# mode, either.
TestPrimary.instance.register_ecu_manifest(
VIN, 'TCUdemocar', nonce=11, signed_ecu_manifest=manifest1_json,
force_pydict=True)
# The next tests use ECU Manifests that contain problematic values.
    # (We're now testing things beyond just the arguments provided.)
# If we're running in DER mode, we'll try both DER and JSON manifests.
# If we're running in JSON mode, we'll only try JSON manifests
# (though in JSON mode, we'll run twice, once with force_pydict on
# to make sure that run doesn't break despite the redundant argument).
# The list again is:
# 2: Correctly signed ECU manifest from ECU unknown_ecu
# 3: ECU Manifest from ECU TCUdemocar signed by the wrong key
# 4: Correctly signed ECU manifest from TCUdemocar w/ attack report
# Case 2: We won't save the ECU Manifest from an unknown ECU Serial.
self.assertNotIn('unknown_ecu', TestPrimary.instance.ecu_manifests)
self.assertNotIn(
manifest2_json, TestPrimary.instance.ecu_manifests['TCUdemocar'])
with self.assertRaises(uptane.UnknownECU):
TestPrimary.instance.register_ecu_manifest(
'democar', 'unknown_ecu', nonce=4, signed_ecu_manifest=manifest2)
with self.assertRaises(uptane.UnknownECU):
TestPrimary.instance.register_ecu_manifest(
'democar', 'unknown_ecu', nonce=5,
signed_ecu_manifest=manifest2_json, force_pydict=True)
self.assertNotIn('unknown_ecu', TestPrimary.instance.ecu_manifests)
self.assertNotIn( # Make sure it's not in the wrong list of ECU Manifests
manifest2_json, TestPrimary.instance.ecu_manifests['TCUdemocar'])
# Case 3: ECU Manifest signed with the wrong key: we save it anyway and
# send it on to the Director like any other; Primaries don't check
# the signatures on ECU Manifests: they can't be expected to know
# the right public or symmetric keys.
self.assertNotIn(
manifest3_json, TestPrimary.instance.ecu_manifests['TCUdemocar'])
TestPrimary.instance.register_ecu_manifest(
'democar', 'TCUdemocar', nonce=12, signed_ecu_manifest=manifest3)
TestPrimary.instance.register_ecu_manifest(
'democar', 'TCUdemocar', nonce=13, signed_ecu_manifest=manifest3_json,
force_pydict=True)
self.assertIn(
manifest3_json, TestPrimary.instance.ecu_manifests['TCUdemocar'])
# Case 4: ECU Manifest containing an attack report. Make sure it doesn't
# fail to be registered.
self.assertNotIn(
manifest4_json, TestPrimary.instance.ecu_manifests['TCUdemocar'])
TestPrimary.instance.register_ecu_manifest(
'democar', 'TCUdemocar', nonce=14, signed_ecu_manifest=manifest4)
TestPrimary.instance.register_ecu_manifest(
'democar', 'TCUdemocar', nonce=15, signed_ecu_manifest=manifest4_json,
force_pydict=True)
self.assertIn(
manifest4_json, TestPrimary.instance.ecu_manifests['TCUdemocar'])
# Confirm that we've succeeded in registering the right nonces.
for this_nonce in [1, 2, 3, 4, 5]:
self.assertNotIn(this_nonce, TestPrimary.instance.nonces_to_send)
for this_nonce in [10, 11, 12, 13, 14, 15]:
self.assertIn(this_nonce, TestPrimary.instance.nonces_to_send)
def test_15_get_nonces_to_send_and_rotate(self):
# The Primary's list of nonces to send in the next request to the
# timeserver for a time attestation:
nonces_to_have_sent = TestPrimary.instance.nonces_to_send
# Double-check that one of the expected nonces from the previous test
# function is in the list of the Primary's nonces to send.
self.assertIn(10, nonces_to_have_sent)
# Cycle nonces: Request the list of nonces to send to the timeserver,
# triggering the rotation of nonces. Make sure the nonce list provided
# is as expected from the previous test, and then that the rotation has
# actually occurred (nonces_to_send emptied, contents moved to nonces_sent).
self.assertEqual(
sorted(nonces_to_have_sent),
sorted(TestPrimary.instance.get_nonces_to_send_and_rotate()))
self.assertEqual(nonces_to_have_sent, TestPrimary.instance.nonces_sent)
self.assertEqual([], TestPrimary.instance.nonces_to_send)
def test_20_update_time(self):
# First, confirm that we've never verified a timeserver attestation, and/or
# that that results in get_last_timeserver_attestation returning None.
self.assertIsNone(TestPrimary.instance.get_last_timeserver_attestation())
# Try a good time attestation first, signed by an expected timeserver key,
# with an expected nonce (previously "received" from a Secondary)
original_time_attestation = time_attestation = {
'signed': {'nonces': [NONCE], 'time': '2016-11-02T21:06:05Z'},
'signatures': [{
'method': 'ed25519',
'sig': 'aabffcebaa57f1d6397bdc5647764261fd23516d2996446c3c40b3f30efb2a4a8d80cd2c21a453e78bf99dafb9d0f5e56c4e072db365499fa5f2f304afec100e',
'keyid': '79c796d7e87389d1ebad04edce49faef611d139ee41ea9fb1931732afbfaac2e'}]}
if tuf.conf.METADATA_FORMAT == 'der':
# Convert this time attestation to the expected ASN.1/DER format.
time_attestation = asn1_codec.convert_signed_metadata_to_der(
original_time_attestation, DATATYPE_TIME_ATTESTATION,
private_key=TestPrimary.key_timeserver_pri, resign=True)
# Check expected base conditions before updating time:
# The only timeserver times registered should be one added during
# initialization. Because the clock override is a module variable in TUF,
# its value (whether None or already set) depends on whether or not other
# tests resulting in time attestation verification have occurred (e.g.
# those for the Primary).
self.assertEqual(1, len(TestPrimary.instance.all_valid_timeserver_times))
initial_clock_override = tuf.conf.CLOCK_OVERRIDE
# In the previous functions, we added a variety of nonces in the nonce
# rotation. Verification of a time attestation confirms that the time
# attestation contains the nonces we've most recently sent to the
# timeserver. The sample attestation we have here does not have the nonces
# we've indicated to the Primary that we've sent, so this verification
# should fail:
with self.assertRaises(uptane.BadTimeAttestation):
TestPrimary.instance.update_time(time_attestation)
# Check results. The bad attestation should change none of these.
self.assertEqual(1, len(TestPrimary.instance.all_valid_timeserver_times))
self.assertEqual(initial_clock_override, tuf.conf.CLOCK_OVERRIDE)
# Now we adjust the Primary's notion of what nonces we sent to the
# timeserver most recently, and then try the verification again, expecting
# it to succeed.
TestPrimary.instance.get_nonces_to_send_and_rotate()
TestPrimary.instance.nonces_to_send = [NONCE]
TestPrimary.instance.get_nonces_to_send_and_rotate()
TestPrimary.instance.update_time(time_attestation)
# Check results. Among other things, since the verification succeeded,
# get_last_timeserver_attestation should return the attestation we just
# provided.
self.assertEqual(
time_attestation,
TestPrimary.instance.get_last_timeserver_attestation())
self.assertEqual(2, len(TestPrimary.instance.all_valid_timeserver_times))
self.assertEqual(
int(tuf.formats.datetime_to_unix_timestamp(iso8601.parse_date(
'2016-11-02T21:06:05Z'))), tuf.conf.CLOCK_OVERRIDE)
# Prepare to try again with a bad signature.
    # We conduct this test differently depending on TUF's current format:
if tuf.conf.METADATA_FORMAT == 'der':
# Fail to re-sign the DER, so that the signature is over JSON instead,
# which results in a bad signature.
time_attestation__badsig = asn1_codec.convert_signed_metadata_to_der(
original_time_attestation, DATATYPE_TIME_ATTESTATION, resign=False)
else: # 'json' format
# Rewrite the first 9 digits of the signature ('sig') to something
# invalid.
time_attestation__badsig = {
'signed': {'nonces': [NONCE], 'time': '2016-11-02T21:06:05Z'},
'signatures': [{
'method': 'ed25519',
'sig': '987654321a57f1d6397bdc5647764261fd23516d2996446c3c40b3f30efb2a4a8d80cd2c21a453e78bf99dafb9d0f5e56c4e072db365499fa5f2f304afec100e',
'keyid': '79c796d7e87389d1ebad04edce49faef611d139ee41ea9fb1931732afbfaac2e'}]}
# Now actually perform the bad signature test.
with self.assertRaises(tuf.BadSignatureError):
TestPrimary.instance.update_time(time_attestation__badsig)
assert 500 not in original_time_attestation['signed']['nonces'], \
'Programming error: bad and good test nonces are equal.'
time_attestation__wrongnonce = {
'signed': {'nonces': [500], 'time': '2016-11-02T21:15:00Z'},
'signatures': [{
'method': 'ed25519',
'sig': '4d01df35ca829fd7ead1408c250950c444db8ac51fa929a7f0288578fbf81016f0e81ed35789689481aee6b7af28ab311306397ef38572732854fb6cf2072604',
'keyid': '79c796d7e87389d1ebad04edce49faef611d139ee41ea9fb1931732afbfaac2e'}]}
if tuf.conf.METADATA_FORMAT == 'der':
# Convert this time attestation to the expected ASN.1/DER format.
time_attestation__wrongnonce = asn1_codec.convert_signed_metadata_to_der(
time_attestation__wrongnonce, DATATYPE_TIME_ATTESTATION,
private_key=TestPrimary.key_timeserver_pri, resign=True)
with self.assertRaises(uptane.BadTimeAttestation):
TestPrimary.instance.update_time(
time_attestation__wrongnonce)
# TODO: Consider other tests here.
def test_25_generate_signed_vehicle_manifest(self):
vehicle_manifest = TestPrimary.instance.generate_signed_vehicle_manifest()
# If the vehicle manifest is in DER format, check its format and then
# convert back to JSON so that we can inspect it further.
if tuf.conf.METADATA_FORMAT == 'der':
uptane.formats.DER_DATA_SCHEMA.check_match(vehicle_manifest)
vehicle_manifest = asn1_codec.convert_signed_der_to_dersigned_json(
vehicle_manifest, DATATYPE_VEHICLE_MANIFEST)
# Now it's not in DER format, whether or not it started that way.
# Check its format and inspect it.
uptane.formats.SIGNABLE_VEHICLE_VERSION_MANIFEST_SCHEMA.check_match(
vehicle_manifest)
# Test contents of vehicle manifest.
# Make sure there is exactly one signature.
self.assertEqual(1, len(vehicle_manifest['signatures']))
# Make sure that the Secondary's ECU Manifest (from the register ECU
    # Manifest test above) is listed in the Vehicle Manifest.
self.assertIn(
'TCUdemocar', vehicle_manifest['signed']['ecu_version_manifests'])
# TODO: More testing of the contents of the vehicle manifest.
# Check the signature on the vehicle manifest.
self.assertTrue(uptane.common.verify_signature_over_metadata(
TestPrimary.ecu_key,
vehicle_manifest['signatures'][0], # TODO: Deal with 1-sig assumption?
vehicle_manifest['signed'],
DATATYPE_VEHICLE_MANIFEST))
def test_30_refresh_toplevel_metadata(self):
# Check that in the fresh temp directory for this test Primary client,
# there aren't any metadata files except root.json yet.
self.assertEqual(
['root.der', 'root.json'],
sorted(os.listdir(TEST_DIRECTOR_METADATA_DIR)))
self.assertEqual(
['root.der', 'root.json'],
sorted(os.listdir(TEST_IMAGE_REPO_METADATA_DIR)))
try:
TestPrimary.instance.refresh_toplevel_metadata()
except (URLError, tuf.NoWorkingMirrorError) as e:
pass
else:
# Check the resulting top-level metadata files in the client directory.
# Expect root, snapshot, targets, and timestamp for both director and
# image repo.
for repo in ['director', 'imagerepo']:
self.assertEqual(
['root.' + tuf.conf.METADATA_FORMAT,
'snapshot.' + tuf.conf.METADATA_FORMAT,
'targets.' + tuf.conf.METADATA_FORMAT,
'timestamp.' + tuf.conf.METADATA_FORMAT],
sorted(os.listdir(os.path.join(TEMP_CLIENT_DIR, 'metadata', repo,
'current'))))
def test_35_get_target_list_from_director(self):
# TODO: Write this in a way that draws on saved sample Director metadata.
# Don't expect an actual server to be running.
# This will probably entail modification to the pinned.json file to
# point it to a local directory instead of a remote server.
#directed_targets = TestPrimary.instance.test_35_get_target_list_from_director
pass
def test_40_get_validated_target_info(self):
# TODO: Write this in a way that draws on saved sample metadata from the
# Director and Image Repo. Don't expect an actual server to be
# running. This will probably entail modification to the pinned.json
# file to point it to a local directory instead of a remote server.
pass
def test_55_update_exists_for_ecu(self):
# The various ECU Serials of Secondary ECUs we'll test:
# 1: Registered with the Primary but NOT listed in Director metadata
# (i.e. will not have any updates assigned)
known_secondary_with_no_updates = "secondary_without_updates"
# 2: NOT registered w/ the Primary and NOT listed in Director metadata
unknown_secondary = "unknown_ecu_serial"
# 3: Registered with the Primary and listed in Director metadata
normal_secondary = "TCUdemocar"
# 4: Invalid name for a Secondary (wrong format)
invalid_name_secondary = 5
# Register the Secondaries with the Primary and make sure registration
# succeeded.
TestPrimary.instance.register_new_secondary(known_secondary_with_no_updates)
TestPrimary.instance.register_new_secondary(normal_secondary)
self.assertIn(
known_secondary_with_no_updates, TestPrimary.instance.my_secondaries)
self.assertIn(normal_secondary, TestPrimary.instance.my_secondaries)
# Try registering a Secondary that has already been registered with the
# Primary. Expect success??? # TODO: Clarify.
TestPrimary.instance.register_new_secondary(known_secondary_with_no_updates)
# Try registering an invalid name.
with self.assertRaises(tuf.FormatError):
TestPrimary.instance.register_new_secondary(invalid_name_secondary)
# Confirm that unknown_secondary has not been registered.
with self.assertRaises(uptane.UnknownECU):
TestPrimary.instance._check_ecu_serial(unknown_secondary)
# Run a primary update cycle so that the Primary fetches and validates
# metadata and targets from the "repositories" (in this test, the
# repositories sit in a local folder accessed by file://).
# This also processes the data acquired to populate fields accessed by
# Secondaries below.
TestPrimary.instance.primary_update_cycle()
# Try to find out if updates exist for an unknown ECU.
with self.assertRaises(uptane.UnknownECU):
TestPrimary.instance.update_exists_for_ecu(unknown_secondary)
# Find out if updates exist for a known ECU that has no updates assigned to
# it by the Director (expect empty list).
self.assertFalse(TestPrimary.instance.update_exists_for_ecu(
known_secondary_with_no_updates))
# Confirm that updates exist for a known ECU to which we've assigned
# updates (list is not empty).
self.assertTrue(TestPrimary.instance.update_exists_for_ecu(
normal_secondary))
# Run the update cycle again to test file/archive replacement when an
# update cycle has already occurred.
TestPrimary.instance.primary_update_cycle()
def test_60_get_image_fname_for_ecu(self):
# TODO: More thorough tests.
with self.assertRaises(uptane.UnknownECU):
TestPrimary.instance.get_image_fname_for_ecu('unknown')
# Expect an image.
image_fname = TestPrimary.instance.get_image_fname_for_ecu('TCUdemocar')
self.assertTrue(image_fname)
tuf.formats.RELPATH_SCHEMA.check_match(image_fname)
# Fetch the image filename for an ECU that has had no update assigned it,
# expecting None.
self.assertIsNone(TestPrimary.instance.get_image_fname_for_ecu(
'secondary_without_updates'))
def test_61_get_full_metadata_archive_fname(self):
# TODO: More thorough tests.
archive_fname = TestPrimary.instance.get_full_metadata_archive_fname()
self.assertTrue(archive_fname)
tuf.formats.RELPATH_SCHEMA.check_match(archive_fname)
def test_62_get_partial_metadata_fname(self):
# TODO: More thorough tests.
fname = TestPrimary.instance.get_partial_metadata_fname()
self.assertTrue(fname)
tuf.formats.RELPATH_SCHEMA.check_match(fname)
def test_65_get_metadata_for_ecu(self):
pass
def test_70_get_last_timeserver_attestation(self):
# get_last_timeserver_attestation is tested in more detail in a previous
# test, test_20_update_time.
attestation = TestPrimary.instance.get_last_timeserver_attestation()
# We expect to have verified an attestation in previous tests.
self.assertIsNotNone(attestation)
if tuf.conf.METADATA_FORMAT == 'der':
uptane.formats.DER_DATA_SCHEMA.check_match(attestation)
else:
assert tuf.conf.METADATA_FORMAT == 'json', 'Coding error in test.'
uptane.formats.SIGNABLE_TIMESERVER_ATTESTATION_SCHEMA.check_match(
attestation)
# Run unit test.
if __name__ == '__main__':
unittest.main()
|
# -*- coding: utf-8 -*-
"""Calculate the mobility demand.
SPDX-FileCopyrightText: 2016-2019 Uwe Krien <krien@uni-bremen.de>
SPDX-License-Identifier: MIT
"""
__copyright__ = "Uwe Krien <krien@uni-bremen.de>"
__license__ = "MIT"
import os
import pandas as pd
from collections import namedtuple
from reegis import geometries, config as cfg, tools, energy_balance
def format_kba_table(filename, sheet):
"""
Clean the layout of the table.
The tables are made for human readability and not for automatic processing.
Lines with subtotals and format-strings of the column names are removed.
A valid MultiIndex is created to make it easier to filter the table by the
index.
Parameters
----------
filename : str
Path and name of the excel file.
sheet : str
Name of the sheet of the excel table.
Returns
-------
pandas.DataFrame
"""
# Read table
df = pd.read_excel(filename, sheet, skiprows=7, header=[0, 1])
# Drop empty column
df = df.drop([("Unnamed: 0_level_0", "Unnamed: 0_level_1")], axis=1)
idx1 = df.columns[0]
idx2 = df.columns[1]
idx3 = df.columns[2]
# Remove lines with subtotal
df.loc[(df[idx1] == "SONSTIGE"), idx2] = "SONSTIGE"
df.loc[(df[idx1] == "SONSTIGE"), idx3] = "00000 SONSTIGE"
df = df.drop(df.loc[df[idx3].isnull()].index)
df[df.columns[[0, 1, 2]]] = df[df.columns[[0, 1, 2]]].fillna(
method="ffill"
)
# Add column with name of subregion and remove name from index
df[df.columns[2]] = df[df.columns[2]].str[:5]
# set MultiIndex
df.set_index(list(df.columns[[0, 1, 2]]), inplace=True)
df.index = df.index.set_names(["state", "region", "subregion"])
# Remove format-strings from column names
level1 = (
df.columns.get_level_values(1)
.str.replace("\n", " ")
.str.replace("- ", "")
.str.replace(":", "")
)
level0 = (
df.columns.get_level_values(0)
.str.replace("\n", " ")
.str.replace("- ", "")
.str.replace(":", "")
)
df.columns = pd.MultiIndex.from_arrays([level0, level1])
return df
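# Hedged usage sketch (not part of the original module; the filename and index
# value below are illustrative): once a local copy of the KBA excel file is
# available, the cleaned frame can be filtered through its
# (state, region, subregion) MultiIndex, e.g.
#
#   kfz = format_kba_table("fz1_local_copy.xlsx", "Kfz_u_Kfz_Anh")
#   kfz.xs("SCHLESWIG-HOLSTEIN", level="state")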
def get_kba_table():
"""
Get the "kfz" table for all vehicles and the "pkw" table for more
statistics about passenger cars.
Returns
-------
namedtuple
Examples
--------
>>> table = get_kba_table()
>>> kfz = table.kfz
>>> print(type(kfz))
<class 'pandas.core.frame.DataFrame'>
"""
kba_table = namedtuple("kba_table", "kfz pkw")
kba_filename = os.path.join(
cfg.get("paths", "general"), cfg.get("mobility", "table_kba")
)
    # Download table if it does not exist
if not os.path.isfile(kba_filename):
tools.download_file(kba_filename, cfg.get("mobility", "url_kba"))
return kba_table(
kfz=format_kba_table(kba_filename, "Kfz_u_Kfz_Anh"),
pkw=format_kba_table(kba_filename, "Pkw"),
)
def get_mileage_table():
"""
Download mileage table from the KBA (Kraftfahrtbundesamt) and store it
locally.
"""
url = (
"https://www.kba.de/SharedDocs/Publikationen/DE/Statistik/"
"Kraftverkehr/VK/2018/vk_2018_xlsx.xlsx?__blob=publicationFile&v=22"
)
mileage_filename = os.path.join(
cfg.get("paths", "general"), "mileage_table_kba.xlsx"
)
    # Download table if it does not exist
if not os.path.isfile(mileage_filename):
tools.download_file(mileage_filename, url)
return mileage_filename
def get_sheet_from_mileage_table(sheet):
"""Load given sheet from the mileage file."""
fn = get_mileage_table()
df = pd.read_excel(
fn, sheet, skiprows=7, index_col=[0, 1, 2], skipfooter=9
)
df.index = df.index.droplevel(0).set_names(["", ""])
return df.drop(
df.loc[pd.IndexSlice[slice(None), "Insgesamt"], slice(None)].index
)
def get_mileage_by_type_and_fuel(year=2018):
"""
Get mileage by type and fuel from mileage table and other sources.
See mobility.ini file for more information.
"""
# get km per year and type
total = (
get_sheet_from_mileage_table("VK 1.1")
.loc["Jahresfahrleistung in 1.000 km", str(year)]
.mul(1000)
)
passenger = (
get_sheet_from_mileage_table("VK 1.7")
.loc["Jahresfahrleistung in 1.000 km", str(year)]
.mul(1000)
)
small_trucks = (
get_sheet_from_mileage_table("VK 1.17")
.loc["Jahresfahrleistung in 1.000 km", str(year)]
.mul(1000)
)
medium_trucks = (
get_sheet_from_mileage_table("VK 1.20")
.loc["Jahresfahrleistung in 1.000 km", str(year)]
.mul(1000)
)
big_trucks_diesel = (
get_sheet_from_mileage_table("VK 1.23")
.loc["Jahresfahrleistung in 1.000 km", str(year)]
.mul(1000)
.sum()
)
df = pd.DataFrame(index=total.index, columns=["diesel", "petrol", "other"])
vt_dict = cfg.get_dict("vehicle_types_dictionary")
df.rename(vt_dict, axis=0, inplace=True)
total.rename(vt_dict, axis=0, inplace=True)
dc = cfg.get_dict("fuel_dictionary")
# add km by fuel for passenger cars
df.loc["passenger car"] = passenger.rename(dc, axis=0)
# add km by fuel for small trucks (<= 3.5 tons)
df.loc["small truck (max. 3.5 tons)"] = small_trucks.rename(dc, axis=0)
# add km by fuel for medium trucks (3.5 < weight <= 7.5 tons)
df.loc["medium truck (3.5 to 7.5 tons)"] = medium_trucks.rename(dc, axis=0)
# add km by fuel for big trucks (> 7.5 tons)
# assuming that non-diesel engines are 50% petrol and 50% other
n = "big truck (over 7.5 tons)"
df.loc[n, "diesel"] = big_trucks_diesel
df.loc[n, ["petrol", "other"]] = (total[n] - big_trucks_diesel) / 2
fuel_share = pd.DataFrame(
cfg.get_dict_list("fuel share"), index=["diesel", "petrol", "other"]
).astype(float)
for col in fuel_share.columns:
df.loc[col] = fuel_share[col].mul(total[col])
return df
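# Hedged usage sketch (illustrative, not from the original source): the frame
# returned above is indexed by vehicle type with one column per fuel group, so
# the annual diesel mileage of passenger cars could be read roughly as
#
#   mileage = get_mileage_by_type_and_fuel(2018)
#   mileage.loc["passenger car", "diesel"]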
def create_grouped_table_kfz():
"""Group the kfz-table by main groups."""
df = get_kba_table().kfz
df.index = df.index.droplevel([0, 1])
df.columns = [" ".join(col).strip() for col in df.columns]
kfz_dict = cfg.get_dict("KFZ")
for col in df.columns:
df[col] = pd.to_numeric(df[col].replace("-", ""))
df = df.groupby(by=kfz_dict, axis=1).sum()
df["traction engine, general"] = (
df["traction engine"] - df["traction engine, agriculture and forestry"]
)
df.drop("traction engine", axis=1, inplace=True)
df.drop("ignore", axis=1, inplace=True)
return df
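# Hedged usage sketch (illustrative only): the grouped kfz frame is indexed by
# the five-digit subregion key with one column per vehicle group, e.g.
#
#   kfz = create_grouped_table_kfz()
#   kfz["passenger car"].sum()  # total passenger cars over all subregions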
def create_grouped_table_pkw():
"""
Extract fuel groups of passenger cars
Examples
--------
>>> pkw = create_grouped_table_pkw()
>>> pkw['petrol'].sum()
31031021.0
>>> pkw['diesel'].sum()
15153364.0
"""
df = get_kba_table().pkw
df.index = df.index.droplevel([0, 1])
df = df["Nach Kraftstoffarten"]
df = df.groupby(by=cfg.get_dict("PKW"), axis=1).sum()
df.drop("ignore", axis=1, inplace=True)
return df
def get_admin_by_region(region):
"""
Allocate admin keys to the given regions.
Parameters
----------
region : geopandas.GeoDataFrame
Returns
-------
pd.DataFrame
"""
fn = os.path.join(cfg.get("paths", "geometry"), "vg1000_geodata.geojson")
vg = geometries.load(fullname=fn)
vg.set_index("RS", inplace=True)
reg2vg = geometries.spatial_join_with_buffer(
vg.representative_point(), region, "fs", limit=0
)
return pd.DataFrame(reg2vg.drop("geometry", axis=1))
def get_grouped_kfz_by_region(region):
"""
Get the main vehicle groups by region.
Parameters
----------
region : geopandas.GeoDataFrame
Returns
-------
pd.DataFrame
Examples
--------
>>> fs = geometries.get_federal_states_polygon()
>>> total = get_grouped_kfz_by_region(fs).sum()
>>> int(total["passenger car"])
47095784
>>> int(total["lorry, > 7500"])
295826
"""
df = create_grouped_table_kfz()
reg2vg = get_admin_by_region(region)
df2reg = df.merge(reg2vg, left_index=True, right_index=True, how="left")
df2reg["fs"] = df2reg["fs"].fillna("unknown")
return df2reg.groupby("fs").sum()
def get_traffic_fuel_energy(year):
"""
Parameters
----------
year : int
Returns
-------
Examples
--------
>>> fuel_energy = get_traffic_fuel_energy(2017)
>>> int(fuel_energy["Ottokraftstoffe"])
719580
>>> fuel_share = fuel_energy.div(fuel_energy.sum()) * 100
>>> round(fuel_share["Dieselkraftstoffe"], 1)
62.7
"""
fuel_energy = energy_balance.get_de_balance(year).loc["Straßenverkehr"]
fuel_energy = fuel_energy[fuel_energy != 0]
fuel_energy.drop(
["primär (gesamt)", "sekundär (gesamt)", "Row", "gesamt"], inplace=True
)
return fuel_energy
def calculate_mobility_energy_use(year):
"""
Parameters
----------
year
Returns
-------
Examples
--------
>>> mobility_balance = get_traffic_fuel_energy(2017)
>>> energy_use = calculate_mobility_energy_use(2017)
>>> p = "Petrol usage [TJ]"
>>> d = "Diesel usage [TJ]"
>>> o = "Overall fuel usage [TJ]"
>>> print(p, "(energy balance):", int(mobility_balance["Ottokraftstoffe"]))
Petrol usage [TJ] (energy balance): 719580
>>> print(p, "(calculated):", int(energy_use["petrol"].sum()))
Petrol usage [TJ] (calculated): 803603
>>> print(d, "(energy balance):",
... int(mobility_balance["Dieselkraftstoffe"]))
Diesel usage [TJ] (energy balance): 1425424
>>> print(d, "(calculated):", int(energy_use["diesel"].sum()))
Diesel usage [TJ] (calculated): 1636199
>>> print(o, "(energy balance):", int(mobility_balance.sum()))
Overall fuel usage [TJ] (energy balance): 2275143
>>> print(o, "(calculated):", int(energy_use.sum().sum()))
Overall fuel usage [TJ] (calculated): 2439803
"""
# fetch table of mileage by fuel and vehicle type
mileage = get_mileage_by_type_and_fuel(year)
# fetch table of specific demand by fuel and vehicle type (from 2011)
spec_demand = (
pd.DataFrame(
cfg.get_dict_list("fuel consumption"),
index=["diesel", "petrol", "other"],
)
.astype(float)
.transpose()
)
# fetch the energy content of the different fuel types
energy_content = pd.Series(cfg.get_dict("energy_per_liter"))[
["diesel", "petrol", "other"]
]
return mileage.mul(spec_demand).mul(energy_content) / 10 ** 6
if __name__ == "__main__":
pass
|
_base_ = ['../../../../_base_/datasets/aic.py']
log_level = 'INFO'
load_from = None
resume_from = None
dist_params = dict(backend='nccl')
workflow = [('train', 1)]
checkpoint_config = dict(interval=50)
evaluation = dict(interval=50, metric='mAP', save_best='AP')
optimizer = dict(
type='Adam',
lr=0.0015,
)
optimizer_config = dict(grad_clip=None)
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=500,
warmup_ratio=0.001,
step=[200, 260])
total_epochs = 300
log_config = dict(
interval=50,
hooks=[
dict(type='TextLoggerHook'),
# dict(type='TensorboardLoggerHook')
])
channel_cfg = dict(
num_output_channels=14,
dataset_joints=14,
dataset_channel=[
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13],
],
inference_channel=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13])
data_cfg = dict(
image_size=512,
base_size=256,
base_sigma=2,
heatmap_size=[128, 256],
num_joints=channel_cfg['dataset_joints'],
dataset_channel=channel_cfg['dataset_channel'],
inference_channel=channel_cfg['inference_channel'],
num_scales=2,
scale_aware_sigma=False,
)
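# Note (added remark): with image_size=512, the two heatmap scales above
# (128 and 256) correspond to 1/4 and 1/2 of the input resolution, matching
# num_scales=2.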
# model settings
model = dict(
type='AssociativeEmbedding',
pretrained='https://download.openmmlab.com/mmpose/'
'pretrain_models/hrnet_w32-36af842e.pth',
backbone=dict(
type='HRNet',
in_channels=3,
extra=dict(
stage1=dict(
num_modules=1,
num_branches=1,
block='BOTTLENECK',
num_blocks=(4, ),
num_channels=(64, )),
stage2=dict(
num_modules=1,
num_branches=2,
block='BASIC',
num_blocks=(4, 4),
num_channels=(32, 64)),
stage3=dict(
num_modules=4,
num_branches=3,
block='BASIC',
num_blocks=(4, 4, 4),
num_channels=(32, 64, 128)),
stage4=dict(
num_modules=3,
num_branches=4,
block='BASIC',
num_blocks=(4, 4, 4, 4),
num_channels=(32, 64, 128, 256))),
),
keypoint_head=dict(
type='AEHigherResolutionHead',
in_channels=32,
num_joints=14,
tag_per_joint=True,
extra=dict(final_conv_kernel=1, ),
num_deconv_layers=1,
num_deconv_filters=[32],
num_deconv_kernels=[4],
num_basic_blocks=4,
cat_output=[True],
with_ae_loss=[True, False],
loss_keypoint=dict(
type='MultiLossFactory',
num_joints=14,
num_stages=2,
ae_loss_type='exp',
with_ae_loss=[True, False],
push_loss_factor=[0.01, 0.01],
pull_loss_factor=[0.001, 0.001],
with_heatmaps_loss=[True, True],
heatmaps_loss_factor=[1.0, 1.0])),
train_cfg=dict(),
test_cfg=dict(
num_joints=channel_cfg['dataset_joints'],
max_num_people=30,
scale_factor=[1],
with_heatmaps=[True, True],
with_ae=[True, False],
project2image=True,
align_corners=False,
nms_kernel=5,
nms_padding=2,
tag_per_joint=True,
detection_threshold=0.1,
tag_threshold=1,
use_detection_val=True,
ignore_too_much=False,
adjust=True,
refine=True,
flip_test=True))
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='BottomUpRandomAffine',
rot_factor=30,
scale_factor=[0.75, 1.5],
scale_type='short',
trans_factor=40),
dict(type='BottomUpRandomFlip', flip_prob=0.5),
dict(type='ToTensor'),
dict(
type='NormalizeTensor',
mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]),
dict(
type='BottomUpGenerateTarget',
sigma=2,
max_num_people=30,
),
dict(
type='Collect',
keys=['img', 'joints', 'targets', 'masks'],
meta_keys=[]),
]
val_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='BottomUpGetImgSize', test_scale_factor=[1]),
dict(
type='BottomUpResizeAlign',
transforms=[
dict(type='ToTensor'),
dict(
type='NormalizeTensor',
mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]),
]),
dict(
type='Collect',
keys=['img'],
meta_keys=[
'image_file', 'aug_data', 'test_scale_factor', 'base_size',
'center', 'scale', 'flip_index'
]),
]
test_pipeline = val_pipeline
data_root = 'data/aic'
data = dict(
samples_per_gpu=24,
workers_per_gpu=2,
train=dict(
type='BottomUpAicDataset',
ann_file=f'{data_root}/annotations/aic_train.json',
img_prefix=f'{data_root}/ai_challenger_keypoint_train_20170902/'
'keypoint_train_images_20170902/',
data_cfg=data_cfg,
pipeline=train_pipeline,
dataset_info={{_base_.dataset_info}}),
val=dict(
type='BottomUpAicDataset',
ann_file=f'{data_root}/annotations/aic_val.json',
img_prefix=f'{data_root}/ai_challenger_keypoint_validation_20170911/'
'keypoint_validation_images_20170911/',
data_cfg=data_cfg,
pipeline=val_pipeline,
dataset_info={{_base_.dataset_info}}),
test=dict(
type='BottomUpAicDataset',
ann_file=f'{data_root}/annotations/aic_val.json',
img_prefix=f'{data_root}/ai_challenger_keypoint_validation_20170911/'
'keypoint_validation_images_20170911/',
data_cfg=data_cfg,
pipeline=test_pipeline,
dataset_info={{_base_.dataset_info}}),
)
|
import os
from rpython.rlib.rpoll import POLLIN, PollError
from rpython.rlib import streamio
from topaz.coerce import Coerce
from topaz.error import error_for_oserror
from topaz.module import ClassDef
from topaz.modules.fcntl import fcntl
from topaz.objects.objectobject import W_Object
from topaz.objects.stringobject import W_StringObject
from topaz.utils.filemode import map_filemode
from topaz.utils.ll_file import close_without_validation
from topaz.system import IS_WINDOWS
if IS_WINDOWS:
from rpython.rlib.rpoll import _poll as poll
else:
from rpython.rlib.rpoll import poll
class W_IOObject(W_Object):
classdef = ClassDef("IO", W_Object.classdef)
def __init__(self, space):
W_Object.__init__(self, space)
self.fd = -1
self.stream = None
def __del__(self):
# Do not close standard file streams
if self.fd > 3:
try:
close_without_validation(self.fd)
except OSError as e:
pass
def __deepcopy__(self, memo):
obj = super(W_IOObject, self).__deepcopy__(memo)
obj.fd = self.fd
return obj
def ensure_not_closed(self, space):
if self.fd < 0:
raise space.error(space.w_IOError, "closed stream")
def getfd(self):
return self.fd
@classdef.setup_class
def setup_class(cls, space, w_cls):
w_stdin = space.send(w_cls, "new", [space.newint(0), space.newstr_fromstr("r")])
space.globals.set(space, "$stdin", w_stdin)
space.set_const(space.w_object, "STDIN", w_stdin)
w_stdout = space.send(w_cls, "new", [space.newint(1), space.newstr_fromstr("w")])
space.globals.set(space, "$stdout", w_stdout)
space.globals.set(space, "$>", w_stdout)
space.globals.set(space, "$/", space.newstr_fromstr("\n"))
space.set_const(space.w_object, "STDOUT", w_stdout)
w_stderr = space.send(w_cls, "new", [space.newint(2), space.newstr_fromstr("w")])
space.globals.set(space, "$stderr", w_stderr)
space.set_const(space.w_object, "STDERR", w_stderr)
space.set_const(w_cls, "SEEK_CUR", space.newint(os.SEEK_CUR))
space.set_const(w_cls, "SEEK_END", space.newint(os.SEEK_END))
space.set_const(w_cls, "SEEK_SET", space.newint(os.SEEK_SET))
@classdef.singleton_method("allocate")
def method_allocate(self, space, args_w):
return W_IOObject(space)
@classdef.singleton_method("sysopen")
def method_sysopen(self, space, w_path, w_mode_str_or_int=None, w_perm=None):
perm = 0666
mode = os.O_RDONLY
if w_mode_str_or_int is not None:
mode, mode_str, encoding = map_filemode(space, w_mode_str_or_int)
if w_perm is not None and w_perm is not space.w_nil:
perm = space.int_w(w_perm)
path = Coerce.path(space, w_path)
try:
fd = os.open(path, mode, perm)
except OSError as e:
raise error_for_oserror(space, e)
else:
return space.newint(fd)
@classdef.method("initialize")
def method_initialize(self, space, w_fd_or_io, w_mode_str_or_int=None, w_opts=None):
if isinstance(w_fd_or_io, W_IOObject):
fd = w_fd_or_io.fd
else:
fd = Coerce.int(space, w_fd_or_io)
if w_opts is not None:
raise space.error(space.w_NotImplementedError, "options hash for IO.new")
mode, mode_str, encoding = map_filemode(space, w_mode_str_or_int)
self.fd = fd
# Optimization for ReadOnly files, using stream reading
        # this speeds up common file reads by roughly 4 times
# TODO: rewrite to something better
if mode_str == "r" or mode_str == "rb":
try:
self.stream = streamio.fdopen_as_stream(fd, mode_str)
except OSError as e:
raise error_for_oserror(space, e)
return self
@classdef.method("read")
def method_read(self, space, w_length=None, w_str=None):
self.ensure_not_closed(space)
if w_length:
length = space.int_w(w_length)
if length < 0:
raise space.error(space.w_ArgumentError,
"negative length %d given" % length
)
elif length == 0:
return space.newstr_fromstr("")
else:
length = -1
if self.stream is None:
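            # No buffered stream is available (one is only created for plain
            # read modes), so read straight from the file descriptor in chunks
            # of at most 8192 bytes until EOF or until `length` bytes are read.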
read_bytes = 0
read_chunks = []
while length < 0 or read_bytes < length:
if length > 0:
max_read = int(length - read_bytes)
else:
max_read = 8192
try:
current_read = os.read(self.fd, max_read)
except OSError as e:
raise error_for_oserror(space, e)
if len(current_read) == 0:
break
read_bytes += len(current_read)
read_chunks += current_read
            # Return nil if nothing could be read (EOF).
if read_bytes == 0:
return space.w_nil
w_read_str = space.newstr_fromchars(read_chunks)
else:
try:
if length < 0:
read_str = self.stream.readall()
else:
read_str = self.stream.read(length)
except OSError as e:
raise error_for_oserror(space, e)
if len(read_str) == 0:
return space.w_nil
w_read_str = space.newstr_fromstr(read_str)
if w_str is not None:
w_str.clear(space)
w_str.extend(space, w_read_str)
return w_str
return w_read_str
@classdef.method("write")
def method_write(self, space, w_str):
self.ensure_not_closed(space)
string = space.str_w(space.send(w_str, "to_s"))
try:
bytes_written = os.write(self.fd, string)
except OSError as e:
raise error_for_oserror(space, e)
return space.newint(bytes_written)
@classdef.method("flush")
def method_flush(self, space):
# We have no internal buffers to flush!
self.ensure_not_closed(space)
return self
@classdef.method("seek", amount="int", whence="int")
def method_seek(self, space, amount, whence=os.SEEK_SET):
self.ensure_not_closed(space)
try:
if self.stream is not None:
self.stream.seek(amount, whence)
else:
os.lseek(self.fd, amount, whence)
except OSError as e:
raise error_for_oserror(space, e)
return space.newint(0)
@classdef.method("pos")
@classdef.method("tell")
def method_pos(self, space):
self.ensure_not_closed(space)
# TODO: this currently truncates large values, switch this to use a
# Bignum in those cases
if self.stream is not None:
pos = self.stream.tell()
return space.newint(int(pos))
return space.newint(int(os.lseek(self.fd, 0, os.SEEK_CUR)))
@classdef.method("rewind")
def method_rewind(self, space):
self.ensure_not_closed(space)
os.lseek(self.fd, 0, os.SEEK_SET)
return space.newint(0)
@classdef.method("print")
def method_print(self, space, args_w):
self.ensure_not_closed(space)
if not args_w:
w_last = space.globals.get(space, "$_")
if w_last is not None:
args_w.append(w_last)
w_sep = space.globals.get(space, "$,")
if w_sep:
sep = space.str_w(space.send(w_sep, "to_s"))
else:
sep = ""
w_end = space.globals.get(space, "$\\")
if w_end:
end = space.str_w(space.send(w_end, "to_s"))
else:
end = ""
strings = [space.str_w(space.send(w_arg, "to_s")) for w_arg in args_w]
try:
os.write(self.fd, sep.join(strings))
os.write(self.fd, end)
except OSError as e:
raise error_for_oserror(space, e)
return space.w_nil
@classdef.method("getc")
def method_getc(self, space):
self.ensure_not_closed(space)
try:
c = os.read(self.fd, 1)
except OSError as e:
raise error_for_oserror(space, e)
if not c:
return space.w_nil
return space.newstr_fromstr(c)
@classdef.singleton_method("pipe")
def method_pipe(self, space, block=None):
r, w = os.pipe()
pipes_w = [
space.send(self, "new", [space.newint(r), space.newstr_fromstr("r")]),
space.send(self, "new", [space.newint(w), space.newstr_fromstr("w")])
]
if block is not None:
try:
return space.invoke_block(block, pipes_w)
finally:
for pipe_w in pipes_w:
if not space.is_true(space.send(pipe_w, "closed?")):
space.send(pipe_w, "close")
else:
return space.newarray(pipes_w)
@classdef.method("reopen")
def method_reopen(self, space, w_arg, w_mode=None):
self.ensure_not_closed(space)
w_io = space.convert_type(w_arg, space.w_io, "to_io", raise_error=False)
if w_io is space.w_nil:
from topaz.objects.fileobject import W_FileObject
args = [w_arg] if w_mode is None else [w_arg, w_mode]
w_io = space.send(space.getclassfor(W_FileObject), "new", args)
assert isinstance(w_io, W_IOObject)
w_io.ensure_not_closed(space)
os.close(self.fd)
os.dup2(w_io.getfd(), self.fd)
return self
@classdef.method("to_io")
def method_to_io(self):
return self
@classdef.method("fileno")
@classdef.method("to_i")
def method_to_i(self, space):
self.ensure_not_closed(space)
return space.newint(self.fd)
@classdef.method("close")
def method_close(self, space):
self.ensure_not_closed(space)
try:
os.close(self.fd)
except OSError as e:
raise error_for_oserror(space, e)
self.fd = -1
self.stream = None
return self
@classdef.method("closed?")
def method_closedp(self, space):
return space.newbool(self.fd == -1)
@classdef.method("stat")
def method_stat(self, space):
from topaz.objects.fileobject import W_FileStatObject
try:
stat_val = os.fstat(self.fd)
except OSError as e:
raise error_for_oserror(space, e)
stat_obj = W_FileStatObject(space)
stat_obj.set_stat(stat_val)
return stat_obj
@classdef.method("isatty")
@classdef.method("tty?")
def method_isatty(self, space):
self.ensure_not_closed(space)
return space.newbool(os.isatty(self.fd))
@classdef.method("fcntl", cmd="int", arg="int")
def method_fcntl(self, space, cmd, arg=0):
fcntl(self.fd, cmd, arg)
return self
@classdef.method("ready?")
def method_ready(self, space):
retval = None
try:
retval = poll({self.fd: POLLIN}, 0)
except PollError:
return space.w_nil
return space.newbool(len(retval) > 0)
|
from KOMORANPy.training.model_builder import ModelBuilder
# corpus_builder = CorpusBuilder()
# # todo: training data location (in practice, only binary files will be provided)
# corpus_builder.build_path("/Users/shinjunsoo/shineware/data/komoran_training_data", ".refine.txt")
# corpus_builder.save("corpus_build")
model_builder = ModelBuilder()
model_builder.build_path("corpus_build")
model_builder.save("../model")
|
from .common import *
DEBUG = False
ALLOWED_HOSTS = [os.environ['HOST']]
EMAIL_HOST = os.environ['EMAIL_HOST']
EMAIL_PORT = int(os.environ['EMAIL_PORT'])
EMAIL_HOST_USER = os.environ['EMAIL_HOST_USER']
EMAIL_HOST_PASSWORD = os.environ['EMAIL_HOST_PASSWORD']
EMAIL_USE_TLS = True
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'handlers': {
'console': {
'class': 'logging.StreamHandler',
},
},
'loggers': {
'django': {
'handlers': ['console'],
'level': 'INFO',
},
},
}
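# Deployment note (summarizing the lookups above, no new behaviour): HOST,
# EMAIL_HOST, EMAIL_PORT, EMAIL_HOST_USER and EMAIL_HOST_PASSWORD must all be
# set in the environment, otherwise importing this settings module raises
# KeyError.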
|
__author__ = 'mnowotka'
try:
from urlparse import urlparse
except ImportError:
from urllib.parse import urlparse
import requests
import requests_cache
from chembl_webresource_client.spore_client import Client, make_spore_function
from chembl_webresource_client.query_set import QuerySet
from chembl_webresource_client.query_set import Model
from chembl_webresource_client.settings import Settings
from easydict import EasyDict
#-----------------------------------------------------------------------------------------------------------------------
class NewClient(object):
pass
#-----------------------------------------------------------------------------------------------------------------------
def client_from_url(url, base_url=None):
"""Builds a client from an url
:param url: the url you want to get the SPORE schema from
:param session: the :class:`request.Session` instance to use. Defaults to
the requests module itself.
"""
res = requests.get(url)
if not res.ok:
raise Exception('Error getting schema from url {0} with status {1} and msg {2}'.format(url, res.status_code, res.text))
schema = res.json()
if 'base_url' not in schema:
if base_url:
schema['base_url'] = base_url
else:
parsed_url = urlparse(url)
schema['base_url'] = parsed_url.scheme + '://' + parsed_url.netloc + '/'
if not schema['base_url'].endswith('/'):
schema['base_url'] += '/'
client = NewClient()
client.description = EasyDict(schema)
client.official = False # TODO: change
keys = client.description.methods.keys()
for method, definition in [(m,d) for (m,d) in client.description.methods.items() if
(m.startswith('POST_') or m.startswith('GET_')) and m.endswith('_detail')]:
searchable = False
if method.replace('dispatch_detail', 'get_search') in keys:
searchable = True
name = definition['resource_name']
collection_name = definition['collection_name']
formats = [format for format in definition['formats'] if format not in ('jsonp', 'html')]
default_format = definition['default_format'].split('/')[-1]
if not name:
continue
model = Model(name, collection_name, formats, searchable)
qs = QuerySet(model=model)
if default_format != 'xml':
qs.set_format(default_format)
setattr(client, name, qs)
return client
#-----------------------------------------------------------------------------------------------------------------------
new_client = client_from_url(Settings.Instance().NEW_CLIENT_URL + '/spore')
#-----------------------------------------------------------------------------------------------------------------------
|
import pickle
from appcore.services import Factory
from platforms.base_platform import BasePlatform
from platforms.helpers.mysql_connection import MysqlConnection
class CountryMapUpdate(BasePlatform):
API_URL = 'my.sql.server'
DB_SETTINGS = {
'hostname': API_URL,
'username': 'db_user',
'password': 'db_pass',
'db': 'db_schema',
'table': 'countries'
}
def _run(self):
country_map = self._fetch()
self._store(country_map)
return True
def _fetch(self):
self.update('pull', 'started')
with MysqlConnection(**self.DB_SETTINGS) as connection:
countries = connection.execute(
'select country_name, country_code from ' + self.DB_SETTINGS['table']
).fetchall()
self.update('pull', 'completed')
country_map = {country[0].lower(): country[1].lower() for country in countries}
return country_map
def _store(self, country_map):
self.update('store', 'attempted')
Factory().get_storage_client('redis').set('maps', record={'country': pickle.dumps(country_map)})
|
# quality tests for L1 HfBitCounts trigger objects
import FWCore.ParameterSet.Config as cms
l1EmulatorObjHfBitCountsQualityTests = cms.EDAnalyzer("QualityTester",
qtList=cms.untracked.FileInPath('DQM/L1TMonitorClient/data/L1EmulatorObjHfBitCountsQualityTests.xml'),
QualityTestPrescaler=cms.untracked.int32(1),
getQualityTestsFromFile=cms.untracked.bool(True),
testInEventloop=cms.untracked.bool(False),
qtestOnEndLumi=cms.untracked.bool(True),
qtestOnEndRun=cms.untracked.bool(True),
qtestOnEndJob=cms.untracked.bool(False),
reportThreshold=cms.untracked.string(""),
verboseQT=cms.untracked.bool(True)
)
|
from adia.sequence import Module
from adia.renderer import ModulePlan, ItemStartPlan, ItemEndPlan, LEFT, RIGHT
def test_moduleplan():
p = ModulePlan(Module('foo'))
assert repr(p) == 'ModulePlan: foo'
def test_itemplans():
class Item:
def __repr__(self):
return 'foo -> bar'
item = Item()
p = ItemStartPlan(item, Module('foo'), Module('bar'), RIGHT, 0)
assert repr(p) == '~~~> foo -> bar'
p = ItemEndPlan(item, Module('foo'), Module('bar'), LEFT, 0)
assert repr(p) == '<--- foo -> bar'
|
def XXX(nums1, nums2):
"""
:type nums1: List[int]
:type nums2: List[int]
:rtype: float
"""
len1 = len(nums1)
len2 = len(nums2)
    if (len1 == 0) and (len2 == 0):
        return 0
    if (len1 != 0) and (len2 == 0):
        if len1 % 2 == 0:
            return (nums1[len1 // 2 - 1] + nums1[len1 // 2]) / 2
        else:
            return nums1[len1 // 2]
    if (len1 == 0) and (len2 != 0):
        if len2 % 2 == 0:
            return (nums2[len2 // 2 - 1] + nums2[len2 // 2]) / 2
        else:
            return nums2[len2 // 2]
    a = 0
    b = 0
    num = []
    lens = len1 + len2
    for _ in range(lens):
        # Check the bounds first so an exhausted list is never indexed.
        if a >= len1:
            num.append(nums2[b])
            b = b + 1
        elif b >= len2:
            num.append(nums1[a])
            a = a + 1
        elif nums1[a] <= nums2[b]:
            num.append(nums1[a])
            a = a + 1
        else:
            num.append(nums2[b])
            b = b + 1
    if lens % 2 == 0:
        return (num[lens // 2 - 1] + num[lens // 2]) / 2
    else:
        return num[lens // 2]
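# Hedged usage sketch (added for illustration): XXX returns the median of the
# two sorted input lists combined, e.g.
#
#   XXX([1, 3], [2])      # -> 2    (odd combined length: middle element)
#   XXX([1, 2], [3, 4])   # -> 2.5  (even combined length: mean of middle two)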
|
import time
class TestSflow:
speed_rate_table = {
"400000": "400000",
"200000": "200000",
"100000": "100000",
"50000": "50000",
"40000": "40000",
"25000": "25000",
"10000": "10000",
"1000": "1000"
}
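    # Added note: the default sFlow sampling rate tracked by these tests simply
    # mirrors the port speed in Mbps (e.g. a 25000 Mbps port defaults to a
    # sample rate of 25000), which is what the assertions below rely on.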
def setup_sflow(self, dvs):
self.adb = dvs.get_asic_db()
self.cdb = dvs.get_config_db()
self.cdb.create_entry("SFLOW", "global", {"admin_state": "up"})
def test_defaultGlobal(self, dvs, testlog):
self.setup_sflow(dvs)
# Verify that the session is up
port_oid = self.adb.port_name_map["Ethernet0"]
expected_fields = {"SAI_PORT_ATTR_INGRESS_SAMPLEPACKET_ENABLE": "oid:0x0"}
fvs = self.adb.wait_for_field_negative_match("ASIC_STATE:SAI_OBJECT_TYPE_PORT", port_oid, expected_fields)
sample_session = fvs["SAI_PORT_ATTR_INGRESS_SAMPLEPACKET_ENABLE"]
speed = fvs["SAI_PORT_ATTR_SPEED"]
rate = self.speed_rate_table.get(speed, None)
assert rate
expected_fields = {"SAI_SAMPLEPACKET_ATTR_SAMPLE_RATE": rate}
self.adb.wait_for_field_match("ASIC_STATE:SAI_OBJECT_TYPE_SAMPLEPACKET", sample_session, expected_fields)
self.cdb.update_entry("SFLOW", "global", {"admin_state": "down"})
expected_fields = {"SAI_PORT_ATTR_INGRESS_SAMPLEPACKET_ENABLE": "oid:0x0"}
self.adb.wait_for_field_match("ASIC_STATE:SAI_OBJECT_TYPE_PORT", port_oid, expected_fields)
def test_globalAll(self, dvs, testlog):
self.setup_sflow(dvs)
# Verify that the session is up first
port_oid = self.adb.port_name_map["Ethernet0"]
expected_fields = {"SAI_PORT_ATTR_INGRESS_SAMPLEPACKET_ENABLE": "oid:0x0"}
self.adb.wait_for_field_negative_match("ASIC_STATE:SAI_OBJECT_TYPE_PORT", port_oid, expected_fields)
# Then shut down the session
self.cdb.update_entry("SFLOW_SESSION", "all", {"admin_state": "down"})
expected_fields = {"SAI_PORT_ATTR_INGRESS_SAMPLEPACKET_ENABLE": "oid:0x0"}
self.adb.wait_for_field_match("ASIC_STATE:SAI_OBJECT_TYPE_PORT", port_oid, expected_fields)
self.cdb.update_entry("SFLOW_SESSION", "all", {"admin_state": "up"})
self.adb.wait_for_field_negative_match("ASIC_STATE:SAI_OBJECT_TYPE_PORT", port_oid, expected_fields)
self.cdb.delete_entry("SFLOW_SESSION", "all")
self.adb.wait_for_field_negative_match("ASIC_STATE:SAI_OBJECT_TYPE_PORT", port_oid, expected_fields)
def test_InterfaceSet(self, dvs, testlog):
self.setup_sflow(dvs)
# Get the global session info as a baseline
port_oid = self.adb.port_name_map["Ethernet0"]
expected_fields = ["SAI_PORT_ATTR_INGRESS_SAMPLEPACKET_ENABLE"]
fvs = self.adb.wait_for_fields("ASIC_STATE:SAI_OBJECT_TYPE_PORT", port_oid, expected_fields)
global_session = fvs["SAI_PORT_ATTR_INGRESS_SAMPLEPACKET_ENABLE"]
# Then create the interface session
session_params = {"admin_state": "up", "sample_rate": "1000"}
self.cdb.create_entry("SFLOW_SESSION", "Ethernet0", session_params)
# Verify that the new interface session has been created and is different from the global one
port_oid = self.adb.port_name_map["Ethernet0"]
expected_fields = {"SAI_PORT_ATTR_INGRESS_SAMPLEPACKET_ENABLE": global_session}
fvs = self.adb.wait_for_field_negative_match("ASIC_STATE:SAI_OBJECT_TYPE_PORT", port_oid, expected_fields)
sample_session = fvs["SAI_PORT_ATTR_INGRESS_SAMPLEPACKET_ENABLE"]
expected_fields = {"SAI_SAMPLEPACKET_ATTR_SAMPLE_RATE": "1000"}
self.adb.wait_for_field_match("ASIC_STATE:SAI_OBJECT_TYPE_SAMPLEPACKET", sample_session, expected_fields)
self.cdb.create_entry("SFLOW_SESSION", "all", {"admin_state": "down"})
expected_fields = {"SAI_PORT_ATTR_INGRESS_SAMPLEPACKET_ENABLE": "oid:0x0"}
self.adb.wait_for_field_negative_match("ASIC_STATE:SAI_OBJECT_TYPE_PORT", port_oid, expected_fields)
self.cdb.create_entry("SFLOW", "global", {"admin_state": "down"})
self.adb.wait_for_field_match("ASIC_STATE:SAI_OBJECT_TYPE_PORT", port_oid, expected_fields)
self.cdb.delete_entry("SFLOW_SESSION", "all")
self.cdb.delete_entry("SFLOW_SESSION", "Ethernet0")
def test_defaultRate(self, dvs, testlog):
self.setup_sflow(dvs)
session_params = {"admin_state": "up"}
self.cdb.create_entry("SFLOW_SESSION", "Ethernet4", session_params)
port_oid = self.adb.port_name_map["Ethernet4"]
expected_fields = {"SAI_PORT_ATTR_INGRESS_SAMPLEPACKET_ENABLE": "oid:0x0"}
fvs = self.adb.wait_for_field_negative_match("ASIC_STATE:SAI_OBJECT_TYPE_PORT", port_oid, expected_fields)
sample_session = fvs["SAI_PORT_ATTR_INGRESS_SAMPLEPACKET_ENABLE"]
speed = fvs["SAI_PORT_ATTR_SPEED"]
rate = self.speed_rate_table.get(speed, None)
assert rate
expected_fields = {"SAI_SAMPLEPACKET_ATTR_SAMPLE_RATE": rate}
self.adb.wait_for_field_match("ASIC_STATE:SAI_OBJECT_TYPE_SAMPLEPACKET", sample_session, expected_fields)
self.cdb.delete_entry("SFLOW_SESSION", "Ethernet4")
def test_ConfigDel(self, dvs, testlog):
self.setup_sflow(dvs)
session_params = {"admin_state": "up", "sample_rate": "1000"}
self.cdb.create_entry("SFLOW_SESSION_TABLE", "Ethernet0", session_params)
self.cdb.delete_entry("SFLOW_SESSION_TABLE", "Ethernet0")
port_oid = self.adb.port_name_map["Ethernet0"]
expected_fields = {"SAI_PORT_ATTR_INGRESS_SAMPLEPACKET_ENABLE": "oid:0x0"}
fvs = self.adb.wait_for_field_negative_match("ASIC_STATE:SAI_OBJECT_TYPE_PORT", port_oid, expected_fields)
sample_session = fvs["SAI_PORT_ATTR_INGRESS_SAMPLEPACKET_ENABLE"]
speed = fvs["SAI_PORT_ATTR_SPEED"]
rate = self.speed_rate_table.get(speed, None)
assert rate
expected_fields = {"SAI_SAMPLEPACKET_ATTR_SAMPLE_RATE": rate}
self.adb.wait_for_field_match("ASIC_STATE:SAI_OBJECT_TYPE_SAMPLEPACKET", sample_session, expected_fields)
def test_SamplingRatePortCfgUpdate(self, dvs, testlog):
'''
This test checks if the SflowMgr updates the sampling rate
1) When the Speed is Updated on the port and no local configuration has been given on the port
Eg:
config sflow enable
config interface speed Ethernet0 25000 (Let's suppose Original Speed for Ethernet0 is 100G)
show sflow interface | grep Ethernet0 (Should see a sampling rate of 25000 not 100000)
'''
self.setup_sflow(dvs)
appldb = dvs.get_app_db()
#dvs.runcmd("portconfig -p {} -s {}".format("Ethernet0", "25000"))
self.cdb.update_entry("PORT", "Ethernet0", {'speed' : "25000"})
expected_fields = {"sample_rate": self.speed_rate_table["25000"]}
appldb.wait_for_field_match("SFLOW_SESSION_TABLE", "Ethernet0", expected_fields)
def test_SamplingRateManualUpdate(self, dvs, testlog):
'''
This test checks if the SflowMgr updates the sampling rate
1) When the Cfg Sflow Table is updated with sampling rate by the user, this rate should not be impacted by Port Speed Changes
Eg:
config sflow enable
config sflow interface sample-rate Ethernet4 256
config interface Ethernet0 speed 25000 (Original Speed for Ethernet0 is 100G)
        show sflow interface | grep Ethernet0 (Should see a sampling rate of 256, not 100000 or 25000)
'''
self.setup_sflow(dvs)
appldb = dvs.get_app_db()
session_params = {"admin_state": "up", "sample_rate": "256"}
self.cdb.create_entry("SFLOW_SESSION", "Ethernet4", session_params)
self.cdb.wait_for_field_match("SFLOW_SESSION", "Ethernet4", session_params)
appldb.wait_for_field_match("SFLOW_SESSION_TABLE", "Ethernet4", {"sample_rate": "256"})
self.cdb.update_entry("PORT", "Ethernet4", {'speed' : "25000"})
        # The check here is that the original value does not get changed.
        # If a bug were to appear, give it some time to be noticed.
time.sleep(1)
appldb.wait_for_field_match("SFLOW_SESSION_TABLE", "Ethernet4", {"sample_rate": "256"})
def test_Teardown(self, dvs, testlog):
self.setup_sflow(dvs)
self.cdb.delete_entry("SFLOW", "global")
self.adb.wait_for_n_keys("ASIC_STATE:SAI_OBJECT_TYPE_SAMPLEPACKET", 0)
# Add a dummy always-pass test at the end as a workaround for an issue where
# a flaky failure on the final test invokes module tear-down before retrying.
def test_nonflaky_dummy():
pass
|
# Copyright (C) 2008 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import optparse
import platform
import re
import sys
from event_log import EventLog
from error import NoSuchProjectError
from error import InvalidProjectGroupsError
# Number of projects to submit to a single worker process at a time.
# This number represents a tradeoff between the overhead of IPC and finer
# grained opportunity for parallelism. This particular value was chosen by
# iterating through powers of two until the overall performance no longer
# improved. The performance of this batch size is not a function of the
# number of cores on the system.
WORKER_BATCH_SIZE = 32
# How many jobs to run in parallel by default? This assumes the jobs are
# largely I/O bound and do not hit the network.
DEFAULT_LOCAL_JOBS = min(os.cpu_count(), 8)
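# For example, min(os.cpu_count(), 8) resolves to 4 on a 4-core machine and
# caps at 8 on anything larger (e.g. a 32-core machine).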
class Command(object):
"""Base class for any command line action in repo.
"""
common = False
event_log = EventLog()
manifest = None
_optparse = None
# Whether this command supports running in parallel. If greater than 0,
# it is the number of parallel jobs to default to.
PARALLEL_JOBS = None
def WantPager(self, _opt):
return False
def ReadEnvironmentOptions(self, opts):
""" Set options from environment variables. """
env_options = self._RegisteredEnvironmentOptions()
for env_key, opt_key in env_options.items():
# Get the user-set option value if any
opt_value = getattr(opts, opt_key)
# If the value is set, it means the user has passed it as a command
# line option, and we should use that. Otherwise we can try to set it
# with the value from the corresponding environment variable.
if opt_value is not None:
continue
env_value = os.environ.get(env_key)
if env_value is not None:
setattr(opts, opt_key, env_value)
return opts
@property
def OptionParser(self):
if self._optparse is None:
try:
me = 'repo %s' % self.NAME
usage = self.helpUsage.strip().replace('%prog', me)
except AttributeError:
usage = 'repo %s' % self.NAME
epilog = 'Run `repo help %s` to view the detailed manual.' % self.NAME
self._optparse = optparse.OptionParser(usage=usage, epilog=epilog)
self._Options(self._optparse)
return self._optparse
def _Options(self, p):
"""Initialize the option parser.
"""
if self.PARALLEL_JOBS is not None:
p.add_option(
'-j', '--jobs',
type=int, default=self.PARALLEL_JOBS,
help='number of jobs to run in parallel (default: %s)' % self.PARALLEL_JOBS)
def _RegisteredEnvironmentOptions(self):
"""Get options that can be set from environment variables.
Return a dictionary mapping environment variable name
to option key name that it can override.
Example: {'REPO_MY_OPTION': 'my_option'}
Will allow the option with key value 'my_option' to be set
from the value in the environment variable named 'REPO_MY_OPTION'.
Note: This does not work properly for options that are explicitly
set to None by the user, or options that are defined with a
default value other than None.
"""
return {}
def Usage(self):
"""Display usage and terminate.
"""
self.OptionParser.print_usage()
sys.exit(1)
def ValidateOptions(self, opt, args):
"""Validate the user options & arguments before executing.
This is meant to help break the code up into logical steps. Some tips:
* Use self.OptionParser.error to display CLI related errors.
* Adjust opt member defaults as makes sense.
* Adjust the args list, but do so inplace so the caller sees updates.
* Try to avoid updating self state. Leave that to Execute.
"""
def Execute(self, opt, args):
"""Perform the action, after option parsing is complete.
"""
raise NotImplementedError
def _ResetPathToProjectMap(self, projects):
self._by_path = dict((p.worktree, p) for p in projects)
def _UpdatePathToProjectMap(self, project):
self._by_path[project.worktree] = project
def _GetProjectByPath(self, manifest, path):
project = None
if os.path.exists(path):
oldpath = None
while (path and
path != oldpath and
path != manifest.topdir):
try:
project = self._by_path[path]
break
except KeyError:
oldpath = path
path = os.path.dirname(path)
if not project and path == manifest.topdir:
try:
project = self._by_path[path]
except KeyError:
pass
else:
try:
project = self._by_path[path]
except KeyError:
pass
return project
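  # For example (illustrative): given a worktree registered at /repo/foo/bar
  # and an argument path /repo/foo/bar/src, the walk above tries src, then
  # bar, and returns the project stored for /repo/foo/bar in self._by_path.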
def GetProjects(self, args, manifest=None, groups='', missing_ok=False,
submodules_ok=False):
"""A list of projects that match the arguments.
"""
if not manifest:
manifest = self.manifest
all_projects_list = manifest.projects
result = []
mp = manifest.manifestProject
if not groups:
groups = manifest.GetGroupsStr()
groups = [x for x in re.split(r'[,\s]+', groups) if x]
if not args:
derived_projects = {}
for project in all_projects_list:
if submodules_ok or project.sync_s:
derived_projects.update((p.name, p)
for p in project.GetDerivedSubprojects())
all_projects_list.extend(derived_projects.values())
for project in all_projects_list:
if (missing_ok or project.Exists) and project.MatchesGroups(groups):
result.append(project)
else:
self._ResetPathToProjectMap(all_projects_list)
for arg in args:
# We have to filter by manifest groups in case the requested project is
# checked out multiple times or differently based on them.
projects = [project for project in manifest.GetProjectsWithName(arg)
if project.MatchesGroups(groups)]
if not projects:
path = os.path.abspath(arg).replace('\\', '/')
project = self._GetProjectByPath(manifest, path)
# If it's not a derived project, update path->project mapping and
# search again, as arg might actually point to a derived subproject.
if (project and not project.Derived and (submodules_ok or
project.sync_s)):
search_again = False
for subproject in project.GetDerivedSubprojects():
self._UpdatePathToProjectMap(subproject)
search_again = True
if search_again:
project = self._GetProjectByPath(manifest, path) or project
if project:
projects = [project]
if not projects:
raise NoSuchProjectError(arg)
for project in projects:
if not missing_ok and not project.Exists:
raise NoSuchProjectError('%s (%s)' % (arg, project.relpath))
if not project.MatchesGroups(groups):
raise InvalidProjectGroupsError(arg)
result.extend(projects)
def _getpath(x):
return x.relpath
result.sort(key=_getpath)
return result
def FindProjects(self, args, inverse=False):
result = []
patterns = [re.compile(r'%s' % a, re.IGNORECASE) for a in args]
for project in self.GetProjects(''):
for pattern in patterns:
match = pattern.search(project.name) or pattern.search(project.relpath)
if not inverse and match:
result.append(project)
break
if inverse and match:
break
else:
if inverse:
result.append(project)
result.sort(key=lambda project: project.relpath)
return result
class InteractiveCommand(Command):
"""Command which requires user interaction on the tty and
must not run within a pager, even if the user asks to.
"""
def WantPager(self, _opt):
return False
class PagedCommand(Command):
"""Command which defaults to output in a pager, as its
display tends to be larger than one screen full.
"""
def WantPager(self, _opt):
return True
class MirrorSafeCommand(object):
"""Command permits itself to run within a mirror,
and does not require a working directory.
"""
class GitcAvailableCommand(object):
"""Command that requires GITC to be available, but does
not require the local client to be a GITC client.
"""
class GitcClientCommand(object):
"""Command that requires the local client to be a GITC
client.
"""
|
import bz2
from six.moves.cPickle import load
from string import punctuation
def offsets_to_token(left, right, offset_array, lemmas, punc=set(punctuation)):
token_start, token_end = None, None
for i, c in enumerate(offset_array):
if left >= c:
token_start = i
if c > right and token_end is None:
token_end = i
break
token_end = len(offset_array) - 1 if token_end is None else token_end
token_end = token_end - 1 if lemmas[token_end - 1] in punc else token_end
return range(token_start, token_end)
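# Worked example (illustrative): with offset_array = [0, 5, 11, 17], lemmas
# with no trailing punctuation, left = 5 and right = 10, token_start becomes 1
# and the first offset greater than 10 is at index 2, so the function returns
# range(1, 2), i.e. just token 1.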
class CDRTagger(object):
def __init__(self, fname='data/unary_tags.pkl.bz2'):
with bz2.BZ2File(fname, 'rb') as f:
self.tag_dict = load(f)
def tag(self, parts):
pubmed_id, _, _, sent_start, sent_end = parts['stable_id'].split(':')
sent_start, sent_end = int(sent_start), int(sent_end)
tags = self.tag_dict.get(pubmed_id, {})
for tag in tags:
if not (sent_start <= tag[1] <= sent_end):
continue
offsets = [offset + sent_start for offset in parts['char_offsets']]
toks = offsets_to_token(tag[1], tag[2], offsets, parts['lemmas'])
for tok in toks:
ts = tag[0].split('|')
parts['entity_types'][tok] = ts[0]
parts['entity_cids'][tok] = ts[1]
return parts
class TaggerOneTagger(CDRTagger):
def __init__(self, fname_tags='data/taggerone_unary_tags_cdr.pkl.bz2',
fname_mesh='data/chem_dis_mesh_dicts.pkl.bz2'):
with bz2.BZ2File(fname_tags, 'rb') as f:
self.tag_dict = load(f)
with bz2.BZ2File(fname_mesh, 'rb') as f:
self.chem_mesh_dict, self.dis_mesh_dict = load(f)
def tag(self, parts):
parts = super(TaggerOneTagger, self).tag(parts)
for i, word in enumerate(parts['words']):
tag = parts['entity_types'][i]
if len(word) > 4 and tag is None:
wl = word.lower()
if wl in self.dis_mesh_dict:
parts['entity_types'][i] = 'Disease'
parts['entity_cids'][i] = self.dis_mesh_dict[wl]
elif wl in self.chem_mesh_dict:
parts['entity_types'][i] = 'Chemical'
parts['entity_cids'][i] = self.chem_mesh_dict[wl]
return parts
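# Note: `parts` is expected to be a per-sentence dict carrying at least
# 'stable_id', 'char_offsets', 'lemmas', 'words', 'entity_types' and
# 'entity_cids' (inferred from the accesses above); the taggers mutate the
# last two lists in place and return the same dict.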
|
import cv2
import numpy as np
import socket
if __name__ == '__main__':
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
port = 12345
while True:
s.sendto(b'hello world', ("192.168.1.10", 8001))
|
"""
Utilities and base functions for Services.
"""
import abc
import datetime
from typing import Any, Dict, List, Optional, Set, Tuple
from pydantic import validator
from qcelemental.models import ComputeError
from ..interface.models import ObjectId, ProtoModel
from ..interface.models.rest_models import TaskQueuePOSTBody
from ..interface.models.task_models import PriorityEnum
from ..procedures import get_procedure_parser
class TaskManager(ProtoModel):
storage_socket: Optional[Any] = None
logger: Optional[Any] = None
required_tasks: Dict[str, str] = {}
tag: Optional[str] = None
priority: PriorityEnum = PriorityEnum.HIGH
class Config(ProtoModel.Config):
allow_mutation = True
serialize_default_excludes = {"storage_socket", "logger"}
def done(self) -> bool:
"""
Check if requested tasks are complete.
"""
if len(self.required_tasks) == 0:
return True
task_query = self.storage_socket.get_procedures(
id=list(self.required_tasks.values()), include=["status", "error"]
)
status_values = set(x["status"] for x in task_query["data"])
if status_values == {"COMPLETE"}:
return True
elif "ERROR" in status_values:
for x in task_query["data"]:
if x["status"] != "ERROR":
continue
self.logger.error("Error in service compute as follows:")
tasks = self.storage_socket.get_queue()["data"]
for x in tasks:
if "error" not in x:
continue
self.logger.error(x["error"]["error_message"])
            raise KeyError("Not all tasks executed successfully.")
else:
return False
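    # done() above reduces to: all required tasks COMPLETE -> True, any task
    # in ERROR -> raise after logging the errors, anything still pending -> False.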
def get_tasks(self) -> Dict[str, Any]:
"""
Pulls currently held tasks.
"""
ret = {}
for k, id in self.required_tasks.items():
ret[k] = self.storage_socket.get_procedures(id=id)["data"][0]
return ret
def submit_tasks(self, procedure_type: str, tasks: Dict[str, Any]) -> bool:
"""
        Submits new tasks to the queue and provides a waiter until they are done.
"""
procedure_parser = get_procedure_parser(procedure_type, self.storage_socket, self.logger)
required_tasks = {}
# Add in all new tasks
for key, packet in tasks.items():
packet["meta"].update({"tag": self.tag, "priority": self.priority})
# print("Check tag and priority:", packet)
packet = TaskQueuePOSTBody(**packet)
# Turn packet into a full task, if there are duplicates, get the ID
r = procedure_parser.submit_tasks(packet)
if len(r["meta"]["errors"]):
                raise KeyError("Problem submitting task: {}.".format(r["meta"]["errors"]))
# print("Submission:", r["data"])
required_tasks[key] = r["data"]["ids"][0]
self.required_tasks = required_tasks
return True
class BaseService(ProtoModel, abc.ABC):
# Excluded fields
storage_socket: Optional[Any]
logger: Optional[Any]
# Base identification
id: Optional[ObjectId] = None
hash_index: str
service: str
program: str
procedure: str
# Output data
output: Any
# Links
task_id: Optional[ObjectId] = None
procedure_id: Optional[ObjectId] = None
# Task manager
task_tag: Optional[str] = None
task_priority: PriorityEnum
task_manager: TaskManager = TaskManager()
status: str = "WAITING"
error: Optional[ComputeError] = None
tag: Optional[str] = None
# Sorting and priority
priority: PriorityEnum = PriorityEnum.NORMAL
modified_on: datetime.datetime = None
created_on: datetime.datetime = None
class Config(ProtoModel.Config):
allow_mutation = True
serialize_default_excludes = {"storage_socket", "logger"}
def __init__(self, **data):
dt = datetime.datetime.utcnow()
data.setdefault("modified_on", dt)
data.setdefault("created_on", dt)
super().__init__(**data)
self.task_manager.logger = self.logger
self.task_manager.storage_socket = self.storage_socket
self.task_manager.tag = self.task_tag
self.task_manager.priority = self.task_priority
@validator("task_priority", pre=True)
def munge_priority(cls, v):
if isinstance(v, str):
v = PriorityEnum[v.upper()]
elif v is None:
v = PriorityEnum.HIGH
return v
@classmethod
@abc.abstractmethod
def initialize_from_api(cls, storage_socket, meta, molecule, tag=None, priority=None):
"""
        Initializes a Service from the API.
"""
@abc.abstractmethod
def iterate(self):
"""
Takes a "step" of the service. Should return False if not finished.
"""
def expand_ndimensional_grid(
dimensions: Tuple[int, ...], seeds: Set[Tuple[int, ...]], complete: Set[Tuple[int, ...]]
) -> List[Tuple[Tuple[int, ...], Tuple[int, ...]]]:
"""
Expands an n-dimensional key/value grid.
Example
-------
>>> expand_ndimensional_grid((3, 3), {(1, 1)}, set())
[((1, 1), (0, 1)), ((1, 1), (2, 1)), ((1, 1), (1, 0)), ((1, 1), (1, 2))]
"""
dimensions = tuple(dimensions)
compute = set()
connections = []
for d in range(len(dimensions)):
# Loop over all compute seeds
for seed in seeds:
# Iterate both directions
for disp in [-1, 1]:
new_dim = seed[d] + disp
# Bound check
if new_dim >= dimensions[d]:
continue
if new_dim < 0:
continue
new = list(seed)
new[d] = new_dim
new = tuple(new)
                # Push out duplicates from both new compute and complete
if new in compute:
continue
if new in complete:
continue
compute |= {new}
connections.append((seed, new))
return connections
|
# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
from nipype.testing import assert_equal
from nipype.interfaces.fsl.utils import ImageMeants
def test_ImageMeants_inputs():
input_map = dict(args=dict(argstr='%s',
),
eig=dict(argstr='--eig',
),
environ=dict(nohash=True,
usedefault=True,
),
ignore_exception=dict(nohash=True,
usedefault=True,
),
in_file=dict(argstr='-i %s',
mandatory=True,
position=0,
),
mask=dict(argstr='-m %s',
),
nobin=dict(argstr='--no_bin',
),
order=dict(argstr='--order=%d',
usedefault=True,
),
out_file=dict(argstr='-o %s',
genfile=True,
hash_files=False,
),
output_type=dict(),
show_all=dict(argstr='--showall',
),
spatial_coord=dict(argstr='-c %s',
),
terminal_output=dict(nohash=True,
),
transpose=dict(argstr='--transpose',
),
use_mm=dict(argstr='--usemm',
),
)
inputs = ImageMeants.input_spec()
for key, metadata in input_map.items():
for metakey, value in metadata.items():
yield assert_equal, getattr(inputs.traits()[key], metakey), value
def test_ImageMeants_outputs():
output_map = dict(out_file=dict(),
)
outputs = ImageMeants.output_spec()
for key, metadata in output_map.items():
for metakey, value in metadata.items():
yield assert_equal, getattr(outputs.traits()[key], metakey), value
|
import json
import os
import boto3
from aws_lambda_powertools import Logger, Metrics, Tracer
from shared import (
NotFoundException,
generate_ttl,
get_cart_id,
get_headers,
get_user_sub,
)
from utils import get_product_from_external_service
logger = Logger()
tracer = Tracer()
metrics = Metrics()
dynamodb = boto3.resource("dynamodb")
table = dynamodb.Table(os.environ["TABLE_NAME"])
product_service_url = os.environ["PRODUCT_SERVICE_URL"]
@metrics.log_metrics(capture_cold_start_metric=True)
@logger.inject_lambda_context(log_event=True)
@tracer.capture_lambda_handler
def lambda_handler(event, context):
"""
    Add the provided quantity of a product to a cart. Where an item already exists in the cart, the quantities will
be summed.
"""
try:
request_payload = json.loads(event["body"])
except KeyError:
return {
"statusCode": 400,
"headers": get_headers(""),
"body": json.dumps({"message": "No Request payload"}),
}
product_id = request_payload["productId"]
quantity = request_payload.get("quantity", 1)
cart_id, _ = get_cart_id(event["headers"])
# Because this method can be called anonymously, we need to check there's a logged in user
user_sub = None
jwt_token = event["headers"].get("Authorization")
if jwt_token:
user_sub = get_user_sub(jwt_token)
try:
        product = get_product_from_external_service(product_id)
    except NotFoundException:
        logger.info("No product found with product_id: %s", product_id)
return {
"statusCode": 404,
"headers": get_headers(cart_id=cart_id),
"body": json.dumps({"message": "product not found"}),
}
if user_sub:
logger.info("Authenticated user")
pk = f"user#{user_sub}"
ttl = generate_ttl(
7
) # Set a longer ttl for logged in users - we want to keep their cart for longer.
else:
logger.info("Unauthenticated user")
pk = f"cart#{cart_id}"
ttl = generate_ttl()
if int(quantity) < 0:
table.update_item(
Key={"pk": pk, "sk": f"product#{product_id}"},
ExpressionAttributeNames={
"#quantity": "quantity",
"#expirationTime": "expirationTime",
"#productDetail": "productDetail",
},
ExpressionAttributeValues={
":val": quantity,
":ttl": ttl,
":productDetail": product,
":limit": abs(quantity),
},
UpdateExpression="ADD #quantity :val SET #expirationTime = :ttl, #productDetail = :productDetail",
# Prevent quantity less than 0
ConditionExpression="quantity >= :limit",
)
else:
table.update_item(
Key={"pk": pk, "sk": f"product#{product_id}"},
ExpressionAttributeNames={
"#quantity": "quantity",
"#expirationTime": "expirationTime",
"#productDetail": "productDetail",
},
ExpressionAttributeValues={
":val": quantity,
":ttl": generate_ttl(),
":productDetail": product,
},
UpdateExpression="ADD #quantity :val SET #expirationTime = :ttl, #productDetail = :productDetail",
)
metrics.add_metric(name="CartUpdated", unit="Count", value=1)
return {
"statusCode": 200,
"headers": get_headers(cart_id),
"body": json.dumps(
{"productId": product_id, "message": "product added to cart"}
),
}
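# Illustrative request body this handler expects (an assumption based on the
# fields read above):
#   {"productId": "abc-123", "quantity": 2}
# A negative quantity decrements the cart line; the ConditionExpression above
# ensures the stored quantity never drops below zero.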
|
import logging
import pprint
from vnc_api.gen.resource_client import Card
from vnc_api.gen.resource_client import Hardware
from vnc_api.gen.resource_client import Node
from vnc_api.gen.resource_client import NodeProfile
from vnc_api.gen.resource_client import Port
from vnc_api.gen.resource_client import Tag
from vnc_api.gen.resource_xsd import BaremetalPortInfo
from vnc_api.gen.resource_xsd import InterfaceMapType
from vnc_api.gen.resource_xsd import LocalLinkConnection
from vnc_api.gen.resource_xsd import PortInfoType
from vnc_cfg_api_server.tests import test_case
logger = logging.getLogger(__name__)
class TestNodeProfile(test_case.ApiServerTestCase):
@classmethod
def setUpClass(cls, *args, **kwargs):
cls.console_handler = logging.StreamHandler()
cls.console_handler.setLevel(logging.DEBUG)
logger.addHandler(cls.console_handler)
super(TestNodeProfile, cls).setUpClass(*args, **kwargs)
@classmethod
def tearDownClass(cls, *args, **kwargs):
logger.removeHandler(cls.console_handler)
super(TestNodeProfile, cls).tearDownClass(*args, **kwargs)
@property
def api(self):
return self._vnc_lib
    def print_node_profile(self, node_profile_uuid="", np_fq_name=None):
if node_profile_uuid:
np_read = self.api.node_profile_read(id=node_profile_uuid)
elif np_fq_name:
np_read = self.api.node_profile_read(fq_name=np_fq_name)
else:
return
# hw_read = self.api.hardware_read(fq_name=["test-card1"])
# logger.warn( pprint.pformat(hw_read.__dict__))
logger.warn("============ Node Profile Dict ===================")
logger.warn(pprint.pformat(np_read.__dict__))
hw_refs = np_read.get_hardware_refs()
for hw_ref in hw_refs:
hw_obj = self.api.hardware_read(id=hw_ref.get('uuid'))
logger.warn(pprint.pformat(hw_obj.__dict__))
card_refs = hw_obj.get_card_refs()
for card_ref in card_refs:
card_obj = self.api.card_read(id=card_ref.get('uuid'))
logger.warn(pprint.pformat(card_obj.__dict__))
port_map = card_obj.get_interface_map()
port_info = port_map.get_port_info()
for port in port_info:
logger.warn("============== Port Info =================")
logger.warn(pprint.pformat(port))
def create_node_and_port(self, node_and_port):
for node in node_and_port:
node_obj = Node(node, node_hostname=node)
self.api.node_create(node_obj)
for port in node_and_port[node]:
logger.warn(port['name'])
ll_obj = None
if port.get('sw_name') and port.get('port_id'):
ll_obj = LocalLinkConnection(
switch_info=port.get('sw_name'),
port_id=port.get('port_id'))
bm_info = BaremetalPortInfo(address=port.get('address'),
local_link_connection=ll_obj)
node_port_obj = Port(port.get('name'),
node_obj,
bms_port_info=bm_info)
self.api.port_create(node_port_obj)
def remove_node_and_port(self, node_and_port):
logger.warn("Removing Node and Port")
for node in node_and_port:
logger.warn("Removing Node ")
port_groups = self.api.port_groups_list(
parent_fq_name=['default-global-system-config', node])
logger.warn(pprint.pformat(port_groups))
for pg in port_groups['port-groups']:
logger.warn('DELETING Port-Group : ' + str(pg['fq_name'][-1]))
self.api.port_group_delete(fq_name=pg['fq_name'])
for port in node_and_port[node]:
logger.warn("Removing Port " + port.get('name'))
self.api.port_delete(fq_name=['default-global-system-config',
node, port.get('name')])
logger.warn("PORT : " + port.get('name'))
self.api.node_delete(fq_name=['default-global-system-config',
node])
logger.warn("NODE: " + node)
return
def create_tags(self):
tag_list = {
'provisioning': {'tag_type_name': 'label'},
'tenant': {'tag_type_name': 'label'},
'tenant1': {'tag_type_name': 'label'},
'tenant2': {'tag_type_name': 'label'},
'tenant3': {'tag_type_name': 'label'},
'provisioning1': {'tag_type_name': 'label'},
'control-data1': {'tag_type_name': 'label'},
'control-data': {'tag_type_name': 'label'}}
for tag in tag_list:
tag_obj = Tag(tag_type_name=tag_list[tag]['tag_type_name'],
tag_value=tag)
self.api.tag_create(tag_obj)
tag_read_obj = self.api.tag_read(id=tag_obj.uuid)
logger.warn("TAGS %s", pprint.pformat(tag_read_obj.__dict__))
def create_node_profile(self, node_profile_data):
for np in node_profile_data:
hardware = node_profile_data[np]['hardware']
interface_map = hardware['card']['interface-map']
ifmap_list = []
for iface in interface_map:
logger.warn(iface)
logger.warn(pprint.pformat(interface_map[iface]))
port_info = PortInfoType(
name=iface,
type="xe",
port_speed=interface_map[iface].get('port_speed'),
labels=interface_map[iface].get('labels'),
port_group=interface_map[iface].get('port_group'))
ifmap_list.append(port_info)
iface_map = InterfaceMapType(port_info=ifmap_list)
logger.warn("PORT-MPA %s", pprint.pformat(iface_map.__dict__))
card_obj = Card(hardware['card'].get('name'),
interface_map=iface_map)
self.api.card_create(card_obj)
hw_obj = Hardware(hardware.get('name'))
hw_obj.add_card(card_obj)
self.api.hardware_create(hw_obj)
node_profile_obj = NodeProfile(
np,
node_profile_vendor=node_profile_data[np].get(
'node_profile_vendor'),
node_profile_device_family=node_profile_data[np].get(
'node_profile_device_family'))
node_profile_obj.add_hardware(hw_obj)
self.api.node_profile_create(node_profile_obj)
self.print_node_profile(node_profile_uuid=node_profile_obj.uuid)
return
def test_create_node_profile(self):
"""Test node-profile association with Node.
create node (node1), and ports.
create node-profiles qfx1-np and qfx2-np
create tags to be used
associate node with qfx1-np, now node-ports should
ref to tags from node-profile.
        associate node with qfx2-np, now node-ports should
ref to new tags from node-profile.
remove ref from node, tags from node-ports should
be removed.
remove ports and node, there should not be any error.
"""
node_and_port = {
'node1':
[{'name': 'eth0',
'address': "11:22:33:44:55:55",
'sw_name': 'unit_test_qfx1',
'port_id': 'xe-0/0/0'},
{'name': 'eth1',
'address': "11:22:33:44:55:56",
'sw_name': 'unit_test_qfx1',
'port_id': 'xe-0/0/1'},
{'name': 'eth2',
'address': "11:22:33:44:55:57",
'sw_name': 'unit_test_qfx1',
'port_id': 'xe-0/0/2'}]}
node_profile_data = {
'qfx1-np': {
'node_profile_vendor': 'Juniper',
'node_profile_device_family': 'qfx',
'hardware': {
'name': 'hw1',
'card': {
'name': 'card1',
'interface-map': {
'eth0': {
'labels': ["provisioning", "tenant"],
'port_group': 'bond0',
'port_speed': '10G'
},
'eth1': {
'labels': ["tenant"],
'port_group': 'bond0',
'port_speed': '10G'
},
'eth2': {
'labels': ["provisioning",
"tenant",
"control-data"],
'port_speed': '10G'
}
}
}
}
}
}
node_profile_data1 = {
'qfx2-np': {
'node_profile_vendor': 'Juniper',
'node_profile_device_family': 'qfx',
'hardware': {
'name': 'hw2',
'card': {
'name': 'card2',
'interface-map': {
'eth0': {
'labels': [
"provisioning1",
"tenant1"],
'port_group': 'bond1',
'port_speed': '10G'
},
'eth1': {
'labels': ["tenant2"],
'port_group': 'bond1',
'port_speed': '10G'
},
'eth2': {
'labels': [
"provisioning1",
"tenant3",
"control-data1"],
'port_speed': '10G'
}
}
}
}
}
}
self.create_tags()
self.create_node_profile(node_profile_data)
self.create_node_profile(node_profile_data1)
self.create_node_and_port(node_and_port)
node_object = self.api.node_read(
fq_name=['default-global-system-config', 'node1'])
np_object = self.api.node_profile_read(
fq_name=['default-global-system-config', 'qfx1-np'])
np2_object = self.api.node_profile_read(
fq_name=['default-global-system-config', 'qfx2-np'])
logger.warn(pprint.pformat(node_object.__dict__))
node_object.set_node_profile(np_object)
self.api.node_update(node_object)
node_object.set_node_profile(np2_object)
self.api.node_update(node_object)
for node in node_and_port:
node_object_update = self.api.node_read(
fq_name=['default-global-system-config', node])
logger.warn(pprint.pformat(node_object_update.__dict__))
for port in node_and_port[node]:
port_obj = self.api.port_read(
fq_name=['default-global-system-config',
node,
port.get('name')])
logger.warn(pprint.pformat(port_obj.__dict__))
self.api.ref_update('node',
node_object.uuid,
'node-profile',
np2_object.uuid,
['default-global-system-config', 'qfx2-np'],
'DELETE')
for node in node_and_port:
node_object_update = self.api.node_read(
fq_name=['default-global-system-config', node])
logger.warn(pprint.pformat(node_object_update.__dict__))
for port in node_and_port[node]:
port_obj = self.api.port_read(
fq_name=['default-global-system-config',
node,
port.get('name')])
logger.warn("==============")
logger.warn(pprint.pformat(port_obj.__dict__))
port_groups = self.api.port_groups_list(
parent_fq_name=['default-global-system-config', node])
logger.warn('Port-Groups Printing ==============')
logger.warn(pprint.pformat(port_groups))
for pg in port_groups['port-groups']:
logger.warn("==============")
pg_obj = self.api.port_group_read(fq_name=pg['fq_name'])
logger.warn(pprint.pformat(pg_obj.__dict__))
self.remove_node_and_port(node_and_port)
logger.warn('PASS - NodeProfile Created')
|
import numpy as np
from torchvision import transforms
import os
from PIL import Image, ImageOps
import numbers
import random
import torch
class ResizeImage():
def __init__(self, size):
if isinstance(size, int):
self.size = (int(size), int(size))
else:
self.size = size
def __call__(self, img):
th, tw = self.size
return img.resize((th, tw))
class RandomSizedCrop(object):
"""Crop the given PIL.Image to random size and aspect ratio.
A crop of random size of (0.08 to 1.0) of the original size and a random
aspect ratio of 3/4 to 4/3 of the original aspect ratio is made. This crop
is finally resized to given size.
This is popularly used to train the Inception networks.
Args:
size: size of the smaller edge
interpolation: Default: PIL.Image.BILINEAR
"""
def __init__(self, size, interpolation=Image.BILINEAR):
self.size = size
self.interpolation = interpolation
def __call__(self, img):
h_off = random.randint(0, img.shape[1]-self.size)
w_off = random.randint(0, img.shape[2]-self.size)
img = img[:, h_off:h_off+self.size, w_off:w_off+self.size]
return img
class Normalize(object):
"""Normalize an tensor image with mean and standard deviation.
Given mean: (R, G, B),
will normalize each channel of the torch.*Tensor, i.e.
channel = channel - mean
Args:
mean (sequence): Sequence of means for R, G, B channels respecitvely.
"""
def __init__(self, mean=None, meanfile=None):
if mean:
self.mean = mean
else:
arr = np.load(meanfile)
self.mean = torch.from_numpy(arr.astype('float32')/255.0)[[2, 1, 0], :, :]
def __call__(self, tensor):
"""
Args:
tensor (Tensor): Tensor image of size (C, H, W) to be normalized.
Returns:
Tensor: Normalized image.
"""
# TODO: make efficient
for t, m in zip(tensor, self.mean):
t.sub_(m)
return tensor
class PlaceCrop(object):
"""Crops the given PIL.Image at the particular index.
Args:
size (sequence or int): Desired output size of the crop. If size is an
int instead of sequence like (w, h), a square crop (size, size) is
made.
"""
def __init__(self, size, start_x, start_y):
if isinstance(size, int):
self.size = (int(size), int(size))
else:
self.size = size
self.start_x = start_x
self.start_y = start_y
def __call__(self, img):
"""
Args:
img (PIL.Image): Image to be cropped.
Returns:
PIL.Image: Cropped image.
"""
th, tw = self.size
return img.crop((self.start_x, self.start_y, self.start_x + tw, self.start_y + th))
class ForceFlip(object):
"""Horizontally flip the given PIL.Image randomly with a probability of 0.5."""
def __call__(self, img):
"""
Args:
img (PIL.Image): Image to be flipped.
Returns:
PIL.Image: Randomly flipped image.
"""
return img.transpose(Image.FLIP_LEFT_RIGHT)
class CenterCrop(object):
"""Crops the given PIL.Image at the center.
Args:
size (sequence or int): Desired output size of the crop. If size is an
int instead of sequence like (h, w), a square crop (size, size) is
made.
"""
def __init__(self, size):
if isinstance(size, numbers.Number):
self.size = (int(size), int(size))
else:
self.size = size
def __call__(self, img):
"""
Args:
img (PIL.Image): Image to be cropped.
Returns:
PIL.Image: Cropped image.
"""
        h, w = (img.shape[1], img.shape[2])
th, tw = self.size
w_off = int((w - tw) / 2.)
h_off = int((h - th) / 2.)
img = img[:, h_off:h_off+th, w_off:w_off+tw]
return img
def image_train(resize_size=256, crop_size=224, alexnet=False):
if not alexnet:
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
else:
normalize = Normalize(meanfile='./ilsvrc_2012_mean.npy')
return transforms.Compose([
transforms.Resize((resize_size, resize_size)),
transforms.RandomResizedCrop(crop_size),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
normalize
])
def image_target(resize_size=256, crop_size=224, alexnet=False):
if not alexnet:
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
else:
normalize = Normalize(meanfile='./ilsvrc_2012_mean.npy')
return transforms.Compose([
transforms.Resize((resize_size, resize_size)),
transforms.RandomCrop(crop_size),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
normalize
])
def image_test(resize_size=256, crop_size=224, alexnet=False):
if not alexnet:
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
else:
normalize = Normalize(meanfile='./ilsvrc_2012_mean.npy')
start_first = 0
start_center = (resize_size - crop_size - 1) / 2
start_last = resize_size - crop_size - 1
return transforms.Compose([
transforms.Resize((resize_size, resize_size)),
transforms.CenterCrop(224),
transforms.ToTensor(),
normalize
])
def image_test_10crop(resize_size=256, crop_size=224, alexnet=False):
if not alexnet:
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
else:
normalize = Normalize(meanfile='./ilsvrc_2012_mean.npy')
start_first = 0
start_center = (resize_size - crop_size - 1) / 2
start_last = resize_size - crop_size - 1
data_transforms = [
transforms.Compose([
ResizeImage(resize_size), ForceFlip(),
PlaceCrop(crop_size, start_first, start_first),
transforms.ToTensor(),
normalize
]),
transforms.Compose([
ResizeImage(resize_size), ForceFlip(),
PlaceCrop(crop_size, start_last, start_last),
transforms.ToTensor(),
normalize
]),
transforms.Compose([
ResizeImage(resize_size), ForceFlip(),
PlaceCrop(crop_size, start_last, start_first),
transforms.ToTensor(),
normalize
]),
transforms.Compose([
ResizeImage(resize_size), ForceFlip(),
PlaceCrop(crop_size, start_first, start_last),
transforms.ToTensor(),
normalize
]),
transforms.Compose([
ResizeImage(resize_size), ForceFlip(),
PlaceCrop(crop_size, start_center, start_center),
transforms.ToTensor(),
normalize
]),
transforms.Compose([
ResizeImage(resize_size),
PlaceCrop(crop_size, start_first, start_first),
transforms.ToTensor(),
normalize
]),
transforms.Compose([
ResizeImage(resize_size),
PlaceCrop(crop_size, start_last, start_last),
transforms.ToTensor(),
normalize
]),
transforms.Compose([
ResizeImage(resize_size),
PlaceCrop(crop_size, start_last, start_first),
transforms.ToTensor(),
normalize
]),
transforms.Compose([
ResizeImage(resize_size),
PlaceCrop(crop_size, start_first, start_last),
transforms.ToTensor(),
normalize
]),
transforms.Compose([
ResizeImage(resize_size),
PlaceCrop(crop_size, start_center, start_center),
transforms.ToTensor(),
normalize
])
]
return data_transforms
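# Usage sketch (assumes a PIL image `img`): the ten transforms above are the
# four corner crops plus the center crop, each with and without a horizontal
# flip, e.g.
#   crops = [t(img) for t in image_test_10crop()]   # ten (C, 224, 224) tensors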
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'UI/cadastro_fornecedor.ui'
#
# Created by: PyQt5 UI code generator 5.15.4
#
# WARNING: Any manual changes made to this file will be lost when pyuic5 is
# run again. Do not edit this file unless you know what you are doing.
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_ct_FormFornecedor(object):
def setupUi(self, ct_FormFornecedor):
ct_FormFornecedor.setObjectName("ct_FormFornecedor")
ct_FormFornecedor.resize(653, 371)
self.fr_FormFornecedor = QtWidgets.QFrame(ct_FormFornecedor)
self.fr_FormFornecedor.setGeometry(QtCore.QRect(0, 0, 1000, 500))
self.fr_FormFornecedor.setStyleSheet("background: #FFF;\n"
"border: none")
self.fr_FormFornecedor.setObjectName("fr_FormFornecedor")
self.lb_FormFornecedor = QtWidgets.QLabel(self.fr_FormFornecedor)
self.lb_FormFornecedor.setGeometry(QtCore.QRect(20, 10, 880, 30))
self.lb_FormFornecedor.setStyleSheet("QLabel{\n"
"font-size: 14px;\n"
"font-family: \"Arial\";\n"
"font-weight: bold;\n"
"\n"
"border-bottom: 2px solid #A2A2A2\n"
"}")
self.lb_FormFornecedor.setObjectName("lb_FormFornecedor")
self.lb_FormFornecedor_2 = QtWidgets.QLabel(self.fr_FormFornecedor)
self.lb_FormFornecedor_2.setGeometry(QtCore.QRect(370, 60, 150, 20))
self.lb_FormFornecedor_2.setStyleSheet("QLabel{\n"
"font-size: 12px;\n"
"font-family: \"Arial Unicode MS\";\n"
"font-weight: bold;\n"
"color: #797979\n"
"}")
self.lb_FormFornecedor_2.setObjectName("lb_FormFornecedor_2")
self.tx_NomeFantasia = QtWidgets.QLineEdit(self.fr_FormFornecedor)
self.tx_NomeFantasia.setGeometry(QtCore.QRect(370, 80, 271, 25))
self.tx_NomeFantasia.setFocusPolicy(QtCore.Qt.WheelFocus)
self.tx_NomeFantasia.setStyleSheet("QLineEdit{\n"
"background: #CFCFCF;\n"
"border-radius: 2px;\n"
"color: #000;\n"
"font: 13px \"Arial\" ;\n"
"text-transform: uppercase;\n"
"}\n"
"QLineEdit:Focus {\n"
"border: 1px solid red;\n"
"}")
self.tx_NomeFantasia.setObjectName("tx_NomeFantasia")
self.lb_FormFornecedor_3 = QtWidgets.QLabel(self.fr_FormFornecedor)
self.lb_FormFornecedor_3.setGeometry(QtCore.QRect(20, 60, 190, 20))
self.lb_FormFornecedor_3.setStyleSheet("QLabel{\n"
"font-size: 12px;\n"
"font-family: \"Arial Unicode MS\";\n"
"font-weight: bold;\n"
"color: #797979\n"
"}")
self.lb_FormFornecedor_3.setObjectName("lb_FormFornecedor_3")
self.lb_FormFornecedor_5 = QtWidgets.QLabel(self.fr_FormFornecedor)
self.lb_FormFornecedor_5.setGeometry(QtCore.QRect(20, 120, 196, 20))
self.lb_FormFornecedor_5.setStyleSheet("QLabel{\n"
"font-size: 12px;\n"
"font-family: \"Arial Unicode MS\";\n"
"font-weight: bold;\n"
"color: #797979\n"
"}")
self.lb_FormFornecedor_5.setObjectName("lb_FormFornecedor_5")
self.tx_Telefone = QtWidgets.QLineEdit(self.fr_FormFornecedor)
self.tx_Telefone.setGeometry(QtCore.QRect(20, 140, 196, 25))
self.tx_Telefone.setFocusPolicy(QtCore.Qt.WheelFocus)
self.tx_Telefone.setStyleSheet("QLineEdit{\n"
"background: #CFCFCF;\n"
"border-radius: 2px;\n"
"color: #000;\n"
"font: 13px \"Arial\" \n"
"}\n"
"QLineEdit:Focus {\n"
"border: 1px solid red;\n"
"}")
self.tx_Telefone.setPlaceholderText("")
self.tx_Telefone.setObjectName("tx_Telefone")
self.lb_FormFornecedor_8 = QtWidgets.QLabel(self.fr_FormFornecedor)
self.lb_FormFornecedor_8.setGeometry(QtCore.QRect(20, 180, 630, 30))
self.lb_FormFornecedor_8.setStyleSheet("QLabel{\n"
"font-size: 14px;\n"
"font-family: \"Arial\";\n"
"font-weight: normal;\n"
"\n"
"border-bottom: 2px solid #A2A2A2;\n"
"color: #797979\n"
"}")
self.lb_FormFornecedor_8.setObjectName("lb_FormFornecedor_8")
self.tx_Cep = QtWidgets.QLineEdit(self.fr_FormFornecedor)
self.tx_Cep.setGeometry(QtCore.QRect(20, 240, 101, 25))
self.tx_Cep.setFocusPolicy(QtCore.Qt.WheelFocus)
self.tx_Cep.setStyleSheet("QLineEdit{\n"
"background: #CFCFCF;\n"
"border-radius: 2px;\n"
"color: #000;\n"
"font: 13px \"Arial\" ;\n"
"text-transform: uppercase\n"
"}\n"
"QLineEdit:Focus {\n"
"border: 1px solid red;\n"
"}")
self.tx_Cep.setAlignment(QtCore.Qt.AlignCenter)
self.tx_Cep.setObjectName("tx_Cep")
self.lb_FormFornecedor_10 = QtWidgets.QLabel(self.fr_FormFornecedor)
self.lb_FormFornecedor_10.setGeometry(QtCore.QRect(20, 215, 50, 20))
self.lb_FormFornecedor_10.setStyleSheet("QLabel{\n"
"font-size: 12px;\n"
"font-family: \"Arial Unicode MS\";\n"
"font-weight: bold;\n"
"color: #797979\n"
"}")
self.lb_FormFornecedor_10.setObjectName("lb_FormFornecedor_10")
self.fr_BotoesFormFornecedor = QtWidgets.QFrame(self.fr_FormFornecedor)
self.fr_BotoesFormFornecedor.setGeometry(QtCore.QRect(-340, 340, 1000, 30))
self.fr_BotoesFormFornecedor.setStyleSheet("background:#E1DFE0;\n"
"border: none;")
self.fr_BotoesFormFornecedor.setObjectName("fr_BotoesFormFornecedor")
self.bt_Voltar = QtWidgets.QPushButton(self.fr_BotoesFormFornecedor)
self.bt_Voltar.setGeometry(QtCore.QRect(880, 0, 120, 30))
font = QtGui.QFont()
font.setFamily("Tahoma")
font.setPointSize(10)
font.setBold(True)
font.setWeight(75)
self.bt_Voltar.setFont(font)
self.bt_Voltar.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
self.bt_Voltar.setFocusPolicy(QtCore.Qt.NoFocus)
self.bt_Voltar.setContextMenuPolicy(QtCore.Qt.ActionsContextMenu)
self.bt_Voltar.setStyleSheet("QPushButton {\n"
"background-color: #1E87F0;\n"
"color: #FFF\n"
" }\n"
"QPushButton:hover{\n"
"background-color: #40a286\n"
"}")
self.bt_Voltar.setIconSize(QtCore.QSize(75, 35))
self.bt_Voltar.setObjectName("bt_Voltar")
self.bt_Salvar = QtWidgets.QPushButton(self.fr_BotoesFormFornecedor)
self.bt_Salvar.setGeometry(QtCore.QRect(750, 0, 120, 30))
font = QtGui.QFont()
font.setFamily("Tahoma")
font.setPointSize(10)
font.setBold(True)
font.setWeight(75)
self.bt_Salvar.setFont(font)
self.bt_Salvar.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
self.bt_Salvar.setFocusPolicy(QtCore.Qt.NoFocus)
self.bt_Salvar.setContextMenuPolicy(QtCore.Qt.ActionsContextMenu)
self.bt_Salvar.setStyleSheet("QPushButton {\n"
"background-color: #7AB32E;\n"
"color: #FFF\n"
" }\n"
"QPushButton:hover{\n"
"background-color: #40a286\n"
"}")
self.bt_Salvar.setIconSize(QtCore.QSize(75, 35))
self.bt_Salvar.setObjectName("bt_Salvar")
self.tx_cnpj = QtWidgets.QLineEdit(self.fr_FormFornecedor)
self.tx_cnpj.setGeometry(QtCore.QRect(20, 80, 221, 25))
self.tx_cnpj.setFocusPolicy(QtCore.Qt.WheelFocus)
self.tx_cnpj.setStyleSheet("QLineEdit{\n"
"background: #CFCFCF;\n"
"border-radius: 2px;\n"
"color: #000;\n"
"font: 13px \"Arial\" \n"
"}\n"
"QLineEdit:Focus {\n"
"border: 1px solid red;\n"
"}")
self.tx_cnpj.setPlaceholderText("")
self.tx_cnpj.setObjectName("tx_cnpj")
self.lb_FormFornecedor_23 = QtWidgets.QLabel(self.fr_FormFornecedor)
self.lb_FormFornecedor_23.setGeometry(QtCore.QRect(230, 120, 190, 20))
self.lb_FormFornecedor_23.setStyleSheet("QLabel{\n"
"font-size: 12px;\n"
"font-family: \"Arial Unicode MS\";\n"
"font-weight: bold;\n"
"color: #797979\n"
"}")
self.lb_FormFornecedor_23.setObjectName("lb_FormFornecedor_23")
self.tx_Email = QtWidgets.QLineEdit(self.fr_FormFornecedor)
self.tx_Email.setGeometry(QtCore.QRect(230, 140, 196, 25))
self.tx_Email.setFocusPolicy(QtCore.Qt.WheelFocus)
self.tx_Email.setStyleSheet("QLineEdit{\n"
"background: #CFCFCF;\n"
"border-radius: 2px;\n"
"color: #000;\n"
"font: 13px \"Arial\" \n"
"}\n"
"QLineEdit:Focus {\n"
"border: 1px solid red;\n"
"}")
self.tx_Email.setPlaceholderText("")
self.tx_Email.setObjectName("tx_Email")
self.lb_FormFornecedor_11 = QtWidgets.QLabel(self.fr_FormFornecedor)
self.lb_FormFornecedor_11.setGeometry(QtCore.QRect(160, 215, 250, 20))
self.lb_FormFornecedor_11.setStyleSheet("QLabel{\n"
"font-size: 12px;\n"
"font-family: \"Arial Unicode MS\";\n"
"font-weight: bold;\n"
"color: #797979\n"
"}")
self.lb_FormFornecedor_11.setObjectName("lb_FormFornecedor_11")
self.tx_Endereco = QtWidgets.QLineEdit(self.fr_FormFornecedor)
self.tx_Endereco.setGeometry(QtCore.QRect(160, 240, 400, 25))
self.tx_Endereco.setFocusPolicy(QtCore.Qt.WheelFocus)
self.tx_Endereco.setStyleSheet("QLineEdit{\n"
"background: #CFCFCF;\n"
"border-radius: 2px;\n"
"color: #000;\n"
"font: 13px \"Arial\" ;\n"
"text-transform: uppercase\n"
"}\n"
"QLineEdit:Focus {\n"
"border: 1px solid red;\n"
"}")
self.tx_Endereco.setInputMask("")
self.tx_Endereco.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)
self.tx_Endereco.setPlaceholderText("")
self.tx_Endereco.setObjectName("tx_Endereco")
self.lb_FormFornecedor_12 = QtWidgets.QLabel(self.fr_FormFornecedor)
self.lb_FormFornecedor_12.setGeometry(QtCore.QRect(580, 215, 50, 20))
self.lb_FormFornecedor_12.setStyleSheet("QLabel{\n"
"font-size: 12px;\n"
"font-family: \"Arial Unicode MS\";\n"
"font-weight: bold;\n"
"color: #797979\n"
"}")
self.lb_FormFornecedor_12.setObjectName("lb_FormFornecedor_12")
self.tx_Numero = QtWidgets.QLineEdit(self.fr_FormFornecedor)
self.tx_Numero.setGeometry(QtCore.QRect(580, 240, 70, 25))
self.tx_Numero.setFocusPolicy(QtCore.Qt.WheelFocus)
self.tx_Numero.setStyleSheet("QLineEdit{\n"
"background: #CFCFCF;\n"
"border-radius: 2px;\n"
"color: #000;\n"
"font: 13px \"Arial\" ;\n"
"text-transform: uppercase\n"
"}\n"
"QLineEdit:Focus {\n"
"border: 1px solid red;\n"
"}")
self.tx_Numero.setInputMask("")
self.tx_Numero.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)
self.tx_Numero.setPlaceholderText("")
self.tx_Numero.setObjectName("tx_Numero")
self.tx_Bairro = QtWidgets.QLineEdit(self.fr_FormFornecedor)
self.tx_Bairro.setGeometry(QtCore.QRect(20, 295, 260, 25))
self.tx_Bairro.setFocusPolicy(QtCore.Qt.WheelFocus)
self.tx_Bairro.setStyleSheet("QLineEdit{\n"
"background: #CFCFCF;\n"
"border-radius: 2px;\n"
"color: #000;\n"
"font: 13px \"Arial\" ;\n"
"text-transform: uppercase\n"
"}\n"
"QLineEdit:Focus {\n"
"border: 1px solid red;\n"
"}")
self.tx_Bairro.setInputMask("")
self.tx_Bairro.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)
self.tx_Bairro.setPlaceholderText("")
self.tx_Bairro.setObjectName("tx_Bairro")
self.lb_FormFornecedor_13 = QtWidgets.QLabel(self.fr_FormFornecedor)
self.lb_FormFornecedor_13.setGeometry(QtCore.QRect(20, 270, 120, 20))
self.lb_FormFornecedor_13.setStyleSheet("QLabel{\n"
"font-size: 12px;\n"
"font-family: \"Arial Unicode MS\";\n"
"font-weight: bold;\n"
"color: #797979\n"
"}")
self.lb_FormFornecedor_13.setObjectName("lb_FormFornecedor_13")
self.tx_Cidade = QtWidgets.QLineEdit(self.fr_FormFornecedor)
self.tx_Cidade.setGeometry(QtCore.QRect(300, 295, 260, 25))
self.tx_Cidade.setFocusPolicy(QtCore.Qt.WheelFocus)
self.tx_Cidade.setStyleSheet("QLineEdit{\n"
"background: #CFCFCF;\n"
"border-radius: 2px;\n"
"color: #000;\n"
"font: 13px \"Arial\" ;\n"
"text-transform: uppercase\n"
"}\n"
"QLineEdit:Focus {\n"
"border: 1px solid red;\n"
"}")
self.tx_Cidade.setInputMask("")
self.tx_Cidade.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)
self.tx_Cidade.setPlaceholderText("")
self.tx_Cidade.setObjectName("tx_Cidade")
self.lb_FormFornecedor_14 = QtWidgets.QLabel(self.fr_FormFornecedor)
self.lb_FormFornecedor_14.setGeometry(QtCore.QRect(300, 270, 120, 20))
self.lb_FormFornecedor_14.setStyleSheet("QLabel{\n"
"font-size: 12px;\n"
"font-family: \"Arial Unicode MS\";\n"
"font-weight: bold;\n"
"color: #797979\n"
"}")
self.lb_FormFornecedor_14.setObjectName("lb_FormFornecedor_14")
self.lb_FormFornecedor_15 = QtWidgets.QLabel(self.fr_FormFornecedor)
self.lb_FormFornecedor_15.setGeometry(QtCore.QRect(580, 270, 70, 20))
self.lb_FormFornecedor_15.setStyleSheet("QLabel{\n"
"font-size: 12px;\n"
"font-family: \"Arial Unicode MS\";\n"
"font-weight: bold;\n"
"color: #797979\n"
"}")
self.lb_FormFornecedor_15.setObjectName("lb_FormFornecedor_15")
self.tx_Estado = QtWidgets.QLineEdit(self.fr_FormFornecedor)
self.tx_Estado.setGeometry(QtCore.QRect(580, 295, 70, 25))
self.tx_Estado.setFocusPolicy(QtCore.Qt.WheelFocus)
self.tx_Estado.setStyleSheet("QLineEdit{\n"
"background: #CFCFCF;\n"
"border-radius: 2px;\n"
"color: #000;\n"
"font: 13px \"Arial\" ;\n"
"text-transform: uppercase\n"
"}\n"
"QLineEdit:Focus {\n"
"border: 1px solid red;\n"
"}")
self.tx_Estado.setInputMask("")
self.tx_Estado.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)
self.tx_Estado.setPlaceholderText("")
self.tx_Estado.setObjectName("tx_Estado")
self.bt_busca_cep = QtWidgets.QPushButton(self.fr_FormFornecedor)
self.bt_busca_cep.setGeometry(QtCore.QRect(130, 240, 21, 31))
self.bt_busca_cep.setText("")
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap("UI/../../Imagens/search.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.bt_busca_cep.setIcon(icon)
self.bt_busca_cep.setObjectName("bt_busca_cep")
self.bt_busca_cnpj = QtWidgets.QPushButton(self.fr_FormFornecedor)
self.bt_busca_cnpj.setGeometry(QtCore.QRect(250, 80, 111, 31))
font = QtGui.QFont()
font.setFamily("Tahoma")
font.setPointSize(10)
font.setBold(True)
font.setWeight(75)
self.bt_busca_cnpj.setFont(font)
self.bt_busca_cnpj.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
self.bt_busca_cnpj.setFocusPolicy(QtCore.Qt.NoFocus)
self.bt_busca_cnpj.setContextMenuPolicy(QtCore.Qt.ActionsContextMenu)
self.bt_busca_cnpj.setStyleSheet("QPushButton {\n"
"background-color: #7AB32E;\n"
"color: #FFF\n"
" }\n"
"QPushButton:hover{\n"
"background-color: #40a286\n"
"}")
self.bt_busca_cnpj.setIconSize(QtCore.QSize(75, 35))
self.bt_busca_cnpj.setObjectName("bt_busca_cnpj")
self.retranslateUi(ct_FormFornecedor)
QtCore.QMetaObject.connectSlotsByName(ct_FormFornecedor)
ct_FormFornecedor.setTabOrder(self.tx_cnpj, self.tx_NomeFantasia)
ct_FormFornecedor.setTabOrder(self.tx_NomeFantasia, self.tx_Telefone)
ct_FormFornecedor.setTabOrder(self.tx_Telefone, self.tx_Email)
ct_FormFornecedor.setTabOrder(self.tx_Email, self.tx_Cep)
ct_FormFornecedor.setTabOrder(self.tx_Cep, self.bt_busca_cep)
ct_FormFornecedor.setTabOrder(self.bt_busca_cep, self.tx_Endereco)
ct_FormFornecedor.setTabOrder(self.tx_Endereco, self.tx_Numero)
ct_FormFornecedor.setTabOrder(self.tx_Numero, self.tx_Bairro)
ct_FormFornecedor.setTabOrder(self.tx_Bairro, self.tx_Cidade)
ct_FormFornecedor.setTabOrder(self.tx_Cidade, self.tx_Estado)
def retranslateUi(self, ct_FormFornecedor):
_translate = QtCore.QCoreApplication.translate
ct_FormFornecedor.setWindowTitle(_translate("ct_FormFornecedor", "Cadastro Fornecedores"))
self.lb_FormFornecedor.setText(_translate("ct_FormFornecedor", "FICHA CADASTRAL FORNECEDOR"))
self.lb_FormFornecedor_2.setText(_translate("ct_FormFornecedor", "NOME FANTASIA"))
self.tx_NomeFantasia.setPlaceholderText(_translate("ct_FormFornecedor", "NOME FANTASIA"))
self.lb_FormFornecedor_3.setText(_translate("ct_FormFornecedor", "CNPJ"))
self.lb_FormFornecedor_5.setText(_translate("ct_FormFornecedor", "TELEFONE "))
self.tx_Telefone.setInputMask(_translate("ct_FormFornecedor", "(00) 0000-00000"))
self.tx_Telefone.setText(_translate("ct_FormFornecedor", "() -"))
self.lb_FormFornecedor_8.setText(_translate("ct_FormFornecedor", "ENDEREÇO"))
self.tx_Cep.setInputMask(_translate("ct_FormFornecedor", "99999-999"))
self.tx_Cep.setPlaceholderText(_translate("ct_FormFornecedor", "123456789"))
self.lb_FormFornecedor_10.setText(_translate("ct_FormFornecedor", "CEP"))
self.bt_Voltar.setText(_translate("ct_FormFornecedor", "VOLTAR"))
self.bt_Salvar.setText(_translate("ct_FormFornecedor", "SALVAR"))
self.tx_cnpj.setInputMask(_translate("ct_FormFornecedor", "##.###.###/####-##"))
self.tx_cnpj.setText(_translate("ct_FormFornecedor", "../-----"))
self.lb_FormFornecedor_23.setText(_translate("ct_FormFornecedor", "Email"))
self.lb_FormFornecedor_11.setText(_translate("ct_FormFornecedor", "ENDEREÇO"))
self.lb_FormFornecedor_12.setText(_translate("ct_FormFornecedor", "Nº"))
self.lb_FormFornecedor_13.setText(_translate("ct_FormFornecedor", "BAIRRO"))
self.lb_FormFornecedor_14.setText(_translate("ct_FormFornecedor", "CIDADE"))
self.lb_FormFornecedor_15.setText(_translate("ct_FormFornecedor", "ESTADO"))
self.bt_busca_cep.setAccessibleName(_translate("ct_FormFornecedor", "BUSCA CEP"))
self.bt_busca_cnpj.setText(_translate("ct_FormFornecedor", "BUSCAR CNPJ"))
|
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.chrome.options import Options
import time
import os
import sys
from glob import glob
import re
import json
import random
import shutil
import codecs
dones = [x for x in open("done.txt",'r').read().split("\n") if len(x)]
correct = json.loads(open("bad-corrected.json",'r').read())
# print(correct)
box = sys.argv[1]
print("DOING BOX: ",box)
chrome_options = Options()
chrome_options.add_argument("--headless")
chrome_options.add_argument("safebrowsing-disable-extension-blacklist")
chrome_options.add_argument("--safebrowsing-disable-download-protection")
chrome_options.add_experimental_option("prefs", {'safebrowsing.enabled': 'false'})
filenames = [x.split(".")[0].split("/") for x in str(open("canonical_filename_order.txt",'r').read()).split("\n") if len(x)]
filenames = [x[1] for x in filenames if x[0] == box]
print("NUM STUFF IN BOX: ",len(filenames))
#filenames = [x.split("/")[1].split(".")[0] for x in str(open("canonical_filename_order.txt",'r').read()).split("\n") if len(x)]
def init_driver(path=os.path.join(os.getcwd(),"chromedriver")):
driver = webdriver.Chrome(chrome_options=chrome_options, executable_path=path)
return driver
def parse_info_html(html):
url = html.split('href="')[1].split('">')[0]
creator = html.split('creator-link\">')[1].split('</a>')[0]
date = html.split('Date:</dt><dd class="search-result__value">')[1].split("</dd>")[0]
desc = html.split('">')[2].split('</a>')[0]
return url,desc,creator,date
def parse_accession_number(html):
return html.split('Accession number:')[1].split('object__attributes-value">')[1].split('</dd>')[0]
driver = init_driver();
time.sleep(3);
for idx,fname in enumerate(filenames):
if fname in dones:
print(fname,"is done, skip")
continue
print("now processing ",fname)
entry = ("no description","no date","no accession number","no object id")
try:
driver.get("https://collection.cmoa.org/?q="+fname)
search_results = []
trials = 0
while len(search_results) == 0:
time.sleep(3)
if (trials > 5):
print("give up")
break
print("trial ",trials)
search_results = driver.find_elements_by_class_name("search-result__info")
trials += 1
cands = []
for x in search_results:
html = x.get_attribute('innerHTML')
iurl,desc,creator,date = parse_info_html(html)
print(iurl,desc,creator,date)
if (fname in correct):
if correct[fname].split("/")[-1] != iurl.split("/")[-1]:
print("SKIPPING BECAUSE OF MANUAL LABEL", fname,iurl)
continue
if True or (u"Teenie" in creator):
driver.get("https://collection.cmoa.org"+iurl);
time.sleep(2)
obj = driver.find_elements_by_class_name("object")[1].get_attribute('innerHTML')
# print(obj)
acc = parse_accession_number(obj)
print(acc)
cands.append((desc,date,acc,iurl.split("/")[-1]))
if (len(cands) > 1):
entry = cands[0]
print("WARNING!!!!!! MULIPLE POSSIBLE RESULTS FOUND!!! MANUAL CHECK!!!", fname)
elif (len(cands) == 0):
print("WARNING!!!!!! NO RELAVENT RESULT FOUND!!! MANUAL CHECK!!!", fname)
else:
entry = cands[0]
print("ENTRY:",fname,entry)
except:
print("SHIT!!!! DONT KNOW WHAT WENT WRONG",fname)
print(sys.exc_info())
codecs.open("out/"+box+".txt",'a+',encoding='utf8').write(fname+"\t"+entry[0]+"\t"+entry[1]+"\t"+entry[2]+"\t"+entry[3]+"\n")
|
# -*- coding:utf-8 -*-
# --------------------------------------------------------
# Copyright (C), 2016-2020, lizhe, All rights reserved
# --------------------------------------------------------
# @Name: usb_can_reader.py
# @Author: lizhe
# @Created: 2021/5/1 - 23:45
# --------------------------------------------------------
import re
from typing import List, Tuple
from automotive.core.can.message import Message
from .trace_reader import TraceReader
from automotive.logger.logger import logger
class UsbCanReader(TraceReader):
def read(self, file: str) -> List[Tuple[float, Message]]:
contents = self.__filter_content(file)
logger.debug(f"trace size = {len(contents)}")
return self.__convert(contents)
@staticmethod
def __filter_content(file: str):
with open(file, "r") as f:
lines = f.readlines()
lines.pop(0)
return lines
def __convert(self, contents: list) -> List[Tuple[float, Message]]:
"""
解析content,并生成message对象
00345,="09:35:34.992",0x376549,ch1,接收,0x0406,数据帧,标准帧,0x08,x| 06 01 00 00 00 00 00 00
:param contents:
:return: List<Message>
"""
trace = []
for content in contents:
time = re.search(r"\d{2}:\d{2}:\d{2}\.\d{3}", content).group(0)
data = re.search(r"(\s\w{2}){8}", content).group(0).strip().split(" ")
msg_id = re.search(r"0x\w{4},", content).group(0)[:-1]
logger.debug(f"{time}, {data}, {msg_id}")
message = Message()
message.msg_id = int(msg_id, 16)
message.data = list(map(lambda x: int(x, 16), data))
trace.append((self.__get_time(time), message))
return trace
@staticmethod
def __get_time(hex_time):
splits = hex_time.split(".")
date_time = splits[0].split(":")
hour = date_time[0]
minutes = date_time[1]
seconds = date_time[2]
millisecond = splits[1]
current_time = (int(hour) * 60 * 60 + int(minutes) * 60 + int(seconds)) * 1000 + int(millisecond)
return current_time / 1000
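# Usage sketch (illustrative, not part of the original module). For the example
# line documented in __convert above, the regular expressions extract
#   time   -> "09:35:34.992"
#   data   -> ["06", "01", "00", "00", "00", "00", "00", "00"]
#   msg_id -> "0x0406"
# and __get_time("09:35:34.992") returns 34534.992 seconds, so read() yields a
# tuple of 34534.992 and a Message whose msg_id is 0x0406 and whose data is
# [6, 1, 0, 0, 0, 0, 0, 0]. Hypothetical call, assuming a trace file exported
# by the USB CAN tool:
#   trace = UsbCanReader().read("example_trace.csv")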
|
from django.urls import path, include
from .admin import urls as admin_urls
app_name = "baserow_premium.api"
urlpatterns = [
path("admin/", include(admin_urls, namespace="admin")),
]
|
from typing import Union, cast
import libcst as cst
import libcst.matchers as m
from .util import CodeMod, runner
"""
libcst based transformer to convert 'for x in generator: yield x' to 'yield from generator'.
"""
__author__ = "Gina Häußge <gina@octoprint.org>"
__license__ = "MIT"
class YieldFromGenerator(CodeMod):
DESCRIPTION: str = "Converts 'for x in generator: yield x' to 'yield from generator'."
def leave_For(
self, original_node: cst.For, updated_node: cst.For
) -> Union[cst.For, cst.SimpleStatementLine]:
if m.matches(
updated_node,
m.For(
target=m.Name(),
body=m.IndentedBlock(
body=[m.SimpleStatementLine(body=[m.Expr(value=m.Yield(m.Name()))])]
),
),
):
target = updated_node.target.value
block = cast(cst.IndentedBlock, updated_node.body)
simple_stmt = cast(cst.SimpleStatementLine, block.body[0])
expr_stmt = cast(cst.Expr, simple_stmt.body[0])
yield_stmt = cast(cst.Yield, expr_stmt.value)
yielded = cast(cst.Name, yield_stmt.value).value
if target == yielded:
self._report_node(original_node)
self.count += 1
updated_node = cst.SimpleStatementLine(
body=[
cst.Expr(value=cst.Yield(value=cst.From(item=updated_node.iter)))
]
)
return updated_node
def main():
runner(YieldFromGenerator)
if __name__ == "__main__":
main()
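# Illustration (not part of the original module): leave_For only rewrites loops
# whose body is a single 'yield' of the loop target, e.g.
#
#     for item in generator():        # before
#         yield item
#
#     yield from generator()          # after
#
# Loops that do anything else in their body, or that yield a name other than
# the loop target, are left unchanged.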
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import abc
from typing import Awaitable, Callable, Dict, Optional, Sequence, Union
import packaging.version
import pkg_resources
import google.auth # type: ignore
import google.api_core # type: ignore
from google.api_core import exceptions as core_exceptions # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
from google.api_core import operations_v1 # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.oauth2 import service_account # type: ignore
from google.cloud.aiplatform_v1beta1.types import pipeline_job
from google.cloud.aiplatform_v1beta1.types import pipeline_job as gca_pipeline_job
from google.cloud.aiplatform_v1beta1.types import pipeline_service
from google.cloud.aiplatform_v1beta1.types import training_pipeline
from google.cloud.aiplatform_v1beta1.types import (
training_pipeline as gca_training_pipeline,
)
from google.longrunning import operations_pb2 # type: ignore
from google.protobuf import empty_pb2 # type: ignore
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution(
"google-cloud-aiplatform",
).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
try:
# google.auth.__version__ was added in 1.26.0
_GOOGLE_AUTH_VERSION = google.auth.__version__
except AttributeError:
try: # try pkg_resources if it is available
_GOOGLE_AUTH_VERSION = pkg_resources.get_distribution("google-auth").version
except pkg_resources.DistributionNotFound: # pragma: NO COVER
_GOOGLE_AUTH_VERSION = None
class PipelineServiceTransport(abc.ABC):
"""Abstract transport class for PipelineService."""
AUTH_SCOPES = ("https://www.googleapis.com/auth/cloud-platform",)
DEFAULT_HOST: str = "aiplatform.googleapis.com"
def __init__(
self,
*,
host: str = DEFAULT_HOST,
credentials: ga_credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
**kwargs,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is mutually exclusive with credentials.
scopes (Optional[Sequence[str]]): A list of scopes.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
always_use_jwt_access (Optional[bool]): Whether self signed JWT should
be used for service account credentials.
"""
# Save the hostname. Default to port 443 (HTTPS) if none is specified.
if ":" not in host:
host += ":443"
self._host = host
scopes_kwargs = self._get_scopes_kwargs(self._host, scopes)
# Save the scopes.
self._scopes = scopes
# If no credentials are provided, then determine the appropriate
# defaults.
if credentials and credentials_file:
raise core_exceptions.DuplicateCredentialArgs(
"'credentials_file' and 'credentials' are mutually exclusive"
)
if credentials_file is not None:
credentials, _ = google.auth.load_credentials_from_file(
credentials_file, **scopes_kwargs, quota_project_id=quota_project_id
)
elif credentials is None:
credentials, _ = google.auth.default(
**scopes_kwargs, quota_project_id=quota_project_id
)
# If the credentials are service account credentials, then always try to use self signed JWT.
if (
always_use_jwt_access
and isinstance(credentials, service_account.Credentials)
and hasattr(service_account.Credentials, "with_always_use_jwt_access")
):
credentials = credentials.with_always_use_jwt_access(True)
# Save the credentials.
self._credentials = credentials
# TODO(busunkim): This method is in the base transport
# to avoid duplicating code across the transport classes. These functions
# should be deleted once the minimum required versions of google-auth is increased.
# TODO: Remove this function once google-auth >= 1.25.0 is required
@classmethod
def _get_scopes_kwargs(
cls, host: str, scopes: Optional[Sequence[str]]
) -> Dict[str, Optional[Sequence[str]]]:
"""Returns scopes kwargs to pass to google-auth methods depending on the google-auth version"""
scopes_kwargs = {}
if _GOOGLE_AUTH_VERSION and (
packaging.version.parse(_GOOGLE_AUTH_VERSION)
>= packaging.version.parse("1.25.0")
):
scopes_kwargs = {"scopes": scopes, "default_scopes": cls.AUTH_SCOPES}
else:
scopes_kwargs = {"scopes": scopes or cls.AUTH_SCOPES}
return scopes_kwargs
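    # Illustrative note: with google-auth >= 1.25.0 the method above returns
    # {"scopes": scopes, "default_scopes": cls.AUTH_SCOPES}, so google-auth can
    # fall back to the cloud-platform scope when the caller passes scopes=None;
    # with older versions it returns {"scopes": scopes or cls.AUTH_SCOPES}.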
def _prep_wrapped_messages(self, client_info):
# Precompute the wrapped methods.
self._wrapped_methods = {
self.create_training_pipeline: gapic_v1.method.wrap_method(
self.create_training_pipeline,
default_timeout=5.0,
client_info=client_info,
),
self.get_training_pipeline: gapic_v1.method.wrap_method(
self.get_training_pipeline,
default_timeout=5.0,
client_info=client_info,
),
self.list_training_pipelines: gapic_v1.method.wrap_method(
self.list_training_pipelines,
default_timeout=5.0,
client_info=client_info,
),
self.delete_training_pipeline: gapic_v1.method.wrap_method(
self.delete_training_pipeline,
default_timeout=5.0,
client_info=client_info,
),
self.cancel_training_pipeline: gapic_v1.method.wrap_method(
self.cancel_training_pipeline,
default_timeout=5.0,
client_info=client_info,
),
self.create_pipeline_job: gapic_v1.method.wrap_method(
self.create_pipeline_job, default_timeout=None, client_info=client_info,
),
self.get_pipeline_job: gapic_v1.method.wrap_method(
self.get_pipeline_job, default_timeout=None, client_info=client_info,
),
self.list_pipeline_jobs: gapic_v1.method.wrap_method(
self.list_pipeline_jobs, default_timeout=None, client_info=client_info,
),
self.delete_pipeline_job: gapic_v1.method.wrap_method(
self.delete_pipeline_job, default_timeout=None, client_info=client_info,
),
self.cancel_pipeline_job: gapic_v1.method.wrap_method(
self.cancel_pipeline_job, default_timeout=None, client_info=client_info,
),
}
@property
def operations_client(self) -> operations_v1.OperationsClient:
"""Return the client designed to process long-running operations."""
raise NotImplementedError()
@property
def create_training_pipeline(
self,
) -> Callable[
[pipeline_service.CreateTrainingPipelineRequest],
Union[
gca_training_pipeline.TrainingPipeline,
Awaitable[gca_training_pipeline.TrainingPipeline],
],
]:
raise NotImplementedError()
@property
def get_training_pipeline(
self,
) -> Callable[
[pipeline_service.GetTrainingPipelineRequest],
Union[
training_pipeline.TrainingPipeline,
Awaitable[training_pipeline.TrainingPipeline],
],
]:
raise NotImplementedError()
@property
def list_training_pipelines(
self,
) -> Callable[
[pipeline_service.ListTrainingPipelinesRequest],
Union[
pipeline_service.ListTrainingPipelinesResponse,
Awaitable[pipeline_service.ListTrainingPipelinesResponse],
],
]:
raise NotImplementedError()
@property
def delete_training_pipeline(
self,
) -> Callable[
[pipeline_service.DeleteTrainingPipelineRequest],
Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]],
]:
raise NotImplementedError()
@property
def cancel_training_pipeline(
self,
) -> Callable[
[pipeline_service.CancelTrainingPipelineRequest],
Union[empty_pb2.Empty, Awaitable[empty_pb2.Empty]],
]:
raise NotImplementedError()
@property
def create_pipeline_job(
self,
) -> Callable[
[pipeline_service.CreatePipelineJobRequest],
Union[gca_pipeline_job.PipelineJob, Awaitable[gca_pipeline_job.PipelineJob]],
]:
raise NotImplementedError()
@property
def get_pipeline_job(
self,
) -> Callable[
[pipeline_service.GetPipelineJobRequest],
Union[pipeline_job.PipelineJob, Awaitable[pipeline_job.PipelineJob]],
]:
raise NotImplementedError()
@property
def list_pipeline_jobs(
self,
) -> Callable[
[pipeline_service.ListPipelineJobsRequest],
Union[
pipeline_service.ListPipelineJobsResponse,
Awaitable[pipeline_service.ListPipelineJobsResponse],
],
]:
raise NotImplementedError()
@property
def delete_pipeline_job(
self,
) -> Callable[
[pipeline_service.DeletePipelineJobRequest],
Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]],
]:
raise NotImplementedError()
@property
def cancel_pipeline_job(
self,
) -> Callable[
[pipeline_service.CancelPipelineJobRequest],
Union[empty_pb2.Empty, Awaitable[empty_pb2.Empty]],
]:
raise NotImplementedError()
__all__ = ("PipelineServiceTransport",)
|
# coding: utf-8
#########################################################################
# Website: <a href="http://www.crazyit.org">疯狂Java联盟</a>                #
# author yeeku.H.lee kongyeeku@163.com #
# #
# version 1.0 #
# #
# Copyright (C), 2001-2018, yeeku.H.Lee #
# #
# This program is protected by copyright laws. #
# #
# Program Name: #
# #
# <br>Date: #
#########################################################################
# Import the entire sys module
import sys
# Use the sys module name as a prefix to access the module's members
print(sys.argv[0])
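# Other members of sys are accessed with the same module-name prefix, for
# example (illustrative): print(sys.version) or print(sys.path).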
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for rnn module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.contrib import rnn as rnn_lib
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops as ops_lib
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import rnn
from tensorflow.python.ops import rnn_cell
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables as variables_lib
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging
from tensorflow.python.util import nest
class Plus1RNNCell(rnn_lib.RNNCell):
"""RNN Cell generating (output, new_state) = (input + 1, state + 1)."""
@property
def output_size(self):
return 5
@property
def state_size(self):
return 5
def __call__(self, input_, state, scope=None):
return (input_ + 1, state + 1)
class DummyMultiDimensionalLSTM(rnn_lib.RNNCell):
"""LSTM Cell generating (output, new_state) = (input + 1, state + 1).
The input to this cell may have an arbitrary number of dimensions that follow
the preceding 'Time' and 'Batch' dimensions.
"""
def __init__(self, dims):
"""Initialize the Multi-dimensional LSTM cell.
Args:
dims: tuple that contains the dimensions of the output of the cell,
without including 'Time' or 'Batch' dimensions.
"""
if not isinstance(dims, tuple):
raise TypeError("The dimensions passed to DummyMultiDimensionalLSTM "
"should be a tuple of ints.")
self._dims = dims
self._output_size = tensor_shape.TensorShape(self._dims)
self._state_size = (tensor_shape.TensorShape(self._dims),
tensor_shape.TensorShape(self._dims))
@property
def output_size(self):
return self._output_size
@property
def state_size(self):
return self._state_size
def __call__(self, input_, state, scope=None):
h, c = state
return (input_ + 1, (h + 1, c + 1))
class NestedRNNCell(rnn_lib.RNNCell):
"""RNN Cell generating (output, new_state) = (input + 1, state + 1).
The input, output and state of this cell is a tuple of two tensors.
"""
@property
def output_size(self):
return (5, 5)
@property
def state_size(self):
return (6, 6)
def __call__(self, input_, state, scope=None):
h, c = state
x, y = input_
return ((x + 1, y + 1), (h + 1, c + 1))
class TestStateSaver(object):
def __init__(self, batch_size, state_size):
self._batch_size = batch_size
self._state_size = state_size
self.saved_state = {}
def state(self, name):
if isinstance(self._state_size, dict):
state_size = self._state_size[name]
else:
state_size = self._state_size
if isinstance(state_size, int):
state_size = (state_size,)
elif isinstance(state_size, tuple):
pass
else:
raise TypeError("state_size should either be an int or a tuple")
return array_ops.zeros((self._batch_size,) + state_size)
def save_state(self, name, state):
self.saved_state[name] = state
return array_ops.identity(state)
@property
def batch_size(self):
return self._batch_size
@property
def state_size(self):
return self._state_size
class TestStateSaverWithCounters(TestStateSaver):
"""Class wrapper around TestStateSaver.
  A dummy class used for testing static_state_saving_rnn. It helps verify that
  the save_state and state functions are called the same number of times
  whether we evaluate the RNN cell output and the state together or each of
  them separately. It inherits from TestStateSaver and adds counters for the
  calls of those functions.
  """
def __init__(self, batch_size, state_size):
super(TestStateSaverWithCounters, self).__init__(batch_size, state_size)
self._num_state_calls = variables_lib.Variable(0)
self._num_save_state_calls = variables_lib.Variable(0)
def state(self, name):
with ops_lib.control_dependencies(
[state_ops.assign_add(self._num_state_calls, 1)]):
return super(TestStateSaverWithCounters, self).state(name)
def save_state(self, name, state):
with ops_lib.control_dependencies([state_ops.assign_add(
self._num_save_state_calls, 1)]):
return super(TestStateSaverWithCounters, self).save_state(name, state)
@property
def num_state_calls(self):
return self._num_state_calls
@property
def num_save_state_calls(self):
return self._num_save_state_calls
class RNNTest(test.TestCase):
def setUp(self):
self._seed = 23489
np.random.seed(self._seed)
def testInvalidSequenceLengthShape(self):
cell = Plus1RNNCell()
inputs = [array_ops.placeholder(dtypes.float32, shape=(3, 4))]
with self.assertRaisesRegexp(ValueError, "must be a vector"):
rnn.static_rnn(cell, inputs, dtype=dtypes.float32, sequence_length=4)
def testRNN(self):
cell = Plus1RNNCell()
batch_size = 2
input_size = 5
max_length = 8 # unrolled up to this length
inputs = max_length * [
array_ops.placeholder(dtypes.float32, shape=(batch_size, input_size))
]
outputs, state = rnn.static_rnn(cell, inputs, dtype=dtypes.float32)
self.assertEqual(len(outputs), len(inputs))
for out, inp in zip(outputs, inputs):
self.assertEqual(out.get_shape(), inp.get_shape())
self.assertEqual(out.dtype, inp.dtype)
with self.test_session(use_gpu=True) as sess:
input_value = np.random.randn(batch_size, input_size)
values = sess.run(outputs + [state], feed_dict={inputs[0]: input_value})
# Outputs
for v in values[:-1]:
self.assertAllClose(v, input_value + 1.0)
# Final state
self.assertAllClose(values[-1],
max_length * np.ones(
(batch_size, input_size), dtype=np.float32))
def testDropout(self):
cell = Plus1RNNCell()
full_dropout_cell = rnn_cell.DropoutWrapper(
cell, input_keep_prob=1e-12, seed=0)
(name, dep), = full_dropout_cell._checkpoint_dependencies
self.assertIs(dep, cell)
self.assertEqual("cell", name)
batch_size = 2
input_size = 5
max_length = 8
inputs = max_length * [
array_ops.placeholder(dtypes.float32, shape=(batch_size, input_size))
]
with variable_scope.variable_scope("share_scope"):
outputs, state = rnn.static_rnn(cell, inputs, dtype=dtypes.float32)
with variable_scope.variable_scope("drop_scope"):
dropped_outputs, _ = rnn.static_rnn(
full_dropout_cell, inputs, dtype=dtypes.float32)
self.assertEqual(len(outputs), len(inputs))
for out, inp in zip(outputs, inputs):
self.assertEqual(out.get_shape().as_list(), inp.get_shape().as_list())
self.assertEqual(out.dtype, inp.dtype)
with self.test_session(use_gpu=True) as sess:
input_value = np.random.randn(batch_size, input_size)
values = sess.run(outputs + [state], feed_dict={inputs[0]: input_value})
full_dropout_values = sess.run(
dropped_outputs, feed_dict={
inputs[0]: input_value
})
for v in values[:-1]:
self.assertAllClose(v, input_value + 1.0)
      for d_v in full_dropout_values[:-1]:  # Dropped inputs are all zeros; the cell adds 1.0
self.assertAllClose(d_v, np.ones_like(input_value))
def testDynamicCalculation(self):
cell = Plus1RNNCell()
sequence_length = array_ops.placeholder(dtypes.int64)
batch_size = 2
input_size = 5
max_length = 8
inputs = max_length * [
array_ops.placeholder(dtypes.float32, shape=(batch_size, input_size))
]
with variable_scope.variable_scope("drop_scope"):
dynamic_outputs, dynamic_state = rnn.static_rnn(
cell, inputs, sequence_length=sequence_length, dtype=dtypes.float32)
self.assertEqual(len(dynamic_outputs), len(inputs))
with self.test_session(use_gpu=True) as sess:
input_value = np.random.randn(batch_size, input_size)
dynamic_values = sess.run(
dynamic_outputs,
feed_dict={
inputs[0]: input_value,
sequence_length: [2, 3]
})
dynamic_state_value = sess.run(
[dynamic_state],
feed_dict={
inputs[0]: input_value,
sequence_length: [2, 3]
})
# outputs are fully calculated for t = 0, 1
for v in dynamic_values[:2]:
self.assertAllClose(v, input_value + 1.0)
# outputs at t = 2 are zero for entry 0, calculated for entry 1
self.assertAllClose(dynamic_values[2],
np.vstack((np.zeros((input_size)),
1.0 + input_value[1, :])))
# outputs at t = 3+ are zero
for v in dynamic_values[3:]:
self.assertAllEqual(v, np.zeros_like(input_value))
# the final states are:
# entry 0: the values from the calculation at t=1
# entry 1: the values from the calculation at t=2
self.assertAllEqual(dynamic_state_value[0],
np.vstack((1.0 * (1 + 1) * np.ones((input_size)),
1.0 * (2 + 1) * np.ones((input_size)))))
def _testScope(self, factory, prefix="prefix", use_outer_scope=True):
with self.test_session(use_gpu=True, graph=ops_lib.Graph()):
if use_outer_scope:
with variable_scope.variable_scope(prefix) as scope:
factory(scope)
else:
factory(prefix)
# check that all the variables names starts
# with the proper scope.
variables_lib.global_variables_initializer()
all_vars = variables_lib.global_variables()
prefix = prefix or "rnn"
scope_vars = [v for v in all_vars if v.name.startswith(prefix + "/")]
tf_logging.info("RNN with scope: %s (%s)" %
(prefix, "scope" if use_outer_scope else "str"))
for v in scope_vars:
tf_logging.info(v.name)
self.assertEqual(len(scope_vars), len(all_vars))
def testScope(self):
def factory(scope):
cell = Plus1RNNCell()
batch_size = 2
input_size = 5
max_length = 8 # unrolled up to this length
inputs = max_length * [
array_ops.placeholder(dtypes.float32, shape=(batch_size, input_size))
]
return rnn.static_rnn(cell, inputs, dtype=dtypes.float32, scope=scope)
self._testScope(factory, use_outer_scope=True)
self._testScope(factory, use_outer_scope=False)
self._testScope(factory, prefix=None, use_outer_scope=False)
class LSTMTest(test.TestCase):
def setUp(self):
self._seed = 23489
np.random.seed(self._seed)
def testDType(self):
# Test case for GitHub issue 16228
# Not passing dtype in constructor results in default float32
lstm = rnn_cell.LSTMCell(10)
input_tensor = array_ops.ones([10, 50])
lstm.build(input_tensor.get_shape())
self.assertEqual(lstm._bias.dtype, dtypes.float32_ref)
# Explicitly pass dtype in constructor
for dtype in [dtypes.float16, dtypes.float32, dtypes.float64]:
lstm = rnn_cell.LSTMCell(10, dtype=dtype)
input_tensor = array_ops.ones([10, 50])
lstm.build(input_tensor.get_shape())
self.assertEqual(lstm._bias.dtype, dtype._as_ref)
def testNoProjNoSharding(self):
num_units = 3
input_size = 5
batch_size = 2
max_length = 8
with self.test_session(use_gpu=True, graph=ops_lib.Graph()) as sess:
initializer = init_ops.random_uniform_initializer(
-0.01, 0.01, seed=self._seed)
cell = rnn_cell.LSTMCell(
num_units, initializer=initializer, state_is_tuple=False)
inputs = max_length * [
array_ops.placeholder(dtypes.float32, shape=(batch_size, input_size))
]
outputs, _ = rnn.static_rnn(cell, inputs, dtype=dtypes.float32)
self.assertEqual(len(outputs), len(inputs))
for out in outputs:
self.assertEqual(out.get_shape().as_list(), [batch_size, num_units])
variables_lib.global_variables_initializer().run()
input_value = np.random.randn(batch_size, input_size)
sess.run(outputs, feed_dict={inputs[0]: input_value})
def testCellClipping(self):
num_units = 3
input_size = 5
batch_size = 2
max_length = 8
with self.test_session(use_gpu=True, graph=ops_lib.Graph()) as sess:
initializer = init_ops.random_uniform_initializer(
-0.01, 0.01, seed=self._seed)
cell = rnn_cell.LSTMCell(
num_units,
use_peepholes=True,
cell_clip=0.0,
initializer=initializer,
state_is_tuple=False)
inputs = max_length * [
array_ops.placeholder(dtypes.float32, shape=(batch_size, input_size))
]
outputs, _ = rnn.static_rnn(cell, inputs, dtype=dtypes.float32)
self.assertEqual(len(outputs), len(inputs))
for out in outputs:
self.assertEqual(out.get_shape().as_list(), [batch_size, num_units])
variables_lib.global_variables_initializer().run()
input_value = np.random.randn(batch_size, input_size)
values = sess.run(outputs, feed_dict={inputs[0]: input_value})
for value in values:
# if cell c is clipped to 0, tanh(c) = 0 => m==0
self.assertAllEqual(value, np.zeros((batch_size, num_units)))
def testNoProjNoShardingSimpleStateSaver(self):
num_units = 3
input_size = 5
batch_size = 2
max_length = 8
with self.test_session(use_gpu=True, graph=ops_lib.Graph()) as sess:
initializer = init_ops.random_uniform_initializer(
-0.01, 0.01, seed=self._seed)
state_saver = TestStateSaver(batch_size, 2 * num_units)
cell = rnn_cell.LSTMCell(
num_units,
use_peepholes=False,
initializer=initializer,
state_is_tuple=False)
inputs = max_length * [
array_ops.placeholder(dtypes.float32, shape=(batch_size, input_size))
]
with variable_scope.variable_scope("share_scope"):
outputs, state = rnn.static_state_saving_rnn(
cell, inputs, state_saver=state_saver, state_name="save_lstm")
self.assertEqual(len(outputs), len(inputs))
for out in outputs:
self.assertEqual(out.get_shape().as_list(), [batch_size, num_units])
variables_lib.global_variables_initializer().run()
input_value = np.random.randn(batch_size, input_size)
(last_state_value, saved_state_value) = sess.run(
[state, state_saver.saved_state["save_lstm"]],
feed_dict={
inputs[0]: input_value
})
self.assertAllEqual(last_state_value, saved_state_value)
def testNoProjNoShardingTupleStateSaver(self):
num_units = 3
input_size = 5
batch_size = 2
max_length = 8
with self.test_session(graph=ops_lib.Graph()) as sess:
initializer = init_ops.random_uniform_initializer(
-0.01, 0.01, seed=self._seed)
state_saver = TestStateSaver(batch_size, num_units)
cell = rnn_cell.LSTMCell(
num_units,
use_peepholes=False,
initializer=initializer,
state_is_tuple=True)
inputs = max_length * [
array_ops.placeholder(dtypes.float32, shape=(batch_size, input_size))
]
with variable_scope.variable_scope("share_scope"):
outputs, state = rnn.static_state_saving_rnn(
cell, inputs, state_saver=state_saver, state_name=("c", "m"))
self.assertEqual(len(outputs), len(inputs))
for out in outputs:
self.assertEqual(out.get_shape().as_list(), [batch_size, num_units])
variables_lib.global_variables_initializer().run()
input_value = np.random.randn(batch_size, input_size)
last_and_saved_states = sess.run(
state + (state_saver.saved_state["c"], state_saver.saved_state["m"]),
feed_dict={
inputs[0]: input_value
})
self.assertEqual(4, len(last_and_saved_states))
self.assertAllEqual(last_and_saved_states[:2], last_and_saved_states[2:])
def testNoProjNoShardingNestedTupleStateSaver(self):
num_units = 3
input_size = 5
batch_size = 2
max_length = 8
with self.test_session(graph=ops_lib.Graph()) as sess:
initializer = init_ops.random_uniform_initializer(
-0.01, 0.01, seed=self._seed)
state_saver = TestStateSaver(
batch_size, {
"c0": num_units,
"m0": num_units,
"c1": num_units + 1,
"m1": num_units + 1,
"c2": num_units + 2,
"m2": num_units + 2,
"c3": num_units + 3,
"m3": num_units + 3
})
def _cell(i):
return rnn_cell.LSTMCell(
num_units + i,
use_peepholes=False,
initializer=initializer,
state_is_tuple=True)
# This creates a state tuple which has 4 sub-tuples of length 2 each.
cell = rnn_cell.MultiRNNCell(
[_cell(i) for i in range(4)], state_is_tuple=True)
self.assertEqual(len(cell.state_size), 4)
for i in range(4):
self.assertEqual(len(cell.state_size[i]), 2)
inputs = max_length * [
array_ops.placeholder(dtypes.float32, shape=(batch_size, input_size))
]
state_names = (("c0", "m0"), ("c1", "m1"), ("c2", "m2"), ("c3", "m3"))
with variable_scope.variable_scope("share_scope"):
outputs, state = rnn.static_state_saving_rnn(
cell, inputs, state_saver=state_saver, state_name=state_names)
self.assertEqual(len(outputs), len(inputs))
# Final output comes from _cell(3) which has state size num_units + 3
for out in outputs:
self.assertEqual(out.get_shape().as_list(), [batch_size, num_units + 3])
variables_lib.global_variables_initializer().run()
input_value = np.random.randn(batch_size, input_size)
last_states = sess.run(
list(nest.flatten(state)), feed_dict={
inputs[0]: input_value
})
saved_states = sess.run(
list(state_saver.saved_state.values()),
feed_dict={
inputs[0]: input_value
})
self.assertEqual(8, len(last_states))
self.assertEqual(8, len(saved_states))
flat_state_names = nest.flatten(state_names)
named_saved_states = dict(
zip(state_saver.saved_state.keys(), saved_states))
for i in range(8):
self.assertAllEqual(last_states[i],
named_saved_states[flat_state_names[i]])
def testProjNoSharding(self):
num_units = 3
input_size = 5
batch_size = 2
num_proj = 4
max_length = 8
with self.test_session(use_gpu=True, graph=ops_lib.Graph()) as sess:
initializer = init_ops.random_uniform_initializer(
-0.01, 0.01, seed=self._seed)
inputs = max_length * [
array_ops.placeholder(dtypes.float32, shape=(None, input_size))
]
cell = rnn_cell.LSTMCell(
num_units,
use_peepholes=True,
num_proj=num_proj,
initializer=initializer,
state_is_tuple=False)
outputs, _ = rnn.static_rnn(cell, inputs, dtype=dtypes.float32)
self.assertEqual(len(outputs), len(inputs))
variables_lib.global_variables_initializer().run()
input_value = np.random.randn(batch_size, input_size)
sess.run(outputs, feed_dict={inputs[0]: input_value})
def _testStateTupleWithProjAndSequenceLength(self):
num_units = 3
input_size = 5
batch_size = 2
num_proj = 4
max_length = 8
sequence_length = [4, 6]
with self.test_session(graph=ops_lib.Graph()) as sess:
initializer = init_ops.random_uniform_initializer(
-0.01, 0.01, seed=self._seed)
inputs = max_length * [
array_ops.placeholder(dtypes.float32, shape=(None, input_size))
]
cell_notuple = rnn_cell.LSTMCell(
num_units,
use_peepholes=True,
num_proj=num_proj,
initializer=initializer,
state_is_tuple=False)
cell_tuple = rnn_cell.LSTMCell(
num_units,
use_peepholes=True,
num_proj=num_proj,
initializer=initializer,
state_is_tuple=True)
with variable_scope.variable_scope("root") as scope:
outputs_notuple, state_notuple = rnn.static_rnn(
cell_notuple,
inputs,
dtype=dtypes.float32,
sequence_length=sequence_length,
scope=scope)
scope.reuse_variables()
# TODO(ebrevdo): For this test, we ensure values are identical and
# therefore the weights here are tied. In the future, we may consider
# making the state_is_tuple property mutable so we can avoid
# having to do this - especially if users ever need to reuse
# the parameters from different RNNCell instances. Right now,
# this seems an unrealistic use case except for testing.
cell_tuple._scope = cell_notuple._scope # pylint: disable=protected-access
outputs_tuple, state_tuple = rnn.static_rnn(
cell_tuple,
inputs,
dtype=dtypes.float32,
sequence_length=sequence_length,
scope=scope)
self.assertEqual(len(outputs_notuple), len(inputs))
self.assertEqual(len(outputs_tuple), len(inputs))
self.assertTrue(isinstance(state_tuple, tuple))
self.assertTrue(isinstance(state_notuple, ops_lib.Tensor))
variables_lib.global_variables_initializer().run()
input_value = np.random.randn(batch_size, input_size)
outputs_notuple_v = sess.run(
outputs_notuple, feed_dict={
inputs[0]: input_value
})
outputs_tuple_v = sess.run(
outputs_tuple, feed_dict={
inputs[0]: input_value
})
self.assertAllEqual(outputs_notuple_v, outputs_tuple_v)
(state_notuple_v,) = sess.run(
(state_notuple,), feed_dict={
inputs[0]: input_value
})
state_tuple_v = sess.run(state_tuple, feed_dict={inputs[0]: input_value})
self.assertAllEqual(state_notuple_v, np.hstack(state_tuple_v))
def testProjSharding(self):
num_units = 3
input_size = 5
batch_size = 2
num_proj = 4
num_proj_shards = 3
num_unit_shards = 2
max_length = 8
with self.test_session(use_gpu=True, graph=ops_lib.Graph()) as sess:
initializer = init_ops.random_uniform_initializer(
-0.01, 0.01, seed=self._seed)
inputs = max_length * [
array_ops.placeholder(dtypes.float32, shape=(None, input_size))
]
cell = rnn_cell.LSTMCell(
num_units,
use_peepholes=True,
num_proj=num_proj,
num_unit_shards=num_unit_shards,
num_proj_shards=num_proj_shards,
initializer=initializer,
state_is_tuple=False)
outputs, _ = rnn.static_rnn(cell, inputs, dtype=dtypes.float32)
self.assertEqual(len(outputs), len(inputs))
variables_lib.global_variables_initializer().run()
input_value = np.random.randn(batch_size, input_size)
sess.run(outputs, feed_dict={inputs[0]: input_value})
def testDoubleInput(self):
num_units = 3
input_size = 5
batch_size = 2
num_proj = 4
num_proj_shards = 3
num_unit_shards = 2
max_length = 8
with self.test_session(use_gpu=True, graph=ops_lib.Graph()) as sess:
initializer = init_ops.random_uniform_initializer(-1, 1, seed=self._seed)
inputs = max_length * [
array_ops.placeholder(dtypes.float64, shape=(None, input_size))
]
cell = rnn_cell.LSTMCell(
num_units,
use_peepholes=True,
num_proj=num_proj,
num_unit_shards=num_unit_shards,
num_proj_shards=num_proj_shards,
initializer=initializer,
state_is_tuple=False)
outputs, _ = rnn.static_rnn(
cell,
inputs,
initial_state=cell.zero_state(batch_size, dtypes.float64))
self.assertEqual(len(outputs), len(inputs))
variables_lib.global_variables_initializer().run()
input_value = np.asarray(
np.random.randn(batch_size, input_size), dtype=np.float64)
values = sess.run(outputs, feed_dict={inputs[0]: input_value})
self.assertEqual(values[0].dtype, input_value.dtype)
def testShardNoShardEquivalentOutput(self):
num_units = 3
input_size = 5
batch_size = 2
num_proj = 4
num_proj_shards = 3
num_unit_shards = 2
max_length = 8
with self.test_session(use_gpu=True, graph=ops_lib.Graph()) as sess:
inputs = max_length * [
array_ops.placeholder(dtypes.float32, shape=(None, input_size))
]
initializer = init_ops.constant_initializer(0.001)
cell_noshard = rnn_cell.LSTMCell(
num_units,
num_proj=num_proj,
use_peepholes=True,
initializer=initializer,
num_unit_shards=num_unit_shards,
num_proj_shards=num_proj_shards,
state_is_tuple=False)
cell_shard = rnn_cell.LSTMCell(
num_units,
use_peepholes=True,
initializer=initializer,
num_proj=num_proj,
state_is_tuple=False)
with variable_scope.variable_scope("noshard_scope"):
outputs_noshard, state_noshard = rnn.static_rnn(
cell_noshard, inputs, dtype=dtypes.float32)
with variable_scope.variable_scope("shard_scope"):
outputs_shard, state_shard = rnn.static_rnn(
cell_shard, inputs, dtype=dtypes.float32)
self.assertEqual(len(outputs_noshard), len(inputs))
self.assertEqual(len(outputs_noshard), len(outputs_shard))
variables_lib.global_variables_initializer().run()
input_value = np.random.randn(batch_size, input_size)
feeds = dict((x, input_value) for x in inputs)
values_noshard = sess.run(outputs_noshard, feed_dict=feeds)
values_shard = sess.run(outputs_shard, feed_dict=feeds)
state_values_noshard = sess.run([state_noshard], feed_dict=feeds)
state_values_shard = sess.run([state_shard], feed_dict=feeds)
self.assertEqual(len(values_noshard), len(values_shard))
self.assertEqual(len(state_values_noshard), len(state_values_shard))
for (v_noshard, v_shard) in zip(values_noshard, values_shard):
self.assertAllClose(v_noshard, v_shard, atol=1e-3)
for (s_noshard, s_shard) in zip(state_values_noshard, state_values_shard):
self.assertAllClose(s_noshard, s_shard, atol=1e-3)
def testDoubleInputWithDropoutAndDynamicCalculation(self):
"""Smoke test for using LSTM with doubles, dropout, dynamic calculation."""
num_units = 3
input_size = 5
batch_size = 2
num_proj = 4
num_proj_shards = 3
num_unit_shards = 2
max_length = 8
with self.test_session(use_gpu=True, graph=ops_lib.Graph()) as sess:
sequence_length = array_ops.placeholder(dtypes.int64)
initializer = init_ops.random_uniform_initializer(
-0.01, 0.01, seed=self._seed)
inputs = max_length * [
array_ops.placeholder(dtypes.float64, shape=(None, input_size))
]
cell = rnn_cell.LSTMCell(
num_units,
use_peepholes=True,
num_proj=num_proj,
num_unit_shards=num_unit_shards,
num_proj_shards=num_proj_shards,
initializer=initializer,
state_is_tuple=False)
dropout_cell = rnn_cell.DropoutWrapper(cell, 0.5, seed=0)
outputs, state = rnn.static_rnn(
dropout_cell,
inputs,
sequence_length=sequence_length,
initial_state=cell.zero_state(batch_size, dtypes.float64))
self.assertEqual(len(outputs), len(inputs))
variables_lib.global_variables_initializer().run(feed_dict={
sequence_length: [2, 3]
})
input_value = np.asarray(
np.random.randn(batch_size, input_size), dtype=np.float64)
values = sess.run(
outputs, feed_dict={
inputs[0]: input_value,
sequence_length: [2, 3]
})
state_value = sess.run(
[state], feed_dict={
inputs[0]: input_value,
sequence_length: [2, 3]
})
self.assertEqual(values[0].dtype, input_value.dtype)
self.assertEqual(state_value[0].dtype, input_value.dtype)
def testSharingWeightsWithReuse(self):
num_units = 3
input_size = 5
batch_size = 2
num_proj = 4
max_length = 8
with self.test_session(graph=ops_lib.Graph()) as sess:
initializer = init_ops.random_uniform_initializer(-1, 1, seed=self._seed)
initializer_d = init_ops.random_uniform_initializer(
-1, 1, seed=self._seed + 1)
inputs = max_length * [
array_ops.placeholder(dtypes.float32, shape=(None, input_size))
]
cell = rnn_cell.LSTMCell(
num_units,
use_peepholes=True,
num_proj=num_proj,
initializer=initializer,
state_is_tuple=False)
cell_d = rnn_cell.LSTMCell(
num_units,
use_peepholes=True,
num_proj=num_proj,
initializer=initializer_d,
state_is_tuple=False)
with variable_scope.variable_scope("share_scope"):
outputs0, _ = rnn.static_rnn(cell, inputs, dtype=dtypes.float32)
with variable_scope.variable_scope("share_scope", reuse=True):
outputs1, _ = rnn.static_rnn(cell, inputs, dtype=dtypes.float32)
with variable_scope.variable_scope("diff_scope"):
outputs2, _ = rnn.static_rnn(cell_d, inputs, dtype=dtypes.float32)
variables_lib.global_variables_initializer().run()
input_value = np.random.randn(batch_size, input_size)
output_values = sess.run(
outputs0 + outputs1 + outputs2, feed_dict={
inputs[0]: input_value
})
outputs0_values = output_values[:max_length]
outputs1_values = output_values[max_length:2 * max_length]
outputs2_values = output_values[2 * max_length:]
self.assertEqual(len(outputs0_values), len(outputs1_values))
self.assertEqual(len(outputs0_values), len(outputs2_values))
for o1, o2, o3 in zip(outputs0_values, outputs1_values, outputs2_values):
# Same weights used by both RNNs so outputs should be the same.
self.assertAllEqual(o1, o2)
# Different weights used so outputs should be different.
self.assertTrue(np.linalg.norm(o1 - o3) > 1e-6)
def testSharingWeightsWithDifferentNamescope(self):
num_units = 3
input_size = 5
batch_size = 2
num_proj = 4
max_length = 8
with self.test_session(graph=ops_lib.Graph()) as sess:
initializer = init_ops.random_uniform_initializer(-1, 1, seed=self._seed)
inputs = max_length * [
array_ops.placeholder(dtypes.float32, shape=(None, input_size))
]
cell = rnn_cell.LSTMCell(
num_units,
use_peepholes=True,
num_proj=num_proj,
initializer=initializer,
state_is_tuple=False)
with ops_lib.name_scope("scope0"):
with variable_scope.variable_scope("share_scope"):
outputs0, _ = rnn.static_rnn(cell, inputs, dtype=dtypes.float32)
with ops_lib.name_scope("scope1"):
with variable_scope.variable_scope("share_scope", reuse=True):
outputs1, _ = rnn.static_rnn(cell, inputs, dtype=dtypes.float32)
variables_lib.global_variables_initializer().run()
input_value = np.random.randn(batch_size, input_size)
output_values = sess.run(
outputs0 + outputs1, feed_dict={
inputs[0]: input_value
})
outputs0_values = output_values[:max_length]
outputs1_values = output_values[max_length:]
self.assertEqual(len(outputs0_values), len(outputs1_values))
for out0, out1 in zip(outputs0_values, outputs1_values):
self.assertAllEqual(out0, out1)
def testDynamicRNNAllowsUnknownTimeDimension(self):
inputs = array_ops.placeholder(dtypes.float32, shape=[1, None, 20])
cell = rnn_cell.GRUCell(30)
# Smoke test, this should not raise an error
rnn.dynamic_rnn(cell, inputs, dtype=dtypes.float32)
@test_util.run_in_graph_and_eager_modes
def testDynamicRNNWithTupleStates(self):
num_units = 3
input_size = 5
batch_size = 2
num_proj = 4
max_length = 8
sequence_length = [4, 6]
in_graph_mode = not context.executing_eagerly()
with self.test_session(graph=ops_lib.Graph()) as sess:
initializer = init_ops.random_uniform_initializer(
-0.01, 0.01, seed=self._seed)
if in_graph_mode:
inputs = max_length * [
array_ops.placeholder(dtypes.float32, shape=(None, input_size))
]
else:
inputs = max_length * [
constant_op.constant(
np.random.randn(batch_size, input_size).astype(np.float32))
]
inputs_c = array_ops.stack(inputs)
cell = rnn_cell.LSTMCell(
num_units,
use_peepholes=True,
num_proj=num_proj,
initializer=initializer,
state_is_tuple=True)
with variable_scope.variable_scope("root") as scope:
outputs_static, state_static = rnn.static_rnn(
cell,
inputs,
dtype=dtypes.float32,
sequence_length=sequence_length,
scope=scope)
scope.reuse_variables()
outputs_dynamic, state_dynamic = rnn.dynamic_rnn(
cell,
inputs_c,
dtype=dtypes.float32,
time_major=True,
sequence_length=sequence_length,
scope=scope)
self.assertTrue(isinstance(state_static, rnn_cell.LSTMStateTuple))
self.assertTrue(isinstance(state_dynamic, rnn_cell.LSTMStateTuple))
self.assertEqual(state_static[0], state_static.c)
self.assertEqual(state_static[1], state_static.h)
self.assertEqual(state_dynamic[0], state_dynamic.c)
self.assertEqual(state_dynamic[1], state_dynamic.h)
if in_graph_mode:
variables_lib.global_variables_initializer().run()
input_value = np.random.randn(batch_size, input_size)
outputs_static = sess.run(
outputs_static, feed_dict={
inputs[0]: input_value
})
outputs_dynamic = sess.run(
outputs_dynamic, feed_dict={
inputs[0]: input_value
})
state_static = sess.run(
state_static, feed_dict={
inputs[0]: input_value
})
state_dynamic = sess.run(
state_dynamic, feed_dict={
inputs[0]: input_value
})
if in_graph_mode:
self.assertAllEqual(outputs_static, outputs_dynamic)
else:
self.assertAllEqual(array_ops.stack(outputs_static), outputs_dynamic)
self.assertAllEqual(np.hstack(state_static), np.hstack(state_dynamic))
@test_util.run_in_graph_and_eager_modes
def testDynamicRNNWithNestedTupleStates(self):
num_units = 3
input_size = 5
batch_size = 2
num_proj = 4
max_length = 8
sequence_length = [4, 6]
in_graph_mode = not context.executing_eagerly()
with self.test_session(graph=ops_lib.Graph()) as sess:
initializer = init_ops.random_uniform_initializer(
-0.01, 0.01, seed=self._seed)
if in_graph_mode:
inputs = max_length * [
array_ops.placeholder(dtypes.float32, shape=(None, input_size))
]
else:
inputs = max_length * [
constant_op.constant(
np.random.randn(batch_size, input_size).astype(np.float32))
]
inputs_c = array_ops.stack(inputs)
def _cell(i):
return rnn_cell.LSTMCell(
num_units + i,
use_peepholes=True,
num_proj=num_proj + i,
initializer=initializer,
state_is_tuple=True)
# This creates a state tuple which has 4 sub-tuples of length 2 each.
cell = rnn_cell.MultiRNNCell(
[_cell(i) for i in range(4)], state_is_tuple=True)
self.assertEqual(len(cell.state_size), 4)
for i in range(4):
self.assertEqual(len(cell.state_size[i]), 2)
test_zero = cell.zero_state(1, dtypes.float32)
self.assertEqual(len(test_zero), 4)
for i in range(4):
self.assertEqual(test_zero[i][0].get_shape()[1], cell.state_size[i][0])
self.assertEqual(test_zero[i][1].get_shape()[1], cell.state_size[i][1])
with variable_scope.variable_scope("root") as scope:
outputs_static, state_static = rnn.static_rnn(
cell,
inputs,
dtype=dtypes.float32,
sequence_length=sequence_length,
scope=scope)
scope.reuse_variables()
outputs_dynamic, state_dynamic = rnn.dynamic_rnn(
cell,
inputs_c,
dtype=dtypes.float32,
time_major=True,
sequence_length=sequence_length,
scope=scope)
if in_graph_mode:
input_value = np.random.randn(batch_size, input_size)
variables_lib.global_variables_initializer().run()
outputs_static = sess.run(
outputs_static, feed_dict={
inputs[0]: input_value
})
outputs_dynamic = sess.run(
outputs_dynamic, feed_dict={
inputs[0]: input_value
})
state_static = sess.run(
nest.flatten(state_static), feed_dict={
inputs[0]: input_value
})
state_dynamic = sess.run(
nest.flatten(state_dynamic), feed_dict={
inputs[0]: input_value
})
if in_graph_mode:
self.assertAllEqual(outputs_static, outputs_dynamic)
else:
self.assertAllEqual(array_ops.stack(outputs_static), outputs_dynamic)
state_static = nest.flatten(state_static)
state_dynamic = nest.flatten(state_dynamic)
self.assertAllEqual(np.hstack(state_static), np.hstack(state_dynamic))
def _testDynamicEquivalentToStaticRNN(self, use_sequence_length):
time_steps = 8
num_units = 3
num_proj = 4
input_size = 5
batch_size = 2
input_values = np.random.randn(time_steps, batch_size, input_size).astype(
np.float32)
if use_sequence_length:
sequence_length = np.random.randint(0, time_steps, size=batch_size)
else:
sequence_length = None
in_graph_mode = not context.executing_eagerly()
# TODO(b/68017812): Eager ignores operation seeds, so we need to create a
# single cell and reuse it across the static and dynamic RNNs. Remove this
    # special case once it is fixed.
if not in_graph_mode:
initializer = init_ops.random_uniform_initializer(
-0.01, 0.01, seed=self._seed)
cell = rnn_cell.LSTMCell(
num_units,
use_peepholes=True,
initializer=initializer,
num_proj=num_proj,
state_is_tuple=False)
########### Step 1: Run static graph and generate readouts
with self.test_session(use_gpu=True, graph=ops_lib.Graph()) as sess:
if in_graph_mode:
concat_inputs = array_ops.placeholder(
dtypes.float32, shape=(time_steps, batch_size, input_size))
else:
concat_inputs = constant_op.constant(input_values)
inputs = array_ops.unstack(concat_inputs)
initializer = init_ops.random_uniform_initializer(
-0.01, 0.01, seed=self._seed)
# TODO(akshayka): Remove special case once b/68017812 is fixed.
if in_graph_mode:
cell = rnn_cell.LSTMCell(
num_units,
use_peepholes=True,
initializer=initializer,
num_proj=num_proj,
state_is_tuple=False)
with variable_scope.variable_scope("dynamic_scope"):
outputs_static, state_static = rnn.static_rnn(
cell, inputs, sequence_length=sequence_length, dtype=dtypes.float32)
if in_graph_mode:
# Generate gradients and run sessions to obtain outputs
feeds = {concat_inputs: input_values}
# Initialize
variables_lib.global_variables_initializer().run(feed_dict=feeds)
# Generate gradients of sum of outputs w.r.t. inputs
static_gradients = gradients_impl.gradients(
outputs_static + [state_static], [concat_inputs])
# Generate gradients of individual outputs w.r.t. inputs
static_individual_gradients = nest.flatten([
gradients_impl.gradients(y, [concat_inputs])
for y in [outputs_static[0], outputs_static[-1], state_static]
])
# Generate gradients of individual variables w.r.t. inputs
trainable_variables = ops_lib.get_collection(
ops_lib.GraphKeys.TRAINABLE_VARIABLES)
assert len(trainable_variables) > 1, (
"Count of trainable variables: %d" % len(trainable_variables))
# pylint: disable=bad-builtin
static_individual_variable_gradients = nest.flatten([
gradients_impl.gradients(y, trainable_variables)
for y in [outputs_static[0], outputs_static[-1], state_static]
])
# Test forward pass
values_static = sess.run(outputs_static, feed_dict=feeds)
(state_value_static,) = sess.run((state_static,), feed_dict=feeds)
# Test gradients to inputs and variables w.r.t. outputs & final state
static_grad_values = sess.run(static_gradients, feed_dict=feeds)
static_individual_grad_values = sess.run(
static_individual_gradients, feed_dict=feeds)
static_individual_var_grad_values = sess.run(
static_individual_variable_gradients, feed_dict=feeds)
########## Step 2: Run dynamic graph and generate readouts
with self.test_session(use_gpu=True, graph=ops_lib.Graph()) as sess:
if in_graph_mode:
concat_inputs = array_ops.placeholder(
dtypes.float32, shape=(time_steps, batch_size, input_size))
else:
concat_inputs = constant_op.constant(input_values)
initializer = init_ops.random_uniform_initializer(
-0.01, 0.01, seed=self._seed)
# TODO(akshayka): Remove this special case once b/68017812 is
# fixed.
if in_graph_mode:
cell = rnn_cell.LSTMCell(
num_units,
use_peepholes=True,
initializer=initializer,
num_proj=num_proj,
state_is_tuple=False)
with variable_scope.variable_scope("dynamic_scope"):
outputs_dynamic, state_dynamic = rnn.dynamic_rnn(
cell,
inputs=concat_inputs,
sequence_length=sequence_length,
time_major=True,
dtype=dtypes.float32)
split_outputs_dynamic = array_ops.unstack(outputs_dynamic, time_steps)
if in_graph_mode:
feeds = {concat_inputs: input_values}
# Initialize
variables_lib.global_variables_initializer().run(feed_dict=feeds)
# Generate gradients of sum of outputs w.r.t. inputs
dynamic_gradients = gradients_impl.gradients(
split_outputs_dynamic + [state_dynamic], [concat_inputs])
# Generate gradients of several individual outputs w.r.t. inputs
dynamic_individual_gradients = nest.flatten([
gradients_impl.gradients(y, [concat_inputs])
for y in [
split_outputs_dynamic[0], split_outputs_dynamic[-1],
state_dynamic
]
])
# Generate gradients of individual variables w.r.t. inputs
trainable_variables = ops_lib.get_collection(
ops_lib.GraphKeys.TRAINABLE_VARIABLES)
assert len(trainable_variables) > 1, (
"Count of trainable variables: %d" % len(trainable_variables))
dynamic_individual_variable_gradients = nest.flatten([
gradients_impl.gradients(y, trainable_variables)
for y in [
split_outputs_dynamic[0], split_outputs_dynamic[-1],
state_dynamic
]
])
# Test forward pass
values_dynamic = sess.run(split_outputs_dynamic, feed_dict=feeds)
(state_value_dynamic,) = sess.run((state_dynamic,), feed_dict=feeds)
# Test gradients to inputs and variables w.r.t. outputs & final state
dynamic_grad_values = sess.run(dynamic_gradients, feed_dict=feeds)
dynamic_individual_grad_values = sess.run(
dynamic_individual_gradients, feed_dict=feeds)
dynamic_individual_var_grad_values = sess.run(
dynamic_individual_variable_gradients, feed_dict=feeds)
######### Step 3: Comparisons
if not in_graph_mode:
values_static = outputs_static
values_dynamic = split_outputs_dynamic
state_value_static = state_static
state_value_dynamic = state_dynamic
self.assertEqual(len(values_static), len(values_dynamic))
for (value_static, value_dynamic) in zip(values_static, values_dynamic):
self.assertAllEqual(value_static, value_dynamic)
self.assertAllEqual(state_value_static, state_value_dynamic)
if in_graph_mode:
self.assertAllEqual(static_grad_values, dynamic_grad_values)
self.assertEqual(
len(static_individual_grad_values),
len(dynamic_individual_grad_values))
self.assertEqual(
len(static_individual_var_grad_values),
len(dynamic_individual_var_grad_values))
for i, (a, b) in enumerate(
zip(static_individual_grad_values, dynamic_individual_grad_values)):
tf_logging.info("Comparing individual gradients iteration %d" % i)
self.assertAllEqual(a, b)
for i, (a, b) in enumerate(
zip(static_individual_var_grad_values,
dynamic_individual_var_grad_values)):
tf_logging.info(
"Comparing individual variable gradients iteration %d" % i)
self.assertAllEqual(a, b)
@test_util.run_in_graph_and_eager_modes
def testDynamicEquivalentToStaticRNN(self):
self._testDynamicEquivalentToStaticRNN(use_sequence_length=True)
self._testDynamicEquivalentToStaticRNN(use_sequence_length=False)
class BidirectionalRNNTest(test.TestCase):
def setUp(self):
self._seed = 23489
np.random.seed(self._seed)
def _createBidirectionalRNN(self, use_shape, use_sequence_length, scope=None):
num_units = 3
input_size = 5
batch_size = 2
max_length = 8
initializer = init_ops.random_uniform_initializer(
-0.01, 0.01, seed=self._seed)
sequence_length = array_ops.placeholder(
dtypes.int64) if use_sequence_length else None
cell_fw = rnn_cell.LSTMCell(
num_units, input_size, initializer=initializer, state_is_tuple=False)
cell_bw = rnn_cell.LSTMCell(
num_units, input_size, initializer=initializer, state_is_tuple=False)
inputs = max_length * [
array_ops.placeholder(
dtypes.float32,
shape=(batch_size, input_size) if use_shape else (None, input_size))
]
outputs, state_fw, state_bw = rnn.static_bidirectional_rnn(
cell_fw,
cell_bw,
inputs,
dtype=dtypes.float32,
sequence_length=sequence_length,
scope=scope)
self.assertEqual(len(outputs), len(inputs))
for out in outputs:
self.assertEqual(out.get_shape().as_list(),
[batch_size if use_shape else None, 2 * num_units])
input_value = np.random.randn(batch_size, input_size)
outputs = array_ops.stack(outputs)
return input_value, inputs, outputs, state_fw, state_bw, sequence_length
def _testBidirectionalRNN(self, use_shape):
with self.test_session(use_gpu=True, graph=ops_lib.Graph()) as sess:
input_value, inputs, outputs, state_fw, state_bw, sequence_length = (
self._createBidirectionalRNN(use_shape, True))
variables_lib.global_variables_initializer().run()
# Run with pre-specified sequence length of 2, 3
out, s_fw, s_bw = sess.run(
[outputs, state_fw, state_bw],
feed_dict={
inputs[0]: input_value,
sequence_length: [2, 3]
})
# Since the forward and backward LSTM cells were initialized with the
# same parameters, the forward and backward output has to be the same,
# but reversed in time. The format is output[time][batch][depth], and
# due to depth concatenation (as num_units=3 for both RNNs):
# - forward output: out[][][depth] for 0 <= depth < 3
      # - backward output: out[][][depth] for 3 <= depth < 6
#
# First sequence in batch is length=2
# Check that the time=0 forward output is equal to time=1 backward output
self.assertEqual(out[0][0][0], out[1][0][3])
self.assertEqual(out[0][0][1], out[1][0][4])
self.assertEqual(out[0][0][2], out[1][0][5])
# Check that the time=1 forward output is equal to time=0 backward output
self.assertEqual(out[1][0][0], out[0][0][3])
self.assertEqual(out[1][0][1], out[0][0][4])
self.assertEqual(out[1][0][2], out[0][0][5])
# Second sequence in batch is length=3
# Check that the time=0 forward output is equal to time=2 backward output
self.assertEqual(out[0][1][0], out[2][1][3])
self.assertEqual(out[0][1][1], out[2][1][4])
self.assertEqual(out[0][1][2], out[2][1][5])
# Check that the time=1 forward output is equal to time=1 backward output
self.assertEqual(out[1][1][0], out[1][1][3])
self.assertEqual(out[1][1][1], out[1][1][4])
self.assertEqual(out[1][1][2], out[1][1][5])
# Check that the time=2 forward output is equal to time=0 backward output
self.assertEqual(out[2][1][0], out[0][1][3])
self.assertEqual(out[2][1][1], out[0][1][4])
self.assertEqual(out[2][1][2], out[0][1][5])
# Via the reasoning above, the forward and backward final state should be
# exactly the same
self.assertAllClose(s_fw, s_bw)
def _testBidirectionalRNNWithoutSequenceLength(self, use_shape):
with self.test_session(use_gpu=True, graph=ops_lib.Graph()) as sess:
input_value, inputs, outputs, state_fw, state_bw, _ = (
self._createBidirectionalRNN(use_shape, False))
variables_lib.global_variables_initializer().run()
out, s_fw, s_bw = sess.run(
[outputs, state_fw, state_bw], feed_dict={
inputs[0]: input_value
})
# Since the forward and backward LSTM cells were initialized with the
# same parameters, the forward and backward output has to be the same,
# but reversed in time. The format is output[time][batch][depth], and
# due to depth concatenation (as num_units=3 for both RNNs):
# - forward output: out[][][depth] for 0 <= depth < 3
      # - backward output: out[][][depth] for 3 <= depth < 6
#
# Both sequences in batch are length=8. Check that the time=i
# forward output is equal to time=8-1-i backward output
for i in xrange(8):
self.assertEqual(out[i][0][0], out[8 - 1 - i][0][3])
self.assertEqual(out[i][0][1], out[8 - 1 - i][0][4])
self.assertEqual(out[i][0][2], out[8 - 1 - i][0][5])
for i in xrange(8):
self.assertEqual(out[i][1][0], out[8 - 1 - i][1][3])
self.assertEqual(out[i][1][1], out[8 - 1 - i][1][4])
self.assertEqual(out[i][1][2], out[8 - 1 - i][1][5])
# Via the reasoning above, the forward and backward final state should be
# exactly the same
self.assertAllClose(s_fw, s_bw)
def testBidirectionalRNN(self):
self._testBidirectionalRNN(use_shape=False)
self._testBidirectionalRNN(use_shape=True)
def testBidirectionalRNNWithoutSequenceLength(self):
self._testBidirectionalRNNWithoutSequenceLength(use_shape=False)
self._testBidirectionalRNNWithoutSequenceLength(use_shape=True)
def _createBidirectionalDynamicRNN(self,
use_shape,
use_state_tuple,
use_time_major,
use_sequence_length,
scope=None):
num_units = 3
input_size = 5
batch_size = 2
max_length = 8
initializer = init_ops.random_uniform_initializer(
-0.01, 0.01, seed=self._seed)
sequence_length = (
array_ops.placeholder(dtypes.int64) if use_sequence_length else None)
cell_fw = rnn_cell.LSTMCell(
num_units, initializer=initializer, state_is_tuple=use_state_tuple)
cell_bw = rnn_cell.LSTMCell(
num_units, initializer=initializer, state_is_tuple=use_state_tuple)
inputs = max_length * [
array_ops.placeholder(
dtypes.float32,
shape=(batch_size if use_shape else None, input_size))
]
inputs_c = array_ops.stack(inputs)
if not use_time_major:
inputs_c = array_ops.transpose(inputs_c, [1, 0, 2])
outputs, states = rnn.bidirectional_dynamic_rnn(
cell_fw,
cell_bw,
inputs_c,
sequence_length,
dtype=dtypes.float32,
time_major=use_time_major,
scope=scope)
outputs = array_ops.concat(outputs, 2)
state_fw, state_bw = states
outputs_shape = [None, max_length, 2 * num_units]
if use_shape:
outputs_shape[0] = batch_size
if use_time_major:
outputs_shape[0], outputs_shape[1] = outputs_shape[1], outputs_shape[0]
self.assertEqual(outputs.get_shape().as_list(), outputs_shape)
input_value = np.random.randn(batch_size, input_size)
return input_value, inputs, outputs, state_fw, state_bw, sequence_length
def _testBidirectionalDynamicRNN(self, use_shape, use_state_tuple,
use_time_major, use_sequence_length):
with self.test_session(use_gpu=True, graph=ops_lib.Graph()) as sess:
input_value, inputs, outputs, state_fw, state_bw, sequence_length = (
self._createBidirectionalDynamicRNN(
use_shape, use_state_tuple, use_time_major, use_sequence_length))
variables_lib.global_variables_initializer().run()
# Run with pre-specified sequence length of 2, 3
feed_dict = ({sequence_length: [2, 3]} if use_sequence_length else {})
feed_dict.update({inputs[0]: input_value})
if use_state_tuple:
out, c_fw, m_fw, c_bw, m_bw = sess.run(
[outputs, state_fw[0], state_fw[1], state_bw[0], state_bw[1]],
feed_dict=feed_dict)
s_fw = (c_fw, m_fw)
s_bw = (c_bw, m_bw)
else:
feed_dict.update({inputs[0]: input_value})
out, s_fw, s_bw = sess.run(
[outputs, state_fw, state_bw], feed_dict=feed_dict)
# Since the forward and backward LSTM cells were initialized with the
# same parameters, the forward and backward output has to be the same,
# but reversed in time. The format is output[time][batch][depth], and
# due to depth concatenation (as num_units=3 for both RNNs):
# - forward output: out[][][depth] for 0 <= depth < 3
      # - backward output: out[][][depth] for 3 <= depth < 6
#
if not use_time_major:
out = np.swapaxes(out, 0, 1)
if use_sequence_length:
# First sequence in batch is length=2
# Check that the t=0 forward output is equal to t=1 backward output
self.assertEqual(out[0][0][0], out[1][0][3])
self.assertEqual(out[0][0][1], out[1][0][4])
self.assertEqual(out[0][0][2], out[1][0][5])
# Check that the t=1 forward output is equal to t=0 backward output
self.assertEqual(out[1][0][0], out[0][0][3])
self.assertEqual(out[1][0][1], out[0][0][4])
self.assertEqual(out[1][0][2], out[0][0][5])
# Second sequence in batch is length=3
# Check that the t=0 forward output is equal to t=2 backward output
self.assertEqual(out[0][1][0], out[2][1][3])
self.assertEqual(out[0][1][1], out[2][1][4])
self.assertEqual(out[0][1][2], out[2][1][5])
# Check that the t=1 forward output is equal to t=1 backward output
self.assertEqual(out[1][1][0], out[1][1][3])
self.assertEqual(out[1][1][1], out[1][1][4])
self.assertEqual(out[1][1][2], out[1][1][5])
# Check that the t=2 forward output is equal to t=0 backward output
self.assertEqual(out[2][1][0], out[0][1][3])
self.assertEqual(out[2][1][1], out[0][1][4])
self.assertEqual(out[2][1][2], out[0][1][5])
# Via the reasoning above, the forward and backward final state should
# be exactly the same
self.assertAllClose(s_fw, s_bw)
else: # not use_sequence_length
max_length = 8 # from createBidirectionalDynamicRNN
for t in range(max_length):
self.assertAllEqual(out[t, :, 0:3], out[max_length - t - 1, :, 3:6])
self.assertAllClose(s_fw, s_bw)
def testBidirectionalDynamicRNN(self):
    # Generate all 2^4 combinations of the four boolean options,
    # from (True, True, True, True) to (False, False, False, False)
options = itertools.product([True, False], repeat=4)
for option in options:
self._testBidirectionalDynamicRNN(
use_shape=option[0],
use_state_tuple=option[1],
use_time_major=option[2],
use_sequence_length=option[3])
def _testScope(self, factory, prefix="prefix", use_outer_scope=True):
    # REMARKS: factory(scope) is a function accepting a scope as an argument;
    # the scope can be None, a string, or a VariableScope instance.
with self.test_session(use_gpu=True, graph=ops_lib.Graph()):
if use_outer_scope:
with variable_scope.variable_scope(prefix) as scope:
factory(scope)
else:
factory(prefix)
      # check that all the variable names start
      # with the proper scope.
variables_lib.global_variables_initializer()
all_vars = variables_lib.global_variables()
prefix = prefix or "bidirectional_rnn"
scope_vars = [v for v in all_vars if v.name.startswith(prefix + "/")]
tf_logging.info("BiRNN with scope: %s (%s)" %
(prefix, "scope" if use_outer_scope else "str"))
for v in scope_vars:
tf_logging.info(v.name)
self.assertEqual(len(scope_vars), len(all_vars))
def testBidirectionalRNNScope(self):
def factory(scope):
return self._createBidirectionalRNN(
use_shape=True, use_sequence_length=True, scope=scope)
self._testScope(factory, use_outer_scope=True)
self._testScope(factory, use_outer_scope=False)
self._testScope(factory, prefix=None, use_outer_scope=False)
def testBidirectionalDynamicRNNScope(self):
def get_factory(use_time_major):
def factory(scope):
return self._createBidirectionalDynamicRNN(
use_shape=True,
use_state_tuple=True,
use_sequence_length=True,
use_time_major=use_time_major,
scope=scope)
return factory
self._testScope(get_factory(True), use_outer_scope=True)
self._testScope(get_factory(True), use_outer_scope=False)
self._testScope(get_factory(True), prefix=None, use_outer_scope=False)
self._testScope(get_factory(False), use_outer_scope=True)
self._testScope(get_factory(False), use_outer_scope=False)
self._testScope(get_factory(False), prefix=None, use_outer_scope=False)
class MultiDimensionalLSTMTest(test.TestCase):
def setUp(self):
self._seed = 23489
np.random.seed(self._seed)
def testMultiDimensionalLSTMAllRNNContainers(self):
feature_dims = (3, 4, 5)
input_size = feature_dims
batch_size = 2
max_length = 8
sequence_length = [4, 6]
with self.test_session(graph=ops_lib.Graph()) as sess:
inputs = max_length * [
array_ops.placeholder(dtypes.float32, shape=(None,) + input_size)
]
inputs_using_dim = max_length * [
array_ops.placeholder(
dtypes.float32, shape=(batch_size,) + input_size)
]
inputs_c = array_ops.stack(inputs)
# Create a cell for the whole test. This is fine because the cell has no
# variables.
cell = DummyMultiDimensionalLSTM(feature_dims)
state_saver = TestStateSaver(batch_size, input_size)
outputs_static, state_static = rnn.static_rnn(
cell, inputs, dtype=dtypes.float32, sequence_length=sequence_length)
outputs_dynamic, state_dynamic = rnn.dynamic_rnn(
cell,
inputs_c,
dtype=dtypes.float32,
time_major=True,
sequence_length=sequence_length)
outputs_bid, state_fw, state_bw = rnn.static_bidirectional_rnn(
cell,
cell,
inputs_using_dim,
dtype=dtypes.float32,
sequence_length=sequence_length)
outputs_sav, state_sav = rnn.static_state_saving_rnn(
cell,
inputs_using_dim,
sequence_length=sequence_length,
state_saver=state_saver,
state_name=("h", "c"))
self.assertEqual(outputs_dynamic.get_shape().as_list(),
inputs_c.get_shape().as_list())
for out, inp in zip(outputs_static, inputs):
self.assertEqual(out.get_shape().as_list(), inp.get_shape().as_list())
for out, inp in zip(outputs_bid, inputs_using_dim):
input_shape_list = inp.get_shape().as_list()
# fwd and bwd activations are concatenated along the second dim.
input_shape_list[1] *= 2
self.assertEqual(out.get_shape().as_list(), input_shape_list)
variables_lib.global_variables_initializer().run()
input_total_size = (batch_size,) + input_size
input_value = np.random.randn(*input_total_size)
outputs_static_v = sess.run(
outputs_static, feed_dict={
inputs[0]: input_value
})
outputs_dynamic_v = sess.run(
outputs_dynamic, feed_dict={
inputs[0]: input_value
})
outputs_bid_v = sess.run(
outputs_bid, feed_dict={
inputs_using_dim[0]: input_value
})
outputs_sav_v = sess.run(
outputs_sav, feed_dict={
inputs_using_dim[0]: input_value
})
self.assertAllEqual(outputs_static_v, outputs_dynamic_v)
self.assertAllEqual(outputs_static_v, outputs_sav_v)
outputs_static_array = np.array(outputs_static_v)
outputs_static_array_double = np.concatenate(
(outputs_static_array, outputs_static_array), axis=2)
outputs_bid_array = np.array(outputs_bid_v)
self.assertAllEqual(outputs_static_array_double, outputs_bid_array)
state_static_v = sess.run(
state_static, feed_dict={
inputs[0]: input_value
})
state_dynamic_v = sess.run(
state_dynamic, feed_dict={
inputs[0]: input_value
})
state_bid_fw_v = sess.run(
state_fw, feed_dict={
inputs_using_dim[0]: input_value
})
state_bid_bw_v = sess.run(
state_bw, feed_dict={
inputs_using_dim[0]: input_value
})
state_sav_v = sess.run(
state_sav, feed_dict={
inputs_using_dim[0]: input_value
})
self.assertAllEqual(np.hstack(state_static_v), np.hstack(state_dynamic_v))
self.assertAllEqual(np.hstack(state_static_v), np.hstack(state_sav_v))
self.assertAllEqual(np.hstack(state_static_v), np.hstack(state_bid_fw_v))
self.assertAllEqual(np.hstack(state_static_v), np.hstack(state_bid_bw_v))
class NestedLSTMTest(test.TestCase):
def setUp(self):
self._seed = 23489
np.random.seed(self._seed)
def testNestedIOLSTMAllRNNContainers(self):
input_size = 5
batch_size = 2
state_size = 6
max_length = 8
sequence_length = [4, 6]
with self.test_session(graph=ops_lib.Graph()) as sess:
state_saver = TestStateSaver(batch_size, state_size)
single_input = (array_ops.placeholder(
dtypes.float32, shape=(None, input_size)),
array_ops.placeholder(
dtypes.float32, shape=(None, input_size)))
inputs = max_length * [single_input]
inputs_c = (array_ops.stack([input_[0] for input_ in inputs]),
array_ops.stack([input_[1] for input_ in inputs]))
single_input_using_dim = (array_ops.placeholder(
dtypes.float32, shape=(batch_size, input_size)),
array_ops.placeholder(
dtypes.float32,
shape=(batch_size, input_size)))
inputs_using_dim = max_length * [single_input_using_dim]
# Create a cell for the whole test. This is fine because the cell has no
# variables.
cell = NestedRNNCell()
outputs_dynamic, state_dynamic = rnn.dynamic_rnn(
cell,
inputs_c,
dtype=dtypes.float32,
time_major=True,
sequence_length=sequence_length)
outputs_static, state_static = rnn.static_rnn(
cell, inputs, dtype=dtypes.float32, sequence_length=sequence_length)
outputs_bid, state_fw, state_bw = rnn.static_bidirectional_rnn(
cell,
cell,
inputs_using_dim,
dtype=dtypes.float32,
sequence_length=sequence_length)
outputs_sav, state_sav = rnn.static_state_saving_rnn(
cell,
inputs_using_dim,
sequence_length=sequence_length,
state_saver=state_saver,
state_name=("h", "c"))
def _assert_same_shape(input1, input2, double=False):
flat_input1 = nest.flatten(input1)
flat_input2 = nest.flatten(input2)
for inp1, inp2 in zip(flat_input1, flat_input2):
input_shape = inp1.get_shape().as_list()
if double:
input_shape[1] *= 2
self.assertEqual(input_shape, inp2.get_shape().as_list())
_assert_same_shape(inputs_c, outputs_dynamic)
_assert_same_shape(inputs, outputs_static)
_assert_same_shape(inputs_using_dim, outputs_sav)
_assert_same_shape(inputs_using_dim, outputs_bid, double=True)
variables_lib.global_variables_initializer().run()
input_total_size = (batch_size, input_size)
input_value = (np.random.randn(*input_total_size),
np.random.randn(*input_total_size))
outputs_dynamic_v = sess.run(
outputs_dynamic, feed_dict={
single_input: input_value
})
outputs_static_v = sess.run(
outputs_static, feed_dict={
single_input: input_value
})
outputs_sav_v = sess.run(
outputs_sav, feed_dict={
single_input_using_dim: input_value
})
outputs_bid_v = sess.run(
outputs_bid, feed_dict={
single_input_using_dim: input_value
})
self.assertAllEqual(outputs_static_v,
np.transpose(outputs_dynamic_v, (1, 0, 2, 3)))
self.assertAllEqual(outputs_static_v, outputs_sav_v)
outputs_static_array = np.array(outputs_static_v)
outputs_static_array_double = np.concatenate(
(outputs_static_array, outputs_static_array), axis=3)
outputs_bid_array = np.array(outputs_bid_v)
self.assertAllEqual(outputs_static_array_double, outputs_bid_array)
state_dynamic_v = sess.run(
state_dynamic, feed_dict={
single_input: input_value
})
state_static_v = sess.run(
state_static, feed_dict={
single_input: input_value
})
state_bid_fw_v = sess.run(
state_fw, feed_dict={
single_input_using_dim: input_value
})
state_bid_bw_v = sess.run(
state_bw, feed_dict={
single_input_using_dim: input_value
})
state_sav_v = sess.run(
state_sav, feed_dict={
single_input_using_dim: input_value
})
self.assertAllEqual(np.hstack(state_static_v), np.hstack(state_dynamic_v))
self.assertAllEqual(np.hstack(state_static_v), np.hstack(state_sav_v))
self.assertAllEqual(np.hstack(state_static_v), np.hstack(state_bid_fw_v))
self.assertAllEqual(np.hstack(state_static_v), np.hstack(state_bid_bw_v))
class StateSaverRNNTest(test.TestCase):
def setUp(self):
self._seed = 23489
np.random.seed(self._seed)
def _factory(self, scope, state_saver):
num_units = state_saver.state_size // 2
batch_size = state_saver.batch_size
input_size = 5
max_length = 8
initializer = init_ops.random_uniform_initializer(
-0.01, 0.01, seed=self._seed)
cell = rnn_cell.LSTMCell(
num_units,
use_peepholes=False,
initializer=initializer,
state_is_tuple=False)
inputs = max_length * [
array_ops.zeros(dtype=dtypes.float32, shape=(batch_size, input_size))
]
out, state = rnn.static_state_saving_rnn(
cell,
inputs,
state_saver=state_saver,
state_name="save_lstm",
scope=scope)
return out, state, state_saver
def _testScope(self, prefix="prefix", use_outer_scope=True):
num_units = 3
batch_size = 2
state_saver = TestStateSaver(batch_size, 2 * num_units)
with self.test_session(use_gpu=True, graph=ops_lib.Graph()):
if use_outer_scope:
with variable_scope.variable_scope(prefix) as scope:
self._factory(scope=scope, state_saver=state_saver)
else:
self._factory(scope=prefix, state_saver=state_saver)
variables_lib.global_variables_initializer()
      # check that all the variable names start
      # with the proper scope.
all_vars = variables_lib.global_variables()
prefix = prefix or "rnn"
scope_vars = [v for v in all_vars if v.name.startswith(prefix + "/")]
tf_logging.info("RNN with scope: %s (%s)" %
(prefix, "scope" if use_outer_scope else "str"))
for v in scope_vars:
tf_logging.info(v.name)
self.assertEqual(len(scope_vars), len(all_vars))
def testStateSaverRNNScope(self):
self._testScope(use_outer_scope=True)
self._testScope(use_outer_scope=False)
self._testScope(prefix=None, use_outer_scope=False)
def testStateSaverCallsSaveState(self):
"""Test that number of calls to state and save_state is equal.
Test if the order of actual evaluating or skipping evaluation of out,
state tensors, which are the output tensors from static_state_saving_rnn,
have influence on number of calls to save_state and state methods of
state_saver object (the number of calls should be same.)
"""
num_units = 3
batch_size = 2
state_saver = TestStateSaverWithCounters(batch_size, 2 * num_units)
out, state, state_saver = self._factory(scope=None, state_saver=state_saver)
with self.test_session() as sess:
sess.run(variables_lib.global_variables_initializer())
sess.run(variables_lib.local_variables_initializer())
_, _, num_state_calls, num_save_state_calls = sess.run([
out,
state,
state_saver.num_state_calls,
state_saver.num_save_state_calls])
self.assertEqual(num_state_calls, num_save_state_calls)
_, num_state_calls, num_save_state_calls = sess.run([
out,
state_saver.num_state_calls,
state_saver.num_save_state_calls])
self.assertEqual(num_state_calls, num_save_state_calls)
_, num_state_calls, num_save_state_calls = sess.run([
state,
state_saver.num_state_calls,
state_saver.num_save_state_calls])
self.assertEqual(num_state_calls, num_save_state_calls)
class GRUTest(test.TestCase):
def setUp(self):
self._seed = 23489
np.random.seed(self._seed)
def testDynamic(self):
time_steps = 8
num_units = 3
input_size = 5
batch_size = 2
input_values = np.random.randn(time_steps, batch_size, input_size)
sequence_length = np.random.randint(0, time_steps, size=batch_size)
with self.test_session(use_gpu=True, graph=ops_lib.Graph()) as sess:
concat_inputs = array_ops.placeholder(
dtypes.float32, shape=(time_steps, batch_size, input_size))
cell = rnn_cell.GRUCell(num_units=num_units)
with variable_scope.variable_scope("dynamic_scope"):
outputs_dynamic, state_dynamic = rnn.dynamic_rnn(
cell,
inputs=concat_inputs,
sequence_length=sequence_length,
time_major=True,
dtype=dtypes.float32)
feeds = {concat_inputs: input_values}
# Initialize
variables_lib.global_variables_initializer().run(feed_dict=feeds)
sess.run([outputs_dynamic, state_dynamic], feed_dict=feeds)
def _testScope(self, factory, prefix="prefix", use_outer_scope=True):
with self.test_session(use_gpu=True, graph=ops_lib.Graph()):
if use_outer_scope:
with variable_scope.variable_scope(prefix) as scope:
factory(scope)
else:
factory(prefix)
variables_lib.global_variables_initializer()
      # check that all the variable names start
      # with the proper scope.
all_vars = variables_lib.global_variables()
prefix = prefix or "rnn"
scope_vars = [v for v in all_vars if v.name.startswith(prefix + "/")]
tf_logging.info("RNN with scope: %s (%s)" %
(prefix, "scope" if use_outer_scope else "str"))
for v in scope_vars:
tf_logging.info(v.name)
self.assertEqual(len(scope_vars), len(all_vars))
def testDynamicScope(self):
time_steps = 8
num_units = 3
input_size = 5
batch_size = 2
sequence_length = np.random.randint(0, time_steps, size=batch_size)
def factory(scope):
concat_inputs = array_ops.placeholder(
dtypes.float32, shape=(time_steps, batch_size, input_size))
cell = rnn_cell.GRUCell(num_units=num_units)
return rnn.dynamic_rnn(
cell,
inputs=concat_inputs,
sequence_length=sequence_length,
time_major=True,
dtype=dtypes.float32,
scope=scope)
self._testScope(factory, use_outer_scope=True)
self._testScope(factory, use_outer_scope=False)
self._testScope(factory, prefix=None, use_outer_scope=False)
class RawRNNTest(test.TestCase):
def setUp(self):
self._seed = 23489
np.random.seed(self._seed)
def _testRawRNN(self, max_time):
with self.test_session(graph=ops_lib.Graph()) as sess:
batch_size = 16
input_depth = 4
num_units = 3
inputs = array_ops.placeholder(
shape=(max_time, batch_size, input_depth), dtype=dtypes.float32)
sequence_length = array_ops.placeholder(
shape=(batch_size,), dtype=dtypes.int32)
inputs_ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32, size=array_ops.shape(inputs)[0])
inputs_ta = inputs_ta.unstack(inputs)
cell = rnn_cell.LSTMCell(num_units, state_is_tuple=True)
def loop_fn(time_, cell_output, cell_state, unused_loop_state):
emit_output = cell_output # == None for time == 0
if cell_output is None: # time == 0
next_state = cell.zero_state(batch_size, dtypes.float32)
else:
next_state = cell_state # copy state through
elements_finished = (time_ >= sequence_length)
finished = math_ops.reduce_all(elements_finished)
# For the very final iteration, we must emit a dummy input
next_input = control_flow_ops.cond(
finished,
lambda: array_ops.zeros([batch_size, input_depth], dtype=dtypes.float32),
lambda: inputs_ta.read(time_))
return (elements_finished, next_input, next_state, emit_output, None)
reuse_scope = variable_scope.get_variable_scope()
outputs_ta, final_state, _ = rnn.raw_rnn(cell, loop_fn, scope=reuse_scope)
outputs = outputs_ta.stack()
reuse_scope.reuse_variables()
outputs_dynamic_rnn, final_state_dynamic_rnn = rnn.dynamic_rnn(
cell,
inputs,
time_major=True,
dtype=dtypes.float32,
sequence_length=sequence_length,
scope=reuse_scope)
variables = variables_lib.trainable_variables()
gradients = gradients_impl.gradients([outputs, final_state],
[inputs] + variables)
gradients_dynamic_rnn = gradients_impl.gradients(
[outputs_dynamic_rnn, final_state_dynamic_rnn], [inputs] + variables)
variables_lib.global_variables_initializer().run()
rand_input = np.random.randn(max_time, batch_size, input_depth)
if max_time == 0:
rand_seq_len = np.zeros(batch_size)
else:
rand_seq_len = np.random.randint(max_time, size=batch_size)
# To ensure same output lengths for dynamic_rnn and raw_rnn
rand_seq_len[0] = max_time
(outputs_val, outputs_dynamic_rnn_val, final_state_val,
final_state_dynamic_rnn_val) = sess.run(
[outputs, outputs_dynamic_rnn, final_state, final_state_dynamic_rnn],
feed_dict={
inputs: rand_input,
sequence_length: rand_seq_len
})
self.assertAllClose(outputs_dynamic_rnn_val, outputs_val)
self.assertAllClose(final_state_dynamic_rnn_val, final_state_val)
      # NOTE: With 0 time steps, raw_rnn has no shape information about the
      # input, so evaluating the gradients would fail; the gradient comparison
      # is therefore skipped in that case.
if max_time > 0:
self.assertEqual(len(gradients), len(gradients_dynamic_rnn))
gradients_val = sess.run(
gradients,
feed_dict={
inputs: rand_input,
sequence_length: rand_seq_len
})
gradients_dynamic_rnn_val = sess.run(
gradients_dynamic_rnn,
feed_dict={
inputs: rand_input,
sequence_length: rand_seq_len
})
self.assertEqual(len(gradients_val), len(gradients_dynamic_rnn_val))
input_gradients_val = gradients_val[0]
input_gradients_dynamic_rnn_val = gradients_dynamic_rnn_val[0]
self.assertAllClose(input_gradients_val,
input_gradients_dynamic_rnn_val)
for i in range(1, len(gradients_val)):
self.assertAllClose(gradients_dynamic_rnn_val[i], gradients_val[i])
def testRawRNNZeroLength(self):
    # NOTE: With 0 time steps, raw_rnn has no shape information about the
    # input, so evaluating the gradients would fail; the gradient comparison
    # is therefore skipped in that case.
self._testRawRNN(max_time=0)
def testRawRNN(self):
self._testRawRNN(max_time=10)
def testLoopState(self):
with self.test_session(graph=ops_lib.Graph()):
max_time = 10
batch_size = 16
input_depth = 4
num_units = 3
inputs = np.random.randn(max_time, batch_size, input_depth)
inputs_ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32, size=array_ops.shape(inputs)[0])
inputs_ta = inputs_ta.unstack(inputs)
cell = rnn_cell.LSTMCell(num_units, state_is_tuple=True)
def loop_fn(time_, cell_output, cell_state, loop_state):
if cell_output is None:
loop_state = constant_op.constant([0])
next_state = cell.zero_state(batch_size, dtypes.float32)
else:
loop_state = array_ops.stack([array_ops.squeeze(loop_state) + 1])
next_state = cell_state
emit_output = cell_output # == None for time == 0
elements_finished = array_ops.tile([time_ >= max_time], [batch_size])
finished = math_ops.reduce_all(elements_finished)
# For the very final iteration, we must emit a dummy input
next_input = control_flow_ops.cond(
finished,
lambda: array_ops.zeros([batch_size, input_depth], dtype=dtypes.float32),
lambda: inputs_ta.read(time_))
return (elements_finished, next_input, next_state, emit_output,
loop_state)
r = rnn.raw_rnn(cell, loop_fn)
loop_state = r[-1]
self.assertEqual([10], loop_state.eval())
def testLoopStateWithTensorArray(self):
with self.test_session(graph=ops_lib.Graph()):
max_time = 4
batch_size = 16
input_depth = 4
num_units = 3
inputs = np.random.randn(max_time, batch_size, input_depth)
inputs_ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32, size=array_ops.shape(inputs)[0])
inputs_ta = inputs_ta.unstack(inputs)
cell = rnn_cell.LSTMCell(num_units, state_is_tuple=True)
def loop_fn(time_, cell_output, cell_state, loop_state):
if cell_output is None:
loop_state = tensor_array_ops.TensorArray(
dynamic_size=True,
size=0,
dtype=dtypes.int32,
clear_after_read=False)
loop_state = loop_state.write(0, 1)
next_state = cell.zero_state(batch_size, dtypes.float32)
else:
loop_state = loop_state.write(time_,
loop_state.read(time_ - 1) + time_)
next_state = cell_state
emit_output = cell_output # == None for time == 0
elements_finished = array_ops.tile([time_ >= max_time], [batch_size])
finished = math_ops.reduce_all(elements_finished)
# For the very final iteration, we must emit a dummy input
next_input = control_flow_ops.cond(
finished,
lambda: array_ops.zeros([batch_size, input_depth], dtype=dtypes.float32),
lambda: inputs_ta.read(time_))
return (elements_finished, next_input, next_state, emit_output,
loop_state)
r = rnn.raw_rnn(cell, loop_fn)
loop_state = r[-1]
loop_state = loop_state.stack()
self.assertAllEqual([1, 2, 2 + 2, 4 + 3, 7 + 4], loop_state.eval())
def testEmitDifferentStructureThanCellOutput(self):
with self.test_session(graph=ops_lib.Graph()) as sess:
max_time = 10
batch_size = 16
input_depth = 4
num_units = 3
inputs = np.random.randn(max_time, batch_size, input_depth)
inputs_ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32, size=array_ops.shape(inputs)[0])
inputs_ta = inputs_ta.unstack(inputs)
# Verify emit shapes may be unknown by feeding a placeholder that
# determines an emit shape.
unknown_dim = array_ops.placeholder(dtype=dtypes.int32)
cell = rnn_cell.LSTMCell(num_units, state_is_tuple=True)
def loop_fn(time_, cell_output, cell_state, _):
if cell_output is None:
emit_output = (array_ops.zeros([2, 3], dtype=dtypes.int32),
array_ops.zeros([unknown_dim], dtype=dtypes.int64))
next_state = cell.zero_state(batch_size, dtypes.float32)
else:
emit_output = (array_ops.ones([batch_size, 2, 3], dtype=dtypes.int32),
array_ops.ones(
[batch_size, unknown_dim], dtype=dtypes.int64))
next_state = cell_state
elements_finished = array_ops.tile([time_ >= max_time], [batch_size])
finished = math_ops.reduce_all(elements_finished)
# For the very final iteration, we must emit a dummy input
next_input = control_flow_ops.cond(
finished,
lambda: array_ops.zeros([batch_size, input_depth], dtype=dtypes.float32),
lambda: inputs_ta.read(time_))
return (elements_finished, next_input, next_state, emit_output, None)
r = rnn.raw_rnn(cell, loop_fn)
output_ta = r[0]
self.assertEqual(2, len(output_ta))
self.assertEqual([dtypes.int32, dtypes.int64],
[ta.dtype for ta in output_ta])
output = [ta.stack() for ta in output_ta]
output_vals = sess.run(output, feed_dict={unknown_dim: 1})
self.assertAllEqual(
np.ones((max_time, batch_size, 2, 3), np.int32), output_vals[0])
self.assertAllEqual(
np.ones((max_time, batch_size, 1), np.int64), output_vals[1])
def _testScope(self, factory, prefix="prefix", use_outer_scope=True):
with self.test_session(use_gpu=True, graph=ops_lib.Graph()):
if use_outer_scope:
with variable_scope.variable_scope(prefix) as scope:
factory(scope)
else:
factory(prefix)
variables_lib.global_variables_initializer()
      # check that all the variable names start
      # with the proper scope.
all_vars = variables_lib.global_variables()
prefix = prefix or "rnn"
scope_vars = [v for v in all_vars if v.name.startswith(prefix + "/")]
tf_logging.info("RNN with scope: %s (%s)" %
(prefix, "scope" if use_outer_scope else "str"))
for v in scope_vars:
tf_logging.info(v.name)
self.assertEqual(len(scope_vars), len(all_vars))
def testRawRNNScope(self):
max_time = 10
batch_size = 16
input_depth = 4
num_units = 3
def factory(scope):
inputs = array_ops.placeholder(
shape=(max_time, batch_size, input_depth), dtype=dtypes.float32)
sequence_length = array_ops.placeholder(
shape=(batch_size,), dtype=dtypes.int32)
inputs_ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32, size=array_ops.shape(inputs)[0])
inputs_ta = inputs_ta.unstack(inputs)
cell = rnn_cell.LSTMCell(num_units, state_is_tuple=True)
def loop_fn(time_, cell_output, cell_state, unused_loop_state):
emit_output = cell_output # == None for time == 0
if cell_output is None: # time == 0
next_state = cell.zero_state(batch_size, dtypes.float32)
else:
next_state = cell_state
elements_finished = (time_ >= sequence_length)
finished = math_ops.reduce_all(elements_finished)
# For the very final iteration, we must emit a dummy input
next_input = control_flow_ops.cond(
finished,
lambda: array_ops.zeros([batch_size, input_depth], dtype=dtypes.float32),
lambda: inputs_ta.read(time_))
return (elements_finished, next_input, next_state, emit_output, None)
return rnn.raw_rnn(cell, loop_fn, scope=scope)
self._testScope(factory, use_outer_scope=True)
self._testScope(factory, use_outer_scope=False)
self._testScope(factory, prefix=None, use_outer_scope=False)
class DeviceWrapperCell(rnn_cell.RNNCell):
"""Class to ensure cell calculation happens on a specific device."""
def __init__(self, cell, device):
self._cell = cell
self._device = device
@property
def output_size(self):
return self._cell.output_size
@property
def state_size(self):
return self._cell.state_size
def __call__(self, input_, state, scope=None):
if self._device is not None:
with ops_lib.device(self._device):
return self._cell(input_, state, scope=scope)
else:
return self._cell(input_, state, scope=scope)
class TensorArrayOnCorrectDeviceTest(test.TestCase):
def _execute_rnn_on(self,
rnn_device=None,
cell_device=None,
input_device=None):
batch_size = 3
time_steps = 7
input_size = 5
num_units = 10
cell = rnn_cell.LSTMCell(num_units, use_peepholes=True)
gpu_cell = DeviceWrapperCell(cell, cell_device)
inputs = np.random.randn(batch_size, time_steps, input_size).astype(
np.float32)
sequence_length = np.random.randint(0, time_steps, size=batch_size)
if input_device is not None:
with ops_lib.device(input_device):
inputs = constant_op.constant(inputs)
if rnn_device is not None:
with ops_lib.device(rnn_device):
outputs, _ = rnn.dynamic_rnn(
gpu_cell,
inputs,
sequence_length=sequence_length,
dtype=dtypes.float32)
else:
outputs, _ = rnn.dynamic_rnn(
gpu_cell,
inputs,
sequence_length=sequence_length,
dtype=dtypes.float32)
with self.test_session(use_gpu=True) as sess:
opts = config_pb2.RunOptions(trace_level=config_pb2.RunOptions.FULL_TRACE)
run_metadata = config_pb2.RunMetadata()
variables_lib.global_variables_initializer().run()
sess.run(outputs, options=opts, run_metadata=run_metadata)
return run_metadata
def _retrieve_cpu_gpu_stats(self, run_metadata):
cpu_stats = None
gpu_stats = None
step_stats = run_metadata.step_stats
for ds in step_stats.dev_stats:
if "cpu:0" in ds.device[-5:].lower():
cpu_stats = ds.node_stats
if "gpu:0" == ds.device[-5:].lower():
gpu_stats = ds.node_stats
return cpu_stats, gpu_stats
def testRNNOnCPUCellOnGPU(self):
if not test.is_gpu_available():
return # Test requires access to a GPU
gpu_dev = test.gpu_device_name()
run_metadata = self._execute_rnn_on(
rnn_device="/cpu:0", cell_device=gpu_dev)
cpu_stats, gpu_stats = self._retrieve_cpu_gpu_stats(run_metadata)
def _assert_in(op_str, in_stats, out_stats):
self.assertTrue(any(op_str in s.node_name for s in in_stats))
self.assertFalse(any(op_str in s.node_name for s in out_stats))
# Writes happen at output of RNN cell
_assert_in("TensorArrayWrite", gpu_stats, cpu_stats)
# Gather happens on final TensorArray
_assert_in("TensorArrayGather", gpu_stats, cpu_stats)
# Reads happen at input to RNN cell
_assert_in("TensorArrayRead", cpu_stats, gpu_stats)
# Scatters happen to get initial input into TensorArray
_assert_in("TensorArrayScatter", cpu_stats, gpu_stats)
def testRNNOnCPUCellOnCPU(self):
if not test.is_gpu_available():
return # Test requires access to a GPU
gpu_dev = test.gpu_device_name()
run_metadata = self._execute_rnn_on(
rnn_device="/cpu:0", cell_device="/cpu:0", input_device=gpu_dev)
cpu_stats, gpu_stats = self._retrieve_cpu_gpu_stats(run_metadata)
def _assert_in(op_str, in_stats, out_stats):
self.assertTrue(any(op_str in s.node_name for s in in_stats))
self.assertFalse(any(op_str in s.node_name for s in out_stats))
# All TensorArray operations happen on CPU
_assert_in("TensorArray", cpu_stats, gpu_stats)
def testInputOnGPUCellNotDeclared(self):
if not test.is_gpu_available():
return # Test requires access to a GPU
gpu_dev = test.gpu_device_name()
run_metadata = self._execute_rnn_on(input_device=gpu_dev)
cpu_stats, gpu_stats = self._retrieve_cpu_gpu_stats(run_metadata)
def _assert_in(op_str, in_stats, out_stats):
self.assertTrue(any(op_str in s.node_name for s in in_stats))
self.assertFalse(any(op_str in s.node_name for s in out_stats))
# Everything happens on GPU
_assert_in("TensorArray", gpu_stats, cpu_stats)
if __name__ == "__main__":
test.main()
|
import os
import filecmp
from dvc.main import main
from dvc.utils import file_md5
from dvc.stage import Stage
from dvc.command.run import CmdRun
from tests.basic_env import TestDvc
class TestRun(TestDvc):
def test(self):
cmd = 'python {} {} {}'.format(self.CODE, self.FOO, 'out')
deps = [self.FOO, self.CODE]
outs = [os.path.join(self.dvc.root_dir, 'out')]
outs_no_cache = []
fname = os.path.join(self.dvc.root_dir, 'out.dvc')
cwd = os.curdir
self.dvc.add(self.FOO)
stage = self.dvc.run(cmd=cmd,
deps=deps,
outs=outs,
outs_no_cache=outs_no_cache,
fname=fname,
cwd=cwd)
self.assertTrue(filecmp.cmp(self.FOO, 'out'))
self.assertTrue(os.path.isfile(stage.path))
self.assertEqual(stage.cmd, cmd)
self.assertEqual(len(stage.deps), len(deps))
self.assertEqual(len(stage.outs), len(outs + outs_no_cache))
self.assertEqual(stage.outs[0].path, outs[0])
self.assertEqual(stage.outs[0].md5, file_md5(self.FOO)[0])
        self.assertEqual(stage.path, fname)
class TestRunEmpty(TestDvc):
def test(self):
self.dvc.run(cmd='',
deps=[],
outs=[],
outs_no_cache=[],
fname='empty.dvc',
cwd=os.curdir)
class TestRunNoExec(TestDvc):
def test(self):
self.dvc.run(cmd='python {} {} {}'.format(self.CODE, self.FOO, 'out'),
no_exec=True)
self.assertFalse(os.path.exists('out'))
class TestCmdRun(TestDvc):
def test_run(self):
ret = main(['run',
'-d', self.FOO,
'-d', self.CODE,
'-o', 'out',
'-f', 'out.dvc',
'python', self.CODE, self.FOO, 'out'])
self.assertEqual(ret, 0)
self.assertTrue(os.path.isfile('out'))
self.assertTrue(os.path.isfile('out.dvc'))
self.assertTrue(filecmp.cmp(self.FOO, 'out'))
def test_run_bad_command(self):
ret = main(['run',
'non-existing-command'])
self.assertNotEqual(ret, 0)
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tempfile
from typing import Any, Generator, Optional, Tuple, Union
import yaml
from cached_property import cached_property
from kubernetes import client, config, watch
from airflow.exceptions import AirflowException
from airflow.hooks.base_hook import BaseHook
def _load_body_to_dict(body):
try:
body_dict = yaml.safe_load(body)
except yaml.YAMLError as e:
raise AirflowException("Exception when loading resource definition: %s\n" % e)
return body_dict
class KubernetesHook(BaseHook):
"""
Creates Kubernetes API connection.
- use in cluster configuration by using ``extra__kubernetes__in_cluster`` in connection
- use custom config by providing path to the file using ``extra__kubernetes__kube_config_path``
- use custom configuration by providing content of kubeconfig file via
``extra__kubernetes__kube_config`` in connection
- use default config by providing no extras
    This hook checks for a configuration option in the order listed above; once an
    option is present, that configuration is used (see the usage sketch at the end
    of this module).
.. seealso::
For more information about Kubernetes connection:
:ref:`apache-airflow:howto/connection:kubernetes`
:param conn_id: the connection to Kubernetes cluster
:type conn_id: str
"""
def __init__(
self, conn_id: str = "kubernetes_default", client_configuration: Optional[client.Configuration] = None
) -> None:
super().__init__()
self.conn_id = conn_id
self.client_configuration = client_configuration
def get_conn(self) -> Any:
"""Returns kubernetes api session for use with requests"""
connection = self.get_connection(self.conn_id)
extras = connection.extra_dejson
in_cluster = extras.get("extra__kubernetes__in_cluster")
kubeconfig_path = extras.get("extra__kubernetes__kube_config_path")
kubeconfig = extras.get("extra__kubernetes__kube_config")
num_selected_configuration = len([o for o in [in_cluster, kubeconfig, kubeconfig_path] if o])
if num_selected_configuration > 1:
raise AirflowException(
"Invalid connection configuration. Options extra__kubernetes__kube_config_path, "
"extra__kubernetes__kube_config, extra__kubernetes__in_cluster are mutually exclusive. "
"You can only use one option at a time."
)
if in_cluster:
self.log.debug("loading kube_config from: in_cluster configuration")
config.load_incluster_config()
return client.ApiClient()
if kubeconfig_path is not None:
self.log.debug("loading kube_config from: %s", kubeconfig_path)
config.load_kube_config(
config_file=kubeconfig_path, client_configuration=self.client_configuration
)
return client.ApiClient()
if kubeconfig is not None:
with tempfile.NamedTemporaryFile() as temp_config:
self.log.debug("loading kube_config from: connection kube_config")
temp_config.write(kubeconfig.encode())
temp_config.flush()
config.load_kube_config(
config_file=temp_config.name, client_configuration=self.client_configuration
)
return client.ApiClient()
self.log.debug("loading kube_config from: default file")
config.load_kube_config(client_configuration=self.client_configuration)
return client.ApiClient()
@cached_property
def api_client(self) -> Any:
"""Cached Kubernetes API client"""
return self.get_conn()
def create_custom_object(
self, group: str, version: str, plural: str, body: Union[str, dict], namespace: Optional[str] = None
):
"""
Creates custom resource definition object in Kubernetes
:param group: api group
:type group: str
:param version: api version
:type version: str
:param plural: api plural
:type plural: str
:param body: crd object definition
:type body: Union[str, dict]
:param namespace: kubernetes namespace
:type namespace: str
"""
api = client.CustomObjectsApi(self.api_client)
if namespace is None:
namespace = self.get_namespace()
if isinstance(body, str):
body = _load_body_to_dict(body)
try:
response = api.create_namespaced_custom_object(
group=group, version=version, namespace=namespace, plural=plural, body=body
)
self.log.debug("Response: %s", response)
return response
except client.rest.ApiException as e:
raise AirflowException("Exception when calling -> create_custom_object: %s\n" % e)
def get_custom_object(
self, group: str, version: str, plural: str, name: str, namespace: Optional[str] = None
):
"""
Get custom resource definition object from Kubernetes
:param group: api group
:type group: str
:param version: api version
:type version: str
:param plural: api plural
:type plural: str
:param name: crd object name
:type name: str
:param namespace: kubernetes namespace
:type namespace: str
"""
api = client.CustomObjectsApi(self.api_client)
if namespace is None:
namespace = self.get_namespace()
try:
response = api.get_namespaced_custom_object(
group=group, version=version, namespace=namespace, plural=plural, name=name
)
return response
except client.rest.ApiException as e:
raise AirflowException("Exception when calling -> get_custom_object: %s\n" % e)
def get_namespace(self) -> str:
"""Returns the namespace that defined in the connection"""
connection = self.get_connection(self.conn_id)
extras = connection.extra_dejson
namespace = extras.get("extra__kubernetes__namespace", "default")
return namespace
def get_pod_log_stream(
self,
pod_name: str,
container: Optional[str] = "",
namespace: Optional[str] = None,
) -> Tuple[watch.Watch, Generator[str, None, None]]:
"""
Retrieves a log stream for a container in a kubernetes pod.
:param pod_name: pod name
:type pod_name: str
:param container: container name
:param namespace: kubernetes namespace
:type namespace: str
"""
api = client.CoreV1Api(self.api_client)
watcher = watch.Watch()
return (
watcher,
watcher.stream(
api.read_namespaced_pod_log,
name=pod_name,
container=container,
namespace=namespace if namespace else self.get_namespace(),
),
)
def get_pod_logs(
self,
pod_name: str,
container: Optional[str] = "",
namespace: Optional[str] = None,
):
"""
Retrieves a container's log from the specified pod.
:param pod_name: pod name
:type pod_name: str
:param container: container name
:param namespace: kubernetes namespace
:type namespace: str
"""
api = client.CoreV1Api(self.api_client)
return api.read_namespaced_pod_log(
name=pod_name,
container=container,
_preload_content=False,
namespace=namespace if namespace else self.get_namespace(),
)
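# Hedged usage sketch (illustrative only, not part of the original module); the connection
# id is Airflow's default and the pod/namespace names below are hypothetical:
#     hook = KubernetesHook(conn_id="kubernetes_default")
#     api_client = hook.get_conn()   # resolves the kubeconfig per the precedence in the class docstring
#     print(hook.get_namespace())    # "default" unless extra__kubernetes__namespace is set
#     logs = hook.get_pod_logs("my-pod", namespace="my-namespace")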
|
from Window import Window
from Logic import Logic
print("Init")
logic = Logic()
win = Window(logic)
win.mainloop()
print("End")
|
import socket
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.bind(('localhost', 8080))
sock.listen(1)
clientsoc, addr = sock.accept()
print('connected:', addr)
message = ''
while True:
    clientsoc.sendall(bytes(message + 'Enter two bases and the height, or "exit" to finish the program', "utf-8"))
try:
data = clientsoc.recv(1024)
if not data:
break
if data.decode("utf-8") == "exit":
clientsoc.close()
break
a, b, h = data.decode("utf-8").split(' ')
s = (int(a) + int(b)) / 2 * int(h)
message = f'Square of the figure is {s}; \n'
except KeyboardInterrupt:
clientsoc.close()
break
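# Hedged companion sketch (not part of the original script): a minimal client for the
# protocol above, assuming the server is already listening on localhost:8080.
#     import socket
#     client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#     client.connect(('localhost', 8080))
#     print(client.recv(1024).decode("utf-8"))  # prompt from the server
#     client.sendall(b"3 5 2")                  # bases 3 and 5, height 2 -> area 8.0
#     print(client.recv(1024).decode("utf-8"))  # "Square of the figure is 8.0; ..." plus the next prompt
#     client.sendall(b"exit")                   # tells the server to close the connection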
|
from angr.procedures.stubs.format_parser import FormatParser
from cle.backends.externs.simdata.io_file import io_file_data_for_arch
class fscanf(FormatParser):
#pylint:disable=arguments-differ
def run(self, file_ptr):
# TODO handle errors
fd_offset = io_file_data_for_arch(self.state.arch)['fd']
fd = self.state.mem[file_ptr + fd_offset:].int.resolved
simfd = self.state.posix.get_fd(fd)
if simfd is None:
return -1
fmt_str = self._parse(1)
items = fmt_str.interpret(2, self.arg, simfd=simfd)
return items
|
from xml.etree import ElementTree as ET
from gomatic.mixins import CommonEqualityMixin
def fetch_artifact_src_from(element):
if 'srcfile' in element.attrib:
return FetchArtifactFile(element.attrib['srcfile'])
if 'srcdir' in element.attrib:
return FetchArtifactDir(element.attrib['srcdir'])
raise RuntimeError("Expected srcfile or srcdir. Do not know what src type to use for " + ET.tostring(element, 'utf-8'))
def fetch_properties_from(element):
props = {}
for prop in element.iter('property'):
props[prop.find('key').text] = prop.find('value').text
return props if props else None
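# For example (worked through the helper above), an element whose XML contains
#     <configuration><property><key>abc</key><value>def</value></property></configuration>
# yields {'abc': 'def'}, while an element with no <property> children yields None.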
class FetchArtifactFile(CommonEqualityMixin):
def __init__(self, src_value):
self.__src_value = src_value
def __repr__(self):
return 'FetchArtifactFile("%s")' % self.__src_value
@property
def as_xml_type_and_value(self):
return "srcfile", self.__src_value
class FetchArtifactDir(CommonEqualityMixin):
def __init__(self, src_value):
self.__src_value = src_value
def __repr__(self):
return 'FetchArtifactDir("%s")' % self.__src_value
@property
def as_xml_type_and_value(self):
return "srcdir", self.__src_value
class Artifact(CommonEqualityMixin):
def __init__(self, src=None, dest=None, id=None, store_id=None, config=None, artifact_type='build'):
self._src = src
self._dest = dest
self._artifact_id = id
self._store_id = store_id
self._config = config
self._type = artifact_type
def __repr__(self):
if self._artifact_id is not None:
if self._config is None:
return '%s("%s", "%s")' % (self.constructor, self._artifact_id, self._store_id)
else:
return '%s("%s", "%s", %s)' % (self.constructor, self._artifact_id, self._store_id, self._config)
if self._dest is None:
return '%s("%s")' % (self.constructor, self._src)
else:
return '%s("%s", "%s")' % (self.constructor, self._src, self._dest)
@property
def constructor(self):
if self._type == "build":
return "BuildArtifact"
if self._type == "test":
return "TestArtifact"
if self._type == "external":
return "ExternalArtifact"
raise RuntimeError("Unknown artifact type %s" % self._type)
def append_to(self, element, gocd_18_3_and_above=False):
if gocd_18_3_and_above:
self._append_to_gocd_18_3_and_above(element)
else:
self._append_to_gocd_18_2_and_below(element)
def _append_to_gocd_18_3_and_above(self, element):
if self._artifact_id is not None:
if self._config is None:
element.append(ET.fromstring('<artifact id="%s" storeId="%s" type="%s" />' % (self._artifact_id, self._store_id, self._type)))
else:
properties_xml = "".join(["<property><key>{}</key><value>{}</value></property>".format(k, str(v or '')) for k, v in self._config.items()])
new_element = ET.fromstring('<artifact id="{}" storeId="{}" type="{}"><configuration>{}</configuration></artifact>'.format(self._artifact_id, self._store_id, self._type, properties_xml))
element.append(new_element)
elif self._dest is None:
element.append(ET.fromstring('<artifact src="%s" type="%s" />' % (self._src, self._type)))
else:
element.append(ET.fromstring('<artifact src="%s" dest="%s" type="%s" />' % (self._src, self._dest, self._type)))
def _append_to_gocd_18_2_and_below(self, element):
if not self._type == 'build' and not self._type == 'test':
raise RuntimeError("Artifact type '%s' not supported in GoCD 18.2 and below" % self._type)
tag = 'artifact' if self._type == 'build' else 'test'
if self._dest is None:
element.append(ET.fromstring('<%s src="%s" />' % (tag, self._src)))
else:
element.append(ET.fromstring('<%s src="%s" dest="%s" />' % (tag, self._src, self._dest)))
@classmethod
def get_artifact_for(cls, element):
src = element.attrib.get('src', None)
dest = element.attrib.get('dest', None)
id = element.attrib.get('id', None)
store_id = element.attrib.get('storeId', None)
artifact_type_attribute = element.attrib.get('type', None)
if id is not None:
return cls(id=id, store_id=store_id, config=fetch_properties_from(element), artifact_type=artifact_type_attribute)
if artifact_type_attribute is None:
_type = 'build' if element.tag == 'artifact' else 'test'
return cls(src=src, dest=dest, artifact_type=_type)
else:
return cls(src=src, dest=dest, artifact_type=artifact_type_attribute)
@classmethod
def get_build_artifact(cls, src, dest=None):
return cls(src=src, dest=dest, artifact_type='build')
@classmethod
def get_test_artifact(cls, src, dest=None):
return cls(src=src, dest=dest, artifact_type='test')
@classmethod
def get_external_artifact(cls, id, store_id, config=None):
return cls(id=id, store_id=store_id, config=config, artifact_type='external')
ArtifactFor = Artifact.get_artifact_for
BuildArtifact = Artifact.get_build_artifact
TestArtifact = Artifact.get_test_artifact
ExternalArtifact = Artifact.get_external_artifact
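# Hedged demo (not part of the original module): a minimal, runnable sketch of how a build
# artifact serializes for GoCD 18.3+ versus 18.2 and below; the src/dest paths are made up.
if __name__ == '__main__':
    for modern in (True, False):
        root = ET.fromstring('<artifacts/>')
        BuildArtifact('target/out.jar', 'dist').append_to(root, gocd_18_3_and_above=modern)
        # 18.3+ emits <artifact src=... dest=... type="build" />; older servers get <artifact src=... dest=... />
        print(ET.tostring(root).decode())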
|
def notebookUI(samplenxs, mtnxs, initdos=None, options=None, load_options_path=None):
import yaml
if options is not None and load_options_path:
raise RuntimeError(
"Both options and load_options_path were set: %s, %s" %(
options, load_options_path)
)
if load_options_path:
with open(load_options_path) as stream:
            options = yaml.load(stream, Loader=yaml.Loader)
if options is None:
options = default_options
#
import ipywidgets as widgets
from IPython.display import display
w_mt_fraction = widgets.BoundedFloatText(description="mt_fraction", min=0., max=100., value=options['mt_fraction'])
w_const_bg_fraction = widgets.BoundedFloatText(description="const_bg_fraction", min=0., max=1., value=options.get('const_bg_fraction', 0.0))
w_Emin = widgets.BoundedFloatText(description="Emin", min=-1000., max=0., value=options['Emin'])
w_Emax = widgets.BoundedFloatText(description="Emax", min=0., max=1000., value=options['Emax'])
w_dE = widgets.BoundedFloatText(description="dE", min=0, max=50., value=options['dE'])
w_Qmin = widgets.BoundedFloatText(description="Qmin", min=0, max=50., value=options['Qmin'])
w_Qmax = widgets.BoundedFloatText(description="Qmax", min=0., max=50., value=options['Qmax'])
w_dQ = widgets.BoundedFloatText(description="dQ", min=0, max=5., value=options['dQ'])
w_T = widgets.BoundedFloatText(description="Temperature", min=0., max=5000., value=options['T'])
w_Ecutoff = widgets.BoundedFloatText(description="Max energy of phonons", min=0, max=1000., value=options['Ecutoff'])
w_ElasticPeakMin = widgets.BoundedFloatText(description="Emin of elastic peak", min=-300., max=0., value=options['ElasticPeakMin'])
w_ElasticPeakMax = widgets.BoundedFloatText(description="Emax of elastic peak", min=0., max=300., value=options['ElasticPeakMax'])
w_M = widgets.BoundedFloatText(description="Average atom mass", min=1., max=1000., value=options['M'])
w_C_ms = widgets.BoundedFloatText(description="C_ms", min=0., max=10., value=options['C_ms'])
w_Ei = widgets.BoundedFloatText(description="Ei", min=0, max=2000., value=options['Ei'])
w_workdir = widgets.Text(description="work dir", value=options['workdir'])
update_strategy_weights = options.get('update_strategy_weights', (.5, .5))
w_update_weight_continuity = widgets.BoundedFloatText(
description='"enforce continuity" weight for DOS update strategy',
min=0., max=1., value=update_strategy_weights[0])
w_update_weight_area = widgets.BoundedFloatText(
description='"area conservation" weight for DOS update strategy',
min=0., max=1., value=update_strategy_weights[1])
w_inputs = (
w_mt_fraction, w_const_bg_fraction,
w_Emin, w_Emax, w_dE,
w_Qmin, w_Qmax, w_dQ,
w_T, w_Ecutoff,
w_ElasticPeakMin, w_ElasticPeakMax,
w_M, w_C_ms, w_Ei, w_workdir,
w_update_weight_continuity, w_update_weight_area
)
w_Run = widgets.Button(description="Run")
w_all = w_inputs + (w_Run,)
def submit(b):
# suppress warning from h5py
import warnings
warnings.simplefilter(action = "ignore", category = FutureWarning)
dos_update_weights = _get_dos_update_weights(w_update_weight_continuity.value, w_update_weight_area.value)
#
kargs = dict(
mt_fraction = w_mt_fraction.value,
const_bg_fraction = w_const_bg_fraction.value,
Emin=w_Emin.value, Emax=w_Emax.value, dE=w_dE.value,
Qmin=w_Qmin.value, Qmax=w_Qmax.value, dQ=w_dQ.value,
T=w_T.value, Ecutoff=w_Ecutoff.value,
elastic_E_cutoff=(w_ElasticPeakMin.value, w_ElasticPeakMax.value),
M=w_M.value,
C_ms=w_C_ms.value, Ei=w_Ei.value,
workdir=w_workdir.value,
initdos=initdos,
update_strategy_weights = dos_update_weights,
)
import pprint, os, yaml
# pprint.pprint(samplenxs)
# pprint.pprint(mtnxs)
# pprint.pprint(kargs)
workdir = kargs['workdir']
if not os.path.exists(workdir):
os.makedirs(workdir)
options = dict(kargs)
options['ElasticPeakMin']=w_ElasticPeakMin.value
options['ElasticPeakMax']=w_ElasticPeakMax.value
with open(os.path.join(workdir, 'getdos-opts.yaml'), 'wt') as stream:
yaml.dump(options, stream)
maxiter = 10
close = lambda w: w.close()
list(map(close, w_all))
from ..getdos import getDOS
log_progress(getDOS(samplenxs, mt_nxs=mtnxs, maxiter=maxiter, **kargs), every=1, size=maxiter+2)
return
w_Run.on_click( submit )
display(*w_all)
return
def _get_dos_update_weights(*w):
# w should be all positive
wsum = sum(w)
if wsum <= 0:
N = len(w)
return [1./N]*N
return [t/wsum for t in w]
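# For example, _get_dos_update_weights(1., 3.) returns [0.25, 0.75]; a non-positive total
# such as _get_dos_update_weights(0., 0.) falls back to equal weights, [0.5, 0.5].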
# modified from https://github.com/alexanderkuk/log-progress
def log_progress(sequence, every=None, size=None):
from ipywidgets import IntProgress, HTML, VBox
from IPython.display import display
is_iterator = False
if size is None:
try:
size = len(sequence)
except TypeError:
is_iterator = True
if size is not None:
if every is None:
if size <= 200:
every = 1
else:
every = int(size / 200) # every 0.5%
else:
assert every is not None, 'sequence is iterator, set every'
if is_iterator:
progress = IntProgress(min=0, max=1, value=1)
progress.bar_style = 'info'
else:
progress = IntProgress(min=0, max=size, value=0)
label = HTML()
box = VBox(children=[label, progress])
display(box)
index = 0
try:
for index, msg in enumerate(sequence, 1):
if index == 1 or index % every == 0:
if is_iterator:
label.value = 'Running: {index} / ?: {msg}...'.format(index=index, msg=msg)
else:
progress.value = index
label.value = 'Running: {index} / {size}: {msg}...'.format(
index=index,
size=size,
msg=msg
)
except:
progress.bar_style = 'danger'
raise
else:
progress.bar_style = 'success'
progress.value = size
# label.value = str(index or '?')
label.value = 'Done.'
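# Hedged usage sketch (illustrative): log_progress consumes any iterable of progress
# messages and renders a progress bar plus a status label, e.g.
#     def steps(n):
#         for i in range(n):
#             yield 'step %d' % i
#     log_progress(steps(5), every=1, size=5)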
default_options = dict(
mt_fraction = 0.9,
const_bg_fraction = 0.,
Emin = -70,
Emax = 70,
dE = 1.,
Qmin = 0.,
Qmax = 14.,
dQ = 0.1,
T = 300.,
Ecutoff = 50.,
ElasticPeakMin = -20,
ElasticPeakMax = 7.,
M = 50.94,
C_ms = 0.3,
Ei = 100.,
workdir = 'work',
)
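# Hedged usage sketch (the nxs file names are hypothetical): from a Jupyter notebook one
# could run either of
#     notebookUI('sample.nxs', 'mt.nxs')                                             # start from default_options
#     notebookUI('sample.nxs', 'mt.nxs', load_options_path='work/getdos-opts.yaml')  # reuse previously saved options
# and clicking "Run" calls getDOS with the widget values and saves getdos-opts.yaml under workdir.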
|
# Copyright 1999-2021 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from collections import OrderedDict
import numpy as np
from ... import opcodes
from ...core import Entity, Chunk, CHUNK_TYPE, OutputType, recursive_tile
from ...serialization.serializables import AnyField, StringField
from ..core import IndexValue, DATAFRAME_TYPE, SERIES_TYPE, INDEX_CHUNK_TYPE
from ..operands import DataFrameOperand, DataFrameOperandMixin
from ..utils import parse_index, validate_axis
class DataFrameDrop(DataFrameOperandMixin, DataFrameOperand):
_op_type_ = opcodes.DATAFRAME_DROP
_index = AnyField('index')
_columns = AnyField('columns')
_level = AnyField('level')
_errors = StringField('errors')
def __init__(self, index=None, columns=None, level=None, errors=None, **kw):
super().__init__(_index=index, _columns=columns, _level=level, _errors=errors,
**kw)
@property
def index(self):
return self._index
@property
def columns(self):
return self._columns
@property
def level(self):
return self._level
@property
def errors(self):
return self._errors
def _filter_dtypes(self, dtypes, ignore_errors=False):
if self._columns:
return dtypes.drop(index=self._columns, level=self._level,
errors='ignore' if ignore_errors else self._errors)
else:
return dtypes
def _set_inputs(self, inputs):
super()._set_inputs(inputs)
inputs_iter = iter(self._inputs[1:])
if len(self._inputs) > 1:
self._index = next(inputs_iter)
def __call__(self, df_or_series):
params = df_or_series.params.copy()
shape_list = list(df_or_series.shape)
if self._index is not None:
if isinstance(df_or_series.index_value.value, IndexValue.RangeIndex):
params['index_value'] = parse_index(None, (df_or_series.key, df_or_series.index_value.key))
shape_list[0] = np.nan
if isinstance(df_or_series, DATAFRAME_TYPE):
new_dtypes = self._filter_dtypes(df_or_series.dtypes)
params['columns_value'] = parse_index(new_dtypes.index, store_data=True)
params['dtypes'] = new_dtypes
shape_list[1] = len(new_dtypes)
self.output_types = [OutputType.dataframe]
elif isinstance(df_or_series, SERIES_TYPE):
self.output_types = [OutputType.series]
else:
self.output_types = [OutputType.index]
params['shape'] = tuple(shape_list)
inputs = [df_or_series]
if isinstance(self._index, Entity):
inputs.append(self._index)
return self.new_tileable(inputs, **params)
@classmethod
def tile(cls, op: 'DataFrameDrop'):
inp = op.inputs[0]
out = op.outputs[0]
if len(op.inputs) > 1:
rechunked = yield from recursive_tile(
op.index.rechunk({0: (op.index.shape[0],)}))
index_chunk = rechunked.chunks[0]
else:
index_chunk = op.index
col_to_args = OrderedDict()
chunks = []
for c in inp.chunks:
params = c.params.copy()
if isinstance(inp, DATAFRAME_TYPE):
new_dtypes, new_col_id = col_to_args.get(c.index[1], (None, None))
if new_dtypes is None:
new_col_id = len(col_to_args)
new_dtypes = op._filter_dtypes(c.dtypes, ignore_errors=True)
if len(new_dtypes) == 0:
continue
col_to_args[c.index[1]] = (new_dtypes, new_col_id)
params.update(dict(dtypes=new_dtypes, index=(c.index[0], new_col_id),
index_value=c.index_value,
columns_value=parse_index(new_dtypes.index, store_data=True)))
if op.index is not None:
params.update(dict(shape=(np.nan, len(new_dtypes)),
index_value=parse_index(None, (c.key, c.index_value.key))))
else:
params['shape'] = (c.shape[0], len(new_dtypes))
elif op.index is not None:
params.update(dict(shape=(np.nan,), index_value=parse_index(None, (c.key, c.index_value.key))))
chunk_inputs = [c]
if isinstance(index_chunk, Chunk):
chunk_inputs.append(index_chunk)
new_op = op.copy().reset_key()
new_op._index = index_chunk
chunks.append(new_op.new_chunk(chunk_inputs, **params))
new_op = op.copy().reset_key()
params = out.params.copy()
if op.index is not None:
nsplits_list = [(np.nan,) * inp.chunk_shape[0]]
else:
nsplits_list = [inp.nsplits[0]]
if isinstance(inp, DATAFRAME_TYPE):
nsplits_list.append(tuple(len(dt) for dt, _ in col_to_args.values()))
params.update(dict(chunks=chunks, nsplits=tuple(nsplits_list)))
return new_op.new_tileables(op.inputs, **params)
@classmethod
def execute(cls, ctx, op: 'DataFrameDrop'):
inp = op.inputs[0]
if isinstance(op.index, CHUNK_TYPE):
index_val = ctx[op.index.key]
else:
index_val = op.index
if isinstance(inp, INDEX_CHUNK_TYPE):
ctx[op.outputs[0].key] = ctx[inp.key].drop(index_val, errors='ignore')
else:
ctx[op.outputs[0].key] = ctx[inp.key].drop(
index=index_val, columns=op.columns, level=op.level, errors='ignore')
def _drop(df_or_series, labels=None, axis=0, index=None, columns=None, level=None,
inplace=False, errors='raise'):
axis = validate_axis(axis, df_or_series)
if labels is not None:
if axis == 0:
index = labels
else:
columns = labels
if index is not None and errors == 'raise':
warnings.warn('Errors will not raise for non-existing indices')
if isinstance(columns, Entity):
raise NotImplementedError('Columns cannot be Mars objects')
op = DataFrameDrop(index=index, columns=columns, level=level, errors=errors)
df = op(df_or_series)
if inplace:
df_or_series.data = df.data
else:
return df
def df_drop(df, labels=None, axis=0, index=None, columns=None, level=None,
inplace=False, errors='raise'):
"""
Drop specified labels from rows or columns.
Remove rows or columns by specifying label names and corresponding
axis, or by specifying directly index or column names. When using a
multi-index, labels on different levels can be removed by specifying
the level.
Parameters
----------
labels : single label or list-like
Index or column labels to drop.
axis : {0 or 'index', 1 or 'columns'}, default 0
Whether to drop labels from the index (0 or 'index') or
columns (1 or 'columns').
index : single label or list-like
Alternative to specifying axis (``labels, axis=0``
is equivalent to ``index=labels``).
columns : single label or list-like
Alternative to specifying axis (``labels, axis=1``
is equivalent to ``columns=labels``).
level : int or level name, optional
For MultiIndex, level from which the labels will be removed.
inplace : bool, default False
If True, do operation inplace and return None.
errors : {'ignore', 'raise'}, default 'raise'
If 'ignore', suppress error and only existing labels are
dropped. Note that errors for missing indices will not raise.
Returns
-------
DataFrame
DataFrame without the removed index or column labels.
Raises
------
KeyError
If any of the labels is not found in the selected axis.
See Also
--------
DataFrame.loc : Label-location based indexer for selection by label.
DataFrame.dropna : Return DataFrame with labels on given axis omitted
where (all or any) data are missing.
DataFrame.drop_duplicates : Return DataFrame with duplicate rows
removed, optionally only considering certain columns.
Series.drop : Return Series with specified index labels removed.
Examples
--------
>>> import numpy as np
>>> import pandas as pd
>>> import mars.dataframe as md
>>> df = md.DataFrame(np.arange(12).reshape(3, 4),
... columns=['A', 'B', 'C', 'D'])
>>> df.execute()
A B C D
0 0 1 2 3
1 4 5 6 7
2 8 9 10 11
Drop columns
>>> df.drop(['B', 'C'], axis=1).execute()
A D
0 0 3
1 4 7
2 8 11
>>> df.drop(columns=['B', 'C']).execute()
A D
0 0 3
1 4 7
2 8 11
Drop a row by index
>>> df.drop([0, 1]).execute()
A B C D
2 8 9 10 11
Drop columns and/or rows of MultiIndex DataFrame
>>> midx = pd.MultiIndex(levels=[['lama', 'cow', 'falcon'],
... ['speed', 'weight', 'length']],
... codes=[[0, 0, 0, 1, 1, 1, 2, 2, 2],
... [0, 1, 2, 0, 1, 2, 0, 1, 2]])
>>> df = md.DataFrame(index=midx, columns=['big', 'small'],
... data=[[45, 30], [200, 100], [1.5, 1], [30, 20],
... [250, 150], [1.5, 0.8], [320, 250],
... [1, 0.8], [0.3, 0.2]])
>>> df.execute()
big small
lama speed 45.0 30.0
weight 200.0 100.0
length 1.5 1.0
cow speed 30.0 20.0
weight 250.0 150.0
length 1.5 0.8
falcon speed 320.0 250.0
weight 1.0 0.8
length 0.3 0.2
>>> df.drop(index='cow', columns='small').execute()
big
lama speed 45.0
weight 200.0
length 1.5
falcon speed 320.0
weight 1.0
length 0.3
>>> df.drop(index='length', level=1).execute()
big small
lama speed 45.0 30.0
weight 200.0 100.0
cow speed 30.0 20.0
weight 250.0 150.0
falcon speed 320.0 250.0
weight 1.0 0.8
"""
return _drop(df, labels=labels, axis=axis, index=index, columns=columns,
level=level, inplace=inplace, errors=errors)
def df_pop(df, item):
"""
Return item and drop from frame. Raise KeyError if not found.
Parameters
----------
item : str
Label of column to be popped.
Returns
-------
Series
Examples
--------
>>> import numpy as np
>>> import mars.dataframe as md
>>> df = md.DataFrame([('falcon', 'bird', 389.0),
... ('parrot', 'bird', 24.0),
... ('lion', 'mammal', 80.5),
... ('monkey', 'mammal', np.nan)],
... columns=('name', 'class', 'max_speed'))
>>> df.execute()
name class max_speed
0 falcon bird 389.0
1 parrot bird 24.0
2 lion mammal 80.5
3 monkey mammal NaN
>>> df.pop('class').execute()
0 bird
1 bird
2 mammal
3 mammal
Name: class, dtype: object
>>> df.execute()
name max_speed
0 falcon 389.0
1 parrot 24.0
2 lion 80.5
3 monkey NaN
"""
series = df.data[item]
df_drop(df, item, axis=1, inplace=True)
return series
def series_drop(series, labels=None, axis=0, index=None, columns=None, level=None,
inplace=False, errors='raise'):
"""
Return Series with specified index labels removed.
Remove elements of a Series based on specifying the index labels.
When using a multi-index, labels on different levels can be removed
by specifying the level.
Parameters
----------
labels : single label or list-like
Index labels to drop.
axis : 0, default 0
Redundant for application on Series.
index : single label or list-like
Redundant for application on Series, but 'index' can be used instead
of 'labels'.
.. versionadded:: 0.21.0
columns : single label or list-like
No change is made to the Series; use 'index' or 'labels' instead.
.. versionadded:: 0.21.0
level : int or level name, optional
For MultiIndex, level for which the labels will be removed.
inplace : bool, default False
If True, do operation inplace and return None.
errors : {'ignore', 'raise'}, default 'raise'
Note that this argument is kept only for compatibility, and errors
will not raise even if ``errors=='raise'``.
Returns
-------
Series
Series with specified index labels removed.
Raises
------
KeyError
If none of the labels are found in the index.
See Also
--------
Series.reindex : Return only specified index labels of Series.
Series.dropna : Return series without null values.
Series.drop_duplicates : Return Series with duplicate values removed.
DataFrame.drop : Drop specified labels from rows or columns.
Examples
--------
>>> import numpy as np
>>> import pandas as pd
>>> import mars.dataframe as md
>>> s = md.Series(data=np.arange(3), index=['A', 'B', 'C'])
>>> s.execute()
A 0
B 1
C 2
dtype: int64
Drop labels B en C
>>> s.drop(labels=['B', 'C']).execute()
A 0
dtype: int64
Drop 2nd level label in MultiIndex Series
>>> midx = pd.MultiIndex(levels=[['lama', 'cow', 'falcon'],
... ['speed', 'weight', 'length']],
... codes=[[0, 0, 0, 1, 1, 1, 2, 2, 2],
... [0, 1, 2, 0, 1, 2, 0, 1, 2]])
>>> s = md.Series([45, 200, 1.2, 30, 250, 1.5, 320, 1, 0.3],
... index=midx)
>>> s.execute()
lama speed 45.0
weight 200.0
length 1.2
cow speed 30.0
weight 250.0
length 1.5
falcon speed 320.0
weight 1.0
length 0.3
dtype: float64
>>> s.drop(labels='weight', level=1).execute()
lama speed 45.0
length 1.2
cow speed 30.0
length 1.5
falcon speed 320.0
length 0.3
dtype: float64
"""
return _drop(series, labels=labels, axis=axis, index=index, columns=columns,
level=level, inplace=inplace, errors=errors)
def index_drop(index, labels, errors='raise'):
"""
Make new Index with passed list of labels deleted.
Parameters
----------
labels : array-like
errors : {'ignore', 'raise'}, default 'raise'
Note that this argument is kept only for compatibility, and errors
will not raise even if ``errors=='raise'``.
Returns
-------
dropped : Index
Raises
------
KeyError
If not all of the labels are found in the selected axis
"""
return _drop(index, labels=labels, errors=errors)
|
def Articles():
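    # Hard-coded placeholder articles used as sample/demo data.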
return [
{
'id': 1,
'title': 'Article 1',
'body': 'Body of first article',
'author': 'Tom Daley',
'create_date': '07-28-2019'
},
{
'id': 2,
'title': 'Article 2',
'body': 'Body of second article',
'author': 'Ava Daley',
'create_date': '07-28-2019'
},
{
'id': 3,
'title': 'Article 3',
'body': 'Body of third article',
'author': 'Marissa Daley',
'create_date': '07-28-2019'
}
]
|
import boto3
import botocore
class S3:
def __init__(self, key, secret, bucket):
self.Key = key
self.Secret = secret
self.Bucket = bucket
return
def upload_file(self, local_file, remote_file):
s3 = boto3.resource(
's3',
aws_access_key_id=self.Key,
aws_secret_access_key=self.Secret)
try:
s3.Bucket(self.Bucket).upload_file(local_file, remote_file)
except botocore.exceptions.ClientError as e:
if e.response['Error']['Code'] == "404":
print("The object does not exist.")
else:
raise
def download_file(self, remote_file, local_file):
        s3 = boto3.resource(
            's3',
            aws_access_key_id=self.Key,
            aws_secret_access_key=self.Secret)
try:
s3.Bucket(self.Bucket).download_file(remote_file, local_file)
except botocore.exceptions.ClientError as e:
if e.response['Error']['Code'] == "404":
print("The object does not exist.")
else:
raise
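# Example usage (hypothetical credentials and object names):
#   s3 = S3('ACCESS_KEY', 'SECRET_KEY', 'my-bucket')
#   s3.upload_file('local/report.csv', 'backups/report.csv')
#   s3.download_file('backups/report.csv', 'local/report_copy.csv')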
|
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import os.path
import shutil
import sys
import tempfile
import spack.util.environment

from spack import *
class Octave(AutotoolsPackage, GNUMirrorPackage):
"""GNU Octave is a high-level language, primarily intended for numerical
computations.
It provides a convenient command line interface for solving linear and
nonlinear problems numerically, and for performing other numerical
experiments using a language that is mostly compatible with Matlab.
It may also be used as a batch-oriented language.
"""
homepage = "https://www.gnu.org/software/octave/"
gnu_mirror_path = "octave/octave-4.0.0.tar.gz"
maintainers = ['mtmiller']
extendable = True
version('5.1.0', sha256='e36b1124cac27c7caa51cc57de408c31676d5f0096349b4d50b57bfe1bcd7495')
version('4.4.1', sha256='09fbd0f212f4ef21e53f1d9c41cf30ce3d7f9450fb44911601e21ed64c67ae97')
version('4.4.0', sha256='72f846379fcec7e813d46adcbacd069d72c4f4d8f6003bcd92c3513aafcd6e96')
version('4.2.2', sha256='77b84395d8e7728a1ab223058fe5e92dc38c03bc13f7358e6533aab36f76726e')
version('4.2.1', sha256='80c28f6398576b50faca0e602defb9598d6f7308b0903724442c2a35a605333b')
version('4.2.0', sha256='443ba73782f3531c94bcf016f2f0362a58e186ddb8269af7dcce973562795567')
version('4.0.2', sha256='39cd8fd36c218fc00adace28d74a6c7c9c6faab7113a5ba3c4372324c755bdc1')
version('4.0.0', sha256='4c7ee0957f5dd877e3feb9dfe07ad5f39b311f9373932f0d2a289dc97cca3280')
# patches
# see https://savannah.gnu.org/bugs/?50234
patch('patch_4.2.1_inline.diff', when='@4.2.1')
# Variants
variant('readline', default=True)
variant('arpack', default=False)
variant('curl', default=False)
variant('fftw', default=False)
variant('fltk', default=False)
variant('fontconfig', default=False)
variant('freetype', default=False)
variant('glpk', default=False)
variant('gl2ps', default=False)
variant('gnuplot', default=False)
variant('magick', default=False)
variant('hdf5', default=False)
variant('jdk', default=False)
variant('llvm', default=False)
variant('opengl', default=False)
variant('qhull', default=False)
variant('qrupdate', default=False)
variant('qscintilla', default=False)
variant('qt', default=False)
variant('suitesparse', default=False)
variant('zlib', default=False)
# Required dependencies
depends_on('blas')
depends_on('lapack')
# Octave does not configure with sed from darwin:
depends_on('sed', when=sys.platform == 'darwin', type='build')
depends_on('pcre')
depends_on('pkgconfig', type='build')
# Strongly recommended dependencies
depends_on('readline', when='+readline')
# Optional dependencies
depends_on('arpack-ng', when='+arpack')
depends_on('curl', when='+curl')
depends_on('fftw', when='+fftw')
depends_on('fltk', when='+fltk')
depends_on('fontconfig', when='+fontconfig')
depends_on('freetype', when='+freetype')
depends_on('glpk', when='+glpk')
depends_on('gl2ps', when='+gl2ps')
depends_on('gnuplot', when='+gnuplot')
depends_on('imagemagick', when='+magick')
depends_on('hdf5', when='+hdf5')
depends_on('java', when='+jdk') # TODO: requires Java 6 ?
depends_on('llvm', when='+llvm')
# depends_on('opengl', when='+opengl') # TODO: add package
depends_on('qhull', when='+qhull')
depends_on('qrupdate', when='+qrupdate')
# depends_on('qscintilla', when='+qscintilla) # TODO: add package
depends_on('qt+opengl', when='+qt')
depends_on('suite-sparse', when='+suitesparse')
depends_on('zlib', when='+zlib')
def patch(self):
# Filter mkoctfile.in.cc to use underlying compilers and not
# Spack compiler wrappers. We are patching the template file
# and not mkoctfile.cc since the latter is generated as part
# of the build.
mkoctfile_in = os.path.join(
self.stage.source_path, 'src', 'mkoctfile.in.cc'
)
quote = lambda s: '"' + s + '"'
entries_to_patch = {
r'%OCTAVE_CONF_MKOCTFILE_CC%': quote(self.compiler.cc),
r'%OCTAVE_CONF_MKOCTFILE_CXX%': quote(self.compiler.cxx),
r'%OCTAVE_CONF_MKOCTFILE_F77%': quote(self.compiler.f77),
r'%OCTAVE_CONF_MKOCTFILE_DL_LD%': quote(self.compiler.cxx),
r'%OCTAVE_CONF_MKOCTFILE_LD_CXX%': quote(self.compiler.cxx)
}
for pattern, subst in entries_to_patch.items():
filter_file(pattern, subst, mkoctfile_in)
@run_after('install')
@on_package_attributes(run_tests=True)
def check_mkoctfile_works_outside_of_build_env(self):
# Check that mkoctfile is properly configured and can compile
# Octave extensions outside of the build env
mkoctfile = Executable(os.path.join(self.prefix, 'bin', 'mkoctfile'))
helloworld_cc = os.path.join(
os.path.dirname(__file__), 'helloworld.cc'
)
tmp_dir = tempfile.mkdtemp()
shutil.copy(helloworld_cc, tmp_dir)
# We need to unset these variables since we are still within
# Spack's build environment when running tests
vars_to_unset = ['CC', 'CXX', 'F77', 'FC']
with spack.util.environment.preserve_environment(*vars_to_unset):
# Delete temporarily the environment variables that point
# to Spack compiler wrappers
for v in vars_to_unset:
del os.environ[v]
# Check that mkoctfile outputs the expected value for CC
cc = mkoctfile('-p', 'CC', output=str)
msg = "mkoctfile didn't output the expected CC compiler"
assert self.compiler.cc in cc, msg
# Try to compile an Octave extension
shutil.copy(helloworld_cc, tmp_dir)
with working_dir(tmp_dir):
mkoctfile('helloworld.cc')
def configure_args(self):
# See
# https://github.com/macports/macports-ports/blob/master/math/octave/
# https://github.com/Homebrew/homebrew-science/blob/master/octave.rb
spec = self.spec
config_args = []
# Required dependencies
config_args.extend([
"--with-blas=%s" % spec['blas'].libs.ld_flags,
"--with-lapack=%s" % spec['lapack'].libs.ld_flags
])
# Strongly recommended dependencies
if '+readline' in spec:
config_args.append('--enable-readline')
else:
config_args.append('--disable-readline')
# Optional dependencies
if '+arpack' in spec:
sa = spec['arpack-ng']
config_args.extend([
"--with-arpack-includedir=%s" % sa.prefix.include,
"--with-arpack-libdir=%s" % sa.prefix.lib
])
else:
config_args.append("--without-arpack")
if '+curl' in spec:
config_args.extend([
"--with-curl-includedir=%s" % spec['curl'].prefix.include,
"--with-curl-libdir=%s" % spec['curl'].prefix.lib
])
else:
config_args.append("--without-curl")
if '+fftw' in spec:
config_args.extend([
"--with-fftw3-includedir=%s" % spec['fftw'].prefix.include,
"--with-fftw3-libdir=%s" % spec['fftw'].prefix.lib,
"--with-fftw3f-includedir=%s" % spec['fftw'].prefix.include,
"--with-fftw3f-libdir=%s" % spec['fftw'].prefix.lib
])
else:
config_args.extend([
"--without-fftw3",
"--without-fftw3f"
])
if '+fltk' in spec:
config_args.extend([
"--with-fltk-prefix=%s" % spec['fltk'].prefix,
"--with-fltk-exec-prefix=%s" % spec['fltk'].prefix
])
else:
config_args.append("--without-fltk")
if '+glpk' in spec:
config_args.extend([
"--with-glpk-includedir=%s" % spec['glpk'].prefix.include,
"--with-glpk-libdir=%s" % spec['glpk'].prefix.lib
])
else:
config_args.append("--without-glpk")
if '+magick' in spec:
config_args.append("--with-magick=%s"
% spec['imagemagick'].prefix.lib)
else:
config_args.append("--without-magick")
if '+hdf5' in spec:
config_args.extend([
"--with-hdf5-includedir=%s" % spec['hdf5'].prefix.include,
"--with-hdf5-libdir=%s" % spec['hdf5'].prefix.lib
])
else:
config_args.append("--without-hdf5")
if '+jdk' in spec:
config_args.extend([
"--with-java-homedir=%s" % spec['java'].home,
"--with-java-includedir=%s" % spec['java'].home.include,
"--with-java-libdir=%s" % spec['java'].libs.directories[0]
])
else:
config_args.append("--disable-java")
if '~opengl' in spec:
config_args.extend([
"--without-opengl",
"--without-framework-opengl"
])
# TODO: opengl dependency and package is missing?
if '+qhull' in spec:
config_args.extend([
"--with-qhull-includedir=%s" % spec['qhull'].prefix.include,
"--with-qhull-libdir=%s" % spec['qhull'].prefix.lib
])
else:
config_args.append("--without-qhull")
if '+qrupdate' in spec:
config_args.extend([
"--with-qrupdate-includedir=%s"
% spec['qrupdate'].prefix.include,
"--with-qrupdate-libdir=%s" % spec['qrupdate'].prefix.lib
])
else:
config_args.append("--without-qrupdate")
if '+zlib' in spec:
config_args.extend([
"--with-z-includedir=%s" % spec['zlib'].prefix.include,
"--with-z-libdir=%s" % spec['zlib'].prefix.lib
])
else:
config_args.append("--without-z")
return config_args
# ========================================================================
# Set up environment to make install easy for Octave extensions.
# ========================================================================
def setup_dependent_package(self, module, dependent_spec):
"""Called before Octave modules' install() methods.
In most cases, extensions will only need to have one line:
octave('--eval', 'pkg install %s' % self.stage.archive_file)
"""
# Octave extension builds can have a global Octave executable function
module.octave = Executable(join_path(self.spec.prefix.bin, 'octave'))
|
"""
WSGI config for travellog project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "travellog.settings")
application = get_wsgi_application()
|
from .net_stream_interface import INetStream
class NetStream(INetStream):
def __init__(self, muxed_stream):
self.muxed_stream = muxed_stream
self.mplex_conn = muxed_stream.mplex_conn
self.protocol_id = None
def get_protocol(self):
"""
:return: protocol id that stream runs on
"""
return self.protocol_id
def set_protocol(self, protocol_id):
"""
:param protocol_id: protocol id that stream runs on
:return: true if successful
"""
self.protocol_id = protocol_id
async def read(self):
"""
read from stream
:return: bytes of input until EOF
"""
return await self.muxed_stream.read()
async def write(self, data):
"""
write to stream
:return: number of bytes written
"""
return await self.muxed_stream.write(data)
async def close(self):
"""
close stream
:return: true if successful
"""
await self.muxed_stream.close()
return True
|
""" common utilities """
import itertools
import numpy as np
from pandas import (
DataFrame,
Float64Index,
MultiIndex,
Series,
UInt64Index,
date_range,
)
import pandas._testing as tm
def _mklbl(prefix, n):
return [f"{prefix}{i}" for i in range(n)]
def _axify(obj, key, axis):
# create a tuple accessor
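    # e.g. for a 2-D frame, _axify(df, key=2, axis=1) -> (slice(None), 2)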
axes = [slice(None)] * obj.ndim
axes[axis] = key
return tuple(axes)
class Base:
""" indexing comprehensive base class """
_kinds = {"series", "frame"}
_typs = {
"ints",
"uints",
"labels",
"mixed",
"ts",
"floats",
"empty",
"ts_rev",
"multi",
}
def setup_method(self, method):
self.series_ints = Series(np.random.rand(4), index=np.arange(0, 8, 2))
self.frame_ints = DataFrame(
np.random.randn(4, 4), index=np.arange(0, 8, 2), columns=np.arange(0, 12, 3)
)
self.series_uints = Series(
np.random.rand(4), index=UInt64Index(np.arange(0, 8, 2))
)
self.frame_uints = DataFrame(
np.random.randn(4, 4),
index=UInt64Index(range(0, 8, 2)),
columns=UInt64Index(range(0, 12, 3)),
)
self.series_floats = Series(
np.random.rand(4), index=Float64Index(range(0, 8, 2))
)
self.frame_floats = DataFrame(
np.random.randn(4, 4),
index=Float64Index(range(0, 8, 2)),
columns=Float64Index(range(0, 12, 3)),
)
m_idces = [
MultiIndex.from_product([[1, 2], [3, 4]]),
MultiIndex.from_product([[5, 6], [7, 8]]),
MultiIndex.from_product([[9, 10], [11, 12]]),
]
self.series_multi = Series(np.random.rand(4), index=m_idces[0])
self.frame_multi = DataFrame(
np.random.randn(4, 4), index=m_idces[0], columns=m_idces[1]
)
self.series_labels = Series(np.random.randn(4), index=list("abcd"))
self.frame_labels = DataFrame(
np.random.randn(4, 4), index=list("abcd"), columns=list("ABCD")
)
self.series_mixed = Series(np.random.randn(4), index=[2, 4, "null", 8])
self.frame_mixed = DataFrame(np.random.randn(4, 4), index=[2, 4, "null", 8])
self.series_ts = Series(
np.random.randn(4), index=date_range("20130101", periods=4)
)
self.frame_ts = DataFrame(
np.random.randn(4, 4), index=date_range("20130101", periods=4)
)
dates_rev = date_range("20130101", periods=4).sort_values(ascending=False)
self.series_ts_rev = Series(np.random.randn(4), index=dates_rev)
self.frame_ts_rev = DataFrame(np.random.randn(4, 4), index=dates_rev)
self.frame_empty = DataFrame()
self.series_empty = Series(dtype=object)
# form agglomerates
for kind in self._kinds:
d = {}
for typ in self._typs:
d[typ] = getattr(self, f"{kind}_{typ}")
setattr(self, kind, d)
def generate_indices(self, f, values=False):
"""
generate the indices
        if values is True, use the axis values;
        if False, use positional ranges
"""
axes = f.axes
if values:
axes = (list(range(len(ax))) for ax in axes)
return itertools.product(*axes)
def get_value(self, name, f, i, values=False):
""" return the value for the location i """
# check against values
if values:
return f.values[i]
elif name == "iat":
return f.iloc[i]
else:
assert name == "at"
return f.loc[i]
def check_values(self, f, func, values=False):
if f is None:
return
axes = f.axes
        indices = itertools.product(*axes)
        for i in indices:
result = getattr(f, func)[i]
# check against values
if values:
expected = f.values[i]
else:
expected = f
for a in reversed(i):
expected = expected.__getitem__(a)
tm.assert_almost_equal(result, expected)
def check_result(self, method, key, typs=None, axes=None, fails=None):
def _eq(axis, obj, key):
""" compare equal for these 2 keys """
axified = _axify(obj, key, axis)
try:
getattr(obj, method).__getitem__(axified)
except (IndexError, TypeError, KeyError) as detail:
                # if the raised error type is listed in fails, that's ok; otherwise re-raise
if fails is not None:
if isinstance(detail, fails):
return
raise
if typs is None:
typs = self._typs
if axes is None:
axes = [0, 1]
else:
assert axes in [0, 1]
axes = [axes]
# check
for kind in self._kinds:
d = getattr(self, kind)
for ax in axes:
for typ in typs:
assert typ in self._typs
obj = d[typ]
if ax < obj.ndim:
_eq(axis=ax, obj=obj, key=key)
|
def CalculateApacheIpHits(logfile_pathname):
# make a dictionary to store Ip's and their hit counts and read the
# contents of the logfile line by line
IpHitListing = {}
Contents = open(logfile_pathname, "r").readlines()
# go through each line of the logfile
for line in Contents:
#split the string to isolate the ip
Ip = line.split(" ")[0]
# ensure length of the ip is proper: see discussion
if 6 < len(Ip) < 15:
# Increase by 1 if ip exists else hit count = 1
IpHitListing[Ip] = IpHitListing.get(Ip, 0) + 1
return IpHitListing
# example usage
HitsDictionary = CalculateApacheIpHits("/usr/local/nusphere/apache/logs/access_log")
print(HitsDictionary["127.0.0.1"])
|
from .base import BaseGrader
class SimpleAI(BaseGrader):
def grade(self, submission, score):
try:
points = int(submission.text)
except ValueError:
points = 0
submission.points = points
submission.is_graded = True
submission.save()
score.points = points
score.save()
submission.competitor.update_total_score()
|
# -------------------------------------------------------------------- #
# This example was designed to show the project-level optimization
# option in GIAMS. This example was used in the original paper as well
# -------------------------------------------------------------------- #
import time
import ast
from Network import IndianaNetwork
from LifeCycleAnalyzer.Simulators import MainSimulator
from LifeCycleAnalyzer import LCA
from Optimizer import HillClimbing
from Optimizer import BruteForce
from Optimizer import GA
from Optimizer import IUC
from Optimizer import PSO
from utils.PredictiveModels.Linear import Linear
from utils.AwesomeTimeIt import timeit
from utils.GeneralSettings import *
class GeneralSettings:
n_elements = 1
n_states = 8
dt = 2
horizon = 20
discount_rate = 0.03
init_year = 0
n_steps = int(horizon/dt)
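    # with these defaults: n_steps = int(20 / 2) = 10 analysis steps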
def lca_instance():
# Creating the settings instance
settings = GeneralSettings()
# Creating the network
session_name = 'IndianaSHM'
    mynetwork = IndianaNetwork(file_name = "INDIANA2019",
settings = settings,
n_assets = 1,
is_deck = False,
is_superstructure = True,
is_substructure = False)
mynetwork.load_network()
mynetwork.set_current_budget_limit(100000)
mynetwork.set_budget_limit_model(Linear(X0 = 100000, drift = 0, settings = settings))
mynetwork.set_npv_budget_limit(10000)
# Creating the simulator
simulator = MainSimulator(settings = settings)
# shaping the main LCA
lca = LCA(lca_name = session_name,
settings = settings,
network = mynetwork,
simulator = simulator,
random = True,
is_hazard = True,
n_simulations = 10,
should_report = True)
return lca
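# Objective for the optimizer: maximize utility while weakly penalizing user cost (0.2 exponent).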
def obj_func(**kwargs):
return kwargs['Utility'] / kwargs['UserCost'] ** 0.2
def GA_test():
optimizer = GA(lca_instance)
optimizer.set_hyperparameters(crossver_prob = 0.75,
mutation_prob = 0.03,
population_size = 200,
n_generations = 200,
n_elites = 5,
optimzition_type = 'max',
n_jobs = 1)
# optimizer.optimize(rounds = 3)
optimizer.validate()
if __name__ == "__main__":
    GA_test()
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo.tests import common, Form
from odoo.tools import mute_logger
class TestDropship(common.TransactionCase):
def test_change_qty(self):
# enable the dropship and MTO route on the product
prod = self.env.ref('product.product_product_8')
dropshipping_route = self.env.ref('stock_dropshipping.route_drop_shipping')
mto_route = self.env.ref('stock.route_warehouse0_mto')
prod.write({'route_ids': [(6, 0, [dropshipping_route.id, mto_route.id])]})
# add a vendor
vendor1 = self.env['res.partner'].create({'name': 'vendor1'})
seller1 = self.env['product.supplierinfo'].create({
'name': vendor1.id,
'price': 8,
})
prod.write({'seller_ids': [(6, 0, [seller1.id])]})
# sell one unit of this product
cust = self.env['res.partner'].create({'name': 'customer1'})
so = self.env['sale.order'].create({
'partner_id': cust.id,
'partner_invoice_id': cust.id,
'partner_shipping_id': cust.id,
'order_line': [(0, 0, {
'name': prod.name,
'product_id': prod.id,
'product_uom_qty': 1.00,
'product_uom': prod.uom_id.id,
'price_unit': 12,
})],
'pricelist_id': self.env.ref('product.list0').id,
'picking_policy': 'direct',
})
so.action_confirm()
po = self.env['purchase.order'].search([('group_id', '=', so.procurement_group_id.id)])
po_line = po.order_line
# Check the qty on the P0
self.assertAlmostEqual(po_line.product_qty, 1.00)
# Update qty on SO and check PO
so.order_line.product_uom_qty = 2.00
self.assertAlmostEqual(po_line.product_qty, 2.00)
# Create a new so line
sol2 = self.env['sale.order.line'].create({
'order_id': so.id,
'name': prod.name,
'product_id': prod.id,
'product_uom_qty': 3.00,
'product_uom': prod.uom_id.id,
'price_unit': 12,
})
# there is a new line
pol2 = po.order_line - po_line
# the first line is unchanged
self.assertAlmostEqual(po_line.product_qty, 2.00)
# the new line matches the new line on the so
self.assertAlmostEqual(pol2.product_qty, sol2.product_uom_qty)
def test_00_dropship(self):
# Create a vendor
supplier_dropship = self.env['res.partner'].create({'name': 'Vendor of Dropshipping test'})
# Create new product without any routes
drop_shop_product = self.env['product.product'].create({
'name': "Pen drive",
'type': "product",
'categ_id': self.env.ref('product.product_category_1').id,
'lst_price': 100.0,
'standard_price': 0.0,
'uom_id': self.env.ref('uom.product_uom_unit').id,
'uom_po_id': self.env.ref('uom.product_uom_unit').id,
'seller_ids': [(0, 0, {
'delay': 1,
'name': supplier_dropship.id,
'min_qty': 2.0
})]
})
# Create a sales order with a line of 200 PCE incoming shipment, with route_id drop shipping
so_form = Form(self.env['sale.order'])
so_form.partner_id = self.env.ref('base.res_partner_2')
so_form.payment_term_id = self.env.ref('account.account_payment_term')
with mute_logger('odoo.tests.common.onchange'):
# otherwise complains that there's not enough inventory and
# apparently that's normal according to @jco and @sle
with so_form.order_line.new() as line:
line.product_id = drop_shop_product
line.product_uom_qty = 200
line.price_unit = 1.00
line.route_id = self.env.ref('stock_dropshipping.route_drop_shipping')
sale_order_drp_shpng = so_form.save()
# Confirm sales order
sale_order_drp_shpng.action_confirm()
# Check the sales order created a procurement group which has a procurement of 200 pieces
self.assertTrue(sale_order_drp_shpng.procurement_group_id, 'SO should have procurement group')
# Check a quotation was created to a certain vendor and confirm so it becomes a confirmed purchase order
purchase = self.env['purchase.order'].search([('partner_id', '=', supplier_dropship.id)])
self.assertTrue(purchase, "an RFQ should have been created by the scheduler")
purchase.button_confirm()
self.assertEquals(purchase.state, 'purchase', 'Purchase order should be in the approved state')
self.assertEquals(len(purchase.ids), 1, 'There should be one picking')
# Send the 200 pieces
purchase.picking_ids.move_lines.quantity_done = purchase.picking_ids.move_lines.product_qty
purchase.picking_ids.button_validate()
# Check one move line was created in Customers location with 200 pieces
move_line = self.env['stock.move.line'].search([
('location_dest_id', '=', self.env.ref('stock.stock_location_customers').id),
('product_id', '=', drop_shop_product.id)])
self.assertEquals(len(move_line.ids), 1, 'There should be exactly one move line')
|
from django.conf.urls import patterns, include, url
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns(
'',
url(r'^admin/', include(admin.site.urls)),
url(r'^accounts/', include('imagr_users.urls'))
)
|
import argparse
import os
import numpy as np
import math
import itertools
import torchvision.transforms as transforms
from torchvision.utils import save_image
from torch.utils.data import DataLoader
from torchvision import datasets
from torch.autograd import Variable
from mnistm import MNISTM
import torch.nn as nn
import torch.nn.functional as F
import torch
os.makedirs('images', exist_ok=True)
parser = argparse.ArgumentParser()
parser.add_argument('--n_epochs', type=int, default=200, help='number of epochs of training')
parser.add_argument('--batch_size', type=int, default=64, help='size of the batches')
parser.add_argument('--lr', type=float, default=0.0002, help='adam: learning rate')
parser.add_argument('--b1', type=float, default=0.5, help='adam: decay of first order momentum of gradient')
parser.add_argument('--b2', type=float, default=0.999, help='adam: decay of first order momentum of gradient')
parser.add_argument('--n_cpu', type=int, default=8, help='number of cpu threads to use during batch generation')
parser.add_argument('--n_residual_blocks', type=int, default=1, help='number of residual blocks in generator')
parser.add_argument('--latent_dim', type=int, default=10, help='dimensionality of the noise input')
parser.add_argument('--img_size', type=int, default=32, help='size of each image dimension')
parser.add_argument('--channels', type=int, default=3, help='number of image channels')
parser.add_argument('--n_classes', type=int, default=10, help='number of classes in the dataset')
parser.add_argument('--sample_interval', type=int, default=300, help='interval between image samples')
opt = parser.parse_args()
print(opt)
# Calculate output of image discriminator (PatchGAN)
patch = int(opt.img_size / 2**4)
patch = (1, patch, patch)
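# e.g. with the default img_size=32: 32 // 2**4 = 2, giving patch = (1, 2, 2)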
cuda = True if torch.cuda.is_available() else False
def weights_init_normal(m):
classname = m.__class__.__name__
print("classname : {}".format(classname))
if classname.find('Conv') != -1:
torch.nn.init.normal_(m.weight.data, 0.0, 0.02)
elif classname.find('BatchNorm') != -1:
torch.nn.init.normal_(m.weight.data, 1.0, 0.02)
torch.nn.init.constant_(m.bias.data, 0.0)
class ResidualBlock_back(nn.Module):
def __init__(self, in_features=64, out_features=64):
        super(ResidualBlock_back, self).__init__()
self.block = nn.Sequential(
nn.Conv2d(in_features, in_features, 3, 1, 1),
nn.BatchNorm2d(in_features),
nn.ReLU(inplace=True),
nn.Conv2d(in_features, in_features, 3, 1, 1),
nn.BatchNorm2d(in_features)
)
def forward(self, x):
return x + self.block(x)
class ResidualBlock(nn.Module):
def __init__(self, in_features=64, out_features=64):
super(ResidualBlock, self).__init__()
# calculate same padding:
# (w - k + 2*p)/s + 1 = o
# => p = (s(o-1) - w + k)/2
        # scratch example of the padding formula: (2*(128-1) - 64 + 3) / 2
### ENCODER
self.encode_block = nn.Sequential(
nn.Conv2d(in_channels=1*in_features,out_channels=2*in_features,kernel_size=(3, 3),stride=(2, 2),padding=0),
nn.BatchNorm2d(2*in_features),
nn.LeakyReLU(inplace=True),
nn.Conv2d(in_channels=2*in_features,out_channels=4*in_features,kernel_size=(3, 3),stride=(2, 2),padding=2),
nn.BatchNorm2d(4*in_features),
nn.LeakyReLU(inplace=True)
)
print("self.encode_block : {}".format(self.encode_block))
self.decode_block = nn.Sequential(
nn.ConvTranspose2d(in_channels=4*in_features,out_channels=2*in_features,kernel_size=(3, 3),stride=(2, 2), padding=2),
nn.BatchNorm2d(2*in_features),
nn.LeakyReLU(inplace=True),
nn.ConvTranspose2d(in_channels=2*in_features,out_channels=1*in_features,kernel_size=(3, 3),stride=(2, 2),padding=0),
nn.BatchNorm2d(1*in_features),
nn.LeakyReLU(inplace=True)
)
print("self.decode_block : {}".format(self.decode_block))
def forward(self, x):
encode_x = self.encode_block(x)
decode_x = self.decode_block(encode_x)
# decode_x = decode_x[:, :, :-1, :-1]
# decode_x = F.sigmoid(decode_x)
return x + decode_x
class Generator(nn.Module):
def __init__(self):
super(Generator, self).__init__()
# Fully-connected layer which constructs image channel shaped output from noise
self.fc = nn.Linear(opt.latent_dim, opt.channels*opt.img_size**2)
self.l1 = nn.Sequential(nn.Conv2d(opt.channels*2, 64, 3, 1, 1), nn.ReLU(inplace=True))
resblocks = []
for _ in range(opt.n_residual_blocks):
# resblocks.append(ResidualBlock())
resblocks.append(ResidualBlock())
self.resblocks = nn.Sequential(*resblocks)
self.l2 = nn.Sequential(nn.Conv2d(64, opt.channels, 3, 1, 1), nn.Tanh())
def forward(self, img, z):
gen_input = torch.cat((img, self.fc(z).view(*img.shape)), 1)
out = self.l1(gen_input)
out = self.resblocks(out)
img_ = self.l2(out)
return img_
class Discriminator(nn.Module):
def __init__(self):
super(Discriminator, self).__init__()
def block(in_features, out_features, normalization=True):
"""Discriminator block"""
layers = [ nn.Conv2d(in_features, out_features, 3, stride=2, padding=1),
nn.LeakyReLU(0.2, inplace=True) ]
if normalization:
layers.append(nn.InstanceNorm2d(out_features))
return layers
self.model = nn.Sequential(
*block(opt.channels, 64, normalization=False),
*block(64, 128),
*block(128, 256),
*block(256, 512),
nn.Conv2d(512, 1, 3, 1, 1)
)
def forward(self, img):
validity = self.model(img)
return validity
class Classifier(nn.Module):
def __init__(self):
super(Classifier, self).__init__()
def block(in_features, out_features, normalization=True):
"""Classifier block"""
layers = [ nn.Conv2d(in_features, out_features, 3, stride=2, padding=1),
nn.LeakyReLU(0.2, inplace=True) ]
if normalization:
layers.append(nn.InstanceNorm2d(out_features))
return layers
self.model = nn.Sequential(
*block(opt.channels, 64, normalization=False),
*block(64, 128),
*block(128, 256),
*block(256, 512)
)
input_size = opt.img_size // 2**4
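        # with the default img_size=32, input_size = 2, so the flattened features are 512 * 2 * 2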
self.output_layer = nn.Sequential(
nn.Linear(512*input_size**2, opt.n_classes),
nn.Softmax()
)
def forward(self, img):
feature_repr = self.model(img)
feature_repr = feature_repr.view(feature_repr.size(0), -1)
label = self.output_layer(feature_repr)
return label
# Loss function
adversarial_loss = torch.nn.MSELoss()
task_loss = torch.nn.CrossEntropyLoss()
# Loss weights
lambda_adv = 1
lambda_task = 0.1
# Initialize generator and discriminator
generator = Generator()
discriminator = Discriminator()
classifier = Classifier()
if cuda:
generator.cuda()
discriminator.cuda()
classifier.cuda()
adversarial_loss.cuda()
task_loss.cuda()
# Initialize weights
generator.apply(weights_init_normal)
discriminator.apply(weights_init_normal)
classifier.apply(weights_init_normal)
# Configure data loader
os.makedirs('../../data/mnist', exist_ok=True)
dataloader_A = torch.utils.data.DataLoader(
datasets.MNIST('../../data/mnist', train=True, download=True,
transform=transforms.Compose([
transforms.Resize(opt.img_size),
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
])),
batch_size=opt.batch_size, shuffle=True)
os.makedirs('../../data/mnistm', exist_ok=True)
dataloader_B = torch.utils.data.DataLoader(
MNISTM('../../data/mnistm', train=True, download=True,
transform=transforms.Compose([
transforms.Resize(opt.img_size),
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
])),
batch_size=opt.batch_size, shuffle=True)
# Optimizers
optimizer_G = torch.optim.Adam( itertools.chain(generator.parameters(), classifier.parameters()),
lr=opt.lr, betas=(opt.b1, opt.b2))
optimizer_D = torch.optim.Adam(discriminator.parameters(), lr=opt.lr, betas=(opt.b1, opt.b2))
FloatTensor = torch.cuda.FloatTensor if cuda else torch.FloatTensor
LongTensor = torch.cuda.LongTensor if cuda else torch.LongTensor
# ----------
# Training
# ----------
# Keeps 100 accuracy measurements
task_performance = []
target_performance = []
for epoch in range(opt.n_epochs):
for i, ((imgs_A, labels_A), (imgs_B, labels_B)) in enumerate(zip(dataloader_A, dataloader_B)):
batch_size = imgs_A.size(0)
# Adversarial ground truths
valid = Variable(FloatTensor(batch_size, *patch).fill_(1.0), requires_grad=False)
fake = Variable(FloatTensor(batch_size, *patch).fill_(0.0), requires_grad=False)
# Configure input
imgs_A = Variable(imgs_A.type(FloatTensor).expand(batch_size, 3, opt.img_size, opt.img_size))
labels_A = Variable(labels_A.type(LongTensor))
imgs_B = Variable(imgs_B.type(FloatTensor))
# -----------------
# Train Generator
# -----------------
optimizer_G.zero_grad()
# Sample noise
z = Variable(FloatTensor(np.random.uniform(-1, 1, (batch_size, opt.latent_dim))))
# Generate a batch of images
fake_B = generator(imgs_A, z)
# Perform task on translated source image
label_pred = classifier(fake_B)
# Calculate the task loss
task_loss_ = (task_loss(label_pred, labels_A) + \
task_loss(classifier(imgs_A), labels_A)) / 2
# Loss measures generator's ability to fool the discriminator
g_loss = lambda_adv * adversarial_loss(discriminator(fake_B), valid) + \
lambda_task * task_loss_
g_loss.backward()
optimizer_G.step()
# ---------------------
# Train Discriminator
# ---------------------
optimizer_D.zero_grad()
# Measure discriminator's ability to classify real from generated samples
real_loss = adversarial_loss(discriminator(imgs_B), valid)
fake_loss = adversarial_loss(discriminator(fake_B.detach()), fake)
d_loss = (real_loss + fake_loss) / 2
d_loss.backward()
optimizer_D.step()
# ---------------------------------------
# Evaluate Performance on target domain
# ---------------------------------------
# Evaluate performance on translated Domain A
acc = np.mean(np.argmax(label_pred.data.cpu().numpy(), axis=1) == labels_A.data.cpu().numpy())
task_performance.append(acc)
if len(task_performance) > 100:
task_performance.pop(0)
# Evaluate performance on Domain B
pred_B = classifier(imgs_B)
target_acc = np.mean(np.argmax(pred_B.data.cpu().numpy(), axis=1) == labels_B.numpy())
target_performance.append(target_acc)
if len(target_performance) > 100:
target_performance.pop(0)
print ("[Epoch %d/%d] [Batch %d/%d] [D loss: %f] [G loss: %f] [CLF acc: %3d%% (%3d%%), target_acc: %3d%% (%3d%%)]" %
(epoch, opt.n_epochs,
i, len(dataloader_A),
d_loss.item(), g_loss.item(),
100*acc, 100*np.mean(task_performance),
100*target_acc, 100*np.mean(target_performance)))
batches_done = len(dataloader_A) * epoch + i
if batches_done % opt.sample_interval == 0:
sample = torch.cat((imgs_A.data[:5], fake_B.data[:5], imgs_B.data[:5]), -2)
save_image(sample, 'images/%d.png' % batches_done, nrow=int(math.sqrt(batch_size)), normalize=True)
|
from goalgen import *
class NewGuide:
def __init__(self, useTFBase, useTFFire, catchArsonist, useMA, prioritizeNew):
self.tfBase = useTFBase
self.tfFire = useTFFire
self.catchArsonist = catchArsonist
self.useMA = useMA
self.prioritizeNew = prioritizeNew
self.step = 0
def init(self, world, mem, memKeys):
self.memKeys = memKeys
if self.tfBase:
self.tfGen = gengoal.TFStackGen(mem, memKeys)
else:
self.predefGen = gengoal.ExogenousGoalGen(world)
self.fireGen = gengoal.TFFireGen(mem, memKeys)
if self.useMA:
self.arsonGen = gengoal.XPGoalGen(mem, memKeys)
else:
self.arsonGen = gengoal.ArsonistCatcher(mem, memKeys)
self.mem = mem
mem.set(self.memKeys.MEM_GOALS, goalorg.GoalQueue())
def prioritize(self, goals):
for goal in goals:
goal.priority += float(self.step) / (self.step + 1)
def get_goals(self, world, verbose, goalgen):
goals = goalgen.gen_goals(verbose)
if not goals or [None] == goals:
goals = []
if not hasattr(goals, '__iter__'):
goals = [goals]
if self.prioritizeNew:
self.prioritize(goals)
return goals
def get_new_goals(self, world, verbose = 2):
if self.tfBase:
newgoals = self.get_goals(world, verbose, self.tfGen)
if verbose >= 2:
print "TF tree stacking goal generator activated. Goals:"
for goal in newgoals:
print "\t", goal, " ",
print
if newgoals:
self.mem._update(self.memKeys.MEM_GOALS, newgoals)
else:
newgoals = self.get_goals(world, verbose, self.predefGen)
if verbose >= 2:
print "Loading from predefined goals. Goals:"
for goal in newgoals:
print "\t", goal, " ",
print
if newgoals:
self.mem._update(self.memKeys.MEM_GOALS, newgoals)
if self.tfFire:
newgoals = self.get_goals(world, verbose, self.fireGen)
if verbose >= 2:
print "TF tree fire goal generator activated. Goals:"
for goal in newgoals:
print "\t", goal, " ",
print
if newgoals:
self.mem._update(self.memKeys.MEM_GOALS, newgoals)
if self.catchArsonist:
newgoals = self.get_goals(world, verbose, self.arsonGen)
if self.prioritizeNew:
self.prioritize(newgoals)
if verbose >= 2:
if not self.useMA:
print "Simulated ",
print "GDA K-track goal generation activated. Goals:"
for goal in newgoals:
print "\t", goal, " ",
print
if newgoals:
self.mem._update(self.memKeys.MEM_GOALS, newgoals)
self.step += 1
def run(self, verbose = 2):
world = self.mem.get(self.memKeys.MEM_STATES)[-1]
self.get_new_goals(world, verbose)
|
import numpy as np
import matplotlib.patches as patches
import matplotlib.pyplot as plt
ax = plt.axes(polar = True)
theta = np.linspace(0, 2 * np.pi, 8, endpoint = False)
radius = .25 + .75 * np.random.random(size = len(theta))
points = np.vstack((theta, radius)).transpose()
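# each row of points is a (theta, radius) vertex, interpreted in the polar axes' data coordinates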
plt.gca().add_patch(patches.Polygon(points, color = '.75'))
plt.show()
|
import os
import re
import inspect
def _get_parser_list(dirname):
    files = [f.replace('.py', '') for f in os.listdir(dirname)
             if not f.startswith('__')]
return files
def _import_parsers(parserfiles):
m = re.compile('.+parsers',re.I)
_modules = __import__('weatherterm.parsers',globals(),locals(),parserfiles,0)
_parsers = [(k,v) for k,v in inspect.getmembers(_modules) if inspect.ismodule(v) and m.match(k)]
_classes = dict()
for k,v in _parsers:
_classes.update({k:v for k,v in inspect.getmembers(v) if inspect.isclass(v) and m.match(k)})
return _classes
def load(dirname):
parserfiles = _get_parser_list(dirname)
return _import_parsers(parserfiles)
|