from enum import Enum
import pandas as pd
from typing import Dict
import json
class SupportedDataTypes(Enum):
number_type = 1
string_type = 2
list_type = 3
bool_type = 4
def is_bool(value) -> bool:
return str(value).lower() in ["true", "false"]
def is_number(value) -> bool:
try:
float(str(value))
return True
except ValueError:
return False
def is_string(value) -> bool:
return isinstance(value, str)
def is_list(value) -> bool:
try:
json.loads(str(value).replace("'", '"'))
return True
except Exception:
return False
def get_datatype(value):
    if is_number(value):
        return SupportedDataTypes.number_type
    elif is_bool(value):
        return SupportedDataTypes.bool_type
    elif is_list(value):
        return SupportedDataTypes.list_type
    else:
        return SupportedDataTypes.string_type
def infer_datatypes_for_columns(df: pd.DataFrame) -> Dict[str, SupportedDataTypes]:
def check_series(series: pd.Series, fun):
return series.apply(fun).all()
    def infer_datatype(col_series: pd.Series):
if check_series(col_series, is_number):
return SupportedDataTypes.number_type
elif check_series(col_series, is_bool):
return SupportedDataTypes.bool_type
elif check_series(col_series, is_list):
return SupportedDataTypes.list_type
else:
return SupportedDataTypes.string_type
cols = df.columns
    return {col: infer_datatype(df[col]) for col in cols}
def convert_value_to_supported_data_type(value: str, data_type: SupportedDataTypes):
if data_type == SupportedDataTypes.number_type:
return float(value)
elif data_type == SupportedDataTypes.bool_type:
return value.lower() == "true"
    elif data_type == SupportedDataTypes.list_type:
        # Normalize single quotes so values accepted by is_list() also parse here.
        return json.loads(str(value).replace("'", '"'))
else:
return str(value)
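# Minimal usage sketch of the helpers above (illustrative only; the column names
# and values are made up).
if __name__ == "__main__":
    example_df = pd.DataFrame({
        "price": ["1.5", "2", "3.25"],
        "in_stock": ["true", "False", "true"],
        "tags": ["['a', 'b']", "[]", "['c']"],
        "name": ["alpha", "beta", "gamma"],
    })
    inferred = infer_datatypes_for_columns(example_df)
    # Expected: price -> number_type, in_stock -> bool_type, tags -> list_type, name -> string_type
    print(inferred)
    print(convert_value_to_supported_data_type("['a', 'b']", inferred["tags"]))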
|
from __future__ import absolute_import, print_function, unicode_literals
# Environment variables to disable Git stdin prompts for username, password, etc.
GIT_NO_PROMPTS = {
"GIT_SSH_COMMAND": "ssh -oBatchMode=yes",
"GIT_TERMINAL_PROMPT": "0",
}
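# Usage sketch (assumption, not shown in this module): these are merged into the
# environment of git subprocesses, e.g.
#   subprocess.run(["git", "fetch"], env={**os.environ, **GIT_NO_PROMPTS})
# so that git fails fast instead of blocking on an interactive prompt.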
BREWWEB_URL = "https://brewweb.engineering.redhat.com/brew"
# Environment variables that should be set for doozer's interaction with the database used to store and retrieve build records.
# DB ENV VARS
DB_HOST = "DOOZER_DB_HOST"
DB_PORT = "DOOZER_DB_PORT"
DB_USER = "DOOZER_DB_USER"
DB_PWD = "DOOZER_DB_PASSWORD"
DB_NAME = "DOOZER_DB_NAME"
# default db parameters
default_db_params = {
DB_NAME: "doozer_build",
DB_HOST: "localhost",
DB_PORT: "3306"
}
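# Sketch of how these names might be combined (illustrative, not part of doozer):
#   params = dict(default_db_params)
#   params.update({k: os.environ[k] for k in (DB_HOST, DB_PORT, DB_USER, DB_PWD, DB_NAME)
#                  if k in os.environ})
# i.e. the defaults above apply wherever the corresponding DOOZER_DB_* variable is unset.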
|
# -*- coding: utf-8 -*-
DESCRIPTION = (
    'A pyramid extension that provides one application programming '
    'interface to read and write data in different excel file formats'
)
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.viewcode',
]
intersphinx_mapping = {
'pyexcel': ('http://pyexcel.readthedocs.io/en/latest/', None),
}
spelling_word_list_filename = 'spelling_wordlist.txt'
templates_path = ['_templates']
source_suffix = '.rst'
master_doc = 'index'
project = u'pyramid-excel'
copyright = u'2015-2017 Onni Software Ltd.'
version = '0.0.5'
release = '0.0.5'
exclude_patterns = []
pygments_style = 'sphinx'
html_theme = 'pyramid'
html_theme_options = {'github_url': 'https://github.com/Pylons/pyramid'}
html_theme_path = ['_themes']
html_static_path = ['_static']
htmlhelp_basename = 'pyramid-exceldoc'
latex_elements = {}
latex_documents = [
('index', 'pyramid-excel.tex',
'pyramid-excel Documentation',
'Onni Software Ltd.', 'manual'),
]
man_pages = [
('index', 'pyramid-excel',
'pyramid-excel Documentation',
[u'Onni Software Ltd.'], 1)
]
texinfo_documents = [
('index', 'pyramid-excel',
'pyramid-excel Documentation',
'Onni Software Ltd.', 'pyramid-excel',
DESCRIPTION,
'Miscellaneous'),
]
|
import logging
from bootstrapvz.base import Task
from bootstrapvz.common import phases
from bootstrapvz.providers.ec2.tasks import ami
# TODO: Merge with the method available in wip-integration-tests branch
def waituntil(predicate, timeout=5, interval=0.05):
import time
	threshold = time.time() + timeout
	while time.time() < threshold:
if predicate():
return True
time.sleep(interval)
return False
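# Usage sketch (illustrative): waituntil polls `predicate` every `interval` seconds
# and returns True as soon as the predicate returns a truthy value, or False once
# `timeout` seconds have elapsed, e.g.
#   waituntil(instance_running, timeout=120, interval=5)
# as done in DeregisterAMI.run below.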
class LaunchEC2Instance(Task):
description = 'Launching EC2 instance'
phase = phases.image_registration
predecessors = [ami.RegisterAMI]
@classmethod
def run(cls, info):
conn = info._ec2['connection']
r = conn.run_instances(ImageId=info._ec2['image']['ImageId'],
MinCount=1,
MaxCount=1,
SecurityGroupIds=info.manifest.plugins['ec2_launch'].get('security_group_ids'),
KeyName=info.manifest.plugins['ec2_launch'].get('ssh_key'),
InstanceType=info.manifest.plugins['ec2_launch'].get('instance_type',
'm3.medium'))
info._ec2['instance'] = r['Instances'][0]
if 'tags' in info.manifest.plugins['ec2_launch']:
raw_tags = info.manifest.plugins['ec2_launch']['tags']
formatted_tags = {k: v.format(**info.manifest_vars) for k, v in raw_tags.items()}
tags = [{'Key': k, 'Value': v} for k, v in formatted_tags.items()]
conn.create_tags(Resources=[info._ec2['instance']['InstanceId']],
Tags=tags)
class PrintPublicIPAddress(Task):
description = 'Waiting for the instance to launch'
phase = phases.image_registration
predecessors = [LaunchEC2Instance]
@classmethod
def run(cls, info):
conn = info._ec2['connection']
logger = logging.getLogger(__name__)
filename = info.manifest.plugins['ec2_launch']['print_public_ip']
if not filename:
filename = '/dev/null'
f = open(filename, 'w')
try:
waiter = conn.get_waiter('instance_status_ok')
waiter.wait(InstanceIds=[info._ec2['instance']['InstanceId']],
Filters=[{'Name': 'instance-state-name', 'Values': ['running']}])
info._ec2['instance'] = conn.describe_instances(InstanceIds=[info._ec2['instance']['InstanceId']])['Reservations'][0]['Instances'][0]
logger.info('******* EC2 IP ADDRESS: %s *******' % info._ec2['instance']['PublicIpAddress'])
f.write(info._ec2['instance']['PublicIpAddress'])
except Exception:
logger.error('Could not get IP address for the instance')
f.write('')
f.close()
class DeregisterAMI(Task):
description = 'Deregistering AMI'
phase = phases.image_registration
predecessors = [LaunchEC2Instance]
@classmethod
def run(cls, info):
ec2 = info._ec2
logger = logging.getLogger(__name__)
		def instance_running():
			# Refresh the instance description via the boto3 API; the boto2-style
			# .update()/.state calls do not exist on the dict stored by LaunchEC2Instance.
			reservations = ec2['connection'].describe_instances(
				InstanceIds=[ec2['instance']['InstanceId']])['Reservations']
			ec2['instance'] = reservations[0]['Instances'][0]
			return ec2['instance']['State']['Name'] == 'running'
		if waituntil(instance_running, timeout=120, interval=5):
			ec2['connection'].deregister_image(ImageId=ec2['image']['ImageId'])
			info._ec2['snapshot'].delete()
else:
logger.error('Timeout while booting instance')
|
from .script_ck import *
|
from graphserver.graphdb import GraphDatabase
from graphserver.ext.gtfs.gtfsdb import GTFSDatabase
from graphserver.ext.osm.osmdb import OSMDB
from graphserver.ext.osm.profiledb import ProfileDB
from graphserver import compiler
from graphserver.core import Graph
import sys
from sys import argv
def process_street_graph(osmdb_filename, graphdb_filename, profiledb_filename, slogs={}):
OSMDB_FILENAME = "ext/osm/bartarea.sqlite"
GRAPHDB_FILENAME = "bartstreets.db"
print( "Opening OSM-DB '%s'"%osmdb_filename )
osmdb = OSMDB( osmdb_filename )
if profiledb_filename:
print( "Opening ProfileDB '%s'"%profiledb_filename )
profiledb = ProfileDB( profiledb_filename )
else:
print( "No ProfileDB supplied" )
profiledb = None
g = Graph()
compiler.load_streets_to_graph( g, osmdb, profiledb, slogs, reporter=sys.stdout )
graphdb = GraphDatabase( graphdb_filename, overwrite=True )
graphdb.populate( g, reporter=sys.stdout )
def process_transit_graph(graphdb_filename, gtfsdb_filenames, osmdb_filename=None, profiledb_filename=None, agency_id=None, link_stations=False, slogs={}):
g = Graph()
if profiledb_filename:
print( "Opening ProfileDB '%s'"%profiledb_filename )
profiledb = ProfileDB( profiledb_filename )
else:
print( "No ProfileDB supplied" )
profiledb = None
if osmdb_filename:
# Load osmdb ===============================
print( "Opening OSM-DB '%s'"%osmdb_filename )
osmdb = OSMDB( osmdb_filename )
compiler.load_streets_to_graph( g, osmdb, profiledb, slogs, reporter=sys.stdout )
# Load gtfsdb ==============================
for i, gtfsdb_filename in enumerate(gtfsdb_filenames):
gtfsdb = GTFSDatabase( gtfsdb_filename )
service_ids = [x.encode("ascii") for x in gtfsdb.service_ids()]
compiler.load_gtfsdb_to_boardalight_graph(g, str(i), gtfsdb, agency_id=agency_id, service_ids=service_ids)
if osmdb_filename:
compiler.load_transit_street_links_to_graph( g, osmdb, gtfsdb, reporter=sys.stdout )
if link_stations:
compiler.link_nearby_stops( g, gtfsdb )
# Export to graphdb ========================
graphdb = GraphDatabase( graphdb_filename, overwrite=True )
graphdb.populate( g, reporter=sys.stdout )
def main():
from optparse import OptionParser
usage = """usage: python compile_graph.py [options] <graphdb_filename> """
parser = OptionParser(usage=usage)
parser.add_option("-o", "--osmdb", dest="osmdb_filename", default=None,
help="conflate with the compiled OSMDB", metavar="FILE")
parser.add_option("-l", "--link",
action="store_true", dest="link", default=False,
help="create walking links between adjacent/nearby stations if not compiling with an OSMDB")
parser.add_option("-g", "--gtfsdb",
action="append", dest="gtfsdb_files", default=[],
help="compile with the specified GTFS file(s)")
parser.add_option("-p", "--profiledb",
dest="profiledb_filename", default=None,
help="compile road network with elevation information from profiledb")
parser.add_option("-s", "--slog",
action="append", dest="slog_strings", default=[],
help="specify slog for highway type, in highway_type:slog form. For example, 'motorway:10.5'")
(options, args) = parser.parse_args()
slogs = {}
for slog_string in options.slog_strings:
highway_type,slog_penalty = slog_string.split(":")
slogs[highway_type] = float(slog_penalty)
    print( slogs )
    if len(args) != 1 or (not options.osmdb_filename and not len(options.gtfsdb_files)):
parser.print_help()
exit(-1)
graphdb_filename = args[0]
# just street graph compilation
if options.osmdb_filename and not len(options.gtfsdb_files):
process_street_graph(options.osmdb_filename, graphdb_filename, options.profiledb_filename, slogs)
exit(0)
process_transit_graph(graphdb_filename, options.gtfsdb_files,
osmdb_filename=options.osmdb_filename,
profiledb_filename=options.profiledb_filename,
link_stations=options.link and not options.osmdb_filename, slogs=slogs)
exit(0)
if __name__=='__main__': main()
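# Example invocation (illustrative; the file names are hypothetical):
#   python compile_graph.py -o region.osmdb -p region.profiledb \
#       -g transit.gtfsdb -s motorway:10.5 -s trunk:5 graph.db
# Each -s/--slog option is split on ':' into a highway type and a float penalty,
# e.g. 'motorway:10.5' becomes slogs['motorway'] = 10.5.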
|
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Gaudi(CMakePackage):
"""An experiment-independent HEP event data processing framework"""
homepage = "http://gaudi.web.cern.ch/gaudi/"
git = "https://gitlab.cern.ch/gaudi/Gaudi.git"
url = "https://gitlab.cern.ch/gaudi/Gaudi/-/archive/v33r1/Gaudi-v33r1.tar.gz"
tags = ['hep']
version('master', branch='master')
version('35.0', sha256='c01b822f9592a7bf875b9997cbeb3c94dea97cb13d523c12649dbbf5d69b5fa6')
version('34.0', sha256='28fc4abb5a6b08da5a6b1300451c7e8487f918b055939877219d454abf7668ae')
version('33.2', sha256='26aaf9c4ff237a60ec79af9bd18ad249fc91c16e297ba77e28e4a256123db6e5')
version('33.1', sha256='7eb6b2af64aeb965228d4b6ea66c7f9f57f832f93d5b8ad55c9105235af5b042')
version('33.0', sha256='76a967c41f579acc432593d498875dd4dc1f8afd5061e692741a355a9cf233c8')
version('32.2', sha256='e9ef3eb57fd9ac7b9d5647e278a84b2e6263f29f0b14dbe1321667d44d969d2e')
version('31.0', commit='aeb156f0c40571b5753a9e1dab31e331491b2f3e')
version('30.5', commit='2c70e73ee5b543b26197b90dd59ea4e4d359d230')
maintainers = ['drbenmorgan', "vvolkl"]
variant('optional', default=False,
description='Build most optional components and tests')
variant('docs', default=False,
description='Build documentation with Doxygen')
variant('vtune', default=False,
description='Build with Intel VTune profiler support')
# only build subdirectory GaudiExamples when +optional
patch("build_testing.patch", when="@:34.99")
# fixes for the cmake config which could not find newer boost versions
patch("link_target_fixes.patch", when="@33.0:34.99")
patch("link_target_fixes32.patch", when="@:32.2")
# These dependencies are needed for a minimal Gaudi build
depends_on('aida')
depends_on('boost@1.67.0: +python')
depends_on('clhep')
depends_on('cmake', type='build')
depends_on('cppgsl')
depends_on('fmt', when='@33.2:')
depends_on('intel-tbb')
depends_on('libuuid')
depends_on('nlohmann-json', when="@35.0:")
depends_on('python', type=('build', 'run'))
depends_on('python@:3.7.99', when='@32.2:34.99', type=('build', 'run'))
depends_on('python@:2.99.99', when='@:32.1', type=('build', 'run'))
depends_on('py-setuptools@:45.99.99', when='^python@:2.7.99', type='build')
depends_on('py-six', type=('build', 'run'))
depends_on('py-xenv@1:', type=('build', 'run'))
depends_on('range-v3')
depends_on('root +python +root7 +ssl +tbb +threads')
depends_on('zlib')
    # TODO: this should be a test dependency only
depends_on('py-nose', when="@35.0", type=('build', 'run'))
# Adding these dependencies triggers the build of most optional components
depends_on('cppgsl', when='+optional')
depends_on('cppunit', when='+optional')
depends_on('doxygen +graphviz', when='+docs')
depends_on('gperftools', when='+optional')
depends_on('gdb', when='+optional')
depends_on('gsl', when='+optional')
depends_on('heppdt@:2.99.99', when='+optional')
depends_on('jemalloc', when='+optional')
depends_on('libpng', when='+optional')
depends_on('libunwind', when='+optional')
depends_on('py-networkx@:2.2', when='+optional ^python@:2.7.99')
depends_on('py-networkx', when='+optional ^python@3.0.0:')
depends_on('py-setuptools', when='+optional')
depends_on('py-nose', when='+optional')
depends_on('relax', when='@:33.99 +optional')
depends_on('xerces-c', when='+optional')
# NOTE: pocl cannot be added as a minimal OpenCL implementation because
# ROOT does not like being exposed to LLVM symbols.
# The Intel VTune dependency is taken aside because it requires a license
depends_on('intel-parallel-studio -mpi +vtune', when='+vtune')
def cmake_args(self):
args = [
self.define_from_variant("BUILD_TESTING", "optional"),
self.define_from_variant("GAUDI_USE_AIDA", "optional"),
self.define_from_variant("GAUDI_USE_CPPUNIT", "optional"),
self.define_from_variant("GAUDI_USE_HEPPDT", "optional"),
self.define_from_variant("GAUDI_USE_JEMALLOC", "optional"),
self.define_from_variant("GAUDI_USE_UNWIND", "optional"),
self.define_from_variant("GAUDI_USE_XERCESC", "optional"),
self.define_from_variant("GAUDI_USE_DOXYGEN", "docs"),
# needed to build core services like rndmsvc
self.define("GAUDI_USE_CLHEP", True),
self.define("GAUDI_USE_PYTHON_MAJOR",
str(self.spec['python'].version.up_to(1))),
# todo:
self.define("GAUDI_USE_INTELAMPLIFIER", False),
self.define("GAUDI_USE_GPERFTOOLS", False), ]
# this is not really used in spack builds, but needs to be set
if self.spec.version < Version('34.99'):
args.append("-DHOST_BINARY_TAG=x86_64-linux-gcc9-opt")
return args
def setup_run_environment(self, env):
# environment as in Gaudi.xenv
env.prepend_path('PATH', self.prefix.scripts)
env.prepend_path('PYTHONPATH', self.prefix.python)
env.prepend_path('ROOT_INCLUDE_PATH', self.prefix.include)
def url_for_version(self, version):
major = str(version[0])
minor = str(version[1])
url = "https://gitlab.cern.ch/gaudi/Gaudi/-/archive/v{0}r{1}/Gaudi-v{0}r{1}.tar.gz".format(major, minor)
return url
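    # Illustrative mapping performed by url_for_version above (not executed):
    #   Version('33.1') -> major '33', minor '1'
    #   -> https://gitlab.cern.ch/gaudi/Gaudi/-/archive/v33r1/Gaudi-v33r1.tar.gz
    # matching the v<major>r<minor> tag scheme used in the `url` attribute at the top.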
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
__all__ = [
'GetIotHubResourceResult',
'AwaitableGetIotHubResourceResult',
'get_iot_hub_resource',
]
@pulumi.output_type
class GetIotHubResourceResult:
"""
The description of the IoT hub.
"""
def __init__(__self__, etag=None, id=None, location=None, name=None, properties=None, sku=None, tags=None, type=None):
if etag and not isinstance(etag, str):
raise TypeError("Expected argument 'etag' to be a str")
pulumi.set(__self__, "etag", etag)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if location and not isinstance(location, str):
raise TypeError("Expected argument 'location' to be a str")
pulumi.set(__self__, "location", location)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if properties and not isinstance(properties, dict):
raise TypeError("Expected argument 'properties' to be a dict")
pulumi.set(__self__, "properties", properties)
if sku and not isinstance(sku, dict):
raise TypeError("Expected argument 'sku' to be a dict")
pulumi.set(__self__, "sku", sku)
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", tags)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def etag(self) -> Optional[str]:
"""
The Etag field is *not* required. If it is provided in the response body, it must also be provided as a header per the normal ETag convention.
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter
def id(self) -> str:
"""
The resource identifier.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def location(self) -> str:
"""
The resource location.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> str:
"""
The resource name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def properties(self) -> 'outputs.IotHubPropertiesResponse':
"""
IotHub properties
"""
return pulumi.get(self, "properties")
@property
@pulumi.getter
def sku(self) -> 'outputs.IotHubSkuInfoResponse':
"""
IotHub SKU info
"""
return pulumi.get(self, "sku")
@property
@pulumi.getter
def tags(self) -> Optional[Mapping[str, str]]:
"""
The resource tags.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> str:
"""
The resource type.
"""
return pulumi.get(self, "type")
class AwaitableGetIotHubResourceResult(GetIotHubResourceResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetIotHubResourceResult(
etag=self.etag,
id=self.id,
location=self.location,
name=self.name,
properties=self.properties,
sku=self.sku,
tags=self.tags,
type=self.type)
def get_iot_hub_resource(resource_group_name: Optional[str] = None,
resource_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetIotHubResourceResult:
"""
The description of the IoT hub.
:param str resource_group_name: The name of the resource group that contains the IoT hub.
:param str resource_name: The name of the IoT hub.
"""
__args__ = dict()
__args__['resourceGroupName'] = resource_group_name
__args__['resourceName'] = resource_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:devices/v20180401:getIotHubResource', __args__, opts=opts, typ=GetIotHubResourceResult).value
return AwaitableGetIotHubResourceResult(
etag=__ret__.etag,
id=__ret__.id,
location=__ret__.location,
name=__ret__.name,
properties=__ret__.properties,
sku=__ret__.sku,
tags=__ret__.tags,
type=__ret__.type)
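# Minimal usage sketch (assumption: executed inside a Pulumi program; the resource
# group and hub names below are hypothetical):
#   hub = get_iot_hub_resource(resource_group_name="my-rg", resource_name="my-hub")
#   pulumi.export("iothub_location", hub.location)
#   pulumi.export("iothub_sku", hub.sku)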
|
# Copyright 2016 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import contextlib
import os
import re
import shutil
from textwrap import dedent
from pants.base.build_environment import get_buildroot
from pants.util.contextutil import temporary_dir
from pants.util.dirutil import safe_file_dump
from pants_test.pants_run_integration_test import PantsRunIntegrationTest
class IncompleteCustomScalaIntegrationTest(PantsRunIntegrationTest):
@contextlib.contextmanager
def tmp_custom_scala(self, path_suffix):
"""Temporarily create a BUILD file in the root for custom scala testing."""
if os.path.exists(self.tmp_build_file_path):
      raise RuntimeError('BUILD file already exists; failing to avoid overwriting it. '
                         'Ensure the file does not exist from a previous run.')
path = os.path.join(self.target_path, path_suffix)
try:
# Bootstrap the BUILD file.
shutil.copyfile(path, self.tmp_build_file_path)
# And create an empty scalastyle config.
with self.tmp_scalastyle_config() as scalastyle_config_option:
yield scalastyle_config_option
finally:
os.remove(self.tmp_build_file_path)
@contextlib.contextmanager
def tmp_scalastyle_config(self):
with temporary_dir(root_dir=get_buildroot()) as scalastyle_dir:
path = os.path.join(scalastyle_dir, 'config.xml')
safe_file_dump(path, '''<scalastyle/>''')
yield '--lint-scalastyle-config={}'.format(path)
def pants_run(self, options=None):
if options is None:
options = []
full_options = options + ['clean-all', 'compile', 'lint', self.target_path]
return self.run_pants(full_options)
def run_repl(self, target, program, options=None):
"""Run a repl for the given target with the given input, and return stdout_data."""
command = ['repl']
if options:
command.extend(options)
command.extend([target, '--quiet'])
return self.run_pants(command=command, stdin_data=program)
@classmethod
def hermetic(cls):
return True
def setUp(self):
self.target_path = 'testprojects/src/scala/org/pantsbuild/testproject/custom_scala_platform'
self.tmp_build_file_path = 'BUILD.CustomScalaIntegTests'
def test_working_210(self):
with self.tmp_scalastyle_config() as scalastyle_config_option:
pants_run = self.pants_run(options=['--scala-version=2.10', scalastyle_config_option])
self.assert_success(pants_run)
assert re.search('bootstrap-scalastyle_2_10', pants_run.stdout_data), pants_run.stdout_data
def test_working_211(self):
with self.tmp_scalastyle_config() as scalastyle_config_option:
pants_run = self.pants_run(options=['--scala-version=2.11', scalastyle_config_option])
self.assert_success(pants_run)
assert re.search('bootstrap-scalastyle_2_11', pants_run.stdout_data), pants_run.stdout_data
def test_working_212(self):
with self.tmp_scalastyle_config() as scalastyle_config_option:
pants_run = self.pants_run(options=['--scala-version=2.12', scalastyle_config_option])
self.assert_success(pants_run)
assert re.search('bootstrap-scalastyle_2_12', pants_run.stdout_data), pants_run.stdout_data
def test_repl_working_custom_211(self):
with self.tmp_custom_scala('custom_211_scalatools.build') as scalastyle_config_option:
pants_run = self.run_repl(
'testprojects/src/scala/org/pantsbuild/testproject/custom_scala_platform',
dedent("""
import org.pantsbuild.testproject.custom_scala_platform
            Hello.main(Seq("World").toArray)
"""),
options=[
'--scala-version=custom',
'--scala-suffix-version=2.11',
scalastyle_config_option,
]
)
# Make sure this didn't happen:
# FAILURE: No bootstrap callback registered for //:scala-repl in scala
self.assert_success(pants_run)
def test_working_custom_211(self):
with self.tmp_custom_scala('custom_211_scalatools.build') as scalastyle_config_option:
pants_run = self.pants_run(
options=[
'--scala-version=custom',
'--scala-suffix-version=2.11',
scalastyle_config_option,
]
)
self.assert_success(pants_run)
assert not re.search('bootstrap-scalastyle_2_10', pants_run.stdout_data)
assert not re.search('bootstrap-scalastyle_2_11', pants_run.stdout_data)
def test_working_custom_212(self):
with self.tmp_custom_scala('custom_212_scalatools.build') as scalastyle_config_option:
pants_run = self.pants_run(
options=[
'--scala-version=custom',
'--scala-suffix-version=2.12',
scalastyle_config_option,
]
)
self.assert_success(pants_run)
assert not re.search('bootstrap-scalastyle_2_11', pants_run.stdout_data)
assert not re.search('bootstrap-scalastyle_2_12', pants_run.stdout_data)
def test_missing_compiler(self):
with self.tmp_custom_scala('custom_211_missing_compiler.build') as scalastyle_config_option:
pants_run = self.pants_run(
options=[
'--scala-version=custom',
'--scala-suffix-version=2.11',
scalastyle_config_option,
]
)
self.assert_failure(pants_run)
assert "Unable to bootstrap tool: 'scalac'" in pants_run.stdout_data
def test_missing_runtime(self):
with self.tmp_custom_scala('custom_211_missing_runtime.build') as scalastyle_config_option:
pants_run = self.pants_run(
options=[
'--scala-version=custom',
'--scala-suffix-version=2.11',
scalastyle_config_option,
]
)
self.assert_failure(pants_run)
|
import os
from kivy.app import App
from kivy.factory import Factory
from kivy.properties import ObjectProperty
from kivy.lang import Builder
from electrum_smart.util import base_units
from ...i18n import _
from .label_dialog import LabelDialog
Builder.load_string('''
#:import os os
<WalletDialog@Popup>:
title: _('Wallets')
id: popup
path: os.path.dirname(app.get_wallet_path())
BoxLayout:
orientation: 'vertical'
padding: '10dp'
FileChooserListView:
id: wallet_selector
dirselect: False
filter_dirs: True
filter: '*.*'
path: root.path
rootpath: root.path
size_hint_y: 0.6
        Widget:
size_hint_y: 0.1
GridLayout:
cols: 3
size_hint_y: 0.1
Button:
                id: new_button
size_hint: 0.1, None
height: '48dp'
text: _('New')
on_release:
popup.dismiss()
root.new_wallet(app, wallet_selector.path)
Button:
id: open_button
size_hint: 0.1, None
height: '48dp'
text: _('Open')
disabled: not wallet_selector.selection
on_release:
popup.dismiss()
root.open_wallet(app)
''')
class WalletDialog(Factory.Popup):
def new_wallet(self, app, dirname):
def cb(text):
if text:
app.load_wallet_by_name(os.path.join(dirname, text))
d = LabelDialog(_('Enter wallet name'), '', cb)
d.open()
def open_wallet(self, app):
app.load_wallet_by_name(self.ids.wallet_selector.selection[0])
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('mail', '0042_noticesettings_review_count'),
]
operations = [
migrations.AddField(
model_name='noticesettings',
name='bulk_count',
field=models.IntegerField(default=200, help_text='\u7528\u6237\u5f53\u5929\u4e2d\u7ee7\u7684\u62d2\u7edd\u6570\u91cf\u9600\u503c\uff0c\u5982\u679c\u8d85\u8fc7\u8be5\u503c\uff0c\u5219\u53d1\u76f8\u5e94\u901a\u77e5', verbose_name='\u7fa4\u53d1\u6570\u91cf\u9600\u503c'),
),
migrations.AlterField(
model_name='noticesettings',
name='bulk_content',
field=models.TextField(help_text='\u7528\u6237\u7fa4\u53d1\u90ae\u4ef6\u7684\u65f6\u5019\u901a\u77e5\u5ba2\u6237\u548c\u5bf9\u5e94\u7684\u6280\u672f\u652f\u6301,\u652f\u6301\u53d8\u91cf: {count}\u8868\u793a\u62d2\u7edd\u6570\u91cf', verbose_name='\u7fa4\u53d1\u90ae\u4ef6\u901a\u77e5'),
),
]
|
import uuid
import json
import health_inspector
g_client = None
CATEGORY_WORKER = 4
HEALTH_INSPECTOR_MODULE_ID = uuid.UUID('4e5f74d0-4705-11ec-abd0-e12370ec4fc6')
def init(client, **kwargs):
"""
:param client:
:param kwargs:
:return:
"""
global g_client
g_client = client
return True
def run(message, **kwargs):
"""
:param bytes message:
:param kwargs:
:return bytes or None: None if post will happen asynchronously
"""
    # The incoming message payload (JSON bytes) is currently not parsed; the
    # inspector is run directly.
    result = health_inspector.main()
    result = "\n".join(result)
    # Strip non-printable control characters, keeping tabs (ord 9).
    result = ''.join([c for c in result if ord(c) > 31 or ord(c) == 9])
    # Turn into bytes for the caller.
    return result.encode('utf-8')
def getinfo():
"""
:return:
"""
return { "type": CATEGORY_WORKER, "version" : {"major": 2, "minor": 0}, "id" : HEALTH_INSPECTOR_MODULE_ID}
def deinit(**kwargs):
"""
:param kwargs:
:return:
"""
return True
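# Minimal local smoke test of the worker lifecycle (sketch; assumes the
# health_inspector module is importable and that `client` may be None when
# running outside the normal host process).
if __name__ == '__main__':
    init(client=None)
    print(getinfo())
    output = run(b'{}')
    print(output.decode('utf-8'))
    deinit()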
|
import os
os.environ["OMP_NUM_THREADS"] = "10"
import sys
import pandas as pd
import numpy as np
import turicreate as tc
for i in range(1, 14):
print("running batch %d" % i)
batch = pd.read_csv("batches/batch_%d_train.dat" % i)
test_users = pd.read_csv("batches/batch_%d_test.dat" % i)
model = tc.ranking_factorization_recommender.create(
tc.SFrame(batch),
'user',
'item',
num_factors=10,
verbose=True,
solver='ials',
max_iterations=50,
ials_confidence_scaling_factor=30
)
results = model.recommend(users=test_users.user.values, k=100, exclude_known=True, verbose=False)
results.to_dataframe()[['user', 'item', 'rank']].to_csv('batches/batch_%d_predictions.dat' % i, sep=' ', header=False, index=False)
|
# -*- coding: utf-8 -*-
from django.apps import AppConfig
class UsersConfig(AppConfig):
name = 'users'
verbose_name = u'用户信息'
|
"""Ray constants used in the Python code."""
import logging
import math
import os
logger = logging.getLogger(__name__)
def env_integer(key, default):
if key in os.environ:
value = os.environ[key]
if value.isdigit():
return int(os.environ[key])
logger.debug(f"Found {key} in environment, but value must "
f"be an integer. Got: {value}. Returning "
f"provided default {default}.")
return default
return default
def env_bool(key, default):
if key in os.environ:
        return os.environ[key].lower() == "true"
return default
ID_SIZE = 28
# The default maximum number of bytes to allocate to the object store unless
# overridden by the user.
DEFAULT_OBJECT_STORE_MAX_MEMORY_BYTES = 200 * 10**9
# The default proportion of available memory allocated to the object store
DEFAULT_OBJECT_STORE_MEMORY_PROPORTION = 0.3
# The smallest cap on the memory used by the object store that we allow.
# This must be greater than MEMORY_RESOURCE_UNIT_BYTES
OBJECT_STORE_MINIMUM_MEMORY_BYTES = 75 * 1024 * 1024
# The default maximum number of bytes that the non-primary Redis shards are
# allowed to use unless overridden by the user.
DEFAULT_REDIS_MAX_MEMORY_BYTES = 10**10
# The smallest cap on the memory used by Redis that we allow.
REDIS_MINIMUM_MEMORY_BYTES = 10**7
# Above this number of bytes, raise an error by default unless the user sets
# RAY_ALLOW_SLOW_STORAGE=1. This avoids swapping with large object stores.
REQUIRE_SHM_SIZE_THRESHOLD = 10**10
# If a user does not specify a port for the primary Ray service,
# we attempt to start the service running at this port.
DEFAULT_PORT = 6379
RAY_ADDRESS_ENVIRONMENT_VARIABLE = "RAY_ADDRESS"
RAY_NAMESPACE_ENVIRONMENT_VARIABLE = "RAY_NAMESPACE"
RAY_RUNTIME_ENV_ENVIRONMENT_VARIABLE = "RAY_RUNTIME_ENV"
DEFAULT_DASHBOARD_IP = "127.0.0.1"
DEFAULT_DASHBOARD_PORT = 8265
REDIS_KEY_DASHBOARD = "dashboard"
PROMETHEUS_SERVICE_DISCOVERY_FILE = "prom_metrics_service_discovery.json"
# Default resource requirements for actors when no resource requirements are
# specified.
DEFAULT_ACTOR_METHOD_CPU_SIMPLE = 1
DEFAULT_ACTOR_CREATION_CPU_SIMPLE = 0
# Default resource requirements for actors when some resource requirements are
# specified.
DEFAULT_ACTOR_METHOD_CPU_SPECIFIED = 0
DEFAULT_ACTOR_CREATION_CPU_SPECIFIED = 1
# Default number of return values for each actor method.
DEFAULT_ACTOR_METHOD_NUM_RETURN_VALS = 1
# If a remote function or actor (or some other export) has serialized size
# greater than this quantity, print a warning.
PICKLE_OBJECT_WARNING_SIZE = 10**7
# If remote functions with the same source are imported this many times, then
# print a warning.
DUPLICATE_REMOTE_FUNCTION_THRESHOLD = 100
# The maximum resource quantity that is allowed. TODO(rkn): This could be
# relaxed, but the current implementation of the node manager will be slower
# for large resource quantities due to bookkeeping of specific resource IDs.
MAX_RESOURCE_QUANTITY = 100e12
# Each memory "resource" counts as this many bytes of memory.
MEMORY_RESOURCE_UNIT_BYTES = 1
# Number of units 1 resource can be subdivided into.
MIN_RESOURCE_GRANULARITY = 0.0001
def round_to_memory_units(memory_bytes, round_up):
"""Round bytes to the nearest memory unit."""
return from_memory_units(to_memory_units(memory_bytes, round_up))
def from_memory_units(memory_units):
"""Convert from memory units -> bytes."""
return memory_units * MEMORY_RESOURCE_UNIT_BYTES
def to_memory_units(memory_bytes, round_up):
"""Convert from bytes -> memory units."""
value = memory_bytes / MEMORY_RESOURCE_UNIT_BYTES
if value < 1:
raise ValueError(
"The minimum amount of memory that can be requested is {} bytes, "
"however {} bytes was asked.".format(MEMORY_RESOURCE_UNIT_BYTES,
memory_bytes))
if isinstance(value, float) and not value.is_integer():
# TODO(ekl) Ray currently does not support fractional resources when
# the quantity is greater than one. We should fix memory resources to
# be allocated in units of bytes and not 100MB.
if round_up:
value = int(math.ceil(value))
else:
value = int(math.floor(value))
return int(value)
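# Worked example for the helpers above (illustrative): with MEMORY_RESOURCE_UNIT_BYTES = 1,
# to_memory_units(1024, round_up=True) == 1024 and round_to_memory_units(1024, round_up=True)
# == 1024, while requesting fewer bytes than one memory unit (e.g. 0) raises ValueError.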
# Different types of Ray errors that can be pushed to the driver.
# TODO(rkn): These should be defined in flatbuffers and must be synced with
# the existing C++ definitions.
WAIT_FOR_CLASS_PUSH_ERROR = "wait_for_class"
PICKLING_LARGE_OBJECT_PUSH_ERROR = "pickling_large_object"
WAIT_FOR_FUNCTION_PUSH_ERROR = "wait_for_function"
TASK_PUSH_ERROR = "task"
REGISTER_REMOTE_FUNCTION_PUSH_ERROR = "register_remote_function"
FUNCTION_TO_RUN_PUSH_ERROR = "function_to_run"
VERSION_MISMATCH_PUSH_ERROR = "version_mismatch"
CHECKPOINT_PUSH_ERROR = "checkpoint"
REGISTER_ACTOR_PUSH_ERROR = "register_actor"
WORKER_CRASH_PUSH_ERROR = "worker_crash"
WORKER_DIED_PUSH_ERROR = "worker_died"
WORKER_POOL_LARGE_ERROR = "worker_pool_large"
PUT_RECONSTRUCTION_PUSH_ERROR = "put_reconstruction"
INFEASIBLE_TASK_ERROR = "infeasible_task"
RESOURCE_DEADLOCK_ERROR = "resource_deadlock"
REMOVED_NODE_ERROR = "node_removed"
MONITOR_DIED_ERROR = "monitor_died"
LOG_MONITOR_DIED_ERROR = "log_monitor_died"
REPORTER_DIED_ERROR = "reporter_died"
DASHBOARD_AGENT_DIED_ERROR = "dashboard_agent_died"
DASHBOARD_DIED_ERROR = "dashboard_died"
RAYLET_CONNECTION_ERROR = "raylet_connection_error"
DETACHED_ACTOR_ANONYMOUS_NAMESPACE_ERROR = "detached_actor_anonymous_namespace"
# Used in gpu detection
RESOURCE_CONSTRAINT_PREFIX = "accelerator_type:"
RESOURCES_ENVIRONMENT_VARIABLE = "RAY_OVERRIDE_RESOURCES"
# The reporter will report its statistics this often (milliseconds).
REPORTER_UPDATE_INTERVAL_MS = env_integer("REPORTER_UPDATE_INTERVAL_MS", 2500)
# Number of attempts to ping the Redis server. See
# `services.py:wait_for_redis_to_start`.
START_REDIS_WAIT_RETRIES = env_integer("RAY_START_REDIS_WAIT_RETRIES", 16)
LOGGER_FORMAT = (
"%(asctime)s\t%(levelname)s %(filename)s:%(lineno)s -- %(message)s")
LOGGER_FORMAT_HELP = f"The logging format. default='{LOGGER_FORMAT}'"
LOGGER_LEVEL = "info"
LOGGER_LEVEL_CHOICES = ["debug", "info", "warning", "error", "critical"]
LOGGER_LEVEL_HELP = ("The logging level threshold, choices=['debug', 'info',"
" 'warning', 'error', 'critical'], default='info'")
LOGGING_ROTATE_BYTES = 512 * 1024 * 1024 # 512MB.
LOGGING_ROTATE_BACKUP_COUNT = 5 # 5 Backup files at max.
# Constants used to define the different process types.
PROCESS_TYPE_REAPER = "reaper"
PROCESS_TYPE_MONITOR = "monitor"
PROCESS_TYPE_RAY_CLIENT_SERVER = "ray_client_server"
PROCESS_TYPE_LOG_MONITOR = "log_monitor"
# TODO(sang): Delete it.
PROCESS_TYPE_REPORTER = "reporter"
PROCESS_TYPE_DASHBOARD = "dashboard"
PROCESS_TYPE_DASHBOARD_AGENT = "dashboard_agent"
PROCESS_TYPE_WORKER = "worker"
PROCESS_TYPE_RAYLET = "raylet"
PROCESS_TYPE_REDIS_SERVER = "redis_server"
PROCESS_TYPE_WEB_UI = "web_ui"
PROCESS_TYPE_GCS_SERVER = "gcs_server"
PROCESS_TYPE_PYTHON_CORE_WORKER_DRIVER = "python-core-driver"
PROCESS_TYPE_PYTHON_CORE_WORKER = "python-core-worker"
# Log file names
MONITOR_LOG_FILE_NAME = f"{PROCESS_TYPE_MONITOR}.log"
LOG_MONITOR_LOG_FILE_NAME = f"{PROCESS_TYPE_LOG_MONITOR}.log"
WORKER_PROCESS_TYPE_IDLE_WORKER = "ray::IDLE"
WORKER_PROCESS_TYPE_SPILL_WORKER_NAME = "SpillWorker"
WORKER_PROCESS_TYPE_RESTORE_WORKER_NAME = "RestoreWorker"
WORKER_PROCESS_TYPE_SPILL_WORKER_IDLE = (
f"ray::IDLE_{WORKER_PROCESS_TYPE_SPILL_WORKER_NAME}")
WORKER_PROCESS_TYPE_RESTORE_WORKER_IDLE = (
f"ray::IDLE_{WORKER_PROCESS_TYPE_RESTORE_WORKER_NAME}")
WORKER_PROCESS_TYPE_SPILL_WORKER = (
f"ray::SPILL_{WORKER_PROCESS_TYPE_SPILL_WORKER_NAME}")
WORKER_PROCESS_TYPE_RESTORE_WORKER = (
f"ray::RESTORE_{WORKER_PROCESS_TYPE_RESTORE_WORKER_NAME}")
WORKER_PROCESS_TYPE_SPILL_WORKER_DELETE = (
f"ray::DELETE_{WORKER_PROCESS_TYPE_SPILL_WORKER_NAME}")
WORKER_PROCESS_TYPE_RESTORE_WORKER_DELETE = (
f"ray::DELETE_{WORKER_PROCESS_TYPE_RESTORE_WORKER_NAME}")
LOG_MONITOR_MAX_OPEN_FILES = 200
# The object metadata field uses the following format: It is a comma
# separated list of fields. The first field is mandatory and is the
# type of the object (see types below) or an integer, which is interpreted
# as an error value. The second part is optional and if present has the
# form DEBUG:<breakpoint_id>, it is used for implementing the debugger.
# A constant used as object metadata to indicate the object is cross language.
OBJECT_METADATA_TYPE_CROSS_LANGUAGE = b"XLANG"
# A constant used as object metadata to indicate the object is python specific.
OBJECT_METADATA_TYPE_PYTHON = b"PYTHON"
# A constant used as object metadata to indicate the object is raw bytes.
OBJECT_METADATA_TYPE_RAW = b"RAW"
# A constant used as object metadata to indicate the object is an actor handle.
# This value should be synchronized with the Java definition in
# ObjectSerializer.java
# TODO(fyrestone): Serialize the ActorHandle via the custom type feature
# of XLANG.
OBJECT_METADATA_TYPE_ACTOR_HANDLE = b"ACTOR_HANDLE"
# A constant indicating the debugging part of the metadata (see above).
OBJECT_METADATA_DEBUG_PREFIX = b"DEBUG:"
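# Illustrative metadata values following the format described above (sketch): a bare
# OBJECT_METADATA_TYPE_PYTHON marks a Python-specific object, while something like
# OBJECT_METADATA_TYPE_PYTHON + b"," + OBJECT_METADATA_DEBUG_PREFIX + b"<breakpoint_id>"
# carries the optional debugger field, and a plain integer value is interpreted as an
# error code.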
AUTOSCALER_RESOURCE_REQUEST_CHANNEL = b"autoscaler_resource_request"
# The default password to prevent redis port scanning attack.
# Hex for ray.
REDIS_DEFAULT_PASSWORD = "5241590000000000"
# The default module path to a Python function that sets up the worker env.
DEFAULT_WORKER_SETUP_HOOK = "ray.workers.setup_runtime_env.setup_worker"
# The default module path to a Python function that sets up runtime envs.
DEFAULT_RUNTIME_ENV_SETUP_HOOK = \
"ray.workers.setup_runtime_env.setup_runtime_env"
# The default ip address to bind to.
NODE_DEFAULT_IP = "127.0.0.1"
# The Mach kernel page size in bytes.
MACH_PAGE_SIZE_BYTES = 4096
# Max 64 bit integer value, which is needed to ensure against overflow
# in C++ when passing integer values cross-language.
MAX_INT64_VALUE = 9223372036854775807
# Object Spilling related constants
DEFAULT_OBJECT_PREFIX = "ray_spilled_objects"
GCS_PORT_ENVIRONMENT_VARIABLE = "RAY_GCS_SERVER_PORT"
HEALTHCHECK_EXPIRATION_S = env_integer("RAY_HEALTHCHECK_EXPIRATION_S", 10)
# Filename of "shim process" that sets up Python worker environment.
# Should be kept in sync with kSetupWorkerFilename in
# src/ray/common/constants.h.
SETUP_WORKER_FILENAME = "setup_worker.py"
|
# coding: utf-8
"""
Strava API v3
The [Swagger Playground](https://developers.strava.com/playground) is the easiest way to familiarize yourself with the Strava API by submitting HTTP requests and observing the responses before you write any client code. It will show what a response will look like with different endpoints depending on the authorization scope you receive from your athletes. To use the Playground, go to https://www.strava.com/settings/api and change your “Authorization Callback Domain” to developers.strava.com. Please note, we only support Swagger 2.0. There is a known issue where you can only select one scope at a time. For more information, please check the section “client code” at https://developers.strava.com/docs. # noqa: E501
OpenAPI spec version: 3.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import swagger_client
from swagger_client.models.temperature_stream import TemperatureStream # noqa: E501
from swagger_client.rest import ApiException
class TestTemperatureStream(unittest.TestCase):
"""TemperatureStream unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testTemperatureStream(self):
"""Test TemperatureStream"""
# FIXME: construct object with mandatory attributes with example values
# model = swagger_client.models.temperature_stream.TemperatureStream() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
|
# coding=utf-8
import json
import requests
import importlib # for .reload()
from globalvars import GlobalVars
import threading
# noinspection PyPackageRequirements
import websocket
try:
from collections.abc import Iterable
except ImportError:
from collections import Iterable
from datetime import datetime, timedelta
from glob import glob
from regex import sub
import sys
import traceback
import time
import os
import subprocess as sp
import datahandling
import parsing
import apigetpost
import spamhandling
import classes
import chatcommunicate
from helpers import log, exit_mode, only_blacklists_changed, \
only_modules_changed, blacklist_integrity_check, reload_modules, log_exception
from gitmanager import GitManager
import findspam
from socketscience import SocketScience
import metasmoke_cache
MS_WEBSOCKET_LONG_INTERVAL = 60
MAX_MS_WEBSOCKET_RETRIES_TO_LONG_INTERVAL = 5
MAX_FAILURES = 10  # Conservative: 10 consecutive errors = MS down
NO_ACTIVITY_PINGS_TO_REBOOT = 4
NO_ACTIVITY_PINGS_TO_STANDBY = 5 # This is effectively disabled
NO_ACTIVITY_PINGS_TO_REPORT = 3
# noinspection PyClassHasNoInit,PyBroadException,PyUnresolvedReferences,PyProtectedMember
class Metasmoke:
status_pings_since_scan_activity = 0
scan_stat_snapshot = None
class AutoSwitch:
""" Automatically switch metasmoke status """
MAX_FAILURES = 10 # More than 10 failures == ms down
MAX_SUCCESSES = 1 # More than 1 success == ms up
ping_failure_counter = 0 # Negative values indicate consecutive successes
autoswitch_is_on = True
rw_lock = threading.Lock()
@staticmethod
def ping_failed():
""" Indicate a metasmoke status ping connection failure """
with Metasmoke.AutoSwitch.rw_lock:
if Metasmoke.AutoSwitch.ping_failure_counter < 0:
# Consecutive counter. Switch sign.
Metasmoke.AutoSwitch.ping_failure_counter = 0
Metasmoke.AutoSwitch.ping_failure_counter += 1
current_counter = Metasmoke.AutoSwitch.ping_failure_counter
current_auto = Metasmoke.AutoSwitch.autoswitch_is_on
# MAX_FAILURES is constant so no lock.
if current_counter > Metasmoke.AutoSwitch.MAX_FAILURES and\
GlobalVars.MSStatus.is_up() and current_auto:
log("warning", "Last {} connection(s) to metasmoke failed".format(current_counter) +
" Setting metasmoke status to down.")
chatcommunicate.tell_rooms_with("debug", "**Warning**: {}: ".format(GlobalVars.location) +
"Last {} connection(s) to metasmoke".format(current_counter) +
" failed. Setting metasmoke status to **down**.")
Metasmoke.set_ms_down(tell=False)
@staticmethod
def ping_succeeded():
""" Indicate a metasmoke status ping connection success """
with Metasmoke.AutoSwitch.rw_lock:
if Metasmoke.AutoSwitch.ping_failure_counter > 0:
# Consecutive counter. Switch sign.
Metasmoke.AutoSwitch.ping_failure_counter = 0
Metasmoke.AutoSwitch.ping_failure_counter -= 1
# Negative values for success
current_counter = -Metasmoke.AutoSwitch.ping_failure_counter
current_auto = Metasmoke.AutoSwitch.autoswitch_is_on
# MAX_SUCCESSES is constant so no lock.
if current_counter > Metasmoke.AutoSwitch.MAX_SUCCESSES and\
GlobalVars.MSStatus.is_down() and current_auto:
# Why use warning? Because some action may be needed if people don't think metasmoke is up.
log("warning", "Last {} connection(s) to metasmoke succeeded".format(current_counter) +
" Setting metasmoke status to up.")
chatcommunicate.tell_rooms_with("debug", "**Notice**: {}: ".format(GlobalVars.location) +
"Last {} connection(s) to metasmoke".format(current_counter) +
" succeeded. Setting metasmoke status to **up**.")
Metasmoke.set_ms_up(tell=False)
@staticmethod
def enable_autoswitch(to_enable):
""" Enable or disable auto status switch """
switch_auto_msg = ""
with Metasmoke.AutoSwitch.rw_lock:
if Metasmoke.AutoSwitch.autoswitch_is_on is not to_enable:
# Log and post chat message only if there really is a change.
switch_auto_msg = "Metasmoke status autoswitch is now {}abled.".format("en" if to_enable else "dis")
Metasmoke.AutoSwitch.autoswitch_is_on = to_enable
if switch_auto_msg:
log("info", switch_auto_msg)
chatcommunicate.tell_rooms_with("debug", switch_auto_msg)
@staticmethod
def get_ping_failure():
""" Get ping failure count. Negative number is ping success count. """
with Metasmoke.AutoSwitch.rw_lock:
return Metasmoke.AutoSwitch.ping_failure_counter
@staticmethod
def reset_switch():
""" Reset class Metasmoke.AutoSwitch to default values """
with Metasmoke.AutoSwitch.rw_lock:
Metasmoke.AutoSwitch.ping_failure_counter = 0
Metasmoke.AutoSwitch.autoswitch_is_on = True
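        # Counter semantics sketch: ping_failure_counter > 0 counts consecutive failures,
        # < 0 counts consecutive successes. After more than MAX_FAILURES consecutive
        # failures (while metasmoke is marked up and autoswitch is enabled) the status is
        # flipped to down; after more than MAX_SUCCESSES consecutive successes it is
        # flipped back up.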
@staticmethod
def set_ms_up(tell=True):
""" Switch metasmoke status to up """
# We must first set metasmoke to up, then say that metasmoke is up, not the other way around.
ms_msg = ""
if GlobalVars.MSStatus.is_down():
ms_msg = "Metasmoke status: set to up."
GlobalVars.MSStatus.set_up()
if ms_msg:
log("info", ms_msg)
if tell:
chatcommunicate.tell_rooms_with("debug", "{}: {}".format(GlobalVars.location, ms_msg))
@staticmethod
def set_ms_down(tell=True):
""" Switch metasmoke status to down """
ms_msg = ""
if GlobalVars.MSStatus.is_up():
ms_msg = "Metasmoke status: set to down."
GlobalVars.MSStatus.set_down()
if ms_msg:
log("info", ms_msg)
if tell:
chatcommunicate.tell_rooms_with("debug", "{}: {}".format(GlobalVars.location, ms_msg))
@staticmethod
def connect_websocket():
GlobalVars.metasmoke_ws = websocket.create_connection(GlobalVars.metasmoke_ws_host,
origin=GlobalVars.metasmoke_host)
payload = json.dumps({"command": "subscribe",
"identifier": "{\"channel\":\"SmokeDetectorChannel\","
"\"key\":\"" + GlobalVars.metasmoke_key + "\"}"})
GlobalVars.metasmoke_ws.send(payload)
GlobalVars.metasmoke_ws.settimeout(10)
@staticmethod
def init_websocket():
has_succeeded = False
failed_connection_attempts = 0
while GlobalVars.metasmoke_key and GlobalVars.metasmoke_ws_host:
try:
Metasmoke.connect_websocket()
has_succeeded = True
while True:
a = GlobalVars.metasmoke_ws.recv()
try:
data = json.loads(a)
Metasmoke.handle_websocket_data(data)
GlobalVars.MSStatus.succeeded()
failed_connection_attempts = 0
except ConnectionError:
raise
except Exception as e:
log('error', '{}: {}'.format(type(e).__name__, e))
log_exception(*sys.exc_info())
GlobalVars.MSStatus.failed()
Metasmoke.connect_websocket()
except Exception:
GlobalVars.MSStatus.failed()
log('error', "Couldn't bind to MS websocket")
if not has_succeeded:
failed_connection_attempts += 1
if failed_connection_attempts == MAX_MS_WEBSOCKET_RETRIES_TO_LONG_INTERVAL:
chatcommunicate.tell_rooms_with("debug", "Cannot initiate MS websocket."
" Continuing to retry at longer intervals.")
log('warning', "Cannot initiate MS websocket."
" Continuing to retry at longer intervals.")
if failed_connection_attempts >= MAX_MS_WEBSOCKET_RETRIES_TO_LONG_INTERVAL:
time.sleep(MS_WEBSOCKET_LONG_INTERVAL)
else:
# Wait and hopefully network issues will be solved
time.sleep(10)
else:
time.sleep(10)
@staticmethod
def handle_websocket_data(data):
if "message" not in data:
if "type" in data and data['type'] == "reject_subscription":
log('error', "MS WebSocket subscription was rejected. Check your MS key.")
raise ConnectionError("MS WebSocket connection rejected")
return
message = data['message']
if not isinstance(message, Iterable):
return
if "message" in message:
from_ms = message['message']
if (from_ms.startswith("[ [charcoal-se.github.io](https://github.com/Charcoal-SE/charcoal-se.github.io) ]"
" continuous-integration/travis-ci/push")):
from_ms = from_ms.replace(": ",
", or the [SD wiki](//git.io/vyDZv)"
" ([history](//github.com/Charcoal-SE/SmokeDetector/wiki/_history)): ", 1)
from_ms = from_ms.replace("https:", "")
chatcommunicate.tell_rooms_with("metasmoke", from_ms)
elif "autoflag_fp" in message:
event = message["autoflag_fp"]
chatcommunicate.tell_rooms(event["message"], ("debug", "site-" + event["site"]),
("no-site-" + event["site"],), notify_site="/autoflag_fp")
elif "exit" in message:
os._exit(message["exit"])
elif "blacklist" in message:
ids = (message['blacklist']['uid'], message['blacklist']['site'])
datahandling.add_blacklisted_user(ids, "metasmoke", message['blacklist']['post'])
datahandling.last_feedbacked = (ids, time.time() + 60)
elif "unblacklist" in message:
ids = (message['unblacklist']['uid'], message['unblacklist']['site'])
datahandling.remove_blacklisted_user(ids)
elif "naa" in message:
post_site_id = parsing.fetch_post_id_and_site_from_url(message["naa"]["post_link"])
datahandling.add_ignored_post(post_site_id[0:2])
elif "fp" in message:
post_site_id = parsing.fetch_post_id_and_site_from_url(message["fp"]["post_link"])
datahandling.add_false_positive(post_site_id[0:2])
elif "report" in message:
import chatcommands # Do it here
chatcommands.report_posts([message["report"]["post_link"]], message["report"]["user"],
True, "the metasmoke API")
elif "deploy_updated" in message:
return # Disabled
sha = message["deploy_updated"]["head_commit"]["id"]
if sha != os.popen('git log -1 --pretty="%H"').read():
if "autopull" in message["deploy_updated"]["head_commit"]["message"]:
if only_blacklists_changed(GitManager.get_remote_diff()):
commit_md = "[`{0}`](https://github.com/{1}/commit/{0})" \
.format(sha[:7], GlobalVars.bot_repo_slug)
integrity = blacklist_integrity_check()
if len(integrity) == 0: # No issues
GitManager.pull_remote()
findspam.reload_blacklists()
chatcommunicate.tell_rooms_with("debug", "No code modified in {0}, only blacklists"
" reloaded.".format(commit_md))
else:
integrity.append("please fix before pulling.")
chatcommunicate.tell_rooms_with("debug", ", ".join(integrity))
elif "commit_status" in message:
c = message["commit_status"]
sha = c["commit_sha"][:7]
recent_commits = sp.check_output(["git", "log", "-50", "--pretty=%H"]).decode('utf-8').strip().split('\n')
if c["commit_sha"] in recent_commits:
return # Same rev, or earlier rev (e.g. when watching things faster than CI completes), nothing to do
if c["status"] == "success":
if "autopull" in c["commit_message"] or c["commit_message"].startswith("!") or \
c["commit_message"].startswith("Auto "):
s = "[CI]({ci_link}) on [`{commit_sha}`](https://github.com/{repo}/" \
"commit/{commit_sha}) succeeded. Message contains 'autopull', pulling...".format(
ci_link=c["ci_url"], repo=GlobalVars.bot_repo_slug, commit_sha=sha)
remote_diff = GitManager.get_remote_diff()
if only_blacklists_changed(remote_diff):
GitManager.pull_remote()
if not GlobalVars.on_branch:
# Restart if HEAD detached
log('warning', "Pulling remote with HEAD detached, checkout deploy", f=True)
exit_mode("checkout_deploy")
GlobalVars.reload()
findspam.FindSpam.reload_blacklists()
chatcommunicate.tell_rooms_with('debug', GlobalVars.s_norestart_blacklists)
elif only_modules_changed(remote_diff):
GitManager.pull_remote()
if not GlobalVars.on_branch:
# Restart if HEAD detached
log('warning', "Pulling remote with HEAD detached, checkout deploy", f=True)
exit_mode("checkout_deploy")
GlobalVars.reload()
reload_modules()
chatcommunicate.tell_rooms_with('debug', GlobalVars.s_norestart_findspam)
else:
chatcommunicate.tell_rooms_with('debug', s, notify_site="/ci")
exit_mode("pull_update")
else:
s = "[CI]({ci_link}) on [`{commit_sha}`](https://github.com/{repo}/commit/{commit_sha}) " \
"succeeded.".format(ci_link=c["ci_url"], repo=GlobalVars.bot_repo_slug, commit_sha=sha)
chatcommunicate.tell_rooms_with("debug", s, notify_site="/ci")
elif c["status"] == "failure":
s = "[CI]({ci_link}) on [`{commit_sha}`](https://github.com/{repo}/commit/{commit_sha}) " \
"failed.".format(ci_link=c["ci_url"], repo=GlobalVars.bot_repo_slug, commit_sha=sha)
chatcommunicate.tell_rooms_with("debug", s, notify_site="/ci")
elif "everything_is_broken" in message:
if message["everything_is_broken"] is True:
exit_mode("shutdown")
elif "domain_whitelist" in message:
if message["domain_whitelist"] == "refresh":
metasmoke_cache.MetasmokeCache.delete('whitelisted-domains')
@staticmethod
def send_stats_on_post(title, link, reasons, body, markdown, username, user_link, why, owner_rep,
post_score, up_vote_count, down_vote_count):
if GlobalVars.metasmoke_host is None:
log('info', 'Attempted to send stats but metasmoke_host is undefined. Ignoring.')
return
elif GlobalVars.MSStatus.is_down():
log('warning', "Metasmoke down, not sending stats.")
return
metasmoke_key = GlobalVars.metasmoke_key
try:
if len(why) > 4096:
why = why[:2048] + ' ... ' + why[-2043:] # Basic maths
post = {'title': title, 'link': link, 'reasons': reasons, 'body': body, 'markdown': markdown,
'username': username, 'user_link': user_link,
'why': why, 'user_reputation': owner_rep, 'score': post_score,
'upvote_count': up_vote_count, 'downvote_count': down_vote_count}
# Remove None values (if they somehow manage to get through)
post = {k: v for k, v in post.items() if v}
payload = {'post': post, 'key': metasmoke_key}
headers = {'Content-type': 'application/json'}
Metasmoke.post("/posts.json", data=json.dumps(payload), headers=headers)
except Exception as e:
log('error', e)
@staticmethod
def send_feedback_for_post(post_link, feedback_type, user_name, user_id, chat_host):
if GlobalVars.metasmoke_host is None:
log('info', 'Received chat feedback but metasmoke_host is undefined. Ignoring.')
return
elif GlobalVars.MSStatus.is_down():
log('warning', "Metasmoke is down, not sending feedback")
return
metasmoke_key = GlobalVars.metasmoke_key
try:
payload = {
'feedback': {
'user_name': user_name,
'chat_user_id': user_id,
'chat_host': chat_host,
'feedback_type': feedback_type,
'post_link': post_link
},
'key': metasmoke_key
}
headers = {'Content-type': 'application/json'}
Metasmoke.post("/feedbacks.json", data=json.dumps(payload), headers=headers)
except Exception as e:
log('error', e)
@staticmethod
def send_deletion_stats_for_post(post_link, is_deleted):
if GlobalVars.metasmoke_host is None:
log('info', 'Attempted to send deletion data but metasmoke_host is undefined. Ignoring.')
return
elif GlobalVars.MSStatus.is_down():
log('warning', "Metasmoke is down, not sending deletion stats")
return
metasmoke_key = GlobalVars.metasmoke_key
try:
payload = {
'deletion_log': {
'is_deleted': is_deleted,
'post_link': post_link
},
'key': metasmoke_key
}
headers = {'Content-type': 'application/json'}
Metasmoke.post("/deletion_logs.json", data=json.dumps(payload), headers=headers)
except Exception as e:
log('error', e)
@staticmethod
def send_status_ping():
if GlobalVars.metasmoke_host is None:
log('info', 'Attempted to send status ping but metasmoke_host is undefined. Not sent.')
return
elif GlobalVars.MSStatus.is_down():
payload = {
"location": GlobalVars.location,
"timestamp": time.time()
}
SocketScience.send(payload)
metasmoke_key = GlobalVars.metasmoke_key
try:
payload = {
'location': GlobalVars.location,
'key': metasmoke_key,
'standby': GlobalVars.standby_mode or GlobalVars.no_se_activity_scan
}
headers = {'content-type': 'application/json'}
response = Metasmoke.post("/status-update.json",
data=json.dumps(payload), headers=headers, ignore_down=True)
try:
response = response.json()
if response.get('pull_update', False):
log('info', "Received pull command from MS ping response")
exit_mode("pull_update")
if ('failover' in response and GlobalVars.standby_mode and not GlobalVars.no_se_activity_scan):
# If we're not scanning, then we don't want to become officially active due to failover.
if response['failover']:
GlobalVars.standby_mode = False
chatcommunicate.tell_rooms_with("debug", GlobalVars.location + " received failover signal.",
notify_site="/failover")
if response.get('standby', False):
chatcommunicate.tell_rooms_with("debug",
GlobalVars.location + " entering metasmoke-forced standby.")
time.sleep(2)
exit_mode("standby")
if response.get('shutdown', False):
exit_mode("shutdown")
except Exception: # TODO: What could happen here?
pass
except Exception as e:
log('error', e)
@staticmethod
def update_code_privileged_users_list():
if GlobalVars.MSStatus.is_down():
log('warning', "Metasmoke is down, can't update blacklist manager privilege list")
return
payload = {'key': GlobalVars.metasmoke_key}
headers = {'Content-type': 'application/json'}
try:
response = Metasmoke.get("/api/users/code_privileged",
data=json.dumps(payload), headers=headers).json()['items']
except Exception as e:
log('error', e)
return
GlobalVars.code_privileged_users = set()
for id in response["stackexchange_chat_ids"]:
GlobalVars.code_privileged_users.add(("stackexchange.com", id))
for id in response["meta_stackexchange_chat_ids"]:
GlobalVars.code_privileged_users.add(("meta.stackexchange.com", id))
for id in response["stackoverflow_chat_ids"]:
GlobalVars.code_privileged_users.add(("stackoverflow.com", id))
@staticmethod
def determine_if_autoflagged(post_url):
"""
Given the URL for a post, determine whether or not it has been autoflagged.
"""
payload = {
'key': GlobalVars.metasmoke_key,
'filter': 'GFGJGHFMHGOLMMJMJJJGHIGOMKFKKILF', # id and autoflagged
'urls': post_url
}
try:
response = Metasmoke.get("/api/v2.0/posts/urls", params=payload).json()
except Exception as e:
log('error', e)
return False, []
# The first report of a URL is the only one that will be autoflagged. MS responses to the
# /posts/urls endpoint have the oldest report last.
if len(response["items"]) > 0 and response["items"][-1]["autoflagged"]:
# get flagger names
id = str(response["items"][-1]["id"])
payload = {'key': GlobalVars.metasmoke_key}
flags = Metasmoke.get("/api/v2.0/posts/" + id + "/flags", params=payload).json()
if len(flags["items"]) > 0:
return True, [user["username"] for user in flags["items"][0]["autoflagged"]["users"]]
return False, []
@staticmethod
def stop_autoflagging():
payload = {'key': GlobalVars.metasmoke_key}
headers = {'Content-type': 'application/json'}
Metasmoke.post("/flagging/smokey_disable",
data=json.dumps(payload), headers=headers)
@staticmethod
def send_statistics():
if GlobalVars.MSStatus.is_down():
log('warning', "Metasmoke is down, not sending statistics")
return
# Get current apiquota from globalvars
GlobalVars.apiquota_rw_lock.acquire()
current_apiquota = GlobalVars.apiquota
GlobalVars.apiquota_rw_lock.release()
posts_scanned, scan_time, posts_per_second = GlobalVars.PostScanStat.get_stat()
payload = {'key': GlobalVars.metasmoke_key,
'statistic': {'posts_scanned': posts_scanned,
'api_quota': current_apiquota}}
if posts_per_second:
# Send scan rate as well, if applicable.
payload['statistic']['post_scan_rate'] = posts_per_second
GlobalVars.PostScanStat.reset_stat()
headers = {'Content-type': 'application/json'}
if GlobalVars.metasmoke_host is not None:
log('info', 'Sent statistics to metasmoke: ', payload['statistic'])
Metasmoke.post("/statistics.json",
data=json.dumps(payload), headers=headers)
@staticmethod
def post_auto_comment(msg, user, url=None, ids=None):
if not GlobalVars.metasmoke_key:
return
response = None
if url is not None:
params = {"key": GlobalVars.metasmoke_key, "urls": url, "filter": "GFGJGHFJNFGNHKNIKHGGOMILHKLJIFFN"}
response = Metasmoke.get("/api/v2.0/posts/urls", params=params).json()
elif ids is not None:
post_id, site = ids
site = parsing.api_parameter_from_link(site)
params = {"key": GlobalVars.metasmoke_key, "filter": "GFGJGHFJNFGNHKNIKHGGOMILHKLJIFFN"}
try:
response = Metasmoke.get("/api/v2.0/posts/uid/{}/{}".format(site, post_id), params=params).json()
except AttributeError:
response = None
if response and "items" in response and len(response["items"]) > 0:
ms_id = response["items"][0]["id"]
params = {"key": GlobalVars.metasmoke_key,
"text": msg[:1].upper() + msg[1:], # Capitalise the first letter of the comment
"chat_user_id": user.id,
"chat_host": user._client.host}
Metasmoke.post("/api/v2.0/comments/post/{}".format(ms_id), params=params)
@staticmethod
def get_post_bodies_from_ms(post_url):
if not GlobalVars.metasmoke_key:
return None
payload = {
'key': GlobalVars.metasmoke_key,
'filter': 'GHOOIJGNLKHIIOIKGILKIJGHFMNKKGFJ', # posts.id, posts.body, posts.created_at
'urls': parsing.to_protocol_relative(post_url)
}
try:
response = Metasmoke.get('/api/v2.0/posts/urls', params=payload).json()
except AttributeError:
return None
except Exception as e:
log('error', '{}: {}'.format(type(e).__name__, e))
log_exception(*sys.exc_info())
exception_only = ''.join(traceback.format_exception_only(type(e), e)).strip()
chatcommunicate.tell_rooms_with("debug", "{}: In getting MS post information, recovered from `{}`"
.format(GlobalVars.location, exception_only))
return None
return response['items']
@staticmethod
def get_reason_weights():
if not GlobalVars.metasmoke_key:
return None
payload = {
'key': GlobalVars.metasmoke_key,
'per_page': 100,
'page': 1,
}
items = []
try:
while True:
response = Metasmoke.get('/api/v2.0/reasons', params=payload).json()
items.extend(response['items'])
if not response['has_more']:
break
payload['page'] += 1
except AttributeError:
return None
return items
# Some sniffy stuff
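# request_sender wraps a requests function (get/post) so that every metasmoke
# call goes through one place: it skips the call when no metasmoke host is
# configured (or metasmoke is marked down and the call is not a status ping),
# applies a default timeout, and keeps GlobalVars.MSStatus plus the ping-based
# AutoSwitch counters in sync with the outcome of the request.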
@staticmethod
def request_sender(method):
def func(url, *args, ignore_down=False, **kwargs):
if not GlobalVars.metasmoke_host or (GlobalVars.MSStatus.is_down() and not ignore_down):
return None
if 'timeout' not in kwargs:
kwargs['timeout'] = 10.000 # Don't throttle by MS
response = None # Should return None upon failure, if any
try:
response = method(GlobalVars.metasmoke_host + url, *args, **kwargs)
except Exception:
GlobalVars.MSStatus.failed()
if ignore_down:
# Means that this is a status ping
Metasmoke.AutoSwitch.ping_failed()
# No need to log here because it's re-raised
raise # Maintain minimal difference to the original get/post methods
else:
GlobalVars.MSStatus.succeeded()
if ignore_down:
# Means that this is a status ping
Metasmoke.AutoSwitch.ping_succeeded()
return response
return func
get = request_sender.__func__(requests.get)
post = request_sender.__func__(requests.post)
@staticmethod
def send_status_ping_and_verify_scanning_if_active():
def reboot_or_standby(action):
error_message = "There's been no scan activity for {} status pings. Going to {}." \
.format(Metasmoke.status_pings_since_scan_activity, action)
log('error', error_message)
chatcommunicate.tell_rooms_with("debug", error_message)
if action == "standby":
GlobalVars.standby_mode = True
# Let MS know immediately, to lessen potential wait time (e.g. if we fail to reboot).
Metasmoke.send_status_ping()
time.sleep(8)
exit_mode(action)
in_standby_mode = GlobalVars.standby_mode or GlobalVars.no_se_activity_scan
if not in_standby_mode:
# This is the active instance, so should be scanning. If it's not scanning, then report or go to standby.
if GlobalVars.PostScanStat.get_stat() == Metasmoke.scan_stat_snapshot:
# There's been no activity since the last ping.
Metasmoke.status_pings_since_scan_activity += 1
if Metasmoke.status_pings_since_scan_activity >= NO_ACTIVITY_PINGS_TO_REBOOT:
# Assume something is very wrong. Report to debug rooms and reboot.
reboot_or_standby("reboot")
elif Metasmoke.status_pings_since_scan_activity >= NO_ACTIVITY_PINGS_TO_STANDBY:
# Assume something is very wrong. Report to debug rooms and go into standby mode.
reboot_or_standby("standby")
elif Metasmoke.status_pings_since_scan_activity >= NO_ACTIVITY_PINGS_TO_REPORT:
# Something might be wrong. Let people in debug rooms know.
status_message = "There's been no scan activity for {} status pings. There may be a problem." \
.format(Metasmoke.status_pings_since_scan_activity)
log('warning', status_message)
chatcommunicate.tell_rooms_with("debug", status_message)
else:
Metasmoke.status_pings_since_scan_activity = 0
Metasmoke.scan_stat_snapshot = GlobalVars.PostScanStat.get_stat()
Metasmoke.send_status_ping()
|
from gbdxtools.images.base import RDABaseImage
from gbdxtools.images.drivers import RDADaskImageDriver
from gbdxtools.rda.util import reproject_params
from gbdxtools.rda.interface import RDA
rda = RDA()
from shapely.geometry import box
class DemDriver(RDADaskImageDriver):
image_option_support = ["proj", "bbox"]
__image_option_defaults__ = {"bbox": None}
class DemImage(RDABaseImage):
''' Image class for Digital Elevation Model (DEM) data from the NED/SRTM dataset.
This class has no Catalog IDs and is created by passing an AOI. It shares most of the same methods as CatalogImage objects.
Args:
aoi (list): list of coordinate in BBOX format
proj (str): (optional) EPSG string of projection reproject to. Native projection is "EPSG:4326" (WGS84)
Example:
>>> dem = DemImage(aoi=[5.279, 60.358, 5.402, 60.419])'''
__Driver__ = DemDriver
__rda_id__ = "dgdem-v20180406-DEFLATED-ca4649c5acb"
def __post_new_hook__(self, **kwargs):
self = self.aoi(**kwargs)
if self.rda.metadata["image"]["minX"] == -1:
return self[:, :, 1:-1]
return self
@classmethod
def _build_graph(cls, aoi, proj="EPSG:4326", **kwargs):
wkt = box(*aoi).wkt
dem = rda.GeospatialCrop(rda.IdahoRead(bucketName="idaho-dems-2018", imageId="dgdem-v20180406-DEFLATED-ca4649c5acb", objectStore="S3"), geospatialWKT=str(wkt))
if proj != "EPSG:4326":
dem = rda.Reproject(dem, **reproject_params(proj))
return dem
|
# Auto-generated at 2021-09-27T17:12:33.419763+08:00
# from: Justice Lobby Service (1.33.0)
# Copyright (c) 2018 - 2021 AccelByte Inc. All Rights Reserved.
# This is licensed software from AccelByte Inc, for limitations
# and restrictions contact your company contract manager.
# pylint: disable=duplicate-code
# pylint: disable=line-too-long
# pylint: disable=missing-function-docstring
# pylint: disable=missing-module-docstring
# pylint: disable=too-many-arguments
# pylint: disable=too-many-branches
# pylint: disable=too-many-instance-attributes
# pylint: disable=too-many-lines
# pylint: disable=too-many-locals
# pylint: disable=too-many-public-methods
# pylint: disable=too-many-return-statements
# pylint: disable=too-many-statements
# pylint: disable=unused-import
from __future__ import annotations
from typing import Any, Dict, List, Optional, Tuple, Union
from ....core import Model
class ModelCreateTopicRequest(Model):
"""Model create topic request
Properties:
description: (description) REQUIRED str
topic: (topic) REQUIRED str
"""
# region fields
description: str # REQUIRED
topic: str # REQUIRED
# endregion fields
# region with_x methods
def with_description(self, value: str) -> ModelCreateTopicRequest:
self.description = value
return self
def with_topic(self, value: str) -> ModelCreateTopicRequest:
self.topic = value
return self
# endregion with_x methods
# region to methods
def to_dict(self, include_empty: bool = False) -> dict:
result = {}
if hasattr(self, "description") and self.description:
result["description"] = str(self.description)
elif include_empty:
result["description"] = str()
if hasattr(self, "topic") and self.topic:
result["topic"] = str(self.topic)
elif include_empty:
result["topic"] = str()
return result
# endregion to methods
# region static methods
@classmethod
def create(
cls,
description: str,
topic: str,
) -> ModelCreateTopicRequest:
instance = cls()
instance.description = description
instance.topic = topic
return instance
@classmethod
def create_from_dict(cls, dict_: dict, include_empty: bool = False) -> ModelCreateTopicRequest:
instance = cls()
if not dict_:
return instance
if "description" in dict_ and dict_["description"] is not None:
instance.description = str(dict_["description"])
elif include_empty:
instance.description = str()
if "topic" in dict_ and dict_["topic"] is not None:
instance.topic = str(dict_["topic"])
elif include_empty:
instance.topic = str()
return instance
@staticmethod
def get_field_info() -> Dict[str, str]:
return {
"description": "description",
"topic": "topic",
}
# endregion static methods
|
# --------------------------------------------------------
# SMNet FaceNet
# Licensed under The MIT License [see LICENSE for details]
# Copyright 2019 smarsu. All Rights Reserved.
# --------------------------------------------------------
import os.path as osp
import numpy as np
from sklearn import metrics
import matplotlib.pyplot as plt
from euclidean import euclidean_distance
EPS = 1e-12
def load_feature_map_from_txt(path_txt):
""""""
with open(path_txt, 'r') as fb:
lines = fb.readlines()
feature_map = {}
for line in lines:
line = line.strip().split()
name = line[0]
feature = [float(v) for v in line[1:]]
feature_map[name] = np.array(feature, dtype=np.float64)
return feature_map
def load_pairs(pair_path):
with open(pair_path, 'r') as fb:
lfw_root = '/datasets/lfw_detected'
lines = fb.readlines()
pairs = []
for line in lines:
fst, snd, match = line.strip().split()
fst = osp.join(lfw_root, fst)
snd = osp.join(lfw_root, snd)
pairs.append([fst, snd, int(match)])
return pairs
def l2_norm(x):
"""
Args:
x: ndarray, [n, feature_len]
"""
x = np.array(x, dtype=np.float64)
return x / (np.sqrt(np.sum(np.square(x), axis=-1, keepdims=True)) + EPS)
def cosine_similarity(a, b):
"""
Args:
a: ndarray, [feature_len]
b: ndarray, [feature_len]
"""
a = np.array(a, dtype=np.float64)
b = np.array(b, dtype=np.float64)
return np.sum(a * b)
def auc(scores, labels):
""""""
return metrics.roc_auc_score(labels, scores)
def roc(scores, labels):
""""""
fpr, tpr, thresholds = metrics.roc_curve(labels, scores)
plt.plot(fpr, tpr)
plt.savefig('roc.png')
def main():
feature_map = load_feature_map_from_txt('features.txt')
feature_map = {k: l2_norm(v) for k, v in feature_map.items()}
pairs = load_pairs('parsed_pair.txt')
scores = []
labels = []
for fst, snd, match in pairs:
labels.append(match)
if fst not in feature_map:
scores.append(1)
print('WARNING: not found', fst)
continue
elif snd not in feature_map:
scores.append(1)
print('WARNING: not found', snd)
continue
score = 2 - euclidean_distance(feature_map[fst], feature_map[snd])
scores.append(score)
print(scores)
print(labels)
print(min(scores))
print(max(scores))
print(auc(scores, labels))
roc(scores, labels)
if __name__ == '__main__':
main()
|
"""
Generalized Linear models.
"""
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Fabian Pedregosa <fabian.pedregosa@inria.fr>
# Olivier Grisel <olivier.grisel@ensta.org>
# Vincent Michel <vincent.michel@inria.fr>
# Peter Prettenhofer <peter.prettenhofer@gmail.com>
# Mathieu Blondel <mathieu@mblondel.org>
# Lars Buitinck
# Maryan Morel <maryan.morel@polytechnique.edu>
# Giorgio Patrini <giorgio.patrini@anu.edu.au>
# License: BSD 3 clause
from __future__ import division
from abc import ABCMeta, abstractmethod
import numbers
import warnings
import numpy as np
import scipy.sparse as sp
from scipy import linalg
from scipy import sparse
from ..externals import six
from ..externals.joblib import Parallel, delayed
from ..base import BaseEstimator, ClassifierMixin, RegressorMixin
from ..utils import check_array, check_X_y, deprecated, as_float_array
from ..utils.validation import FLOAT_DTYPES
from ..utils import check_random_state
from ..utils.extmath import safe_sparse_dot
from ..utils.sparsefuncs import mean_variance_axis, inplace_column_scale
from ..utils.fixes import sparse_lsqr
from ..utils.seq_dataset import ArrayDataset, CSRDataset
from ..utils.validation import check_is_fitted
from ..exceptions import NotFittedError
from ..preprocessing.data import normalize as f_normalize
# TODO: bayesian_ridge_regression and bayesian_regression_ard
# should be squashed into its respective objects.
SPARSE_INTERCEPT_DECAY = 0.01
# For sparse data intercept updates are scaled by this decay factor to avoid
# intercept oscillation.
def make_dataset(X, y, sample_weight, random_state=None):
"""Create ``Dataset`` abstraction for sparse and dense inputs.
This also returns the ``intercept_decay`` which is different
for sparse datasets.
"""
rng = check_random_state(random_state)
# seed should never be 0 in SequentialDataset
seed = rng.randint(1, np.iinfo(np.int32).max)
if sp.issparse(X):
dataset = CSRDataset(X.data, X.indptr, X.indices, y, sample_weight,
seed=seed)
intercept_decay = SPARSE_INTERCEPT_DECAY
else:
dataset = ArrayDataset(X, y, sample_weight, seed=seed)
intercept_decay = 1.0
return dataset, intercept_decay
@deprecated("sparse_center_data was deprecated in version 0.18 and will be "
"removed in 0.20. Use utilities in preprocessing.data instead")
def sparse_center_data(X, y, fit_intercept, normalize=False):
"""
Compute information needed to center data to have mean zero along
axis 0. Be aware that X will not be centered since it would break
the sparsity, but will be normalized if asked so.
"""
if fit_intercept:
# we might require not to change the csr matrix sometimes
# store a copy if normalize is True.
# Change dtype to float64 since mean_variance_axis accepts
# it that way.
if sp.isspmatrix(X) and X.getformat() == 'csr':
X = sp.csr_matrix(X, copy=normalize, dtype=np.float64)
else:
X = sp.csc_matrix(X, copy=normalize, dtype=np.float64)
X_offset, X_var = mean_variance_axis(X, axis=0)
if normalize:
# transform variance to std in-place
X_var *= X.shape[0]
X_std = np.sqrt(X_var, X_var)
del X_var
X_std[X_std == 0] = 1
inplace_column_scale(X, 1. / X_std)
else:
X_std = np.ones(X.shape[1])
y_offset = y.mean(axis=0)
y = y - y_offset
else:
X_offset = np.zeros(X.shape[1])
X_std = np.ones(X.shape[1])
y_offset = 0. if y.ndim == 1 else np.zeros(y.shape[1], dtype=X.dtype)
return X, y, X_offset, y_offset, X_std
@deprecated("center_data was deprecated in version 0.18 and will be removed in "
"0.20. Use utilities in preprocessing.data instead")
def center_data(X, y, fit_intercept, normalize=False, copy=True,
sample_weight=None):
"""
Centers data to have mean zero along axis 0. This is here because
nearly all linear models will want their data to be centered.
If sample_weight is not None, then the weighted mean of X and y
is zero, and not the mean itself
"""
X = as_float_array(X, copy)
if fit_intercept:
if isinstance(sample_weight, numbers.Number):
sample_weight = None
if sp.issparse(X):
X_offset = np.zeros(X.shape[1])
X_std = np.ones(X.shape[1])
else:
X_offset = np.average(X, axis=0, weights=sample_weight)
X -= X_offset
# XXX: currently scaled to variance=n_samples
if normalize:
X_std = np.sqrt(np.sum(X ** 2, axis=0))
X_std[X_std == 0] = 1
X /= X_std
else:
X_std = np.ones(X.shape[1])
y_offset = np.average(y, axis=0, weights=sample_weight)
y = y - y_offset
else:
X_offset = np.zeros(X.shape[1])
X_std = np.ones(X.shape[1])
y_offset = 0. if y.ndim == 1 else np.zeros(y.shape[1], dtype=X.dtype)
return X, y, X_offset, y_offset, X_std
def _preprocess_data(X, y, fit_intercept, normalize=False, copy=True,
sample_weight=None, return_mean=False):
"""
Centers data to have mean zero along axis 0. If fit_intercept=False or if
the X is a sparse matrix, no centering is done, but normalization can still
be applied. The function returns the statistics necessary to reconstruct
the input data, which are X_offset, y_offset, X_scale, such that the output
X = (X - X_offset) / X_scale
X_scale is the L2 norm of X - X_offset. If sample_weight is not None,
then the weighted mean of X and y is zero, and not the mean itself. If
return_mean=True, the mean, eventually weighted, is returned, independently
of whether X was centered (option used for optimization with sparse data in
coordinate_descend).
This is here because nearly all linear models will want their data to be
centered.
"""
if isinstance(sample_weight, numbers.Number):
sample_weight = None
X = check_array(X, copy=copy, accept_sparse=['csr', 'csc'],
dtype=FLOAT_DTYPES)
if fit_intercept:
if sp.issparse(X):
X_offset, X_var = mean_variance_axis(X, axis=0)
if not return_mean:
X_offset = np.zeros(X.shape[1])
if normalize:
# TODO: f_normalize could be used here as well but the function
# inplace_csr_row_normalize_l2 must be changed such that it
# can return also the norms computed internally
# transform variance to norm in-place
X_var *= X.shape[0]
X_scale = np.sqrt(X_var, X_var)
del X_var
X_scale[X_scale == 0] = 1
inplace_column_scale(X, 1. / X_scale)
else:
X_scale = np.ones(X.shape[1])
else:
X_offset = np.average(X, axis=0, weights=sample_weight)
X -= X_offset
if normalize:
X, X_scale = f_normalize(X, axis=0, copy=False,
return_norm=True)
else:
X_scale = np.ones(X.shape[1])
y_offset = np.average(y, axis=0, weights=sample_weight)
y = y - y_offset
else:
X_offset = np.zeros(X.shape[1])
X_scale = np.ones(X.shape[1])
y_offset = 0. if y.ndim == 1 else np.zeros(y.shape[1], dtype=X.dtype)
return X, y, X_offset, y_offset, X_scale
# TODO: _rescale_data should be factored into _preprocess_data.
# Currently, the fact that sag implements its own way to deal with
# sample_weight makes the refactoring tricky.
def _rescale_data(X, y, sample_weight):
"""Rescale data so as to support sample_weight"""
n_samples = X.shape[0]
sample_weight = sample_weight * np.ones(n_samples)
sample_weight = np.sqrt(sample_weight)
sw_matrix = sparse.dia_matrix((sample_weight, 0),
shape=(n_samples, n_samples))
X = safe_sparse_dot(sw_matrix, X)
y = safe_sparse_dot(sw_matrix, y)
return X, y
class LinearModel(six.with_metaclass(ABCMeta, BaseEstimator)):
"""Base class for Linear Models"""
@abstractmethod
def fit(self, X, y):
"""Fit model."""
@deprecated(" and will be removed in 0.19.")
def decision_function(self, X):
"""Decision function of the linear model.
Parameters
----------
X : {array-like, sparse matrix}, shape = (n_samples, n_features)
Samples.
Returns
-------
C : array, shape = (n_samples,)
Returns predicted values.
"""
return self._decision_function(X)
def _decision_function(self, X):
check_is_fitted(self, "coef_")
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'])
return safe_sparse_dot(X, self.coef_.T,
dense_output=True) + self.intercept_
def predict(self, X):
"""Predict using the linear model
Parameters
----------
X : {array-like, sparse matrix}, shape = (n_samples, n_features)
Samples.
Returns
-------
C : array, shape = (n_samples,)
Returns predicted values.
"""
return self._decision_function(X)
_preprocess_data = staticmethod(_preprocess_data)
def _set_intercept(self, X_offset, y_offset, X_scale):
"""Set the intercept_
"""
if self.fit_intercept:
self.coef_ = self.coef_ / X_scale
self.intercept_ = y_offset - np.dot(X_offset, self.coef_.T)
else:
self.intercept_ = 0.
# XXX Should this derive from LinearModel? It should be a mixin, not an ABC.
# Maybe the n_features checking can be moved to LinearModel.
class LinearClassifierMixin(ClassifierMixin):
"""Mixin for linear classifiers.
Handles prediction for sparse and dense X.
"""
def decision_function(self, X):
"""Predict confidence scores for samples.
The confidence score for a sample is the signed distance of that
sample to the hyperplane.
Parameters
----------
X : {array-like, sparse matrix}, shape = (n_samples, n_features)
Samples.
Returns
-------
array, shape=(n_samples,) if n_classes == 2 else (n_samples, n_classes)
Confidence scores per (sample, class) combination. In the binary
case, confidence score for self.classes_[1] where >0 means this
class would be predicted.
"""
if not hasattr(self, 'coef_') or self.coef_ is None:
raise NotFittedError("This %(name)s instance is not fitted "
"yet" % {'name': type(self).__name__})
X = check_array(X, accept_sparse='csr')
n_features = self.coef_.shape[1]
if X.shape[1] != n_features:
raise ValueError("X has %d features per sample; expecting %d"
% (X.shape[1], n_features))
scores = safe_sparse_dot(X, self.coef_.T,
dense_output=True) + self.intercept_
return scores.ravel() if scores.shape[1] == 1 else scores
def predict(self, X):
"""Predict class labels for samples in X.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Samples.
Returns
-------
C : array, shape = [n_samples]
Predicted class label per sample.
"""
scores = self.decision_function(X)
if len(scores.shape) == 1:
indices = (scores > 0).astype(np.int)
else:
indices = scores.argmax(axis=1)
return self.classes_[indices]
def _predict_proba_lr(self, X):
"""Probability estimation for OvR logistic regression.
Positive class probabilities are computed as
1. / (1. + np.exp(-self.decision_function(X)));
multiclass is handled by normalizing that over all classes.
"""
prob = self.decision_function(X)
prob *= -1
np.exp(prob, prob)
prob += 1
np.reciprocal(prob, prob)
if prob.ndim == 1:
return np.vstack([1 - prob, prob]).T
else:
# OvR normalization, like LibLinear's predict_probability
prob /= prob.sum(axis=1).reshape((prob.shape[0], -1))
return prob
class SparseCoefMixin(object):
"""Mixin for converting coef_ to and from CSR format.
L1-regularizing estimators should inherit this.
"""
def densify(self):
"""Convert coefficient matrix to dense array format.
Converts the ``coef_`` member (back) to a numpy.ndarray. This is the
default format of ``coef_`` and is required for fitting, so calling
this method is only required on models that have previously been
sparsified; otherwise, it is a no-op.
Returns
-------
self: estimator
"""
msg = "Estimator, %(name)s, must be fitted before densifying."
check_is_fitted(self, "coef_", msg=msg)
if sp.issparse(self.coef_):
self.coef_ = self.coef_.toarray()
return self
def sparsify(self):
"""Convert coefficient matrix to sparse format.
Converts the ``coef_`` member to a scipy.sparse matrix, which for
L1-regularized models can be much more memory- and storage-efficient
than the usual numpy.ndarray representation.
The ``intercept_`` member is not converted.
Notes
-----
For non-sparse models, i.e. when there are not many zeros in ``coef_``,
this may actually *increase* memory usage, so use this method with
care. A rule of thumb is that the number of zero elements, which can
be computed with ``(coef_ == 0).sum()``, must be more than 50% for this
to provide significant benefits.
After calling this method, further fitting with the partial_fit
method (if any) will not work until you call densify.
Returns
-------
self: estimator
"""
msg = "Estimator, %(name)s, must be fitted before sparsifying."
check_is_fitted(self, "coef_", msg=msg)
self.coef_ = sp.csr_matrix(self.coef_)
return self
class LinearRegression(LinearModel, RegressorMixin):
"""
Ordinary least squares Linear Regression.
Parameters
----------
fit_intercept : boolean, optional
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
This parameter is ignored when `fit_intercept` is set to False.
When the regressors are normalized, note that this makes the
hyperparameters learnt more robust and almost independent of the number
of samples. The same property is not valid for standardized data.
However, if you wish to standardize, please use
`preprocessing.StandardScaler` before calling `fit` on an estimator
with `normalize=False`.
copy_X : boolean, optional, default True
If True, X will be copied; else, it may be overwritten.
n_jobs : int, optional, default 1
The number of jobs to use for the computation.
If -1 all CPUs are used. This will only provide speedup for
n_targets > 1 and sufficient large problems.
Attributes
----------
coef_ : array, shape (n_features, ) or (n_targets, n_features)
Estimated coefficients for the linear regression problem.
If multiple targets are passed during the fit (y 2D), this
is a 2D array of shape (n_targets, n_features), while if only
one target is passed, this is a 1D array of length n_features.
residues_ : array, shape (n_targets,) or (1,) or empty
Sum of residuals. Squared Euclidean 2-norm for each target passed
during the fit. If the linear regression problem is under-determined
(the number of linearly independent rows of the training matrix is less
than its number of linearly independent columns), this is an empty
array. If the target vector passed during the fit is 1-dimensional,
this is a (1,) shape array.
intercept_ : array
Independent term in the linear model.
Notes
-----
From the implementation point of view, this is just plain Ordinary
Least Squares (scipy.linalg.lstsq) wrapped as a predictor object.
"""
def __init__(self, fit_intercept=True, normalize=False, copy_X=True,
n_jobs=1):
self.fit_intercept = fit_intercept
self.normalize = normalize
self.copy_X = copy_X
self.n_jobs = n_jobs
@property
@deprecated("``residues_`` is deprecated and will be removed in 0.19")
def residues_(self):
"""Get the residues of the fitted model."""
return self._residues
def fit(self, X, y, sample_weight=None):
"""
Fit linear model.
Parameters
----------
X : numpy array or sparse matrix of shape [n_samples,n_features]
Training data
y : numpy array of shape [n_samples, n_targets]
Target values
sample_weight : numpy array of shape [n_samples]
Individual weights for each sample
.. versionadded:: 0.17
parameter *sample_weight* support to LinearRegression.
Returns
-------
self : returns an instance of self.
"""
n_jobs_ = self.n_jobs
X, y = check_X_y(X, y, accept_sparse=['csr', 'csc', 'coo'],
y_numeric=True, multi_output=True)
if sample_weight is not None and np.atleast_1d(sample_weight).ndim > 1:
raise ValueError("Sample weights must be 1D array or scalar")
X, y, X_offset, y_offset, X_scale = self._preprocess_data(
X, y, fit_intercept=self.fit_intercept, normalize=self.normalize,
copy=self.copy_X, sample_weight=sample_weight)
if sample_weight is not None:
# Sample weight can be implemented via a simple rescaling.
X, y = _rescale_data(X, y, sample_weight)
if sp.issparse(X):
if y.ndim < 2:
out = sparse_lsqr(X, y)
self.coef_ = out[0]
self._residues = out[3]
else:
# sparse_lstsq cannot handle y with shape (M, K)
outs = Parallel(n_jobs=n_jobs_)(
delayed(sparse_lsqr)(X, y[:, j].ravel())
for j in range(y.shape[1]))
self.coef_ = np.vstack(out[0] for out in outs)
self._residues = np.vstack(out[3] for out in outs)
else:
self.coef_, self._residues, self.rank_, self.singular_ = \
linalg.lstsq(X, y)
self.coef_ = self.coef_.T
if y.ndim == 1:
self.coef_ = np.ravel(self.coef_)
self._set_intercept(X_offset, y_offset, X_scale)
return self
def _pre_fit(X, y, Xy, precompute, normalize, fit_intercept, copy):
"""Aux function used at beginning of fit in linear models"""
n_samples, n_features = X.shape
if sparse.isspmatrix(X):
precompute = False
X, y, X_offset, y_offset, X_scale = _preprocess_data(
X, y, fit_intercept=fit_intercept, normalize=normalize,
return_mean=True)
else:
# copy was done in fit if necessary
X, y, X_offset, y_offset, X_scale = _preprocess_data(
X, y, fit_intercept=fit_intercept, normalize=normalize, copy=copy)
if hasattr(precompute, '__array__') and (
fit_intercept and not np.allclose(X_offset, np.zeros(n_features)) or
normalize and not np.allclose(X_scale, np.ones(n_features))):
warnings.warn("Gram matrix was provided but X was centered"
" to fit intercept, "
"or X was normalized : recomputing Gram matrix.",
UserWarning)
# recompute Gram
precompute = 'auto'
Xy = None
# precompute if n_samples > n_features
if isinstance(precompute, six.string_types) and precompute == 'auto':
precompute = (n_samples > n_features)
if precompute is True:
# make sure that the 'precompute' array is contiguous.
precompute = np.empty(shape=(n_features, n_features), dtype=X.dtype,
order='C')
np.dot(X.T, X, out=precompute)
if not hasattr(precompute, '__array__'):
Xy = None # cannot use Xy if precompute is not Gram
if hasattr(precompute, '__array__') and Xy is None:
common_dtype = np.find_common_type([X.dtype, y.dtype], [])
if y.ndim == 1:
# Xy is 1d, make sure it is contiguous.
Xy = np.empty(shape=n_features, dtype=common_dtype, order='C')
np.dot(X.T, y, out=Xy)
else:
# Make sure that Xy is always F contiguous even if X or y are not
# contiguous: the goal is to make it fast to extract the data for a
# specific target.
n_targets = y.shape[1]
Xy = np.empty(shape=(n_features, n_targets), dtype=common_dtype,
order='F')
np.dot(y.T, X, out=Xy.T)
return X, y, X_offset, y_offset, X_scale, precompute, Xy
|
import os
import torch
import csv
import numpy as np
from torch.utils.data import Dataset
from PIL import Image
def split_csv(file):
data = []
a_train_file = r'/home/huangyinyue/ISIC_2019/train.csv'
a_test_file = r'/home/huangyinyue/ISIC_2019/test.csv'
seed = 3
np.random.seed(seed)
train_indices = np.random.choice(25331, 20265, replace=False)  # randomly pick 20265 of the 25331 samples for the training split
test_indices = np.array(list(set(range(25331)) - set(train_indices)))
# test_indices = np.random.choice(len(residue), 30, replace=False)  # not needed if the train and test sets together already cover the whole dataset
with open(file) as afile:
a_reader = csv.reader(afile)  # read every row of the original dataset into a_reader
labels = next(a_reader)  # take the first row as the header labels
for row in a_reader:  # append each remaining row to the data list
data.append(row)
# write the training set
if not os.path.exists(a_train_file):
with open(a_train_file, "w", newline='') as a_train:
writer = csv.writer(a_train)
writer.writerows([labels])  # first row is the header
writer.writerows(np.array(data)[train_indices])
a_train.close()
# write the test set
if not os.path.exists(a_test_file):
with open(a_test_file, "w", newline='') as a_test:
writer = csv.writer(a_test)
writer.writerows([labels])  # first row is the header
writer.writerows(np.array(data)[test_indices])
a_test.close()
def read_labels_csv(file,header=True):
images = []
num_categories = 0
with open(file, 'r') as f:
reader = csv.reader(f)
rownum = 0
for row in reader:
if header and rownum == 0:
header = row
else:
if num_categories == 0:
num_categories = len(row) - 1
name = row[0]
labels = (np.asarray(row[1:num_categories + 1])).astype(np.float32)
labels = torch.from_numpy(labels)
item = (name, labels)
images.append(item)
rownum += 1
return images
class ISICDataset(Dataset):
def __init__(self,csv_file,image_path,transform=None):
self.images = read_labels_csv(csv_file)
self.root_dir = image_path
self.transform = transform
def __len__(self):
return len(self.images)
def __getitem__(self, index):
image_name,target = self.images[index]
# print(os.path.join(self.root_dir,image_name+'.jpg'))
image = Image.open(os.path.join(self.root_dir,image_name+'.jpg')).convert('RGB')
if self.transform is not None:
image = self.transform(image)
return image,target
if __name__ == '__main__':
split_csv(file=r"/home/huangyinyue/ISIC_2019/ISIC_2019_Training_GroundTruth.csv")
|
"""Package for useful ox_mon commands and tasks
"""
|
coins = 0
price = round(float(input()), 2)
while price != 0:
if price >= 2:
price -= 2
coins += 1
elif price >= 1:
price -= 1
coins += 1
elif price >= 0.50:
price -= 0.50
coins += 1
elif price >= 0.20:
price -= 0.20
coins += 1
elif price >= 0.10:
price -= 0.10
coins += 1
elif price >= 0.05:
price -= 0.05
coins += 1
elif price >= 0.02:
price -= 0.02
coins += 1
elif price >= 0.01:
price -= 0.01
coins += 1
price = round(price, 2)
print (coins)
|
# extract and plot each detected face from frames sampled out of a video
from facenet_pytorch import MTCNN
from cv2 import cv2
from PIL import Image
import numpy as np
from matplotlib import pyplot as plt
from tqdm.notebook import tqdm
import os
import tensorflow as tf
from torchvision import models
import torch
from torchvision import transforms
from pathlib import Path
def getface_from_video(path):
# Create face detector
mtcnn = MTCNN(margin=20, post_process=False)
# Load a video
v_cap = cv2.VideoCapture(path)
v_len = int(v_cap.get(cv2.CAP_PROP_FRAME_COUNT))
# Loop through video, taking a handful of frames to form a batch
frames = []
for i in tqdm(range(v_len)):
# Load frame
success = v_cap.grab()
if i % 50 == 0:
success, frame = v_cap.retrieve()
else:
continue
if not success:
continue
# Add to batch
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
frames.append(Image.fromarray(frame))
# Detect faces in batch
try:
faces = mtcnn(frames)
for i in range(len(faces)):
plt.imshow(faces[i].permute(1, 2, 0).int().numpy())
plt.axis('off')
#plt.show()
except:
print("Error in detection")
return plt
dir(models)
os.environ["KMP_DUPLICATE_LIB_OK"]="TRUE"
TRAIN_DIR = "E:\\F Y P\\Personality prediction using image processing\\datasets\\First Impressions V2 (CVPR'17)\\train\\Extracted\\train videos\\"
TEST_DIR = "E:\\F Y P\\Personality prediction using image processing\\datasets\\First Impressions V2 (CVPR'17)\\test\\Extracted\\test videos\\"
VAL_DIR = "E:\\F Y P\\Personality prediction using image processing\\datasets\\First Impressions V2 (CVPR'17)\\validate\\Extracted\\validation videos\\"
PIC_TRAIN_DIR = "E:\\F Y P\\Personality prediction using image processing\\datasets\\First Impressions V2 (CVPR'17)\\train\\Extracted\\face\\"
PIC_TEST_DIR = "E:\\F Y P\\Personality prediction using image processing\\datasets\\First Impressions V2 (CVPR'17)\\test\\Extracted\\face\\"
PIC_VAL_DIR = "E:\\F Y P\\Personality prediction using image processing\\datasets\\First Impressions V2 (CVPR'17)\\validate\\Extracted\\face\\"
train_videos = [TRAIN_DIR+i for i in os.listdir(TRAIN_DIR)]
test_videos = [TEST_DIR+i for i in os.listdir(TEST_DIR)]
val_videos = [VAL_DIR+i for i in os.listdir(VAL_DIR)]
i=0
while (i<len(val_videos)):
#print(train_videos[i])
fig = getface_from_video(val_videos[i])
fig.savefig(os.path.splitext(PIC_VAL_DIR+Path(val_videos[i]).name)[0] +".jpg", bbox_inches='tight')
i+=1
|
from scheme import *
from scheme.supplemental import ObjectReference
from spire.runtime.runtime import Runtime
from spire.support.daemon import *
SCHEMA = Structure({
'detached': Boolean(default=True),
'gid': Text(nonnull=True),
'pidfile': Text(nonnull=True),
'uid': Text(nonnull=True),
})
class Runtime(Runtime):
def __init__(self, configuration=None, assembly=None, **params):
super(Runtime, self).__init__(configuration, assembly)
self.deploy()
self.startup()
configuration = self.configuration.get('daemon') or {}
configuration = SCHEMA.process(configuration, serialized=True)
for attr, value in params.iteritems():
if value is not None:
configuration[attr] = value
self.daemon = self.assembly.collate(Daemon, single=True)
if not self.daemon:
raise Exception('no daemon component')
if configuration['detached']:
detach_process()
self.pidfile = None
if 'pidfile' in configuration:
self.pidfile = Pidfile(configuration['pidfile'])
self.pidfile.write()
if 'uid' in configuration:
switch_user(configuration['uid'], configuration.get('gid'))
self.daemon.run()
|
import pywt
import matplotlib.pyplot as plt
from matplotlib.image import imread
import numpy as np
"""Image compression using discrete Wavelet transform."""
plt.rcParams['figure.figsize'] = [8, 8]
plt.rcParams.update({'font.size': 18})
im = imread('data/dog.jpg')
im_gray = np.mean(im, -1) # convert RGB to gray scale
# Wavelet Compression
n = 4
# Use Daubechies 1 wavelet family.
w = 'db1'
coeffs = pywt.wavedec2(im_gray, wavelet=w, level=n)
coeff_arr, coeff_slices = pywt.coeffs_to_array(coeffs)
Csort = np.sort(np.abs(coeff_arr.reshape(-1)))
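# Keep only the largest `keep` fraction of wavelet coefficients: Csort holds the
# coefficient magnitudes in ascending order, so the value at index
# floor((1 - keep) * len(Csort)) is the threshold below which coefficients are
# zeroed out before reconstruction.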
for keep in (0.1, 0.05, 0.01, 0.005):
thresh = Csort[int(np.floor((1 - keep) * len(Csort)))]
ind = np.abs(coeff_arr) > thresh
Cfilt = coeff_arr * ind # Threshold small indices
coeffs_filt = pywt.array_to_coeffs(Cfilt, coeff_slices, output_format='wavedec2')
# Plot reconstruction
Arecon = pywt.waverec2(coeffs_filt, wavelet=w)
plt.figure()
plt.imshow(Arecon.astype('uint8'), cmap='gray')
plt.axis('off')
plt.title('keep = ' + str(keep))
plt.show()
# Conclusion: as we can see, image compression works better with Wavelets than with FFT.
|
from keepr.__main__ import run_application
from click.testing import CliRunner
import sys
sys.path.append('..')
def test_install_package():
runner = CliRunner()
result = runner.invoke(run_application, ['install', 'click'])
assert result.exit_code == 0
def test_install_package_req():
runner = CliRunner()
result = runner.invoke(
run_application, [
'install', '-r', 'requirements_test.txt'])
assert result.exit_code == 0
def test_uninstall_package():
runner = CliRunner()
result = runner.invoke(run_application, ['uninstall', 'click'])
assert result.exit_code == 0
def test_update_package():
runner = CliRunner()
result = runner.invoke(run_application, ['install', '-u', 'click'])
assert result.exit_code == 0
|
import click
from backend.bots import CCIBot, CGroupBot
@click.command()
@click.argument("bot_name", nargs=1)
def cci(bot_name):
if bot_name == "cci_bot":
CCIBot().run()
elif bot_name == "cgroup_bot":
CGroupBot().run()
else:
click.echo("No such bot yet...")
if __name__ == '__main__':
cci()
|
# MIT License
#
# Copyright (c) 2020 - Present nxtlo
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""Aiobungie unit tests."""
|
# Open3D: www.open3d.org
# The MIT License (MIT)
# See license file or visit www.open3d.org for details
# examples/Python/Utility/file.py
from os import listdir, makedirs
from os.path import exists, isfile, join, splitext
import shutil
import re
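# sorted_alphanum implements a natural sort: file names are split into digit and
# non-digit chunks and the digit chunks are compared as integers, so
# "frame2.png" sorts before "frame10.png".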
def sorted_alphanum(file_list_ordered):
convert = lambda text: int(text) if text.isdigit() else text
alphanum_key = lambda key: [convert(c) for c in re.split('([0-9]+)', key)]
return sorted(file_list_ordered, key=alphanum_key)
def get_file_list(path, extension=None):
if extension is None:
file_list = [path + f for f in listdir(path) if isfile(join(path, f))]
else:
file_list = [path + f for f in listdir(path)
if isfile(join(path, f)) and splitext(f)[1] == extension]
file_list = sorted_alphanum(file_list)
return file_list
def add_if_exists(path_dataset, folder_names):
for folder_name in folder_names:
if exists(join(path_dataset, folder_name)):
path = join(path_dataset, folder_name)
return path
def get_rgbd_folders(path_dataset):
path_color = add_if_exists(path_dataset, ["image/", "rgb/", "color/"])
path_depth = join(path_dataset, "depth/")
return path_color, path_depth
def get_rgbd_file_lists(path_dataset):
path_color, path_depth = get_rgbd_folders(path_dataset)
color_files = get_file_list(path_color, ".jpg") + \
get_file_list(path_color, ".png")
depth_files = get_file_list(path_depth, ".png")
return color_files, depth_files
def make_clean_folder(path_folder):
if not exists(path_folder):
makedirs(path_folder)
else:
shutil.rmtree(path_folder)
makedirs(path_folder)
def check_folder_structure(path_dataset):
path_color, path_depth = get_rgbd_folders(path_dataset)
assert exists(path_depth), \
"Path %s does not exist!" % path_depth
assert exists(path_color), \
"Path %s does not exist!" % path_color
|
# Copyright (c) 2021.
# The copyright lies with Timo Hirsch-Hoffmann, the further use is only permitted with reference to source
import json
import urllib.request
from RiotGames.API.RiotApi import RiotApi
class Match(RiotApi):
__timeline_by_match_id_url: str = "https://{}.api.riotgames.com/lol/match/v4/timelines/by-match/{}?api_key={}"
def __init__(self, apikey: str):
"""
:param apikey:
"""
super().__init__(apikey)
self.__super = super()
def by_id(self, match_id: int, region: str):
"""
Special Function still in development
https://developer.riotgames.com/apis#match-v4/GET_getMatchlist
TODO
:param match_id:
:param region:
:return:
"""
pass
def matchlist_by_account_id(self, account_id: str, begin_time: int = None, end_time: int = None,
begin_index: int = None, end_index: int = None, champions: list = None,
queue: list = None, season: list = None):
"""
Special Function still in development
https://developer.riotgames.com/apis#match-v4/GET_getMatchlist
TODO
format url
:param account_id:
encrypted account id
:param begin_time:
:param end_time:
:param begin_index:
:param end_index:
:param champions:
:param queue:
:param season:
:return:
"""
pass
def timeline_by_match_id(self, match_id: int, region: str) -> dict:
"""
:param match_id:
:param region:
:return:
"""
return json.loads(
urllib.request.urlopen(
self.__timeline_by_match_id_url.format(region, match_id, super()._get_key())).read().decode())
|
########################################################
# Rodrigo Leite - drigols #
# Last update: 31/10/2021 #
########################################################
from sklearn.datasets import load_iris
import pandas as pd
iris = load_iris()
x = pd.DataFrame(iris.data, columns=[iris.feature_names])
y = pd.Series(iris.target)
print("Load Iris dataset dimensions: {0}".format(x.shape))
print("Load Iris dataset features:\n", x.head(10))
|
from . import views
from django.urls import path
urlpatterns = [
path('',views.Home,name="Home"),
path('index/',views.index,name="index"),
path('registered/',views.registered,name="registered"),
path('exportmeout/',views.export,name="export"),
# path('',views.Registered,name="registered")
]
|
import cv2
import numpy as np
import dlib
from imutils import face_utils, translate
class Camera(object):
def __init__(self):
self.camera = cv2.VideoCapture(0)
p = "../data/shape_predictor_68_face_landmarks.dat"
self.detector = dlib.get_frontal_face_detector()
self.predictor = dlib.shape_predictor(p)
self.effect = "contours"
def __del__(self):
self.camera.release()
def return_jpg(self, frame):
ret, jpeg = cv2.imencode('.jpeg', frame)
return jpeg.tobytes()
def return_effect(self):
if self.effect == "contours":
frame = self.effect_canny()
elif self.effect == "baby":
frame = self.effect_baby_face()
elif self.effect == "blurr":
frame = self.effect_bluring_face()
elif self.effect == "cartoon":
frame = self.effect_cartoon()
elif self.effect == "doggy":
frame = self.effect_dog_face()
elif self.effect == "large":
frame = self.effect_enlarged()
elif self.effect == "mirrors":
frame = self.effect_mirror()
elif self.effect == "triangle":
frame = self.effect_delaunay_triangle()
elif self.effect == "glasses":
frame = self.effect_glasses()
return frame
# ---------------
# BABY FACE
# ---------------
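# "Baby face": both eyes are cropped, enlarged by about 1.3x and blended back
# onto the face with cv2.seamlessClone, which gives the large-eyed baby look.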
def effect_baby_face(self):
ret, frame = self.camera.read()
if not ret:
return False
offset = 4
scale = 1.3
frame_2 = frame.copy()
mask = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
mask = np.zeros(frame.shape, frame.dtype)
eye_mask = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
eye_mask = np.zeros(frame.shape, frame.dtype)
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
rects = self.detector(gray, 0)
for rect in rects:
shape = self.predictor(gray, rect)
shape = face_utils.shape_to_np(shape)
l_eye, r_eye = shape[36:42], shape[42:48]
(lx, ly, lw, lh) = cv2.boundingRect(l_eye)
(rx, ry, rw, rh) = cv2.boundingRect(r_eye)
l_eye = frame[ly-offset:ly+lh+offset, lx-offset:lx+lw+offset]
r_eye = frame[ry-offset:ry+rh+offset, rx-offset:rx+rw+offset]
center_ly = lx + int(lw / 2)
center_lx = ly + int(lh / 2) + 20
center_ry = rx + int(rw / 2)
center_rx = ry + int(rh / 2) + 20
mouth = shape[48:69]
(mx, my, mw, mh) = cv2.boundingRect(mouth)
mouth = frame[my-offset:my+mh+offset, mx-offset:mx+mw+offset]
center_my = mx + int(mw / 2)
center_mx = my + int(mh / 2)
ly_scaled = int((l_eye.shape[1]*scale)/2)
lx_scaled = int((l_eye.shape[0]*scale)/2)
ry_scaled = int((r_eye.shape[1]*scale)/2)
rx_scaled = int((r_eye.shape[0]*scale)/2)
l_eye = cv2.resize(l_eye, (ly_scaled*2, lx_scaled*2), interpolation = cv2.INTER_AREA)
r_eye = cv2.resize(r_eye, (ry_scaled*2, rx_scaled*2), interpolation = cv2.INTER_AREA)
frame[center_lx-lx_scaled:center_lx+lx_scaled, center_ly-ly_scaled:center_ly+ly_scaled] = l_eye
mask[center_lx-lx_scaled:center_lx+lx_scaled, center_ly-ly_scaled:center_ly+ly_scaled] = 255
frame[center_rx-rx_scaled:center_rx+rx_scaled, center_ry-ry_scaled:center_ry+ry_scaled] = r_eye
mask[center_rx-rx_scaled:center_rx+rx_scaled, center_ry-ry_scaled:center_ry+ry_scaled] = 255
final_center_x = int(np.mean([center_lx, center_rx]))
final_center_y = int(np.mean([center_ly, center_ry]))
frame = cv2.seamlessClone(frame, frame_2, mask, (final_center_y, final_center_x), cv2.NORMAL_CLONE)
return self.return_jpg(frame)
# ------------------
# ENLARGED EYES
# ------------------
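# Enlarged features: the eyes are scaled by ~1.7x and the mouth by 2x, then all
# three regions are seamless-cloned back onto the original frame.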
def effect_enlarged(self):
offset = 4
scale = 2
ret, frame = self.camera.read()
if not ret:
return False
frame_2 = frame.copy()
mask = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
mask = np.zeros(frame.shape, frame.dtype)
l_eye, r_eye = 0, 0
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
rects = self.detector(gray, 0)
for rect in rects:
shape = self.predictor(gray, rect)
shape = face_utils.shape_to_np(shape)
l_eye, r_eye = shape[36:42], shape[42:48]
(lx, ly, lw, lh) = cv2.boundingRect(l_eye)
(rx, ry, rw, rh) = cv2.boundingRect(r_eye)
l_eye = frame[ly-offset:ly+lh+offset, lx-offset:lx+lw+offset]
r_eye = frame[ry-offset:ry+rh+offset, rx-offset:rx+rw+offset]
center_ly = lx + int(lw / 2)
center_lx = ly + int(lh / 2) + 20
center_ry = rx + int(rw / 2)
center_rx = ry + int(rh / 2) + 20
mouth = shape[48:69]
(mx, my, mw, mh) = cv2.boundingRect(mouth)
mouth = frame[my-offset:my+mh+offset, mx-offset:mx+mw+offset]
center_my = mx + int(mw / 2)
center_mx = my + int(mh / 2)
ly_scaled = int((l_eye.shape[1]*1.7)/2)
lx_scaled = int((l_eye.shape[0]*1.7)/2)
ry_scaled = int((r_eye.shape[1]*1.7)/2)
rx_scaled = int((r_eye.shape[0]*1.7)/2)
l_eye = cv2.resize(l_eye, (ly_scaled*2, lx_scaled*2), interpolation = cv2.INTER_AREA)
r_eye = cv2.resize(r_eye, (ry_scaled*2, rx_scaled*2), interpolation = cv2.INTER_AREA)
my_scaled = int((mouth.shape[1]*scale)/2)
mx_scaled = int((mouth.shape[0]*scale)/2)
mouth = cv2.resize(mouth, (my_scaled*2, mx_scaled*2), interpolation = cv2.INTER_AREA)
frame[center_mx-mx_scaled:center_mx+mx_scaled, center_my-my_scaled:center_my+my_scaled] = mouth
mask[center_mx-mx_scaled:center_mx+mx_scaled, center_my-my_scaled:center_my+my_scaled] = 255
frame[center_lx-lx_scaled:center_lx+lx_scaled, center_ly-ly_scaled:center_ly+ly_scaled] = l_eye
mask[center_lx-lx_scaled:center_lx+lx_scaled, center_ly-ly_scaled:center_ly+ly_scaled] = 255
frame[center_rx-rx_scaled:center_rx+rx_scaled, center_ry-ry_scaled:center_ry+ry_scaled] = r_eye
mask[center_rx-rx_scaled:center_rx+rx_scaled, center_ry-ry_scaled:center_ry+ry_scaled] = 255
final_center_x = int(np.mean([center_lx, center_mx, center_rx]))
final_center_y = int(np.mean([center_ly, center_my, center_ry]))
frame = cv2.seamlessClone(frame, frame_2, mask, (final_center_y, final_center_x), cv2.NORMAL_CLONE)
return self.return_jpg(frame)
# ------------------
# BLURRING FACE
# ------------------
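# Face anonymisation: the detected face rectangle is Gaussian-blurred and then
# pixelated before being written back into the frame.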
def effect_bluring_face(self):
ret, frame = self.camera.read()
if not ret:
return False
face = 0
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
rects = self.detector(gray, 0)
for rect in rects:
shape = self.predictor(gray, rect)
shape = face_utils.shape_to_np(shape)
(x, y, w, h) = face_utils.rect_to_bb(rect)
face = frame[y:y+h, x:x+w]
face = blurr_face(face)
face = pixel_face(face)
frame[y:y+h, x:x+w] = face
return self.return_jpg(frame)
# ------------------------
# DELAUNAY TRIANGLE
# ------------------------
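# Delaunay triangulation: the 68 dlib landmarks are triangulated inside the
# convex hull of the face and the resulting mesh is drawn as white lines on the
# grayscale frame.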
def effect_delaunay_triangle(self):
ret, frame = self.camera.read()
if not ret:
return False
jaw = [0, 17]
r_eyebrow, l_eyebrow = [18, 22], [23, 27]
nose = [28, 36]
r_eye, l_eye = [37, 42], [43, 48]
mouth = [49, 68]
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
mask = np.zeros_like(gray)
faces = self.detector(gray, 0)
for face in faces:
landmark = self.predictor(gray, face)
landmark_points = []
for n in range(68):
x = landmark.part(n).x
y = landmark.part(n).y
landmark_points.append((x, y))
points = np.array(landmark_points, np.int32)
convexhull = cv2.convexHull(points)
cv2.fillConvexPoly(mask, convexhull, 255)
face = cv2.bitwise_and(frame, frame, mask=mask)
gray = delaunay_traingle(convexhull, landmark_points, gray, landmark_points)
return self.return_jpg(gray)
# --------------
# DOG FACE
# --------------
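# Dog filter: transparent PNG overlays (ears and nose) are resized relative to
# the landmark distances and alpha-blended onto the frame at the forehead and
# nose-tip positions.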
def effect_dog_face(self):
ret, frame = self.camera.read()
if not ret:
return False
dog_nose = cv2.imread("../images/nose.png", -1)
dog_ears = cv2.imread("../images/ears.png", -1)
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
rects = self.detector(gray, 0)
for rect in rects:
shape = self.predictor(gray, rect)
shape = face_utils.shape_to_np(shape)
ears_width = int(abs(shape[0][0] - shape[16][0]) * 1.5)
ears_height = int(ears_width * 0.4)
ears_x = int((shape[22][0] + shape[23][0])/2)
ears_y = shape[20][1] - 50
half_width = int(ears_width/2.0)
half_height = int(ears_height/2.0)
y1, y2 = ears_y - half_height, ears_y + half_height
x1, x2 = ears_x - half_width, ears_x + half_width
dog_ears = cv2.resize(dog_ears, (half_width*2, half_height*2), interpolation = cv2.INTER_AREA)
alpha_s = dog_ears[:, :, 3] / 255.0
alpha_l = 1.0 - alpha_s
for c in range(0, 3):
frame[y1:y2, x1:x2, c] = (alpha_s * dog_ears[:, :, c] +
alpha_l * frame[y1:y2, x1:x2, c])
nose_width = int(abs(shape[36][0] - shape[32][0]) * 1.7)
nose_height = int(nose_width * 0.7)
(nose_x, nose_y) = shape[30]
half_width = int(nose_width/2.0)
half_height = int(nose_height/2.0)
y1, y2 = nose_y - half_height, nose_y + half_height
x1, x2 = nose_x - half_width, nose_x + half_width
dog_nose = cv2.resize(dog_nose, (half_width*2, half_height*2), interpolation = cv2.INTER_AREA)
alpha_s = dog_nose[:, :, 3] / 255.0
alpha_l = 1.0 - alpha_s
for c in range(0, 3):
frame[y1:y2, x1:x2, c] = (alpha_s * dog_nose[:, :, c] +
alpha_l * frame[y1:y2, x1:x2, c])
return self.return_jpg(frame)
# -----------------
# FUNNY GLASSES
# -----------------
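# Glasses overlay: a transparent glasses PNG is scaled relative to the face
# landmarks and alpha-blended around the nose-bridge landmark.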
def effect_glasses(self):
ret, frame = self.camera.read()
if not ret:
return False
glasses = cv2.imread("../images/glasses.png", -1)
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
rects = self.detector(gray, 0)
for rect in rects:
shape = self.predictor(gray, rect)
shape = face_utils.shape_to_np(shape)
glasses_width = int(abs(shape[36][0] - shape[32][0]) * 4)
glasses_height = int(glasses_width * 0.7)
(glasses_x, glasses_y) = shape[30]
glasses_y -= 20
half_width = int(glasses_width/2.0)
half_height = int(glasses_height/2.0)
y1, y2 = glasses_y - half_height, glasses_y + half_height
x1, x2 = glasses_x - half_width, glasses_x + half_width
glasses = cv2.resize(glasses, (half_width*2, half_height*2), interpolation = cv2.INTER_AREA)
alpha_s = glasses[:, :, 3] / 255.0
alpha_l = 1.0 - alpha_s
for c in range(0, 3):
frame[y1:y2, x1:x2, c] = (alpha_s * glasses[:, :, c] +
alpha_l * frame[y1:y2, x1:x2, c])
return self.return_jpg(frame)
# ----------------------
# CARTOON-ISH
# ----------------------
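# The cartoon effect combines an adaptive-threshold edge mask with a
# bilateral-filtered (edge-preserving, colour-smoothing) copy of the frame;
# masking the smoothed colours with the edges gives flat colour regions
# outlined in black.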
def effect_cartoon(self):
ret, frame = self.camera.read()
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
gray = cv2.medianBlur(gray, 5)
edges = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 9, 6)
color = cv2.bilateralFilter(frame, 9, 150, 0.25)
cartoon = cv2.bitwise_and(color, color, mask=edges)
return self.return_jpg(cartoon)
# ------------
# CANNY
# ------------
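# Auto-Canny: the lower/upper hysteresis thresholds are derived from the median
# intensity of the blurred frame (roughly 0.77x and 1.33x the median), so the
# edge detector adapts to the overall brightness of the scene.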
def effect_canny(self):
ret, frame = self.camera.read()
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
blurred = cv2.GaussianBlur(gray, (3, 3), 0)
median = np.median(blurred)
l_edge = int(max(0, 0.77 * median))
u_edge = int(max(0, 1.33 * median))
canny = cv2.Canny(blurred, l_edge, u_edge)
return self.return_jpg(canny)
# ------------
# MIRRORS
# ------------
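# Mirror effect: the left half of the frame is flipped horizontally and stacked
# next to itself.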
def effect_mirror(self):
ret, frame = self.camera.read()
split = frame.shape[1] // 2
one_half = frame[:, :split, :]
sec_half = cv2.flip(one_half, 1)
frame = np.hstack((one_half, sec_half))
return self.return_jpg(frame)
# ---------------------
# ADDITIONAL FUNCTIONS
# ---------------------
def blurr_face(image):
(h, w) = image.shape[:2]
kernel_w = int(w/3.0)
kernel_h = int(h/3.0)
if kernel_w % 2 == 0:
kernel_w -= 1
else: kernel_w = 5
if kernel_h % 2 == 0:
kernel_h -= 1
else: kernel_h = 5
img = cv2.GaussianBlur(image, (kernel_w, kernel_h), 0)
return img
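# pixel_face pixelates a face crop by splitting it into a 16x16 grid and
# painting each cell with its mean BGR colour.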
def pixel_face(image):
blocks = 16
(h, w) = image.shape[:2]
xSteps = np.linspace(0, w, blocks+1, dtype="int")
ySteps = np.linspace(0, h, blocks+1, dtype="int")
for i in range(1, len(ySteps)):
for j in range(1, len(xSteps)):
startX = xSteps[j - 1]
startY = ySteps[i - 1]
endX = xSteps[j]
endY = ySteps[i]
roi = image[startY:endY, startX:endX]
(B, G, R) = [int(x) for x in cv2.mean(roi)[:3]]
cv2.rectangle(image, (startX, startY), (endX, endY),
(B, G, R), -1)
return image
def delaunay_traingle(convexHull, points, frame, landmark_points):
rect = cv2.boundingRect(convexHull)
subdiv = cv2.Subdiv2D(rect)
subdiv.insert(landmark_points)
triangles = subdiv.getTriangleList()
triangles = np.array(triangles, dtype=np.int32)
for t in triangles:
A, B, C = (t[0], t[1]), (t[2], t[3]), (t[4], t[5])
cv2.line(frame, A, B, (255, 255, 255), 1, cv2.LINE_AA, 0)
cv2.line(frame, B, C, (255, 255, 255), 1, cv2.LINE_AA, 0)
cv2.line(frame, A, C, (255, 255, 255), 1, cv2.LINE_AA, 0)
return frame
|
import requests
url = 'https://api.wildwildhack.ai/api/signedurl?extension=jpg'
headers = {"Authorization": "Bearer 211acc3c360b4dccaffefbab0b14d0c4"}
auth_token = '211acc3c360b4dccaffefbab0b14d0c4'
json_headers = {
'authorization': f'Bearer {auth_token}',
'content-type': 'application/json',
}
signed_url_first = requests.post(url, json={"extension": "jpg"}, headers=headers).json()
image_url_first = signed_url_first['url']
# "https://storage.googleapis.com/prod-reflect-videos/data/images/f590a9cb-172f-4fb0-8021-914e7afaa48d.jpg?GoogleAccessId=prod-images-admin@reface-prod-stage.iam.gserviceaccount.com&Expires=1631376256&Signature=0WBqKY1pfnU3oPP8AooDiMgmY9VPBi3LBVlrg%2BO9VGnoxytzX87dz%2FPS2mksb5GqHPVzWAsiIQBdGPPE2O1wUjCHOuH8gUpl5spgJnFPZGX2LlYx%2FxDyLcKOHpJ%2BrIcWNdaUMxlz%2B4K%2F2gHyUmd5bh5VdkodlPxmy59P5t3iIC8xBalu8fHxxPNBrftCKiF%2B6giAoe3l39MMkDBGyQi3yKs2xFHVj9pqcgAw0Ja5xcBpqxBAw0xS81L4efl%2Fe%2B1csanIMOvBRuGYiXHkTvhwu%2BRf2oMXr5L%2FPMakO0ElTxpKEH4%2BciIGbX6PrFzVYG4IGhsAsYemJShy5bFbnVRNVw=="
print(image_url_first)
img_put_headers = {
'content-type': 'image/jpeg',
}
# files = {'media': open('files/photo_2021-09-11_17-53-41.jpg', 'rb')}
with open('/Users/admin/Documents/RF_hckt/t_bot/files/photo_2021-09-11_17-53-41.jpg', 'rb') as f:
img_data = f.read()
res = requests.put(image_url_first, headers=img_put_headers, data=img_data)
img_path = res.url.split('?')[0]
img_put_headers = {
'Content-Type': 'application/json',
'accept': 'application/json',
'authorization': f'Bearer {auth_token}',
}
res = requests.post('https://api.wildwildhack.ai/api/face', headers=img_put_headers, json={
'path': 'https://storage.googleapis.com/prod-reflect-videos/data/images/81f2f429-2cca-464f-98a4-f5ae2c1e57e0.jpg'})
video_json_headers = {
'authorization': f'Bearer {auth_token}'
}
signed_video_url_first = requests.post(url, json={"extension": "mp4"}, headers=video_json_headers).json()
video_url_first = signed_video_url_first['url']
print(video_url_first)
video_put_headers = {
'content-type': 'video/mp4',
}
# files = {'media': open('files/photo_2021-09-11_17-53-41.jpg', 'rb')}
with open('files/IMG_5344.mp4', 'rb') as f:
video_data = f.read()
res = requests.put(video_url_first, headers=video_put_headers, data=video_data)
video_path = res.url.split('?')[0]
print(video_path)
video_put_headers = {
'Content-Type': 'application/json',
'accept': 'application/json',
'authorization': f'Bearer {auth_token}',
}
res = requests.post('https://api.wildwildhack.ai/api/video', headers=video_put_headers, json={
'path': 'https://storage.googleapis.com/prod-reflect-videos/data/inputs/b894f8dc-df16-4e24-bf76-63446fb01ebd.mp4'})
print(res)
#wait
res = requests.get('https://api.wildwildhack.ai/api/video/b894f8dc-df16-4e24-bf76-63446fb01ebd', headers=headers)
print(res)
#swap
res = requests.post('https://api.wildwildhack.ai/api/swap-video', headers=headers, json={
"video_id": "b894f8dc-df16-4e24-bf76-63446fb01ebd",
"facemapping":{"47f3b946-1358-4e28-b20b-c01e4b84750b":["e7c64439-bc38-4a50-9937-cf0d215e4c69"]}})
print(res)
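# The image and video flows above share the same three steps: request a signed
# upload URL, PUT the raw bytes to it, then use the resulting storage path.
# A minimal sketch of a reusable helper under that assumption; the function name
# and parameters are illustrative, not part of the API:
def upload_asset(local_path, extension, content_type, token):
    """Upload a local file via a signed URL and return its public storage path."""
    auth = {'authorization': f'Bearer {token}'}
    # 1) request a signed upload URL for this file extension
    signed = requests.post('https://api.wildwildhack.ai/api/signedurl',
                           json={'extension': extension}, headers=auth).json()
    # 2) PUT the raw bytes to the signed URL
    with open(local_path, 'rb') as f:
        put_res = requests.put(signed['url'], headers={'content-type': content_type}, data=f.read())
    # 3) the permanent storage path is the signed URL without its query string
    return put_res.url.split('?')[0]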
|
class Solution:
def maxProfit(self, prices):
"""
:type prices: List[int]
:rtype: int
"""
if len(prices) == 0:
return 0
min_price = prices[0]
result = 0
for price in prices[1:]:
result = max(price - min_price, result)
min_price = min(price, min_price)
return result
if __name__ == "__main__":
print(Solution().maxProfit([7, 1, 5, 3, 6, 4]))
print(Solution().maxProfit([7, 6, 4, 3, 1]))
|
# -*- coding: utf-8 -*-
"""
sphinxjp.themes.revealjs.directives
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:author: tell-k <ffk2005@gmail.com>
:copyright: tell-k. All Rights Reserved.
"""
from docutils import nodes
from docutils.parsers.rst import directives
from docutils.parsers.rst.roles import set_classes
from docutils.parsers.rst import Directive
from . import compat
__docformat__ = 'reStructuredText'
class revealjs(nodes.General, nodes.Element):
""" node for revealjs """
class rv_code(nodes.General, nodes.Element):
""" node for revealjs code section """
class rv_small(nodes.General, nodes.Element):
""" node for revealjs small text section """
class rv_note(nodes.General, nodes.Element):
""" node for revealjs presentation note """
def heading(argument):
""" directives choices for heading tag """
return directives.choice(argument, ('h1', 'h2', 'h3', 'h4', 'h5', 'h6'))
class RevealjsDirective(Directive):
""" Reveal.JS slide entry """
has_content = True
required_arguments = 0
optional_arguments = 100
final_argument_whitespace = False
option_spec = {
'id': directives.unchanged,
'class': directives.class_option,
'noheading': directives.flag,
'title-heading': heading,
'subtitle': directives.unchanged,
'subtitle-heading': directives.unchanged,
'data-autoslide': directives.unchanged,
'data-markdown': directives.unchanged,
'data-transition': directives.unchanged,
'data-transition-speed': directives.unchanged,
'data-background': directives.unchanged,
'data-background-repeat': directives.unchanged,
'data-background-size': directives.unchanged,
'data-background-transition': directives.unchanged,
'data-state': directives.unchanged,
'data-separator': directives.unchanged,
'data-separator-vertical': directives.unchanged,
'data-separator-notes': directives.unchanged,
'data-charset': directives.unchanged,
}
node_class = revealjs
def run(self):
""" build revealjs node """
set_classes(self.options)
text = '\n'.join(self.content)
node = self.node_class(text, **self.options)
self.add_name(node)
if "data-markdown" not in self.options:
self.state.nested_parse(self.content, self.content_offset, node)
if self.arguments:
node['title'] = " ".join(self.arguments)
node['noheading'] = ('noheading' in self.options)
options_list = (
'id',
'title-heading',
'subtitle-heading',
'data-autoslide',
'data-transition',
'data-transition-speed',
'data-background',
'data-background-repeat',
'data-background-size',
'data-background-transition',
'data-state',
'data-markdown',
'data-separator',
'data-separator-vertical',
'data-separator-notes',
'data-charset',
)
for option in options_list:
if option in self.options:
node[option] = self.options.get(option)
return [node]
class RvSmallDirective(Directive):
"""
Create small text tag.
"""
has_content = True
required_arguments = 0
optional_arguments = 0
final_argument_whitespace = False
option_spec = {
'class': directives.class_option,
}
node_class = rv_small
def run(self):
""" build rv_small node """
set_classes(self.options)
self.assert_has_content()
text = '\n'.join(self.content)
node = self.node_class(text, **self.options)
self.add_name(node)
self.state.nested_parse(self.content, self.content_offset, node)
return [node]
class RvNoteDirective(Directive):
"""
Directive for a notes tag.
"""
has_content = True
required_arguments = 0
optional_arguments = 0
final_argument_whitespace = False
option_spec = {
'class': directives.class_option,
}
node_class = rv_note
def run(self):
""" build rv_note node """
set_classes(self.options)
self.assert_has_content()
text = '\n'.join(self.content)
node = self.node_class(text, **self.options)
self.add_name(node)
self.state.nested_parse(self.content, self.content_offset, node)
return [node]
class RvCodeDirective(Directive):
"""
Directive for a code block with highlight.js
"""
has_content = True
required_arguments = 0
optional_arguments = 0
final_argument_whitespace = False
option_spec = {
'id': directives.unchanged,
'class': directives.class_option,
}
node_class = rv_code
def run(self):
""" build rv_code node """
set_classes(self.options)
self.assert_has_content()
node = self.node_class('\n'.join(self.content), **self.options)
return [node]
def visit_revealjs(self, node):
""" build start tag for revealjs """
section_attr = {}
markdown_headings = {"h1": "#", "h2": "##", "h3": "###",
"h4": "####", "h5": "#####", "h6": "######"}
if node.get("id"):
section_attr.update({"ids": [node.get("id")]})
attr_list = (
'data-autoslide',
'data-transition',
'data-transition-speed',
'data-background',
'data-background-repeat',
'data-background-size',
'data-background-transition',
'data-state',
'data-markdown',
'data-separator',
'data-separator-vertical',
'data-separator-notes',
'data-charset',
)
for attr in attr_list:
if node.get(attr) is not None:
section_attr.update({attr: node.get(attr)})
title = None
if node.get("title") and not node.get('noheading'):
title = node.get("title")
title_heading = node.get('title-heading', 'h2')
subtitle = node.get("subtitle")
subtitle_heading = node.get('subtitle-heading', 'h3')
if node.get("data-markdown") is not None:
title_base = compat.text("%(heading)s %(title)s \n")
title_text = None
if title:
title_text = title_base % dict(
heading=markdown_headings.get(title_heading),
title=title
)
subtitle_text = None
if subtitle:
subtitle_text = title_base % dict(
heading=markdown_headings.get(subtitle_heading),
title=subtitle
)
else:
title_base = compat.text("<%(heading)s>%(title)s</%(heading)s>\n")
title_text = None
if title:
title_text = title_base % dict(
title=title,
heading=title_heading)
subtitle_text = None
if subtitle:
subtitle_text = title_base % dict(
title=subtitle,
heading=subtitle_heading)
if node.get("data-markdown") is not None:
self.body.append(self.starttag(node, 'section', **section_attr))
if node.get("data-markdown") == compat.text(""):
self.body.append("<script type='text/template'>\n")
if title_text:
self.body.append(title_text)
if subtitle_text:
self.body.append(subtitle_text)
self.body.append(node.rawsource)
self.body.append("</script>\n")
else:
self.body.append(self.starttag(node, 'section', **section_attr))
if title_text:
self.body.append(title_text)
if subtitle_text:
self.body.append(subtitle_text)
self.set_first_last(node)
def depart_revealjs(self, node=None):
""" build end tag for revealjs """
self.body.append('</section>\n')
def visit_rv_code(self, node):
""" build start tag for rv_code """
self.body.append(self.starttag(node, 'pre'))
self.body.append("<code data-trim contenteditable>")
self.body.append(compat.escape_html(node.rawsource))
def depart_rv_code(self, node=None):
""" build end tag for rv_code """
self.body.append("</code>")
self.body.append("</pre>\n")
def visit_rv_small(self, node):
""" build start tag for rv_small """
self.body.append(self.starttag(node, 'small'))
self.set_first_last(node)
def depart_rv_small(self, node=None):
""" build end tag for rv_small"""
self.body.append("</small>\n")
def visit_rv_note(self, node):
""" build start tag for rv_note """
self.body.append(self.starttag(node, 'aside', **{'class': 'notes'}))
self.set_first_last(node)
def depart_rv_note(self, node=None):
""" build end tag for rv_note """
self.body.append("</aside>\n")
def setup(app):
"""Initialize """
app.add_node(revealjs, html=(visit_revealjs, depart_revealjs))
app.add_node(rv_code, html=(visit_rv_code, depart_rv_code))
app.add_node(rv_note, html=(visit_rv_note, depart_rv_note))
app.add_node(rv_small, html=(visit_rv_small, depart_rv_small))
app.add_directive('revealjs', RevealjsDirective)
app.add_directive('rv_code', RvCodeDirective)
app.add_directive('rv_note', RvNoteDirective)
app.add_directive('rv_small', RvSmallDirective)
return app
|
"""
"""
import contextvars
from ..langref import ATOM_TYPES
__all__ = ['is_atom', 'context']
_CURRENT_CONTEXT = contextvars.ContextVar('current-runtime-context', default=None)
def is_atom(value):
"""
"""
return isinstance(value, ATOM_TYPES)
class _CurrentContextProxy:
__slots__ = ()
__getattr__ = lambda s, n: getattr(_CURRENT_CONTEXT.get(), n)
__setattr__ = lambda s, n, v: setattr(_CURRENT_CONTEXT.get(), n, v)
__delattr__ = lambda s, n: delattr(_CURRENT_CONTEXT.get(), n)
__getitem__ = lambda s, n: _CURRENT_CONTEXT.get().__getitem__(n)
__setitem__ = lambda s, n, v: _CURRENT_CONTEXT.get().__setitem__(n, v)
__delitem__ = lambda s, n: _CURRENT_CONTEXT.get().__delitem__(n)
__enter__ = lambda s: _CURRENT_CONTEXT.get().__enter__()
    __exit__ = lambda s, *args: _CURRENT_CONTEXT.get().__exit__(*args)
__contains__ = lambda s, n: _CURRENT_CONTEXT.get().__contains__(n)
__dir__ = lambda s: dir(_CURRENT_CONTEXT.get())
__call__ = lambda s, v: _CURRENT_CONTEXT.get()(v)
__str__ = lambda s: _CURRENT_CONTEXT.get().__str__()
__repr__ = lambda s: _CURRENT_CONTEXT.get().__repr__()
__bool__ = lambda s: _CURRENT_CONTEXT.get() is not None
context = _CurrentContextProxy()
"""The current :class:`~rollit.runtime.core.RuntimeContext`.
"""
del _CurrentContextProxy
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Basic interface to Amazon MWS
# Based on http://code.google.com/p/amazon-mws-python
# Extended to include finances object
from __future__ import unicode_literals
import urllib
import hashlib
import hmac
import base64
import six
from erpnext.erpnext_integrations.doctype.amazon_mws_settings import xml_utils
import re
try:
from xml.etree.ElementTree import ParseError as XMLError
except ImportError:
from xml.parsers.expat import ExpatError as XMLError
from time import strftime, gmtime
from requests import request
from requests.exceptions import HTTPError
__all__ = [
'Feeds',
'Inventory',
'MWSError',
'Reports',
'Orders',
'Products',
'Recommendations',
'Sellers',
'Finances'
]
# See https://images-na.ssl-images-amazon.com/images/G/01/mwsportal/doc/en_US/bde/MWSDeveloperGuide._V357736853_.pdf page 8
# for a list of the end points and marketplace IDs
MARKETPLACES = {
"CA": "https://mws.amazonservices.ca", #A2EUQ1WTGCTBG2
"US": "https://mws.amazonservices.com", #ATVPDKIKX0DER",
"DE": "https://mws-eu.amazonservices.com", #A1PA6795UKMFR9
"ES": "https://mws-eu.amazonservices.com", #A1RKKUPIHCS9HS
"FR": "https://mws-eu.amazonservices.com", #A13V1IB3VIYZZH
"IN": "https://mws.amazonservices.in", #A21TJRUUN4KGV
"IT": "https://mws-eu.amazonservices.com", #APJ6JRA9NG5V4
"UK": "https://mws-eu.amazonservices.com", #A1F83G8C2ARO7P
"JP": "https://mws.amazonservices.jp", #A1VC38T7YXB528
"CN": "https://mws.amazonservices.com.cn", #AAHKV2X7AFYLW
"AE": " https://mws.amazonservices.ae", #A2VIGQ35RCS4UG
"MX": "https://mws.amazonservices.com.mx", #A1AM78C64UM0Y8
"BR": "https://mws.amazonservices.com", #A2Q3Y263D00KWC
}
class MWSError(Exception):
"""
Main MWS Exception class
"""
# Allows quick access to the response object.
    # Do not rely on this attribute; always check that it is not None.
response = None
def calc_md5(string):
"""Calculates the MD5 encryption for the given string
"""
md = hashlib.md5()
md.update(string)
return base64.encodestring(md.digest()).strip('\n') if six.PY2 \
else base64.encodebytes(md.digest()).decode().strip()
def remove_empty(d):
"""
    Helper function that removes all keys from a dictionary (d)
    that have an empty value.
"""
for key in list(d):
if not d[key]:
del d[key]
return d
def remove_namespace(xml):
xml = xml.decode('utf-8')
regex = re.compile(' xmlns(:ns2)?="[^"]+"|(ns2:)|(xml:)')
return regex.sub('', xml)
class DictWrapper(object):
def __init__(self, xml, rootkey=None):
self.original = xml
self._rootkey = rootkey
self._mydict = xml_utils.xml2dict().fromstring(remove_namespace(xml))
self._response_dict = self._mydict.get(list(self._mydict)[0], self._mydict)
@property
def parsed(self):
if self._rootkey:
return self._response_dict.get(self._rootkey)
else:
return self._response_dict
class DataWrapper(object):
"""
Text wrapper in charge of validating the hash sent by Amazon.
"""
def __init__(self, data, header):
self.original = data
if 'content-md5' in header:
hash_ = calc_md5(self.original)
if header['content-md5'] != hash_:
                raise MWSError("Content-MD5 mismatch, maybe amazon error...")
@property
def parsed(self):
return self.original
class MWS(object):
""" Base Amazon API class """
# This is used to post/get to the different uris used by amazon per api
# ie. /Orders/2011-01-01
# All subclasses must define their own URI only if needed
URI = "/"
# The API version varies in most amazon APIs
VERSION = "2009-01-01"
    # There seem to be some XML namespace issues; therefore every API subclass
    # is recommended to define its namespace, so that it can be referenced
    # like so: AmazonAPISubclass.NS.
# For more information see http://stackoverflow.com/a/8719461/389453
NS = ''
    # Some APIs are available only to either a "Merchant" or a "Seller";
    # the account type needs to be sent in every call to Amazon MWS.
    # This constant defines the exact name of the parameter Amazon expects
    # for the specific API being used.
    # Subclasses that require another account type, such as "Merchant",
    # need to override it like so:
    # ACCOUNT_TYPE = "Merchant"
ACCOUNT_TYPE = "SellerId"
def __init__(self, access_key, secret_key, account_id, region='US', domain='', uri="", version=""):
self.access_key = access_key
self.secret_key = secret_key
self.account_id = account_id
self.version = version or self.VERSION
self.uri = uri or self.URI
if domain:
self.domain = domain
elif region in MARKETPLACES:
self.domain = MARKETPLACES[region]
else:
error_msg = "Incorrect region supplied ('%(region)s'). Must be one of the following: %(marketplaces)s" % {
"marketplaces" : ', '.join(MARKETPLACES.keys()),
"region" : region,
}
raise MWSError(error_msg)
def make_request(self, extra_data, method="GET", **kwargs):
"""Make request to Amazon MWS API with these parameters
"""
# Remove all keys with an empty value because
# Amazon's MWS does not allow such a thing.
extra_data = remove_empty(extra_data)
params = {
'AWSAccessKeyId': self.access_key,
self.ACCOUNT_TYPE: self.account_id,
'SignatureVersion': '2',
'Timestamp': self.get_timestamp(),
'Version': self.version,
'SignatureMethod': 'HmacSHA256',
}
params.update(extra_data)
quote = urllib.quote if six.PY2 else urllib.parse.quote
request_description = '&'.join(['%s=%s' % (k, quote(params[k], safe='-_.~')) for k in sorted(params)])
signature = self.calc_signature(method, request_description)
url = '%s%s?%s&Signature=%s' % (self.domain, self.uri, request_description, quote(signature))
headers = {'User-Agent': 'python-amazon-mws/0.0.1 (Language=Python)'}
headers.update(kwargs.get('extra_headers', {}))
try:
            # The params dict is not passed as the `params` argument to request()
            # because the URL-encoded parameter string is needed here to compute the
            # signature; passing the dict would make requests re-encode it, so the
            # fully built URL is passed instead.
response = request(method, url, data=kwargs.get('body', ''), headers=headers)
response.raise_for_status()
# When retrieving data from the response object,
# be aware that response.content returns the content in bytes while response.text calls
# response.content and converts it to unicode.
data = response.content
            # The response headers are not checked to decide which content structure
            # to use because Amazon's MWS API sometimes returns XML error responses
            # with "text/plain" as the Content-Type.
try:
parsed_response = DictWrapper(data, extra_data.get("Action") + "Result")
except XMLError:
parsed_response = DataWrapper(data, response.headers)
except HTTPError as e:
error = MWSError(str(e))
error.response = e.response
raise error
# Store the response object in the parsed_response for quick access
parsed_response.response = response
return parsed_response
def get_service_status(self):
"""
        Returns a GREEN, GREEN_I, YELLOW or RED status,
        depending on the status/availability of the API it is being called from.
"""
return self.make_request(extra_data=dict(Action='GetServiceStatus'))
def calc_signature(self, method, request_description):
"""Calculate MWS signature to interface with Amazon
"""
sig_data = method + '\n' + self.domain.replace('https://', '').lower() + '\n' + self.uri + '\n' + request_description
sig_data = sig_data.encode('utf-8')
secret_key = self.secret_key.encode('utf-8')
digest = hmac.new(secret_key, sig_data, hashlib.sha256).digest()
return base64.b64encode(digest).decode('utf-8')
def get_timestamp(self):
"""
Returns the current timestamp in proper format.
"""
return strftime("%Y-%m-%dT%H:%M:%SZ", gmtime())
def enumerate_param(self, param, values):
"""
Builds a dictionary of an enumerated parameter.
Takes any iterable and returns a dictionary.
ie.
enumerate_param('MarketplaceIdList.Id', (123, 345, 4343))
returns
{
MarketplaceIdList.Id.1: 123,
MarketplaceIdList.Id.2: 345,
MarketplaceIdList.Id.3: 4343
}
"""
params = {}
if values is not None:
if not param.endswith('.'):
param = "%s." % param
for num, value in enumerate(values):
params['%s%d' % (param, (num + 1))] = value
return params
class Feeds(MWS):
""" Amazon MWS Feeds API """
ACCOUNT_TYPE = "Merchant"
def submit_feed(self, feed, feed_type, marketplaceids=None,
content_type="text/xml", purge='false'):
"""
Uploads a feed ( xml or .tsv ) to the seller's inventory.
Can be used for creating/updating products on Amazon.
"""
data = dict(Action='SubmitFeed',
FeedType=feed_type,
PurgeAndReplace=purge)
data.update(self.enumerate_param('MarketplaceIdList.Id.', marketplaceids))
md = calc_md5(feed)
return self.make_request(data, method="POST", body=feed,
extra_headers={'Content-MD5': md, 'Content-Type': content_type})
def get_feed_submission_list(self, feedids=None, max_count=None, feedtypes=None,
processingstatuses=None, fromdate=None, todate=None):
"""
        Returns a list of all feed submissions submitted in the previous 90 days
        that match the query parameters.
"""
data = dict(Action='GetFeedSubmissionList',
MaxCount=max_count,
SubmittedFromDate=fromdate,
SubmittedToDate=todate,)
data.update(self.enumerate_param('FeedSubmissionIdList.Id', feedids))
data.update(self.enumerate_param('FeedTypeList.Type.', feedtypes))
data.update(self.enumerate_param('FeedProcessingStatusList.Status.', processingstatuses))
return self.make_request(data)
def get_submission_list_by_next_token(self, token):
data = dict(Action='GetFeedSubmissionListByNextToken', NextToken=token)
return self.make_request(data)
def get_feed_submission_count(self, feedtypes=None, processingstatuses=None, fromdate=None, todate=None):
data = dict(Action='GetFeedSubmissionCount',
SubmittedFromDate=fromdate,
SubmittedToDate=todate)
data.update(self.enumerate_param('FeedTypeList.Type.', feedtypes))
data.update(self.enumerate_param('FeedProcessingStatusList.Status.', processingstatuses))
return self.make_request(data)
def cancel_feed_submissions(self, feedids=None, feedtypes=None, fromdate=None, todate=None):
data = dict(Action='CancelFeedSubmissions',
SubmittedFromDate=fromdate,
SubmittedToDate=todate)
data.update(self.enumerate_param('FeedSubmissionIdList.Id.', feedids))
data.update(self.enumerate_param('FeedTypeList.Type.', feedtypes))
return self.make_request(data)
def get_feed_submission_result(self, feedid):
data = dict(Action='GetFeedSubmissionResult', FeedSubmissionId=feedid)
return self.make_request(data)
class Reports(MWS):
""" Amazon MWS Reports API """
ACCOUNT_TYPE = "Merchant"
## REPORTS ###
def get_report(self, report_id):
data = dict(Action='GetReport', ReportId=report_id)
return self.make_request(data)
def get_report_count(self, report_types=(), acknowledged=None, fromdate=None, todate=None):
data = dict(Action='GetReportCount',
Acknowledged=acknowledged,
AvailableFromDate=fromdate,
AvailableToDate=todate)
data.update(self.enumerate_param('ReportTypeList.Type.', report_types))
return self.make_request(data)
def get_report_list(self, requestids=(), max_count=None, types=(), acknowledged=None,
fromdate=None, todate=None):
data = dict(Action='GetReportList',
Acknowledged=acknowledged,
AvailableFromDate=fromdate,
AvailableToDate=todate,
MaxCount=max_count)
data.update(self.enumerate_param('ReportRequestIdList.Id.', requestids))
data.update(self.enumerate_param('ReportTypeList.Type.', types))
return self.make_request(data)
def get_report_list_by_next_token(self, token):
data = dict(Action='GetReportListByNextToken', NextToken=token)
return self.make_request(data)
def get_report_request_count(self, report_types=(), processingstatuses=(), fromdate=None, todate=None):
data = dict(Action='GetReportRequestCount',
RequestedFromDate=fromdate,
RequestedToDate=todate)
data.update(self.enumerate_param('ReportTypeList.Type.', report_types))
data.update(self.enumerate_param('ReportProcessingStatusList.Status.', processingstatuses))
return self.make_request(data)
def get_report_request_list(self, requestids=(), types=(), processingstatuses=(),
max_count=None, fromdate=None, todate=None):
data = dict(Action='GetReportRequestList',
MaxCount=max_count,
RequestedFromDate=fromdate,
RequestedToDate=todate)
data.update(self.enumerate_param('ReportRequestIdList.Id.', requestids))
data.update(self.enumerate_param('ReportTypeList.Type.', types))
data.update(self.enumerate_param('ReportProcessingStatusList.Status.', processingstatuses))
return self.make_request(data)
def get_report_request_list_by_next_token(self, token):
data = dict(Action='GetReportRequestListByNextToken', NextToken=token)
return self.make_request(data)
def request_report(self, report_type, start_date=None, end_date=None, marketplaceids=()):
data = dict(Action='RequestReport',
ReportType=report_type,
StartDate=start_date,
EndDate=end_date)
data.update(self.enumerate_param('MarketplaceIdList.Id.', marketplaceids))
return self.make_request(data)
### ReportSchedule ###
def get_report_schedule_list(self, types=()):
data = dict(Action='GetReportScheduleList')
data.update(self.enumerate_param('ReportTypeList.Type.', types))
return self.make_request(data)
def get_report_schedule_count(self, types=()):
data = dict(Action='GetReportScheduleCount')
data.update(self.enumerate_param('ReportTypeList.Type.', types))
return self.make_request(data)
class Orders(MWS):
""" Amazon Orders API """
URI = "/Orders/2013-09-01"
VERSION = "2013-09-01"
NS = '{https://mws.amazonservices.com/Orders/2011-01-01}'
def list_orders(self, marketplaceids, created_after=None, created_before=None, lastupdatedafter=None,
lastupdatedbefore=None, orderstatus=(), fulfillment_channels=(),
payment_methods=(), buyer_email=None, seller_orderid=None, max_results='100'):
data = dict(Action='ListOrders',
CreatedAfter=created_after,
CreatedBefore=created_before,
LastUpdatedAfter=lastupdatedafter,
LastUpdatedBefore=lastupdatedbefore,
BuyerEmail=buyer_email,
SellerOrderId=seller_orderid,
MaxResultsPerPage=max_results,
)
data.update(self.enumerate_param('OrderStatus.Status.', orderstatus))
data.update(self.enumerate_param('MarketplaceId.Id.', marketplaceids))
data.update(self.enumerate_param('FulfillmentChannel.Channel.', fulfillment_channels))
data.update(self.enumerate_param('PaymentMethod.Method.', payment_methods))
return self.make_request(data)
def list_orders_by_next_token(self, token):
data = dict(Action='ListOrdersByNextToken', NextToken=token)
return self.make_request(data)
def get_order(self, amazon_order_ids):
data = dict(Action='GetOrder')
data.update(self.enumerate_param('AmazonOrderId.Id.', amazon_order_ids))
return self.make_request(data)
def list_order_items(self, amazon_order_id):
data = dict(Action='ListOrderItems', AmazonOrderId=amazon_order_id)
return self.make_request(data)
def list_order_items_by_next_token(self, token):
data = dict(Action='ListOrderItemsByNextToken', NextToken=token)
return self.make_request(data)
class Products(MWS):
""" Amazon MWS Products API """
URI = '/Products/2011-10-01'
VERSION = '2011-10-01'
NS = '{http://mws.amazonservices.com/schema/Products/2011-10-01}'
def list_matching_products(self, marketplaceid, query, contextid=None):
""" Returns a list of products and their attributes, ordered by
relevancy, based on a search query that you specify.
Your search query can be a phrase that describes the product
or it can be a product identifier such as a UPC, EAN, ISBN, or JAN.
"""
data = dict(Action='ListMatchingProducts',
MarketplaceId=marketplaceid,
Query=query,
QueryContextId=contextid)
return self.make_request(data)
def get_matching_product(self, marketplaceid, asins):
""" Returns a list of products and their attributes, based on a list of
ASIN values that you specify.
"""
data = dict(Action='GetMatchingProduct', MarketplaceId=marketplaceid)
data.update(self.enumerate_param('ASINList.ASIN.', asins))
return self.make_request(data)
def get_matching_product_for_id(self, marketplaceid, type, id):
""" Returns a list of products and their attributes, based on a list of
product identifier values (asin, sellersku, upc, ean, isbn and JAN)
Added in Fourth Release, API version 2011-10-01
"""
data = dict(Action='GetMatchingProductForId',
MarketplaceId=marketplaceid,
IdType=type)
data.update(self.enumerate_param('IdList.Id', id))
return self.make_request(data)
def get_competitive_pricing_for_sku(self, marketplaceid, skus):
""" Returns the current competitive pricing of a product,
based on the SellerSKU and MarketplaceId that you specify.
"""
data = dict(Action='GetCompetitivePricingForSKU', MarketplaceId=marketplaceid)
data.update(self.enumerate_param('SellerSKUList.SellerSKU.', skus))
return self.make_request(data)
def get_competitive_pricing_for_asin(self, marketplaceid, asins):
""" Returns the current competitive pricing of a product,
based on the ASIN and MarketplaceId that you specify.
"""
data = dict(Action='GetCompetitivePricingForASIN', MarketplaceId=marketplaceid)
data.update(self.enumerate_param('ASINList.ASIN.', asins))
return self.make_request(data)
def get_lowest_offer_listings_for_sku(self, marketplaceid, skus, condition="Any", excludeme="False"):
data = dict(Action='GetLowestOfferListingsForSKU',
MarketplaceId=marketplaceid,
ItemCondition=condition,
ExcludeMe=excludeme)
data.update(self.enumerate_param('SellerSKUList.SellerSKU.', skus))
return self.make_request(data)
def get_lowest_offer_listings_for_asin(self, marketplaceid, asins, condition="Any", excludeme="False"):
data = dict(Action='GetLowestOfferListingsForASIN',
MarketplaceId=marketplaceid,
ItemCondition=condition,
ExcludeMe=excludeme)
data.update(self.enumerate_param('ASINList.ASIN.', asins))
return self.make_request(data)
def get_product_categories_for_sku(self, marketplaceid, sku):
data = dict(Action='GetProductCategoriesForSKU',
MarketplaceId=marketplaceid,
SellerSKU=sku)
return self.make_request(data)
def get_product_categories_for_asin(self, marketplaceid, asin):
data = dict(Action='GetProductCategoriesForASIN',
MarketplaceId=marketplaceid,
ASIN=asin)
return self.make_request(data)
def get_my_price_for_sku(self, marketplaceid, skus, condition=None):
data = dict(Action='GetMyPriceForSKU',
MarketplaceId=marketplaceid,
ItemCondition=condition)
data.update(self.enumerate_param('SellerSKUList.SellerSKU.', skus))
return self.make_request(data)
def get_my_price_for_asin(self, marketplaceid, asins, condition=None):
data = dict(Action='GetMyPriceForASIN',
MarketplaceId=marketplaceid,
ItemCondition=condition)
data.update(self.enumerate_param('ASINList.ASIN.', asins))
return self.make_request(data)
class Sellers(MWS):
""" Amazon MWS Sellers API """
URI = '/Sellers/2011-07-01'
VERSION = '2011-07-01'
NS = '{http://mws.amazonservices.com/schema/Sellers/2011-07-01}'
def list_marketplace_participations(self):
"""
Returns a list of marketplaces a seller can participate in and
a list of participations that include seller-specific information in that marketplace.
The operation returns only those marketplaces where the seller's account is in an active state.
"""
data = dict(Action='ListMarketplaceParticipations')
return self.make_request(data)
def list_marketplace_participations_by_next_token(self, token):
"""
Takes a "NextToken" and returns the same information as "list_marketplace_participations".
Based on the "NextToken".
"""
        data = dict(Action='ListMarketplaceParticipationsByNextToken', NextToken=token)
return self.make_request(data)
#### Fulfillment APIs ####
class InboundShipments(MWS):
URI = "/FulfillmentInboundShipment/2010-10-01"
VERSION = '2010-10-01'
# To be completed
class Inventory(MWS):
""" Amazon MWS Inventory Fulfillment API """
URI = '/FulfillmentInventory/2010-10-01'
VERSION = '2010-10-01'
NS = "{http://mws.amazonaws.com/FulfillmentInventory/2010-10-01}"
def list_inventory_supply(self, skus=(), datetime=None, response_group='Basic'):
""" Returns information on available inventory """
data = dict(Action='ListInventorySupply',
QueryStartDateTime=datetime,
ResponseGroup=response_group,
)
data.update(self.enumerate_param('SellerSkus.member.', skus))
return self.make_request(data, "POST")
def list_inventory_supply_by_next_token(self, token):
data = dict(Action='ListInventorySupplyByNextToken', NextToken=token)
return self.make_request(data, "POST")
class OutboundShipments(MWS):
URI = "/FulfillmentOutboundShipment/2010-10-01"
VERSION = "2010-10-01"
# To be completed
class Recommendations(MWS):
""" Amazon MWS Recommendations API """
URI = '/Recommendations/2013-04-01'
VERSION = '2013-04-01'
NS = "{https://mws.amazonservices.com/Recommendations/2013-04-01}"
def get_last_updated_time_for_recommendations(self, marketplaceid):
"""
Checks whether there are active recommendations for each category for the given marketplace, and if there are,
returns the time when recommendations were last updated for each category.
"""
data = dict(Action='GetLastUpdatedTimeForRecommendations',
MarketplaceId=marketplaceid)
return self.make_request(data, "POST")
def list_recommendations(self, marketplaceid, recommendationcategory=None):
"""
Returns your active recommendations for a specific category or for all categories for a specific marketplace.
"""
data = dict(Action="ListRecommendations",
MarketplaceId=marketplaceid,
RecommendationCategory=recommendationcategory)
return self.make_request(data, "POST")
def list_recommendations_by_next_token(self, token):
"""
Returns the next page of recommendations using the NextToken parameter.
"""
data = dict(Action="ListRecommendationsByNextToken",
NextToken=token)
return self.make_request(data, "POST")
class Finances(MWS):
""" Amazon Finances API"""
URI = '/Finances/2015-05-01'
VERSION = '2015-05-01'
NS = "{https://mws.amazonservices.com/Finances/2015-05-01}"
    def list_financial_events(self, posted_after=None, posted_before=None,
amazon_order_id=None, max_results='100'):
data = dict(Action='ListFinancialEvents',
PostedAfter=posted_after,
PostedBefore=posted_before,
AmazonOrderId=amazon_order_id,
MaxResultsPerPage=max_results,
)
return self.make_request(data)
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nvidia.dali.pipeline import Pipeline
import nvidia.dali.types as types
import nvidia.dali.fn as fn
import numpy as np
import cv2
from scipy.ndimage import convolve1d
import os
from nose.tools import raises
from nose.plugins.attrib import attr
from test_utils import get_dali_extra_path, check_batch, compare_pipelines, RandomlyShapedDataIterator, dali_type
data_root = get_dali_extra_path()
images_dir = os.path.join(data_root, 'db', 'single', 'jpeg')
test_iters = 4
shape_layout_axes_cases = [((20, 20, 30, 3), "DHWC", 3), ((20, 20, 30), "", 3),
((20, 30, 3), "HWC", 2), ((20, 30), "HW", 2),
((3, 30, 20), "CWH", 2), ((5, 20, 30, 3), "FHWC", 2),
((5, 10, 10, 7, 3), "FDHWC", 3), ((5, 3, 20, 30), "FCHW", 2),
((3, 5, 10, 10, 7), "CFDHW", 3)]
def to_batch(tl, batch_size):
return [np.array(tl[i]) for i in range(batch_size)]
def to_cv_sigma(sigma, axes=2):
if sigma is None:
return (0,) * axes
elif isinstance(sigma, (int, float)):
return (sigma,) * axes
elif (isinstance(sigma, np.ndarray) and len(sigma.shape) == 0):
return (float(sigma),) * axes
elif len(sigma) == 1:
return (sigma[0],) * axes
return tuple(reversed(sigma))
def to_cv_win_size(window_size, axes=2, sigma=None):
if window_size is None:
# when using cv2.getGaussianKernel we need to always provide window size
if sigma is not None:
sigma = to_cv_sigma(sigma, axes)
return tuple([int(3 * s + 0.5) * 2 + 1 for s in sigma])
return (0,) * axes
elif isinstance(window_size, int):
return (int(window_size),) * axes
elif (isinstance(window_size, np.ndarray) and len(window_size.shape) == 0):
return (int(window_size),) * axes
elif len(window_size) == 1:
return (int(window_size[0]),) * axes
# OpenCV shape is the other way round: (width, height)
return tuple(int(x) for x in reversed(window_size))
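# Illustrative conversions (values chosen for this sketch):
#   to_cv_sigma([1.0, 2.0])         -> (2.0, 1.0)  # reversed into OpenCV's (x, y) order
#   to_cv_win_size([7, 5])          -> (5, 7)      # reversed into (width, height)
#   to_cv_win_size(None, sigma=1.0) -> (7, 7)      # int(3 * 1.0 + 0.5) * 2 + 1 per axis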
def gaussian_cv(image, sigma, window_size):
sigma_x, sigma_y = to_cv_sigma(sigma)
window_size_cv = to_cv_win_size(window_size)
    # compute on floats and round to nearest by adding 0.5 before the uint8 cast
blurred = cv2.GaussianBlur(np.float32(image), window_size_cv, sigmaX=sigma_x, sigmaY=sigma_y)
return np.uint8(blurred + 0.5)
def gaussian_baseline(image, sigma, window_size, axes=2, skip_axes=0, dtype=np.uint8):
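    # Baseline built from separable 1-D Gaussian passes: one cv2.getGaussianKernel per
    # axis, applied along that axis with scipy's convolve1d (frame/channel axes skipped).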
sigma_xyz = to_cv_sigma(sigma, axes)
win_xyz = to_cv_win_size(window_size, axes, sigma)
filters = [cv2.getGaussianKernel(win_xyz[i], sigma_xyz[i]) for i in range(axes)]
filters = [np.float32(f).squeeze() for f in filters]
filters.reverse()
for i in reversed(range(axes)):
axis = i + skip_axes
image = convolve1d(np.float32(image), filters[i], axis, mode="mirror")
if dtype == np.float32:
return image
else:
return dtype(image + 0.5)
def get_gaussian_pipe(batch_size, sigma, window_size, op_type):
pipe = Pipeline(batch_size=batch_size, num_threads=4, device_id=0)
with pipe:
input, _ = fn.file_reader(file_root=images_dir, shard_id=0, num_shards=1)
decoded = fn.image_decoder(input, device="cpu", output_type=types.RGB)
if op_type == "gpu":
decoded = decoded.gpu()
blurred = fn.gaussian_blur(decoded, device=op_type, sigma=sigma, window_size=window_size)
pipe.set_outputs(blurred, decoded)
return pipe
def check_gaussian_blur(batch_size, sigma, window_size, op_type="cpu"):
pipe = get_gaussian_pipe(batch_size, sigma, window_size, op_type)
pipe.build()
for _ in range(test_iters):
result, input = pipe.run()
if op_type == "gpu":
result = result.as_cpu()
input = input.as_cpu()
input = to_batch(input, batch_size)
baseline_cv = [gaussian_cv(img, sigma, window_size) for img in input]
check_batch(result, baseline_cv, batch_size, max_allowed_error=1, expected_layout="HWC")
def test_image_gaussian_blur():
for dev in ["cpu", "gpu"]:
for sigma in [1.0]:
for window_size in [3, 5, None]:
if sigma is None and window_size is None:
continue
yield check_gaussian_blur, 10, sigma, window_size, dev
        # OpenCV uses fixed coefficients for small windows that differ from the Gaussian function
yield check_gaussian_blur, 10, None, 11, dev
@attr('slow')
def test_image_gaussian_blur_slow():
for dev in ["cpu", "gpu"]:
for sigma in [1.0, [1.0, 2.0]]:
for window_size in [3, 5, [7, 5], [5, 9], None]:
if sigma is None and window_size is None:
continue
yield check_gaussian_blur, 10, sigma, window_size, dev
        # OpenCV uses fixed coefficients for small windows that differ from the Gaussian function
for window_size in [15, [17, 31]]:
yield check_gaussian_blur, 10, None, window_size, dev
def check_gaussian_blur_cpu_gpu(batch_size, sigma, window_size):
cpu_pipe = get_gaussian_pipe(batch_size, sigma, window_size, "cpu")
gpu_pipe = get_gaussian_pipe(batch_size, sigma, window_size, "gpu")
compare_pipelines(cpu_pipe, gpu_pipe, batch_size, 16, max_allowed_error=1)
def test_gaussian_blur_cpu_gpu():
for window_size in [5, [7, 13]]:
yield check_gaussian_blur_cpu_gpu, 10, None, window_size
@attr('slow')
def test_gaussian_blur_cpu_gpu_slow():
for sigma in [1.0, [1.0, 2.0], None]:
for window_size in [3, 5, [7, 5], [5, 9], 11, 15, 31, None]:
if sigma is None and window_size is None:
continue
yield check_gaussian_blur_cpu_gpu, 10, sigma, window_size
def count_skip_axes(layout):
if layout.startswith("FC") or layout.startswith("CF"):
return 2
elif layout.startswith("F") or layout.startswith("C"):
return 1
else:
return 0
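# Illustrative results for layouts used above:
#   count_skip_axes("FCHW") -> 2, count_skip_axes("CWH") -> 1, count_skip_axes("HWC") -> 0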
def check_generic_gaussian_blur(
batch_size, sigma, window_size, shape, layout, axes, op_type="cpu", in_dtype=np.uint8,
out_dtype=types.NO_TYPE):
pipe = Pipeline(batch_size=batch_size, num_threads=4, device_id=0)
data = RandomlyShapedDataIterator(batch_size, max_shape=shape, dtype=in_dtype)
# Extract the numpy type from DALI, we can have float32 or the same as input
if out_dtype == types.NO_TYPE:
result_type = in_dtype
elif dali_type(in_dtype) == out_dtype:
result_type = in_dtype
else:
result_type = np.float32
with pipe:
input = fn.external_source(data, layout=layout)
if op_type == "gpu":
input = input.gpu()
blurred = fn.gaussian_blur(input, device=op_type, sigma=sigma,
window_size=window_size, dtype=out_dtype)
pipe.set_outputs(blurred, input)
pipe.build()
for _ in range(test_iters):
result, input = pipe.run()
if op_type == "gpu":
result = result.as_cpu()
input = input.as_cpu()
input = to_batch(input, batch_size)
skip_axes = count_skip_axes(layout)
baseline = [
gaussian_baseline(img, sigma, window_size, axes, skip_axes, dtype=result_type)
for img in input]
max_error = 1 if result_type != np.float32 else 1e-04
check_batch(result, baseline, batch_size, max_allowed_error=max_error, expected_layout=layout)
# Generate tests for single or per-axis sigma and window_size arguments
def generate_generic_cases(dev, t_in, t_out):
for shape, layout, axes in shape_layout_axes_cases:
for sigma in [1.0, [1.0, 2.0, 3.0]]:
for window_size in [3, 5, [7, 5, 9], [3, 5, 9], None]:
if isinstance(sigma, list):
sigma = sigma[0:axes]
if isinstance(window_size, list):
window_size = window_size[0:axes]
yield check_generic_gaussian_blur, 10, sigma, window_size, shape, layout, axes, dev, t_in, t_out
for window_size in [11, 15]:
yield check_generic_gaussian_blur, 10, None, window_size, shape, layout, axes, dev, t_in, t_out
def test_generic_gaussian_blur():
for dev in ["cpu", "gpu"]:
for (t_in, t_out) in [(np.uint8, types.NO_TYPE), (np.float32, types.FLOAT), (np.uint8, types.FLOAT)]:
yield from generate_generic_cases(dev, t_in, t_out)
@attr('slow')
def test_generic_gaussian_blur_slow():
for dev in ["cpu", "gpu"]:
for t_in in [np.uint8, np.int32, np.float32]:
for t_out in [types.NO_TYPE, types.FLOAT, dali_type(t_in)]:
yield from generate_generic_cases(dev, t_in, t_out)
def check_per_sample_gaussian_blur(
batch_size, sigma_dim, window_size_dim, shape, layout, axes, op_type="cpu"):
pipe = Pipeline(batch_size=batch_size, num_threads=4, device_id=0)
data = RandomlyShapedDataIterator(batch_size, max_shape=shape)
with pipe:
if sigma_dim is not None:
sigma = fn.random.uniform(range=[0.5, 3], shape=[sigma_dim])
sigma_arg = sigma
else:
# placeholder, so we can return something
sigma = fn.coin_flip(probability=0)
sigma_arg = None
if window_size_dim is not None:
window_radius = fn.random.uniform(range=[5, 10], shape=[window_size_dim])
window_size = fn.cast(window_radius, dtype=types.INT32) * 2 + 1
window_arg = window_size
else:
window_size = fn.coin_flip(probability=0)
window_arg = None
input = fn.external_source(data, layout=layout)
if op_type == "gpu":
input = input.gpu()
blurred = fn.gaussian_blur(input, device=op_type, sigma=sigma_arg, window_size=window_arg)
pipe.set_outputs(blurred, input, sigma, window_size)
pipe.build()
for _ in range(test_iters):
result, input, sigma, window_size = pipe.run()
if op_type == "gpu":
result = result.as_cpu()
input = input.as_cpu()
input = to_batch(input, batch_size)
sigma = to_batch(sigma, batch_size)
window_size = to_batch(window_size, batch_size)
baseline = []
for i in range(batch_size):
        sigma_arg = sigma[i] if sigma_dim is not None else None
window_arg = window_size[i] if window_size_dim is not None else None
skip_axes = count_skip_axes(layout)
baseline.append(gaussian_baseline(input[i], sigma_arg, window_arg, axes, skip_axes))
check_batch(result, baseline, batch_size, max_allowed_error=1, expected_layout=layout)
# TODO(klecki): consider checking mixed ArgumentInput/Scalar value cases
def test_per_sample_gaussian_blur():
for dev in ["cpu", "gpu"]:
for shape, layout, axes in shape_layout_axes_cases:
for sigma_dim in [None, 1, axes]:
for window_size_dim in [None, 1, axes]:
if sigma_dim is None and window_size_dim is None:
continue
yield check_per_sample_gaussian_blur, 10, sigma_dim, window_size_dim, shape, layout, axes, dev
@raises(RuntimeError)
def check_fail_gaussian_blur(batch_size, sigma, window_size, shape, layout, axes, op_type, in_dtype=np.uint8, out_dtype=types.NO_TYPE):
check_generic_gaussian_blur(batch_size, sigma, window_size, shape, layout, axes, op_type, in_dtype, out_dtype)
def test_fail_gaussian_blur():
for dev in ["cpu", "gpu"]:
# Check layout and channel placement errors
for shape, layout, axes in [((20, 20, 30, 3), "DHCW", 3), ((5, 20, 30, 3), "HFWC", 2),
((5, 10, 10, 10, 7, 3), "FWXYZC", 4),
((5, 3, 20, 3, 30), "FCHCW", 2),
((5, 3, 20, 3, 30), "FCCHW", 2)]:
yield check_fail_gaussian_blur, 10, 1.0, 11, shape, layout, axes, dev
# Negative, disallowed or both unspecified values of sigma and window size
yield check_fail_gaussian_blur, 10, 0.0, 0, (100, 20, 3), "HWC", 3, dev
yield check_fail_gaussian_blur, 10, -1.0, 0, (100, 20, 3), "HWC", 3, dev
yield check_fail_gaussian_blur, 10, 0.0, -11, (100, 20, 3), "HWC", 3, dev
yield check_fail_gaussian_blur, 10, 0.0, 2, (100, 20, 3), "HWC", 3, dev
|
# -*- coding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import decimal
import json
from collections import OrderedDict
from datetime import date, datetime, time, timedelta
import numpy as np
import numpy.testing as npt
import pandas as pd
import pandas.util.testing as tm
import pytest
import pyarrow as pa
import pyarrow.types as patypes
from pyarrow.compat import PY2
from .pandas_examples import dataframe_with_arrays, dataframe_with_lists
def _alltypes_example(size=100):
return pd.DataFrame({
'uint8': np.arange(size, dtype=np.uint8),
'uint16': np.arange(size, dtype=np.uint16),
'uint32': np.arange(size, dtype=np.uint32),
'uint64': np.arange(size, dtype=np.uint64),
        'int8': np.arange(size, dtype=np.int8),
'int16': np.arange(size, dtype=np.int16),
'int32': np.arange(size, dtype=np.int32),
'int64': np.arange(size, dtype=np.int64),
'float32': np.arange(size, dtype=np.float32),
'float64': np.arange(size, dtype=np.float64),
'bool': np.random.randn(size) > 0,
        # TODO(wesm): Pandas only supports ns resolution, Arrow supports s, ms,
# us, ns
'datetime': np.arange("2016-01-01T00:00:00.001", size,
dtype='datetime64[ms]'),
'str': [str(x) for x in range(size)],
'str_with_nulls': [None] + [str(x) for x in range(size - 2)] + [None],
'empty_str': [''] * size
})
def _check_pandas_roundtrip(df, expected=None, use_threads=False,
expected_schema=None,
check_dtype=True, schema=None,
preserve_index=False,
as_batch=False):
klass = pa.RecordBatch if as_batch else pa.Table
table = klass.from_pandas(df, schema=schema,
preserve_index=preserve_index,
nthreads=2 if use_threads else 1)
result = table.to_pandas(use_threads=use_threads)
if expected_schema:
        # all occurrences of _check_pandas_roundtrip pass expected_schema
        # without the pandas-generated key-value metadata, so we need to
        # add it before checking schema equality
expected_schema = expected_schema.add_metadata(table.schema.metadata)
assert table.schema.equals(expected_schema)
if expected is None:
expected = df
tm.assert_frame_equal(result, expected, check_dtype=check_dtype,
check_index_type=('equiv' if preserve_index
else False))
def _check_series_roundtrip(s, type_=None, expected_pa_type=None):
arr = pa.array(s, from_pandas=True, type=type_)
if type_ is not None and expected_pa_type is None:
expected_pa_type = type_
if expected_pa_type is not None:
assert arr.type == expected_pa_type
result = pd.Series(arr.to_pandas(), name=s.name)
if patypes.is_timestamp(arr.type) and arr.type.tz is not None:
result = (result.dt.tz_localize('utc')
.dt.tz_convert(arr.type.tz))
tm.assert_series_equal(s, result)
def _check_array_roundtrip(values, expected=None, mask=None,
type=None):
arr = pa.array(values, from_pandas=True, mask=mask, type=type)
result = arr.to_pandas()
values_nulls = pd.isnull(values)
if mask is None:
assert arr.null_count == values_nulls.sum()
else:
assert arr.null_count == (mask | values_nulls).sum()
if mask is None:
tm.assert_series_equal(pd.Series(result), pd.Series(values),
check_names=False)
else:
expected = pd.Series(np.ma.masked_array(values, mask=mask))
tm.assert_series_equal(pd.Series(result), expected,
check_names=False)
def _check_array_from_pandas_roundtrip(np_array):
arr = pa.array(np_array, from_pandas=True)
result = arr.to_pandas()
npt.assert_array_equal(result, np_array)
class TestConvertMetadata(object):
"""
Conversion tests for Pandas metadata & indices.
"""
def test_non_string_columns(self):
df = pd.DataFrame({0: [1, 2, 3]})
table = pa.Table.from_pandas(df)
assert table.column(0).name == '0'
def test_from_pandas_with_columns(self):
df = pd.DataFrame({0: [1, 2, 3], 1: [1, 3, 3], 2: [2, 4, 5]})
table = pa.Table.from_pandas(df, columns=[0, 1])
expected = pa.Table.from_pandas(df[[0, 1]])
assert expected.equals(table)
record_batch_table = pa.RecordBatch.from_pandas(df, columns=[0, 1])
record_batch_expected = pa.RecordBatch.from_pandas(df[[0, 1]])
assert record_batch_expected.equals(record_batch_table)
def test_column_index_names_are_preserved(self):
df = pd.DataFrame({'data': [1, 2, 3]})
df.columns.names = ['a']
_check_pandas_roundtrip(df, preserve_index=True)
def test_multiindex_columns(self):
columns = pd.MultiIndex.from_arrays([
['one', 'two'], ['X', 'Y']
])
df = pd.DataFrame([(1, 'a'), (2, 'b'), (3, 'c')], columns=columns)
_check_pandas_roundtrip(df, preserve_index=True)
def test_multiindex_columns_with_dtypes(self):
columns = pd.MultiIndex.from_arrays(
[
['one', 'two'],
pd.DatetimeIndex(['2017-08-01', '2017-08-02']),
],
names=['level_1', 'level_2'],
)
df = pd.DataFrame([(1, 'a'), (2, 'b'), (3, 'c')], columns=columns)
_check_pandas_roundtrip(df, preserve_index=True)
def test_multiindex_columns_unicode(self):
columns = pd.MultiIndex.from_arrays([[u'あ', u'い'], ['X', 'Y']])
df = pd.DataFrame([(1, 'a'), (2, 'b'), (3, 'c')], columns=columns)
_check_pandas_roundtrip(df, preserve_index=True)
def test_integer_index_column(self):
df = pd.DataFrame([(1, 'a'), (2, 'b'), (3, 'c')])
_check_pandas_roundtrip(df, preserve_index=True)
def test_index_metadata_field_name(self):
# test None case, and strangely named non-index columns
df = pd.DataFrame(
[(1, 'a', 3.1), (2, 'b', 2.2), (3, 'c', 1.3)],
index=pd.MultiIndex.from_arrays(
[['c', 'b', 'a'], [3, 2, 1]],
names=[None, 'foo']
),
columns=['a', None, '__index_level_0__'],
)
t = pa.Table.from_pandas(df, preserve_index=True)
raw_metadata = t.schema.metadata
js = json.loads(raw_metadata[b'pandas'].decode('utf8'))
col1, col2, col3, idx0, foo = js['columns']
assert col1['name'] == 'a'
assert col1['name'] == col1['field_name']
assert col2['name'] is None
assert col2['field_name'] == 'None'
assert col3['name'] == '__index_level_0__'
assert col3['name'] == col3['field_name']
idx0_name, foo_name = js['index_columns']
assert idx0_name == '__index_level_0__'
assert idx0['field_name'] == idx0_name
assert idx0['name'] is None
assert foo_name == 'foo'
assert foo['field_name'] == foo_name
assert foo['name'] == foo_name
def test_categorical_column_index(self):
df = pd.DataFrame(
[(1, 'a', 2.0), (2, 'b', 3.0), (3, 'c', 4.0)],
columns=pd.Index(list('def'), dtype='category')
)
t = pa.Table.from_pandas(df, preserve_index=True)
raw_metadata = t.schema.metadata
js = json.loads(raw_metadata[b'pandas'].decode('utf8'))
column_indexes, = js['column_indexes']
assert column_indexes['name'] is None
assert column_indexes['pandas_type'] == 'categorical'
assert column_indexes['numpy_type'] == 'int8'
md = column_indexes['metadata']
assert md['num_categories'] == 3
assert md['ordered'] is False
def test_string_column_index(self):
df = pd.DataFrame(
[(1, 'a', 2.0), (2, 'b', 3.0), (3, 'c', 4.0)],
columns=pd.Index(list('def'), name='stringz')
)
t = pa.Table.from_pandas(df, preserve_index=True)
raw_metadata = t.schema.metadata
js = json.loads(raw_metadata[b'pandas'].decode('utf8'))
column_indexes, = js['column_indexes']
assert column_indexes['name'] == 'stringz'
assert column_indexes['name'] == column_indexes['field_name']
assert column_indexes['pandas_type'] == ('bytes' if PY2 else 'unicode')
assert column_indexes['numpy_type'] == 'object'
md = column_indexes['metadata']
if not PY2:
assert len(md) == 1
assert md['encoding'] == 'UTF-8'
else:
assert md is None or 'encoding' not in md
def test_datetimetz_column_index(self):
df = pd.DataFrame(
[(1, 'a', 2.0), (2, 'b', 3.0), (3, 'c', 4.0)],
columns=pd.date_range(
start='2017-01-01', periods=3, tz='America/New_York'
)
)
t = pa.Table.from_pandas(df, preserve_index=True)
raw_metadata = t.schema.metadata
js = json.loads(raw_metadata[b'pandas'].decode('utf8'))
column_indexes, = js['column_indexes']
assert column_indexes['name'] is None
assert column_indexes['pandas_type'] == 'datetimetz'
assert column_indexes['numpy_type'] == 'datetime64[ns]'
md = column_indexes['metadata']
assert md['timezone'] == 'America/New_York'
def test_datetimetz_row_index(self):
df = pd.DataFrame({
'a': pd.date_range(
start='2017-01-01', periods=3, tz='America/New_York'
)
})
df = df.set_index('a')
_check_pandas_roundtrip(df, preserve_index=True)
def test_categorical_row_index(self):
df = pd.DataFrame({'a': [1, 2, 3], 'b': [1, 2, 3]})
df['a'] = df.a.astype('category')
df = df.set_index('a')
_check_pandas_roundtrip(df, preserve_index=True)
def test_duplicate_column_names_does_not_crash(self):
df = pd.DataFrame([(1, 'a'), (2, 'b')], columns=list('aa'))
with pytest.raises(ValueError):
pa.Table.from_pandas(df)
def test_dictionary_indices_boundscheck(self):
# ARROW-1658. No validation of indices leads to segfaults in pandas
indices = [[0, 1], [0, -1]]
for inds in indices:
arr = pa.DictionaryArray.from_arrays(inds, ['a'], safe=False)
batch = pa.RecordBatch.from_arrays([arr], ['foo'])
table = pa.Table.from_batches([batch, batch, batch])
with pytest.raises(pa.ArrowInvalid):
arr.to_pandas()
with pytest.raises(pa.ArrowInvalid):
table.to_pandas()
def test_unicode_with_unicode_column_and_index(self):
df = pd.DataFrame({u'あ': [u'い']}, index=[u'う'])
_check_pandas_roundtrip(df, preserve_index=True)
def test_mixed_unicode_column_names(self):
df = pd.DataFrame({u'あ': [u'い'], b'a': 1}, index=[u'う'])
# TODO(phillipc): Should this raise?
with pytest.raises(AssertionError):
_check_pandas_roundtrip(df, preserve_index=True)
def test_binary_column_name(self):
column_data = [u'い']
key = u'あ'.encode('utf8')
data = {key: column_data}
df = pd.DataFrame(data)
        # we can't use _check_pandas_roundtrip here because our metadata
        # is always decoded as utf8: even if binary goes in, utf8 comes out
t = pa.Table.from_pandas(df, preserve_index=True)
df2 = t.to_pandas()
assert df.values[0] == df2.values[0]
assert df.index.values[0] == df2.index.values[0]
assert df.columns[0] == key
def test_multiindex_duplicate_values(self):
num_rows = 3
numbers = list(range(num_rows))
index = pd.MultiIndex.from_arrays(
[['foo', 'foo', 'bar'], numbers],
names=['foobar', 'some_numbers'],
)
df = pd.DataFrame({'numbers': numbers}, index=index)
table = pa.Table.from_pandas(df)
result_df = table.to_pandas()
tm.assert_frame_equal(result_df, df)
def test_metadata_with_mixed_types(self):
df = pd.DataFrame({'data': [b'some_bytes', u'some_unicode']})
table = pa.Table.from_pandas(df)
metadata = table.schema.metadata
assert b'mixed' not in metadata[b'pandas']
js = json.loads(metadata[b'pandas'].decode('utf8'))
data_column = js['columns'][0]
assert data_column['pandas_type'] == 'bytes'
assert data_column['numpy_type'] == 'object'
def test_list_metadata(self):
df = pd.DataFrame({'data': [[1], [2, 3, 4], [5] * 7]})
schema = pa.schema([pa.field('data', type=pa.list_(pa.int64()))])
table = pa.Table.from_pandas(df, schema=schema)
metadata = table.schema.metadata
assert b'mixed' not in metadata[b'pandas']
js = json.loads(metadata[b'pandas'].decode('utf8'))
data_column = js['columns'][0]
assert data_column['pandas_type'] == 'list[int64]'
assert data_column['numpy_type'] == 'object'
def test_decimal_metadata(self):
expected = pd.DataFrame({
'decimals': [
decimal.Decimal('394092382910493.12341234678'),
-decimal.Decimal('314292388910493.12343437128'),
]
})
table = pa.Table.from_pandas(expected)
metadata = table.schema.metadata
assert b'mixed' not in metadata[b'pandas']
js = json.loads(metadata[b'pandas'].decode('utf8'))
data_column = js['columns'][0]
assert data_column['pandas_type'] == 'decimal'
assert data_column['numpy_type'] == 'object'
assert data_column['metadata'] == {'precision': 26, 'scale': 11}
def test_table_column_subset_metadata(self):
# ARROW-1883
df = pd.DataFrame({
'a': [1, 2, 3],
'b': pd.date_range("2017-01-01", periods=3, tz='Europe/Brussels')})
table = pa.Table.from_pandas(df)
table_subset = table.remove_column(1)
result = table_subset.to_pandas()
tm.assert_frame_equal(result, df[['a']])
table_subset2 = table_subset.remove_column(1)
result = table_subset2.to_pandas()
tm.assert_frame_equal(result, df[['a']])
# non-default index
for index in [
pd.Index(['a', 'b', 'c'], name='index'),
pd.date_range("2017-01-01", periods=3, tz='Europe/Brussels')]:
df = pd.DataFrame({'a': [1, 2, 3],
'b': [.1, .2, .3]}, index=index)
table = pa.Table.from_pandas(df)
table_subset = table.remove_column(1)
result = table_subset.to_pandas()
tm.assert_frame_equal(result, df[['a']])
table_subset2 = table_subset.remove_column(1)
result = table_subset2.to_pandas()
tm.assert_frame_equal(result, df[['a']].reset_index(drop=True))
def test_empty_list_metadata(self):
# Create table with array of empty lists, forced to have type
# list(string) in pyarrow
c1 = [["test"], ["a", "b"], None]
c2 = [[], [], []]
arrays = OrderedDict([
('c1', pa.array(c1, type=pa.list_(pa.string()))),
('c2', pa.array(c2, type=pa.list_(pa.string()))),
])
rb = pa.RecordBatch.from_arrays(
list(arrays.values()),
list(arrays.keys())
)
tbl = pa.Table.from_batches([rb])
# First roundtrip changes schema, because pandas cannot preserve the
# type of empty lists
df = tbl.to_pandas()
tbl2 = pa.Table.from_pandas(df, preserve_index=True)
md2 = json.loads(tbl2.schema.metadata[b'pandas'].decode('utf8'))
# Second roundtrip
df2 = tbl2.to_pandas()
expected = pd.DataFrame(OrderedDict([('c1', c1), ('c2', c2)]))
tm.assert_frame_equal(df2, expected)
assert md2['columns'] == [
{
'name': 'c1',
'field_name': 'c1',
'metadata': None,
'numpy_type': 'object',
'pandas_type': 'list[unicode]',
},
{
'name': 'c2',
'field_name': 'c2',
'metadata': None,
'numpy_type': 'object',
'pandas_type': 'list[empty]',
},
{
'name': None,
'field_name': '__index_level_0__',
'metadata': None,
'numpy_type': 'int64',
'pandas_type': 'int64',
}
]
class TestConvertPrimitiveTypes(object):
"""
Conversion tests for primitive (e.g. numeric) types.
"""
def test_float_no_nulls(self):
data = {}
fields = []
dtypes = [('f2', pa.float16()),
('f4', pa.float32()),
('f8', pa.float64())]
num_values = 100
for numpy_dtype, arrow_dtype in dtypes:
values = np.random.randn(num_values)
data[numpy_dtype] = values.astype(numpy_dtype)
fields.append(pa.field(numpy_dtype, arrow_dtype))
df = pd.DataFrame(data)
schema = pa.schema(fields)
_check_pandas_roundtrip(df, expected_schema=schema)
def test_float_nulls(self):
num_values = 100
null_mask = np.random.randint(0, 10, size=num_values) < 3
dtypes = [('f2', pa.float16()),
('f4', pa.float32()),
('f8', pa.float64())]
names = ['f2', 'f4', 'f8']
expected_cols = []
arrays = []
fields = []
for name, arrow_dtype in dtypes:
values = np.random.randn(num_values).astype(name)
arr = pa.array(values, from_pandas=True, mask=null_mask)
arrays.append(arr)
fields.append(pa.field(name, arrow_dtype))
values[null_mask] = np.nan
expected_cols.append(values)
ex_frame = pd.DataFrame(dict(zip(names, expected_cols)),
columns=names)
table = pa.Table.from_arrays(arrays, names)
assert table.schema.equals(pa.schema(fields))
result = table.to_pandas()
tm.assert_frame_equal(result, ex_frame)
def test_float_nulls_to_ints(self):
# ARROW-2135
df = pd.DataFrame({"a": [1.0, 2.0, pd.np.NaN]})
schema = pa.schema([pa.field("a", pa.int16(), nullable=True)])
table = pa.Table.from_pandas(df, schema=schema)
assert table[0].to_pylist() == [1, 2, None]
tm.assert_frame_equal(df, table.to_pandas())
def test_integer_no_nulls(self):
data = OrderedDict()
fields = []
numpy_dtypes = [
('i1', pa.int8()), ('i2', pa.int16()),
('i4', pa.int32()), ('i8', pa.int64()),
('u1', pa.uint8()), ('u2', pa.uint16()),
('u4', pa.uint32()), ('u8', pa.uint64()),
('longlong', pa.int64()), ('ulonglong', pa.uint64())
]
num_values = 100
for dtype, arrow_dtype in numpy_dtypes:
info = np.iinfo(dtype)
values = np.random.randint(max(info.min, np.iinfo(np.int_).min),
min(info.max, np.iinfo(np.int_).max),
size=num_values)
data[dtype] = values.astype(dtype)
fields.append(pa.field(dtype, arrow_dtype))
df = pd.DataFrame(data)
schema = pa.schema(fields)
_check_pandas_roundtrip(df, expected_schema=schema)
def test_all_integer_types(self):
# Test all Numpy integer aliases
data = OrderedDict()
numpy_dtypes = ['i1', 'i2', 'i4', 'i8', 'u1', 'u2', 'u4', 'u8',
'byte', 'ubyte', 'short', 'ushort', 'intc', 'uintc',
'int_', 'uint', 'longlong', 'ulonglong']
for dtype in numpy_dtypes:
data[dtype] = np.arange(12, dtype=dtype)
df = pd.DataFrame(data)
_check_pandas_roundtrip(df)
def test_integer_with_nulls(self):
# pandas requires upcast to float dtype
int_dtypes = ['i1', 'i2', 'i4', 'i8', 'u1', 'u2', 'u4', 'u8']
num_values = 100
null_mask = np.random.randint(0, 10, size=num_values) < 3
expected_cols = []
arrays = []
for name in int_dtypes:
values = np.random.randint(0, 100, size=num_values)
arr = pa.array(values, mask=null_mask)
arrays.append(arr)
expected = values.astype('f8')
expected[null_mask] = np.nan
expected_cols.append(expected)
ex_frame = pd.DataFrame(dict(zip(int_dtypes, expected_cols)),
columns=int_dtypes)
table = pa.Table.from_arrays(arrays, int_dtypes)
result = table.to_pandas()
tm.assert_frame_equal(result, ex_frame)
def test_array_from_pandas_type_cast(self):
arr = np.arange(10, dtype='int64')
target_type = pa.int8()
result = pa.array(arr, type=target_type)
expected = pa.array(arr.astype('int8'))
assert result.equals(expected)
def test_boolean_no_nulls(self):
num_values = 100
np.random.seed(0)
df = pd.DataFrame({'bools': np.random.randn(num_values) > 0})
field = pa.field('bools', pa.bool_())
schema = pa.schema([field])
_check_pandas_roundtrip(df, expected_schema=schema)
def test_boolean_nulls(self):
# pandas requires upcast to object dtype
num_values = 100
np.random.seed(0)
mask = np.random.randint(0, 10, size=num_values) < 3
values = np.random.randint(0, 10, size=num_values) < 5
arr = pa.array(values, mask=mask)
expected = values.astype(object)
expected[mask] = None
field = pa.field('bools', pa.bool_())
schema = pa.schema([field])
ex_frame = pd.DataFrame({'bools': expected})
table = pa.Table.from_arrays([arr], ['bools'])
assert table.schema.equals(schema)
result = table.to_pandas()
tm.assert_frame_equal(result, ex_frame)
def test_float_object_nulls(self):
arr = np.array([None, 1.5, np.float64(3.5)] * 5, dtype=object)
df = pd.DataFrame({'floats': arr})
expected = pd.DataFrame({'floats': pd.to_numeric(arr)})
field = pa.field('floats', pa.float64())
schema = pa.schema([field])
_check_pandas_roundtrip(df, expected=expected,
expected_schema=schema)
def test_int_object_nulls(self):
arr = np.array([None, 1, np.int64(3)] * 5, dtype=object)
df = pd.DataFrame({'ints': arr})
expected = pd.DataFrame({'ints': pd.to_numeric(arr)})
field = pa.field('ints', pa.int64())
schema = pa.schema([field])
_check_pandas_roundtrip(df, expected=expected,
expected_schema=schema)
def test_boolean_object_nulls(self):
arr = np.array([False, None, True] * 100, dtype=object)
df = pd.DataFrame({'bools': arr})
field = pa.field('bools', pa.bool_())
schema = pa.schema([field])
_check_pandas_roundtrip(df, expected_schema=schema)
def test_all_nulls_cast_numeric(self):
arr = np.array([None], dtype=object)
def _check_type(t):
a2 = pa.array(arr, type=t)
assert a2.type == t
assert a2[0].as_py() is None
_check_type(pa.int32())
_check_type(pa.float64())
def test_half_floats_from_numpy(self):
arr = np.array([1.5, np.nan], dtype=np.float16)
a = pa.array(arr, type=pa.float16())
x, y = a.to_pylist()
assert isinstance(x, np.float16)
assert x == 1.5
assert isinstance(y, np.float16)
assert np.isnan(y)
a = pa.array(arr, type=pa.float16(), from_pandas=True)
x, y = a.to_pylist()
assert isinstance(x, np.float16)
assert x == 1.5
assert y is None
@pytest.mark.parametrize('dtype',
['i1', 'i2', 'i4', 'i8', 'u1', 'u2', 'u4', 'u8'])
def test_array_integer_object_nulls_option(dtype):
num_values = 100
null_mask = np.random.randint(0, 10, size=num_values) < 3
values = np.random.randint(0, 100, size=num_values, dtype=dtype)
array = pa.array(values, mask=null_mask)
if null_mask.any():
expected = values.astype('O')
expected[null_mask] = None
else:
expected = values
result = array.to_pandas(integer_object_nulls=True)
np.testing.assert_equal(result, expected)
@pytest.mark.parametrize('dtype',
['i1', 'i2', 'i4', 'i8', 'u1', 'u2', 'u4', 'u8'])
def test_table_integer_object_nulls_option(dtype):
num_values = 100
null_mask = np.random.randint(0, 10, size=num_values) < 3
values = np.random.randint(0, 100, size=num_values, dtype=dtype)
array = pa.array(values, mask=null_mask)
if null_mask.any():
expected = values.astype('O')
expected[null_mask] = None
else:
expected = values
expected = pd.DataFrame({dtype: expected})
table = pa.Table.from_arrays([array], [dtype])
result = table.to_pandas(integer_object_nulls=True)
tm.assert_frame_equal(result, expected)
class TestConvertDateTimeLikeTypes(object):
"""
Conversion tests for datetime- and timestamp-like types (date64, etc.).
"""
def test_timestamps_notimezone_no_nulls(self):
df = pd.DataFrame({
'datetime64': np.array([
'2007-07-13T01:23:34.123456789',
'2006-01-13T12:34:56.432539784',
'2010-08-13T05:46:57.437699912'],
dtype='datetime64[ns]')
})
field = pa.field('datetime64', pa.timestamp('ns'))
schema = pa.schema([field])
_check_pandas_roundtrip(
df,
expected_schema=schema,
)
def test_timestamps_notimezone_nulls(self):
df = pd.DataFrame({
'datetime64': np.array([
'2007-07-13T01:23:34.123456789',
None,
'2010-08-13T05:46:57.437699912'],
dtype='datetime64[ns]')
})
field = pa.field('datetime64', pa.timestamp('ns'))
schema = pa.schema([field])
_check_pandas_roundtrip(
df,
expected_schema=schema,
)
def test_timestamps_with_timezone(self):
df = pd.DataFrame({
'datetime64': np.array([
'2007-07-13T01:23:34.123',
'2006-01-13T12:34:56.432',
'2010-08-13T05:46:57.437'],
dtype='datetime64[ms]')
})
df['datetime64'] = (df['datetime64'].dt.tz_localize('US/Eastern')
.to_frame())
_check_pandas_roundtrip(df)
_check_series_roundtrip(df['datetime64'])
# drop-in a null and ns instead of ms
df = pd.DataFrame({
'datetime64': np.array([
'2007-07-13T01:23:34.123456789',
None,
'2006-01-13T12:34:56.432539784',
'2010-08-13T05:46:57.437699912'],
dtype='datetime64[ns]')
})
df['datetime64'] = (df['datetime64'].dt.tz_localize('US/Eastern')
.to_frame())
_check_pandas_roundtrip(df)
def test_python_datetime(self):
# ARROW-2106
date_array = [datetime.today() + timedelta(days=x) for x in range(10)]
df = pd.DataFrame({
'datetime': pd.Series(date_array, dtype=object)
})
table = pa.Table.from_pandas(df)
assert isinstance(table[0].data.chunk(0), pa.TimestampArray)
result = table.to_pandas()
expected_df = pd.DataFrame({
'datetime': date_array
})
tm.assert_frame_equal(expected_df, result)
def test_python_datetime_subclass(self):
class MyDatetime(datetime):
# see https://github.com/pandas-dev/pandas/issues/21142
nanosecond = 0.0
date_array = [MyDatetime(2000, 1, 1, 1, 1, 1)]
df = pd.DataFrame({"datetime": pd.Series(date_array, dtype=object)})
table = pa.Table.from_pandas(df)
assert isinstance(table[0].data.chunk(0), pa.TimestampArray)
result = table.to_pandas()
expected_df = pd.DataFrame({"datetime": date_array})
# https://github.com/pandas-dev/pandas/issues/21142
expected_df["datetime"] = pd.to_datetime(expected_df["datetime"])
tm.assert_frame_equal(expected_df, result)
def test_python_date_subclass(self):
class MyDate(date):
pass
date_array = [MyDate(2000, 1, 1)]
df = pd.DataFrame({"date": pd.Series(date_array, dtype=object)})
table = pa.Table.from_pandas(df)
assert isinstance(table[0].data.chunk(0), pa.Date32Array)
result = table.to_pandas()
expected_df = pd.DataFrame(
{"date": np.array(["2000-01-01"], dtype="datetime64[ns]")}
)
tm.assert_frame_equal(expected_df, result)
def test_datetime64_to_date32(self):
# ARROW-1718
arr = pa.array([date(2017, 10, 23), None])
c = pa.Column.from_array("d", arr)
s = c.to_pandas()
arr2 = pa.Array.from_pandas(s, type=pa.date32())
assert arr2.equals(arr.cast('date32'))
@pytest.mark.parametrize('mask', [
None,
np.ones(3),
np.array([True, False, False]),
])
def test_pandas_datetime_to_date64(self, mask):
s = pd.to_datetime([
'2018-05-10T00:00:00',
'2018-05-11T00:00:00',
'2018-05-12T00:00:00',
])
arr = pa.Array.from_pandas(s, type=pa.date64(), mask=mask)
data = np.array([
date(2018, 5, 10),
date(2018, 5, 11),
date(2018, 5, 12)
])
expected = pa.array(data, mask=mask, type=pa.date64())
assert arr.equals(expected)
@pytest.mark.parametrize('mask', [
None,
np.ones(3),
np.array([True, False, False])
])
def test_pandas_datetime_to_date64_failures(self, mask):
s = pd.to_datetime([
'2018-05-10T10:24:01',
'2018-05-11T10:24:01',
'2018-05-12T10:24:01',
])
expected_msg = 'Timestamp value had non-zero intraday milliseconds'
with pytest.raises(pa.ArrowInvalid, match=expected_msg):
pa.Array.from_pandas(s, type=pa.date64(), mask=mask)
def test_date_infer(self):
df = pd.DataFrame({
'date': [date(2000, 1, 1),
None,
date(1970, 1, 1),
date(2040, 2, 26)]})
table = pa.Table.from_pandas(df, preserve_index=False)
field = pa.field('date', pa.date32())
# schema's metadata is generated by from_pandas conversion
expected_schema = pa.schema([field], metadata=table.schema.metadata)
assert table.schema.equals(expected_schema)
result = table.to_pandas()
expected = df.copy()
expected['date'] = pd.to_datetime(df['date'])
tm.assert_frame_equal(result, expected)
def test_date_mask(self):
arr = np.array([date(2017, 4, 3), date(2017, 4, 4)],
dtype='datetime64[D]')
mask = [True, False]
result = pa.array(arr, mask=np.array(mask))
expected = np.array([None, date(2017, 4, 4)], dtype='datetime64[D]')
expected = pa.array(expected, from_pandas=True)
assert expected.equals(result)
def test_date_objects_typed(self):
arr = np.array([
date(2017, 4, 3),
None,
date(2017, 4, 4),
date(2017, 4, 5)], dtype=object)
arr_i4 = np.array([17259, -1, 17260, 17261], dtype='int32')
arr_i8 = arr_i4.astype('int64') * 86400000
mask = np.array([False, True, False, False])
t32 = pa.date32()
t64 = pa.date64()
a32 = pa.array(arr, type=t32)
a64 = pa.array(arr, type=t64)
a32_expected = pa.array(arr_i4, mask=mask, type=t32)
a64_expected = pa.array(arr_i8, mask=mask, type=t64)
assert a32.equals(a32_expected)
assert a64.equals(a64_expected)
# Test converting back to pandas
colnames = ['date32', 'date64']
table = pa.Table.from_arrays([a32, a64], colnames)
table_pandas = table.to_pandas()
ex_values = (np.array(['2017-04-03', '2017-04-04', '2017-04-04',
'2017-04-05'],
dtype='datetime64[D]')
.astype('datetime64[ns]'))
ex_values[1] = pd.NaT.value
expected_pandas = pd.DataFrame({'date32': ex_values,
'date64': ex_values},
columns=colnames)
tm.assert_frame_equal(table_pandas, expected_pandas)
def test_dates_from_integers(self):
t1 = pa.date32()
t2 = pa.date64()
arr = np.array([17259, 17260, 17261], dtype='int32')
arr2 = arr.astype('int64') * 86400000
a1 = pa.array(arr, type=t1)
a2 = pa.array(arr2, type=t2)
expected = date(2017, 4, 3)
assert a1[0].as_py() == expected
assert a2[0].as_py() == expected
@pytest.mark.xfail(reason="not supported ATM",
raises=NotImplementedError)
def test_timedelta(self):
        # TODO(jreback): Pandas only supports ns resolution
# Arrow supports ??? for resolution
df = pd.DataFrame({
'timedelta': np.arange(start=0, stop=3 * 86400000,
step=86400000,
dtype='timedelta64[ms]')
})
pa.Table.from_pandas(df)
def test_pytime_from_pandas(self):
pytimes = [time(1, 2, 3, 1356),
time(4, 5, 6, 1356)]
# microseconds
t1 = pa.time64('us')
aobjs = np.array(pytimes + [None], dtype=object)
parr = pa.array(aobjs)
assert parr.type == t1
assert parr[0].as_py() == pytimes[0]
assert parr[1].as_py() == pytimes[1]
assert parr[2] is pa.NA
# DataFrame
df = pd.DataFrame({'times': aobjs})
batch = pa.RecordBatch.from_pandas(df)
assert batch[0].equals(parr)
# Test ndarray of int64 values
arr = np.array([_pytime_to_micros(v) for v in pytimes],
dtype='int64')
a1 = pa.array(arr, type=pa.time64('us'))
assert a1[0].as_py() == pytimes[0]
a2 = pa.array(arr * 1000, type=pa.time64('ns'))
assert a2[0].as_py() == pytimes[0]
a3 = pa.array((arr / 1000).astype('i4'),
type=pa.time32('ms'))
assert a3[0].as_py() == pytimes[0].replace(microsecond=1000)
a4 = pa.array((arr / 1000000).astype('i4'),
type=pa.time32('s'))
assert a4[0].as_py() == pytimes[0].replace(microsecond=0)
def test_arrow_time_to_pandas(self):
pytimes = [time(1, 2, 3, 1356),
time(4, 5, 6, 1356),
time(0, 0, 0)]
expected = np.array(pytimes[:2] + [None])
expected_ms = np.array([x.replace(microsecond=1000)
for x in pytimes[:2]] +
[None])
expected_s = np.array([x.replace(microsecond=0)
for x in pytimes[:2]] +
[None])
arr = np.array([_pytime_to_micros(v) for v in pytimes],
dtype='int64')
null_mask = np.array([False, False, True], dtype=bool)
a1 = pa.array(arr, mask=null_mask, type=pa.time64('us'))
a2 = pa.array(arr * 1000, mask=null_mask,
type=pa.time64('ns'))
a3 = pa.array((arr / 1000).astype('i4'), mask=null_mask,
type=pa.time32('ms'))
a4 = pa.array((arr / 1000000).astype('i4'), mask=null_mask,
type=pa.time32('s'))
names = ['time64[us]', 'time64[ns]', 'time32[ms]', 'time32[s]']
batch = pa.RecordBatch.from_arrays([a1, a2, a3, a4], names)
arr = a1.to_pandas()
assert (arr == expected).all()
arr = a2.to_pandas()
assert (arr == expected).all()
arr = a3.to_pandas()
assert (arr == expected_ms).all()
arr = a4.to_pandas()
assert (arr == expected_s).all()
df = batch.to_pandas()
expected_df = pd.DataFrame({'time64[us]': expected,
'time64[ns]': expected,
'time32[ms]': expected_ms,
'time32[s]': expected_s},
columns=names)
tm.assert_frame_equal(df, expected_df)
def test_numpy_datetime64_columns(self):
datetime64_ns = np.array([
'2007-07-13T01:23:34.123456789',
None,
'2006-01-13T12:34:56.432539784',
'2010-08-13T05:46:57.437699912'],
dtype='datetime64[ns]')
_check_array_from_pandas_roundtrip(datetime64_ns)
datetime64_us = np.array([
'2007-07-13T01:23:34.123456',
None,
'2006-01-13T12:34:56.432539',
'2010-08-13T05:46:57.437699'],
dtype='datetime64[us]')
_check_array_from_pandas_roundtrip(datetime64_us)
datetime64_ms = np.array([
'2007-07-13T01:23:34.123',
None,
'2006-01-13T12:34:56.432',
'2010-08-13T05:46:57.437'],
dtype='datetime64[ms]')
_check_array_from_pandas_roundtrip(datetime64_ms)
datetime64_s = np.array([
'2007-07-13T01:23:34',
None,
'2006-01-13T12:34:56',
'2010-08-13T05:46:57'],
dtype='datetime64[s]')
_check_array_from_pandas_roundtrip(datetime64_s)
def test_numpy_datetime64_day_unit(self):
datetime64_d = np.array([
'2007-07-13',
None,
'2006-01-15',
'2010-08-19'],
dtype='datetime64[D]')
_check_array_from_pandas_roundtrip(datetime64_d)
def test_array_from_pandas_date_with_mask(self):
m = np.array([True, False, True])
data = pd.Series([
date(1990, 1, 1),
date(1991, 1, 1),
date(1992, 1, 1)
])
result = pa.Array.from_pandas(data, mask=m)
expected = pd.Series([None, date(1991, 1, 1), None])
assert pa.Array.from_pandas(expected).equals(result)
def test_fixed_offset_timezone(self):
df = pd.DataFrame({
'a': [
pd.Timestamp('2012-11-11 00:00:00+01:00'),
pd.NaT
]
})
_check_pandas_roundtrip(df)
_check_serialize_components_roundtrip(df)
class TestConvertStringLikeTypes(object):
"""
Conversion tests for string and binary types.
"""
def test_unicode(self):
repeats = 1000
values = [u'foo', None, u'bar', u'mañana', np.nan]
df = pd.DataFrame({'strings': values * repeats})
field = pa.field('strings', pa.string())
schema = pa.schema([field])
_check_pandas_roundtrip(df, expected_schema=schema)
def test_bytes_to_binary(self):
values = [u'qux', b'foo', None, bytearray(b'barz'), 'qux', np.nan]
df = pd.DataFrame({'strings': values})
table = pa.Table.from_pandas(df)
assert table[0].type == pa.binary()
values2 = [b'qux', b'foo', None, b'barz', b'qux', np.nan]
expected = pd.DataFrame({'strings': values2})
_check_pandas_roundtrip(df, expected)
@pytest.mark.large_memory
def test_bytes_exceed_2gb(self):
v1 = b'x' * 100000000
v2 = b'x' * 147483646
# ARROW-2227, hit exactly 2GB on the nose
df = pd.DataFrame({
'strings': [v1] * 20 + [v2] + ['x'] * 20
})
arr = pa.array(df['strings'])
assert isinstance(arr, pa.ChunkedArray)
assert arr.num_chunks == 2
arr = None
table = pa.Table.from_pandas(df)
assert table[0].data.num_chunks == 2
def test_fixed_size_bytes(self):
values = [b'foo', None, bytearray(b'bar'), None, None, b'hey']
df = pd.DataFrame({'strings': values})
schema = pa.schema([pa.field('strings', pa.binary(3))])
table = pa.Table.from_pandas(df, schema=schema)
assert table.schema[0].type == schema[0].type
assert table.schema[0].name == schema[0].name
result = table.to_pandas()
tm.assert_frame_equal(result, df)
def test_fixed_size_bytes_does_not_accept_varying_lengths(self):
values = [b'foo', None, b'ba', None, None, b'hey']
df = pd.DataFrame({'strings': values})
schema = pa.schema([pa.field('strings', pa.binary(3))])
with pytest.raises(pa.ArrowInvalid):
pa.Table.from_pandas(df, schema=schema)
def test_variable_size_bytes(self):
s = pd.Series([b'123', b'', b'a', None])
_check_series_roundtrip(s, type_=pa.binary())
def test_binary_from_bytearray(self):
s = pd.Series([bytearray(b'123'), bytearray(b''), bytearray(b'a'),
None])
# Explicitly set type
_check_series_roundtrip(s, type_=pa.binary())
# Infer type from bytearrays
_check_series_roundtrip(s, expected_pa_type=pa.binary())
def test_table_empty_str(self):
values = ['', '', '', '', '']
df = pd.DataFrame({'strings': values})
field = pa.field('strings', pa.string())
schema = pa.schema([field])
table = pa.Table.from_pandas(df, schema=schema)
result1 = table.to_pandas(strings_to_categorical=False)
expected1 = pd.DataFrame({'strings': values})
tm.assert_frame_equal(result1, expected1, check_dtype=True)
result2 = table.to_pandas(strings_to_categorical=True)
expected2 = pd.DataFrame({'strings': pd.Categorical(values)})
tm.assert_frame_equal(result2, expected2, check_dtype=True)
def test_selective_categoricals(self):
values = ['', '', '', '', '']
df = pd.DataFrame({'strings': values})
field = pa.field('strings', pa.string())
schema = pa.schema([field])
table = pa.Table.from_pandas(df, schema=schema)
expected_str = pd.DataFrame({'strings': values})
expected_cat = pd.DataFrame({'strings': pd.Categorical(values)})
result1 = table.to_pandas(categories=['strings'])
tm.assert_frame_equal(result1, expected_cat, check_dtype=True)
result2 = table.to_pandas(categories=[])
tm.assert_frame_equal(result2, expected_str, check_dtype=True)
result3 = table.to_pandas(categories=('strings',))
tm.assert_frame_equal(result3, expected_cat, check_dtype=True)
result4 = table.to_pandas(categories=tuple())
tm.assert_frame_equal(result4, expected_str, check_dtype=True)
def test_table_str_to_categorical_without_na(self):
values = ['a', 'a', 'b', 'b', 'c']
df = pd.DataFrame({'strings': values})
field = pa.field('strings', pa.string())
schema = pa.schema([field])
table = pa.Table.from_pandas(df, schema=schema)
result = table.to_pandas(strings_to_categorical=True)
expected = pd.DataFrame({'strings': pd.Categorical(values)})
tm.assert_frame_equal(result, expected, check_dtype=True)
with pytest.raises(pa.ArrowInvalid):
table.to_pandas(strings_to_categorical=True,
zero_copy_only=True)
def test_table_str_to_categorical_with_na(self):
values = [None, 'a', 'b', np.nan]
df = pd.DataFrame({'strings': values})
field = pa.field('strings', pa.string())
schema = pa.schema([field])
table = pa.Table.from_pandas(df, schema=schema)
result = table.to_pandas(strings_to_categorical=True)
expected = pd.DataFrame({'strings': pd.Categorical(values)})
tm.assert_frame_equal(result, expected, check_dtype=True)
with pytest.raises(pa.ArrowInvalid):
table.to_pandas(strings_to_categorical=True,
zero_copy_only=True)
# Regression test for ARROW-2101
def test_array_of_bytes_to_strings(self):
converted = pa.array(np.array([b'x'], dtype=object), pa.string())
assert converted.type == pa.string()
# Make sure that if an ndarray of bytes is passed to the array
# constructor and the type is string, it will fail if those bytes
# cannot be converted to utf-8
def test_array_of_bytes_to_strings_bad_data(self):
with pytest.raises(
pa.lib.ArrowInvalid,
match=("'(utf8|utf-8)' codec can't decode byte 0x80 "
"in position 0: invalid start byte")):
pa.array(np.array([b'\x80\x81'], dtype=object), pa.string())
def test_numpy_string_array_to_fixed_size_binary(self):
arr = np.array([b'foo', b'bar', b'baz'], dtype='|S3')
converted = pa.array(arr, type=pa.binary(3))
expected = pa.array(list(arr), type=pa.binary(3))
assert converted.equals(expected)
mask = np.array([True, False, True])
converted = pa.array(arr, type=pa.binary(3), mask=mask)
expected = pa.array([b'foo', None, b'baz'], type=pa.binary(3))
assert converted.equals(expected)
        with pytest.raises(pa.lib.ArrowInvalid,
                           match=r'Got bytestring of length 3 \(expected 4\)'):
arr = np.array([b'foo', b'bar', b'baz'], dtype='|S3')
pa.array(arr, type=pa.binary(4))
        with pytest.raises(pa.lib.ArrowInvalid,
                           match=r'Got bytestring of length 12 \(expected 3\)'):
arr = np.array([b'foo', b'bar', b'baz'], dtype='|U3')
pa.array(arr, type=pa.binary(3))
class TestConvertDecimalTypes(object):
"""
Conversion test for decimal types.
"""
decimal32 = [
decimal.Decimal('-1234.123'),
decimal.Decimal('1234.439')
]
decimal64 = [
decimal.Decimal('-129934.123331'),
decimal.Decimal('129534.123731')
]
decimal128 = [
decimal.Decimal('394092382910493.12341234678'),
decimal.Decimal('-314292388910493.12343437128')
]
@pytest.mark.parametrize(('values', 'expected_type'), [
pytest.param(decimal32, pa.decimal128(7, 3), id='decimal32'),
pytest.param(decimal64, pa.decimal128(12, 6), id='decimal64'),
pytest.param(decimal128, pa.decimal128(26, 11), id='decimal128')
])
def test_decimal_from_pandas(self, values, expected_type):
expected = pd.DataFrame({'decimals': values})
table = pa.Table.from_pandas(expected, preserve_index=False)
field = pa.field('decimals', expected_type)
# schema's metadata is generated by from_pandas conversion
expected_schema = pa.schema([field], metadata=table.schema.metadata)
assert table.schema.equals(expected_schema)
@pytest.mark.parametrize('values', [
pytest.param(decimal32, id='decimal32'),
pytest.param(decimal64, id='decimal64'),
pytest.param(decimal128, id='decimal128')
])
def test_decimal_to_pandas(self, values):
expected = pd.DataFrame({'decimals': values})
converted = pa.Table.from_pandas(expected)
df = converted.to_pandas()
tm.assert_frame_equal(df, expected)
def test_decimal_fails_with_truncation(self):
data1 = [decimal.Decimal('1.234')]
type1 = pa.decimal128(10, 2)
with pytest.raises(pa.ArrowInvalid):
pa.array(data1, type=type1)
data2 = [decimal.Decimal('1.2345')]
type2 = pa.decimal128(10, 3)
with pytest.raises(pa.ArrowInvalid):
pa.array(data2, type=type2)
def test_decimal_with_different_precisions(self):
data = [
decimal.Decimal('0.01'),
decimal.Decimal('0.001'),
]
series = pd.Series(data)
array = pa.array(series)
assert array.to_pylist() == data
assert array.type == pa.decimal128(3, 3)
array = pa.array(data, type=pa.decimal128(12, 5))
expected = [decimal.Decimal('0.01000'), decimal.Decimal('0.00100')]
assert array.to_pylist() == expected
def test_decimal_with_None_explicit_type(self):
series = pd.Series([decimal.Decimal('3.14'), None])
_check_series_roundtrip(series, type_=pa.decimal128(12, 5))
# Test that having all None values still produces decimal array
series = pd.Series([None] * 2)
_check_series_roundtrip(series, type_=pa.decimal128(12, 5))
def test_decimal_with_None_infer_type(self):
series = pd.Series([decimal.Decimal('3.14'), None])
_check_series_roundtrip(series, expected_pa_type=pa.decimal128(3, 2))
class TestListTypes(object):
"""
Conversion tests for list<> types.
"""
def test_column_of_arrays(self):
df, schema = dataframe_with_arrays()
_check_pandas_roundtrip(df, schema=schema, expected_schema=schema)
table = pa.Table.from_pandas(df, schema=schema, preserve_index=False)
# schema's metadata is generated by from_pandas conversion
expected_schema = schema.add_metadata(table.schema.metadata)
assert table.schema.equals(expected_schema)
for column in df.columns:
field = schema.field_by_name(column)
_check_array_roundtrip(df[column], type=field.type)
def test_column_of_arrays_to_py(self):
# Test regression in ARROW-1199 not caught in above test
dtype = 'i1'
arr = np.array([
np.arange(10, dtype=dtype),
np.arange(5, dtype=dtype),
None,
np.arange(1, dtype=dtype)
])
type_ = pa.list_(pa.int8())
parr = pa.array(arr, type=type_)
assert parr[0].as_py() == list(range(10))
assert parr[1].as_py() == list(range(5))
assert parr[2].as_py() is None
assert parr[3].as_py() == [0]
def test_column_of_lists(self):
df, schema = dataframe_with_lists()
_check_pandas_roundtrip(df, schema=schema, expected_schema=schema)
table = pa.Table.from_pandas(df, schema=schema, preserve_index=False)
# schema's metadata is generated by from_pandas conversion
expected_schema = schema.add_metadata(table.schema.metadata)
assert table.schema.equals(expected_schema)
for column in df.columns:
field = schema.field_by_name(column)
_check_array_roundtrip(df[column], type=field.type)
def test_column_of_lists_first_empty(self):
# ARROW-2124
num_lists = [[], [2, 3, 4], [3, 6, 7, 8], [], [2]]
series = pd.Series([np.array(s, dtype=float) for s in num_lists])
arr = pa.array(series)
result = pd.Series(arr.to_pandas())
tm.assert_series_equal(result, series)
def test_column_of_lists_chunked(self):
# ARROW-1357
df = pd.DataFrame({
'lists': np.array([
[1, 2],
None,
[2, 3],
[4, 5],
[6, 7],
[8, 9]
], dtype=object)
})
schema = pa.schema([
pa.field('lists', pa.list_(pa.int64()))
])
t1 = pa.Table.from_pandas(df[:2], schema=schema)
t2 = pa.Table.from_pandas(df[2:], schema=schema)
table = pa.concat_tables([t1, t2])
result = table.to_pandas()
tm.assert_frame_equal(result, df)
def test_column_of_lists_chunked2(self):
data1 = [[0, 1], [2, 3], [4, 5], [6, 7], [10, 11],
[12, 13], [14, 15], [16, 17]]
data2 = [[8, 9], [18, 19]]
a1 = pa.array(data1)
a2 = pa.array(data2)
t1 = pa.Table.from_arrays([a1], names=['a'])
t2 = pa.Table.from_arrays([a2], names=['a'])
concatenated = pa.concat_tables([t1, t2])
result = concatenated.to_pandas()
expected = pd.DataFrame({'a': data1 + data2})
tm.assert_frame_equal(result, expected)
def test_column_of_lists_strided(self):
df, schema = dataframe_with_lists()
df = pd.concat([df] * 6, ignore_index=True)
arr = df['int64'].values[::3]
assert arr.strides[0] != 8
_check_array_roundtrip(arr)
def test_nested_lists_all_none(self):
data = np.array([[None, None], None], dtype=object)
arr = pa.array(data)
expected = pa.array(list(data))
assert arr.equals(expected)
assert arr.type == pa.list_(pa.null())
data2 = np.array([None, None, [None, None],
np.array([None, None], dtype=object)],
dtype=object)
arr = pa.array(data2)
expected = pa.array([None, None, [None, None], [None, None]])
assert arr.equals(expected)
def test_nested_lists_all_empty(self):
# ARROW-2128
data = pd.Series([[], [], []])
arr = pa.array(data)
expected = pa.array(list(data))
assert arr.equals(expected)
assert arr.type == pa.list_(pa.null())
def test_nested_smaller_ints(self):
# ARROW-1345, ARROW-2008, there were some type inference bugs happening
# before
data = pd.Series([np.array([1, 2, 3], dtype='i1'), None])
result = pa.array(data)
result2 = pa.array(data.values)
expected = pa.array([[1, 2, 3], None], type=pa.list_(pa.int8()))
assert result.equals(expected)
assert result2.equals(expected)
data3 = pd.Series([np.array([1, 2, 3], dtype='f4'), None])
result3 = pa.array(data3)
expected3 = pa.array([[1, 2, 3], None], type=pa.list_(pa.float32()))
assert result3.equals(expected3)
def test_infer_lists(self):
data = OrderedDict([
('nan_ints', [[None, 1], [2, 3]]),
('ints', [[0, 1], [2, 3]]),
('strs', [[None, u'b'], [u'c', u'd']]),
('nested_strs', [[[None, u'b'], [u'c', u'd']], None])
])
df = pd.DataFrame(data)
expected_schema = pa.schema([
pa.field('nan_ints', pa.list_(pa.int64())),
pa.field('ints', pa.list_(pa.int64())),
pa.field('strs', pa.list_(pa.string())),
pa.field('nested_strs', pa.list_(pa.list_(pa.string())))
])
_check_pandas_roundtrip(df, expected_schema=expected_schema)
def test_infer_numpy_array(self):
data = OrderedDict([
('ints', [
np.array([0, 1], dtype=np.int64),
np.array([2, 3], dtype=np.int64)
])
])
df = pd.DataFrame(data)
expected_schema = pa.schema([
pa.field('ints', pa.list_(pa.int64()))
])
_check_pandas_roundtrip(df, expected_schema=expected_schema)
@pytest.mark.parametrize('t,data,expected', [
(
pa.int64,
[[1, 2], [3], None],
[None, [3], None]
),
(
pa.string,
[[u'aaa', u'bb'], [u'c'], None],
[None, [u'c'], None]
),
(
pa.null,
[[None, None], [None], None],
[None, [None], None]
)
])
def test_array_from_pandas_typed_array_with_mask(self, t, data, expected):
m = np.array([True, False, True])
s = pd.Series(data)
result = pa.Array.from_pandas(s, mask=m, type=pa.list_(t()))
assert pa.Array.from_pandas(expected,
type=pa.list_(t())).equals(result)
def test_empty_list_roundtrip(self):
empty_list_array = np.empty((3,), dtype=object)
empty_list_array.fill([])
df = pd.DataFrame({'a': np.array(['1', '2', '3']),
'b': empty_list_array})
tbl = pa.Table.from_pandas(df)
result = tbl.to_pandas()
tm.assert_frame_equal(result, df)
def test_array_from_nested_arrays(self):
df, schema = dataframe_with_arrays()
for field in schema:
arr = df[field.name].values
expected = pa.array(list(arr), type=field.type)
result = pa.array(arr)
assert result.type == field.type # == list<scalar>
assert result.equals(expected)
class TestConvertStructTypes(object):
"""
Conversion tests for struct types.
"""
def test_to_pandas(self):
ints = pa.array([None, 2, 3], type=pa.int64())
strs = pa.array([u'a', None, u'c'], type=pa.string())
bools = pa.array([True, False, None], type=pa.bool_())
arr = pa.StructArray.from_arrays(
[ints, strs, bools],
['ints', 'strs', 'bools'])
expected = pd.Series([
{'ints': None, 'strs': u'a', 'bools': True},
{'ints': 2, 'strs': None, 'bools': False},
{'ints': 3, 'strs': u'c', 'bools': None},
])
series = pd.Series(arr.to_pandas())
tm.assert_series_equal(series, expected)
def test_from_numpy(self):
dt = np.dtype([('x', np.int32),
(('y_title', 'y'), np.bool_)])
ty = pa.struct([pa.field('x', pa.int32()),
pa.field('y', pa.bool_())])
data = np.array([], dtype=dt)
arr = pa.array(data, type=ty)
assert arr.to_pylist() == []
data = np.array([(42, True), (43, False)], dtype=dt)
arr = pa.array(data, type=ty)
assert arr.to_pylist() == [{'x': 42, 'y': True},
{'x': 43, 'y': False}]
# With mask
arr = pa.array(data, mask=np.bool_([False, True]), type=ty)
assert arr.to_pylist() == [{'x': 42, 'y': True}, None]
# Trivial struct type
dt = np.dtype([])
ty = pa.struct([])
data = np.array([], dtype=dt)
arr = pa.array(data, type=ty)
assert arr.to_pylist() == []
data = np.array([(), ()], dtype=dt)
arr = pa.array(data, type=ty)
assert arr.to_pylist() == [{}, {}]
def test_from_numpy_nested(self):
dt = np.dtype([('x', np.dtype([('xx', np.int8),
('yy', np.bool_)])),
('y', np.int16)])
ty = pa.struct([pa.field('x', pa.struct([pa.field('xx', pa.int8()),
pa.field('yy', pa.bool_())])),
pa.field('y', pa.int16())])
data = np.array([], dtype=dt)
arr = pa.array(data, type=ty)
assert arr.to_pylist() == []
data = np.array([((1, True), 2), ((3, False), 4)], dtype=dt)
arr = pa.array(data, type=ty)
assert arr.to_pylist() == [{'x': {'xx': 1, 'yy': True}, 'y': 2},
{'x': {'xx': 3, 'yy': False}, 'y': 4}]
@pytest.mark.large_memory
def test_from_numpy_large(self):
# Exercise rechunking + nulls
        target_size = 3 * 1024**3  # 3GB
dt = np.dtype([('x', np.float64), ('y', 'object')])
bs = 65536 - dt.itemsize
block = b'.' * bs
n = target_size // (bs + dt.itemsize)
data = np.zeros(n, dtype=dt)
data['x'] = np.random.random_sample(n)
data['y'] = block
# Add implicit nulls
data['x'][data['x'] < 0.2] = np.nan
ty = pa.struct([pa.field('x', pa.float64()),
pa.field('y', pa.binary(bs))])
arr = pa.array(data, type=ty, from_pandas=True)
assert arr.num_chunks == 2
def iter_chunked_array(arr):
for chunk in arr.iterchunks():
for item in chunk:
yield item
def check(arr, data, mask=None):
assert len(arr) == len(data)
xs = data['x']
ys = data['y']
for i, obj in enumerate(iter_chunked_array(arr)):
try:
d = obj.as_py()
if mask is not None and mask[i]:
assert d is None
else:
x = xs[i]
if np.isnan(x):
assert d['x'] is None
else:
assert d['x'] == x
assert d['y'] == ys[i]
except Exception:
print("Failed at index", i)
raise
check(arr, data)
del arr
# Now with explicit mask
mask = np.random.random_sample(n) < 0.2
arr = pa.array(data, type=ty, mask=mask, from_pandas=True)
assert arr.num_chunks == 2
check(arr, data, mask)
del arr
def test_from_numpy_bad_input(self):
ty = pa.struct([pa.field('x', pa.int32()),
pa.field('y', pa.bool_())])
dt = np.dtype([('x', np.int32),
('z', np.bool_)])
data = np.array([], dtype=dt)
with pytest.raises(TypeError,
match="Missing field 'y'"):
pa.array(data, type=ty)
data = np.int32([])
with pytest.raises(TypeError,
match="Expected struct array"):
pa.array(data, type=ty)
class TestZeroCopyConversion(object):
"""
Tests that zero-copy conversion works with some types.
"""
def test_zero_copy_success(self):
result = pa.array([0, 1, 2]).to_pandas(zero_copy_only=True)
npt.assert_array_equal(result, [0, 1, 2])
def test_zero_copy_dictionaries(self):
arr = pa.DictionaryArray.from_arrays(
np.array([0, 0]),
np.array([5]))
result = arr.to_pandas(zero_copy_only=True)
values = pd.Categorical([5, 5])
tm.assert_series_equal(pd.Series(result), pd.Series(values),
check_names=False)
def check_zero_copy_failure(self, arr):
with pytest.raises(pa.ArrowInvalid):
arr.to_pandas(zero_copy_only=True)
def test_zero_copy_failure_on_object_types(self):
self.check_zero_copy_failure(pa.array(['A', 'B', 'C']))
def test_zero_copy_failure_with_int_when_nulls(self):
self.check_zero_copy_failure(pa.array([0, 1, None]))
def test_zero_copy_failure_with_float_when_nulls(self):
self.check_zero_copy_failure(pa.array([0.0, 1.0, None]))
def test_zero_copy_failure_on_bool_types(self):
self.check_zero_copy_failure(pa.array([True, False]))
def test_zero_copy_failure_on_list_types(self):
arr = pa.array([[1, 2], [8, 9]], type=pa.list_(pa.int64()))
self.check_zero_copy_failure(arr)
def test_zero_copy_failure_on_timestamp_types(self):
arr = np.array(['2007-07-13'], dtype='datetime64[ns]')
self.check_zero_copy_failure(pa.array(arr))
class TestConvertMisc(object):
"""
Miscellaneous conversion tests.
"""
type_pairs = [
(np.int8, pa.int8()),
(np.int16, pa.int16()),
(np.int32, pa.int32()),
(np.int64, pa.int64()),
(np.uint8, pa.uint8()),
(np.uint16, pa.uint16()),
(np.uint32, pa.uint32()),
(np.uint64, pa.uint64()),
(np.float16, pa.float16()),
(np.float32, pa.float32()),
(np.float64, pa.float64()),
# XXX unsupported
# (np.dtype([('a', 'i2')]), pa.struct([pa.field('a', pa.int16())])),
(np.object, pa.string()),
(np.object, pa.binary()),
(np.object, pa.binary(10)),
(np.object, pa.list_(pa.int64())),
]
def test_all_none_objects(self):
df = pd.DataFrame({'a': [None, None, None]})
_check_pandas_roundtrip(df)
def test_all_none_category(self):
df = pd.DataFrame({'a': [None, None, None]})
df['a'] = df['a'].astype('category')
_check_pandas_roundtrip(df)
def test_empty_arrays(self):
for dtype, pa_type in self.type_pairs:
arr = np.array([], dtype=dtype)
_check_array_roundtrip(arr, type=pa_type)
def test_threaded_conversion(self):
df = _alltypes_example()
_check_pandas_roundtrip(df, use_threads=True)
_check_pandas_roundtrip(df, use_threads=True, as_batch=True)
def test_category(self):
repeats = 5
v1 = ['foo', None, 'bar', 'qux', np.nan]
v2 = [4, 5, 6, 7, 8]
v3 = [b'foo', None, b'bar', b'qux', np.nan]
df = pd.DataFrame({'cat_strings': pd.Categorical(v1 * repeats),
'cat_ints': pd.Categorical(v2 * repeats),
'cat_binary': pd.Categorical(v3 * repeats),
'cat_strings_ordered': pd.Categorical(
v1 * repeats, categories=['bar', 'qux', 'foo'],
ordered=True),
'ints': v2 * repeats,
'ints2': v2 * repeats,
'strings': v1 * repeats,
'strings2': v1 * repeats,
'strings3': v3 * repeats})
_check_pandas_roundtrip(df)
arrays = [
pd.Categorical(v1 * repeats),
pd.Categorical(v2 * repeats),
pd.Categorical(v3 * repeats)
]
for values in arrays:
_check_array_roundtrip(values)
def test_empty_category(self):
# ARROW-2443
df = pd.DataFrame({'cat': pd.Categorical([])})
_check_pandas_roundtrip(df)
def test_mixed_types_fails(self):
data = pd.DataFrame({'a': ['a', 1, 2.0]})
with pytest.raises(pa.ArrowTypeError):
pa.Table.from_pandas(data)
data = pd.DataFrame({'a': [1, True]})
with pytest.raises(pa.ArrowTypeError):
pa.Table.from_pandas(data)
def test_strided_data_import(self):
cases = []
columns = ['a', 'b', 'c']
N, K = 100, 3
random_numbers = np.random.randn(N, K).copy() * 100
numeric_dtypes = ['i1', 'i2', 'i4', 'i8', 'u1', 'u2', 'u4', 'u8',
'f4', 'f8']
for type_name in numeric_dtypes:
cases.append(random_numbers.astype(type_name))
# strings
cases.append(np.array([tm.rands(10) for i in range(N * K)],
dtype=object)
.reshape(N, K).copy())
# booleans
boolean_objects = (np.array([True, False, True] * N, dtype=object)
.reshape(N, K).copy())
# add some nulls, so dtype comes back as objects
boolean_objects[5] = None
cases.append(boolean_objects)
cases.append(np.arange("2016-01-01T00:00:00.001", N * K,
dtype='datetime64[ms]')
.reshape(N, K).copy())
strided_mask = (random_numbers > 0).astype(bool)[:, 0]
for case in cases:
df = pd.DataFrame(case, columns=columns)
col = df['a']
_check_pandas_roundtrip(df)
_check_array_roundtrip(col)
_check_array_roundtrip(col, mask=strided_mask)
def test_all_nones(self):
def _check_series(s):
converted = pa.array(s)
assert isinstance(converted, pa.NullArray)
assert len(converted) == 3
assert converted.null_count == 3
assert converted[0] is pa.NA
_check_series(pd.Series([None] * 3, dtype=object))
_check_series(pd.Series([np.nan] * 3, dtype=object))
_check_series(pd.Series([np.sqrt(-1)] * 3, dtype=object))
def test_partial_schema(self):
data = OrderedDict([
('a', [0, 1, 2, 3, 4]),
('b', np.array([-10, -5, 0, 5, 10], dtype=np.int32)),
('c', [-10, -5, 0, 5, 10])
])
df = pd.DataFrame(data)
partial_schema = pa.schema([
pa.field('a', pa.int64()),
pa.field('b', pa.int32())
])
expected_schema = pa.schema([
pa.field('a', pa.int64()),
pa.field('b', pa.int32()),
pa.field('c', pa.int64())
])
_check_pandas_roundtrip(df, schema=partial_schema,
expected_schema=expected_schema)
def test_table_batch_empty_dataframe(self):
df = pd.DataFrame({})
_check_pandas_roundtrip(df)
_check_pandas_roundtrip(df, as_batch=True)
df2 = pd.DataFrame({}, index=[0, 1, 2])
_check_pandas_roundtrip(df2, preserve_index=True)
_check_pandas_roundtrip(df2, as_batch=True, preserve_index=True)
def test_convert_empty_table(self):
arr = pa.array([], type=pa.int64())
tm.assert_almost_equal(arr.to_pandas(), np.array([], dtype=np.int64))
arr = pa.array([], type=pa.string())
tm.assert_almost_equal(arr.to_pandas(), np.array([], dtype=object))
arr = pa.array([], type=pa.list_(pa.int64()))
tm.assert_almost_equal(arr.to_pandas(), np.array([], dtype=object))
arr = pa.array([], type=pa.struct([pa.field('a', pa.int64())]))
tm.assert_almost_equal(arr.to_pandas(), np.array([], dtype=object))
def test_non_natural_stride(self):
"""
ARROW-2172: converting from a Numpy array with a stride that's
not a multiple of itemsize.
"""
dtype = np.dtype([('x', np.int32), ('y', np.int16)])
data = np.array([(42, -1), (-43, 2)], dtype=dtype)
assert data.strides == (6,)
arr = pa.array(data['x'], type=pa.int32())
assert arr.to_pylist() == [42, -43]
arr = pa.array(data['y'], type=pa.int16())
assert arr.to_pylist() == [-1, 2]
def test_mixed_integer_columns(self):
row = [[], []]
df = pd.DataFrame(data=[row], columns=['foo', 123])
expected_df = pd.DataFrame(data=[row], columns=['foo', '123'])
_check_pandas_roundtrip(df, expected=expected_df, preserve_index=True)
def _fully_loaded_dataframe_example():
from distutils.version import LooseVersion
index = pd.MultiIndex.from_arrays([
pd.date_range('2000-01-01', periods=5).repeat(2),
np.tile(np.array(['foo', 'bar'], dtype=object), 5)
])
c1 = pd.date_range('2000-01-01', periods=10)
data = {
0: c1,
1: c1.tz_localize('utc'),
2: c1.tz_localize('US/Eastern'),
3: c1[::2].tz_localize('utc').repeat(2).astype('category'),
4: ['foo', 'bar'] * 5,
5: pd.Series(['foo', 'bar'] * 5).astype('category').values,
6: [True, False] * 5,
7: np.random.randn(10),
8: np.random.randint(0, 100, size=10),
9: pd.period_range('2013', periods=10, freq='M')
}
if LooseVersion(pd.__version__) >= '0.21':
# There is an issue with pickling IntervalIndex in pandas 0.20.x
data[10] = pd.interval_range(start=1, freq=1, periods=10)
return pd.DataFrame(data, index=index)
@pytest.mark.parametrize('columns', ([b'foo'], ['foo']))
def test_roundtrip_with_bytes_unicode(columns):
df = pd.DataFrame(columns=columns)
table1 = pa.Table.from_pandas(df)
table2 = pa.Table.from_pandas(table1.to_pandas())
assert table1.equals(table2)
assert table1.schema.equals(table2.schema)
assert table1.schema.metadata == table2.schema.metadata
def _check_serialize_components_roundtrip(df):
ctx = pa.default_serialization_context()
components = ctx.serialize(df).to_components()
deserialized = ctx.deserialize_components(components)
tm.assert_frame_equal(df, deserialized)
def test_serialize_deserialize_pandas():
# ARROW-1784, serialize and deserialize DataFrame by decomposing
# BlockManager
df = _fully_loaded_dataframe_example()
_check_serialize_components_roundtrip(df)
def _pytime_from_micros(val):
microseconds = val % 1000000
val //= 1000000
seconds = val % 60
val //= 60
minutes = val % 60
hours = val // 60
return time(hours, minutes, seconds, microseconds)
def _pytime_to_micros(pytime):
return (pytime.hour * 3600000000 +
pytime.minute * 60000000 +
pytime.second * 1000000 +
pytime.microsecond)
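# Standalone sketch (not one of the original tests): a minimal illustration of
# the pandas <-> Arrow round trip that the test classes above exercise. It only
# relies on the pa/pd imports already used throughout this module.
if __name__ == '__main__':
    _df = pd.DataFrame({'ints': [1, 2, 3], 'strs': [u'a', None, u'c']})
    _table = pa.Table.from_pandas(_df, preserve_index=False)
    print(_table.schema)       # inferred Arrow schema: ints -> int64, strs -> string
    print(_table.to_pandas())  # converted back to a pandas DataFrame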
|
# flake8: noqa: F401
from . import base
from . import test_base
from . import test_ecephys
from . import test_nwbfile
|
import codecs
import math
from Model.EngHindiDataPreprocess import config, EnglishTokenizer as ENG_Tok, HindiTokenizer as HIN_TOK, \
    IndicTokenizer as IND_TOK
from nltk import word_tokenize
def load_data_sp(path):
with codecs.open(path, encoding='utf-8') as f:
data = f.read().split('\n')
print('Num of Lines in Data:', len(data))
return data
def tokenizer(data: list, flag: bool = True, max_length: float = math.inf):
assert type(data) == list, 'Raw Data should be in list data type'
print(max_length)
token_list = list()
for line in data:
if flag:
tokens = word_tokenize(line)
else:
tokens = ['<sos>'] + line.split(' ') + ['<eos>']
token_list.append(tokens)
if len(token_list) > max_length:
break
return token_list
# Vocab Code for Hindi
hin_vocab_intToText = dict()
hin_vocab_textToInt = dict()
# Vocab Code for English
eng_vocab_intToText = dict()
eng_vocab_textToInt = dict()
# Setting up some Special Token value for Hindi Vocab
hin_vocab_textToInt[config.SOS_TOKEN] = config.SOS_TOKEN_IDX
hin_vocab_textToInt[config.EOS_TOKEN] = config.EOS_TOKEN_IDX
hin_vocab_textToInt[config.PAD_TOKEN] = config.PAD_TOKEN_IDX
hin_vocab_textToInt[config.UNK_TOKEN] = config.UNK_TOKEN_IDX
hin_vocab_intToText[config.SOS_TOKEN_IDX] = config.SOS_TOKEN
hin_vocab_intToText[config.EOS_TOKEN_IDX] = config.EOS_TOKEN
hin_vocab_intToText[config.PAD_TOKEN_IDX] = config.PAD_TOKEN
hin_vocab_intToText[config.UNK_TOKEN_IDX] = config.UNK_TOKEN
# Setting up some Special Token values for English Vocab
eng_vocab_textToInt[config.SOS_TOKEN] = config.SOS_TOKEN_IDX
eng_vocab_textToInt[config.EOS_TOKEN] = config.EOS_TOKEN_IDX
eng_vocab_textToInt[config.PAD_TOKEN] = config.PAD_TOKEN_IDX
eng_vocab_textToInt[config.UNK_TOKEN] = config.UNK_TOKEN_IDX
eng_vocab_intToText[config.SOS_TOKEN_IDX] = config.SOS_TOKEN
eng_vocab_intToText[config.EOS_TOKEN_IDX] = config.EOS_TOKEN
eng_vocab_intToText[config.PAD_TOKEN_IDX] = config.PAD_TOKEN
eng_vocab_intToText[config.UNK_TOKEN_IDX] = config.UNK_TOKEN
count_eng = 4
count_hin = 4
def create_hindi_vocab(data: list):
global count_hin
for arr in data:
for token in arr:
if token in hin_vocab_textToInt.keys():
continue
else:
hin_vocab_textToInt[token] = count_hin
hin_vocab_intToText[count_hin] = token
count_hin += 1
return hin_vocab_textToInt, hin_vocab_intToText
def create_eng_vocab(data: list):
global count_eng
for arr in data:
for token in arr:
if token in eng_vocab_textToInt.keys():
continue
else:
eng_vocab_textToInt[token] = count_eng
eng_vocab_intToText[count_eng] = token
count_eng += 1
return eng_vocab_textToInt, eng_vocab_intToText
def convert_seq_to_int(data: list, flag: bool):
arr = list()
for line in data:
tok_line = list()
for token in line:
if flag:
if token in eng_vocab_textToInt.keys():
tok_line.append(eng_vocab_textToInt[token])
else:
tok_line.append(eng_vocab_textToInt['<unk>'])
else:
if token in hin_vocab_textToInt.keys():
tok_line.append(hin_vocab_textToInt[token])
else:
tok_line.append(hin_vocab_textToInt['<unk>'])
arr.append(tok_line)
return arr
print('Vocab Creation in Progress...')
# English Dataset Tokenization and Vocab Creation
ENG_TOKENIZER = ENG_Tok.EnglishTokenizer()
eng_read = ENG_TOKENIZER.read_from_file(path='mlc_train.hi-en.en')
ENG_TOKENS = ENG_TOKENIZER.tokenize()
create_eng_vocab(ENG_TOKENS)
# Hindi Dataset Tokenization and Vocab Creation
hin_read = IND_TOK.get_sentences(filepath='mlc_train.hi-en.hi')
HIN_TOKENS = IND_TOK.get_token(filepath='mlc_train.hi-en.hi')
create_hindi_vocab(HIN_TOKENS)
# Printing Vocab Size
# print('English Vocab Size:', count_eng, 'Hindi Vocab Size:', count_hin)
print('----------------------Vocab Creation Done for Both----------------------')
def vocab_creator(vocab_dict: dict, flag: bool):
if flag:
out = codecs.open('hindi_vocab.txt', encoding='utf-8', mode='w')
else:
out = codecs.open('english_vocab.txt', encoding='utf-8', mode='w')
for key in vocab_dict.keys():
out.write(f'{key}_:_{vocab_dict[key]}')
out.write('\n')
out.close()
# Vocab txt File Creation for both English and Hindi <For Vocab Creation in txt>
# vocab_creator(eng_vocab_textToInt, flag=False)
# vocab_creator(hin_vocab_textToInt, flag=True)
# Vocab Checker:
# print('English Vocab:', eng_vocab_textToInt)
# print('Hindi Vocab:', hin_vocab_textToInt)
print('Data Conversion to Integer in Progress...')
max_length = -math.inf
def max_length_updator(seq: list):
global max_length
for sent in seq:
if len(sent) > max_length:
max_length = len(sent)
def padding_seq(seq: list):
global max_length
new_seq = list()
for idx in range(len(seq)):
padding = [config.PAD_TOKEN_IDX]*int(max_length - len(seq[idx]))
new_seq.append(seq[idx] + padding)
return new_seq
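# Worked example (toy token ids; assumes max_length has already been updated to
# 4 by max_length_updator, with PAD standing for config.PAD_TOKEN_IDX):
#   padding_seq([[5, 6], [7, 8, 9]]) -> [[5, 6, PAD, PAD], [7, 8, 9, PAD]]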
# Sequence Tokens Convert to Integer Form
ENG_DATA = convert_seq_to_int(ENG_TOKENS, flag=True)
HIN_DATA = convert_seq_to_int(HIN_TOKENS, flag=False)
# Updating Max-Length for Dataset Padding
max_length_updator(ENG_DATA)
max_length_updator(HIN_DATA)
# Adding Padding to Dataset
ENG_DATA_PADDED = padding_seq(ENG_DATA)
HIN_DATA_PADDED = padding_seq(HIN_DATA)
print('Data Conversion to Integer Done...')
# Check for Correct Tokenization
# print(ENG_DATA[:20])
# print(HIN_DATA[:20])
|
# coding=utf-8
# Copyright 2018 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tokenization classes for DistilBERT."""
from .tokenization_bert import BertTokenizer
from .utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"distilbert-base-uncased": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased-vocab.txt",
"distilbert-base-uncased-distilled-squad": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-vocab.txt",
"distilbert-base-cased": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased-vocab.txt",
"distilbert-base-cased-distilled-squad": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-vocab.txt",
"distilbert-base-german-cased": "https://s3.amazonaws.com/models.huggingface.co/bert/distilbert-base-german-cased-vocab.txt",
"distilbert-base-multilingual-cased": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-cased-vocab.txt",
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"distilbert-base-uncased": 512,
"distilbert-base-uncased-distilled-squad": 512,
"distilbert-base-cased": 512,
"distilbert-base-cased-distilled-squad": 512,
"distilbert-base-german-cased": 512,
"distilbert-base-multilingual-cased": 512,
}
PRETRAINED_INIT_CONFIGURATION = {
"distilbert-base-uncased": {"do_lower_case": True},
"distilbert-base-uncased-distilled-squad": {"do_lower_case": True},
"distilbert-base-cased": {"do_lower_case": False},
"distilbert-base-cased-distilled-squad": {"do_lower_case": False},
"distilbert-base-german-cased": {"do_lower_case": False},
"distilbert-base-multilingual-cased": {"do_lower_case": False},
}
class DistilBertTokenizer(BertTokenizer):
r"""
Construct a DistilBERT tokenizer.
:class:`~transformers.DistilBertTokenizer` is identical to :class:`~transformers.BertTokenizer` and runs end-to-end
tokenization: punctuation splitting and wordpiece.
Refer to superclass :class:`~transformers.BertTokenizer` for usage examples and documentation concerning
parameters.
"""
vocab_files_names = VOCAB_FILES_NAMES
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
model_input_names = ["attention_mask"]
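# Illustrative usage sketch (not part of the original module; assumes the
# installed ``transformers`` package exposes this class at the top level):
#
#     from transformers import DistilBertTokenizer
#     tokenizer = DistilBertTokenizer.from_pretrained("distilbert-base-uncased")
#     ids = tokenizer.encode("Hello, DistilBERT!")   # wordpiece ids incl. special tokens
#     tokens = tokenizer.convert_ids_to_tokens(ids)  # back to readable wordpieces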
|
# A part of NonVisual Desktop Access (NVDA)
# Copyright (C) 2009-2021 NV Access Limited
# This file may be used under the terms of the GNU General Public License, version 2 or later.
# For more details see: https://www.gnu.org/licenses/gpl-2.0.html
import enum
class RPC(enum.IntEnum):
E_CALL_CANCELED = -2147418110
S_SERVER_UNAVAILABLE = 1722
S_CALL_FAILED_DNE = 1727
E_CALL_REJECTED = -2147418111
E_DISCONNECTED = -2147417848
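# Illustrative lookup sketch (an assumption about typical use, not part of the
# original module): because RPC is an IntEnum, a raw HRESULT/Win32 code coming
# back from a failed call can be mapped to a named constant.
#   >>> RPC(-2147418110)
#   <RPC.E_CALL_CANCELED: -2147418110>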
|
import boto3
from zipfile import ZipFile
import json
import csv
from retention_times import memory_retention, magnetic_retention, table_name
from math import ceil
def read_s3(Session, event):
"""This method gets an object from an AWS S3 bucket (Backup previously made by
the plugin AWS S3) and prepares the data stored in to be written in
AWS Timestream
Args:
Session: boto3 Session that allows to create service clients and
resources
event: Information about the object set in the trigger
Returns:
        the records to be stored in Timestream, or None if the backup
        contains no CSV data
"""
print('Reading s3')
# Creates a s3 client
s3 = Session.client('s3')
# Get info from a new bucket object
s3_bucket_object = event.get('Records')[0].get('s3').get('object')
s3_bucket_name = event.get(
'Records')[0].get('s3').get('bucket').get('name')
# Get the name of the zip File
print('Bucket: ', s3_bucket_name)
print('Object: ', s3_bucket_object.get('key'))
for item in s3_bucket_object.get('key').split('/'):
if '.zip' in item:
zip_file = item
# Download the file from the client's S3 bucket
s3.download_file(
s3_bucket_name,
s3_bucket_object.get('key'),
'/tmp/{}'.format(zip_file)
)
# Data formatting for TimeStream
# open file zip
with ZipFile('/tmp/{}'.format(zip_file), 'r') as zip:
records = []
# Go over each csv file
for file_name in zip.namelist():
if '.csv' not in file_name:
continue
with zip.open(file_name, 'r', pwd=None) as csv_file:
print('csv_file: ', csv_file)
device_name = file_name.split('/')[1]
variable_name = file_name.split('/')[2]
# Each line needs to be decode into utf-8
lines = [line.decode('utf-8') for line in csv_file.readlines()]
reader = csv.reader(lines)
parsed_csv = list(reader)
for row in parsed_csv[1:]:
# Clean the context variable inside csv file
context = json.loads(row[3][2:-1])
dimensions = [{
'Name': 'device',
'Value': device_name
}]
# If context is not empty, it is added to the dimension
if len(context) != 0:
for key, value in context.items():
dimensions.append({
'Name': key,
'Value': str(value)
})
# Each line is stored as new timestream record
records.append({
'Dimensions': dimensions,
'MeasureName': variable_name,
'MeasureValue': str(row[2]),
'Time': row[0],
})
    # If the zip file is empty or no csv files were found
    if not records:
        return None
return records
def write_timestream(Session, records, t_name):
"""This method write records on AWS timestream
Args:
Session: boto3 Session that allows to create service clients and
resources
records: data to be stored in AWS timestream
t_name: table name to be created in AWS timestream inside
ubidots_s3_backup
Returns:
Nothing
"""
print('Writing to timestream')
print('Number of records:', len(records))
timestream = Session.client('timestream-write')
# Creates the database
try:
print('Creating Database')
timestream.create_database(
DatabaseName='ubidots_s3_backup'
)
# Checks if the database already exists
except timestream.exceptions.ConflictException:
print('Database already exists')
pass
# Creates the table
try:
print('Creating table')
timestream.create_table(
DatabaseName='ubidots_s3_backup',
TableName=t_name,
RetentionProperties={
'MemoryStoreRetentionPeriodInHours': memory_retention,
'MagneticStoreRetentionPeriodInDays': magnetic_retention
}
)
# Checks if the table already exists
except timestream.exceptions.ConflictException:
print('Table already exists. Updating table properties')
timestream.update_table(
DatabaseName='ubidots_s3_backup',
TableName=t_name,
RetentionProperties={
'MemoryStoreRetentionPeriodInHours': memory_retention,
'MagneticStoreRetentionPeriodInDays': magnetic_retention
}
)
# Write the records
try:
calls = ceil(len(records) / 100)
print('Calls:', calls)
for i in range(calls):
timestream.write_records(
DatabaseName='ubidots_s3_backup',
TableName=t_name,
Records=records[100 * i:100 * (i + 1)]
)
    # The slice bounds already handle the final partial batch; keep a defensive fallback anyway
    except IndexError:
        timestream.write_records(
            DatabaseName='ubidots_s3_backup',
            TableName=t_name,
            Records=records[100 * i:]
        )
    # If some records are rejected, the error is printed as a warning
    except timestream.exceptions.RejectedRecordsException as err:
        print('Warning: Some records were rejected. '
              'See RejectedRecords for more information')
print('RejectedRecords: ', err.response['RejectedRecords'])
def lambda_handler(event, context):
"""This method is the handler for the AWS Lambda function
Args:
event: Information about the object set in the trigger
context: LambdaContext Object
Returns:
Status code and the corresponding message
"""
Session = boto3.Session()
    records = read_s3(Session, event)
if records is None:
return {
'statusCode': 400,
'body': json.dumps('No records found!')
}
else:
write_timestream(Session, records, table_name)
return {
'statusCode': 200,
'body': json.dumps('Records written successfully!')
}
|
import poets
def main():
print(poets.poets())
if __name__ == '__main__':
main()
|
from typing import List
from starlette.status import HTTP_200_OK, HTTP_202_ACCEPTED, HTTP_404_NOT_FOUND
from fastapi import Request, APIRouter, File
from fastapi.datastructures import UploadFile
from pydantic import BaseModel
from .abstraction import create_linked_issue, create_new_issue
class IssueTable(BaseModel):
Issue_Name: str
Issue_Tags: List[str]
Issue_description: str
isActive: bool
uniqueID: str
LinkedIssue_id: str
User_id: str
class IssueTableNew(BaseModel):
Issue_Name: str
Issue_Tags: List[str]
Issue_description: str
isActive: bool
uniqueID: str
User_Id: str
issues = APIRouter()
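# Router exposing CRUD-style endpoints for issues; the ORM models from Logic.models
# are imported lazily inside each endpoint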
@issues.get("/get-all-issues")
def show_all_issues():
from Logic.models import Issues
return list(Issues.objects.all())
@issues.get("/get-issue/{issue_id}")
def get_specific_issue(issue_id: str):
from Logic.models import Issues
specific_issue = Issues.objects.get(Issue_Name=issue_id)
return specific_issue
@issues.post("/post-new-issue")
def post_new_issue(request: Request, table: IssueTableNew):
create_new_issue(request, table)
return {HTTP_200_OK:"New issue was added"}
@issues.delete("/delete-issues")
def delete_an_issue(id: str):
from Logic.models import Issues
try:
instance = Issues.objects.get(uniqueID=id)
instance.delete()
return {HTTP_202_ACCEPTED : f"{id} was deleted"}
except Exception:
return {HTTP_404_NOT_FOUND:"Image not added"}
@issues.post("/add-image")
def create_file(unique_id: str, file: UploadFile = File(...)):
from Logic.models import Issues
try:
instance = Issues.objects.get(uniqueID = unique_id)
instance.Issues_image = file.file.read()
instance.save() #Images aren't being loaded on to POSTGRES
return {HTTP_202_ACCEPTED:"New image was added."}
except Exception:
return {HTTP_404_NOT_FOUND:"Image not added"}
@issues.post("/post-linked-issue")
def post_a_linked_issue(issuesTable: IssueTable):
from Logic.models import Issues, UserModel
if len(Issues.objects.filter(uniqueID=issuesTable.LinkedIssue_id)):
        create_linked_issue(issuesTable)
return {HTTP_200_OK:"New issue was saved."}
else:
return {HTTP_404_NOT_FOUND:"Instance not found"}
|
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class PyQuantities(PythonPackage):
"""Support for physical quantities with units, based on numpy"""
homepage = "http://python-quantities.readthedocs.org"
url = "https://pypi.io/packages/source/q/quantities/quantities-0.12.1.tar.gz"
version('0.12.1', '9c9ecda15e905cccfc420e5341199512')
version('0.11.1', 'f4c6287bfd2e93322b25a7c1311a0243',
url="https://pypi.io/packages/source/q/quantities/quantities-0.11.1.zip")
conflicts('py-numpy@1.13:', when='@:0.11.99')
depends_on('python@2.6.0:')
depends_on('py-numpy@1.4.0:', type=('build', 'run'))
|
import requests
from pyquery import PyQuery as pq
import json
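# Scrape the Zhihu explore page and collect the title, answer summary and author of each feed item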
def get_one_page(url):
headers = {
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36'
}
res = requests.get(url, headers=headers)
text = res.text
doc = pq(text)
results = []
for item in doc.find('.explore-feed.feed-item').items():
temp = {
'title': item.find('h2').text(),
            'answer': item.find('.zh-summary.summary').text(),
'author': item.find('.author-link-line').text()
}
results.append(temp)
return results
def write_to_txt(results):
for item in results:
with open('./output/zhihu_data.txt', 'a', encoding='utf-8') as f:
print(item)
question = ''
question = question + item['title'] + '\n'
            question = question + item['answer'] + '\n'
question = question + item['author'] + '\n'
question = question + '========\n'
f.write(question)
def write_to_json(results):
with open('./output/zhihu_data.json', 'w', encoding='utf-8') as f:
f.write(json.dumps(results, indent=4, ensure_ascii=False))
def main():
url = 'https://www.zhihu.com/explore'
results = get_one_page(url)
# write_to_txt(results)
write_to_json(results)
print(results)
main()
|
from .ndfa import *
from .dfa import *
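# The star imports above also bind the ndfa and dfa submodules as attributes of this
# package, which is what makes them referencable when aggregating __all__ below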
__all__ = []
__all__ += ndfa.__all__
__all__ += dfa.__all__
|
import asyncio
import tempfile
import urllib.parse
from django.conf import settings
from django.http import FileResponse
from core.models import CodeSignToken
from core.utils import get_core_settings, get_mesh_device_id, get_mesh_ws_url
from tacticalrmm.constants import MeshAgentIdent
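# Build the agent download URL; without a valid code-signing token the stock download link is returned unchanged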
def get_agent_url(arch: str, plat: str) -> str:
if plat == "windows":
endpoint = "winagents"
dl_url = settings.DL_32 if arch == "32" else settings.DL_64
else:
endpoint = "linuxagents"
dl_url = ""
token = CodeSignToken.objects.first()
if not token:
return dl_url
if token.is_valid:
base_url = settings.EXE_GEN_URL + f"/api/v1/{endpoint}/?"
params = {
"version": settings.LATEST_AGENT_VER,
"arch": arch,
"token": token.token,
}
dl_url = base_url + urllib.parse.urlencode(params)
return dl_url
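# Render the Linux install script by substituting the placeholder tokens in the template
# with deployment-specific values and return it as a downloadable file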
def generate_linux_install(
client: str,
site: str,
agent_type: str,
arch: str,
token: str,
api: str,
download_url: str,
) -> FileResponse:
match arch:
case "amd64":
arch_id = MeshAgentIdent.LINUX64
case "386":
arch_id = MeshAgentIdent.LINUX32
case "arm64":
arch_id = MeshAgentIdent.LINUX_ARM_64
case "arm":
arch_id = MeshAgentIdent.LINUX_ARM_HF
case _:
arch_id = "not_found"
core = get_core_settings()
uri = get_mesh_ws_url()
mesh_id = asyncio.run(get_mesh_device_id(uri, core.mesh_device_group))
mesh_dl = (
f"{core.mesh_site}/meshagents?id={mesh_id}&installflags=0&meshinstall={arch_id}"
)
sh = settings.LINUX_AGENT_SCRIPT
with open(sh, "r") as f:
text = f.read()
replace = {
"agentDLChange": download_url,
"meshDLChange": mesh_dl,
"clientIDChange": client,
"siteIDChange": site,
"agentTypeChange": agent_type,
"tokenChange": token,
"apiURLChange": api,
}
for i, j in replace.items():
text = text.replace(i, j)
with tempfile.NamedTemporaryFile() as fp:
with open(fp.name, "w") as f:
f.write(text)
f.write("\n")
return FileResponse(
open(fp.name, "rb"), as_attachment=True, filename="linux_agent_install.sh"
)
|
import sys

try:
    import logo as logo_print
except ModuleNotFoundError:
    missingfile = str(input("The program is missing a file. Continue anyways? "))
    if missingfile.lower() in ("yes", "y", "yea"):
        pass
    else:
        sys.exit(0)
try:
from bs4 import BeautifulSoup
import requests, time, re, os, random
from termcolor import colored
from colorama import init
except (ModuleNotFoundError, ImportError):
    print("The program is missing essential libraries. Read the GitHub tutorial on how to install all the libraries.")
    sys.exit(0)
os.system("mode con cols=150 lines=75")
decision = ''
init()
colors = ['red', 'green', 'yellow', 'blue', 'magenta', 'cyan']
# DEFS BELOW
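# print_status scrapes robloxforum.com, prints the online-user counter and colour-codes
# usernames by their CSS style class; in TRACK mode the results can also be appended to log.txt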
def print_status():
obj = time.localtime()
currentime = time.asctime(obj)
if decision.lower() == 'sample':
pass
else:
print(decision)
time.sleep(a)
source = requests.get('https://www.robloxforum.com').text
soup = BeautifulSoup(source, 'lxml')
stringy = soup.find('span', class_='block-footer-counter').text
    usernames = soup.find_all('span', class_=['username--style1', 'username--style2', 'username--style3', 'username--style4',
                                              'username--style5', 'username--style6', 'username--style7', 'username--style8',
                                              'username--style9', 'username--style10', 'username--style11'])
whitespace_remove = stringy.replace(' Robots', "Robots")
print(currentime)
print(whitespace_remove)
for span in usernames:
attr = span.attrs['class']
numbas = re.findall(r'\d+', str(attr))
if numbas[0] == "2":
print(span.text)
elif numbas[0] == "3":
print(colored(span.text, 'red', attrs=['bold']))
elif numbas[0] == "4":
print(colored(span.text, 'blue', attrs=['bold']))
elif numbas[0] == "6":
print(colored(span.text, 'green', attrs=['bold']))
elif numbas[0] == "7":
print(colored(span.text, 'green'))
elif numbas[0] == "8":
print(colored(span.text, 'blue'))
elif numbas[0] == "9":
print(colored(span.text, 'yellow'))
elif numbas[0] == "10":
def strike(text):
return ''.join([u'\u0336{}'.format(c) for c in text])
black = (colored(span.text, 'yellow'))
print(strike(black))
elif numbas[0] == "11":
print(colored(span.text, 'blue', attrs=['bold']))
print('\n')
    if decision.lower() == 'sample':
print()
else:
if b.lower() == "y" or "yes" or "yea":
with open("log.txt", "a") as o:
encoded_string = stringy.encode("ascii", "ignore")
decode_string = encoded_string.decode()
whitespace_remove = decode_string.replace(' Robots', "Robots")
o.write(whitespace_remove)
if c.lower() == "y" or "yes" or "yea": o.write(currentime + '\n')
for span in usernames:
attr = span.attrs['class']
numbas = re.findall(r'\d+', str(attr))
sp = span.text
obj = time.localtime()
currentime = time.asctime(obj)
if c.lower() == "y" or "yes" or "yea":
if numbas[0] == "2":
o.write(sp + " | normal user")
o.write('\n')
elif numbas[0] == "3":
o.write(sp + " | administrator")
o.write('\n')
elif numbas[0] == "4":
o.write(sp + " | moderator")
o.write('\n')
elif numbas[0] == "6":
o.write(sp + " | verified")
o.write('\n')
elif numbas[0] == "7":
o.write(sp + " | vip")
o.write('\n')
elif numbas[0] == "8":
o.write(sp + " | pro")
o.write('\n')
elif numbas[0] == "9":
o.write(sp + " | ultra")
o.write('\n')
elif numbas[0] == "10":
o.write(sp + " | banned")
o.write('\n')
o.write('\n')
else:
pass
else:
pass
def run():
process = 1
while process == 1:
print_status()
# DEFS ABOVE
try:
print(colored(logo_print.final_str, random.choice(colors)))
except ModuleNotFoundError:
pass
print(colored("RF trackbot - credits to MATIEO33", 'blue'))
print(colored("RF: https://robloxforum.com/members/matieo33.8832/", 'red'))
print(colored("Github: https://github.com/matieo33", 'green'))
print("Available options: TRACK SAMPLE HELP \n")
if __name__ == '__main__':
in_menu = 1
while in_menu == 1:
decision = str(input())
if decision.lower() == 'help':
print("I made this bot purely for the purpose of entertainment, and if ever happens - maybe also will come in handy for somebody.")
print("Wanna help this bot grow? DM me.")
print('Important: CTRL + C will stop the program entirely! Make sure to answer with "Y" if you wish to save the data to a TXT file.')
print(
"TRACK: Prints the activity of the site per amount of seconds you select.")
print(
"SAMPLE: Prints the activity of the site one time as an example of the program's work.")
elif decision.lower() == 'sample':
print('')
print_status()
elif decision.lower() == 'track':
print('')
in_menu = 0
else:
print("ERROR: unknown command " + "'" + decision + "'")
    a = int(input(
        "Every how many seconds do you wish to receive updates on the site activity? "))
    b = str(input("Do you wish the data to be saved to a TXT file? "))
    if b.lower() in ("y", "yes", "yea"):
        c = str(input('Do you wish to include the list of all online users? '))
        while 1:
            print_status()
    else:
        while 1:
            print_status()
|
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import inspect
from spack.directives import extends
from spack.package import PackageBase, run_after
class RPackage(PackageBase):
"""Specialized class for packages that are built using R.
For more information on the R build system, see:
https://stat.ethz.ch/R-manual/R-devel/library/utils/html/INSTALL.html
This class provides a single phase that can be overridden:
1. :py:meth:`~.RPackage.install`
It has sensible defaults, and for many packages the only thing
necessary will be to add dependencies
"""
phases = ['install']
maintainers = ['glennpj']
#: This attribute is used in UI queries that need to know the build
#: system base class
build_system_class = 'RPackage'
extends('r')
def configure_args(self):
"""Arguments to pass to install via ``--configure-args``."""
return []
def configure_vars(self):
"""Arguments to pass to install via ``--configure-vars``."""
return []
def install(self, spec, prefix):
"""Installs an R package."""
config_args = self.configure_args()
config_vars = self.configure_vars()
args = [
'CMD',
'INSTALL'
]
if config_args:
args.append('--configure-args={0}'.format(' '.join(config_args)))
if config_vars:
args.append('--configure-vars={0}'.format(' '.join(config_vars)))
args.extend([
'--library={0}'.format(self.module.r_lib_dir),
self.stage.source_path
])
inspect.getmodule(self).R(*args)
# Check that self.prefix is there after installation
run_after('install')(PackageBase.sanity_check_prefix)
|
# coding:utf-8
import time
from threading import Thread
value = 0
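# No lock protects the read-modify-write in getlock(), so concurrent threads race on
# `value` and the final result is usually far less than 100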
def getlock():
global value
new = value + 1
    time.sleep(0.001)  # use sleep to give other threads a chance to switch in
value = new
threads = []
for i in range(100):
t = Thread(target=getlock)
t.start()
threads.append(t)
for t in threads:
t.join()
print(value)
|
# Copyright (c) 2006-2007 The Regents of The University of Michigan
# Copyright (c) 2010 Advanced Micro Devices, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Ron Dreslinski
import m5
from m5.objects import *
from m5.defines import buildEnv
from m5.util import addToPath
import os, optparse, sys
# Get paths we might need
config_path = os.path.dirname(os.path.abspath(__file__))
config_root = os.path.dirname(config_path)
m5_root = os.path.dirname(config_root)
addToPath(config_root+'/configs/common')
addToPath(config_root+'/configs/ruby')
addToPath(config_root+'/configs/topologies')
import Ruby
import Options
parser = optparse.OptionParser()
Options.addCommonOptions(parser)
# Add the ruby specific and protocol specific options
Ruby.define_options(parser)
(options, args) = parser.parse_args()
#
# Set the default cache size and associativity to be very small to encourage
# races between requests and writebacks.
#
options.l1d_size="256B"
options.l1i_size="256B"
options.l2_size="512B"
options.l3_size="1kB"
options.l1d_assoc=2
options.l1i_assoc=2
options.l2_assoc=2
options.l3_assoc=2
options.ports=32
# MAX CORES IS 8 with the false sharing method
nb_cores = 8
# ruby does not support atomic, functional, or uncacheable accesses
cpus = [ MemTest(atomic=False, percent_functional=50,
percent_uncacheable=0, suppress_func_warnings=True) \
for i in xrange(nb_cores) ]
# overwrite options.num_cpus with the nb_cores value
options.num_cpus = nb_cores
# system simulated
system = System(cpu = cpus,
funcmem = SimpleMemory(in_addr_map = False),
funcbus = NoncoherentXBar())
# Dummy voltage domain for all our clock domains
system.voltage_domain = VoltageDomain()
system.clk_domain = SrcClockDomain(clock = '1GHz',
voltage_domain = system.voltage_domain)
# Create a separate clock domain for components that should run at the
# CPUs' frequency
system.cpu_clk_domain = SrcClockDomain(clock = '2GHz',
voltage_domain = system.voltage_domain)
# All cpus are associated with cpu_clk_domain
for cpu in cpus:
cpu.clk_domain = system.cpu_clk_domain
system.mem_ranges = AddrRange('256MB')
Ruby.create_system(options, False, system)
# Create a separate clock domain for Ruby
system.ruby.clk_domain = SrcClockDomain(clock = options.ruby_clock,
voltage_domain = system.voltage_domain)
assert(len(cpus) == len(system.ruby._cpu_ports))
for (i, ruby_port) in enumerate(system.ruby._cpu_ports):
#
# Tie the cpu test and functional ports to the ruby cpu ports and
# physmem, respectively
#
cpus[i].test = ruby_port.slave
cpus[i].functional = system.funcbus.slave
#
# Since the memtester is incredibly bursty, increase the deadlock
# threshold to 1 million cycles
#
ruby_port.deadlock_threshold = 1000000
# connect reference memory to funcbus
system.funcmem.port = system.funcbus.master
# -----------------------
# run simulation
# -----------------------
root = Root(full_system = False, system = system)
root.system.mem_mode = 'timing'
# Not much point in this being higher than the L1 latency
m5.ticks.setGlobalFrequency('1ns')
|
import operator as op
import os
from typing import Dict, Callable
from dolfin import Expression, Mesh
from dolfin.cpp.mesh import MeshFunctionSizet
class Material:
def __init__(self, rho: float, young_modulus: float, shear_modulus: float):
self._rho = rho
self._young_modulus = young_modulus
self._shear_modulus = shear_modulus
@property
def rho(self) -> float:
return self._rho
@property
def young_modulus(self) -> float:
return self._young_modulus
@property
def shear_modulus(self) -> float:
return self._shear_modulus
@property
def lambda_(self) -> float:
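        # Lamé's first parameter derived from Young's modulus E and shear modulus G:
        # lambda = G * (E - 2G) / (3G - E)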
return self.shear_modulus * (self.young_modulus - 2 * self.shear_modulus) / (
3 * self.shear_modulus - self.young_modulus)
@property
def mu(self) -> float:
return self.shear_modulus
def __repr__(self):
return '<Material rho={rho} young_modulus={young_modulus} shear_modulus={shear_modulus}>'.format(
rho=self.rho, young_modulus=self.young_modulus, shear_modulus=self.shear_modulus,
)
class Base:
def __init__(self, g: float, mesh_dir: str, materials: Dict[int, Material]):
self._g = g
self._mesh_dir = mesh_dir
self._materials = materials
self._mesh = Mesh(os.path.join(mesh_dir, 'mesh.xml'))
self._subdomains = MeshFunctionSizet(self.mesh, os.path.join(mesh_dir, 'mesh_physical_region.xml'))
self._boundaries = MeshFunctionSizet(self.mesh, os.path.join(mesh_dir, 'mesh_facet_region.xml'))
@property
def g(self):
return self._g
@property
def mesh_dir(self) -> str:
return self._mesh_dir
@property
def mesh(self) -> Mesh:
return self._mesh
@property
def materials(self) -> Dict[int, Material]:
return self._materials
@property
def lambda_(self) -> Expression:
return MaterialGetter.create(materials=self.materials, subdomains=self._subdomains, f=op.attrgetter('lambda_'))
@property
def mu(self) -> Expression:
return MaterialGetter.create(materials=self.materials, subdomains=self._subdomains, f=op.attrgetter('mu'))
@property
def rho(self):
return MaterialGetter.create(materials=self.materials, subdomains=self._subdomains, f=op.attrgetter('rho'))
def __repr__(self) -> str:
return '<Base g={g} mesh_dir={mesh_dir} materials={materials}>'.format(
g=self.g, mesh_dir=self.mesh_dir, materials=self.materials
)
class UnknownDomainException(Exception):
pass
class MaterialGetter(Expression):
@staticmethod
def create(materials: Dict[int, Material], subdomains: MeshFunctionSizet, f: Callable[[Material], float]):
a = MaterialGetter(degree=0)
a._f = f
a._subdomains = subdomains
a._materials = materials
return a
def eval_cell(self, values, x, cell):
material = self._materials.get(self._subdomains[cell.index])
if material:
values[0] = self._f(material)
else:
raise UnknownDomainException()
|
def load():
with open("inputs/7.txt") as f:
yield from map(int, f.readline().split(","))
def solve(cost):
positions = tuple(load())
min_position, max_position = min(positions), max(positions)
return min(
sum(cost(suggestion, crab) for crab in positions)
for suggestion in range(min_position, max_position + 1)
)
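# Part a uses the linear fuel cost |a - b|; part b uses the triangular cost
# 1 + 2 + ... + d = d * (d + 1) / 2, written here as (d + d**2) // 2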
print("a:", solve(lambda a, b: abs(a - b)))
print("b:", solve(lambda a, b: (abs(a - b) + abs(a - b) ** 2) // 2))
|
''' Layers
This file contains various layers for the BigGAN models.
'''
import numpy as np
import paddorch as torch
import paddorch.nn as nn
from paddorch.nn import init
import paddorch.optim as optim
import paddorch.nn.functional as F
from paddorch.nn import Parameter as P
# Projection of x onto y
def proj(x, y):
return torch.mm(y, x.t()) * y / torch.mm(y, y.t())
# Orthogonalize x wrt list of vectors ys
def gram_schmidt(x, ys):
for y in ys:
x = x - proj(x, y)
return x
# Apply num_itrs steps of the power method to estimate top N singular values.
def power_iteration(W, u_, update=True, eps=1e-12):
# Lists holding singular vectors and values
Wt=torch.Tensor(W).t()
us, vs, svs = [], [], []
for i, u in enumerate(u_):
# Run one step of the power iteration
with torch.no_grad():
if W.shape[1] == 27:
a = 1
v = torch.matmul(u, W)
# if (W.shape[0]==u.shape[1]) :
# v = torch.matmul(u, W)
# else:
# v = torch.matmul(u, Wt)
# Run Gram-Schmidt to subtract components of all other singular vectors
v = F.normalize(gram_schmidt(v, vs), eps=eps)
# Add to the list
vs += [v]
# Update the other singular vector
u = torch.matmul(v, Wt)
# if (W.shape[0]!=v.shape[1]):
# u = torch.matmul(v, Wt )
# else:
# u = torch.matmul(v, W)
# Run Gram-Schmidt to subtract components of all other singular vectors
u = F.normalize(gram_schmidt(u, us), eps=eps)
# Add to the list
us += [u]
if update:
torch.copy(u,u_[i])
# u_[i][:] = u
# Compute this singular value and add it to the list
svs += [torch.squeeze(torch.matmul(torch.matmul(v, Wt), u.t()))]
# if (W.shape[0]!=v.shape[1]):
# svs += [torch.squeeze(torch.matmul(torch.matmul(v, Wt ), u.t() ))]
# else:
# svs += [torch.squeeze(torch.matmul(torch.matmul(v, W), u.t()))]
#svs += [torch.sum(F.linear(u, W.transpose(0, 1)) * v)]
return svs, us, vs
# Convenience passthrough function
class identity(nn.Module):
def forward(self, input):
return input
# Spectral normalization base class
class SN(object):
def __init__(self, num_svs, num_itrs, num_outputs, transpose=False, eps=1e-12):
# Number of power iterations per step
self.num_itrs = num_itrs
# Number of singular values
self.num_svs = num_svs
# Transposed?
self.transpose = transpose
# Epsilon value for avoiding divide-by-0
self.eps = eps
self.register_buffer=dict()
# Register a singular vector for each sv
self.name="%d_%d_%d"%(num_svs, num_itrs, num_outputs)
for i in range(self.num_svs):
self.__setattr__('u%d' % i,torch.nn.Parameter(torch.randn(1, num_outputs)))
self.__setattr__('sv%d' % i, torch.nn.Parameter(torch.ones(1)))
# self.register_buffer['u%d' % i]=
# self.register_buffer['sv%d' % i]= torch.ones(1)
# Singular vectors (u side)
@property
def u(self):
DD=[self.state_dict()['u%d' % i] for i in range(self.num_svs)]
return DD
# return [self.register_buffer['u%d' % i] for i in range(self.num_svs)]
# Singular values;
# note that these buffers are just for logging and are not used in training.
@property
def sv(self):
return [self.state_dict()['sv%d' % i] for i in range(self.num_svs)]
# return [self.register_buffer['sv%d' % i] for i in range(self.num_svs)]
# Compute the spectrally-normalized weight
def W_(self):
self.training=True
if isinstance(self,SNLinear):
W_mat = torch.Tensor(self.weight).t() ##linear layer weight is different from pytorch weight, need to transpose
else:
W_mat = torch.Tensor(self.weight).view(self.weight.shape[0], -1)
if self.transpose:
W_mat = W_mat.t()
# Apply num_itrs power iterations
for _ in range(self.num_itrs):
svs, us, vs = power_iteration(W_mat, self.u, update=self.training, eps=self.eps)
# Update the svs
if self.training:
with torch.no_grad(): # Make sure to do this in a no_grad() context or you'll get memory leaks!
for i, sv in enumerate(svs):
torch.copy(sv,self.sv[i])
# self.sv[i][:] = sv
return self.weight / svs[0]
# 2D Conv layer with spectral norm
class SNConv2d(nn.Conv2d, SN):
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
padding=0, dilation=1, groups=1, bias=True,
num_svs=1, num_itrs=1, eps=1e-12):
nn.Conv2d.__init__(self, in_channels, out_channels, kernel_size, stride,
padding, dilation, groups, bias)
SN.__init__(self, num_svs, num_itrs, out_channels, eps=eps)
self.stride=stride
self.dilation=dilation
self.groups=groups
self.padding=padding
def forward(self, x):
return F.conv2d(x, self.W_(), self.bias, self.stride,
self.padding, self.dilation, self.groups)
# Linear layer with spectral norm
class SNLinear(nn.Linear, SN):
def __init__(self, in_features, out_features, bias=True,
num_svs=1, num_itrs=1, eps=1e-12):
nn.Linear.__init__(self, in_features, out_features, bias)
SN.__init__(self, num_svs, num_itrs, out_features, eps=eps)
def forward(self, x):
return F.linear(x, self.W_(), self.bias)
# Embedding layer with spectral norm
# We use num_embeddings as the dim instead of embedding_dim here
# for convenience sake
class SNEmbedding(nn.Embedding, SN):
def __init__(self, num_embeddings, embedding_dim, padding_idx=None,
max_norm=None, norm_type=2, scale_grad_by_freq=False,
sparse=False, _weight=None,
num_svs=1, num_itrs=1, eps=1e-12):
nn.Embedding.__init__(self, num_embeddings, embedding_dim, padding_idx,
max_norm, norm_type, scale_grad_by_freq,
sparse, _weight)
SN.__init__(self, num_svs, num_itrs, num_embeddings, eps=eps)
def forward(self, x):
return F.embedding(x ,self.W_())
# A non-local block as used in SA-GAN
# Note that the implementation as described in the paper is largely incorrect;
# refer to the released code for the actual implementation.
class Attention(nn.Module):
def __init__(self, ch, which_conv=SNConv2d, name='attention'):
super(Attention, self).__init__()
# Channel multiplier
self.ch = ch
self.which_conv = which_conv
self.theta = self.which_conv(self.ch, self.ch // 8, kernel_size=1, padding=0, bias=False)
self.phi = self.which_conv(self.ch, self.ch // 8, kernel_size=1, padding=0, bias=False)
self.g = self.which_conv(self.ch, self.ch // 2, kernel_size=1, padding=0, bias=False)
self.o = self.which_conv(self.ch // 2, self.ch, kernel_size=1, padding=0, bias=False)
# Learnable gain parameter
self.gamma = P(torch.tensor(0.), requires_grad=True)
def forward(self, x, y=None):
# Apply convs
theta = self.theta(x)
phi = F.max_pool2d(self.phi(x), [2,2])
g = F.max_pool2d(self.g(x), [2,2])
# Perform reshapes
        theta = theta.view(-1, self.ch // 8, x.shape[2] * x.shape[3])
        phi = phi.view(-1, self.ch // 8, x.shape[2] * x.shape[3] // 4)
        g = g.view(-1, self.ch // 2, x.shape[2] * x.shape[3] // 4)
# Matmul and softmax to get attention maps
beta = F.softmax(torch.bmm(theta.transpose(1, 2), phi), -1)
# Attention map times g path
o = self.o(torch.bmm(g, beta.transpose(1,2)).view(-1, self.ch // 2, x.shape[2], x.shape[3]))
return self.gamma * o + x
# Fused batchnorm op
def fused_bn(x, mean, var, gain=None, bias=None, eps=1e-5):
# Apply scale and shift--if gain and bias are provided, fuse them here
# Prepare scale
scale = torch.rsqrt(var + eps)
# If a gain is provided, use it
if gain is not None:
scale = scale * gain
# Prepare shift
shift = mean * scale
# If bias is provided, use it
if bias is not None:
shift = shift - bias
return x * scale - shift
#return ((x - mean) / ((var + eps) ** 0.5)) * gain + bias # The unfused way.
# Manual BN
# Calculate means and variances using mean-of-squares minus mean-squared
def manual_bn(x, gain=None, bias=None, return_mean_var=False, eps=1e-5):
# Cast x to float32 if necessary
float_x = x.float()
# Calculate expected value of x (m) and expected value of x**2 (m2)
# Mean of x
m = torch.mean(float_x, [0, 2, 3], keepdim=True)
# Mean of x squared
m2 = torch.mean(float_x ** 2, [0, 2, 3], keepdim=True)
# Calculate variance as mean of squared minus mean squared.
var = (m2 - m **2)
# Cast back to float 16 if necessary
var = var.type(x.type())
m = m.type(x.type())
# Return mean and variance for updating stored mean/var if requested
if return_mean_var:
return fused_bn(x, m, var, gain, bias, eps), m.squeeze(), var.squeeze()
else:
return fused_bn(x, m, var, gain, bias, eps)
# My batchnorm, supports standing stats
class myBN(nn.Module):
def __init__(self, num_channels, eps=1e-5, momentum=0.1):
super(myBN, self).__init__()
# momentum for updating running stats
self.momentum = momentum
# epsilon to avoid dividing by 0
self.eps = eps
# Momentum
self.momentum = momentum
# Register buffers
self.stored_mean= torch.nn.Parameter( torch.zeros(num_channels))
self.stored_var= torch.nn.Parameter( torch.ones(num_channels))
self.accumulation_counter= torch.nn.Parameter( torch.zeros(1))
# Accumulate running means and vars
self.accumulate_standing = False
# reset standing stats
def reset_stats(self):
self.stored_mean[:] = 0
self.stored_var[:] = 0
self.accumulation_counter[:] = 0
def forward(self, x, gain, bias):
if self.training:
out, mean, var = manual_bn(x, gain, bias, return_mean_var=True, eps=self.eps)
# If accumulating standing stats, increment them
if self.accumulate_standing:
self.stored_mean[:] = self.stored_mean + mean.data
self.stored_var[:] = self.stored_var + var.data
self.accumulation_counter += 1.0
# If not accumulating standing stats, take running averages
else:
self.stored_mean[:] = self.stored_mean * (1 - self.momentum) + mean * self.momentum
self.stored_var[:] = self.stored_var * (1 - self.momentum) + var * self.momentum
return out
# If not in training mode, use the stored statistics
else:
mean = self.stored_mean.view(1, -1, 1, 1)
var = self.stored_var.view(1, -1, 1, 1)
# If using standing stats, divide them by the accumulation counter
if self.accumulate_standing:
mean = mean / self.accumulation_counter
var = var / self.accumulation_counter
return fused_bn(x, mean, var, gain, bias, self.eps)
# Simple function to handle groupnorm norm stylization
def groupnorm(x, norm_style):
# If number of channels specified in norm_style:
if 'ch' in norm_style:
ch = int(norm_style.split('_')[-1])
groups = max(int(x.shape[1]) // ch, 1)
# If number of groups specified in norm style
elif 'grp' in norm_style:
groups = int(norm_style.split('_')[-1])
# If neither, default to groups = 16
else:
groups = 16
return F.group_norm(x, groups)
# Class-conditional bn
# output size is the number of channels, input size is for the linear layers
# Andy's Note: this class feels messy but I'm not really sure how to clean it up
# Suggestions welcome! (By which I mean, refactor this and make a pull request
# if you want to make this more readable/usable).
class ccbn(nn.Module):
def __init__(self, output_size, input_size, which_linear, eps=1e-5, momentum=0.1,
cross_replica=False, mybn=False, norm_style='bn',):
super(ccbn, self).__init__()
self.output_size, self.input_size = output_size, input_size
# Prepare gain and bias layers
self.gain = which_linear(input_size, output_size)
self.bias = which_linear(input_size, output_size)
# epsilon to avoid dividing by 0
self.eps = eps
# Momentum
self.momentum = momentum
# Use cross-replica batchnorm?
self.cross_replica = cross_replica
# Use my batchnorm?
self.mybn = mybn
# Norm style?
self.norm_style = norm_style
if self.cross_replica:
self.bn = SyncBN2d(output_size, eps=self.eps, momentum=self.momentum, affine=False)
elif self.mybn:
self.bn = myBN(output_size, self.eps, self.momentum)
elif self.norm_style in ['bn', 'in']:
self.stored_mean=torch.nn.Parameter(torch.zeros(output_size))
self.stored_var=torch.nn.Parameter(torch.ones(output_size))
def forward(self, x, y):
# Calculate class-conditional gains and biases
gain = torch.Tensor(1 + self.gain(y)).view(y.size(0), -1, 1, 1)
bias = torch.Tensor(self.bias(y)).view(y.size(0), -1, 1, 1)
# If using my batchnorm
if self.mybn or self.cross_replica:
return self.bn(x, gain=gain, bias=bias)
# else:
else:
if self.norm_style == 'bn':
out = F.batch_norm(x, self.stored_mean, self.stored_var, None, None,
self.training, 0.1, self.eps)
elif self.norm_style == 'in':
out = F.instance_norm(x, self.stored_mean, self.stored_var, None, None,
self.training, 0.1, self.eps)
elif self.norm_style == 'gn':
                out = groupnorm(x, self.norm_style)
elif self.norm_style == 'nonorm':
out = x
return out * gain + bias
def extra_repr(self):
s = 'out: {output_size}, in: {input_size},'
s +=' cross_replica={cross_replica}'
return s.format(**self.__dict__)
# Normal, non-class-conditional BN
class bn(nn.Module):
def __init__(self, output_size, eps=1e-5, momentum=0.1,
cross_replica=False, mybn=False):
super(bn, self).__init__()
self.output_size= output_size
# Prepare gain and bias layers
self.gain = torch.nn.Parameter(output_size,1.0)
self.bias = torch.nn.Parameter(output_size,0.0)
# epsilon to avoid dividing by 0
self.eps = eps
# Momentum
self.momentum = momentum
# Use cross-replica batchnorm?
self.cross_replica = cross_replica
# Use my batchnorm?
self.mybn = mybn
if self.cross_replica:
self.bn = SyncBN2d(output_size, eps=self.eps, momentum=self.momentum, affine=False)
elif mybn:
self.bn = myBN(output_size, self.eps, self.momentum)
# Register buffers if neither of the above
else:
self.stored_mean = torch.nn.Parameter(torch.zeros(output_size) )
self.stored_var = torch.nn.Parameter(torch.ones(output_size))
def forward(self, x, y=None):
if self.cross_replica or self.mybn:
gain = self.gain.view(1,-1,1,1)
bias = self.bias.view(1,-1,1,1)
return self.bn(x, gain=gain, bias=bias)
else:
return F.batch_norm(x, self.stored_mean, self.stored_var, self.gain,
self.bias, self.training, self.momentum, self.eps)
# Generator blocks
# Note that this class assumes the kernel size and padding (and any other
# settings) have been selected in the main generator module and passed in
# through the which_conv arg. Similar rules apply with which_bn (the input
# size [which is actually the number of channels of the conditional info] must
# be preselected)
class GBlock(nn.Module):
def __init__(self, in_channels, out_channels,
which_conv=nn.Conv2d, which_bn=bn, activation=None,
upsample=None):
super(GBlock, self).__init__()
self.in_channels, self.out_channels = in_channels, out_channels
self.which_conv, self.which_bn = which_conv, which_bn
self.activation = activation
self.upsample = upsample
# Conv layers
self.conv1 = self.which_conv(self.in_channels, self.out_channels)
self.conv2 = self.which_conv(self.out_channels, self.out_channels)
self.learnable_sc = in_channels != out_channels or upsample
if self.learnable_sc:
self.conv_sc = self.which_conv(in_channels, out_channels,
kernel_size=1, padding=0)
# Batchnorm layers
self.bn1 = self.which_bn(in_channels)
self.bn2 = self.which_bn(out_channels)
# upsample layers
self.upsample = upsample
def forward(self, x, y):
h = self.activation(self.bn1(x, y))
if self.upsample:
h = self.upsample(h)
x = self.upsample(x)
h = self.conv1(h)
h = self.activation(self.bn2(h, y))
h = self.conv2(h)
if self.learnable_sc:
x = self.conv_sc(x)
return h + x
# Residual block for the discriminator
class DBlock(nn.Module):
def __init__(self, in_channels, out_channels, which_conv=SNConv2d, wide=True,
preactivation=False, activation=None, downsample=None,):
super(DBlock, self).__init__()
self.in_channels, self.out_channels = in_channels, out_channels
# If using wide D (as in SA-GAN and BigGAN), change the channel pattern
self.hidden_channels = self.out_channels if wide else self.in_channels
self.which_conv = which_conv
self.preactivation = preactivation
self.activation = activation
self.downsample = downsample
# Conv layers
self.conv1 = self.which_conv(self.in_channels, self.hidden_channels)
self.conv2 = self.which_conv(self.hidden_channels, self.out_channels)
self.learnable_sc = True if (in_channels != out_channels) or downsample else False
if self.learnable_sc:
self.conv_sc = self.which_conv(in_channels, out_channels,
kernel_size=1, padding=0)
def shortcut(self, x):
if self.preactivation:
if self.learnable_sc:
x = self.conv_sc(x)
if self.downsample:
x = self.downsample(x)
else:
if self.downsample:
x = self.downsample(x)
if self.learnable_sc:
x = self.conv_sc(x)
return x
def forward(self, x):
if self.preactivation:
# h = self.activation(x) # NOT TODAY SATAN
# Andy's note: This line *must* be an out-of-place ReLU or it
# will negatively affect the shortcut connection.
h = F.relu(x)
else:
h = x
h = self.conv1(h)
h = self.conv2(self.activation(h))
if self.downsample:
h = self.downsample(h)
return h + self.shortcut(x)
# dogball
|
# Copyright 2018 Fujitsu.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_versionedobjects import base as object_base
from barbican.model import models
from barbican.model import repositories as repos
from barbican.objects import base
from barbican.objects import fields
@object_base.VersionedObjectRegistry.register
class SecretStores(base.BarbicanObject, base.BarbicanPersistentObject,
object_base.VersionedObjectDictCompat):
fields = {
'store_plugin': fields.StringField(),
'crypto_plugin': fields.StringField(nullable=True),
'global_default': fields.BooleanField(default=False),
'name': fields.StringField(),
'status': fields.StringField(nullable=True, default=base.States.ACTIVE)
}
db_model = models.SecretStores
db_repo = repos.get_secret_stores_repository()
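    # get_all fetches every stored secret-store row and wraps each one in a versioned object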
@classmethod
def get_all(cls, session=None):
secret_stores_db = cls.db_repo.get_all(session)
secret_stores_obj = [cls()._from_db_object(secret_store_db) for
secret_store_db in secret_stores_db]
return secret_stores_obj
|
import pytest
from graphql.type import (
GraphQLArgument,
GraphQLEnumType,
GraphQLEnumValue,
GraphQLField,
GraphQLInputObjectField,
GraphQLInputObjectType,
GraphQLInterfaceType,
GraphQLObjectType,
GraphQLString,
)
from ..dynamic import Dynamic
from ..enum import Enum
from ..field import Field
from ..inputfield import InputField
from ..inputobjecttype import InputObjectType
from ..interface import Interface
from ..objecttype import ObjectType
from ..scalars import Int, String
from ..structures import List, NonNull
from ..typemap import TypeMap, resolve_type
def test_enum():
class MyEnum(Enum):
"""Description"""
foo = 1
bar = 2
@property
def description(self):
return "Description {}={}".format(self.name, self.value)
@property
def deprecation_reason(self):
if self == MyEnum.foo:
return "Is deprecated"
typemap = TypeMap([MyEnum])
assert "MyEnum" in typemap
graphql_enum = typemap["MyEnum"]
assert isinstance(graphql_enum, GraphQLEnumType)
assert graphql_enum.name == "MyEnum"
assert graphql_enum.description == "Description"
values = graphql_enum.values
assert values == [
GraphQLEnumValue(
name="foo",
value=1,
description="Description foo=1",
deprecation_reason="Is deprecated",
),
GraphQLEnumValue(name="bar", value=2, description="Description bar=2"),
]
def test_objecttype():
class MyObjectType(ObjectType):
"""Description"""
foo = String(
bar=String(description="Argument description", default_value="x"),
description="Field description",
)
bar = String(name="gizmo")
def resolve_foo(self, bar):
return bar
typemap = TypeMap([MyObjectType])
assert "MyObjectType" in typemap
graphql_type = typemap["MyObjectType"]
assert isinstance(graphql_type, GraphQLObjectType)
assert graphql_type.name == "MyObjectType"
assert graphql_type.description == "Description"
fields = graphql_type.fields
assert list(fields.keys()) == ["foo", "gizmo"]
foo_field = fields["foo"]
assert isinstance(foo_field, GraphQLField)
assert foo_field.description == "Field description"
assert foo_field.args == {
"bar": GraphQLArgument(
GraphQLString,
description="Argument description",
default_value="x",
out_name="bar",
)
}
def test_dynamic_objecttype():
class MyObjectType(ObjectType):
"""Description"""
bar = Dynamic(lambda: Field(String))
own = Field(lambda: MyObjectType)
typemap = TypeMap([MyObjectType])
assert "MyObjectType" in typemap
assert list(MyObjectType._meta.fields.keys()) == ["bar", "own"]
graphql_type = typemap["MyObjectType"]
fields = graphql_type.fields
assert list(fields.keys()) == ["bar", "own"]
assert fields["bar"].type == GraphQLString
assert fields["own"].type == graphql_type
def test_interface():
class MyInterface(Interface):
"""Description"""
foo = String(
bar=String(description="Argument description", default_value="x"),
description="Field description",
)
bar = String(name="gizmo", first_arg=String(), other_arg=String(name="oth_arg"))
own = Field(lambda: MyInterface)
def resolve_foo(self, args, info):
return args.get("bar")
typemap = TypeMap([MyInterface])
assert "MyInterface" in typemap
graphql_type = typemap["MyInterface"]
assert isinstance(graphql_type, GraphQLInterfaceType)
assert graphql_type.name == "MyInterface"
assert graphql_type.description == "Description"
fields = graphql_type.fields
assert list(fields.keys()) == ["foo", "gizmo", "own"]
assert fields["own"].type == graphql_type
assert list(fields["gizmo"].args.keys()) == ["firstArg", "oth_arg"]
foo_field = fields["foo"]
assert isinstance(foo_field, GraphQLField)
assert foo_field.description == "Field description"
assert not foo_field.resolver # Resolver not attached in interfaces
assert foo_field.args == {
"bar": GraphQLArgument(
GraphQLString,
description="Argument description",
default_value="x",
out_name="bar",
)
}
def test_inputobject():
class OtherObjectType(InputObjectType):
thingy = NonNull(Int)
class MyInnerObjectType(InputObjectType):
some_field = String()
some_other_field = List(OtherObjectType)
class MyInputObjectType(InputObjectType):
"""Description"""
foo_bar = String(description="Field description")
bar = String(name="gizmo")
baz = NonNull(MyInnerObjectType)
own = InputField(lambda: MyInputObjectType)
def resolve_foo_bar(self, args, info):
return args.get("bar")
typemap = TypeMap([MyInputObjectType])
assert "MyInputObjectType" in typemap
graphql_type = typemap["MyInputObjectType"]
assert isinstance(graphql_type, GraphQLInputObjectType)
assert graphql_type.name == "MyInputObjectType"
assert graphql_type.description == "Description"
other_graphql_type = typemap["OtherObjectType"]
inner_graphql_type = typemap["MyInnerObjectType"]
container = graphql_type.create_container(
{
"bar": "oh!",
"baz": inner_graphql_type.create_container(
{
"some_other_field": [
other_graphql_type.create_container({"thingy": 1}),
other_graphql_type.create_container({"thingy": 2}),
]
}
),
}
)
assert isinstance(container, MyInputObjectType)
assert "bar" in container
assert container.bar == "oh!"
assert "foo_bar" not in container
assert container.foo_bar is None
assert container.baz.some_field is None
assert container.baz.some_other_field[0].thingy == 1
assert container.baz.some_other_field[1].thingy == 2
fields = graphql_type.fields
assert list(fields.keys()) == ["fooBar", "gizmo", "baz", "own"]
own_field = fields["own"]
assert own_field.type == graphql_type
foo_field = fields["fooBar"]
assert isinstance(foo_field, GraphQLInputObjectField)
assert foo_field.description == "Field description"
def test_objecttype_camelcase():
class MyObjectType(ObjectType):
"""Description"""
foo_bar = String(bar_foo=String())
typemap = TypeMap([MyObjectType])
assert "MyObjectType" in typemap
graphql_type = typemap["MyObjectType"]
assert isinstance(graphql_type, GraphQLObjectType)
assert graphql_type.name == "MyObjectType"
assert graphql_type.description == "Description"
fields = graphql_type.fields
assert list(fields.keys()) == ["fooBar"]
foo_field = fields["fooBar"]
assert isinstance(foo_field, GraphQLField)
assert foo_field.args == {
"barFoo": GraphQLArgument(GraphQLString, out_name="bar_foo")
}
def test_objecttype_camelcase_disabled():
class MyObjectType(ObjectType):
"""Description"""
foo_bar = String(bar_foo=String())
typemap = TypeMap([MyObjectType], auto_camelcase=False)
assert "MyObjectType" in typemap
graphql_type = typemap["MyObjectType"]
assert isinstance(graphql_type, GraphQLObjectType)
assert graphql_type.name == "MyObjectType"
assert graphql_type.description == "Description"
fields = graphql_type.fields
assert list(fields.keys()) == ["foo_bar"]
foo_field = fields["foo_bar"]
assert isinstance(foo_field, GraphQLField)
assert foo_field.args == {
"bar_foo": GraphQLArgument(GraphQLString, out_name="bar_foo")
}
def test_objecttype_with_possible_types():
class MyObjectType(ObjectType):
"""Description"""
class Meta:
possible_types = (dict,)
foo_bar = String()
typemap = TypeMap([MyObjectType])
graphql_type = typemap["MyObjectType"]
assert graphql_type.is_type_of
assert graphql_type.is_type_of({}, None) is True
assert graphql_type.is_type_of(MyObjectType(), None) is False
def test_resolve_type_with_missing_type():
class MyObjectType(ObjectType):
foo_bar = String()
class MyOtherObjectType(ObjectType):
fizz_buzz = String()
def resolve_type_func(root, info):
return MyOtherObjectType
typemap = TypeMap([MyObjectType])
with pytest.raises(AssertionError) as excinfo:
resolve_type(resolve_type_func, typemap, "MyOtherObjectType", {}, {})
assert "MyOtherObjectTyp" in str(excinfo.value)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# michael a.g. aïvázis
# orthologue
# (c) 1998-2018 all rights reserved
#
"""
Sanity check: verify that the selector factory is accessible
"""
def test():
from pyre.ipc import selector
return
# main
if __name__ == "__main__":
test()
# end of file
|
"""
Copyright 2019 EUROCONTROL
==========================================
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the
following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following
disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
==========================================
Editorial note: this license is an instance of the BSD license template as provided by the Open Source Initiative:
http://opensource.org/licenses/BSD-3-Clause
Details on EUROCONTROL: http://www.eurocontrol.int
"""
from subscription_manager.db import topics as db
from subscription_manager.events.subscription_handlers import delete_subscription_handler
__author__ = "EUROCONTROL (SWIM)"
def create_topic_handler(topic):
db.create_topic(topic)
# def update_topic_handler(current_topic, updated_topic):
# db.update_topic(updated_topic)
#
# for subscription in updated_topic.subscriptions:
# broker.delete_queue_binding(queue=subscription.queue, topic=current_topic.name)
# broker.bind_queue_to_topic(queue=subscription.queue, topic=updated_topic.name, durable=subscription.durable)
def delete_topic_handler(topic):
db.delete_topic(topic)
def delete_topic_subscriptions_handler(topic):
for subscription in topic.subscriptions:
delete_subscription_handler(subscription)
|
#!/usr/bin/env python2
# Copyright (c) 2014-2015 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
try:
import urllib.parse as urlparse
except ImportError:
import urlparse
class AbandonConflictTest(BitcoinTestFramework):
def setup_network(self):
self.nodes = []
self.nodes.append(start_node(0, self.options.tmpdir, ["-debug","-logtimemicros","-minrelaytxfee=0.00001"]))
self.nodes.append(start_node(1, self.options.tmpdir, ["-debug","-logtimemicros"]))
connect_nodes(self.nodes[0], 1)
def run_test(self):
self.nodes[1].wallet.generate(100)
sync_blocks(self.nodes)
balance = self.nodes[0].getbalance()
txA = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), Decimal("10"))
txB = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), Decimal("10"))
txC = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), Decimal("10"))
sync_mempools(self.nodes)
self.nodes[1].wallet.generate(1)
sync_blocks(self.nodes)
newbalance = self.nodes[0].getbalance()
assert(balance - newbalance < Decimal("0.001")) #no more than fees lost
balance = newbalance
url = urlparse.urlparse(self.nodes[1].url)
self.nodes[0].disconnectnode(url.hostname+":"+str(p2p_port(1)))
# Identify the 10btc outputs
nA = next(i for i, vout in enumerate(self.nodes[0].getrawtransaction(txA, 1)["vout"]) if vout["value"] == Decimal("10"))
nB = next(i for i, vout in enumerate(self.nodes[0].getrawtransaction(txB, 1)["vout"]) if vout["value"] == Decimal("10"))
nC = next(i for i, vout in enumerate(self.nodes[0].getrawtransaction(txC, 1)["vout"]) if vout["value"] == Decimal("10"))
inputs =[]
# spend 10btc outputs from txA and txB
inputs.append({"txid":txA, "vout":nA})
inputs.append({"txid":txB, "vout":nB})
outputs = {}
outputs[self.nodes[0].getnewaddress()] = Decimal("14.99998")
outputs[self.nodes[1].getnewaddress()] = Decimal("5")
signed = self.nodes[0].signrawtransaction(self.nodes[0].createrawtransaction(inputs, outputs))
txAB1 = self.nodes[0].sendrawtransaction(signed["hex"])
# Identify the 14.99998btc output
nAB = next(i for i, vout in enumerate(self.nodes[0].getrawtransaction(txAB1, 1)["vout"]) if vout["value"] == Decimal("14.99998"))
#Create a child tx spending AB1 and C
inputs = []
inputs.append({"txid":txAB1, "vout":nAB})
inputs.append({"txid":txC, "vout":nC})
outputs = {}
outputs[self.nodes[0].getnewaddress()] = Decimal("24.9996")
signed2 = self.nodes[0].signrawtransaction(self.nodes[0].createrawtransaction(inputs, outputs))
txABC2 = self.nodes[0].sendrawtransaction(signed2["hex"])
# In mempool txs from self should increase balance from change
newbalance = self.nodes[0].getbalance()
assert(newbalance == balance - Decimal("30") + Decimal("24.9996"))
balance = newbalance
# Restart the node with a higher min relay fee so the parent tx is no longer in mempool
# TODO: redo with eviction
# Note had to make sure tx did not have AllowFree priority
stop_node(self.nodes[0],0)
self.nodes[0]=start_node(0, self.options.tmpdir, ["-debug","-logtimemicros","-minrelaytxfee=0.0001"])
# Verify txs no longer in mempool
assert(len(self.nodes[0].getrawmempool()) == 0)
# Not in mempool txs from self should only reduce balance
# inputs are still spent, but change not received
newbalance = self.nodes[0].getbalance()
assert(newbalance == balance - Decimal("24.9996"))
# Unconfirmed received funds that are not in mempool, also shouldn't show
# up in unconfirmed balance
unconfbalance = self.nodes[0].getunconfirmedbalance() + self.nodes[0].getbalance()
assert(unconfbalance == newbalance)
# Also shouldn't show up in listunspent
assert(not txABC2 in [utxo["txid"] for utxo in self.nodes[0].listunspent(0)])
balance = newbalance
# Abandon original transaction and verify inputs are available again
# including that the child tx was also abandoned
self.nodes[0].abandontransaction(txAB1)
newbalance = self.nodes[0].getbalance()
assert(newbalance == balance + Decimal("30"))
balance = newbalance
# Verify that even with a low min relay fee, the tx is not reaccepted from wallet on startup once abandoned
stop_node(self.nodes[0],0)
self.nodes[0]=start_node(0, self.options.tmpdir, ["-debug","-logtimemicros","-minrelaytxfee=0.00001"])
assert(len(self.nodes[0].getrawmempool()) == 0)
assert(self.nodes[0].getbalance() == balance)
# But if its received again then it is unabandoned
# And since now in mempool, the change is available
# But its child tx remains abandoned
self.nodes[0].sendrawtransaction(signed["hex"])
newbalance = self.nodes[0].getbalance()
assert(newbalance == balance - Decimal("20") + Decimal("14.99998"))
balance = newbalance
# Send child tx again so its unabandoned
self.nodes[0].sendrawtransaction(signed2["hex"])
newbalance = self.nodes[0].getbalance()
assert(newbalance == balance - Decimal("10") - Decimal("14.99998") + Decimal("24.9996"))
balance = newbalance
# Remove using high relay fee again
stop_node(self.nodes[0],0)
self.nodes[0]=start_node(0, self.options.tmpdir, ["-debug","-logtimemicros","-minrelaytxfee=0.0001"])
assert(len(self.nodes[0].getrawmempool()) == 0)
newbalance = self.nodes[0].getbalance()
assert(newbalance == balance - Decimal("24.9996"))
balance = newbalance
# Create a double spend of AB1 by spending again from only A's 10 output
# Mine double spend from node 1
inputs =[]
inputs.append({"txid":txA, "vout":nA})
outputs = {}
outputs[self.nodes[1].getnewaddress()] = Decimal("9.9999")
tx = self.nodes[0].createrawtransaction(inputs, outputs)
signed = self.nodes[0].signrawtransaction(tx)
self.nodes[1].sendrawtransaction(signed["hex"])
self.nodes[1].wallet.generate(1)
connect_nodes(self.nodes[0], 1)
sync_blocks(self.nodes)
# Verify that B and C's 10 BTC outputs are available for spending again because AB1 is now conflicted
newbalance = self.nodes[0].getbalance()
assert(newbalance == balance + Decimal("20"))
balance = newbalance
# There is currently a minor bug around this and so this test doesn't work. See Issue #7315
# Invalidate the block with the double spend and B's 10 BTC output should no longer be available
# Don't think C's should either
self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
newbalance = self.nodes[0].getbalance()
#assert(newbalance == balance - Decimal("10"))
print "If balance has not declined after invalidateblock then out of mempool wallet tx which is no longer"
print "conflicted has not resumed causing its inputs to be seen as spent. See Issue #7315"
print balance , " -> " , newbalance , " ?"
if __name__ == '__main__':
AbandonConflictTest().main()
|
###############################################################################
# -*- coding: utf-8 -*-
# Order: A tool to characterize the local structure of liquid water
# by geometric order parameters
#
# Authors: Pu Du
#
# Released under the MIT License
###############################################################################
from __future__ import print_function, division
import os
import six
from six.moves import range
import numpy as np
from progress.bar import ChargingBar
from .util import pbc
from . import oto
class StructureFactor():
"""Structure Factor"""
pass
|
#!/usr/bin/env python
import os
import re
from setuptools import find_packages
from setuptools import setup
NAME = 'idisplay'
VERSION = '0.1.2'
AUTHOR = 'Lev Givon'
AUTHOR_EMAIL = 'lev@columbia.edu'
URL = 'https://github.com/lebedov/idisplay/'
DESCRIPTION = 'IPython rich display magic functions'
with open('README.rst', 'r') as f:
LONG_DESCRIPTION = f.read()
LONG_DESCRIPTION = re.search('.*(^Package Description.*)', LONG_DESCRIPTION, re.MULTILINE|re.DOTALL).group(1)
DOWNLOAD_URL = URL
LICENSE = 'BSD'
CLASSIFIERS = [
'Development Status :: 3 - Alpha',
'Environment :: Console',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Software Development']
PACKAGES = find_packages()
if __name__ == "__main__":
if os.path.exists('MANIFEST'):
os.remove('MANIFEST')
setup(
name = NAME,
version = VERSION,
author = AUTHOR,
author_email = AUTHOR_EMAIL,
license = LICENSE,
classifiers = CLASSIFIERS,
description = DESCRIPTION,
long_description = LONG_DESCRIPTION,
url = URL,
packages = find_packages(),
install_requires = ['ipython>=1.0'])
|
import unittest
from class_methods import *
from delta import *
from dynamic import *
from indexes import *
from inheritance import *
from instance import *
from json_serialisation import *
from validation import *
if __name__ == '__main__':
unittest.main()
|
from threading import Timer
from ..core import MachineError, listify
import logging
logger = logging.getLogger(__name__)
logger.addHandler(logging.NullHandler())
class Tags(object):
def __init__(self, *args, **kwargs):
self.tags = kwargs.pop('tags', [])
super(Tags, self).__init__(*args, **kwargs)
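    # Tags are exposed as dynamic boolean attributes: `state.is_<tag>` is True
    # if and only if '<tag>' appears in self.tags; any other attribute name
    # falls through to the regular lookup.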
def __getattr__(self, item):
if item.startswith('is_'):
return item[3:] in self.tags
else:
return super(Tags, self).__getattribute__(item)
class Error(Tags):
def __init__(self, *args, **kwargs):
tags = kwargs.get('tags', [])
accepted = kwargs.pop('accepted', False)
if accepted:
tags.append('accepted')
kwargs['tags'] = tags
super(Error, self).__init__(*args, **kwargs)
def enter(self, event_data):
if len(event_data.machine.get_triggers(self.name)) == 0 and not self.is_accepted:
raise MachineError("Error state '{0}' reached!".format(self.name))
class Timeout(object):
def __init__(self, *args, **kwargs):
self.timeout = kwargs.pop('timeout', 0)
self._on_timeout = None
if self.timeout > 0:
try:
self.on_timeout = kwargs.pop('on_timeout')
except KeyError:
raise AttributeError("Timeout state requires 'on_timeout' when timeout is set.")
self.runner = {}
super(Timeout, self).__init__(*args, **kwargs)
def enter(self, event_data):
if self.timeout > 0:
t = Timer(self.timeout, self._process_timeout, args=(event_data,))
t.start()
self.runner[id(event_data.model)] = t
super(Timeout, self).enter(event_data)
def exit(self, event_data):
t = self.runner.get(id(event_data.model), None)
        if t is not None and t.is_alive():
t.cancel()
super(Timeout, self).exit(event_data)
def _process_timeout(self, event_data):
logger.debug("%sTimeout state %s. Processing callbacks...", event_data.machine.id, self.name)
for oe in self.on_timeout:
event_data.machine._callback(oe, event_data)
logger.info("%sTimeout state %s processed.", event_data.machine.id, self.name)
@property
def on_timeout(self):
return self._on_timeout
@on_timeout.setter
def on_timeout(self, value):
self._on_timeout = listify(value)
class Volatile(object):
def __init__(self, *args, **kwargs):
self.volatile_cls = kwargs.pop('volatile', VolatileObject)
self.volatile_hook = kwargs.pop('hook', 'scope')
super(Volatile, self).__init__(*args, **kwargs)
self.initialized = True
def enter(self, event_data):
setattr(event_data.model, self.volatile_hook, self.volatile_cls())
super(Volatile, self).enter(event_data)
def exit(self, event_data):
super(Volatile, self).exit(event_data)
try:
delattr(event_data.model, self.volatile_hook)
except AttributeError:
pass
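# Class decorator that mixes the given state feature classes (e.g. Tags, Error,
# Timeout, Volatile) into the machine's state class, so that every state the
# decorated machine creates gains their behaviour.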
def add_state_features(*args):
def class_decorator(cls):
class CustomState(type('CustomMixins', args, {}), cls.state_cls):
pass
cls.state_cls = CustomState
return cls
return class_decorator
class VolatileObject(object):
pass
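
# Illustrative usage sketch (assumes the `transitions` package whose states
# addon this module appears to be; the state name and the 'give_up' callback
# below are hypothetical, not part of this module):
#
#     from transitions import Machine
#
#     @add_state_features(Tags, Timeout)
#     class CustomMachine(Machine):
#         pass
#
#     states = [{'name': 'waiting', 'tags': ['pending'],
#                'timeout': 5, 'on_timeout': 'give_up'}]
#     machine = CustomMachine(states=states, initial='waiting')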
|
# -*- coding: utf-8 -*-
"""
Application exception views.
Views rendered by the web application in response to exceptions thrown within
views.
"""
from pyramid.view import forbidden_view_config
from pyramid.view import notfound_view_config
from pyramid.view import view_config
from h.i18n import TranslationString as _ # noqa: N813
from h.util.view import handle_exception, json_view
@forbidden_view_config(renderer="h:templates/notfound.html.jinja2")
@notfound_view_config(renderer="h:templates/notfound.html.jinja2", append_slash=True)
def notfound(request):
"""Handle a request for an unknown/forbidden resource."""
request.response.status_int = 404
return {}
@view_config(
context=Exception, accept="text/html", renderer="h:templates/5xx.html.jinja2"
)
def error(context, request):
"""Handle a request for which the handler threw an exception."""
handle_exception(request, exception=context)
return {}
@json_view(context=Exception)
def json_error(context, request):
"""Handle an unexpected exception where the request asked for JSON."""
handle_exception(request, exception=context)
message = _(
"Hypothesis had a problem while handling this request. "
"Our team has been notified. Please contact support@hypothes.is"
" if the problem persists."
)
return {"status": "failure", "reason": message}
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# python-adbus documentation build configuration file, created by
# sphinx-quickstart on Mon Jul 24 13:33:19 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('../../'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode',
'sphinx.ext.napoleon', 'sphinx.ext.githubpages']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'python-adbus'
copyright = '2017, CCX Technologies'
author = 'Charles Eidsness'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
from adbus import __version__
version = '.'.join(__version__.split('.')[:-1])
# The full version, including alpha/beta/rc tags.
release = __version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
#html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# This is required for the alabaster theme
# refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars
html_sidebars = {
'**': [
'about.html',
'navigation.html',
'relations.html', # needs 'show_related': True theme option to display
'searchbox.html',
'donate.html',
]
}
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'python-adbusdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'python-adbus.tex', 'python-adbus Documentation',
'Charles Eidsness', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'python-adbus', 'python-adbus Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'python-adbus', 'python-adbus Documentation',
author, 'python-adbus', 'One line description of project.',
'Miscellaneous'),
]
|
"""practice URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/4.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.conf import settings
# from django.contrib import admin
from django.urls import include, path
urlpatterns = [
# path("admin/", admin.site.urls),
path("", include("dictionary.urls")),
]
if "debug_toolbar" in settings.INSTALLED_APPS:
urlpatterns.insert(0, path("__debug__/", include("debug_toolbar.urls")))
|
import os
import sys
import logging
from setuptools import setup, find_packages
logging.basicConfig(stream=sys.stderr, level=logging.INFO)
log = logging.getLogger()
# package description and keywords
description = ('Python tools for obtaining and working with elevation data '
'from Spire GNSS grazing angle altimetry')
keywords = 'Spire GNSS, altimetry, grazing angle, surface elevation and change'
# get long_description from README.rst
with open("README.rst", "r") as fh:
long_description = fh.read()
long_description_content_type = "text/x-rst"
# install requirements and dependencies
on_rtd = os.environ.get('READTHEDOCS') == 'True'
if on_rtd:
install_requires = []
dependency_links = []
else:
# get install requirements
with open('requirements.txt') as fh:
install_requires = [line.split().pop(0) for line in fh.read().splitlines()]
dependency_links = []
# get version
with open('version.txt') as fh:
fallback_version = fh.read()
# list of all scripts to be included with package
scripts=[os.path.join('scripts',f) for f in os.listdir('scripts') if f.endswith('.py')]
# semantic version configuration for setuptools-scm
setup_requires = ["setuptools_scm"]
use_scm_version = {
"relative_to": __file__,
"local_scheme": "node-and-date",
"version_scheme": "python-simplified-semver",
"fallback_version":fallback_version,
}
setup(
name='spire-toolkit',
description=description,
long_description=long_description,
long_description_content_type=long_description_content_type,
url='https://github.com/tsutterley/Spire-GNSS',
author='Tyler Sutterley',
author_email='tsutterl@uw.edu',
license='MIT',
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Science/Research',
'Topic :: Scientific/Engineering :: Physics',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
],
keywords=keywords,
packages=find_packages(),
install_requires=install_requires,
setup_requires=setup_requires,
dependency_links=dependency_links,
use_scm_version=use_scm_version,
scripts=scripts,
include_package_data=True,
)
|
def rearrange(arr, n):
    """Move all negative numbers in arr to the front (in place), preserving
    the relative order of the negatives, then print the result."""
    j = 0
    for i in range(0, n):
        if arr[i] < 0:
            arr[i], arr[j] = arr[j], arr[i]
            j += 1
    print(arr)


# Driver code
sequence = [1, 3, -6, 9, -3, -1]
length = len(sequence)
print(sequence)  # original list
rearrange(sequence, length)  # prints [-6, -3, -1, 9, 3, 1]
|
def Jsi(gts, ps, label):
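    """Jaccard similarity index (intersection over union) for one label.

    Both `gts` and `ps` are lists of segments of the form
    ((start, end), seg_label) with start and end inclusive. The two sets
    being compared are the integer positions covered by `label` in the
    ground truth and in the prediction, respectively.
    """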
g_set = set()
p_set = set()
for seg in gts:
seg_points, g_l = seg
s, e = seg_points
if g_l == label:
g_set.update(range(s, e + 1, 1))
for seg in ps:
seg_points, p_l = seg
s, e = seg_points
if p_l == label:
p_set.update(range(s, e + 1, 1))
inter_set = g_set & p_set
union_set = g_set | p_set
inter_v = len(inter_set)
union_v = len(union_set)
if union_v == 0:
jsi = 0
else:
jsi = float(inter_v) / float(union_v)
# if jsi > 0.6:
# return 1.
# elif jsi < 0.2:
# return 0.
# else:
# return jsi
return jsi
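
# Minimal usage sketch (hypothetical data, not part of the original module):
if __name__ == '__main__':
    ground_truth = [((0, 4), 'walk'), ((5, 9), 'run')]
    predicted = [((2, 6), 'walk')]
    # positions 2-4 are shared and 0-6 are covered overall: 3 / 7 ~= 0.43
    print(Jsi(ground_truth, predicted, 'walk'))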
|
# coding: utf-8
"""
Isilon SDK
Isilon SDK - Language bindings for the OneFS API # noqa: E501
OpenAPI spec version: 8
Contact: sdk@isilon.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from isi_sdk_8_2_1.models.mapping_users_rules_rule_user2 import MappingUsersRulesRuleUser2 # noqa: F401,E501
class MappingUsersRulesRuleOptionsDefaultUser(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'domain': 'str',
'user': 'str'
}
attribute_map = {
'domain': 'domain',
'user': 'user'
}
def __init__(self, domain=None, user=None): # noqa: E501
"""MappingUsersRulesRuleOptionsDefaultUser - a model defined in Swagger""" # noqa: E501
self._domain = None
self._user = None
self.discriminator = None
if domain is not None:
self.domain = domain
self.user = user
@property
def domain(self):
"""Gets the domain of this MappingUsersRulesRuleOptionsDefaultUser. # noqa: E501
:return: The domain of this MappingUsersRulesRuleOptionsDefaultUser. # noqa: E501
:rtype: str
"""
return self._domain
@domain.setter
def domain(self, domain):
"""Sets the domain of this MappingUsersRulesRuleOptionsDefaultUser.
:param domain: The domain of this MappingUsersRulesRuleOptionsDefaultUser. # noqa: E501
:type: str
"""
if domain is not None and len(domain) > 255:
raise ValueError("Invalid value for `domain`, length must be less than or equal to `255`") # noqa: E501
if domain is not None and len(domain) < 0:
raise ValueError("Invalid value for `domain`, length must be greater than or equal to `0`") # noqa: E501
self._domain = domain
@property
def user(self):
"""Gets the user of this MappingUsersRulesRuleOptionsDefaultUser. # noqa: E501
:return: The user of this MappingUsersRulesRuleOptionsDefaultUser. # noqa: E501
:rtype: str
"""
return self._user
@user.setter
def user(self, user):
"""Sets the user of this MappingUsersRulesRuleOptionsDefaultUser.
:param user: The user of this MappingUsersRulesRuleOptionsDefaultUser. # noqa: E501
:type: str
"""
if user is None:
raise ValueError("Invalid value for `user`, must not be `None`") # noqa: E501
if user is not None and len(user) > 255:
raise ValueError("Invalid value for `user`, length must be less than or equal to `255`") # noqa: E501
if user is not None and len(user) < 0:
raise ValueError("Invalid value for `user`, length must be greater than or equal to `0`") # noqa: E501
self._user = user
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, MappingUsersRulesRuleOptionsDefaultUser):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
from pathlib import Path
from typer import Option, Argument
from basis.cli.services.deploy import deploy_graph_version
from basis.cli.services.graph_components import create_graph_component
from basis.cli.services.lookup import IdLookup
from basis.cli.services.output import sprint, abort_on_error
from basis.cli.services.upload import upload_graph_version
_graph_help = "The location of the graph.yml file for the graph to upload"
_deploy_help = "Whether or not to automatically deploy the graph after upload"
_organization_help = "The name of the Basis organization to upload to"
_environment_help = "The name of the Basis environment to use if deploying the graph"
_component_help = "After uploading, publish the graph version as a public component"
def upload(
deploy: bool = Option(True, "--deploy/--no-deploy", help=_deploy_help),
organization: str = Option("", "-o", "--organization", help=_organization_help),
environment: str = Option("", "-e", "--environment", help=_environment_help),
graph: Path = Argument(None, exists=True, help=_graph_help),
publish_component: bool = Option(False, help=_component_help),
):
"""Upload a new version of a graph to Basis"""
ids = IdLookup(
environment_name=environment,
organization_name=organization,
explicit_graph_path=graph,
)
with abort_on_error("Upload failed"):
resp = upload_graph_version(
ids.graph_file_path,
ids.organization_id,
add_missing_node_ids=not publish_component,
)
graph_version_id = resp["uid"]
ui_url = resp["ui_url"]
manifest = resp["manifest"]
sprint(f"\n[success]Uploaded new graph version with id [b]{graph_version_id}")
if manifest.get("errors"):
sprint(f"[error]Graph contains the following errors:")
for error in manifest["errors"]:
sprint(f"\t[error]{error}")
if publish_component:
with abort_on_error("Error creating component"):
resp = create_graph_component(graph_version_id)
resp_org = resp["organization"]["slug"]
resp_versions = resp["version_names"]
resp_component = resp["component"]["slug"]
resp_id = resp["uid"]
sprint(
f"[success]Published graph component "
f"[b]{resp_org}/{resp_component}[/b] "
f"with versions [b]{resp_versions}[/b] "
f"at id [b]{resp_id}"
)
elif deploy:
with abort_on_error("Deploy failed"):
deploy_graph_version(graph_version_id, ids.environment_id)
sprint(f"[success]Graph deployed")
sprint(f"\n[info]Visit [code]{ui_url}[/code] to view your graph")
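
# Illustrative invocation sketch (assumes this function is registered as the
# `upload` command of the Basis Typer CLI; the command name and graph path
# are assumptions, only the flags come from the options defined above):
#
#     basis upload ./graph.yml -o my-org -e prod --no-deploy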
|
# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for Backup code."""
import copy
import ddt
import os
import uuid
import mock
from os_brick.initiator.connectors import fake as fake_connectors
from oslo_config import cfg
from oslo_db import exception as db_exc
from oslo_utils import importutils
from oslo_utils import timeutils
import cinder
from cinder.backup import api
from cinder.backup import manager
from cinder import context
from cinder import db
from cinder import exception
from cinder import objects
from cinder.objects import fields
from cinder import test
from cinder.tests import fake_driver
from cinder.tests.unit import fake_constants as fake
from cinder.tests.unit import utils
from cinder.volume import rpcapi as volume_rpcapi
CONF = cfg.CONF
class FakeBackupException(Exception):
pass
class BaseBackupTest(test.TestCase):
def setUp(self):
super(BaseBackupTest, self).setUp()
self.backup_mgr = importutils.import_object(CONF.backup_manager)
self.backup_mgr.host = 'testhost'
self.backup_mgr.is_initialized = True
self.ctxt = context.get_admin_context()
paths = ['cinder.volume.rpcapi.VolumeAPI.delete_snapshot',
'cinder.volume.rpcapi.VolumeAPI.delete_volume',
'cinder.volume.rpcapi.VolumeAPI.detach_volume',
'cinder.volume.rpcapi.VolumeAPI.'
'secure_file_operations_enabled']
self.volume_patches = {}
self.volume_mocks = {}
for path in paths:
name = path.split('.')[-1]
self.volume_patches[name] = mock.patch(path)
self.volume_mocks[name] = self.volume_patches[name].start()
self.addCleanup(self.volume_patches[name].stop)
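    # Note: call-time defaults such as volume_id=str(uuid.uuid4()) in the
    # helpers below are evaluated once, when the method is defined, so every
    # call that relies on such a default shares the same generated value.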
def _create_backup_db_entry(self, volume_id=str(uuid.uuid4()),
restore_volume_id=None,
display_name='test_backup',
display_description='this is a test backup',
container='volumebackups',
status=fields.BackupStatus.CREATING,
size=1,
object_count=0,
project_id=str(uuid.uuid4()),
service=None,
temp_volume_id=None,
temp_snapshot_id=None,
snapshot_id=None,
metadata=None,
parent_id=None,
encryption_key_id=None):
"""Create a backup entry in the DB.
        Return the created backup object.
"""
kwargs = {}
kwargs['volume_id'] = volume_id
kwargs['restore_volume_id'] = restore_volume_id
kwargs['user_id'] = str(uuid.uuid4())
kwargs['project_id'] = project_id
kwargs['host'] = 'testhost'
kwargs['availability_zone'] = '1'
kwargs['display_name'] = display_name
kwargs['display_description'] = display_description
kwargs['container'] = container
kwargs['status'] = status
kwargs['fail_reason'] = ''
kwargs['service'] = service or CONF.backup_driver
kwargs['snapshot_id'] = snapshot_id
kwargs['parent_id'] = parent_id
kwargs['size'] = size
kwargs['object_count'] = object_count
kwargs['temp_volume_id'] = temp_volume_id
kwargs['temp_snapshot_id'] = temp_snapshot_id
kwargs['metadata'] = metadata or {}
kwargs['encryption_key_id'] = encryption_key_id
backup = objects.Backup(context=self.ctxt, **kwargs)
backup.create()
return backup
def _create_volume_db_entry(self, display_name='test_volume',
display_description='this is a test volume',
status='backing-up',
previous_status='available',
size=1,
host='testhost',
encryption_key_id=None):
"""Create a volume entry in the DB.
Return the entry ID
"""
vol = {}
vol['size'] = size
vol['host'] = host
vol['user_id'] = str(uuid.uuid4())
vol['project_id'] = str(uuid.uuid4())
vol['status'] = status
vol['display_name'] = display_name
vol['display_description'] = display_description
vol['attach_status'] = fields.VolumeAttachStatus.DETACHED
vol['availability_zone'] = '1'
vol['previous_status'] = previous_status
vol['encryption_key_id'] = encryption_key_id
volume = objects.Volume(context=self.ctxt, **vol)
volume.create()
return volume.id
def _create_snapshot_db_entry(self, display_name='test_snapshot',
display_description='test snapshot',
status=fields.SnapshotStatus.AVAILABLE,
size=1,
volume_id=str(uuid.uuid4()),
provider_location=None):
"""Create a snapshot entry in the DB.
        Return the created snapshot object.
"""
kwargs = {}
kwargs['size'] = size
kwargs['user_id'] = str(uuid.uuid4())
kwargs['project_id'] = str(uuid.uuid4())
kwargs['status'] = status
kwargs['display_name'] = display_name
kwargs['display_description'] = display_description
kwargs['volume_id'] = volume_id
kwargs['cgsnapshot_id'] = None
kwargs['volume_size'] = size
kwargs['metadata'] = {}
kwargs['provider_location'] = provider_location
snapshot_obj = objects.Snapshot(context=self.ctxt, **kwargs)
snapshot_obj.create()
return snapshot_obj
def _create_volume_attach(self, volume_id):
values = {'volume_id': volume_id,
'attach_status': fields.VolumeAttachStatus.ATTACHED, }
attachment = db.volume_attach(self.ctxt, values)
db.volume_attached(self.ctxt, attachment['id'], None, 'testhost',
'/dev/vd0')
def _create_exported_record_entry(self, vol_size=1, exported_id=None):
"""Create backup metadata export entry."""
vol_id = self._create_volume_db_entry(status='available',
size=vol_size)
backup = self._create_backup_db_entry(
status=fields.BackupStatus.AVAILABLE, volume_id=vol_id)
if exported_id is not None:
backup.id = exported_id
export = self.backup_mgr.export_record(self.ctxt, backup)
return export
def _create_export_record_db_entry(self,
volume_id=str(uuid.uuid4()),
status=fields.BackupStatus.CREATING,
project_id=str(uuid.uuid4()),
backup_id=None):
"""Create a backup entry in the DB.
        Return the created backup import object.
"""
kwargs = {}
kwargs['volume_id'] = volume_id
kwargs['user_id'] = str(uuid.uuid4())
kwargs['project_id'] = project_id
kwargs['status'] = status
if backup_id:
kwargs['id'] = backup_id
backup = objects.BackupImport(context=self.ctxt, **kwargs)
backup.create()
return backup
@ddt.ddt
class BackupTestCase(BaseBackupTest):
"""Test Case for backups."""
@mock.patch.object(cinder.tests.fake_driver.FakeLoggingVolumeDriver,
'set_initialized')
@mock.patch.object(cinder.tests.fake_driver.FakeLoggingVolumeDriver,
'do_setup')
@mock.patch.object(cinder.tests.fake_driver.FakeLoggingVolumeDriver,
'check_for_setup_error')
@mock.patch('cinder.context.get_admin_context')
def test_init_host(self, mock_get_admin_context, mock_check, mock_setup,
mock_set_initialized):
"""Test stuck volumes and backups.
Make sure stuck volumes and backups are reset to correct
states when backup_manager.init_host() is called
"""
def get_admin_context():
return self.ctxt
self.override_config('backup_service_inithost_offload', False)
self.override_config('periodic_interval', 0)
vol1_id = self._create_volume_db_entry()
self._create_volume_attach(vol1_id)
db.volume_update(self.ctxt, vol1_id, {'status': 'backing-up'})
vol2_id = self._create_volume_db_entry()
self._create_volume_attach(vol2_id)
db.volume_update(self.ctxt, vol2_id, {'status': 'restoring-backup'})
vol3_id = self._create_volume_db_entry()
db.volume_update(self.ctxt, vol3_id, {'status': 'available'})
vol4_id = self._create_volume_db_entry()
db.volume_update(self.ctxt, vol4_id, {'status': 'backing-up'})
temp_vol_id = self._create_volume_db_entry()
db.volume_update(self.ctxt, temp_vol_id, {'status': 'available'})
vol5_id = self._create_volume_db_entry()
db.volume_update(self.ctxt, vol5_id, {'status': 'backing-up'})
temp_snap = self._create_snapshot_db_entry()
temp_snap.status = fields.SnapshotStatus.AVAILABLE
temp_snap.save()
backup1 = self._create_backup_db_entry(
status=fields.BackupStatus.CREATING, volume_id=vol1_id)
backup2 = self._create_backup_db_entry(
status=fields.BackupStatus.RESTORING,
restore_volume_id=vol2_id)
backup3 = self._create_backup_db_entry(
status=fields.BackupStatus.DELETING, volume_id=vol3_id)
self._create_backup_db_entry(status=fields.BackupStatus.CREATING,
volume_id=vol4_id,
temp_volume_id=temp_vol_id)
self._create_backup_db_entry(status=fields.BackupStatus.CREATING,
volume_id=vol5_id,
temp_snapshot_id=temp_snap.id)
mock_get_admin_context.side_effect = get_admin_context
self.volume = importutils.import_object(CONF.volume_manager)
self.backup_mgr.init_host()
vol1 = db.volume_get(self.ctxt, vol1_id)
self.assertEqual('available', vol1['status'])
vol2 = db.volume_get(self.ctxt, vol2_id)
self.assertEqual('error_restoring', vol2['status'])
vol3 = db.volume_get(self.ctxt, vol3_id)
self.assertEqual('available', vol3['status'])
vol4 = db.volume_get(self.ctxt, vol4_id)
self.assertEqual('available', vol4['status'])
vol5 = db.volume_get(self.ctxt, vol5_id)
self.assertEqual('available', vol5['status'])
backup1 = db.backup_get(self.ctxt, backup1.id)
self.assertEqual(fields.BackupStatus.ERROR, backup1['status'])
backup2 = db.backup_get(self.ctxt, backup2.id)
self.assertEqual(fields.BackupStatus.AVAILABLE, backup2['status'])
self.assertRaises(exception.BackupNotFound,
db.backup_get,
self.ctxt,
backup3.id)
temp_vol = objects.Volume.get_by_id(self.ctxt, temp_vol_id)
self.volume_mocks['delete_volume'].assert_called_once_with(
self.ctxt, temp_vol)
self.assertTrue(self.volume_mocks['detach_volume'].called)
@mock.patch('cinder.objects.backup.BackupList.get_all_by_host')
@mock.patch('cinder.manager.ThreadPoolManager._add_to_threadpool')
def test_init_host_with_service_inithost_offload(self,
mock_add_threadpool,
mock_get_all_by_host):
vol1_id = self._create_volume_db_entry()
db.volume_update(self.ctxt, vol1_id, {'status': 'available'})
backup1 = self._create_backup_db_entry(
status=fields.BackupStatus.DELETING, volume_id=vol1_id)
vol2_id = self._create_volume_db_entry()
db.volume_update(self.ctxt, vol2_id, {'status': 'available'})
backup2 = self._create_backup_db_entry(
status=fields.BackupStatus.DELETING, volume_id=vol2_id)
mock_get_all_by_host.return_value = [backup1, backup2]
self.backup_mgr.init_host()
calls = [mock.call(self.backup_mgr.delete_backup, mock.ANY, backup1),
mock.call(self.backup_mgr.delete_backup, mock.ANY, backup2)]
mock_add_threadpool.assert_has_calls(calls, any_order=True)
self.assertEqual(2, mock_add_threadpool.call_count)
@mock.patch('cinder.objects.service.Service.get_minimum_rpc_version')
@mock.patch('cinder.objects.service.Service.get_minimum_obj_version')
@mock.patch('cinder.rpc.LAST_RPC_VERSIONS', {'cinder-backup': '1.3',
'cinder-volume': '1.7'})
@mock.patch('cinder.rpc.LAST_OBJ_VERSIONS', {'cinder-backup': '1.2',
'cinder-volume': '1.4'})
def test_reset(self, get_min_obj, get_min_rpc):
get_min_obj.return_value = 'liberty'
backup_mgr = manager.BackupManager()
backup_rpcapi = backup_mgr.backup_rpcapi
volume_rpcapi = backup_mgr.volume_rpcapi
self.assertEqual('1.3', backup_rpcapi.client.version_cap)
self.assertEqual('1.2',
backup_rpcapi.client.serializer._base.version_cap)
self.assertEqual('1.7', volume_rpcapi.client.version_cap)
self.assertEqual('1.4',
volume_rpcapi.client.serializer._base.version_cap)
get_min_obj.return_value = objects.base.OBJ_VERSIONS.get_current()
backup_mgr.reset()
backup_rpcapi = backup_mgr.backup_rpcapi
volume_rpcapi = backup_mgr.volume_rpcapi
self.assertEqual(get_min_rpc.return_value,
backup_rpcapi.client.version_cap)
self.assertEqual(get_min_obj.return_value,
backup_rpcapi.client.serializer._base.version_cap)
self.assertIsNone(backup_rpcapi.client.serializer._base.manifest)
self.assertEqual(get_min_rpc.return_value,
volume_rpcapi.client.version_cap)
self.assertEqual(get_min_obj.return_value,
volume_rpcapi.client.serializer._base.version_cap)
self.assertIsNone(volume_rpcapi.client.serializer._base.manifest)
@ddt.data(True, False)
def test_is_working(self, initialized):
self.backup_mgr.is_initialized = initialized
self.assertEqual(initialized, self.backup_mgr.is_working())
def test_cleanup_incomplete_backup_operations_with_exceptions(self):
"""Test cleanup resilience in the face of exceptions."""
fake_backup_list = [{'id': str(uuid.uuid4())},
{'id': str(uuid.uuid4())},
{'id': str(uuid.uuid4())}]
mock_backup_get_by_host = self.mock_object(
objects.BackupList, 'get_all_by_host')
mock_backup_get_by_host.return_value = fake_backup_list
mock_backup_cleanup = self.mock_object(
self.backup_mgr, '_cleanup_one_backup')
mock_backup_cleanup.side_effect = [Exception]
mock_temp_cleanup = self.mock_object(
self.backup_mgr, '_cleanup_temp_volumes_snapshots_for_one_backup')
mock_temp_cleanup.side_effect = [Exception]
self.assertIsNone(
self.backup_mgr._cleanup_incomplete_backup_operations(
self.ctxt))
self.assertEqual(len(fake_backup_list), mock_backup_cleanup.call_count)
self.assertEqual(len(fake_backup_list), mock_temp_cleanup.call_count)
def test_cleanup_one_backing_up_volume(self):
"""Test cleanup_one_volume for volume status 'backing-up'."""
volume_id = self._create_volume_db_entry(status='backing-up',
previous_status='available')
volume = db.volume_get(self.ctxt, volume_id)
self.backup_mgr._cleanup_one_volume(self.ctxt, volume)
volume = db.volume_get(self.ctxt, volume_id)
self.assertEqual('available', volume['status'])
def test_cleanup_one_restoring_backup_volume(self):
"""Test cleanup_one_volume for volume status 'restoring-backup'."""
volume_id = self._create_volume_db_entry(status='restoring-backup')
volume = db.volume_get(self.ctxt, volume_id)
self.backup_mgr._cleanup_one_volume(self.ctxt, volume)
volume = db.volume_get(self.ctxt, volume_id)
self.assertEqual('error_restoring', volume['status'])
    def test_cleanup_one_creating_backup(self):
        """Test cleanup_one_backup for backup status 'creating'."""
vol1_id = self._create_volume_db_entry()
self._create_volume_attach(vol1_id)
db.volume_update(self.ctxt, vol1_id, {'status': 'backing-up', })
backup = self._create_backup_db_entry(
status=fields.BackupStatus.CREATING,
volume_id=vol1_id)
self.backup_mgr._cleanup_one_backup(self.ctxt, backup)
self.assertEqual(fields.BackupStatus.ERROR, backup.status)
volume = objects.Volume.get_by_id(self.ctxt, vol1_id)
self.assertEqual('available', volume.status)
    def test_cleanup_one_restoring_backup(self):
        """Test cleanup_one_backup for backup status 'restoring'."""
vol1_id = self._create_volume_db_entry()
db.volume_update(self.ctxt, vol1_id, {'status': 'restoring-backup', })
backup = self._create_backup_db_entry(
status=fields.BackupStatus.RESTORING,
restore_volume_id=vol1_id)
self.backup_mgr._cleanup_one_backup(self.ctxt, backup)
self.assertEqual(fields.BackupStatus.AVAILABLE, backup.status)
volume = objects.Volume.get_by_id(self.ctxt, vol1_id)
self.assertEqual('error_restoring', volume.status)
def test_cleanup_one_deleting_backup(self):
"""Test cleanup_one_backup for backup status 'deleting'."""
self.override_config('backup_service_inithost_offload', False)
backup = self._create_backup_db_entry(
status=fields.BackupStatus.DELETING)
self.backup_mgr._cleanup_one_backup(self.ctxt, backup)
self.assertRaises(exception.BackupNotFound,
db.backup_get,
self.ctxt,
backup.id)
def test_cleanup_one_deleting_encrypted_backup(self):
"""Test cleanup of backup status 'deleting' (encrypted)."""
self.override_config('backup_service_inithost_offload', False)
backup = self._create_backup_db_entry(
status=fields.BackupStatus.DELETING,
encryption_key_id=fake.ENCRYPTION_KEY_ID)
self.backup_mgr._cleanup_one_backup(self.ctxt, backup)
backup = db.backup_get(self.ctxt, backup.id)
self.assertIsNotNone(backup)
self.assertEqual(fields.BackupStatus.ERROR_DELETING,
backup.status)
def test_detach_all_attachments_handles_exceptions(self):
"""Test detach_all_attachments with exceptions."""
mock_log = self.mock_object(manager, 'LOG')
self.volume_mocks['detach_volume'].side_effect = [Exception]
fake_attachments = [
{
'id': str(uuid.uuid4()),
'attached_host': 'testhost',
'instance_uuid': None,
},
{
'id': str(uuid.uuid4()),
'attached_host': 'testhost',
'instance_uuid': None,
}
]
fake_volume = {
'id': str(uuid.uuid4()),
'volume_attachment': fake_attachments
}
self.backup_mgr._detach_all_attachments(self.ctxt,
fake_volume)
self.assertEqual(len(fake_attachments), mock_log.exception.call_count)
@ddt.data(KeyError, exception.VolumeNotFound)
def test_cleanup_temp_volumes_snapshots_for_one_backup_volume_not_found(
self, err):
"""Ensure we handle missing volume for a backup."""
mock_volume_get = self.mock_object(db, 'volume_get')
mock_volume_get.side_effect = [err]
backup = self._create_backup_db_entry(
status=fields.BackupStatus.CREATING)
self.assertIsNone(
self.backup_mgr._cleanup_temp_volumes_snapshots_for_one_backup(
self.ctxt,
backup))
def test_cleanup_temp_snapshot_for_one_backup_not_found(self):
"""Ensure we handle missing temp snapshot for a backup."""
vol1_id = self._create_volume_db_entry()
self._create_volume_attach(vol1_id)
db.volume_update(self.ctxt, vol1_id, {'status': 'backing-up'})
backup = self._create_backup_db_entry(
status=fields.BackupStatus.ERROR,
volume_id=vol1_id,
temp_snapshot_id=str(uuid.uuid4()))
self.assertIsNone(
self.backup_mgr._cleanup_temp_volumes_snapshots_for_one_backup(
self.ctxt,
backup))
self.assertFalse(self.volume_mocks['delete_snapshot'].called)
self.assertIsNone(backup.temp_snapshot_id)
backup.destroy()
db.volume_destroy(self.ctxt, vol1_id)
def test_cleanup_temp_volume_for_one_backup_not_found(self):
"""Ensure we handle missing temp volume for a backup."""
vol1_id = self._create_volume_db_entry()
self._create_volume_attach(vol1_id)
db.volume_update(self.ctxt, vol1_id, {'status': 'backing-up'})
backup = self._create_backup_db_entry(status=fields.BackupStatus.ERROR,
volume_id=vol1_id,
temp_volume_id=str(uuid.uuid4()))
self.assertIsNone(
self.backup_mgr._cleanup_temp_volumes_snapshots_for_one_backup(
self.ctxt,
backup))
self.assertFalse(self.volume_mocks['delete_volume'].called)
self.assertIsNone(backup.temp_volume_id)
backup.destroy()
db.volume_destroy(self.ctxt, vol1_id)
def test_create_backup_with_bad_volume_status(self):
"""Test creating a backup from a volume with a bad status."""
vol_id = self._create_volume_db_entry(status='restoring', size=1)
backup = self._create_backup_db_entry(volume_id=vol_id)
self.assertRaises(exception.InvalidVolume,
self.backup_mgr.create_backup,
self.ctxt,
backup)
    def test_create_backup_with_bad_backup_status(self):
        """Test creating a backup when the backup record has a bad status."""
vol_id = self._create_volume_db_entry(size=1)
backup = self._create_backup_db_entry(
status=fields.BackupStatus.AVAILABLE, volume_id=vol_id)
self.assertRaises(exception.InvalidBackup,
self.backup_mgr.create_backup,
self.ctxt,
backup)
def test_create_backup_with_error(self):
"""Test error handling when error occurs during backup creation."""
vol_id = self._create_volume_db_entry(size=1)
backup = self._create_backup_db_entry(volume_id=vol_id)
mock_run_backup = self.mock_object(self.backup_mgr, '_run_backup')
mock_run_backup.side_effect = FakeBackupException(str(uuid.uuid4()))
self.assertRaises(FakeBackupException,
self.backup_mgr.create_backup,
self.ctxt,
backup)
vol = db.volume_get(self.ctxt, vol_id)
self.assertEqual('available', vol['status'])
self.assertEqual('error_backing-up', vol['previous_status'])
backup = db.backup_get(self.ctxt, backup.id)
self.assertEqual(fields.BackupStatus.ERROR, backup['status'])
self.assertTrue(mock_run_backup.called)
@mock.patch('cinder.backup.manager.BackupManager._run_backup',
side_effect=FakeBackupException(str(uuid.uuid4())))
    def test_create_backup_with_snapshot_error(self, mock_run_backup):
        """Test error handling when backup creation from a snapshot fails."""
vol_id = self._create_volume_db_entry(size=1)
snapshot = self._create_snapshot_db_entry(status='backing-up',
volume_id=vol_id)
backup = self._create_backup_db_entry(volume_id=vol_id,
snapshot_id=snapshot.id)
self.assertRaises(FakeBackupException,
self.backup_mgr.create_backup,
self.ctxt,
backup)
snapshot.refresh()
self.assertEqual('available', snapshot.status)
backup.refresh()
self.assertEqual(fields.BackupStatus.ERROR, backup.status)
self.assertTrue(mock_run_backup.called)
@mock.patch('cinder.utils.brick_get_connector_properties')
@mock.patch('cinder.volume.rpcapi.VolumeAPI.get_backup_device')
@mock.patch('cinder.utils.temporary_chown')
@mock.patch('six.moves.builtins.open')
@mock.patch.object(os.path, 'isdir', return_value=False)
def test_create_backup(self, mock_isdir, mock_open, mock_temporary_chown,
mock_get_backup_device, mock_get_conn):
"""Test normal backup creation."""
vol_size = 1
vol_id = self._create_volume_db_entry(size=vol_size)
backup = self._create_backup_db_entry(volume_id=vol_id)
vol = objects.Volume.get_by_id(self.ctxt, vol_id)
backup_device_dict = {'backup_device': vol, 'secure_enabled': False,
'is_snapshot': False, }
mock_get_backup_device.return_value = (
objects.BackupDeviceInfo.from_primitive(backup_device_dict,
self.ctxt,
['admin_metadata',
'metadata']))
attach_info = {'device': {'path': '/dev/null'}}
mock_detach_device = self.mock_object(self.backup_mgr,
'_detach_device')
mock_attach_device = self.mock_object(self.backup_mgr,
'_attach_device')
mock_attach_device.return_value = attach_info
properties = {}
mock_get_conn.return_value = properties
mock_open.return_value = open('/dev/null', 'rb')
self.backup_mgr.create_backup(self.ctxt, backup)
mock_temporary_chown.assert_called_once_with('/dev/null')
mock_attach_device.assert_called_once_with(self.ctxt, vol,
properties, False)
mock_get_backup_device.assert_called_once_with(self.ctxt, backup, vol)
mock_get_conn.assert_called_once_with()
mock_detach_device.assert_called_once_with(self.ctxt, attach_info,
vol, properties, False,
force=True,
ignore_errors=True)
vol = objects.Volume.get_by_id(self.ctxt, vol_id)
self.assertEqual('available', vol['status'])
self.assertEqual('backing-up', vol['previous_status'])
backup = db.backup_get(self.ctxt, backup.id)
self.assertEqual(fields.BackupStatus.AVAILABLE, backup['status'])
self.assertEqual(vol_size, backup['size'])
self.assertIsNone(backup.encryption_key_id)
@mock.patch('cinder.utils.brick_get_connector_properties')
@mock.patch('cinder.volume.rpcapi.VolumeAPI.get_backup_device')
@mock.patch('cinder.utils.temporary_chown')
@mock.patch('six.moves.builtins.open')
@mock.patch.object(os.path, 'isdir', return_value=True)
def test_create_backup_set_parent_id_to_none(self, mock_isdir, mock_open,
mock_chown,
mock_backup_device,
mock_brick):
vol_size = 1
vol_id = self._create_volume_db_entry(size=vol_size)
backup = self._create_backup_db_entry(volume_id=vol_id,
parent_id='mock')
with mock.patch.object(self.backup_mgr, 'get_backup_driver') as \
mock_get_backup_driver:
mock_get_backup_driver.return_value.backup.return_value = (
{'parent_id': None})
with mock.patch.object(self.backup_mgr, '_detach_device'):
device_path = '/fake/disk/path/'
attach_info = {'device': {'path': device_path}}
mock_attach_device = self.mock_object(self.backup_mgr,
'_attach_device')
mock_attach_device.return_value = attach_info
properties = {}
mock_brick.return_value = properties
mock_open.return_value = open('/dev/null', 'rb')
mock_brick.return_value = properties
self.backup_mgr.create_backup(self.ctxt, backup)
backup = db.backup_get(self.ctxt, backup.id)
self.assertEqual(fields.BackupStatus.AVAILABLE, backup.status)
self.assertEqual(vol_size, backup.size)
self.assertIsNone(backup.parent_id)
@mock.patch('cinder.utils.brick_get_connector_properties')
@mock.patch('cinder.volume.rpcapi.VolumeAPI.get_backup_device')
@mock.patch('cinder.utils.temporary_chown')
@mock.patch('six.moves.builtins.open')
@mock.patch.object(os.path, 'isdir', return_value=True)
def test_create_backup_set_parent_id(self, mock_isdir, mock_open,
mock_chown, mock_backup_device,
mock_brick):
vol_size = 1
vol_id = self._create_volume_db_entry(size=vol_size)
backup = self._create_backup_db_entry(volume_id=vol_id)
parent_backup = self._create_backup_db_entry(size=vol_size)
with mock.patch.object(self.backup_mgr, 'get_backup_driver') as \
mock_get_backup_driver:
mock_get_backup_driver.return_value.backup.return_value = (
{'parent_id': parent_backup.id})
with mock.patch.object(self.backup_mgr, '_detach_device'):
device_path = '/fake/disk/path/'
attach_info = {'device': {'path': device_path}}
mock_attach_device = self.mock_object(self.backup_mgr,
'_attach_device')
mock_attach_device.return_value = attach_info
properties = {}
mock_brick.return_value = properties
mock_open.return_value = open('/dev/null', 'rb')
mock_brick.return_value = properties
self.backup_mgr.create_backup(self.ctxt, backup)
backup = db.backup_get(self.ctxt, backup.id)
self.assertEqual(fields.BackupStatus.AVAILABLE, backup.status)
self.assertEqual(vol_size, backup.size)
self.assertEqual(parent_backup.id, backup.parent_id)
@mock.patch('cinder.utils.brick_get_connector_properties')
@mock.patch('cinder.volume.rpcapi.VolumeAPI.get_backup_device')
@mock.patch('cinder.utils.temporary_chown')
@mock.patch('six.moves.builtins.open')
@mock.patch.object(os.path, 'isdir', return_value=True)
def test_create_backup_fail_with_excep(self, mock_isdir, mock_open,
mock_chown, mock_backup_device,
mock_brick):
vol_id = self._create_volume_db_entry()
backup = self._create_backup_db_entry(volume_id=vol_id)
with mock.patch.object(self.backup_mgr, 'get_backup_driver') as \
mock_get_backup_driver:
mock_get_backup_driver.return_value.backup.side_effect = (
FakeBackupException('fake'))
with mock.patch.object(self.backup_mgr, '_detach_device'):
device_path = '/fake/disk/path/'
attach_info = {'device': {'path': device_path}}
mock_attach_device = self.mock_object(self.backup_mgr,
'_attach_device')
mock_attach_device.return_value = attach_info
properties = {}
mock_brick.return_value = properties
mock_open.return_value = open('/dev/null', 'rb')
mock_brick.return_value = properties
self.assertRaises(FakeBackupException,
self.backup_mgr.create_backup,
self.ctxt, backup)
vol = db.volume_get(self.ctxt, vol_id)
self.assertEqual('available', vol.status)
self.assertEqual('error_backing-up', vol.previous_status)
backup = db.backup_get(self.ctxt, backup.id)
self.assertEqual(fields.BackupStatus.ERROR, backup.status)
@mock.patch('cinder.utils.brick_get_connector_properties')
@mock.patch('cinder.volume.rpcapi.VolumeAPI.get_backup_device')
@mock.patch('cinder.utils.temporary_chown')
@mock.patch('six.moves.builtins.open')
@mock.patch.object(os.path, 'isdir', return_value=True)
def test_run_backup_with_dir_device_path(self, mock_isdir,
mock_open,
mock_chown,
mock_backup_device,
mock_brick):
backup_service = lambda: None
backup_service.backup = mock.Mock(
return_value=mock.sentinel.backup_update)
self.backup_mgr.get_backup_driver = lambda x: backup_service
vol_id = self._create_volume_db_entry()
backup = self._create_backup_db_entry(volume_id=vol_id)
volume = objects.Volume.get_by_id(self.ctxt, vol_id)
# device_path is represented by a directory
device_path = '/fake/disk/path/'
attach_info = {'device': {'path': device_path}}
self.backup_mgr._attach_device = mock.Mock(
return_value=attach_info)
self.backup_mgr._detach_device = mock.Mock()
output = self.backup_mgr._run_backup(self.ctxt, backup, volume)
mock_chown.assert_not_called()
mock_open.assert_not_called()
backup_service.backup.assert_called_once_with(
backup, device_path)
self.assertEqual(mock.sentinel.backup_update, output)
@mock.patch('cinder.backup.manager.BackupManager._run_backup')
@ddt.data((fields.SnapshotStatus.BACKING_UP, 'available'),
(fields.SnapshotStatus.BACKING_UP, 'in-use'),
(fields.SnapshotStatus.AVAILABLE, 'available'),
(fields.SnapshotStatus.AVAILABLE, 'in-use'))
@ddt.unpack
def test_create_backup_with_snapshot(self, snapshot_status, volume_status,
mock_run_backup):
vol_id = self._create_volume_db_entry(status=volume_status)
snapshot = self._create_snapshot_db_entry(volume_id=vol_id,
status=snapshot_status)
backup = self._create_backup_db_entry(volume_id=vol_id,
snapshot_id=snapshot.id)
if snapshot_status == fields.SnapshotStatus.BACKING_UP:
self.backup_mgr.create_backup(self.ctxt, backup)
vol = objects.Volume.get_by_id(self.ctxt, vol_id)
snapshot = objects.Snapshot.get_by_id(self.ctxt, snapshot.id)
self.assertEqual(volume_status, vol.status)
self.assertEqual(fields.SnapshotStatus.AVAILABLE, snapshot.status)
else:
self.assertRaises(exception.InvalidSnapshot,
self.backup_mgr.create_backup, self.ctxt, backup)
@mock.patch('cinder.utils.brick_get_connector_properties')
@mock.patch('cinder.volume.rpcapi.VolumeAPI.get_backup_device')
@mock.patch('cinder.utils.temporary_chown')
@mock.patch('six.moves.builtins.open')
@mock.patch.object(os.path, 'isdir', return_value=False)
def test_create_backup_with_temp_snapshot(self, mock_isdir,
mock_open,
mock_temporary_chown,
mock_get_backup_device,
mock_get_conn):
"""Test backup in-use volume using temp snapshot."""
self.override_config('backup_use_same_host', True)
vol_size = 1
vol_id = self._create_volume_db_entry(size=vol_size,
previous_status='in-use')
backup = self._create_backup_db_entry(volume_id=vol_id)
snap = self._create_snapshot_db_entry(volume_id=vol_id)
vol = objects.Volume.get_by_id(self.ctxt, vol_id)
mock_get_backup_device.return_value = (
objects.BackupDeviceInfo.from_primitive({
'backup_device': snap, 'secure_enabled': False,
'is_snapshot': True, },
self.ctxt, expected_attrs=['metadata']))
attach_info = {
'device': {'path': '/dev/null'},
'conn': {'data': {}},
'connector': fake_connectors.FakeConnector(None)}
mock_terminate_connection_snapshot = self.mock_object(
volume_rpcapi.VolumeAPI,
'terminate_connection_snapshot')
mock_initialize_connection_snapshot = self.mock_object(
volume_rpcapi.VolumeAPI,
'initialize_connection_snapshot')
mock_connect_device = self.mock_object(
manager.BackupManager,
'_connect_device')
mock_connect_device.return_value = attach_info
properties = {}
mock_get_conn.return_value = properties
mock_open.return_value = open('/dev/null', 'rb')
self.backup_mgr.create_backup(self.ctxt, backup)
mock_temporary_chown.assert_called_once_with('/dev/null')
mock_initialize_connection_snapshot.assert_called_once_with(
self.ctxt, snap, properties)
mock_get_backup_device.assert_called_once_with(self.ctxt, backup, vol)
mock_get_conn.assert_called_once_with()
mock_terminate_connection_snapshot.assert_called_once_with(
self.ctxt, snap, properties, force=True)
vol = objects.Volume.get_by_id(self.ctxt, vol_id)
self.assertEqual('in-use', vol['status'])
self.assertEqual('backing-up', vol['previous_status'])
backup = objects.Backup.get_by_id(self.ctxt, backup.id)
self.assertEqual(fields.BackupStatus.AVAILABLE, backup.status)
self.assertEqual(vol_size, backup.size)
@mock.patch.object(fake_driver.FakeLoggingVolumeDriver, 'create_snapshot')
def test_create_temp_snapshot(self, mock_create_snapshot):
volume_manager = importutils.import_object(CONF.volume_manager)
volume_manager.driver.set_initialized()
vol_size = 1
vol_id = self._create_volume_db_entry(size=vol_size,
previous_status='in-use')
vol = objects.Volume.get_by_id(self.ctxt, vol_id)
mock_create_snapshot.return_value = {'provider_id':
'fake_provider_id'}
temp_snap = volume_manager.driver._create_temp_snapshot(
self.ctxt, vol)
self.assertEqual('available', temp_snap['status'])
self.assertEqual('fake_provider_id', temp_snap['provider_id'])
@mock.patch.object(fake_driver.FakeLoggingVolumeDriver,
'create_cloned_volume')
def test_create_temp_cloned_volume(self, mock_create_cloned_volume):
volume_manager = importutils.import_object(CONF.volume_manager)
volume_manager.driver.set_initialized()
vol_size = 1
vol_id = self._create_volume_db_entry(size=vol_size,
previous_status='in-use')
vol = objects.Volume.get_by_id(self.ctxt, vol_id)
mock_create_cloned_volume.return_value = {'provider_id':
'fake_provider_id'}
temp_vol = volume_manager.driver._create_temp_cloned_volume(
self.ctxt, vol)
self.assertEqual('available', temp_vol['status'])
self.assertEqual('fake_provider_id', temp_vol['provider_id'])
@mock.patch.object(fake_driver.FakeLoggingVolumeDriver,
'create_volume_from_snapshot')
def test_create_temp_volume_from_snapshot(self, mock_create_vol_from_snap):
volume_manager = importutils.import_object(CONF.volume_manager)
volume_manager.driver.set_initialized()
vol_size = 1
vol_id = self._create_volume_db_entry(size=vol_size,
previous_status='in-use')
vol = objects.Volume.get_by_id(self.ctxt, vol_id)
snap = self._create_snapshot_db_entry(volume_id=vol_id)
mock_create_vol_from_snap.return_value = {'provider_id':
'fake_provider_id'}
temp_vol = volume_manager.driver._create_temp_volume_from_snapshot(
self.ctxt, vol, snap)
self.assertEqual('available', temp_vol['status'])
self.assertEqual('fake_provider_id', temp_vol['provider_id'])
@mock.patch('cinder.volume.utils.notify_about_backup_usage')
def test_create_backup_with_notify(self, notify):
"""Test normal backup creation with notifications."""
vol_size = 1
vol_id = self._create_volume_db_entry(size=vol_size)
backup = self._create_backup_db_entry(volume_id=vol_id)
self.mock_object(self.backup_mgr, '_run_backup')
self.backup_mgr.create_backup(self.ctxt, backup)
self.assertEqual(2, notify.call_count)
@mock.patch('cinder.volume.rpcapi.VolumeAPI.get_backup_device')
@mock.patch('cinder.volume.utils.clone_encryption_key')
@mock.patch('cinder.utils.brick_get_connector_properties')
def test_create_backup_encrypted_volume(self,
mock_connector_properties,
mock_clone_encryption_key,
mock_get_backup_device):
"""Test backup of encrypted volume.
Test whether the volume's encryption key ID is cloned and
saved in the backup.
"""
vol_id = self._create_volume_db_entry(encryption_key_id=fake.UUID1)
backup = self._create_backup_db_entry(volume_id=vol_id)
self.mock_object(self.backup_mgr, '_detach_device')
mock_attach_device = self.mock_object(self.backup_mgr,
'_attach_device')
mock_attach_device.return_value = {'device': {'path': '/dev/null'}}
mock_clone_encryption_key.return_value = fake.UUID2
self.backup_mgr.create_backup(self.ctxt, backup)
mock_clone_encryption_key.assert_called_once_with(self.ctxt,
mock.ANY,
fake.UUID1)
backup = db.backup_get(self.ctxt, backup.id)
self.assertEqual(fake.UUID2, backup.encryption_key_id)
@mock.patch('cinder.volume.rpcapi.VolumeAPI.get_backup_device')
@mock.patch('cinder.volume.utils.clone_encryption_key')
@mock.patch('cinder.utils.brick_get_connector_properties')
def test_create_backup_encrypted_volume_again(self,
mock_connector_properties,
mock_clone_encryption_key,
mock_get_backup_device):
"""Test backup of encrypted volume.
Test when the backup already has a clone of the volume's encryption
key ID.
"""
vol_id = self._create_volume_db_entry(encryption_key_id=fake.UUID1)
backup = self._create_backup_db_entry(volume_id=vol_id,
encryption_key_id=fake.UUID2)
self.mock_object(self.backup_mgr, '_detach_device')
mock_attach_device = self.mock_object(self.backup_mgr,
'_attach_device')
mock_attach_device.return_value = {'device': {'path': '/dev/null'}}
self.backup_mgr.create_backup(self.ctxt, backup)
mock_clone_encryption_key.assert_not_called()
def test_restore_backup_with_bad_volume_status(self):
"""Test error handling.
Test error handling when restoring a backup to a volume
with a bad status.
"""
vol_id = self._create_volume_db_entry(status='available', size=1)
backup = self._create_backup_db_entry(volume_id=vol_id)
self.assertRaises(exception.InvalidVolume,
self.backup_mgr.restore_backup,
self.ctxt,
backup,
vol_id)
backup = db.backup_get(self.ctxt, backup.id)
vol = db.volume_get(self.ctxt, vol_id)
self.assertEqual('error_restoring', vol['status'])
self.assertEqual(fields.BackupStatus.AVAILABLE, backup['status'])
def test_restore_backup_with_bad_backup_status(self):
"""Test error handling.
        Test error handling when restoring a backup that has a bad status.
"""
vol_id = self._create_volume_db_entry(status='restoring-backup',
size=1)
backup = self._create_backup_db_entry(
status=fields.BackupStatus.AVAILABLE, volume_id=vol_id)
self.assertRaises(exception.InvalidBackup,
self.backup_mgr.restore_backup,
self.ctxt,
backup,
vol_id)
vol = db.volume_get(self.ctxt, vol_id)
self.assertEqual('error', vol['status'])
backup = db.backup_get(self.ctxt, backup.id)
self.assertEqual(fields.BackupStatus.ERROR, backup['status'])
def test_restore_backup_with_driver_error(self):
"""Test error handling when an error occurs during backup restore."""
vol_id = self._create_volume_db_entry(status='restoring-backup',
size=1)
backup = self._create_backup_db_entry(
status=fields.BackupStatus.RESTORING, volume_id=vol_id)
mock_run_restore = self.mock_object(
self.backup_mgr,
'_run_restore')
mock_run_restore.side_effect = FakeBackupException('fake')
self.assertRaises(FakeBackupException,
self.backup_mgr.restore_backup,
self.ctxt,
backup,
vol_id)
vol = db.volume_get(self.ctxt, vol_id)
self.assertEqual('error_restoring', vol['status'])
backup = db.backup_get(self.ctxt, backup.id)
self.assertEqual(fields.BackupStatus.AVAILABLE, backup['status'])
self.assertTrue(mock_run_restore.called)
def test_restore_backup_with_bad_service(self):
"""Test error handling.
Test error handling when attempting a restore of a backup
with a different service to that used to create the backup.
"""
vol_id = self._create_volume_db_entry(status='restoring-backup',
size=1)
service = 'cinder.tests.backup.bad_service'
backup = self._create_backup_db_entry(
status=fields.BackupStatus.RESTORING, volume_id=vol_id,
service=service)
self.assertRaises(exception.InvalidBackup,
self.backup_mgr.restore_backup,
self.ctxt,
backup,
vol_id)
vol = db.volume_get(self.ctxt, vol_id)
self.assertEqual('error', vol['status'])
backup = db.backup_get(self.ctxt, backup.id)
self.assertEqual(fields.BackupStatus.AVAILABLE, backup['status'])
@mock.patch('cinder.utils.brick_get_connector_properties')
@mock.patch('cinder.utils.temporary_chown')
@mock.patch('six.moves.builtins.open')
@mock.patch.object(os.path, 'isdir', return_value=False)
def test_restore_backup(self, mock_isdir, mock_open,
mock_temporary_chown, mock_get_conn):
"""Test normal backup restoration."""
vol_size = 1
vol_id = self._create_volume_db_entry(status='restoring-backup',
size=vol_size)
backup = self._create_backup_db_entry(
status=fields.BackupStatus.RESTORING, volume_id=vol_id)
properties = {}
mock_get_conn.return_value = properties
mock_open.return_value = open('/dev/null', 'wb')
mock_secure_enabled = (
self.volume_mocks['secure_file_operations_enabled'])
mock_secure_enabled.return_value = False
vol = objects.Volume.get_by_id(self.ctxt, vol_id)
attach_info = {'device': {'path': '/dev/null'}}
mock_detach_device = self.mock_object(self.backup_mgr,
'_detach_device')
mock_attach_device = self.mock_object(self.backup_mgr,
'_attach_device')
mock_attach_device.return_value = attach_info
self.backup_mgr.restore_backup(self.ctxt, backup, vol_id)
mock_temporary_chown.assert_called_once_with('/dev/null')
mock_get_conn.assert_called_once_with()
mock_secure_enabled.assert_called_once_with(self.ctxt, vol)
mock_attach_device.assert_called_once_with(self.ctxt, vol,
properties)
mock_detach_device.assert_called_once_with(self.ctxt, attach_info,
vol, properties, force=True)
vol = objects.Volume.get_by_id(self.ctxt, vol_id)
self.assertEqual('available', vol['status'])
backup = db.backup_get(self.ctxt, backup.id)
self.assertEqual(fields.BackupStatus.AVAILABLE, backup['status'])
@mock.patch('cinder.volume.utils.notify_about_backup_usage')
def test_restore_backup_with_notify(self, notify):
"""Test normal backup restoration with notifications."""
vol_size = 1
vol_id = self._create_volume_db_entry(status='restoring-backup',
size=vol_size)
backup = self._create_backup_db_entry(
status=fields.BackupStatus.RESTORING, volume_id=vol_id)
self.backup_mgr._run_restore = mock.Mock()
self.backup_mgr.restore_backup(self.ctxt, backup, vol_id)
self.assertEqual(2, notify.call_count)
@mock.patch('cinder.volume.utils.clone_encryption_key')
@mock.patch('cinder.volume.utils.delete_encryption_key')
@mock.patch(
'cinder.tests.unit.backup.fake_service.FakeBackupService.restore')
@mock.patch('cinder.utils.brick_get_connector_properties')
def test_restore_backup_encrypted_volume(self,
mock_connector_properties,
mock_backup_driver_restore,
mock_delete_encryption_key,
mock_clone_encryption_key):
"""Test restore of encrypted volume.
Test restoring a volume from its own backup. In this situation,
the volume's encryption key ID shouldn't change.
"""
vol_id = self._create_volume_db_entry(status='restoring-backup',
encryption_key_id=fake.UUID1)
backup = self._create_backup_db_entry(
volume_id=vol_id,
status=fields.BackupStatus.RESTORING,
encryption_key_id=fake.UUID2)
self.mock_object(self.backup_mgr, '_detach_device')
mock_attach_device = self.mock_object(self.backup_mgr,
'_attach_device')
mock_attach_device.return_value = {'device': {'path': '/dev/null'}}
self.backup_mgr.restore_backup(self.ctxt, backup, vol_id)
volume = db.volume_get(self.ctxt, vol_id)
self.assertEqual(fake.UUID1, volume.encryption_key_id)
mock_clone_encryption_key.assert_not_called()
mock_delete_encryption_key.assert_not_called()
@mock.patch('cinder.volume.utils.clone_encryption_key')
@mock.patch('cinder.volume.utils.delete_encryption_key')
@mock.patch(
'cinder.tests.unit.backup.fake_service.FakeBackupService.restore')
@mock.patch('cinder.utils.brick_get_connector_properties')
def test_restore_backup_new_encrypted_volume(self,
mock_connector_properties,
mock_backup_driver_restore,
mock_delete_encryption_key,
mock_clone_encryption_key):
"""Test restore of encrypted volume.
        Test handling of encryption key IDs when restoring to another
encrypted volume, i.e. a volume whose key ID is different from
the volume originally backed up.
- The volume's prior encryption key ID is deleted.
- The volume is assigned a fresh clone of the backup's encryption
key ID.
"""
vol_id = self._create_volume_db_entry(status='restoring-backup',
encryption_key_id=fake.UUID1)
backup = self._create_backup_db_entry(
volume_id=vol_id,
status=fields.BackupStatus.RESTORING,
encryption_key_id=fake.UUID2)
self.mock_object(self.backup_mgr, '_detach_device')
mock_attach_device = self.mock_object(self.backup_mgr,
'_attach_device')
mock_attach_device.return_value = {'device': {'path': '/dev/null'}}
mock_clone_encryption_key.return_value = fake.UUID3
# Mimic the driver's side effect where it updates the volume's
# metadata. For backups of encrypted volumes, this will essentially
# overwrite the volume's encryption key ID prior to the restore.
def restore_side_effect(backup, volume_id, volume_file):
db.volume_update(self.ctxt,
volume_id,
{'encryption_key_id': fake.UUID4})
mock_backup_driver_restore.side_effect = restore_side_effect
self.backup_mgr.restore_backup(self.ctxt, backup, vol_id)
# Volume's original encryption key ID should be deleted
mock_delete_encryption_key.assert_called_once_with(self.ctxt,
mock.ANY,
fake.UUID1)
# Backup's encryption key ID should have been cloned
mock_clone_encryption_key.assert_called_once_with(self.ctxt,
mock.ANY,
fake.UUID2)
# Volume should have the cloned backup key ID
volume = db.volume_get(self.ctxt, vol_id)
self.assertEqual(fake.UUID3, volume.encryption_key_id)
# Backup's key ID should not have changed
backup = db.backup_get(self.ctxt, backup.id)
self.assertEqual(fake.UUID2, backup.encryption_key_id)
@mock.patch('cinder.volume.utils.clone_encryption_key')
@mock.patch('cinder.volume.utils.delete_encryption_key')
@mock.patch(
'cinder.tests.unit.backup.fake_service.FakeBackupService.restore')
@mock.patch('cinder.utils.brick_get_connector_properties')
def test_restore_backup_glean_key_id(self,
mock_connector_properties,
mock_backup_driver_restore,
mock_delete_encryption_key,
mock_clone_encryption_key):
"""Test restore of encrypted volume.
Test restoring a backup that was created prior to when the encryption
key ID is saved in the backup DB. The backup encryption key ID is
gleaned from the restored volume.
"""
vol_id = self._create_volume_db_entry(status='restoring-backup',
encryption_key_id=fake.UUID1)
backup = self._create_backup_db_entry(
volume_id=vol_id,
status=fields.BackupStatus.RESTORING)
self.mock_object(self.backup_mgr, '_detach_device')
mock_attach_device = self.mock_object(self.backup_mgr,
'_attach_device')
mock_attach_device.return_value = {'device': {'path': '/dev/null'}}
mock_clone_encryption_key.return_value = fake.UUID3
# Mimic the driver's side effect where it updates the volume's
# metadata. For backups of encrypted volumes, this will essentially
# overwrite the volume's encryption key ID prior to the restore.
def restore_side_effect(backup, volume_id, volume_file):
db.volume_update(self.ctxt,
volume_id,
{'encryption_key_id': fake.UUID4})
mock_backup_driver_restore.side_effect = restore_side_effect
self.backup_mgr.restore_backup(self.ctxt, backup, vol_id)
# Volume's original encryption key ID should be deleted
mock_delete_encryption_key.assert_called_once_with(self.ctxt,
mock.ANY,
fake.UUID1)
# Backup's encryption key ID should have been cloned from
# the value restored from the metadata.
mock_clone_encryption_key.assert_called_once_with(self.ctxt,
mock.ANY,
fake.UUID4)
# Volume should have the cloned backup key ID
volume = db.volume_get(self.ctxt, vol_id)
self.assertEqual(fake.UUID3, volume.encryption_key_id)
# Backup's key ID should have been gleaned from value restored
# from the backup's metadata
backup = db.backup_get(self.ctxt, backup.id)
self.assertEqual(fake.UUID4, backup.encryption_key_id)
def test_delete_backup_with_bad_backup_status(self):
"""Test error handling.
        Test error handling when deleting a backup that has a bad status.
"""
vol_id = self._create_volume_db_entry(size=1)
backup = self._create_backup_db_entry(
status=fields.BackupStatus.AVAILABLE, volume_id=vol_id)
self.assertRaises(exception.InvalidBackup,
self.backup_mgr.delete_backup,
self.ctxt,
backup)
backup = db.backup_get(self.ctxt, backup.id)
self.assertEqual(fields.BackupStatus.ERROR, backup['status'])
def test_delete_backup_with_error(self):
"""Test error handling when an error occurs during backup deletion."""
vol_id = self._create_volume_db_entry(size=1)
backup = self._create_backup_db_entry(
status=fields.BackupStatus.DELETING,
display_name='fail_on_delete', volume_id=vol_id)
self.assertRaises(IOError,
self.backup_mgr.delete_backup,
self.ctxt,
backup)
backup = db.backup_get(self.ctxt, backup.id)
self.assertEqual(fields.BackupStatus.ERROR, backup['status'])
def test_delete_backup_with_bad_service(self):
"""Test error handling.
Test error handling when attempting a delete of a backup
with a different service to that used to create the backup.
"""
vol_id = self._create_volume_db_entry(size=1)
service = 'cinder.tests.backup.bad_service'
backup = self._create_backup_db_entry(
status=fields.BackupStatus.DELETING, volume_id=vol_id,
service=service)
self.assertRaises(exception.InvalidBackup,
self.backup_mgr.delete_backup,
self.ctxt,
backup)
backup = db.backup_get(self.ctxt, backup.id)
self.assertEqual(fields.BackupStatus.ERROR, backup['status'])
def test_delete_backup_with_no_service(self):
"""Test error handling.
Test error handling when attempting a delete of a backup
        with no service defined for that backup; relates to bug #1162908.
"""
vol_id = self._create_volume_db_entry(size=1)
backup = self._create_backup_db_entry(
status=fields.BackupStatus.DELETING, volume_id=vol_id)
backup.service = None
backup.save()
self.backup_mgr.delete_backup(self.ctxt, backup)
@ddt.data('cinder.tests.unit.backup.fake_service.FakeBackupService',
'cinder.tests.unit.backup.fake_service')
def test_delete_backup(self, service):
"""Test normal backup deletion."""
vol_id = self._create_volume_db_entry(size=1)
backup = self._create_backup_db_entry(
status=fields.BackupStatus.DELETING, volume_id=vol_id,
service=service)
self.backup_mgr.delete_backup(self.ctxt, backup)
self.assertRaises(exception.BackupNotFound,
db.backup_get,
self.ctxt,
backup.id)
ctxt_read_deleted = context.get_admin_context('yes')
backup = db.backup_get(ctxt_read_deleted, backup.id)
self.assertTrue(backup.deleted)
self.assertGreaterEqual(timeutils.utcnow(), backup.deleted_at)
self.assertEqual(fields.BackupStatus.DELETED, backup.status)
@mock.patch('cinder.volume.utils.delete_encryption_key')
def test_delete_backup_of_encrypted_volume(self,
mock_delete_encryption_key):
"""Test deletion of backup of encrypted volume"""
vol_id = self._create_volume_db_entry(
encryption_key_id=fake.UUID1)
backup = self._create_backup_db_entry(
volume_id=vol_id,
status=fields.BackupStatus.DELETING,
encryption_key_id=fake.UUID2)
self.backup_mgr.delete_backup(self.ctxt, backup)
mock_delete_encryption_key.assert_called_once_with(self.ctxt,
mock.ANY,
fake.UUID2)
ctxt_read_deleted = context.get_admin_context('yes')
backup = db.backup_get(ctxt_read_deleted, backup.id)
self.assertTrue(backup.deleted)
self.assertIsNone(backup.encryption_key_id)
@mock.patch('cinder.volume.utils.notify_about_backup_usage')
def test_delete_backup_with_notify(self, notify):
"""Test normal backup deletion with notifications."""
vol_id = self._create_volume_db_entry(size=1)
backup = self._create_backup_db_entry(
status=fields.BackupStatus.DELETING, volume_id=vol_id)
self.backup_mgr.delete_backup(self.ctxt, backup)
self.assertEqual(2, notify.call_count)
def test_list_backup(self):
project_id = str(uuid.uuid4())
backups = db.backup_get_all_by_project(self.ctxt, project_id)
self.assertEqual(0, len(backups))
self._create_backup_db_entry()
b2 = self._create_backup_db_entry(project_id=project_id)
backups = db.backup_get_all_by_project(self.ctxt, project_id)
self.assertEqual(1, len(backups))
self.assertEqual(b2.id, backups[0].id)
def test_backup_get_all_by_project_with_deleted(self):
"""Test deleted backups.
        Test that deleted backups don't show up in backup_get_all_by_project
        unless context.read_deleted is 'yes'.
"""
project_id = str(uuid.uuid4())
backups = db.backup_get_all_by_project(self.ctxt, project_id)
self.assertEqual(0, len(backups))
backup_keep = self._create_backup_db_entry(project_id=project_id)
backup = self._create_backup_db_entry(project_id=project_id)
db.backup_destroy(self.ctxt, backup.id)
backups = db.backup_get_all_by_project(self.ctxt, project_id)
self.assertEqual(1, len(backups))
self.assertEqual(backup_keep.id, backups[0].id)
ctxt_read_deleted = context.get_admin_context('yes')
backups = db.backup_get_all_by_project(ctxt_read_deleted, project_id)
self.assertEqual(2, len(backups))
def test_backup_get_all_by_host_with_deleted(self):
"""Test deleted backups.
        Test that deleted backups don't show up in backup_get_all_by_host
        unless context.read_deleted is 'yes'.
"""
backups = db.backup_get_all_by_host(self.ctxt, 'testhost')
self.assertEqual(0, len(backups))
backup_keep = self._create_backup_db_entry()
backup = self._create_backup_db_entry()
db.backup_destroy(self.ctxt, backup.id)
backups = db.backup_get_all_by_host(self.ctxt, 'testhost')
self.assertEqual(1, len(backups))
self.assertEqual(backup_keep.id, backups[0].id)
ctxt_read_deleted = context.get_admin_context('yes')
backups = db.backup_get_all_by_host(ctxt_read_deleted, 'testhost')
self.assertEqual(2, len(backups))
def test_backup_manager_driver_name(self):
"""Test mapping between backup services and backup drivers."""
self.override_config('backup_driver', "cinder.backup.services.swift")
backup_mgr = \
importutils.import_object(CONF.backup_manager)
self.assertEqual('cinder.backup.drivers.swift',
backup_mgr.driver_name)
def test_export_record_with_bad_service(self):
"""Test error handling.
Test error handling when attempting an export of a backup
record with a different service to that used to create the backup.
"""
vol_id = self._create_volume_db_entry(size=1)
service = 'cinder.tests.backup.bad_service'
backup = self._create_backup_db_entry(
status=fields.BackupStatus.AVAILABLE, volume_id=vol_id,
service=service)
self.assertRaises(exception.InvalidBackup,
self.backup_mgr.export_record,
self.ctxt,
backup)
def test_export_record_with_bad_backup_status(self):
"""Test error handling.
        Test error handling when exporting a backup record for a backup
        that has a bad status.
"""
vol_id = self._create_volume_db_entry(status='available',
size=1)
backup = self._create_backup_db_entry(status=fields.BackupStatus.ERROR,
volume_id=vol_id)
self.assertRaises(exception.InvalidBackup,
self.backup_mgr.export_record,
self.ctxt,
backup)
@ddt.data('cinder.tests.unit.backup.fake_service.FakeBackupService',
'cinder.tests.unit.backup.fake_service')
def test_export_record(self, service):
"""Test normal backup record export."""
vol_size = 1
vol_id = self._create_volume_db_entry(status='available',
size=vol_size)
backup = self._create_backup_db_entry(
status=fields.BackupStatus.AVAILABLE, volume_id=vol_id,
service=service)
export = self.backup_mgr.export_record(self.ctxt, backup)
self.assertEqual(service, export['backup_service'])
self.assertIn('backup_url', export)
def test_import_record_with_verify_not_implemented(self):
"""Test normal backup record import.
        Test the case where import succeeds when the driver does not
        support verify.
"""
vol_size = 1
backup_id = uuid.uuid4()
export = self._create_exported_record_entry(vol_size=vol_size,
exported_id=backup_id)
imported_record = self._create_export_record_db_entry(
backup_id=backup_id)
backup_hosts = []
self.backup_mgr.import_record(self.ctxt,
imported_record,
export['backup_service'],
export['backup_url'],
backup_hosts)
backup = db.backup_get(self.ctxt, imported_record.id)
self.assertEqual(fields.BackupStatus.AVAILABLE, backup['status'])
self.assertEqual(vol_size, backup['size'])
def test_import_record_with_wrong_id(self):
"""Test normal backup record import.
Test the case when import succeeds for the case that the
driver does not support verify.
"""
vol_size = 1
export = self._create_exported_record_entry(vol_size=vol_size)
imported_record = self._create_export_record_db_entry()
backup_hosts = []
self.assertRaises(exception.InvalidBackup,
self.backup_mgr.import_record,
self.ctxt,
imported_record,
export['backup_service'],
export['backup_url'],
backup_hosts)
def test_import_record_with_bad_service(self):
"""Test error handling.
Test error handling when attempting an import of a backup
record with a different service to that used to create the backup.
"""
export = self._create_exported_record_entry()
export['backup_service'] = 'cinder.tests.unit.backup.bad_service'
imported_record = self._create_export_record_db_entry()
# Test the case where the additional hosts list is empty
backup_hosts = []
self.assertRaises(exception.ServiceNotFound,
self.backup_mgr.import_record,
self.ctxt,
imported_record,
export['backup_service'],
export['backup_url'],
backup_hosts)
# Test that the import backup keeps calling other hosts to find a
# suitable host for the backup service
backup_hosts = ['fake1', 'fake2']
backup_hosts_expect = list(backup_hosts)
BackupAPI_import = 'cinder.backup.rpcapi.BackupAPI.import_record'
with mock.patch(BackupAPI_import) as _mock_backup_import:
self.backup_mgr.import_record(self.ctxt,
imported_record,
export['backup_service'],
export['backup_url'],
backup_hosts)
next_host = backup_hosts_expect.pop()
_mock_backup_import.assert_called_once_with(
self.ctxt,
next_host,
imported_record,
export['backup_service'],
export['backup_url'],
backup_hosts_expect)
def test_import_record_with_invalid_backup(self):
"""Test error handling.
Test error handling when attempting an import of a backup
record where the backup driver returns an exception.
"""
export = self._create_exported_record_entry()
backup_driver = self.backup_mgr.get_backup_driver(self.ctxt)
_mock_record_import_class = ('%s.%s.%s' %
(backup_driver.__module__,
backup_driver.__class__.__name__,
'import_record'))
imported_record = self._create_export_record_db_entry()
backup_hosts = []
with mock.patch(_mock_record_import_class) as _mock_record_import:
_mock_record_import.side_effect = FakeBackupException('fake')
self.assertRaises(exception.InvalidBackup,
self.backup_mgr.import_record,
self.ctxt,
imported_record,
export['backup_service'],
export['backup_url'],
backup_hosts)
self.assertTrue(_mock_record_import.called)
backup = db.backup_get(self.ctxt, imported_record.id)
self.assertEqual(fields.BackupStatus.ERROR, backup['status'])
def test_not_supported_driver_to_force_delete(self):
"""Test force delete check method for not supported drivers."""
self.override_config('backup_driver', 'cinder.backup.drivers.ceph')
self.backup_mgr = importutils.import_object(CONF.backup_manager)
result = self.backup_mgr.check_support_to_force_delete(self.ctxt)
self.assertFalse(result)
@mock.patch('cinder.backup.drivers.nfs.NFSBackupDriver.'
'_init_backup_repo_path', return_value=None)
@mock.patch('cinder.backup.drivers.nfs.NFSBackupDriver.'
'check_for_setup_error', return_value=None)
def test_check_support_to_force_delete(self, mock_check_configuration,
mock_init_backup_repo_path):
"""Test force delete check method for supported drivers."""
self.override_config('backup_driver', 'cinder.backup.drivers.nfs')
self.backup_mgr = importutils.import_object(CONF.backup_manager)
result = self.backup_mgr.check_support_to_force_delete(self.ctxt)
self.assertTrue(result)
def test_backup_has_dependent_backups(self):
"""Test backup has dependent backups.
Test the query of has_dependent_backups in backup object is correct.
"""
vol_size = 1
vol_id = self._create_volume_db_entry(size=vol_size)
backup = self._create_backup_db_entry(volume_id=vol_id)
self.assertFalse(backup.has_dependent_backups)
class BackupTestCaseWithVerify(BaseBackupTest):
"""Test Case for backups."""
def setUp(self):
self.override_config(
"backup_driver",
"cinder.tests.unit.backup.fake_service_with_verify")
super(BackupTestCaseWithVerify, self).setUp()
def test_import_record_with_verify(self):
"""Test normal backup record import.
        Test the case where import succeeds when the driver implements
        verify.
"""
vol_size = 1
backup_id = uuid.uuid4()
export = self._create_exported_record_entry(
vol_size=vol_size, exported_id=backup_id)
imported_record = self._create_export_record_db_entry(
backup_id=backup_id)
backup_hosts = []
backup_driver = self.backup_mgr.get_backup_driver(self.ctxt)
_mock_backup_verify_class = ('%s.%s.%s' %
(backup_driver.__module__,
backup_driver.__class__.__name__,
'verify'))
def mock_verify(backup_id):
backup = db.backup_get(self.ctxt, backup_id)
self.assertEqual(fields.BackupStatus.CREATING, backup['status'])
with mock.patch(_mock_backup_verify_class) as mock_backup_verify:
mock_backup_verify.side_effect = mock_verify
self.backup_mgr.import_record(self.ctxt,
imported_record,
export['backup_service'],
export['backup_url'],
backup_hosts)
backup = db.backup_get(self.ctxt, imported_record.id)
self.assertEqual(fields.BackupStatus.AVAILABLE, backup['status'])
self.assertEqual(vol_size, backup['size'])
def test_import_record_with_verify_invalid_backup(self):
"""Test error handling.
Test error handling when attempting an import of a backup
record where the backup driver returns an exception.
"""
vol_size = 1
backup_id = uuid.uuid4()
export = self._create_exported_record_entry(
vol_size=vol_size, exported_id=backup_id)
imported_record = self._create_export_record_db_entry(
backup_id=backup_id)
backup_hosts = []
backup_driver = self.backup_mgr.get_backup_driver(self.ctxt)
_mock_backup_verify_class = ('%s.%s.%s' %
(backup_driver.__module__,
backup_driver.__class__.__name__,
'verify'))
with mock.patch(_mock_backup_verify_class) as _mock_record_verify:
_mock_record_verify.side_effect = \
exception.InvalidBackup(reason='fake')
self.assertRaises(exception.InvalidBackup,
self.backup_mgr.import_record,
self.ctxt,
imported_record,
export['backup_service'],
export['backup_url'],
backup_hosts)
self.assertTrue(_mock_record_verify.called)
backup = db.backup_get(self.ctxt, imported_record.id)
self.assertEqual(fields.BackupStatus.ERROR, backup['status'])
@mock.patch.object(manager.BackupManager,
'_cleanup_temp_volumes_snapshots_for_one_backup')
def test_backup_reset_status_from_nonrestoring_to_available(
self, mock_clean_temp):
vol_id = self._create_volume_db_entry(status='available',
size=1)
backup = self._create_backup_db_entry(status=fields.BackupStatus.ERROR,
volume_id=vol_id)
with mock.patch.object(manager.BackupManager,
'_map_service_to_driver') as \
mock_map_service_to_driver:
            # It should work when the service name is a string
backup_driver = 'cinder.tests.unit.backup.fake_service_with_verify'
mock_map_service_to_driver.return_value = backup_driver
self.backup_mgr.reset_status(self.ctxt,
backup,
fields.BackupStatus.AVAILABLE)
mock_clean_temp.assert_called_once_with(self.ctxt, backup)
new_backup = db.backup_get(self.ctxt, backup.id)
self.assertEqual(fields.BackupStatus.AVAILABLE,
new_backup['status'])
mock_map_service_to_driver.return_value = backup_driver
self.backup_mgr.reset_status(self.ctxt,
backup,
fields.BackupStatus.ERROR)
mock_clean_temp.reset_mock()
self.backup_mgr.reset_status(self.ctxt,
backup,
fields.BackupStatus.AVAILABLE)
mock_clean_temp.assert_called_once_with(self.ctxt, backup)
backup = db.backup_get(self.ctxt, backup.id)
self.assertEqual(fields.BackupStatus.AVAILABLE, backup['status'])
def test_backup_reset_status_to_available_invalid_backup(self):
volume = db.volume_create(self.ctxt, {'status': 'available',
'host': 'test',
'provider_location': '',
'size': 1})
backup = self._create_backup_db_entry(status=fields.BackupStatus.ERROR,
volume_id=volume['id'])
backup_driver = self.backup_mgr.get_backup_driver(self.ctxt)
_mock_backup_verify_class = ('%s.%s.%s' %
(backup_driver.__module__,
backup_driver.__class__.__name__,
'verify'))
with mock.patch(_mock_backup_verify_class) as \
_mock_record_verify:
_mock_record_verify.side_effect = \
exception.BackupVerifyUnsupportedDriver(reason='fake')
self.assertRaises(exception.BackupVerifyUnsupportedDriver,
self.backup_mgr.reset_status,
self.ctxt,
backup,
fields.BackupStatus.AVAILABLE)
backup = db.backup_get(self.ctxt, backup.id)
self.assertEqual(fields.BackupStatus.ERROR, backup['status'])
@mock.patch.object(manager.BackupManager,
'_cleanup_temp_volumes_snapshots_for_one_backup')
def test_backup_reset_status_from_restoring_to_available(
self, mock_clean_temp):
volume = db.volume_create(self.ctxt,
{'status': 'available',
'host': 'test',
'provider_location': '',
'size': 1})
backup = self._create_backup_db_entry(
status=fields.BackupStatus.RESTORING,
volume_id=volume['id'])
self.backup_mgr.reset_status(self.ctxt, backup,
fields.BackupStatus.AVAILABLE)
mock_clean_temp.assert_called_once_with(self.ctxt, backup)
backup = db.backup_get(self.ctxt, backup.id)
self.assertEqual(fields.BackupStatus.AVAILABLE, backup['status'])
@mock.patch.object(manager.BackupManager,
'_cleanup_temp_volumes_snapshots_for_one_backup')
def test_backup_reset_status_to_error(self, mock_clean_temp):
volume = db.volume_create(self.ctxt,
{'status': 'available',
'host': 'test',
'provider_location': '',
'size': 1})
backup = self._create_backup_db_entry(
status=fields.BackupStatus.CREATING,
volume_id=volume['id'])
self.backup_mgr.reset_status(self.ctxt, backup,
fields.BackupStatus.ERROR)
mock_clean_temp.assert_called_once_with(self.ctxt, backup)
backup = db.backup_get(self.ctxt, backup['id'])
self.assertEqual(fields.BackupStatus.ERROR, backup['status'])
@ddt.ddt
class BackupAPITestCase(BaseBackupTest):
def setUp(self):
super(BackupAPITestCase, self).setUp()
self.api = api.API()
def test_get_all_wrong_all_tenants_value(self):
self.assertRaises(exception.InvalidParameterValue,
self.api.get_all, self.ctxt, {'all_tenants': 'bad'})
@mock.patch.object(objects, 'BackupList')
def test_get_all_no_all_tenants_value(self, mock_backuplist):
result = self.api.get_all(self.ctxt, {'key': 'value'})
self.assertFalse(mock_backuplist.get_all.called)
self.assertEqual(mock_backuplist.get_all_by_project.return_value,
result)
mock_backuplist.get_all_by_project.assert_called_once_with(
self.ctxt, self.ctxt.project_id, {'key': 'value'}, None, None,
None, None, None)
@mock.patch.object(objects, 'BackupList')
@ddt.data(False, 'false', '0', 0, 'no')
def test_get_all_false_value_all_tenants(
self, false_value, mock_backuplist):
result = self.api.get_all(self.ctxt, {'all_tenants': false_value,
'key': 'value'})
self.assertFalse(mock_backuplist.get_all.called)
self.assertEqual(mock_backuplist.get_all_by_project.return_value,
result)
mock_backuplist.get_all_by_project.assert_called_once_with(
self.ctxt, self.ctxt.project_id, {'key': 'value'}, None, None,
None, None, None)
@mock.patch.object(objects, 'BackupList')
@ddt.data(True, 'true', '1', 1, 'yes')
def test_get_all_true_value_all_tenants(
self, true_value, mock_backuplist):
result = self.api.get_all(self.ctxt, {'all_tenants': true_value,
'key': 'value'})
self.assertFalse(mock_backuplist.get_all_by_project.called)
self.assertEqual(mock_backuplist.get_all.return_value,
result)
mock_backuplist.get_all.assert_called_once_with(
self.ctxt, {'key': 'value'}, None, None, None, None, None)
@mock.patch.object(objects, 'BackupList')
def test_get_all_true_value_all_tenants_non_admin(self, mock_backuplist):
ctxt = context.RequestContext(uuid.uuid4(), uuid.uuid4())
result = self.api.get_all(ctxt, {'all_tenants': '1',
'key': 'value'})
self.assertFalse(mock_backuplist.get_all.called)
self.assertEqual(mock_backuplist.get_all_by_project.return_value,
result)
mock_backuplist.get_all_by_project.assert_called_once_with(
ctxt, ctxt.project_id, {'key': 'value'}, None, None, None, None,
None)
@mock.patch.object(api.API, '_get_available_backup_service_host',
return_value='fake_host')
@mock.patch.object(db, 'backup_create',
side_effect=db_exc.DBError())
def test_create_when_failed_to_create_backup_object(
self, mock_create,
mock_get_service):
# Create volume in admin context
volume_id = utils.create_volume(self.ctxt)['id']
# Will try to backup from a different context
new_context = copy.copy(self.ctxt)
new_context.user_id = uuid.uuid4()
new_context.project_id = uuid.uuid4()
        # The failure mode this test guards against is a "NotImplementedError:
        # Cannot load 'id' in the base class" being raised.
        # In more detail: in the try clause, if backup.create() fails with a
        # DB exception, backup.id is never assigned. However, the except
        # clause invokes backup.destroy() to do cleanup, which internally
        # tries to access backup.id.
self.assertRaises(db_exc.DBError, self.api.create,
context=new_context,
name="test_backup",
description="test backup description",
volume_id=volume_id,
container='volumebackups')
@mock.patch.object(api.API, '_get_available_backup_service_host',
return_value='fake_host')
@mock.patch.object(objects.Backup, '__init__',
side_effect=exception.InvalidInput(
reason='Failed to new'))
def test_create_when_failed_to_new_backup_object(self, mock_new,
mock_get_service):
volume_id = utils.create_volume(self.ctxt)['id']
        # The failure mode this test guards against is an "UnboundLocalError:
        # local variable 'backup' referenced before assignment" being raised.
        # In more detail: in the try clause, backup = objects.Backup(...)
        # raises an exception, so 'backup' is never assigned. But in the
        # except clause, 'backup' is referenced to invoke cleanup methods.
self.assertRaises(exception.InvalidInput, self.api.create,
context=self.ctxt,
name="test_backup",
description="test backup description",
volume_id=volume_id,
container='volumebackups')
@mock.patch('cinder.backup.rpcapi.BackupAPI.create_backup')
@mock.patch('cinder.backup.api.API._is_backup_service_enabled')
def test_create_backup_in_same_host(self, mock_is_enable,
mock_create):
self.override_config('backup_use_same_host', True)
mock_is_enable.return_value = True
self.ctxt.user_id = 'fake_user'
self.ctxt.project_id = 'fake_project'
volume_id = self._create_volume_db_entry(status='available',
host='testhost#lvm',
size=1)
backup = self.api.create(self.ctxt, None, None, volume_id, None)
self.assertEqual('testhost', backup.host)
@mock.patch.object(api.API, '_get_available_backup_service_host',
return_value='fake_host')
@mock.patch('cinder.backup.rpcapi.BackupAPI.create_backup')
def test_create_backup_from_snapshot_with_volume_in_use(
self, mock_create, mock_get_service):
self.ctxt.user_id = 'fake_user'
self.ctxt.project_id = 'fake_project'
volume_id = self._create_volume_db_entry(status='in-use')
snapshot = self._create_snapshot_db_entry(volume_id=volume_id)
backup = self.api.create(self.ctxt, None, None, volume_id, None,
snapshot_id=snapshot.id)
self.assertEqual(fields.BackupStatus.CREATING, backup.status)
volume = objects.Volume.get_by_id(self.ctxt, volume_id)
snapshot = objects.Snapshot.get_by_id(self.ctxt, snapshot.id)
self.assertEqual(fields.SnapshotStatus.BACKING_UP, snapshot.status)
self.assertEqual('in-use', volume.status)
@mock.patch.object(api.API, '_get_available_backup_service_host',
return_value='fake_host')
@mock.patch('cinder.backup.rpcapi.BackupAPI.create_backup')
@ddt.data(True, False)
def test_create_backup_resource_status(self, is_snapshot, mock_create,
mock_get_service):
self.ctxt.user_id = 'fake_user'
self.ctxt.project_id = 'fake_project'
volume_id = self._create_volume_db_entry(status='available')
snapshot = self._create_snapshot_db_entry(volume_id=volume_id)
if is_snapshot:
self.api.create(self.ctxt, None, None, volume_id, None,
snapshot_id=snapshot.id)
volume = objects.Volume.get_by_id(self.ctxt, volume_id)
snapshot = objects.Snapshot.get_by_id(self.ctxt, snapshot.id)
self.assertEqual('backing-up', snapshot.status)
self.assertEqual('available', volume.status)
else:
self.api.create(self.ctxt, None, None, volume_id, None)
volume = objects.Volume.get_by_id(self.ctxt, volume_id)
snapshot = objects.Snapshot.get_by_id(self.ctxt, snapshot.id)
self.assertEqual('available', snapshot.status)
self.assertEqual('backing-up', volume.status)
@mock.patch('cinder.backup.api.API._get_available_backup_service_host')
@mock.patch('cinder.backup.rpcapi.BackupAPI.restore_backup')
def test_restore_volume(self,
mock_rpcapi_restore,
mock_get_backup_host):
volume_id = self._create_volume_db_entry(status='available',
size=1)
backup = self._create_backup_db_entry(size=1,
status='available')
mock_get_backup_host.return_value = 'testhost'
self.api.restore(self.ctxt, backup.id, volume_id)
backup = objects.Backup.get_by_id(self.ctxt, backup.id)
self.assertEqual(volume_id, backup.restore_volume_id)
|
#! /usr/bin/env python3
'''
CBC Radio streams player/downloader
'''
from datetime import datetime
from argparse import ArgumentParser, OPTIONAL
from collections import namedtuple
import subprocess
import readline
import requests
from lxml import html
_STREAM_SNAPSHOT = [
("Radio One", "BC", "Kamloops",
"http://cbc_r1_kam.akacast.akamaistream.net/7/440/451661/v1/rc.akacast.akamaistream.net/cbc_r1_kam"),
("Radio One", "BC", "Kelowna",
"http://cbc_r1_kel.akacast.akamaistream.net/7/229/451661/v1/rc.akacast.akamaistream.net/cbc_r1_kel"),
("Radio One", "BC", "Prince George",
"http://cbc_r1_prg.akacast.akamaistream.net/7/966/451661/v1/rc.akacast.akamaistream.net/cbc_r1_prg"),
("Radio One", "BC", "Vancouver",
"http://cbc_r1_vcr.akacast.akamaistream.net/7/723/451661/v1/rc.akacast.akamaistream.net/cbc_r1_vcr"),
("Radio One", "BC", "Victoria",
"http://cbc_r1_vic.akacast.akamaistream.net/7/728/451661/v1/rc.akacast.akamaistream.net/cbc_r1_vic"),
("Radio One", "Yukon", "Whitehorse",
"http://cbc_r1_whs.akacast.akamaistream.net/7/319/451661/v1/rc.akacast.akamaistream.net/cbc_r1_whs"),
("Radio One", "Alberta", "Calgary",
"http://cbc_r1_cgy.akacast.akamaistream.net/7/298/451661/v1/rc.akacast.akamaistream.net/cbc_r1_cgy"),
("Radio One", "Alberta", "Edmonton",
"http://cbc_r1_edm.akacast.akamaistream.net/7/904/451661/v1/rc.akacast.akamaistream.net/cbc_r1_edm"),
("Radio One", "Saskatchewan", "Regina",
"http://cbc_r1_reg.akacast.akamaistream.net/7/666/451661/v1/rc.akacast.akamaistream.net/cbc_r1_reg"),
("Radio One", "Saskatchewan", "Saskatoon",
"http://cbc_r1_ssk.akacast.akamaistream.net/7/842/451661/v1/rc.akacast.akamaistream.net/cbc_r1_ssk"),
("Radio One", "Manitoba", "Winnipeg",
"http://cbc_r1_wpg.akacast.akamaistream.net/7/831/451661/v1/rc.akacast.akamaistream.net/cbc_r1_wpg"),
("Radio One", "Nunavut", "Iqaluit",
"http://cbc_r1_iqa.akacast.akamaistream.net/7/325/451661/v1/rc.akacast.akamaistream.net/cbc_r1_iqa"),
("Radio One", "Ontario", "Kitchener-Waterloo",
"http://cbc_r1_ekw.akacast.akamaistream.net/7/63/451661/v1/rc.akacast.akamaistream.net/cbc_r1_ekw"),
("Radio One", "Ontario", "London",
"http://cbc_r1_ldn.akacast.akamaistream.net/7/104/451661/v1/rc.akacast.akamaistream.net/cbc_r1_ldn"),
("Radio One", "Ontario", "Ottawa",
"http://cbc_r1_ott.akacast.akamaistream.net/7/613/451661/v1/rc.akacast.akamaistream.net/cbc_r1_ott"),
("Radio One", "Ontario", "Sudbury",
"http://cbc_r1_sud.akacast.akamaistream.net/7/380/451661/v1/rc.akacast.akamaistream.net/cbc_r1_sud"),
("Radio One", "Ontario", "Thunder Bay",
"http://cbc_r1_tba.akacast.akamaistream.net/7/245/451661/v1/rc.akacast.akamaistream.net/cbc_r1_tba"),
("Radio One", "Ontario", "Toronto",
"http://cbc_r1_tor.akacast.akamaistream.net/7/632/451661/v1/rc.akacast.akamaistream.net/cbc_r1_tor"),
("Radio One", "Ontario", "Windsor",
"http://cbc_r1_wdr.akacast.akamaistream.net/7/813/451661/v1/rc.akacast.akamaistream.net/cbc_r1_wdr"),
("Radio One", "Quebec", "Montreal",
"http://cbc_r1_mtl.akacast.akamaistream.net/7/35/451661/v1/rc.akacast.akamaistream.net/cbc_r1_mtl"),
("Radio One", "Quebec", "Nord Quebec",
"http://cbc_r1_n_mtl.akacast.akamaistream.net/7/823/451661/v1/rc.akacast.akamaistream.net/cbc_r1_n_mtl"),
("Radio One", "Quebec", "Quebec City",
"http://cbc_r1_qqu.akacast.akamaistream.net/7/29/451661/v1/rc.akacast.akamaistream.net/cbc_r1_qqu"),
("Radio One", "New Brunswick", "Fredericton",
"http://cbc_r1_frd.akacast.akamaistream.net/7/553/451661/v1/rc.akacast.akamaistream.net/cbc_r1_frd"),
("Radio One", "New Brunswick", "Moncton",
"http://cbc_r1_mct.akacast.akamaistream.net/7/383/451661/v1/rc.akacast.akamaistream.net/cbc_r1_mct"),
("Radio One", "New Brunswick", "Saint John",
"http://cbc_r1_snb.akacast.akamaistream.net/7/754/451661/v1/rc.akacast.akamaistream.net/cbc_r1_snb"),
("Radio One", "Prince Edward Island", "Charlottetown",
"http://cbc_r1_chr.akacast.akamaistream.net/7/169/451661/v1/rc.akacast.akamaistream.net/cbc_r1_chr"),
("Radio One", "Nova Scotia", "Cape Breton",
"http://cbc_r1_syd.akacast.akamaistream.net/7/897/451661/v1/rc.akacast.akamaistream.net/cbc_r1_syd"),
("Radio One", "Nova Scotia", "Halifax",
"http://cbc_r1_hfx.akacast.akamaistream.net/7/981/451661/v1/rc.akacast.akamaistream.net/cbc_r1_hfx"),
("Radio One", "Newfoundland & Labrador", "Corner Brook",
"http://cbc_r2_cor.akacast.akamaistream.net/7/550/451661/v1/rc.akacast.akamaistream.net/cbc_r1_cor"),
("Radio One", "Newfoundland & Labrador", "Grand Falls/Gander",
"http://cbc_r1_gfa.akacast.akamaistream.net/7/492/451661/v1/rc.akacast.akamaistream.net/cbc_r1_gfa"),
("Radio One", "Newfoundland & Labrador", "Labrador",
"http://cbc_r1_gba.akacast.akamaistream.net/7/274/451661/v1/rc.akacast.akamaistream.net/cbc_r1_gba"),
("Radio One", "Newfoundland & Labrador", "St. John's",
"http://cbc_r1_snf.akacast.akamaistream.net/7/750/451661/v1/rc.akacast.akamaistream.net/cbc_r1_snf"),
("Radio One", "Northwest Territories", "Inuvik",
"http://cbc_r1_ink.akacast.akamaistream.net/7/967/451661/v1/rc.akacast.akamaistream.net/cbc_r1_ink"),
("Radio One", "Northwest Territories", "Yellowknife",
"http://cbc_r1_ykn.akacast.akamaistream.net/7/369/451661/v1/rc.akacast.akamaistream.net/cbc_r1_ykn"),
("Radio Two", "Atlantic", "Halifax",
"http://cbc_r2_hfx.akacast.akamaistream.net/7/917/451661/v1/rc.akacast.akamaistream.net/cbc_r2_hfx"),
("Radio Two", "Eastern", "Toronto",
"http://cbc_r2_tor.akacast.akamaistream.net/7/364/451661/v1/rc.akacast.akamaistream.net/cbc_r2_tor"),
("Radio Two", "Central", "Winnipeg",
"http://cbc_r2_wpg.akacast.akamaistream.net/7/233/451661/v1/rc.akacast.akamaistream.net/cbc_r2_wpg"),
("Radio Two", "Mountain", "Edmonton",
"http://cbc_r2_edm.akacast.akamaistream.net/7/40/451661/v1/rc.akacast.akamaistream.net/cbc_r2_edm"),
("Radio Two", "Pacific", "Vancouver",
"http://cbc_r2_vcr.akacast.akamaistream.net/7/773/451661/v1/rc.akacast.akamaistream.net/cbc_r2_vcr"),
("Radio Two", "International", "Pacific",
"http://cbc_r2_ipt.akacast.akamaistream.net/7/669/451661/v1/rc.akacast.akamaistream.net/cbc_r2_ipt"),
("Radio Two", "International", "Eastern",
"http://cbc_r2_iet.akacast.akamaistream.net/7/50/451661/v1/rc.akacast.akamaistream.net/cbc_r2_iet"),
]
# CBC Music stream list page
_STREAMS = 'http://www.cbc.ca/radio/includes/streams.html'
# CBC Radio 2 Eastern (Toronto) stream URL
CBC_RADIO_2 = 'http://cbc_r2_tor.akacast.akamaistream.net' \
'/7/364/451661/v1/rc.akacast.akamaistream.net/cbc_r2_tor'
# CBC Radio 1 Ottawa stream URL
CBC_RADIO_1 = 'http://cbc_r1_ott.akacast.akamaistream.net' \
'/7/613/451661/v1/rc.akacast.akamaistream.net/cbc_r1_ott'
argument_parser = ArgumentParser(__doc__)
argument_parser.add_argument('-l', '--list', action='store_true')
argument_parser.add_argument('-t', '--tee', action='store_true')
mutex_group = argument_parser.add_mutually_exclusive_group(required=False)
# Ideally this would accept multiple positional words, but a positional
# argument inside a mutually exclusive group only supports nargs=OPTIONAL,
# hence the str.split workaround.
mutex_group.add_argument('stream', nargs=OPTIONAL, type=str.split,
help='Name of stream to play/record')
mutex_group.add_argument('-1', '--one', action='store_const', const=CBC_RADIO_1,
dest='url', help='CBC Radio One Eastern')
mutex_group.add_argument('-2', '--two', action='store_const', const=CBC_RADIO_2,
dest='url', help='CBC Radio Two Eastern')
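# Example invocations (a sketch; the script filename is a placeholder):
#   ./cbc_radio.py --list                  # list all known streams
#   ./cbc_radio.py -1                      # play CBC Radio One Ottawa
#   ./cbc_radio.py -t 'Radio Two Toronto'  # play and also record a matching stream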
PlaylistItem = namedtuple('PlaylistItem', ['radio', 'province', 'city', 'url'])
_COMPLETION_INDEX = {' '.join((radio, region, city)): url
for radio, region, city, url
in _STREAM_SNAPSHOT}
def get_streams():
'''
    Get CBC Radio music streams as {name: stream_url}.
    Parsing of the streams page is not implemented yet; the script currently
    falls back to the hard-coded _COMPLETION_INDEX built from _STREAM_SNAPSHOT.
    '''
r = requests.get(_STREAMS)
r.raise_for_status()
h = html.fromstring(r.content, base_url=r.url) # noqa
radio_one, radio_two = h.cssselect('table')
for row in radio_one.cssselect('tbody td'):
raise NotImplementedError()
for row in radio_two.cssselect('tbody td'):
raise NotImplementedError()
class Completer:
def __init__(self, streams):
self.streams = streams
self.previous_prefix = None
def complete(self, text, state):
if text != self.previous_prefix:
self.completions = [stream
for stream
in self.streams
if readline.get_line_buffer().strip() in stream]
self.previous_prefix = text
try:
return self.completions[state]
except IndexError:
return None
def mpv_cmdline(input_url):
'''
Return an mpv command-line to play BUT NOT record input_url.
'''
return ['mpv', '--vo=null', input_url]
def ffmpeg_cmdline(input_url, tee):
'''
Return a ffmpeg command to play and maybe record input_url.
:param tee: if True, also save to disk.
'''
return ['ffmpeg',
'-hide_banner',
'-nostdin',
'-i', f'async:{input_url}',
*([] if not tee else
['-f', 'mpegts',
'-c', 'copy',
f'''./{datetime.now()
.replace(microsecond=0)
.isoformat()}.m2ts''']),
'-f', 'alsa',
'default']
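# For reference, a sketch of the command built without recording (the URL is a
# placeholder): ffmpeg_cmdline('http://example.invalid/stream', tee=False) ->
#   ['ffmpeg', '-hide_banner', '-nostdin', '-i', 'async:http://example.invalid/stream',
#    '-f', 'alsa', 'default']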
def play(input_url, tee=False):
'''
Play input_url, optionally also saving to disk.
'''
subprocess.check_call(ffmpeg_cmdline(input_url, tee=tee))
def print_streams(streams):
'''
Pretty print streams.
'''
print(*sorted(streams), sep='\n')
def autocomplete(streams):
'''
    List the available choices, then prompt (with tab completion) for one item from streams.
'''
print_streams(streams)
    # The readline API provides no way to undo parse_and_bind.
readline.parse_and_bind('tab: complete')
try:
old_delims = readline.get_completer_delims()
readline.set_completer_delims('')
try:
old_completer = readline.get_completer()
readline.set_completer(Completer(streams).complete)
return streams[input('Playlist: ')]
finally:
readline.set_completer(old_completer)
finally:
readline.set_completer_delims(old_delims)
if __name__ == '__main__':
from sys import exit
args = argument_parser.parse_args()
#streams = get_streams()
streams = _COMPLETION_INDEX
if args.list:
print_streams(streams)
exit()
if args.url is not None:
stream_url = args.url
elif args.stream is None:
try:
stream_url = autocomplete(streams)
except (KeyboardInterrupt, EOFError):
exit(1)
else:
matches = {stream: url
for stream, url
in streams.items()
if all(map(stream.__contains__,
args.stream))}
if not matches:
exit(f'Not a valid stream: {" ".join(args.stream)}')
elif len(matches) > 1:
try:
stream_url = autocomplete(matches)
except (KeyboardInterrupt, EOFError):
exit(1)
else:
stream_url = next(iter(matches.values()))
play(stream_url, tee=args.tee)
|
# -*- coding: utf-8 -*-
"""
tests.unit.cloud
~~~~~~~~~~~~~~~~
"""
from __future__ import absolute_import, print_function, unicode_literals
import salt.cloud
from tests.support.unit import TestCase
class CloudTest(TestCase):
def test_vm_config_merger(self):
"""
Validate the vm's config is generated correctly.
https://github.com/saltstack/salt/issues/49226
"""
main = {
"minion": {"master": "172.31.39.213"},
"log_file": "var/log/salt/cloud.log",
"pool_size": 10,
}
provider = {
"private_key": "dwoz.pem",
"grains": {"foo1": "bar", "foo2": "bang"},
"availability_zone": "us-west-2b",
"driver": "ec2",
"ssh_interface": "private_ips",
"ssh_username": "admin",
"location": "us-west-2",
}
profile = {
"profile": "default",
"grains": {"meh2": "bar", "meh1": "foo"},
"provider": "ec2-default:ec2",
"ssh_username": "admin",
"image": "ami-0a1fbca0e5b419fd1",
"size": "t2.micro",
}
vm = salt.cloud.Cloud.vm_config("test_vm", main, provider, profile, {})
self.assertEqual(
{
"minion": {"master": "172.31.39.213"},
"log_file": "var/log/salt/cloud.log",
"pool_size": 10,
"private_key": "dwoz.pem",
"grains": {
"foo1": "bar",
"foo2": "bang",
"meh2": "bar",
"meh1": "foo",
},
"availability_zone": "us-west-2b",
"driver": "ec2",
"ssh_interface": "private_ips",
"ssh_username": "admin",
"location": "us-west-2",
"profile": "default",
"provider": "ec2-default:ec2",
"image": "ami-0a1fbca0e5b419fd1",
"size": "t2.micro",
"name": "test_vm",
},
vm,
)
|
import pandas as pd
import pingouin as pg
def pw_corr(data_path="data/TMA36_project/Radiomics/processed/rad_healthmyne.csv",
cde_path="data/TMA36_project/CDE/CDE_TMA36_2020FEB25_SA_MF.csv"):
rad_hm = pd.read_csv(data_path, index_col=0)
cde = pd.read_csv(cde_path, index_col=1)
cde_sila = pd.DataFrame(cde['SILA'])
rad_hm_sila = pd.merge(rad_hm, cde_sila, how='left', left_index=True, right_index=True)
    pairwise = rad_hm_sila.pairwise_corr(method='spearman', padjust='holm', columns=['SILA'])
    pairwise_sig = pairwise[pairwise['p-corr'] < 0.05]
return pairwise_sig
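# A minimal usage sketch (not part of the original module): it only runs if the
# project-specific CSV files referenced by the default arguments exist locally.
if __name__ == '__main__':
    significant_pairs = pw_corr()
    print(significant_pairs)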
|
import math
import numpy as np
from skimage.morphology.convex_hull import convex_hull_image
from scipy.ndimage.morphology import binary_dilation
def check_grasp_margin(target_mask_heightmap, depth_heightmap):
    # Ring of pixels around the target: dilate the target mask and subtract
    # the original mask.
    margin_mask = binary_dilation(target_mask_heightmap, iterations=10).astype(np.float32)-target_mask_heightmap
    margin_depth = margin_mask * depth_heightmap
    margin_depth[np.isnan(margin_depth)] = 0
    # Keep only margin pixels whose height lies in the 0.02-0.3 band
    # (clutter tall enough to interfere with a grasp), then binarize.
    margin_depth[margin_depth > 0.3] = 0
    margin_depth[margin_depth < 0.02] = 0
    margin_depth[margin_depth > 0] = 1
margin_value = np.sum(margin_depth)
return margin_value/np.sum(margin_mask), margin_value/np.sum(target_mask_heightmap)
def check_push_target_oriented(best_pix_ind, push_end_pix_yx, target_mask_heightmap, mask_count_threshold=5):
mask_hull = convex_hull_image(target_mask_heightmap)
mask_count = 0
x1 = best_pix_ind[2]
y1 = best_pix_ind[1]
x2 = push_end_pix_yx[1]
y2 = push_end_pix_yx[0]
x_range = abs(x2-x1)
y_range = abs(y2-y1)
if x_range > y_range:
k = (y2-y1)/(x2-x1)
b = y1-k*x1
for x in range(min(int(x1), int(x2)), max(int(x1), int(x2))+1):
y = int(k*x+b)
try:
mask_count += mask_hull[y, x]
except IndexError:
pass
else:
k = (x2-x1)/(y2-y1)
b = x1-k*y1
for y in range(min(int(y1), int(y2)), max(int(y1), int(y2))+1):
x = int(k*y+b)
try:
mask_count += mask_hull[y, x]
except IndexError:
pass
if mask_count > mask_count_threshold:
return True
else:
return False
def check_grasp_target_oriented(best_pix_ind, target_mask_heightmap):
mask_hull = convex_hull_image(target_mask_heightmap)
if mask_hull[int(best_pix_ind[1]), int(best_pix_ind[2])]:
return True
else:
return False
def get_push_pix(push_maps, num_rotations):
push_pix_ind = np.unravel_index(np.argmax(push_maps), push_maps.shape)
push_end_pix_yx = get_push_end_pix_yx(push_pix_ind, num_rotations)
return push_pix_ind, push_end_pix_yx
def get_push_end_pix_yx(push_pix_ind, num_rotations):
    # Unit push direction along +x, rotated below by the index-encoded angle;
    # push length is 0.1 expressed in pixels at a 0.002-per-pixel heightmap
    # resolution, i.e. 50 pixels.
    push_orientation = [1.0, 0.0]
    push_length_pix = 0.1/0.002
rotation_angle = np.deg2rad(push_pix_ind[0]*(360.0/num_rotations))
push_direction = np.asarray([push_orientation[0] * np.cos(rotation_angle) - push_orientation[1] * np.sin(rotation_angle),
push_orientation[0] * np.sin(rotation_angle) + push_orientation[1] * np.cos(rotation_angle)])
return [push_pix_ind[1] + push_direction[1] * push_length_pix, push_pix_ind[2] + push_direction[0] * push_length_pix]
def check_env_depth_change(prev_depth_heightmap, depth_heightmap, change_threshold=300):
depth_diff = abs(prev_depth_heightmap-depth_heightmap)
depth_diff[np.isnan(depth_diff)] = 0
depth_diff[depth_diff > 0.3] = 0
depth_diff[depth_diff < 0.02] = 0
depth_diff[depth_diff > 0] = 1
change_value = np.sum(depth_diff)
change_detected = change_value > change_threshold
return change_detected, change_value
def check_target_depth_change(prev_depth_heightmap, prev_target_mask_heightmap, depth_heightmap, change_threshold=50):
prev_mask_hull = binary_dilation(convex_hull_image(prev_target_mask_heightmap), iterations=5)
depth_diff = prev_mask_hull*(prev_depth_heightmap-depth_heightmap)
depth_diff[np.isnan(depth_diff)] = 0
depth_diff[depth_diff > 0.3] = 0
depth_diff[depth_diff < 0.02] = 0
depth_diff[depth_diff > 0] = 1
change_value = np.sum(depth_diff)
change_detected = change_value > change_threshold
return change_detected, change_value
def process_mask_heightmaps(segment_results, seg_mask_heightmaps):
names = []
heightmaps = []
for i in range(len(segment_results['labels'])):
name = segment_results['labels'][i]
heightmap = seg_mask_heightmaps[:, :, i]
if np.sum(heightmap) > 10:
names.append(name)
heightmaps.append(heightmap)
return {'names': names, 'heightmaps': heightmaps}
def get_replay_id(predicted_value_log, label_value_log, reward_value_log, sample_ind, replay_type):
# Prioritized experience replay, find sample with highest surprise value
sample_ind = np.asarray(sample_ind)
predicted_values = np.asarray(predicted_value_log)[sample_ind]
label_values = np.asarray(label_value_log)[sample_ind]
reward_values = np.asarray(reward_value_log)[sample_ind]
if replay_type == 'augment':
        # assume predicted values for different mask inputs are close
label_values = label_values - reward_values + 1.0
sample_surprise_values = np.abs(predicted_values - label_values)
sorted_surprise_ind = np.argsort(sample_surprise_values[:, 0])
sorted_sample_ind = sample_ind[sorted_surprise_ind]
    # Draw an index from a power-law distribution biased toward the end of the
    # sorted list, i.e. toward the samples with the highest surprise values.
    pow_law_exp = 2
    rand_sample_ind = int(np.round(np.random.power(pow_law_exp, 1) * (sample_ind.size - 1)))
sample_iteration = sorted_sample_ind[rand_sample_ind]
print(replay_type.capitalize(), 'replay: iteration %d (surprise value: %f)' %
(sample_iteration, sample_surprise_values[sorted_surprise_ind[rand_sample_ind]]))
return sample_iteration
def get_pointcloud(color_img, depth_img, masks_imgs, camera_intrinsics):
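    # camera_intrinsics is assumed to be a 3x3 pinhole intrinsics matrix
    # [[fx, 0, cx], [0, fy, cy], [0, 0, 1]], judging by the indexing below.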
# Get depth image size
im_h = depth_img.shape[0]
im_w = depth_img.shape[1]
# Project depth into 3D point cloud in camera coordinates
pix_x, pix_y = np.meshgrid(np.linspace(0, im_w-1, im_w), np.linspace(0, im_h-1, im_h))
cam_pts_x = np.multiply(pix_x-camera_intrinsics[0][2],depth_img/camera_intrinsics[0][0])
cam_pts_y = np.multiply(pix_y-camera_intrinsics[1][2],depth_img/camera_intrinsics[1][1])
cam_pts_z = depth_img.copy()
cam_pts_x.shape = (im_h*im_w, 1)
cam_pts_y.shape = (im_h*im_w, 1)
cam_pts_z.shape = (im_h*im_w, 1)
# Reshape image into colors for 3D point cloud
rgb_pts_r = color_img[:, :, 0]
rgb_pts_g = color_img[:, :, 1]
rgb_pts_b = color_img[:, :, 2]
rgb_pts_r.shape = (im_h*im_w, 1)
rgb_pts_g.shape = (im_h*im_w, 1)
rgb_pts_b.shape = (im_h*im_w, 1)
num_masks = masks_imgs.shape[2]
masks_pts = masks_imgs.copy()
masks_pts = masks_pts.transpose(2, 0, 1).reshape(num_masks, -1)
cam_pts = np.concatenate((cam_pts_x, cam_pts_y, cam_pts_z), axis=1)
rgb_pts = np.concatenate((rgb_pts_r, rgb_pts_g, rgb_pts_b), axis=1)
return cam_pts, rgb_pts, masks_pts
def get_heightmap(color_img, depth_img, masks_imgs, cam_intrinsics, cam_pose, workspace_limits, heightmap_resolution):
num_masks = masks_imgs.shape[2]
# Compute heightmap size
heightmap_size = np.round(((workspace_limits[1][1] - workspace_limits[1][0])/heightmap_resolution, (workspace_limits[0][1] - workspace_limits[0][0])/heightmap_resolution)).astype(int)
# Get 3D point cloud from RGB-D images
surface_pts, color_pts, masks_pts = get_pointcloud(color_img, depth_img, masks_imgs, cam_intrinsics)
# Transform 3D point cloud from camera coordinates to robot coordinates
surface_pts = np.transpose(np.dot(cam_pose[0:3,0:3],np.transpose(surface_pts)) + np.tile(cam_pose[0:3,3:],(1,surface_pts.shape[0])))
# Sort surface points by z value
sort_z_ind = np.argsort(surface_pts[:,2])
surface_pts = surface_pts[sort_z_ind]
color_pts = color_pts[sort_z_ind]
masks_pts = masks_pts[:, sort_z_ind]
# Filter out surface points outside heightmap boundaries
heightmap_valid_ind = np.logical_and(np.logical_and(np.logical_and(np.logical_and(surface_pts[:,0] >= workspace_limits[0][0], surface_pts[:,0] < workspace_limits[0][1]), surface_pts[:,1] >= workspace_limits[1][0]), surface_pts[:,1] < workspace_limits[1][1]), surface_pts[:,2] < workspace_limits[2][1])
surface_pts = surface_pts[heightmap_valid_ind]
color_pts = color_pts[heightmap_valid_ind]
masks_pts = masks_pts[:, heightmap_valid_ind]
# Create orthographic top-down-view RGB-D heightmaps
color_heightmap_r = np.zeros((heightmap_size[0], heightmap_size[1], 1), dtype=np.uint8)
color_heightmap_g = np.zeros((heightmap_size[0], heightmap_size[1], 1), dtype=np.uint8)
color_heightmap_b = np.zeros((heightmap_size[0], heightmap_size[1], 1), dtype=np.uint8)
masks_heightmaps = np.zeros((heightmap_size[0], heightmap_size[1], num_masks), dtype=np.uint8)
depth_heightmap = np.zeros(heightmap_size)
heightmap_pix_x = np.floor((surface_pts[:,0] - workspace_limits[0][0])/heightmap_resolution).astype(int)
heightmap_pix_y = np.floor((surface_pts[:,1] - workspace_limits[1][0])/heightmap_resolution).astype(int)
color_heightmap_r[heightmap_pix_y,heightmap_pix_x] = color_pts[:, [0]]
color_heightmap_g[heightmap_pix_y,heightmap_pix_x] = color_pts[:, [1]]
color_heightmap_b[heightmap_pix_y,heightmap_pix_x] = color_pts[:, [2]]
color_heightmap = np.concatenate((color_heightmap_r, color_heightmap_g, color_heightmap_b), axis=2)
for c in range(num_masks):
masks_heightmaps[heightmap_pix_y, heightmap_pix_x, c] = masks_pts[c, :]
depth_heightmap[heightmap_pix_y, heightmap_pix_x] = surface_pts[:, 2]
z_bottom = workspace_limits[2][0]
depth_heightmap = depth_heightmap - z_bottom
depth_heightmap[depth_heightmap < 0] = 0
depth_heightmap[depth_heightmap == -z_bottom] = np.nan
return color_heightmap, depth_heightmap, masks_heightmaps
# Get rotation matrix from euler angles
def euler2rotm(theta):
R_x = np.array([[1, 0, 0],
[0, math.cos(theta[0]), -math.sin(theta[0])],
[0, math.sin(theta[0]), math.cos(theta[0])]])
R_y = np.array([[math.cos(theta[1]), 0, math.sin(theta[1])],
[0, 1, 0],
[-math.sin(theta[1]), 0, math.cos(theta[1])]])
R_z = np.array([[math.cos(theta[2]), -math.sin(theta[2]), 0],
[math.sin(theta[2]), math.cos(theta[2]), 0],
[0, 0, 1]])
R = np.dot(R_z, np.dot(R_y, R_x))
return R
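# A minimal usage sketch (not part of the original module): a 90-degree yaw
# should yield an orthonormal rotation matrix.
if __name__ == '__main__':
    R = euler2rotm([0.0, 0.0, math.pi / 2])
    assert np.allclose(np.dot(R, R.T), np.eye(3))
    print(R)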
|
nome = input('Enter your name: ')
print(f'Welcome, {nome}!')
|
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
import msal
from typing import Optional, TYPE_CHECKING
from cdm.enums import EnvironmentType, AzureCloudEndpoint
if TYPE_CHECKING:
from cdm.utilities.network.token_provider import TokenProvider
class TelemetryConfig:
"""
Configuration information to establish a connection with the database for telemetry collection.
"""
# Default Kusto database log table names
CDM_INFOLOG_TABLE = 'infoLogs'
CDM_WARNINGLOG_TABLE = 'warningLogs'
CDM_ERRORLOG_TABLE = 'errorLogs'
def __init__(self, ingest_at_level: 'EnvironmentType', region: Optional[str] = None, **kwargs) -> None:
self.ingest_at_level = ingest_at_level # type: EnvironmentType
self.region = region # type: Optional[str]
self.tenant_id = kwargs.get('tenant_id', None) # type: Optional[str]
self.client_id = kwargs.get('client_id', None) # type: Optional[str]
self.secret = kwargs.get('secret', None) # type: Optional[str]
self.remove_user_content = kwargs.get('remove_user_content', True) # type: Optional[bool]
self.kusto_cluster_name = kwargs.get('cluster_name', None) # type: Optional[str]
self.kusto_database_name = kwargs.get('database_name', None) # type: Optional[str]
self.kusto_info_log_table = kwargs.get('info_table', self.CDM_INFOLOG_TABLE) # type: str
self.kusto_warning_log_table = kwargs.get('warning_table', self.CDM_WARNINGLOG_TABLE) # type: str
self.kusto_error_log_table = kwargs.get('error_table', self.CDM_ERRORLOG_TABLE) # type: str
self.cloud_instance = kwargs.get('cloud_instance', AzureCloudEndpoint.AZURE_PUBLIC) # type: AzureCloudEndpoint
self.token_provider = kwargs.get('token_provider', None) # type: Optional[TokenProvider]
# --- internal ---
self._context = None
def _get_authentication_token(self) -> Optional[str]:
"""
Get the authentication token using either AAD App credentials or a user-defined token provider.
"""
# User-defined token provider
if self.token_provider:
return self.token_provider.get_token()
# Get token by supplying AAD App credentials
elif self.tenant_id and self.client_id and self.secret \
and self.kusto_cluster_name and self.kusto_database_name \
and self.kusto_info_log_table and self.kusto_warning_log_table and self.kusto_error_log_table:
result = self._generate_kusto_token()
return result['token_type'] + ' ' + result['access_token']
# Throw an exception if neither method is configured
else:
raise Exception('Failed to get authentication token: No method configured to provide a token.')
def _build_context(self):
"""
Build the MSAL confidential client context on the first call.
Requires client_id, tenant_id and secret to be set.
"""
if self._context is None:
self._context = msal.ConfidentialClientApplication(
self.client_id,
authority=self.cloud_instance.value + self.tenant_id,
client_credential=self.secret)
def _generate_kusto_token(self) -> Optional[dict]:
"""
Generate a Bearer token for accessing the Kusto resource using MSAL.
"""
# Define the resource scope to be the current Kusto cluster
scope = ['https://{0}.kusto.windows.net/.default'.format(self.kusto_cluster_name)]
# Authenticate with AAD App credentials
self._build_context()
# Acquire token using MSAL
result = self._context.acquire_token_for_client(scopes=scope)
if result and 'error' in result:
error_description = result['error']
if 'error_description' in result:
error_description += ' error_description: ' + result['error_description']
raise Exception('There was an error while acquiring Kusto authorization Token with client ID/secret authentication. '
'Exception: ' + error_description)
if result is None or 'access_token' not in result or 'token_type' not in result:
raise Exception('Received invalid Kusto authentication result. '
'The result may be None, or missing the access_token and/or token_type fields in the authentication result.')
return result
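
# Hedged usage sketch (not part of the original module): build a TelemetryConfig
# with AAD App credentials so _get_authentication_token() can mint a Kusto
# bearer token. All values are placeholders, and the EnvironmentType member
# name used here is an assumption about that enum.
def _example_telemetry_config():
    config = TelemetryConfig(
        EnvironmentType.TEST,            # assumed enum member
        region='westus2',
        tenant_id='<tenant-guid>',
        client_id='<aad-app-client-id>',
        secret='<aad-app-secret>',
        cluster_name='<kusto-cluster>',
        database_name='<kusto-database>',
    )
    return config._get_authentication_token()   # 'Bearer <access token>'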
|
#!/usr/bin/env python
####################
# Required Modules #
####################
# Generic/Built-in
import os
import logging
import time
from typing import Callable
# Libs
import pytest
import structlog
# Custom
from conftest import (
SYSMETRIC_SUPPORTED_METADATA,
SYSMETRIC_TRACKERS,
DURATION,
POLL_INTERVAL,
extract_name,
reconfigure_global_structlog_params
)
from synlogger.config import SYSMETRICS_PREFIX, SYSMETRICS_PORT
from synlogger.utils import StructlogUtils
##################
# Configurations #
##################
file_path = os.path.abspath(__file__)
class_name = "SysmetricLoggerTest"
function_name = "test_SysmetricLogger_initialise"
###########################
# Tests - SysmetricLogger #
###########################
def test_SysmetricLogger_default_attributes(sysmetric_logger_default_params):
    """
    Tests for the correct initialisation defaults of the SysmetricLogger class
    # C1: logger_name contains the SYSMETRICS_PREFIX by default
    # C2: logging_variant defaults to "basic"
    # C3: server does not need to be specified by default
    # C4: port is assigned SYSMETRICS_PORT by default
    # C5: logging_level defaults to logging.INFO
    # C6: debugging_fields defaults to False
    # C7: filter_functions defaults to an empty list
    # C8: censor_keys defaults to an empty list (i.e. no information censored)
    """
# C1
assert SYSMETRICS_PREFIX in sysmetric_logger_default_params.logger_name
# C2
assert sysmetric_logger_default_params.logging_variant == "basic"
# C3
assert sysmetric_logger_default_params.server is None
# C4
assert sysmetric_logger_default_params.port == SYSMETRICS_PORT
# C5
assert sysmetric_logger_default_params.logging_level == logging.INFO
# C6
assert sysmetric_logger_default_params.debugging_fields == False
# C7
assert len(sysmetric_logger_default_params.filter_functions) == 0
# C8
assert len(sysmetric_logger_default_params.censor_keys) == 0
def test_SysmetricLogger_configure_processors(sysmetric_logger_default_params):
"""
Tests if sysmetric processors loaded are valid
# C1: All processors returned are functions
# C2: logging_renderer must be the last processor of the list
# C3: Sysmetric processors must be included alongside default processors
"""
# C1
processors = sysmetric_logger_default_params._configure_processors()
assert all(isinstance(_funct, Callable) for _funct in processors)
# C2
last_processor = processors[-1]
assert any(
extract_name(last_processor) == extract_name(_funct)
for _funct in [
StructlogUtils.graypy_structlog_processor,
structlog.processors.JSONRenderer(indent=1)
]
)
# C3
processors_names = [extract_name(processor) for processor in processors]
assert all(
extract_name(sys_tracker) in processors_names
for sys_tracker in SYSMETRIC_TRACKERS
)
def test_SysmetricLogger_is_tracking(sysmetric_logger):
"""
Tests if tracking state is toggling correctly. Note that while the state
tested here is dependent on .track() & .terminate(), we are only testing
for the change of state. The correctness of .track() & .terminate() is not
enforced here; they are assumed to work.
# C1: is_tracking returns False before tracking is started
# C2: is_tracking returns True after tracking is started
# C3: is_tracking returns False after tracking has been terminated
"""
# C1
assert sysmetric_logger.is_tracking() == False
# C2
sysmetric_logger.track(
file_path=file_path,
class_name=class_name,
function_name=function_name,
resolution=POLL_INTERVAL
)
assert sysmetric_logger.is_tracking() == True
# C3
sysmetric_logger.terminate()
assert sysmetric_logger.is_tracking() == False
def test_SysmetricLogger_track(sysmetric_logger):
"""
Tests if sysmetric process tracking starts & polls correctly
# C1: Before tracking is initialised, sysmetric_logger.tracker is None
# C2: After tracking is initialised, sysmetric_logger.tracker is not None
# C3: After tracking is initialised, tracking process is actively running
# C4: No. of trials recorded tallies with expected no. of records given
a predetermined polling interval over a specified duration
# C5: Each record detected has the appropriate metadata logged
# C6: Each sysmetric metadata logged has valid values
"""
# C1
assert sysmetric_logger.tracker is None
# Start tracking process to check for state changes
sysmetric_logger.track(
file_path=file_path,
class_name=class_name,
function_name=function_name,
resolution=POLL_INTERVAL
)
# C2
assert sysmetric_logger.tracker is not None
# C3
assert sysmetric_logger.tracker.is_alive()
with reconfigure_global_structlog_params(sysmetric_logger) as cap_logs:
sysmetric_logger.synlog.setLevel(logging.INFO) # V.IMPT!!!
###########################
# Implementation Footnote #
###########################
# [Cause]
# Structlog's log capture mechanism does not allow for log capturing
# from multiprocessed loggers with custom processors (i.e. non-global).
# [Problems]
# Sysmetric tracking is performed by running a backgrounded process
# polling for logs once every specified interval. Being a structlog
# logger, it suffers from the aforementioned limitations DURING TESTING.
# This results in the failure to capture logs for analysis/testing.
# [Solution]
# Manually simulate probing behaviour in the global context, using
# custom processors that are same as the ones running in the
# backgrounded logger.
trial_count = int(DURATION/POLL_INTERVAL)
for _ in range(trial_count):
sysmetric_logger._probe(
resolution=POLL_INTERVAL,
descriptors={
"ID_path": file_path,
"ID_class": class_name,
"ID_function": function_name
}
)
# C4
assert len(cap_logs) == trial_count
# C5
assert all(
set(SYSMETRIC_SUPPORTED_METADATA).issubset(list(record.keys()))
for record in cap_logs
)
for record in cap_logs:
# C6
assert record.get('logger') == sysmetric_logger.logger_name
assert record.get('file_path') == sysmetric_logger.file_path
level_name = logging.getLevelName(sysmetric_logger.logging_level)
assert record.get('level') == level_name.lower()
assert record.get('log_level') == level_name.lower()
assert record.get('level_number') == sysmetric_logger.logging_level
assert isinstance(record.get('timestamp'), str)
assert isinstance(record.get('ID_path'), str)
assert isinstance(record.get('ID_class'), str)
assert isinstance(record.get('ID_function'), str)
assert isinstance(record.get('cpu_percent'), float)
assert isinstance(record.get('memory_total'), int)
assert isinstance(record.get('memory_available'), int)
assert isinstance(record.get('memory_used'), int)
assert isinstance(record.get('memory_free'), int)
assert isinstance(record.get('disk_read_counter'), int)
assert isinstance(record.get('disk_write_counter'), int)
assert isinstance(record.get('disk_read_bytes'), int)
assert isinstance(record.get('disk_write_bytes'), int)
assert isinstance(record.get('net_bytes_sent'), int)
assert isinstance(record.get('net_bytes_recv'), int)
assert isinstance(record.get('net_packets_sent'), int)
assert isinstance(record.get('net_packets_recv'), int)
# Manually clean up process (no dependency on .terminate())
sysmetric_logger.tracker.terminate() # send SIGTERM signal to the child
sysmetric_logger.tracker.join()
exit_code = sysmetric_logger.tracker.exitcode
sysmetric_logger.tracker.close()
sysmetric_logger.tracker = None # Reset state of tracker
# assert exit_code == 0 # successful termination
def test_SysmetricLogger_terminate(sysmetric_logger):
"""
Tests if sysmetric process tracking terminates correctly
# C1: Before tracking is terminated, sysmetric_logger.tracker is not None
# C2: Before tracking is terminated, tracking process is actively running
# C3: After tracking is terminated, sysmetric_logger.tracker is None
# C4: After tracking is terminated, saved tracker is no longer running
# C5: Tracking was terminated gracefully
"""
sysmetric_logger.track(
file_path=file_path,
class_name=class_name,
function_name=function_name,
resolution=POLL_INTERVAL
)
time.sleep(DURATION)
# C1
assert sysmetric_logger.tracker is not None
# C2
assert sysmetric_logger.tracker.is_alive()
saved_tracker = sysmetric_logger.tracker
exit_code = sysmetric_logger.terminate()
# C3
assert sysmetric_logger.tracker is None
# C4
assert saved_tracker._closed
# C5
assert exit_code == 0
@pytest.mark.xfail(raises=RuntimeError)
def test_SysmetricLogger_premature_termination(sysmetric_logger):
"""
Tests if premature termination condition was caught and handled
# C1: Check that 'RuntimeError(f"Attempted to terminate logger XXX before
initialisation!")' is caught, due to Exception being raised when
checking for initialisation state in sysmetric_logger
"""
sysmetric_logger.terminate()
|
import hashlib
from ecdsa.curves import Ed25519, SECP256k1
from .principal import Principal
import ecdsa
class Identity:
def __init__(self, privkey = "", type = "ed25519", anonymous = False):
privkey = bytes(bytearray.fromhex(privkey))
self.anonymous = anonymous
if anonymous:
return
self.key_type = type
if type == 'secp256k1':
if len(privkey) > 0:
self.sk = ecdsa.SigningKey.from_string(privkey, curve=ecdsa.SECP256k1, hashfunc=hashlib.sha256)
else:
self.sk = ecdsa.SigningKey.generate(curve=ecdsa.SECP256k1, hashfunc=hashlib.sha256)
self._privkey = self.sk.to_string().hex()
self.vk = self.sk.get_verifying_key()
self._pubkey = self.vk.to_string().hex()
self._der_pubkey = self.vk.to_der()
elif type == 'ed25519':
if len(privkey) > 0:
self.sk = ecdsa.SigningKey.from_string(privkey, curve=ecdsa.Ed25519)
else:
self.sk = ecdsa.SigningKey.generate(curve=ecdsa.Ed25519)
self._privkey = self.sk.to_string().hex()
self.vk = self.sk.get_verifying_key()
self._pubkey = self.vk.to_string().hex()
self._der_pubkey = self.vk.to_der()
else:
raise ValueError('unsupported identity type')
@staticmethod
def from_pem(pem: str):
key = ecdsa.SigningKey.from_pem(pem)
privkey = key.to_string().hex()
type = "unknown"
if key.curve == Ed25519:
type = 'ed25519'
elif key.curve == SECP256k1:
type = 'secp256k1'
return Identity(privkey=privkey, type=type)
def to_pem(self):
pem = self.sk.to_pem(format="pkcs8")
return pem
def sender(self):
if self.anonymous:
return Principal.anonymous()
return Principal.self_authenticating(self._der_pubkey)
def sign(self, msg: bytes):
if self.anonymous:
return (None, None)
if self.key_type == 'ed25519':
sig = self.sk.sign(msg)
return (self._der_pubkey, sig)
elif self.key_type == 'secp256k1':
sig = self.sk.sign(msg)
return (self._der_pubkey, sig)
@property
def privkey(self):
return self._privkey
@property
def pubkey(self):
return self._pubkey
@property
def der_pubkey(self):
return self._der_pubkey
def __repr__(self):
return "Identity(" + self.key_type + ', ' + self._privkey + ", " + self._pubkey + ")"
def __str__(self):
return "(" + self.key_type + ', ' + self._privkey + ", " + self._pubkey + ")"
|
__version__ = '4.0.6'
def print_ludwig(s):
print(f'Ludwig-{__version__}: {s}', flush=True)
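
# Example (not part of the original module):
#   print_ludwig('loading corpus')   # prints: Ludwig-4.0.6: loading corpus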
|
from indy_common.authorize import auth_map
def test_auth_map_node():
node_rules = [(auth_map.adding_new_node, "0--ADD--services--*--['VALIDATOR']"),
(auth_map.adding_new_node_with_empty_services, "0--ADD--services--*--[]"),
(auth_map.demote_node, "0--EDIT--services--['VALIDATOR']--[]"),
(auth_map.promote_node, "0--EDIT--services--[]--['VALIDATOR']"),
(auth_map.change_node_ip, '0--EDIT--node_ip--*--*'),
(auth_map.change_node_port, '0--EDIT--node_port--*--*'),
(auth_map.change_client_ip, '0--EDIT--client_ip--*--*'),
(auth_map.change_client_port, '0--EDIT--client_port--*--*'),
(auth_map.change_bls_key, '0--EDIT--blskey--*--*')]
for (rule, rule_str) in node_rules:
assert rule.get_action_id() == rule_str
assert rule_str in auth_map.auth_map.keys()
def test_auth_map_nym():
nym_rules = [(auth_map.add_new_trustee, "1--ADD--role--*--0"),
(auth_map.add_new_steward, "1--ADD--role--*--2"),
(auth_map.add_new_endorser, "1--ADD--role--*--101"),
(auth_map.add_new_network_monitor, "1--ADD--role--*--201"),
(auth_map.add_new_identity_owner, '1--ADD--role--*--'),
(auth_map.key_rotation, '1--EDIT--verkey--*--*')]
for (rule, rule_str) in nym_rules:
assert rule.get_action_id() == rule_str
assert rule_str in auth_map.auth_map.keys()
def test_auth_map_txn_author_agreement():
rules = [(auth_map.txn_author_agreement, "4--ADD--*--*--*"), ]
for (rule, rule_str) in rules:
assert rule.get_action_id() == rule_str
assert rule_str in auth_map.auth_map.keys()
def test_auth_map_txn_author_agreement_aml():
rules = [(auth_map.txn_author_agreement_aml, "5--ADD--*--*--*"), ]
for (rule, rule_str) in rules:
assert rule.get_action_id() == rule_str
assert rule_str in auth_map.auth_map.keys()
def test_auth_map_attrib():
rules = [(auth_map.add_attrib, "100--ADD--*--*--*"),
(auth_map.edit_attrib, "100--EDIT--*--*--*")]
for (rule, rule_str) in rules:
assert rule.get_action_id() == rule_str
assert rule_str in auth_map.auth_map.keys()
def test_auth_map_schema():
rules = [(auth_map.add_schema, "101--ADD--*--*--*")]
for (rule, rule_str) in rules:
assert rule.get_action_id() == rule_str
assert rule_str in auth_map.auth_map.keys()
def test_auth_map_schema_for_omitted():
rules = [(auth_map.edit_schema, "101--EDIT--*--*--*")]
for (rule, rule_str) in rules:
assert rule.get_action_id() == rule_str
assert rule_str in auth_map.auth_map.keys()
def test_auth_map_claim_def():
rules = [(auth_map.add_claim_def, "102--ADD--*--*--*"),
(auth_map.edit_claim_def, "102--EDIT--*--*--*")]
for (rule, rule_str) in rules:
assert rule.get_action_id() == rule_str
assert rule_str in auth_map.auth_map.keys()
def test_auth_map_upgrade():
rules = [(auth_map.start_upgrade, "109--ADD--action--*--start"),
(auth_map.cancel_upgrade, "109--EDIT--action--start--cancel")]
for (rule, rule_str) in rules:
assert rule.get_action_id() == rule_str
assert rule_str in auth_map.auth_map.keys()
def test_auth_map_config():
rules = [(auth_map.pool_config, "111--EDIT--action--*--*"), ]
for (rule, rule_str) in rules:
assert rule.get_action_id() == rule_str
assert rule_str in auth_map.auth_map.keys()
def test_auth_map_action():
nym_rules = [(auth_map.pool_restart, "118--ADD--action--*--*"),
(auth_map.auth_rule, "120--EDIT--*--*--*"),
(auth_map.auth_rules, "122--EDIT--*--*--*"),
(auth_map.validator_info, "119--ADD--*--*--*")]
for (rule, rule_str) in nym_rules:
assert rule.get_action_id() == rule_str
assert rule_str in auth_map.auth_map.keys()
def test_auth_map_revoc_reg():
nym_rules = [(auth_map.add_revoc_reg_def, "113--ADD--*--*--*"),
(auth_map.add_revoc_reg_entry, "114--ADD--*--*--*"),
(auth_map.edit_revoc_reg_def, "113--EDIT--*--*--*"),
(auth_map.edit_revoc_reg_entry, "114--EDIT--*--*--*")]
for (rule, rule_str) in nym_rules:
assert rule.get_action_id() == rule_str
assert rule_str in auth_map.auth_map.keys()
def test_auth_map_disable_taa():
rules = [(auth_map.disable_txn_author_agreement, '8--ADD--*--*--*')]
for (rule, rule_str) in rules:
assert rule.get_action_id() == rule_str
assert rule_str in auth_map.auth_map.keys()
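
# Hedged helper sketch (not part of the original tests): the action ids
# asserted above all follow a '<txn_type>--<ADD|EDIT>--<field>--<old>--<new>'
# layout, so an id string can be split back into its parts for inspection.
def _split_action_id(action_id: str) -> dict:
    txn_type, action, field, old_value, new_value = action_id.split('--')
    return {'txn_type': txn_type, 'action': action, 'field': field,
            'old_value': old_value, 'new_value': new_value}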
|
# Copyright 2014 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
import datetime
import hashlib
import io
import json
import os
import tempfile
import unittest
import mock
import pytest
import six
from six.moves import http_client
from google.cloud.storage.retry import DEFAULT_RETRY_IF_GENERATION_SPECIFIED
def _make_credentials():
import google.auth.credentials
return mock.Mock(spec=google.auth.credentials.Credentials)
class Test_Blob(unittest.TestCase):
@staticmethod
def _make_one(*args, **kw):
from google.cloud.storage.blob import Blob
properties = kw.pop("properties", {})
blob = Blob(*args, **kw)
blob._properties.update(properties)
return blob
@staticmethod
def _get_default_timeout():
from google.cloud.storage.constants import _DEFAULT_TIMEOUT
return _DEFAULT_TIMEOUT
def test_ctor_wo_encryption_key(self):
BLOB_NAME = "blob-name"
bucket = _Bucket()
properties = {"key": "value"}
blob = self._make_one(BLOB_NAME, bucket=bucket, properties=properties)
self.assertIs(blob.bucket, bucket)
self.assertEqual(blob.name, BLOB_NAME)
self.assertEqual(blob._properties, properties)
self.assertFalse(blob._acl.loaded)
self.assertIs(blob._acl.blob, blob)
self.assertEqual(blob._encryption_key, None)
self.assertEqual(blob.kms_key_name, None)
def test_ctor_with_encoded_unicode(self):
blob_name = b"wet \xe2\x9b\xb5"
blob = self._make_one(blob_name, bucket=None)
unicode_name = u"wet \N{sailboat}"
self.assertNotIsInstance(blob.name, bytes)
self.assertIsInstance(blob.name, six.text_type)
self.assertEqual(blob.name, unicode_name)
def test_ctor_w_encryption_key(self):
KEY = b"01234567890123456789012345678901" # 32 bytes
BLOB_NAME = "blob-name"
bucket = _Bucket()
blob = self._make_one(BLOB_NAME, bucket=bucket, encryption_key=KEY)
self.assertEqual(blob._encryption_key, KEY)
self.assertEqual(blob.kms_key_name, None)
def test_ctor_w_kms_key_name_and_encryption_key(self):
KEY = b"01234567890123456789012345678901" # 32 bytes
KMS_RESOURCE = (
"projects/test-project-123/"
"locations/us/"
"keyRings/test-ring/"
"cryptoKeys/test-key"
)
BLOB_NAME = "blob-name"
bucket = _Bucket()
with self.assertRaises(ValueError):
self._make_one(
BLOB_NAME, bucket=bucket, encryption_key=KEY, kms_key_name=KMS_RESOURCE
)
def test_ctor_w_kms_key_name(self):
KMS_RESOURCE = (
"projects/test-project-123/"
"locations/us/"
"keyRings/test-ring/"
"cryptoKeys/test-key"
)
BLOB_NAME = "blob-name"
bucket = _Bucket()
blob = self._make_one(BLOB_NAME, bucket=bucket, kms_key_name=KMS_RESOURCE)
self.assertEqual(blob._encryption_key, None)
self.assertEqual(blob.kms_key_name, KMS_RESOURCE)
def test_ctor_with_generation(self):
BLOB_NAME = "blob-name"
GENERATION = 12345
bucket = _Bucket()
blob = self._make_one(BLOB_NAME, bucket=bucket, generation=GENERATION)
self.assertEqual(blob.generation, GENERATION)
def _set_properties_helper(self, kms_key_name=None):
import datetime
from google.cloud._helpers import UTC
from google.cloud._helpers import _RFC3339_MICROS
now = datetime.datetime.utcnow().replace(tzinfo=UTC)
NOW = now.strftime(_RFC3339_MICROS)
BLOB_NAME = "blob-name"
GENERATION = 12345
BLOB_ID = "name/{}/{}".format(BLOB_NAME, GENERATION)
SELF_LINK = "http://example.com/self/"
METAGENERATION = 23456
SIZE = 12345
MD5_HASH = "DEADBEEF"
MEDIA_LINK = "http://example.com/media/"
ENTITY = "project-owner-12345"
ENTITY_ID = "23456"
CRC32C = "FACE0DAC"
COMPONENT_COUNT = 2
ETAG = "ETAG"
resource = {
"id": BLOB_ID,
"selfLink": SELF_LINK,
"generation": GENERATION,
"metageneration": METAGENERATION,
"contentType": "text/plain",
"timeCreated": NOW,
"updated": NOW,
"timeDeleted": NOW,
"storageClass": "NEARLINE",
"timeStorageClassUpdated": NOW,
"size": SIZE,
"md5Hash": MD5_HASH,
"mediaLink": MEDIA_LINK,
"contentEncoding": "gzip",
"contentDisposition": "inline",
"contentLanguage": "en-US",
"cacheControl": "private",
"metadata": {"foo": "Foo"},
"owner": {"entity": ENTITY, "entityId": ENTITY_ID},
"crc32c": CRC32C,
"componentCount": COMPONENT_COUNT,
"etag": ETAG,
"customTime": NOW,
}
if kms_key_name is not None:
resource["kmsKeyName"] = kms_key_name
bucket = _Bucket()
blob = self._make_one(BLOB_NAME, bucket=bucket)
blob._set_properties(resource)
self.assertEqual(blob.id, BLOB_ID)
self.assertEqual(blob.self_link, SELF_LINK)
self.assertEqual(blob.generation, GENERATION)
self.assertEqual(blob.metageneration, METAGENERATION)
self.assertEqual(blob.content_type, "text/plain")
self.assertEqual(blob.time_created, now)
self.assertEqual(blob.updated, now)
self.assertEqual(blob.time_deleted, now)
self.assertEqual(blob.storage_class, "NEARLINE")
self.assertEqual(blob.size, SIZE)
self.assertEqual(blob.md5_hash, MD5_HASH)
self.assertEqual(blob.media_link, MEDIA_LINK)
self.assertEqual(blob.content_encoding, "gzip")
self.assertEqual(blob.content_disposition, "inline")
self.assertEqual(blob.content_language, "en-US")
self.assertEqual(blob.cache_control, "private")
self.assertEqual(blob.metadata, {"foo": "Foo"})
self.assertEqual(blob.owner, {"entity": ENTITY, "entityId": ENTITY_ID})
self.assertEqual(blob.crc32c, CRC32C)
self.assertEqual(blob.component_count, COMPONENT_COUNT)
self.assertEqual(blob.etag, ETAG)
self.assertEqual(blob.custom_time, now)
if kms_key_name is not None:
self.assertEqual(blob.kms_key_name, kms_key_name)
else:
self.assertIsNone(blob.kms_key_name)
def test__set_properties_wo_kms_key_name(self):
self._set_properties_helper()
def test__set_properties_w_kms_key_name(self):
kms_resource = (
"projects/test-project-123/"
"locations/us/"
"keyRings/test-ring/"
"cryptoKeys/test-key"
)
self._set_properties_helper(kms_key_name=kms_resource)
def test_chunk_size_ctor(self):
from google.cloud.storage.blob import Blob
BLOB_NAME = "blob-name"
BUCKET = object()
chunk_size = 10 * Blob._CHUNK_SIZE_MULTIPLE
blob = self._make_one(BLOB_NAME, bucket=BUCKET, chunk_size=chunk_size)
self.assertEqual(blob._chunk_size, chunk_size)
def test_chunk_size_getter(self):
BLOB_NAME = "blob-name"
BUCKET = object()
blob = self._make_one(BLOB_NAME, bucket=BUCKET)
self.assertIsNone(blob.chunk_size)
VALUE = object()
blob._chunk_size = VALUE
self.assertIs(blob.chunk_size, VALUE)
def test_chunk_size_setter(self):
BLOB_NAME = "blob-name"
BUCKET = object()
blob = self._make_one(BLOB_NAME, bucket=BUCKET)
self.assertIsNone(blob._chunk_size)
blob._CHUNK_SIZE_MULTIPLE = 10
blob.chunk_size = 20
self.assertEqual(blob._chunk_size, 20)
def test_chunk_size_setter_bad_value(self):
BLOB_NAME = "blob-name"
BUCKET = object()
blob = self._make_one(BLOB_NAME, bucket=BUCKET)
self.assertIsNone(blob._chunk_size)
blob._CHUNK_SIZE_MULTIPLE = 10
with self.assertRaises(ValueError):
blob.chunk_size = 11
def test_acl_property(self):
from google.cloud.storage.acl import ObjectACL
fake_bucket = _Bucket()
blob = self._make_one(u"name", bucket=fake_bucket)
acl = blob.acl
self.assertIsInstance(acl, ObjectACL)
self.assertIs(acl, blob._acl)
def test_path_bad_bucket(self):
fake_bucket = object()
name = u"blob-name"
blob = self._make_one(name, bucket=fake_bucket)
self.assertRaises(AttributeError, getattr, blob, "path")
def test_path_no_name(self):
bucket = _Bucket()
blob = self._make_one(u"", bucket=bucket)
self.assertRaises(ValueError, getattr, blob, "path")
def test_path_normal(self):
BLOB_NAME = "blob-name"
bucket = _Bucket()
blob = self._make_one(BLOB_NAME, bucket=bucket)
self.assertEqual(blob.path, "/b/name/o/%s" % BLOB_NAME)
def test_path_w_slash_in_name(self):
BLOB_NAME = "parent/child"
bucket = _Bucket()
blob = self._make_one(BLOB_NAME, bucket=bucket)
self.assertEqual(blob.path, "/b/name/o/parent%2Fchild")
def test_path_with_non_ascii(self):
blob_name = u"Caf\xe9"
bucket = _Bucket()
blob = self._make_one(blob_name, bucket=bucket)
self.assertEqual(blob.path, "/b/name/o/Caf%C3%A9")
def test_bucket_readonly_property(self):
blob_name = "BLOB"
bucket = _Bucket()
other = _Bucket()
blob = self._make_one(blob_name, bucket=bucket)
with self.assertRaises(AttributeError):
blob.bucket = other
def test_client(self):
blob_name = "BLOB"
bucket = _Bucket()
blob = self._make_one(blob_name, bucket=bucket)
self.assertIs(blob.client, bucket.client)
def test_user_project(self):
user_project = "user-project-123"
blob_name = "BLOB"
bucket = _Bucket(user_project=user_project)
blob = self._make_one(blob_name, bucket=bucket)
self.assertEqual(blob.user_project, user_project)
def test__encryption_headers_wo_encryption_key(self):
BLOB_NAME = "blob-name"
bucket = _Bucket()
blob = self._make_one(BLOB_NAME, bucket=bucket)
expected = {}
self.assertEqual(blob._encryption_headers(), expected)
def test__encryption_headers_w_encryption_key(self):
key = b"aa426195405adee2c8081bb9e7e74b19"
header_key_value = "YWE0MjYxOTU0MDVhZGVlMmM4MDgxYmI5ZTdlNzRiMTk="
header_key_hash_value = "V3Kwe46nKc3xLv96+iJ707YfZfFvlObta8TQcx2gpm0="
BLOB_NAME = "blob-name"
bucket = _Bucket()
blob = self._make_one(BLOB_NAME, bucket=bucket, encryption_key=key)
expected = {
"X-Goog-Encryption-Algorithm": "AES256",
"X-Goog-Encryption-Key": header_key_value,
"X-Goog-Encryption-Key-Sha256": header_key_hash_value,
}
self.assertEqual(blob._encryption_headers(), expected)
def test__query_params_default(self):
BLOB_NAME = "blob-name"
bucket = _Bucket()
blob = self._make_one(BLOB_NAME, bucket=bucket)
self.assertEqual(blob._query_params, {})
def test__query_params_w_user_project(self):
user_project = "user-project-123"
BLOB_NAME = "BLOB"
bucket = _Bucket(user_project=user_project)
blob = self._make_one(BLOB_NAME, bucket=bucket)
self.assertEqual(blob._query_params, {"userProject": user_project})
def test__query_params_w_generation(self):
generation = 123456
BLOB_NAME = "BLOB"
bucket = _Bucket()
blob = self._make_one(BLOB_NAME, bucket=bucket, generation=generation)
self.assertEqual(blob._query_params, {"generation": generation})
def test_public_url(self):
BLOB_NAME = "blob-name"
bucket = _Bucket()
blob = self._make_one(BLOB_NAME, bucket=bucket)
self.assertEqual(
blob.public_url, "https://storage.googleapis.com/name/%s" % BLOB_NAME
)
def test_public_url_w_slash_in_name(self):
BLOB_NAME = "parent/child"
bucket = _Bucket()
blob = self._make_one(BLOB_NAME, bucket=bucket)
self.assertEqual(
blob.public_url, "https://storage.googleapis.com/name/parent/child"
)
def test_public_url_w_tilde_in_name(self):
BLOB_NAME = "foo~bar"
bucket = _Bucket()
blob = self._make_one(BLOB_NAME, bucket=bucket)
self.assertEqual(blob.public_url, "https://storage.googleapis.com/name/foo~bar")
def test_public_url_with_non_ascii(self):
blob_name = u"winter \N{snowman}"
bucket = _Bucket()
blob = self._make_one(blob_name, bucket=bucket)
expected_url = "https://storage.googleapis.com/name/winter%20%E2%98%83"
self.assertEqual(blob.public_url, expected_url)
def test_generate_signed_url_w_invalid_version(self):
BLOB_NAME = "blob-name"
EXPIRATION = "2014-10-16T20:34:37.000Z"
connection = _Connection()
client = _Client(connection)
bucket = _Bucket(client)
blob = self._make_one(BLOB_NAME, bucket=bucket)
with self.assertRaises(ValueError):
blob.generate_signed_url(EXPIRATION, version="nonesuch")
def _generate_signed_url_helper(
self,
version=None,
blob_name="blob-name",
api_access_endpoint=None,
method="GET",
content_md5=None,
content_type=None,
response_type=None,
response_disposition=None,
generation=None,
headers=None,
query_parameters=None,
credentials=None,
expiration=None,
encryption_key=None,
access_token=None,
service_account_email=None,
virtual_hosted_style=False,
bucket_bound_hostname=None,
scheme="http",
):
from six.moves.urllib import parse
from google.cloud._helpers import UTC
from google.cloud.storage._helpers import _bucket_bound_hostname_url
from google.cloud.storage.blob import _API_ACCESS_ENDPOINT
from google.cloud.storage.blob import _get_encryption_headers
api_access_endpoint = api_access_endpoint or _API_ACCESS_ENDPOINT
delta = datetime.timedelta(hours=1)
if expiration is None:
expiration = datetime.datetime.utcnow().replace(tzinfo=UTC) + delta
connection = _Connection()
client = _Client(connection)
bucket = _Bucket(client)
blob = self._make_one(blob_name, bucket=bucket, encryption_key=encryption_key)
if version is None:
effective_version = "v2"
else:
effective_version = version
to_patch = "google.cloud.storage.blob.generate_signed_url_{}".format(
effective_version
)
with mock.patch(to_patch) as signer:
signed_uri = blob.generate_signed_url(
expiration=expiration,
api_access_endpoint=api_access_endpoint,
method=method,
credentials=credentials,
content_md5=content_md5,
content_type=content_type,
response_type=response_type,
response_disposition=response_disposition,
generation=generation,
headers=headers,
query_parameters=query_parameters,
version=version,
access_token=access_token,
service_account_email=service_account_email,
virtual_hosted_style=virtual_hosted_style,
bucket_bound_hostname=bucket_bound_hostname,
)
self.assertEqual(signed_uri, signer.return_value)
if credentials is None:
expected_creds = _Connection.credentials
else:
expected_creds = credentials
encoded_name = blob_name.encode("utf-8")
quoted_name = parse.quote(encoded_name, safe=b"/~")
if virtual_hosted_style:
expected_api_access_endpoint = "https://{}.storage.googleapis.com".format(
bucket.name
)
elif bucket_bound_hostname:
expected_api_access_endpoint = _bucket_bound_hostname_url(
bucket_bound_hostname, scheme
)
else:
expected_api_access_endpoint = api_access_endpoint
expected_resource = "/{}/{}".format(bucket.name, quoted_name)
if virtual_hosted_style or bucket_bound_hostname:
expected_resource = "/{}".format(quoted_name)
if encryption_key is not None:
expected_headers = headers or {}
if effective_version == "v2":
expected_headers["X-Goog-Encryption-Algorithm"] = "AES256"
else:
expected_headers.update(_get_encryption_headers(encryption_key))
else:
expected_headers = headers
expected_kwargs = {
"resource": expected_resource,
"expiration": expiration,
"api_access_endpoint": expected_api_access_endpoint,
"method": method.upper(),
"content_md5": content_md5,
"content_type": content_type,
"response_type": response_type,
"response_disposition": response_disposition,
"generation": generation,
"headers": expected_headers,
"query_parameters": query_parameters,
"access_token": access_token,
"service_account_email": service_account_email,
}
signer.assert_called_once_with(expected_creds, **expected_kwargs)
def test_generate_signed_url_no_version_passed_warning(self):
self._generate_signed_url_helper()
def _generate_signed_url_v2_helper(self, **kw):
version = "v2"
self._generate_signed_url_helper(version, **kw)
def test_generate_signed_url_v2_w_defaults(self):
self._generate_signed_url_v2_helper()
def test_generate_signed_url_v2_w_expiration(self):
from google.cloud._helpers import UTC
expiration = datetime.datetime.utcnow().replace(tzinfo=UTC)
self._generate_signed_url_v2_helper(expiration=expiration)
def test_generate_signed_url_v2_w_non_ascii_name(self):
BLOB_NAME = u"\u0410\u043a\u043a\u043e\u0440\u0434\u044b.txt"
self._generate_signed_url_v2_helper(blob_name=BLOB_NAME)
def test_generate_signed_url_v2_w_slash_in_name(self):
BLOB_NAME = "parent/child"
self._generate_signed_url_v2_helper(blob_name=BLOB_NAME)
def test_generate_signed_url_v2_w_tilde_in_name(self):
BLOB_NAME = "foo~bar"
self._generate_signed_url_v2_helper(blob_name=BLOB_NAME)
def test_generate_signed_url_v2_w_endpoint(self):
self._generate_signed_url_v2_helper(
api_access_endpoint="https://api.example.com/v1"
)
def test_generate_signed_url_v2_w_method(self):
self._generate_signed_url_v2_helper(method="POST")
def test_generate_signed_url_v2_w_lowercase_method(self):
self._generate_signed_url_v2_helper(method="get")
def test_generate_signed_url_v2_w_content_md5(self):
self._generate_signed_url_v2_helper(content_md5="FACEDACE")
def test_generate_signed_url_v2_w_content_type(self):
self._generate_signed_url_v2_helper(content_type="text.html")
def test_generate_signed_url_v2_w_response_type(self):
self._generate_signed_url_v2_helper(response_type="text.html")
def test_generate_signed_url_v2_w_response_disposition(self):
self._generate_signed_url_v2_helper(response_disposition="inline")
def test_generate_signed_url_v2_w_generation(self):
self._generate_signed_url_v2_helper(generation=12345)
def test_generate_signed_url_v2_w_headers(self):
self._generate_signed_url_v2_helper(headers={"x-goog-foo": "bar"})
def test_generate_signed_url_v2_w_csek(self):
self._generate_signed_url_v2_helper(encryption_key=os.urandom(32))
def test_generate_signed_url_v2_w_csek_and_headers(self):
self._generate_signed_url_v2_helper(
encryption_key=os.urandom(32), headers={"x-goog-foo": "bar"}
)
def test_generate_signed_url_v2_w_credentials(self):
credentials = object()
self._generate_signed_url_v2_helper(credentials=credentials)
def _generate_signed_url_v4_helper(self, **kw):
version = "v4"
self._generate_signed_url_helper(version, **kw)
def test_generate_signed_url_v4_w_defaults(self):
self._generate_signed_url_v4_helper()
def test_generate_signed_url_v4_w_non_ascii_name(self):
BLOB_NAME = u"\u0410\u043a\u043a\u043e\u0440\u0434\u044b.txt"
self._generate_signed_url_v4_helper(blob_name=BLOB_NAME)
def test_generate_signed_url_v4_w_slash_in_name(self):
BLOB_NAME = "parent/child"
self._generate_signed_url_v4_helper(blob_name=BLOB_NAME)
def test_generate_signed_url_v4_w_tilde_in_name(self):
BLOB_NAME = "foo~bar"
self._generate_signed_url_v4_helper(blob_name=BLOB_NAME)
def test_generate_signed_url_v4_w_endpoint(self):
self._generate_signed_url_v4_helper(
api_access_endpoint="https://api.example.com/v1"
)
def test_generate_signed_url_v4_w_method(self):
self._generate_signed_url_v4_helper(method="POST")
def test_generate_signed_url_v4_w_lowercase_method(self):
self._generate_signed_url_v4_helper(method="get")
def test_generate_signed_url_v4_w_content_md5(self):
self._generate_signed_url_v4_helper(content_md5="FACEDACE")
def test_generate_signed_url_v4_w_content_type(self):
self._generate_signed_url_v4_helper(content_type="text.html")
def test_generate_signed_url_v4_w_response_type(self):
self._generate_signed_url_v4_helper(response_type="text.html")
def test_generate_signed_url_v4_w_response_disposition(self):
self._generate_signed_url_v4_helper(response_disposition="inline")
def test_generate_signed_url_v4_w_generation(self):
self._generate_signed_url_v4_helper(generation=12345)
def test_generate_signed_url_v4_w_headers(self):
self._generate_signed_url_v4_helper(headers={"x-goog-foo": "bar"})
def test_generate_signed_url_v4_w_csek(self):
self._generate_signed_url_v4_helper(encryption_key=os.urandom(32))
def test_generate_signed_url_v4_w_csek_and_headers(self):
self._generate_signed_url_v4_helper(
encryption_key=os.urandom(32), headers={"x-goog-foo": "bar"}
)
def test_generate_signed_url_v4_w_virtual_hostname(self):
self._generate_signed_url_v4_helper(virtual_hosted_style=True)
def test_generate_signed_url_v4_w_bucket_bound_hostname_w_scheme(self):
self._generate_signed_url_v4_helper(
bucket_bound_hostname="http://cdn.example.com"
)
def test_generate_signed_url_v4_w_bucket_bound_hostname_w_bare_hostname(self):
self._generate_signed_url_v4_helper(bucket_bound_hostname="cdn.example.com")
def test_generate_signed_url_v4_w_credentials(self):
credentials = object()
self._generate_signed_url_v4_helper(credentials=credentials)
def test_exists_miss(self):
NONESUCH = "nonesuch"
not_found_response = ({"status": http_client.NOT_FOUND}, b"")
connection = _Connection(not_found_response)
client = _Client(connection)
bucket = _Bucket(client)
blob = self._make_one(NONESUCH, bucket=bucket)
self.assertFalse(blob.exists(timeout=42))
self.assertEqual(len(connection._requested), 1)
self.assertEqual(
connection._requested[0],
{
"method": "GET",
"path": "/b/name/o/{}".format(NONESUCH),
"query_params": {"fields": "name"},
"_target_object": None,
"timeout": 42,
},
)
def test_exists_hit_w_user_project(self):
BLOB_NAME = "blob-name"
USER_PROJECT = "user-project-123"
found_response = ({"status": http_client.OK}, b"")
connection = _Connection(found_response)
client = _Client(connection)
bucket = _Bucket(client, user_project=USER_PROJECT)
blob = self._make_one(BLOB_NAME, bucket=bucket)
bucket._blobs[BLOB_NAME] = 1
self.assertTrue(blob.exists())
self.assertEqual(len(connection._requested), 1)
self.assertEqual(
connection._requested[0],
{
"method": "GET",
"path": "/b/name/o/{}".format(BLOB_NAME),
"query_params": {"fields": "name", "userProject": USER_PROJECT},
"_target_object": None,
"timeout": self._get_default_timeout(),
},
)
def test_exists_hit_w_generation(self):
BLOB_NAME = "blob-name"
GENERATION = 123456
found_response = ({"status": http_client.OK}, b"")
connection = _Connection(found_response)
client = _Client(connection)
bucket = _Bucket(client)
blob = self._make_one(BLOB_NAME, bucket=bucket, generation=GENERATION)
bucket._blobs[BLOB_NAME] = 1
self.assertTrue(blob.exists())
self.assertEqual(len(connection._requested), 1)
self.assertEqual(
connection._requested[0],
{
"method": "GET",
"path": "/b/name/o/{}".format(BLOB_NAME),
"query_params": {"fields": "name", "generation": GENERATION},
"_target_object": None,
"timeout": self._get_default_timeout(),
},
)
def test_exists_w_generation_match(self):
BLOB_NAME = "blob-name"
GENERATION_NUMBER = 123456
METAGENERATION_NUMBER = 6
found_response = ({"status": http_client.OK}, b"")
connection = _Connection(found_response)
client = _Client(connection)
bucket = _Bucket(client)
blob = self._make_one(BLOB_NAME, bucket=bucket)
bucket._blobs[BLOB_NAME] = 1
self.assertTrue(
blob.exists(
if_generation_match=GENERATION_NUMBER,
if_metageneration_match=METAGENERATION_NUMBER,
)
)
self.assertEqual(len(connection._requested), 1)
self.assertEqual(
connection._requested[0],
{
"method": "GET",
"path": "/b/name/o/{}".format(BLOB_NAME),
"query_params": {
"fields": "name",
"ifGenerationMatch": GENERATION_NUMBER,
"ifMetagenerationMatch": METAGENERATION_NUMBER,
},
"_target_object": None,
"timeout": self._get_default_timeout(),
},
)
def test_delete_wo_generation(self):
BLOB_NAME = "blob-name"
not_found_response = ({"status": http_client.NOT_FOUND}, b"")
connection = _Connection(not_found_response)
client = _Client(connection)
bucket = _Bucket(client)
blob = self._make_one(BLOB_NAME, bucket=bucket)
bucket._blobs[BLOB_NAME] = 1
blob.delete()
self.assertFalse(blob.exists())
self.assertEqual(
bucket._deleted,
[
(
BLOB_NAME,
None,
None,
self._get_default_timeout(),
None,
None,
None,
None,
)
],
)
def test_delete_w_generation(self):
BLOB_NAME = "blob-name"
GENERATION = 123456
not_found_response = ({"status": http_client.NOT_FOUND}, b"")
connection = _Connection(not_found_response)
client = _Client(connection)
bucket = _Bucket(client)
blob = self._make_one(BLOB_NAME, bucket=bucket, generation=GENERATION)
bucket._blobs[BLOB_NAME] = 1
blob.delete(timeout=42)
self.assertFalse(blob.exists())
self.assertEqual(
bucket._deleted, [(BLOB_NAME, None, GENERATION, 42, None, None, None, None)]
)
def test_delete_w_generation_match(self):
BLOB_NAME = "blob-name"
GENERATION = 123456
not_found_response = ({"status": http_client.NOT_FOUND}, b"")
connection = _Connection(not_found_response)
client = _Client(connection)
bucket = _Bucket(client)
blob = self._make_one(BLOB_NAME, bucket=bucket, generation=GENERATION)
bucket._blobs[BLOB_NAME] = 1
blob.delete(timeout=42, if_generation_match=GENERATION)
self.assertFalse(blob.exists())
self.assertEqual(
bucket._deleted,
[(BLOB_NAME, None, GENERATION, 42, GENERATION, None, None, None)],
)
def test__get_transport(self):
client = mock.Mock(spec=[u"_credentials", "_http"])
client._http = mock.sentinel.transport
blob = self._make_one(u"blob-name", bucket=None)
transport = blob._get_transport(client)
self.assertIs(transport, mock.sentinel.transport)
def test__get_download_url_with_media_link(self):
blob_name = "something.txt"
bucket = _Bucket(name="IRRELEVANT")
blob = self._make_one(blob_name, bucket=bucket)
media_link = "http://test.invalid"
# Set the media link on the blob
blob._properties["mediaLink"] = media_link
client = mock.Mock(_connection=_Connection)
client._connection.API_BASE_URL = "https://storage.googleapis.com"
download_url = blob._get_download_url(client)
self.assertEqual(download_url, media_link)
def test__get_download_url_with_generation_match(self):
GENERATION_NUMBER = 6
MEDIA_LINK = "http://test.invalid"
blob = self._make_one("something.txt", bucket=_Bucket(name="IRRELEVANT"))
# Set the media link on the blob
blob._properties["mediaLink"] = MEDIA_LINK
client = mock.Mock(_connection=_Connection)
client._connection.API_BASE_URL = "https://storage.googleapis.com"
download_url = blob._get_download_url(
client, if_generation_match=GENERATION_NUMBER
)
self.assertEqual(
download_url,
"{}?ifGenerationMatch={}".format(MEDIA_LINK, GENERATION_NUMBER),
)
def test__get_download_url_with_media_link_w_user_project(self):
blob_name = "something.txt"
user_project = "user-project-123"
bucket = _Bucket(name="IRRELEVANT", user_project=user_project)
blob = self._make_one(blob_name, bucket=bucket)
media_link = "http://test.invalid"
# Set the media link on the blob
blob._properties["mediaLink"] = media_link
client = mock.Mock(_connection=_Connection)
client._connection.API_BASE_URL = "https://storage.googleapis.com"
download_url = blob._get_download_url(client)
self.assertEqual(
download_url, "{}?userProject={}".format(media_link, user_project)
)
def test__get_download_url_on_the_fly(self):
blob_name = "bzzz-fly.txt"
bucket = _Bucket(name="buhkit")
blob = self._make_one(blob_name, bucket=bucket)
self.assertIsNone(blob.media_link)
client = mock.Mock(_connection=_Connection)
client._connection.API_BASE_URL = "https://storage.googleapis.com"
download_url = blob._get_download_url(client)
expected_url = (
"https://storage.googleapis.com/download/storage/v1/b/"
"buhkit/o/bzzz-fly.txt?alt=media"
)
self.assertEqual(download_url, expected_url)
def test__get_download_url_on_the_fly_with_generation(self):
blob_name = "pretend.txt"
bucket = _Bucket(name="fictional")
blob = self._make_one(blob_name, bucket=bucket)
generation = 1493058489532987
# Set the media link on the blob
blob._properties["generation"] = str(generation)
self.assertIsNone(blob.media_link)
client = mock.Mock(_connection=_Connection)
client._connection.API_BASE_URL = "https://storage.googleapis.com"
download_url = blob._get_download_url(client)
expected_url = (
"https://storage.googleapis.com/download/storage/v1/b/"
"fictional/o/pretend.txt?alt=media&generation=1493058489532987"
)
self.assertEqual(download_url, expected_url)
def test__get_download_url_on_the_fly_with_user_project(self):
blob_name = "pretend.txt"
user_project = "user-project-123"
bucket = _Bucket(name="fictional", user_project=user_project)
blob = self._make_one(blob_name, bucket=bucket)
self.assertIsNone(blob.media_link)
client = mock.Mock(_connection=_Connection)
client._connection.API_BASE_URL = "https://storage.googleapis.com"
download_url = blob._get_download_url(client)
expected_url = (
"https://storage.googleapis.com/download/storage/v1/b/"
"fictional/o/pretend.txt?alt=media&userProject={}".format(user_project)
)
self.assertEqual(download_url, expected_url)
def test__get_download_url_on_the_fly_with_kms_key_name(self):
kms_resource = (
"projects/test-project-123/"
"locations/us/"
"keyRings/test-ring/"
"cryptoKeys/test-key"
)
blob_name = "bzzz-fly.txt"
bucket = _Bucket(name="buhkit")
blob = self._make_one(blob_name, bucket=bucket, kms_key_name=kms_resource)
self.assertIsNone(blob.media_link)
client = mock.Mock(_connection=_Connection)
client._connection.API_BASE_URL = "https://storage.googleapis.com"
download_url = blob._get_download_url(client)
expected_url = (
"https://storage.googleapis.com/download/storage/v1/b/"
"buhkit/o/bzzz-fly.txt?alt=media"
)
self.assertEqual(download_url, expected_url)
@staticmethod
def _mock_requests_response(status_code, headers, content=b""):
import requests
response = requests.Response()
response.status_code = status_code
response.headers.update(headers)
response.raw = None
response._content = content
response.request = requests.Request("POST", "http://example.com").prepare()
return response
def _do_download_helper_wo_chunks(self, w_range, raw_download, timeout=None):
blob_name = "blob-name"
client = mock.Mock()
bucket = _Bucket(client)
blob = self._make_one(blob_name, bucket=bucket)
self.assertIsNone(blob.chunk_size)
transport = object()
file_obj = io.BytesIO()
download_url = "http://test.invalid"
headers = {}
if raw_download:
patch = mock.patch("google.cloud.storage.blob.RawDownload")
else:
patch = mock.patch("google.cloud.storage.blob.Download")
if timeout is None:
expected_timeout = self._get_default_timeout()
timeout_kwarg = {}
else:
expected_timeout = timeout
timeout_kwarg = {"timeout": timeout}
with patch as patched:
if w_range:
blob._do_download(
transport,
file_obj,
download_url,
headers,
start=1,
end=3,
raw_download=raw_download,
**timeout_kwarg
)
else:
blob._do_download(
transport,
file_obj,
download_url,
headers,
raw_download=raw_download,
**timeout_kwarg
)
if w_range:
patched.assert_called_once_with(
download_url,
stream=file_obj,
headers=headers,
start=1,
end=3,
checksum="md5",
)
else:
patched.assert_called_once_with(
download_url,
stream=file_obj,
headers=headers,
start=None,
end=None,
checksum="md5",
)
patched.return_value.consume.assert_called_once_with(
transport, timeout=expected_timeout
)
def test__do_download_wo_chunks_wo_range_wo_raw(self):
self._do_download_helper_wo_chunks(w_range=False, raw_download=False)
def test__do_download_wo_chunks_w_range_wo_raw(self):
self._do_download_helper_wo_chunks(w_range=True, raw_download=False)
def test__do_download_wo_chunks_wo_range_w_raw(self):
self._do_download_helper_wo_chunks(w_range=False, raw_download=True)
def test__do_download_wo_chunks_w_range_w_raw(self):
self._do_download_helper_wo_chunks(w_range=True, raw_download=True)
def test__do_download_wo_chunks_w_custom_timeout(self):
self._do_download_helper_wo_chunks(
w_range=False, raw_download=False, timeout=9.58
)
def _do_download_helper_w_chunks(
self, w_range, raw_download, timeout=None, checksum="md5"
):
blob_name = "blob-name"
client = mock.Mock(_credentials=_make_credentials(), spec=["_credentials"])
bucket = _Bucket(client)
blob = self._make_one(blob_name, bucket=bucket)
blob._CHUNK_SIZE_MULTIPLE = 1
chunk_size = blob.chunk_size = 3
transport = object()
file_obj = io.BytesIO()
download_url = "http://test.invalid"
headers = {}
download = mock.Mock(finished=False, spec=["finished", "consume_next_chunk"])
def side_effect(*args, **kwargs):
download.finished = True
download.consume_next_chunk.side_effect = side_effect
if raw_download:
patch = mock.patch("google.cloud.storage.blob.RawChunkedDownload")
else:
patch = mock.patch("google.cloud.storage.blob.ChunkedDownload")
if timeout is None:
expected_timeout = self._get_default_timeout()
timeout_kwarg = {}
else:
expected_timeout = timeout
timeout_kwarg = {"timeout": timeout}
with patch as patched:
patched.return_value = download
if w_range:
blob._do_download(
transport,
file_obj,
download_url,
headers,
start=1,
end=3,
raw_download=raw_download,
checksum=checksum,
**timeout_kwarg
)
else:
blob._do_download(
transport,
file_obj,
download_url,
headers,
raw_download=raw_download,
checksum=checksum,
**timeout_kwarg
)
if w_range:
patched.assert_called_once_with(
download_url, chunk_size, file_obj, headers=headers, start=1, end=3
)
else:
patched.assert_called_once_with(
download_url, chunk_size, file_obj, headers=headers, start=0, end=None
)
download.consume_next_chunk.assert_called_once_with(
transport, timeout=expected_timeout
)
def test__do_download_w_chunks_wo_range_wo_raw(self):
self._do_download_helper_w_chunks(w_range=False, raw_download=False)
def test__do_download_w_chunks_w_range_wo_raw(self):
self._do_download_helper_w_chunks(w_range=True, raw_download=False)
def test__do_download_w_chunks_wo_range_w_raw(self):
self._do_download_helper_w_chunks(w_range=False, raw_download=True)
def test__do_download_w_chunks_w_range_w_raw(self):
self._do_download_helper_w_chunks(w_range=True, raw_download=True)
def test__do_download_w_chunks_w_custom_timeout(self):
self._do_download_helper_w_chunks(w_range=True, raw_download=True, timeout=9.58)
def test__do_download_w_chunks_w_checksum(self):
from google.cloud.storage import blob as blob_module
with mock.patch("logging.info") as patch:
self._do_download_helper_w_chunks(
w_range=False, raw_download=False, checksum="md5"
)
patch.assert_called_once_with(
blob_module._CHUNKED_DOWNLOAD_CHECKSUM_MESSAGE.format("md5")
)
def test__do_download_w_chunks_wo_checksum(self):
with mock.patch("logging.info") as patch:
self._do_download_helper_w_chunks(
w_range=False, raw_download=False, checksum=None
)
patch.assert_not_called()
def test_download_to_file_with_failure(self):
import requests
from google.resumable_media import InvalidResponse
from google.cloud import exceptions
raw_response = requests.Response()
raw_response.status_code = http_client.NOT_FOUND
raw_request = requests.Request("GET", "http://example.com")
raw_response.request = raw_request.prepare()
grmp_response = InvalidResponse(raw_response)
blob_name = "blob-name"
media_link = "http://test.invalid"
client = mock.Mock(spec=[u"_http"])
bucket = _Bucket(client)
blob = self._make_one(blob_name, bucket=bucket)
blob._properties["mediaLink"] = media_link
blob._do_download = mock.Mock()
blob._do_download.side_effect = grmp_response
file_obj = io.BytesIO()
with self.assertRaises(exceptions.NotFound):
blob.download_to_file(file_obj)
self.assertEqual(file_obj.tell(), 0)
headers = {"accept-encoding": "gzip"}
blob._do_download.assert_called_once_with(
client._http,
file_obj,
media_link,
headers,
None,
None,
False,
timeout=self._get_default_timeout(),
checksum="md5",
)
def test_download_to_file_wo_media_link(self):
blob_name = "blob-name"
client = mock.Mock(_connection=_Connection, spec=[u"_http"])
client._connection.API_BASE_URL = "https://storage.googleapis.com"
bucket = _Bucket(client)
blob = self._make_one(blob_name, bucket=bucket)
blob._do_download = mock.Mock()
file_obj = io.BytesIO()
blob.download_to_file(file_obj)
# Make sure the media link is still unknown.
self.assertIsNone(blob.media_link)
expected_url = (
"https://storage.googleapis.com/download/storage/v1/b/"
"name/o/blob-name?alt=media"
)
headers = {"accept-encoding": "gzip"}
blob._do_download.assert_called_once_with(
client._http,
file_obj,
expected_url,
headers,
None,
None,
False,
timeout=self._get_default_timeout(),
checksum="md5",
)
def test_download_to_file_w_generation_match(self):
GENERATION_NUMBER = 6
HEADERS = {"accept-encoding": "gzip"}
EXPECTED_URL = (
"https://storage.googleapis.com/download/storage/v1/b/"
"name/o/blob-name?alt=media&ifGenerationNotMatch={}".format(
GENERATION_NUMBER
)
)
client = mock.Mock(_connection=_Connection, spec=[u"_http"])
client._connection.API_BASE_URL = "https://storage.googleapis.com"
blob = self._make_one("blob-name", bucket=_Bucket(client))
blob._do_download = mock.Mock()
file_obj = io.BytesIO()
blob.download_to_file(file_obj, if_generation_not_match=GENERATION_NUMBER)
blob._do_download.assert_called_once_with(
client._http,
file_obj,
EXPECTED_URL,
HEADERS,
None,
None,
False,
timeout=self._get_default_timeout(),
checksum="md5",
)
def _download_to_file_helper(self, use_chunks, raw_download, timeout=None):
blob_name = "blob-name"
client = mock.Mock(spec=[u"_http"])
bucket = _Bucket(client)
media_link = "http://example.com/media/"
properties = {"mediaLink": media_link}
blob = self._make_one(blob_name, bucket=bucket, properties=properties)
if use_chunks:
blob._CHUNK_SIZE_MULTIPLE = 1
blob.chunk_size = 3
blob._do_download = mock.Mock()
if timeout is None:
expected_timeout = self._get_default_timeout()
timeout_kwarg = {}
else:
expected_timeout = timeout
timeout_kwarg = {"timeout": timeout}
file_obj = io.BytesIO()
if raw_download:
blob.download_to_file(file_obj, raw_download=True, **timeout_kwarg)
else:
blob.download_to_file(file_obj, **timeout_kwarg)
headers = {"accept-encoding": "gzip"}
blob._do_download.assert_called_once_with(
client._http,
file_obj,
media_link,
headers,
None,
None,
raw_download,
timeout=expected_timeout,
checksum="md5",
)
def test_download_to_file_wo_chunks_wo_raw(self):
self._download_to_file_helper(use_chunks=False, raw_download=False)
def test_download_to_file_w_chunks_wo_raw(self):
self._download_to_file_helper(use_chunks=True, raw_download=False)
def test_download_to_file_wo_chunks_w_raw(self):
self._download_to_file_helper(use_chunks=False, raw_download=True)
def test_download_to_file_w_chunks_w_raw(self):
self._download_to_file_helper(use_chunks=True, raw_download=True)
def test_download_to_file_w_custom_timeout(self):
self._download_to_file_helper(
use_chunks=False, raw_download=False, timeout=9.58
)
def _download_to_filename_helper(self, updated, raw_download, timeout=None):
import os
from google.cloud.storage._helpers import _convert_to_timestamp
from google.cloud._testing import _NamedTemporaryFile
blob_name = "blob-name"
client = mock.Mock(spec=["_http"])
bucket = _Bucket(client)
media_link = "http://example.com/media/"
properties = {"mediaLink": media_link}
if updated is not None:
properties["updated"] = updated
blob = self._make_one(blob_name, bucket=bucket, properties=properties)
blob._do_download = mock.Mock()
with _NamedTemporaryFile() as temp:
if timeout is None:
blob.download_to_filename(temp.name, raw_download=raw_download)
else:
blob.download_to_filename(
temp.name, raw_download=raw_download, timeout=timeout,
)
if updated is None:
self.assertIsNone(blob.updated)
else:
mtime = os.path.getmtime(temp.name)
if six.PY2:
updated_time = _convert_to_timestamp(blob.updated)
else:
updated_time = blob.updated.timestamp()
self.assertEqual(mtime, updated_time)
expected_timeout = self._get_default_timeout() if timeout is None else timeout
headers = {"accept-encoding": "gzip"}
blob._do_download.assert_called_once_with(
client._http,
mock.ANY,
media_link,
headers,
None,
None,
raw_download,
timeout=expected_timeout,
checksum="md5",
)
stream = blob._do_download.mock_calls[0].args[1]
self.assertEqual(stream.name, temp.name)
def test_download_to_filename_w_generation_match(self):
from google.cloud._testing import _NamedTemporaryFile
GENERATION_NUMBER = 6
MEDIA_LINK = "http://example.com/media/"
EXPECTED_LINK = MEDIA_LINK + "?ifGenerationMatch={}".format(GENERATION_NUMBER)
HEADERS = {"accept-encoding": "gzip"}
client = mock.Mock(spec=["_http"])
blob = self._make_one(
"blob-name", bucket=_Bucket(client), properties={"mediaLink": MEDIA_LINK}
)
blob._do_download = mock.Mock()
with _NamedTemporaryFile() as temp:
blob.download_to_filename(temp.name, if_generation_match=GENERATION_NUMBER)
blob._do_download.assert_called_once_with(
client._http,
mock.ANY,
EXPECTED_LINK,
HEADERS,
None,
None,
False,
timeout=self._get_default_timeout(),
checksum="md5",
)
def test_download_to_filename_w_updated_wo_raw(self):
updated = "2014-12-06T13:13:50.690Z"
self._download_to_filename_helper(updated=updated, raw_download=False)
def test_download_to_filename_wo_updated_wo_raw(self):
self._download_to_filename_helper(updated=None, raw_download=False)
def test_download_to_filename_w_updated_w_raw(self):
updated = "2014-12-06T13:13:50.690Z"
self._download_to_filename_helper(updated=updated, raw_download=True)
def test_download_to_filename_wo_updated_w_raw(self):
self._download_to_filename_helper(updated=None, raw_download=True)
def test_download_to_filename_w_custom_timeout(self):
self._download_to_filename_helper(
updated=None, raw_download=False, timeout=9.58
)
def test_download_to_filename_corrupted(self):
from google.resumable_media import DataCorruption
blob_name = "blob-name"
client = mock.Mock(spec=["_http"])
bucket = _Bucket(client)
media_link = "http://example.com/media/"
properties = {"mediaLink": media_link}
blob = self._make_one(blob_name, bucket=bucket, properties=properties)
blob._do_download = mock.Mock()
blob._do_download.side_effect = DataCorruption("testing")
# Try to download into a temporary file (don't use
# `_NamedTemporaryFile`; its cleanup would try to remove the file
# after the failed download has already removed it).
filehandle, filename = tempfile.mkstemp()
os.close(filehandle)
self.assertTrue(os.path.exists(filename))
with self.assertRaises(DataCorruption):
blob.download_to_filename(filename)
# Make sure the file was cleaned up.
self.assertFalse(os.path.exists(filename))
headers = {"accept-encoding": "gzip"}
blob._do_download.assert_called_once_with(
client._http,
mock.ANY,
media_link,
headers,
None,
None,
False,
timeout=self._get_default_timeout(),
checksum="md5",
)
stream = blob._do_download.mock_calls[0].args[1]
self.assertEqual(stream.name, filename)
def test_download_to_filename_w_key(self):
from google.cloud._testing import _NamedTemporaryFile
from google.cloud.storage.blob import _get_encryption_headers
blob_name = "blob-name"
# Create a fake client/bucket and use them in the Blob() constructor.
client = mock.Mock(spec=["_http"])
bucket = _Bucket(client)
media_link = "http://example.com/media/"
properties = {"mediaLink": media_link}
key = b"aa426195405adee2c8081bb9e7e74b19"
blob = self._make_one(
blob_name, bucket=bucket, properties=properties, encryption_key=key
)
blob._do_download = mock.Mock()
with _NamedTemporaryFile() as temp:
blob.download_to_filename(temp.name)
headers = {"accept-encoding": "gzip"}
headers.update(_get_encryption_headers(key))
blob._do_download.assert_called_once_with(
client._http,
mock.ANY,
media_link,
headers,
None,
None,
False,
timeout=self._get_default_timeout(),
checksum="md5",
)
stream = blob._do_download.mock_calls[0].args[1]
self.assertEqual(stream.name, temp.name)
def _download_as_bytes_helper(self, raw_download, timeout=None):
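# Shared helper: patch `_do_download` and verify `download_as_bytes` returns b""
# while forwarding `raw_download` and the timeout to a single `_do_download` call
# on an in-memory stream.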
blob_name = "blob-name"
client = mock.Mock(spec=["_http"])
bucket = _Bucket(client)
media_link = "http://example.com/media/"
properties = {"mediaLink": media_link}
blob = self._make_one(blob_name, bucket=bucket, properties=properties)
blob._do_download = mock.Mock()
if timeout is None:
expected_timeout = self._get_default_timeout()
fetched = blob.download_as_bytes(raw_download=raw_download)
else:
expected_timeout = timeout
fetched = blob.download_as_bytes(raw_download=raw_download, timeout=timeout)
self.assertEqual(fetched, b"")
headers = {"accept-encoding": "gzip"}
blob._do_download.assert_called_once_with(
client._http,
mock.ANY,
media_link,
headers,
None,
None,
raw_download,
timeout=expected_timeout,
checksum="md5",
)
stream = blob._do_download.mock_calls[0].args[1]
self.assertIsInstance(stream, io.BytesIO)
def test_download_as_string_w_response_headers(self):
blob_name = "blob-name"
client = mock.Mock(spec=["_http"])
bucket = _Bucket(client)
media_link = "http://example.com/media/"
properties = {"mediaLink": media_link}
blob = self._make_one(blob_name, bucket=bucket, properties=properties)
response = self._mock_requests_response(
http_client.OK,
headers={
"Content-Type": "application/json",
"Content-Language": "ko-kr",
"Cache-Control": "max-age=1337;public",
"Content-Encoding": "gzip",
"X-Goog-Storage-Class": "STANDARD",
"X-Goog-Hash": "crc32c=4gcgLQ==,md5=CS9tHYTtyFntzj7B9nkkJQ==",
},
# { "x": 5 } gzipped
content=b"\x1f\x8b\x08\x00\xcfo\x17_\x02\xff\xabVP\xaaP\xb2R0U\xa8\x05\x00\xa1\xcaQ\x93\n\x00\x00\x00",
)
blob._extract_headers_from_download(response)
self.assertEqual(blob.content_type, "application/json")
self.assertEqual(blob.content_language, "ko-kr")
self.assertEqual(blob.content_encoding, "gzip")
self.assertEqual(blob.cache_control, "max-age=1337;public")
self.assertEqual(blob.storage_class, "STANDARD")
self.assertEqual(blob.md5_hash, "CS9tHYTtyFntzj7B9nkkJQ==")
self.assertEqual(blob.crc32c, "4gcgLQ==")
response = self._mock_requests_response(
http_client.OK,
headers={
"Content-Type": "application/octet-stream",
"Content-Language": "en-US",
"Cache-Control": "max-age=1337;public",
"Content-Encoding": "gzip",
"X-Goog-Storage-Class": "STANDARD",
"X-Goog-Hash": "crc32c=4/c+LQ==,md5=CS9tHYTt/+ntzj7B9nkkJQ==",
},
content=b"",
)
blob._extract_headers_from_download(response)
self.assertEqual(blob.content_type, "application/octet-stream")
self.assertEqual(blob.content_language, "en-US")
self.assertEqual(blob.md5_hash, "CS9tHYTt/+ntzj7B9nkkJQ==")
self.assertEqual(blob.crc32c, "4/c+LQ==")
def test_download_as_string_w_hash_response_header_none(self):
blob_name = "blob-name"
md5_hash = "CS9tHYTtyFntzj7B9nkkJQ=="
crc32c = "4gcgLQ=="
client = mock.Mock(spec=["_http"])
bucket = _Bucket(client)
media_link = "http://example.com/media/"
properties = {
"mediaLink": media_link,
"md5Hash": md5_hash,
"crc32c": crc32c,
}
blob = self._make_one(blob_name, bucket=bucket, properties=properties)
response = self._mock_requests_response(
http_client.OK,
headers={"X-Goog-Hash": ""},
# { "x": 5 } gzipped
content=b"\x1f\x8b\x08\x00\xcfo\x17_\x02\xff\xabVP\xaaP\xb2R0U\xa8\x05\x00\xa1\xcaQ\x93\n\x00\x00\x00",
)
blob._extract_headers_from_download(response)
self.assertEqual(blob.md5_hash, md5_hash)
self.assertEqual(blob.crc32c, crc32c)
def test_download_as_bytes_w_generation_match(self):
GENERATION_NUMBER = 6
MEDIA_LINK = "http://example.com/media/"
client = mock.Mock(spec=["_http"])
blob = self._make_one(
"blob-name", bucket=_Bucket(client), properties={"mediaLink": MEDIA_LINK}
)
blob.download_to_file = mock.Mock()
fetched = blob.download_as_bytes(if_generation_match=GENERATION_NUMBER)
self.assertEqual(fetched, b"")
blob.download_to_file.assert_called_once_with(
mock.ANY,
client=None,
start=None,
end=None,
raw_download=False,
if_generation_match=GENERATION_NUMBER,
if_generation_not_match=None,
if_metageneration_match=None,
if_metageneration_not_match=None,
timeout=self._get_default_timeout(),
checksum="md5",
)
def test_download_as_bytes_wo_raw(self):
self._download_as_bytes_helper(raw_download=False)
def test_download_as_bytes_w_raw(self):
self._download_as_bytes_helper(raw_download=True)
def test_download_as_bytes_w_custom_timeout(self):
self._download_as_bytes_helper(raw_download=False, timeout=9.58)
def _download_as_text_helper(self, raw_download, encoding=None, timeout=None):
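# Shared helper: like the bytes helper, but for `download_as_text`; optionally sets
# a `contentEncoding` property and expects an empty string result.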
blob_name = "blob-name"
client = mock.Mock(spec=["_http"])
bucket = _Bucket(client)
media_link = "http://example.com/media/"
properties = {"mediaLink": media_link}
if encoding:
properties["contentEncoding"] = encoding
blob = self._make_one(blob_name, bucket=bucket, properties=properties)
blob._do_download = mock.Mock()
if timeout is None:
expected_timeout = self._get_default_timeout()
fetched = blob.download_as_text(raw_download=raw_download)
else:
expected_timeout = timeout
fetched = blob.download_as_text(raw_download=raw_download, timeout=timeout)
self.assertEqual(fetched, "")
headers = {"accept-encoding": "gzip"}
blob._do_download.assert_called_once_with(
client._http,
mock.ANY,
media_link,
headers,
None,
None,
raw_download,
timeout=expected_timeout,
checksum="md5",
)
stream = blob._do_download.mock_calls[0].args[1]
self.assertIsInstance(stream, io.BytesIO)
def test_download_as_text_w_generation_match(self):
GENERATION_NUMBER = 6
MEDIA_LINK = "http://example.com/media/"
client = mock.Mock(spec=["_http"])
blob = self._make_one(
"blob-name", bucket=_Bucket(client), properties={"mediaLink": MEDIA_LINK}
)
blob.download_to_file = mock.Mock()
fetched = blob.download_as_text(if_generation_match=GENERATION_NUMBER)
self.assertEqual(fetched, "")
blob.download_to_file.assert_called_once_with(
mock.ANY,
client=None,
start=None,
end=None,
raw_download=False,
if_generation_match=GENERATION_NUMBER,
if_generation_not_match=None,
if_metageneration_match=None,
if_metageneration_not_match=None,
timeout=self._get_default_timeout(),
checksum="md5",
)
def test_download_as_text_wo_raw(self):
self._download_as_text_helper(raw_download=False)
def test_download_as_text_w_raw(self):
self._download_as_text_helper(raw_download=True)
def test_download_as_text_w_custom_timeout(self):
self._download_as_text_helper(raw_download=False, timeout=9.58)
def test_download_as_text_w_encoding(self):
self._download_as_text_helper(raw_download=False, encoding="utf-8")
@mock.patch("warnings.warn")
def test_download_as_string(self, mock_warn):
MEDIA_LINK = "http://example.com/media/"
client = mock.Mock(spec=["_http"])
blob = self._make_one(
"blob-name", bucket=_Bucket(client), properties={"mediaLink": MEDIA_LINK}
)
blob.download_to_file = mock.Mock()
fetched = blob.download_as_string()
self.assertEqual(fetched, b"")
blob.download_to_file.assert_called_once_with(
mock.ANY,
client=None,
start=None,
end=None,
raw_download=False,
if_generation_match=None,
if_generation_not_match=None,
if_metageneration_match=None,
if_metageneration_not_match=None,
timeout=self._get_default_timeout(),
checksum="md5",
)
mock_warn.assert_called_with(
"Blob.download_as_string() is deprecated and will be removed in future."
"Use Blob.download_as_bytes() instead.",
PendingDeprecationWarning,
stacklevel=1,
)
def test__get_content_type_explicit(self):
blob = self._make_one(u"blob-name", bucket=None)
content_type = u"text/plain"
return_value = blob._get_content_type(content_type)
self.assertEqual(return_value, content_type)
def test__get_content_type_from_blob(self):
blob = self._make_one(u"blob-name", bucket=None)
blob.content_type = u"video/mp4"
return_value = blob._get_content_type(None)
self.assertEqual(return_value, blob.content_type)
def test__get_content_type_from_filename(self):
blob = self._make_one(u"blob-name", bucket=None)
return_value = blob._get_content_type(None, filename="archive.tar")
self.assertEqual(return_value, "application/x-tar")
def test__get_content_type_default(self):
blob = self._make_one(u"blob-name", bucket=None)
return_value = blob._get_content_type(None)
self.assertEqual(return_value, u"application/octet-stream")
def test__get_writable_metadata_no_changes(self):
name = u"blob-name"
blob = self._make_one(name, bucket=None)
object_metadata = blob._get_writable_metadata()
expected = {"name": name}
self.assertEqual(object_metadata, expected)
def test__get_writable_metadata_with_changes(self):
name = u"blob-name"
blob = self._make_one(name, bucket=None)
blob.storage_class = "NEARLINE"
blob.cache_control = "max-age=3600"
blob.metadata = {"color": "red"}
object_metadata = blob._get_writable_metadata()
expected = {
"cacheControl": blob.cache_control,
"metadata": blob.metadata,
"name": name,
"storageClass": blob.storage_class,
}
self.assertEqual(object_metadata, expected)
def test__get_writable_metadata_unwritable_field(self):
name = u"blob-name"
properties = {"updated": "2016-10-16T18:18:18.181Z"}
blob = self._make_one(name, bucket=None, properties=properties)
# Fake that `updated` is in changes.
blob._changes.add("updated")
object_metadata = blob._get_writable_metadata()
expected = {"name": name}
self.assertEqual(object_metadata, expected)
def test__set_metadata_to_none(self):
name = u"blob-name"
blob = self._make_one(name, bucket=None)
blob.storage_class = "NEARLINE"
blob.cache_control = "max-age=3600"
with mock.patch("google.cloud.storage.blob.Blob._patch_property") as patch_prop:
blob.metadata = None
patch_prop.assert_called_once_with("metadata", None)
def test__get_upload_arguments(self):
name = u"blob-name"
key = b"[pXw@,p@@AfBfrR3x-2b2SCHR,.?YwRO"
blob = self._make_one(name, bucket=None, encryption_key=key)
blob.content_disposition = "inline"
content_type = u"image/jpeg"
info = blob._get_upload_arguments(content_type)
headers, object_metadata, new_content_type = info
header_key_value = "W3BYd0AscEBAQWZCZnJSM3gtMmIyU0NIUiwuP1l3Uk8="
header_key_hash_value = "G0++dxF4q5rG4o9kE8gvEKn15RH6wLm0wXV1MgAlXOg="
expected_headers = {
"X-Goog-Encryption-Algorithm": "AES256",
"X-Goog-Encryption-Key": header_key_value,
"X-Goog-Encryption-Key-Sha256": header_key_hash_value,
}
self.assertEqual(headers, expected_headers)
expected_metadata = {
"contentDisposition": blob.content_disposition,
"name": name,
}
self.assertEqual(object_metadata, expected_metadata)
self.assertEqual(new_content_type, content_type)
def _mock_transport(self, status_code, headers, content=b""):
fake_transport = mock.Mock(spec=["request"])
fake_response = self._mock_requests_response(
status_code, headers, content=content
)
fake_transport.request.return_value = fake_response
return fake_transport
def _do_multipart_success(
self,
mock_get_boundary,
size=None,
num_retries=None,
user_project=None,
predefined_acl=None,
if_generation_match=None,
if_generation_not_match=None,
if_metageneration_match=None,
if_metageneration_not_match=None,
kms_key_name=None,
timeout=None,
):
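# Shared helper: run `_do_multipart_upload` against a mocked transport and verify
# the upload URL, query parameters, multipart payload, and headers.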
from six.moves.urllib.parse import urlencode
bucket = _Bucket(name="w00t", user_project=user_project)
blob = self._make_one(u"blob-name", bucket=bucket, kms_key_name=kms_key_name)
self.assertIsNone(blob.chunk_size)
# Create mocks to be checked for doing transport.
transport = self._mock_transport(http_client.OK, {})
# Create some mock arguments.
client = mock.Mock(_http=transport, _connection=_Connection, spec=["_http"])
client._connection.API_BASE_URL = "https://storage.googleapis.com"
data = b"data here hear hier"
stream = io.BytesIO(data)
content_type = u"application/xml"
if timeout is None:
expected_timeout = self._get_default_timeout()
timeout_kwarg = {}
else:
expected_timeout = timeout
timeout_kwarg = {"timeout": timeout}
response = blob._do_multipart_upload(
client,
stream,
content_type,
size,
num_retries,
predefined_acl,
if_generation_match,
if_generation_not_match,
if_metageneration_match,
if_metageneration_not_match,
**timeout_kwarg
)
# Check the mocks and the returned value.
self.assertIs(response, transport.request.return_value)
if size is None:
data_read = data
self.assertEqual(stream.tell(), len(data))
else:
data_read = data[:size]
self.assertEqual(stream.tell(), size)
mock_get_boundary.assert_called_once_with()
upload_url = (
"https://storage.googleapis.com/upload/storage/v1" + bucket.path + "/o"
)
qs_params = [("uploadType", "multipart")]
if user_project is not None:
qs_params.append(("userProject", user_project))
if predefined_acl is not None:
qs_params.append(("predefinedAcl", predefined_acl))
if kms_key_name is not None and "cryptoKeyVersions" not in kms_key_name:
qs_params.append(("kmsKeyName", kms_key_name))
if if_generation_match is not None:
qs_params.append(("ifGenerationMatch", if_generation_match))
if if_generation_not_match is not None:
qs_params.append(("ifGenerationNotMatch", if_generation_not_match))
if if_metageneration_match is not None:
qs_params.append(("ifMetagenerationMatch", if_metageneration_match))
if if_metageneration_not_match is not None:
qs_params.append(("ifMetaGenerationNotMatch", if_metageneration_not_match))
upload_url += "?" + urlencode(qs_params)
payload = (
b"--==0==\r\n"
+ b"content-type: application/json; charset=UTF-8\r\n\r\n"
+ b'{"name": "blob-name"}\r\n'
+ b"--==0==\r\n"
+ b"content-type: application/xml\r\n\r\n"
+ data_read
+ b"\r\n--==0==--"
)
headers = {"content-type": b'multipart/related; boundary="==0=="'}
transport.request.assert_called_once_with(
"POST", upload_url, data=payload, headers=headers, timeout=expected_timeout
)
@mock.patch(u"google.resumable_media._upload.get_boundary", return_value=b"==0==")
def test__do_multipart_upload_no_size(self, mock_get_boundary):
self._do_multipart_success(mock_get_boundary, predefined_acl="private")
@mock.patch(u"google.resumable_media._upload.get_boundary", return_value=b"==0==")
def test__do_multipart_upload_with_size(self, mock_get_boundary):
self._do_multipart_success(mock_get_boundary, size=10)
@mock.patch(u"google.resumable_media._upload.get_boundary", return_value=b"==0==")
def test__do_multipart_upload_with_user_project(self, mock_get_boundary):
user_project = "user-project-123"
self._do_multipart_success(mock_get_boundary, user_project=user_project)
@mock.patch(u"google.resumable_media._upload.get_boundary", return_value=b"==0==")
def test__do_multipart_upload_with_kms(self, mock_get_boundary):
kms_resource = (
"projects/test-project-123/"
"locations/us/"
"keyRings/test-ring/"
"cryptoKeys/test-key"
)
self._do_multipart_success(mock_get_boundary, kms_key_name=kms_resource)
@mock.patch(u"google.resumable_media._upload.get_boundary", return_value=b"==0==")
def test__do_multipart_upload_with_kms_with_version(self, mock_get_boundary):
kms_resource = (
"projects/test-project-123/"
"locations/us/"
"keyRings/test-ring/"
"cryptoKeys/test-key"
"cryptoKeyVersions/1"
)
self._do_multipart_success(mock_get_boundary, kms_key_name=kms_resource)
@mock.patch(u"google.resumable_media._upload.get_boundary", return_value=b"==0==")
def test__do_multipart_upload_with_retry(self, mock_get_boundary):
self._do_multipart_success(mock_get_boundary, num_retries=8)
@mock.patch(u"google.resumable_media._upload.get_boundary", return_value=b"==0==")
def test__do_multipart_upload_with_generation_match(self, mock_get_boundary):
self._do_multipart_success(
mock_get_boundary, if_generation_match=4, if_metageneration_match=4
)
@mock.patch(u"google.resumable_media._upload.get_boundary", return_value=b"==0==")
def test__do_multipart_upload_with_custom_timeout(self, mock_get_boundary):
self._do_multipart_success(mock_get_boundary, timeout=9.58)
@mock.patch(u"google.resumable_media._upload.get_boundary", return_value=b"==0==")
def test__do_multipart_upload_with_generation_not_match(self, mock_get_boundary):
self._do_multipart_success(
mock_get_boundary, if_generation_not_match=4, if_metageneration_not_match=4
)
def test__do_multipart_upload_bad_size(self):
blob = self._make_one(u"blob-name", bucket=None)
data = b"data here hear hier"
stream = io.BytesIO(data)
size = 50
self.assertGreater(size, len(data))
with self.assertRaises(ValueError) as exc_info:
blob._do_multipart_upload(
None, stream, None, size, None, None, None, None, None, None
)
exc_contents = str(exc_info.exception)
self.assertIn("was specified but the file-like object only had", exc_contents)
self.assertEqual(stream.tell(), len(data))
def _initiate_resumable_helper(
self,
size=None,
extra_headers=None,
chunk_size=None,
num_retries=None,
user_project=None,
predefined_acl=None,
if_generation_match=None,
if_generation_not_match=None,
if_metageneration_match=None,
if_metageneration_not_match=None,
blob_chunk_size=786432,
kms_key_name=None,
timeout=None,
):
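# Shared helper: run `_initiate_resumable_upload` and verify the returned
# ResumableUpload (URL, headers, chunk size, retry strategy) and the single POST
# made on the mocked transport.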
from six.moves.urllib.parse import urlencode
from google.resumable_media.requests import ResumableUpload
from google.cloud.storage.blob import _DEFAULT_CHUNKSIZE
bucket = _Bucket(name="whammy", user_project=user_project)
blob = self._make_one(u"blob-name", bucket=bucket, kms_key_name=kms_key_name)
blob.metadata = {"rook": "takes knight"}
blob.chunk_size = blob_chunk_size
if blob_chunk_size is not None:
self.assertIsNotNone(blob.chunk_size)
else:
self.assertIsNone(blob.chunk_size)
# Need to make sure **same** dict is used because ``json.dumps()``
# will depend on the hash order.
object_metadata = blob._get_writable_metadata()
blob._get_writable_metadata = mock.Mock(return_value=object_metadata, spec=[])
# Create mocks to be checked for doing transport.
resumable_url = "http://test.invalid?upload_id=hey-you"
response_headers = {"location": resumable_url}
transport = self._mock_transport(http_client.OK, response_headers)
# Create some mock arguments and call the method under test.
client = mock.Mock(_http=transport, _connection=_Connection, spec=[u"_http"])
client._connection.API_BASE_URL = "https://storage.googleapis.com"
data = b"hello hallo halo hi-low"
stream = io.BytesIO(data)
content_type = u"text/plain"
if timeout is None:
expected_timeout = self._get_default_timeout()
timeout_kwarg = {}
else:
expected_timeout = timeout
timeout_kwarg = {"timeout": timeout}
upload, returned_transport = blob._initiate_resumable_upload(
client,
stream,
content_type,
size,
num_retries,
extra_headers=extra_headers,
chunk_size=chunk_size,
predefined_acl=predefined_acl,
if_generation_match=if_generation_match,
if_generation_not_match=if_generation_not_match,
if_metageneration_match=if_metageneration_match,
if_metageneration_not_match=if_metageneration_not_match,
**timeout_kwarg
)
# Check the returned values.
self.assertIsInstance(upload, ResumableUpload)
upload_url = (
"https://storage.googleapis.com/upload/storage/v1" + bucket.path + "/o"
)
qs_params = [("uploadType", "resumable")]
if user_project is not None:
qs_params.append(("userProject", user_project))
if predefined_acl is not None:
qs_params.append(("predefinedAcl", predefined_acl))
if kms_key_name is not None and "cryptoKeyVersions" not in kms_key_name:
qs_params.append(("kmsKeyName", kms_key_name))
if if_generation_match is not None:
qs_params.append(("ifGenerationMatch", if_generation_match))
if if_generation_not_match is not None:
qs_params.append(("ifGenerationNotMatch", if_generation_not_match))
if if_metageneration_match is not None:
qs_params.append(("ifMetagenerationMatch", if_metageneration_match))
if if_metageneration_not_match is not None:
qs_params.append(("ifMetaGenerationNotMatch", if_metageneration_not_match))
upload_url += "?" + urlencode(qs_params)
self.assertEqual(upload.upload_url, upload_url)
if extra_headers is None:
self.assertEqual(upload._headers, {})
else:
self.assertEqual(upload._headers, extra_headers)
self.assertIsNot(upload._headers, extra_headers)
self.assertFalse(upload.finished)
if chunk_size is None:
if blob_chunk_size is None:
self.assertEqual(upload._chunk_size, _DEFAULT_CHUNKSIZE)
else:
self.assertEqual(upload._chunk_size, blob.chunk_size)
else:
self.assertNotEqual(blob.chunk_size, chunk_size)
self.assertEqual(upload._chunk_size, chunk_size)
self.assertIs(upload._stream, stream)
if size is None:
self.assertIsNone(upload._total_bytes)
else:
self.assertEqual(upload._total_bytes, size)
self.assertEqual(upload._content_type, content_type)
self.assertEqual(upload.resumable_url, resumable_url)
retry_strategy = upload._retry_strategy
self.assertEqual(retry_strategy.max_sleep, 64.0)
if num_retries is None:
self.assertEqual(retry_strategy.max_cumulative_retry, 600.0)
self.assertIsNone(retry_strategy.max_retries)
else:
self.assertIsNone(retry_strategy.max_cumulative_retry)
self.assertEqual(retry_strategy.max_retries, num_retries)
self.assertIs(returned_transport, transport)
# Make sure we never read from the stream.
self.assertEqual(stream.tell(), 0)
# Check the mocks.
blob._get_writable_metadata.assert_called_once_with()
payload = json.dumps(object_metadata).encode("utf-8")
expected_headers = {
"content-type": "application/json; charset=UTF-8",
"x-upload-content-type": content_type,
}
if size is not None:
expected_headers["x-upload-content-length"] = str(size)
if extra_headers is not None:
expected_headers.update(extra_headers)
transport.request.assert_called_once_with(
"POST",
upload_url,
data=payload,
headers=expected_headers,
timeout=expected_timeout,
)
def test__initiate_resumable_upload_with_custom_timeout(self):
self._initiate_resumable_helper(timeout=9.58)
def test__initiate_resumable_upload_no_size(self):
self._initiate_resumable_helper()
def test__initiate_resumable_upload_with_size(self):
self._initiate_resumable_helper(size=10000)
def test__initiate_resumable_upload_with_user_project(self):
user_project = "user-project-123"
self._initiate_resumable_helper(user_project=user_project)
def test__initiate_resumable_upload_with_kms(self):
kms_resource = (
"projects/test-project-123/"
"locations/us/"
"keyRings/test-ring/"
"cryptoKeys/test-key"
)
self._initiate_resumable_helper(kms_key_name=kms_resource)
def test__initiate_resumable_upload_with_kms_with_version(self):
kms_resource = (
"projects/test-project-123/"
"locations/us/"
"keyRings/test-ring/"
"cryptoKeys/test-key"
"cryptoKeyVersions/1"
)
self._initiate_resumable_helper(kms_key_name=kms_resource)
def test__initiate_resumable_upload_without_chunk_size(self):
self._initiate_resumable_helper(blob_chunk_size=None)
def test__initiate_resumable_upload_with_chunk_size(self):
one_mb = 1048576
self._initiate_resumable_helper(chunk_size=one_mb)
def test__initiate_resumable_upload_with_extra_headers(self):
extra_headers = {"origin": "http://not-in-kansas-anymore.invalid"}
self._initiate_resumable_helper(extra_headers=extra_headers)
def test__initiate_resumable_upload_with_retry(self):
self._initiate_resumable_helper(num_retries=11)
def test__initiate_resumable_upload_with_generation_match(self):
self._initiate_resumable_helper(
if_generation_match=4, if_metageneration_match=4
)
def test__initiate_resumable_upload_with_generation_not_match(self):
self._initiate_resumable_helper(
if_generation_not_match=4, if_metageneration_not_match=4
)
def test__initiate_resumable_upload_with_predefined_acl(self):
self._initiate_resumable_helper(predefined_acl="private")
def _make_resumable_transport(
self, headers1, headers2, headers3, total_bytes, data_corruption=False
):
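# Build a fake transport whose request() returns the initiate response, an
# intermediate 308, and then either the final 200 or raises DataCorruption.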
from google import resumable_media
fake_transport = mock.Mock(spec=["request"])
fake_response1 = self._mock_requests_response(http_client.OK, headers1)
fake_response2 = self._mock_requests_response(
resumable_media.PERMANENT_REDIRECT, headers2
)
json_body = '{{"size": "{:d}"}}'.format(total_bytes)
if data_corruption:
fake_response3 = resumable_media.DataCorruption(None)
else:
fake_response3 = self._mock_requests_response(
http_client.OK, headers3, content=json_body.encode("utf-8")
)
responses = [fake_response1, fake_response2, fake_response3]
fake_transport.request.side_effect = responses
return fake_transport, responses
@staticmethod
def _do_resumable_upload_call0(
blob,
content_type,
size=None,
predefined_acl=None,
if_generation_match=None,
if_generation_not_match=None,
if_metageneration_match=None,
if_metageneration_not_match=None,
timeout=None,
):
# The first mock transport.request() call initiates the upload.
upload_url = (
"https://storage.googleapis.com/upload/storage/v1"
+ blob.bucket.path
+ "/o?uploadType=resumable"
)
if predefined_acl is not None:
upload_url += "&predefinedAcl={}".format(predefined_acl)
expected_headers = {
"content-type": "application/json; charset=UTF-8",
"x-upload-content-type": content_type,
}
if size is not None:
expected_headers["x-upload-content-length"] = str(size)
payload = json.dumps({"name": blob.name}).encode("utf-8")
return mock.call(
"POST", upload_url, data=payload, headers=expected_headers, timeout=timeout
)
@staticmethod
def _do_resumable_upload_call1(
blob,
content_type,
data,
resumable_url,
size=None,
predefined_acl=None,
if_generation_match=None,
if_generation_not_match=None,
if_metageneration_match=None,
if_metageneration_not_match=None,
timeout=None,
):
# The second mock transport.request() call sends the first chunk.
if size is None:
content_range = "bytes 0-{:d}/*".format(blob.chunk_size - 1)
else:
content_range = "bytes 0-{:d}/{:d}".format(blob.chunk_size - 1, size)
expected_headers = {
"content-type": content_type,
"content-range": content_range,
}
payload = data[: blob.chunk_size]
return mock.call(
"PUT",
resumable_url,
data=payload,
headers=expected_headers,
timeout=timeout,
)
@staticmethod
def _do_resumable_upload_call2(
blob,
content_type,
data,
resumable_url,
total_bytes,
predefined_acl=None,
if_generation_match=None,
if_generation_not_match=None,
if_metageneration_match=None,
if_metageneration_not_match=None,
timeout=None,
):
# The third mock transport.request() call sends the last chunk.
content_range = "bytes {:d}-{:d}/{:d}".format(
blob.chunk_size, total_bytes - 1, total_bytes
)
expected_headers = {
"content-type": content_type,
"content-range": content_range,
}
payload = data[blob.chunk_size :]
return mock.call(
"PUT",
resumable_url,
data=payload,
headers=expected_headers,
timeout=timeout,
)
def _do_resumable_helper(
self,
use_size=False,
num_retries=None,
predefined_acl=None,
if_generation_match=None,
if_generation_not_match=None,
if_metageneration_match=None,
if_metageneration_not_match=None,
timeout=None,
data_corruption=False,
):
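# Shared helper: drive `_do_resumable_upload` through initiation plus two chunks
# and compare the three transport.request calls against the expected mock.call
# objects built by the _do_resumable_upload_call* helpers.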
bucket = _Bucket(name="yesterday")
blob = self._make_one(u"blob-name", bucket=bucket)
blob.chunk_size = blob._CHUNK_SIZE_MULTIPLE
self.assertIsNotNone(blob.chunk_size)
# Data to be uploaded.
data = b"<html>" + (b"A" * blob.chunk_size) + b"</html>"
total_bytes = len(data)
if use_size:
size = total_bytes
else:
size = None
# Create mocks to be checked for doing transport.
resumable_url = "http://test.invalid?upload_id=and-then-there-was-1"
headers1 = {"location": resumable_url}
headers2 = {"range": "bytes=0-{:d}".format(blob.chunk_size - 1)}
transport, responses = self._make_resumable_transport(
headers1, headers2, {}, total_bytes, data_corruption=data_corruption
)
# Create some mock arguments and call the method under test.
client = mock.Mock(_http=transport, _connection=_Connection, spec=["_http"])
client._connection.API_BASE_URL = "https://storage.googleapis.com"
stream = io.BytesIO(data)
content_type = u"text/html"
if timeout is None:
expected_timeout = self._get_default_timeout()
timeout_kwarg = {}
else:
expected_timeout = timeout
timeout_kwarg = {"timeout": timeout}
response = blob._do_resumable_upload(
client,
stream,
content_type,
size,
num_retries,
predefined_acl,
if_generation_match,
if_generation_not_match,
if_metageneration_match,
if_metageneration_not_match,
**timeout_kwarg
)
# Check the returned values.
self.assertIs(response, responses[2])
self.assertEqual(stream.tell(), total_bytes)
# Check the mocks.
call0 = self._do_resumable_upload_call0(
blob,
content_type,
size=size,
predefined_acl=predefined_acl,
if_generation_match=if_generation_match,
if_generation_not_match=if_generation_not_match,
if_metageneration_match=if_metageneration_match,
if_metageneration_not_match=if_metageneration_not_match,
timeout=expected_timeout,
)
call1 = self._do_resumable_upload_call1(
blob,
content_type,
data,
resumable_url,
size=size,
predefined_acl=predefined_acl,
if_generation_match=if_generation_match,
if_generation_not_match=if_generation_not_match,
if_metageneration_match=if_metageneration_match,
if_metageneration_not_match=if_metageneration_not_match,
timeout=expected_timeout,
)
call2 = self._do_resumable_upload_call2(
blob,
content_type,
data,
resumable_url,
total_bytes,
predefined_acl=predefined_acl,
if_generation_match=if_generation_match,
if_generation_not_match=if_generation_not_match,
if_metageneration_match=if_metageneration_match,
if_metageneration_not_match=if_metageneration_not_match,
timeout=expected_timeout,
)
self.assertEqual(transport.request.mock_calls, [call0, call1, call2])
def test__do_resumable_upload_with_custom_timeout(self):
self._do_resumable_helper(timeout=9.58)
def test__do_resumable_upload_no_size(self):
self._do_resumable_helper()
def test__do_resumable_upload_with_size(self):
self._do_resumable_helper(use_size=True)
def test__do_resumable_upload_with_retry(self):
self._do_resumable_helper(num_retries=6)
def test__do_resumable_upload_with_predefined_acl(self):
self._do_resumable_helper(predefined_acl="private")
def test__do_resumable_upload_with_data_corruption(self):
from google.resumable_media import DataCorruption
with mock.patch("google.cloud.storage.blob.Blob.delete") as patch:
with self.assertRaises(DataCorruption):
self._do_resumable_helper(data_corruption=True)
# The corrupted upload must trigger a cleanup via Blob.delete().
self.assertTrue(patch.called)
def _do_upload_helper(
self,
chunk_size=None,
num_retries=None,
predefined_acl=None,
if_generation_match=None,
if_generation_not_match=None,
if_metageneration_match=None,
if_metageneration_not_match=None,
size=None,
timeout=None,
):
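# Shared helper: verify that `_do_upload` dispatches to the multipart helper for
# payloads up to _MAX_MULTIPART_SIZE and to the resumable helper otherwise,
# forwarding the preconditions and timeout.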
from google.cloud.storage.blob import _MAX_MULTIPART_SIZE
blob = self._make_one(u"blob-name", bucket=None)
# Create a fake response.
response = mock.Mock(spec=[u"json"])
response.json.return_value = mock.sentinel.json
# Mock **both** helpers.
blob._do_multipart_upload = mock.Mock(return_value=response, spec=[])
blob._do_resumable_upload = mock.Mock(return_value=response, spec=[])
if chunk_size is None:
self.assertIsNone(blob.chunk_size)
else:
blob.chunk_size = chunk_size
self.assertIsNotNone(blob.chunk_size)
client = mock.sentinel.client
stream = mock.sentinel.stream
content_type = u"video/mp4"
if size is None:
size = 12345654321
if timeout is None:
expected_timeout = self._get_default_timeout()
timeout_kwarg = {}
else:
expected_timeout = timeout
timeout_kwarg = {"timeout": timeout}
# Make the request and check the mocks.
created_json = blob._do_upload(
client,
stream,
content_type,
size,
num_retries,
predefined_acl,
if_generation_match,
if_generation_not_match,
if_metageneration_match,
if_metageneration_not_match,
**timeout_kwarg
)
self.assertIs(created_json, mock.sentinel.json)
response.json.assert_called_once_with()
if size is not None and size <= _MAX_MULTIPART_SIZE:
blob._do_multipart_upload.assert_called_once_with(
client,
stream,
content_type,
size,
num_retries,
predefined_acl,
if_generation_match,
if_generation_not_match,
if_metageneration_match,
if_metageneration_not_match,
timeout=expected_timeout,
checksum=None,
)
blob._do_resumable_upload.assert_not_called()
else:
blob._do_multipart_upload.assert_not_called()
blob._do_resumable_upload.assert_called_once_with(
client,
stream,
content_type,
size,
num_retries,
predefined_acl,
if_generation_match,
if_generation_not_match,
if_metageneration_match,
if_metageneration_not_match,
timeout=expected_timeout,
checksum=None,
)
def test__do_upload_uses_multipart(self):
from google.cloud.storage.blob import _MAX_MULTIPART_SIZE
self._do_upload_helper(size=_MAX_MULTIPART_SIZE)
def test__do_upload_uses_multipart_w_custom_timeout(self):
from google.cloud.storage.blob import _MAX_MULTIPART_SIZE
self._do_upload_helper(size=_MAX_MULTIPART_SIZE, timeout=9.58)
def test__do_upload_uses_resumable(self):
from google.cloud.storage.blob import _MAX_MULTIPART_SIZE
chunk_size = 256 * 1024 # 256KB
self._do_upload_helper(chunk_size=chunk_size, size=_MAX_MULTIPART_SIZE + 1)
def test__do_upload_uses_resumable_w_custom_timeout(self):
from google.cloud.storage.blob import _MAX_MULTIPART_SIZE
chunk_size = 256 * 1024 # 256KB
self._do_upload_helper(
chunk_size=chunk_size, size=_MAX_MULTIPART_SIZE + 1, timeout=9.58
)
def test__do_upload_with_retry(self):
self._do_upload_helper(num_retries=20)
def _upload_from_file_helper(self, side_effect=None, **kwargs):
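# Shared helper: patch `_do_upload`, call `upload_from_file`, and verify the
# forwarded arguments plus the `updated` property taken from the returned JSON.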
from google.cloud._helpers import UTC
blob = self._make_one("blob-name", bucket=None)
# Mock low-level upload helper on blob (it is tested elsewhere).
created_json = {"updated": "2017-01-01T09:09:09.081Z"}
blob._do_upload = mock.Mock(return_value=created_json, spec=[])
if side_effect is not None:
blob._do_upload.side_effect = side_effect
# Make sure `updated` is empty before the request.
self.assertIsNone(blob.updated)
data = b"data is here"
stream = io.BytesIO(data)
stream.seek(2) # Not at zero.
content_type = u"font/woff"
client = mock.sentinel.client
predefined_acl = kwargs.get("predefined_acl", None)
if_generation_match = kwargs.get("if_generation_match", None)
if_generation_not_match = kwargs.get("if_generation_not_match", None)
if_metageneration_match = kwargs.get("if_metageneration_match", None)
if_metageneration_not_match = kwargs.get("if_metageneration_not_match", None)
ret_val = blob.upload_from_file(
stream, size=len(data), content_type=content_type, client=client, **kwargs
)
# Check the response and side-effects.
self.assertIsNone(ret_val)
new_updated = datetime.datetime(2017, 1, 1, 9, 9, 9, 81000, tzinfo=UTC)
self.assertEqual(blob.updated, new_updated)
expected_timeout = kwargs.get("timeout", self._get_default_timeout())
# Check the mock.
num_retries = kwargs.get("num_retries")
blob._do_upload.assert_called_once_with(
client,
stream,
content_type,
len(data),
num_retries,
predefined_acl,
if_generation_match,
if_generation_not_match,
if_metageneration_match,
if_metageneration_not_match,
timeout=expected_timeout,
checksum=None,
)
return stream
def test_upload_from_file_success(self):
stream = self._upload_from_file_helper(predefined_acl="private")
assert stream.tell() == 2
@mock.patch("warnings.warn")
def test_upload_from_file_with_retries(self, mock_warn):
from google.cloud.storage import blob as blob_module
self._upload_from_file_helper(num_retries=20)
mock_warn.assert_called_once_with(
blob_module._NUM_RETRIES_MESSAGE, DeprecationWarning, stacklevel=2
)
def test_upload_from_file_with_rewind(self):
stream = self._upload_from_file_helper(rewind=True)
assert stream.tell() == 0
def test_upload_from_file_with_custom_timeout(self):
self._upload_from_file_helper(timeout=9.58)
def test_upload_from_file_failure(self):
import requests
from google.resumable_media import InvalidResponse
from google.cloud import exceptions
message = "Someone is already in this spot."
response = requests.Response()
response.status_code = http_client.CONFLICT
response.request = requests.Request("POST", "http://example.com").prepare()
side_effect = InvalidResponse(response, message)
with self.assertRaises(exceptions.Conflict) as exc_info:
self._upload_from_file_helper(side_effect=side_effect)
self.assertIn(message, exc_info.exception.message)
self.assertEqual(exc_info.exception.errors, [])
def _do_upload_mock_call_helper(
self, blob, client, content_type, size, timeout=None
):
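# Verify the single `_do_upload` call recorded by an upload_from_* test and
# return the stream that was passed to it.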
self.assertEqual(blob._do_upload.call_count, 1)
mock_call = blob._do_upload.mock_calls[0]
call_name, pos_args, kwargs = mock_call
self.assertEqual(call_name, "")
self.assertEqual(len(pos_args), 10)
self.assertEqual(pos_args[0], client)
self.assertEqual(pos_args[2], content_type)
self.assertEqual(pos_args[3], size)
self.assertIsNone(pos_args[4]) # num_retries
self.assertIsNone(pos_args[5]) # predefined_acl
self.assertIsNone(pos_args[6]) # if_generation_match
self.assertIsNone(pos_args[7]) # if_generation_not_match
self.assertIsNone(pos_args[8]) # if_metageneration_match
self.assertIsNone(pos_args[9]) # if_metageneration_not_match
expected_timeout = self._get_default_timeout() if timeout is None else timeout
self.assertEqual(kwargs, {"timeout": expected_timeout, "checksum": None})
return pos_args[1]
def test_upload_from_filename(self):
from google.cloud._testing import _NamedTemporaryFile
blob = self._make_one("blob-name", bucket=None)
# Mock low-level upload helper on blob (it is tested elsewhere).
created_json = {"metadata": {"mint": "ice-cream"}}
blob._do_upload = mock.Mock(return_value=created_json, spec=[])
# Make sure `metadata` is empty before the request.
self.assertIsNone(blob.metadata)
data = b"soooo much data"
content_type = u"image/svg+xml"
client = mock.sentinel.client
with _NamedTemporaryFile() as temp:
with open(temp.name, "wb") as file_obj:
file_obj.write(data)
ret_val = blob.upload_from_filename(
temp.name, content_type=content_type, client=client
)
# Check the response and side-effects.
self.assertIsNone(ret_val)
self.assertEqual(blob.metadata, created_json["metadata"])
# Check the mock.
stream = self._do_upload_mock_call_helper(blob, client, content_type, len(data))
self.assertTrue(stream.closed)
self.assertEqual(stream.mode, "rb")
self.assertEqual(stream.name, temp.name)
def test_upload_from_filename_w_custom_timeout(self):
from google.cloud._testing import _NamedTemporaryFile
blob = self._make_one("blob-name", bucket=None)
# Mock low-level upload helper on blob (it is tested elsewhere).
created_json = {"metadata": {"mint": "ice-cream"}}
blob._do_upload = mock.Mock(return_value=created_json, spec=[])
# Make sure `metadata` is empty before the request.
self.assertIsNone(blob.metadata)
data = b"soooo much data"
content_type = u"image/svg+xml"
client = mock.sentinel.client
with _NamedTemporaryFile() as temp:
with open(temp.name, "wb") as file_obj:
file_obj.write(data)
blob.upload_from_filename(
temp.name, content_type=content_type, client=client, timeout=9.58
)
# Check the mock.
self._do_upload_mock_call_helper(
blob, client, content_type, len(data), timeout=9.58
)
def _upload_from_string_helper(self, data, **kwargs):
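# Shared helper: patch `_do_upload`, call `upload_from_string`, and verify a
# text/plain upload of the UTF-8-encoded payload via an in-memory stream.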
from google.cloud._helpers import _to_bytes
blob = self._make_one("blob-name", bucket=None)
# Mock low-level upload helper on blob (it is tested elsewhere).
created_json = {"componentCount": "5"}
blob._do_upload = mock.Mock(return_value=created_json, spec=[])
# Make sure `component_count` is empty before the request.
self.assertIsNone(blob.component_count)
client = mock.sentinel.client
ret_val = blob.upload_from_string(data, client=client, **kwargs)
# Check the response and side-effects.
self.assertIsNone(ret_val)
self.assertEqual(blob.component_count, 5)
# Check the mock.
payload = _to_bytes(data, encoding="utf-8")
stream = self._do_upload_mock_call_helper(
blob,
client,
"text/plain",
len(payload),
kwargs.get("timeout", self._get_default_timeout()),
)
self.assertIsInstance(stream, io.BytesIO)
self.assertEqual(stream.getvalue(), payload)
def test_upload_from_string_w_custom_timeout(self):
data = b"XB]jb\xb8tad\xe0"
self._upload_from_string_helper(data, timeout=9.58)
def test_upload_from_string_w_bytes(self):
data = b"XB]jb\xb8tad\xe0"
self._upload_from_string_helper(data)
def test_upload_from_string_w_text(self):
data = u"\N{snowman} \N{sailboat}"
self._upload_from_string_helper(data)
def _create_resumable_upload_session_helper(
self, origin=None, side_effect=None, timeout=None
):
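# Shared helper: verify that `create_resumable_upload_session` POSTs the
# resumable-initiate request and returns the session URL from the response's
# `location` header.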
bucket = _Bucket(name="alex-trebek")
blob = self._make_one("blob-name", bucket=bucket)
chunk_size = 99 * blob._CHUNK_SIZE_MULTIPLE
blob.chunk_size = chunk_size
# Create mocks to be checked for doing transport.
resumable_url = "http://test.invalid?upload_id=clean-up-everybody"
response_headers = {"location": resumable_url}
transport = self._mock_transport(http_client.OK, response_headers)
if side_effect is not None:
transport.request.side_effect = side_effect
# Create some mock arguments and call the method under test.
content_type = u"text/plain"
size = 10000
client = mock.Mock(_http=transport, _connection=_Connection, spec=[u"_http"])
client._connection.API_BASE_URL = "https://storage.googleapis.com"
if timeout is None:
expected_timeout = self._get_default_timeout()
timeout_kwarg = {}
else:
expected_timeout = timeout
timeout_kwarg = {"timeout": timeout}
new_url = blob.create_resumable_upload_session(
content_type=content_type,
size=size,
origin=origin,
client=client,
**timeout_kwarg
)
# Check the returned value and (lack of) side-effect.
self.assertEqual(new_url, resumable_url)
self.assertEqual(blob.chunk_size, chunk_size)
# Check the mocks.
upload_url = (
"https://storage.googleapis.com/upload/storage/v1"
+ bucket.path
+ "/o?uploadType=resumable"
)
payload = b'{"name": "blob-name"}'
expected_headers = {
"content-type": "application/json; charset=UTF-8",
"x-upload-content-length": str(size),
"x-upload-content-type": content_type,
}
if origin is not None:
expected_headers["Origin"] = origin
transport.request.assert_called_once_with(
"POST",
upload_url,
data=payload,
headers=expected_headers,
timeout=expected_timeout,
)
def test_create_resumable_upload_session(self):
self._create_resumable_upload_session_helper()
def test_create_resumable_upload_session_with_custom_timeout(self):
self._create_resumable_upload_session_helper(timeout=9.58)
def test_create_resumable_upload_session_with_origin(self):
self._create_resumable_upload_session_helper(origin="http://google.com")
def test_create_resumable_upload_session_with_failure(self):
from google.resumable_media import InvalidResponse
from google.cloud import exceptions
message = "5-oh-3 woe is me."
response = self._mock_requests_response(
status_code=http_client.SERVICE_UNAVAILABLE, headers={}
)
side_effect = InvalidResponse(response, message)
with self.assertRaises(exceptions.ServiceUnavailable) as exc_info:
self._create_resumable_upload_session_helper(side_effect=side_effect)
self.assertIn(message, exc_info.exception.message)
self.assertEqual(exc_info.exception.errors, [])
def test_get_iam_policy(self):
from google.cloud.storage.iam import STORAGE_OWNER_ROLE
from google.cloud.storage.iam import STORAGE_EDITOR_ROLE
from google.cloud.storage.iam import STORAGE_VIEWER_ROLE
from google.api_core.iam import Policy
BLOB_NAME = "blob-name"
PATH = "/b/name/o/%s" % (BLOB_NAME,)
ETAG = "DEADBEEF"
VERSION = 1
OWNER1 = "user:phred@example.com"
OWNER2 = "group:cloud-logs@google.com"
EDITOR1 = "domain:google.com"
EDITOR2 = "user:phred@example.com"
VIEWER1 = "serviceAccount:1234-abcdef@service.example.com"
VIEWER2 = "user:phred@example.com"
RETURNED = {
"resourceId": PATH,
"etag": ETAG,
"version": VERSION,
"bindings": [
{"role": STORAGE_OWNER_ROLE, "members": [OWNER1, OWNER2]},
{"role": STORAGE_EDITOR_ROLE, "members": [EDITOR1, EDITOR2]},
{"role": STORAGE_VIEWER_ROLE, "members": [VIEWER1, VIEWER2]},
],
}
after = ({"status": http_client.OK}, RETURNED)
EXPECTED = {
binding["role"]: set(binding["members"]) for binding in RETURNED["bindings"]
}
connection = _Connection(after)
client = _Client(connection)
bucket = _Bucket(client=client)
blob = self._make_one(BLOB_NAME, bucket=bucket)
policy = blob.get_iam_policy(timeout=42)
self.assertIsInstance(policy, Policy)
self.assertEqual(policy.etag, RETURNED["etag"])
self.assertEqual(policy.version, RETURNED["version"])
self.assertEqual(dict(policy), EXPECTED)
kw = connection._requested
self.assertEqual(len(kw), 1)
self.assertEqual(
kw[0],
{
"method": "GET",
"path": "%s/iam" % (PATH,),
"query_params": {},
"_target_object": None,
"timeout": 42,
},
)
def test_get_iam_policy_w_requested_policy_version(self):
from google.cloud.storage.iam import STORAGE_OWNER_ROLE
BLOB_NAME = "blob-name"
PATH = "/b/name/o/%s" % (BLOB_NAME,)
ETAG = "DEADBEEF"
VERSION = 1
OWNER1 = "user:phred@example.com"
OWNER2 = "group:cloud-logs@google.com"
RETURNED = {
"resourceId": PATH,
"etag": ETAG,
"version": VERSION,
"bindings": [{"role": STORAGE_OWNER_ROLE, "members": [OWNER1, OWNER2]}],
}
after = ({"status": http_client.OK}, RETURNED)
connection = _Connection(after)
client = _Client(connection)
bucket = _Bucket(client=client)
blob = self._make_one(BLOB_NAME, bucket=bucket)
blob.get_iam_policy(requested_policy_version=3)
kw = connection._requested
self.assertEqual(len(kw), 1)
self.assertEqual(
kw[0],
{
"method": "GET",
"path": "%s/iam" % (PATH,),
"query_params": {"optionsRequestedPolicyVersion": 3},
"_target_object": None,
"timeout": self._get_default_timeout(),
},
)
def test_get_iam_policy_w_user_project(self):
from google.api_core.iam import Policy
BLOB_NAME = "blob-name"
USER_PROJECT = "user-project-123"
PATH = "/b/name/o/%s" % (BLOB_NAME,)
ETAG = "DEADBEEF"
VERSION = 1
RETURNED = {
"resourceId": PATH,
"etag": ETAG,
"version": VERSION,
"bindings": [],
}
after = ({"status": http_client.OK}, RETURNED)
EXPECTED = {}
connection = _Connection(after)
client = _Client(connection)
bucket = _Bucket(client=client, user_project=USER_PROJECT)
blob = self._make_one(BLOB_NAME, bucket=bucket)
policy = blob.get_iam_policy()
self.assertIsInstance(policy, Policy)
self.assertEqual(policy.etag, RETURNED["etag"])
self.assertEqual(policy.version, RETURNED["version"])
self.assertEqual(dict(policy), EXPECTED)
kw = connection._requested
self.assertEqual(len(kw), 1)
self.assertEqual(
kw[0],
{
"method": "GET",
"path": "%s/iam" % (PATH,),
"query_params": {"userProject": USER_PROJECT},
"_target_object": None,
"timeout": self._get_default_timeout(),
},
)
def test_set_iam_policy(self):
import operator
from google.cloud.storage.iam import STORAGE_OWNER_ROLE
from google.cloud.storage.iam import STORAGE_EDITOR_ROLE
from google.cloud.storage.iam import STORAGE_VIEWER_ROLE
from google.api_core.iam import Policy
BLOB_NAME = "blob-name"
PATH = "/b/name/o/%s" % (BLOB_NAME,)
ETAG = "DEADBEEF"
VERSION = 1
OWNER1 = "user:phred@example.com"
OWNER2 = "group:cloud-logs@google.com"
EDITOR1 = "domain:google.com"
EDITOR2 = "user:phred@example.com"
VIEWER1 = "serviceAccount:1234-abcdef@service.example.com"
VIEWER2 = "user:phred@example.com"
BINDINGS = [
{"role": STORAGE_OWNER_ROLE, "members": [OWNER1, OWNER2]},
{"role": STORAGE_EDITOR_ROLE, "members": [EDITOR1, EDITOR2]},
{"role": STORAGE_VIEWER_ROLE, "members": [VIEWER1, VIEWER2]},
]
RETURNED = {"etag": ETAG, "version": VERSION, "bindings": BINDINGS}
after = ({"status": http_client.OK}, RETURNED)
policy = Policy()
for binding in BINDINGS:
policy[binding["role"]] = binding["members"]
connection = _Connection(after)
client = _Client(connection)
bucket = _Bucket(client=client)
blob = self._make_one(BLOB_NAME, bucket=bucket)
returned = blob.set_iam_policy(policy, timeout=42)
self.assertEqual(returned.etag, ETAG)
self.assertEqual(returned.version, VERSION)
self.assertEqual(dict(returned), dict(policy))
kw = connection._requested
self.assertEqual(len(kw), 1)
self.assertEqual(kw[0]["method"], "PUT")
self.assertEqual(kw[0]["path"], "%s/iam" % (PATH,))
self.assertEqual(kw[0]["query_params"], {})
self.assertEqual(kw[0]["timeout"], 42)
sent = kw[0]["data"]
self.assertEqual(sent["resourceId"], PATH)
self.assertEqual(len(sent["bindings"]), len(BINDINGS))
key = operator.itemgetter("role")
for found, expected in zip(
sorted(sent["bindings"], key=key), sorted(BINDINGS, key=key)
):
self.assertEqual(found["role"], expected["role"])
self.assertEqual(sorted(found["members"]), sorted(expected["members"]))
def test_set_iam_policy_w_user_project(self):
from google.api_core.iam import Policy
BLOB_NAME = "blob-name"
USER_PROJECT = "user-project-123"
PATH = "/b/name/o/%s" % (BLOB_NAME,)
ETAG = "DEADBEEF"
VERSION = 1
BINDINGS = []
RETURNED = {"etag": ETAG, "version": VERSION, "bindings": BINDINGS}
after = ({"status": http_client.OK}, RETURNED)
policy = Policy()
connection = _Connection(after)
client = _Client(connection)
bucket = _Bucket(client=client, user_project=USER_PROJECT)
blob = self._make_one(BLOB_NAME, bucket=bucket)
returned = blob.set_iam_policy(policy)
self.assertEqual(returned.etag, ETAG)
self.assertEqual(returned.version, VERSION)
self.assertEqual(dict(returned), dict(policy))
kw = connection._requested
self.assertEqual(len(kw), 1)
self.assertEqual(kw[0]["method"], "PUT")
self.assertEqual(kw[0]["path"], "%s/iam" % (PATH,))
self.assertEqual(kw[0]["query_params"], {"userProject": USER_PROJECT})
self.assertEqual(kw[0]["data"], {"resourceId": PATH})
def test_test_iam_permissions(self):
from google.cloud.storage.iam import STORAGE_OBJECTS_LIST
from google.cloud.storage.iam import STORAGE_BUCKETS_GET
from google.cloud.storage.iam import STORAGE_BUCKETS_UPDATE
BLOB_NAME = "blob-name"
PATH = "/b/name/o/%s" % (BLOB_NAME,)
PERMISSIONS = [
STORAGE_OBJECTS_LIST,
STORAGE_BUCKETS_GET,
STORAGE_BUCKETS_UPDATE,
]
ALLOWED = PERMISSIONS[1:]
RETURNED = {"permissions": ALLOWED}
after = ({"status": http_client.OK}, RETURNED)
connection = _Connection(after)
client = _Client(connection)
bucket = _Bucket(client=client)
blob = self._make_one(BLOB_NAME, bucket=bucket)
allowed = blob.test_iam_permissions(PERMISSIONS, timeout=42)
self.assertEqual(allowed, ALLOWED)
kw = connection._requested
self.assertEqual(len(kw), 1)
self.assertEqual(kw[0]["method"], "GET")
self.assertEqual(kw[0]["path"], "%s/iam/testPermissions" % (PATH,))
self.assertEqual(kw[0]["query_params"], {"permissions": PERMISSIONS})
self.assertEqual(kw[0]["timeout"], 42)
def test_test_iam_permissions_w_user_project(self):
from google.cloud.storage.iam import STORAGE_OBJECTS_LIST
from google.cloud.storage.iam import STORAGE_BUCKETS_GET
from google.cloud.storage.iam import STORAGE_BUCKETS_UPDATE
BLOB_NAME = "blob-name"
USER_PROJECT = "user-project-123"
PATH = "/b/name/o/%s" % (BLOB_NAME,)
PERMISSIONS = [
STORAGE_OBJECTS_LIST,
STORAGE_BUCKETS_GET,
STORAGE_BUCKETS_UPDATE,
]
ALLOWED = PERMISSIONS[1:]
RETURNED = {"permissions": ALLOWED}
after = ({"status": http_client.OK}, RETURNED)
connection = _Connection(after)
client = _Client(connection)
bucket = _Bucket(client=client, user_project=USER_PROJECT)
blob = self._make_one(BLOB_NAME, bucket=bucket)
allowed = blob.test_iam_permissions(PERMISSIONS)
self.assertEqual(allowed, ALLOWED)
kw = connection._requested
self.assertEqual(len(kw), 1)
self.assertEqual(kw[0]["method"], "GET")
self.assertEqual(kw[0]["path"], "%s/iam/testPermissions" % (PATH,))
self.assertEqual(
kw[0]["query_params"],
{"permissions": PERMISSIONS, "userProject": USER_PROJECT},
)
self.assertEqual(kw[0]["timeout"], self._get_default_timeout())
def test_make_public(self):
from google.cloud.storage.acl import _ACLEntity
BLOB_NAME = "blob-name"
permissive = [{"entity": "allUsers", "role": _ACLEntity.READER_ROLE}]
after = ({"status": http_client.OK}, {"acl": permissive})
connection = _Connection(after)
client = _Client(connection)
bucket = _Bucket(client=client)
blob = self._make_one(BLOB_NAME, bucket=bucket)
blob.acl.loaded = True
blob.make_public()
self.assertEqual(list(blob.acl), permissive)
kw = connection._requested
self.assertEqual(len(kw), 1)
self.assertEqual(kw[0]["method"], "PATCH")
self.assertEqual(kw[0]["path"], "/b/name/o/%s" % BLOB_NAME)
self.assertEqual(kw[0]["data"], {"acl": permissive})
self.assertEqual(kw[0]["query_params"], {"projection": "full"})
def test_make_private(self):
BLOB_NAME = "blob-name"
no_permissions = []
after = ({"status": http_client.OK}, {"acl": no_permissions})
connection = _Connection(after)
client = _Client(connection)
bucket = _Bucket(client=client)
blob = self._make_one(BLOB_NAME, bucket=bucket)
blob.acl.loaded = True
blob.make_private()
self.assertEqual(list(blob.acl), no_permissions)
kw = connection._requested
self.assertEqual(len(kw), 1)
self.assertEqual(kw[0]["method"], "PATCH")
self.assertEqual(kw[0]["path"], "/b/name/o/%s" % BLOB_NAME)
self.assertEqual(kw[0]["data"], {"acl": no_permissions})
self.assertEqual(kw[0]["query_params"], {"projection": "full"})
def test_compose_wo_content_type_set(self):
SOURCE_1 = "source-1"
SOURCE_2 = "source-2"
DESTINATION = "destination"
RESOURCE = {}
after = ({"status": http_client.OK}, RESOURCE)
connection = _Connection(after)
client = _Client(connection)
bucket = _Bucket(client=client)
source_1 = self._make_one(SOURCE_1, bucket=bucket)
source_2 = self._make_one(SOURCE_2, bucket=bucket)
destination = self._make_one(DESTINATION, bucket=bucket)
# no destination.content_type set
destination.compose(sources=[source_1, source_2])
self.assertIsNone(destination.content_type)
kw = connection._requested
self.assertEqual(len(kw), 1)
self.assertEqual(
kw[0],
{
"method": "POST",
"path": "/b/name/o/%s/compose" % DESTINATION,
"query_params": {},
"data": {
"sourceObjects": [{"name": source_1.name}, {"name": source_2.name}],
"destination": {},
},
"_target_object": destination,
"timeout": self._get_default_timeout(),
"retry": DEFAULT_RETRY_IF_GENERATION_SPECIFIED,
},
)
def test_compose_minimal_w_user_project(self):
SOURCE_1 = "source-1"
SOURCE_2 = "source-2"
DESTINATION = "destination"
RESOURCE = {"etag": "DEADBEEF"}
USER_PROJECT = "user-project-123"
after = ({"status": http_client.OK}, RESOURCE)
connection = _Connection(after)
client = _Client(connection)
bucket = _Bucket(client=client, user_project=USER_PROJECT)
source_1 = self._make_one(SOURCE_1, bucket=bucket)
source_2 = self._make_one(SOURCE_2, bucket=bucket)
destination = self._make_one(DESTINATION, bucket=bucket)
destination.content_type = "text/plain"
destination.compose(sources=[source_1, source_2], timeout=42)
self.assertEqual(destination.etag, "DEADBEEF")
kw = connection._requested
self.assertEqual(len(kw), 1)
self.assertEqual(
kw[0],
{
"method": "POST",
"path": "/b/name/o/%s/compose" % DESTINATION,
"query_params": {"userProject": USER_PROJECT},
"data": {
"sourceObjects": [{"name": source_1.name}, {"name": source_2.name}],
"destination": {"contentType": "text/plain"},
},
"_target_object": destination,
"timeout": 42,
"retry": DEFAULT_RETRY_IF_GENERATION_SPECIFIED,
},
)
def test_compose_w_additional_property_changes(self):
SOURCE_1 = "source-1"
SOURCE_2 = "source-2"
DESTINATION = "destination"
RESOURCE = {"etag": "DEADBEEF"}
after = ({"status": http_client.OK}, RESOURCE)
connection = _Connection(after)
client = _Client(connection)
bucket = _Bucket(client=client)
source_1 = self._make_one(SOURCE_1, bucket=bucket)
source_2 = self._make_one(SOURCE_2, bucket=bucket)
destination = self._make_one(DESTINATION, bucket=bucket)
destination.content_type = "text/plain"
destination.content_language = "en-US"
destination.metadata = {"my-key": "my-value"}
destination.compose(sources=[source_1, source_2])
self.assertEqual(destination.etag, "DEADBEEF")
kw = connection._requested
self.assertEqual(len(kw), 1)
self.assertEqual(
kw[0],
{
"method": "POST",
"path": "/b/name/o/%s/compose" % DESTINATION,
"query_params": {},
"data": {
"sourceObjects": [{"name": source_1.name}, {"name": source_2.name}],
"destination": {
"contentType": "text/plain",
"contentLanguage": "en-US",
"metadata": {"my-key": "my-value"},
},
},
"_target_object": destination,
"timeout": self._get_default_timeout(),
"retry": DEFAULT_RETRY_IF_GENERATION_SPECIFIED,
},
)
def test_compose_w_generation_match(self):
SOURCE_1 = "source-1"
SOURCE_2 = "source-2"
DESTINATION = "destination"
RESOURCE = {}
GENERATION_NUMBERS = [6, 9]
METAGENERATION_NUMBERS = [7, 1]
after = ({"status": http_client.OK}, RESOURCE)
connection = _Connection(after)
client = _Client(connection)
bucket = _Bucket(client=client)
source_1 = self._make_one(SOURCE_1, bucket=bucket)
source_2 = self._make_one(SOURCE_2, bucket=bucket)
destination = self._make_one(DESTINATION, bucket=bucket)
destination.compose(
sources=[source_1, source_2],
if_generation_match=GENERATION_NUMBERS,
if_metageneration_match=METAGENERATION_NUMBERS,
)
kw = connection._requested
self.assertEqual(len(kw), 1)
self.assertEqual(
kw[0],
{
"method": "POST",
"path": "/b/name/o/%s/compose" % DESTINATION,
"query_params": {},
"data": {
"sourceObjects": [
{
"name": source_1.name,
"objectPreconditions": {
"ifGenerationMatch": GENERATION_NUMBERS[0],
"ifMetagenerationMatch": METAGENERATION_NUMBERS[0],
},
},
{
"name": source_2.name,
"objectPreconditions": {
"ifGenerationMatch": GENERATION_NUMBERS[1],
"ifMetagenerationMatch": METAGENERATION_NUMBERS[1],
},
},
],
"destination": {},
},
"_target_object": destination,
"timeout": self._get_default_timeout(),
"retry": DEFAULT_RETRY_IF_GENERATION_SPECIFIED,
},
)
def test_compose_w_generation_match_bad_length(self):
SOURCE_1 = "source-1"
SOURCE_2 = "source-2"
DESTINATION = "destination"
GENERATION_NUMBERS = [6]
METAGENERATION_NUMBERS = [7]
after = ({"status": http_client.OK}, {})
connection = _Connection(after)
client = _Client(connection)
bucket = _Bucket(client=client)
source_1 = self._make_one(SOURCE_1, bucket=bucket)
source_2 = self._make_one(SOURCE_2, bucket=bucket)
destination = self._make_one(DESTINATION, bucket=bucket)
with self.assertRaises(ValueError):
destination.compose(
sources=[source_1, source_2], if_generation_match=GENERATION_NUMBERS
)
with self.assertRaises(ValueError):
destination.compose(
sources=[source_1, source_2],
if_metageneration_match=METAGENERATION_NUMBERS,
)
def test_compose_w_generation_match_nones(self):
SOURCE_1 = "source-1"
SOURCE_2 = "source-2"
DESTINATION = "destination"
GENERATION_NUMBERS = [6, None]
after = ({"status": http_client.OK}, {})
connection = _Connection(after)
client = _Client(connection)
bucket = _Bucket(client=client)
source_1 = self._make_one(SOURCE_1, bucket=bucket)
source_2 = self._make_one(SOURCE_2, bucket=bucket)
destination = self._make_one(DESTINATION, bucket=bucket)
destination.compose(
sources=[source_1, source_2], if_generation_match=GENERATION_NUMBERS
)
kw = connection._requested
self.assertEqual(len(kw), 1)
self.assertEqual(
kw[0],
{
"method": "POST",
"path": "/b/name/o/%s/compose" % DESTINATION,
"query_params": {},
"data": {
"sourceObjects": [
{
"name": source_1.name,
"objectPreconditions": {
"ifGenerationMatch": GENERATION_NUMBERS[0]
},
},
{"name": source_2.name},
],
"destination": {},
},
"_target_object": destination,
"timeout": self._get_default_timeout(),
"retry": DEFAULT_RETRY_IF_GENERATION_SPECIFIED,
},
)
def test_rewrite_response_without_resource(self):
SOURCE_BLOB = "source"
DEST_BLOB = "dest"
DEST_BUCKET = "other-bucket"
TOKEN = "TOKEN"
RESPONSE = {
"totalBytesRewritten": 33,
"objectSize": 42,
"done": False,
"rewriteToken": TOKEN,
}
response = ({"status": http_client.OK}, RESPONSE)
connection = _Connection(response)
client = _Client(connection)
source_bucket = _Bucket(client=client)
source_blob = self._make_one(SOURCE_BLOB, bucket=source_bucket)
dest_bucket = _Bucket(client=client, name=DEST_BUCKET)
dest_blob = self._make_one(DEST_BLOB, bucket=dest_bucket)
token, rewritten, size = dest_blob.rewrite(source_blob)
self.assertEqual(token, TOKEN)
self.assertEqual(rewritten, 33)
self.assertEqual(size, 42)
def test_rewrite_w_generations(self):
SOURCE_BLOB = "source"
SOURCE_GENERATION = 42
DEST_BLOB = "dest"
DEST_BUCKET = "other-bucket"
DEST_GENERATION = 43
TOKEN = "TOKEN"
RESPONSE = {
"totalBytesRewritten": 33,
"objectSize": 42,
"done": False,
"rewriteToken": TOKEN,
}
response = ({"status": http_client.OK}, RESPONSE)
connection = _Connection(response)
client = _Client(connection)
source_bucket = _Bucket(client=client)
source_blob = self._make_one(
SOURCE_BLOB, bucket=source_bucket, generation=SOURCE_GENERATION
)
dest_bucket = _Bucket(client=client, name=DEST_BUCKET)
dest_blob = self._make_one(
DEST_BLOB, bucket=dest_bucket, generation=DEST_GENERATION
)
token, rewritten, size = dest_blob.rewrite(source_blob, timeout=42)
self.assertEqual(token, TOKEN)
self.assertEqual(rewritten, 33)
self.assertEqual(size, 42)
(kw,) = connection._requested
self.assertEqual(kw["method"], "POST")
self.assertEqual(
kw["path"],
"/b/%s/o/%s/rewriteTo/b/%s/o/%s"
% (
(source_bucket.name, source_blob.name, dest_bucket.name, dest_blob.name)
),
)
self.assertEqual(kw["query_params"], {"sourceGeneration": SOURCE_GENERATION})
self.assertEqual(kw["timeout"], 42)
def test_rewrite_w_generation_match(self):
SOURCE_BLOB = "source"
SOURCE_GENERATION_NUMBER = 42
DEST_BLOB = "dest"
DEST_BUCKET = "other-bucket"
DEST_GENERATION_NUMBER = 16
TOKEN = "TOKEN"
RESPONSE = {
"totalBytesRewritten": 33,
"objectSize": 42,
"done": False,
"rewriteToken": TOKEN,
}
response = ({"status": http_client.OK}, RESPONSE)
connection = _Connection(response)
client = _Client(connection)
source_bucket = _Bucket(client=client)
source_blob = self._make_one(
SOURCE_BLOB, bucket=source_bucket, generation=SOURCE_GENERATION_NUMBER
)
dest_bucket = _Bucket(client=client, name=DEST_BUCKET)
dest_blob = self._make_one(
DEST_BLOB, bucket=dest_bucket, generation=DEST_GENERATION_NUMBER
)
token, rewritten, size = dest_blob.rewrite(
source_blob,
timeout=42,
if_generation_match=dest_blob.generation,
if_source_generation_match=source_blob.generation,
)
(kw,) = connection._requested
self.assertEqual(kw["method"], "POST")
self.assertEqual(
kw["path"],
"/b/%s/o/%s/rewriteTo/b/%s/o/%s"
% (
(source_bucket.name, source_blob.name, dest_bucket.name, dest_blob.name)
),
)
self.assertEqual(
kw["query_params"],
{
"ifSourceGenerationMatch": SOURCE_GENERATION_NUMBER,
"ifGenerationMatch": DEST_GENERATION_NUMBER,
"sourceGeneration": SOURCE_GENERATION_NUMBER,
},
)
self.assertEqual(kw["timeout"], 42)
def test_rewrite_other_bucket_other_name_no_encryption_partial(self):
SOURCE_BLOB = "source"
DEST_BLOB = "dest"
DEST_BUCKET = "other-bucket"
TOKEN = "TOKEN"
RESPONSE = {
"totalBytesRewritten": 33,
"objectSize": 42,
"done": False,
"rewriteToken": TOKEN,
"resource": {"etag": "DEADBEEF"},
}
response = ({"status": http_client.OK}, RESPONSE)
connection = _Connection(response)
client = _Client(connection)
source_bucket = _Bucket(client=client)
source_blob = self._make_one(SOURCE_BLOB, bucket=source_bucket)
dest_bucket = _Bucket(client=client, name=DEST_BUCKET)
dest_blob = self._make_one(DEST_BLOB, bucket=dest_bucket)
token, rewritten, size = dest_blob.rewrite(source_blob)
self.assertEqual(token, TOKEN)
self.assertEqual(rewritten, 33)
self.assertEqual(size, 42)
kw = connection._requested
self.assertEqual(len(kw), 1)
self.assertEqual(kw[0]["method"], "POST")
PATH = "/b/name/o/%s/rewriteTo/b/%s/o/%s" % (
SOURCE_BLOB,
DEST_BUCKET,
DEST_BLOB,
)
self.assertEqual(kw[0]["path"], PATH)
self.assertEqual(kw[0]["query_params"], {})
SENT = {}
self.assertEqual(kw[0]["data"], SENT)
self.assertEqual(kw[0]["timeout"], self._get_default_timeout())
headers = {key.title(): str(value) for key, value in kw[0]["headers"].items()}
self.assertNotIn("X-Goog-Copy-Source-Encryption-Algorithm", headers)
self.assertNotIn("X-Goog-Copy-Source-Encryption-Key", headers)
self.assertNotIn("X-Goog-Copy-Source-Encryption-Key-Sha256", headers)
self.assertNotIn("X-Goog-Encryption-Algorithm", headers)
self.assertNotIn("X-Goog-Encryption-Key", headers)
self.assertNotIn("X-Goog-Encryption-Key-Sha256", headers)
def test_rewrite_same_name_no_old_key_new_key_done_w_user_project(self):
KEY = b"01234567890123456789012345678901" # 32 bytes
KEY_B64 = base64.b64encode(KEY).rstrip().decode("ascii")
KEY_HASH = hashlib.sha256(KEY).digest()
KEY_HASH_B64 = base64.b64encode(KEY_HASH).rstrip().decode("ascii")
BLOB_NAME = "blob"
USER_PROJECT = "user-project-123"
RESPONSE = {
"totalBytesRewritten": 42,
"objectSize": 42,
"done": True,
"resource": {"etag": "DEADBEEF"},
}
response = ({"status": http_client.OK}, RESPONSE)
connection = _Connection(response)
client = _Client(connection)
bucket = _Bucket(client=client, user_project=USER_PROJECT)
plain = self._make_one(BLOB_NAME, bucket=bucket)
encrypted = self._make_one(BLOB_NAME, bucket=bucket, encryption_key=KEY)
token, rewritten, size = encrypted.rewrite(plain)
self.assertIsNone(token)
self.assertEqual(rewritten, 42)
self.assertEqual(size, 42)
kw = connection._requested
self.assertEqual(len(kw), 1)
self.assertEqual(kw[0]["method"], "POST")
PATH = "/b/name/o/%s/rewriteTo/b/name/o/%s" % (BLOB_NAME, BLOB_NAME)
self.assertEqual(kw[0]["path"], PATH)
self.assertEqual(kw[0]["query_params"], {"userProject": USER_PROJECT})
SENT = {}
self.assertEqual(kw[0]["data"], SENT)
self.assertEqual(kw[0]["timeout"], self._get_default_timeout())
headers = {key.title(): str(value) for key, value in kw[0]["headers"].items()}
self.assertNotIn("X-Goog-Copy-Source-Encryption-Algorithm", headers)
self.assertNotIn("X-Goog-Copy-Source-Encryption-Key", headers)
self.assertNotIn("X-Goog-Copy-Source-Encryption-Key-Sha256", headers)
self.assertEqual(headers["X-Goog-Encryption-Algorithm"], "AES256")
self.assertEqual(headers["X-Goog-Encryption-Key"], KEY_B64)
self.assertEqual(headers["X-Goog-Encryption-Key-Sha256"], KEY_HASH_B64)
def test_rewrite_same_name_no_key_new_key_w_token(self):
SOURCE_KEY = b"01234567890123456789012345678901" # 32 bytes
SOURCE_KEY_B64 = base64.b64encode(SOURCE_KEY).rstrip().decode("ascii")
SOURCE_KEY_HASH = hashlib.sha256(SOURCE_KEY).digest()
SOURCE_KEY_HASH_B64 = base64.b64encode(SOURCE_KEY_HASH).rstrip().decode("ascii")
DEST_KEY = b"90123456789012345678901234567890" # 32 bytes
DEST_KEY_B64 = base64.b64encode(DEST_KEY).rstrip().decode("ascii")
DEST_KEY_HASH = hashlib.sha256(DEST_KEY).digest()
DEST_KEY_HASH_B64 = base64.b64encode(DEST_KEY_HASH).rstrip().decode("ascii")
BLOB_NAME = "blob"
TOKEN = "TOKEN"
RESPONSE = {
"totalBytesRewritten": 42,
"objectSize": 42,
"done": True,
"resource": {"etag": "DEADBEEF"},
}
response = ({"status": http_client.OK}, RESPONSE)
connection = _Connection(response)
client = _Client(connection)
bucket = _Bucket(client=client)
source = self._make_one(BLOB_NAME, bucket=bucket, encryption_key=SOURCE_KEY)
dest = self._make_one(BLOB_NAME, bucket=bucket, encryption_key=DEST_KEY)
token, rewritten, size = dest.rewrite(source, token=TOKEN)
self.assertIsNone(token)
self.assertEqual(rewritten, 42)
self.assertEqual(size, 42)
kw = connection._requested
self.assertEqual(len(kw), 1)
self.assertEqual(kw[0]["method"], "POST")
PATH = "/b/name/o/%s/rewriteTo/b/name/o/%s" % (BLOB_NAME, BLOB_NAME)
self.assertEqual(kw[0]["path"], PATH)
self.assertEqual(kw[0]["query_params"], {"rewriteToken": TOKEN})
SENT = {}
self.assertEqual(kw[0]["data"], SENT)
self.assertEqual(kw[0]["timeout"], self._get_default_timeout())
headers = {key.title(): str(value) for key, value in kw[0]["headers"].items()}
self.assertEqual(headers["X-Goog-Copy-Source-Encryption-Algorithm"], "AES256")
self.assertEqual(headers["X-Goog-Copy-Source-Encryption-Key"], SOURCE_KEY_B64)
self.assertEqual(
headers["X-Goog-Copy-Source-Encryption-Key-Sha256"], SOURCE_KEY_HASH_B64
)
self.assertEqual(headers["X-Goog-Encryption-Algorithm"], "AES256")
self.assertEqual(headers["X-Goog-Encryption-Key"], DEST_KEY_B64)
self.assertEqual(headers["X-Goog-Encryption-Key-Sha256"], DEST_KEY_HASH_B64)
def test_rewrite_same_name_w_old_key_new_kms_key(self):
SOURCE_KEY = b"01234567890123456789012345678901" # 32 bytes
SOURCE_KEY_B64 = base64.b64encode(SOURCE_KEY).rstrip().decode("ascii")
SOURCE_KEY_HASH = hashlib.sha256(SOURCE_KEY).digest()
SOURCE_KEY_HASH_B64 = base64.b64encode(SOURCE_KEY_HASH).rstrip().decode("ascii")
DEST_KMS_RESOURCE = (
"projects/test-project-123/"
"locations/us/"
"keyRings/test-ring/"
"cryptoKeys/test-key"
)
BLOB_NAME = "blob"
RESPONSE = {
"totalBytesRewritten": 42,
"objectSize": 42,
"done": True,
"resource": {"etag": "DEADBEEF"},
}
response = ({"status": http_client.OK}, RESPONSE)
connection = _Connection(response)
client = _Client(connection)
bucket = _Bucket(client=client)
source = self._make_one(BLOB_NAME, bucket=bucket, encryption_key=SOURCE_KEY)
dest = self._make_one(BLOB_NAME, bucket=bucket, kms_key_name=DEST_KMS_RESOURCE)
token, rewritten, size = dest.rewrite(source)
self.assertIsNone(token)
self.assertEqual(rewritten, 42)
self.assertEqual(size, 42)
kw = connection._requested
self.assertEqual(len(kw), 1)
self.assertEqual(kw[0]["method"], "POST")
PATH = "/b/name/o/%s/rewriteTo/b/name/o/%s" % (BLOB_NAME, BLOB_NAME)
self.assertEqual(kw[0]["path"], PATH)
self.assertEqual(
kw[0]["query_params"], {"destinationKmsKeyName": DEST_KMS_RESOURCE}
)
self.assertEqual(kw[0]["timeout"], self._get_default_timeout())
SENT = {"kmsKeyName": DEST_KMS_RESOURCE}
self.assertEqual(kw[0]["data"], SENT)
headers = {key.title(): str(value) for key, value in kw[0]["headers"].items()}
self.assertEqual(headers["X-Goog-Copy-Source-Encryption-Algorithm"], "AES256")
self.assertEqual(headers["X-Goog-Copy-Source-Encryption-Key"], SOURCE_KEY_B64)
self.assertEqual(
headers["X-Goog-Copy-Source-Encryption-Key-Sha256"], SOURCE_KEY_HASH_B64
)
def test_update_storage_class_invalid(self):
BLOB_NAME = "blob-name"
bucket = _Bucket()
blob = self._make_one(BLOB_NAME, bucket=bucket)
with self.assertRaises(ValueError):
blob.update_storage_class(u"BOGUS")
def test_update_storage_class_large_file(self):
BLOB_NAME = "blob-name"
STORAGE_CLASS = u"NEARLINE"
TOKEN = "TOKEN"
INCOMPLETE_RESPONSE = {
"totalBytesRewritten": 42,
"objectSize": 84,
"done": False,
"rewriteToken": TOKEN,
"resource": {"storageClass": STORAGE_CLASS},
}
COMPLETE_RESPONSE = {
"totalBytesRewritten": 84,
"objectSize": 84,
"done": True,
"resource": {"storageClass": STORAGE_CLASS},
}
response_1 = ({"status": http_client.OK}, INCOMPLETE_RESPONSE)
response_2 = ({"status": http_client.OK}, COMPLETE_RESPONSE)
connection = _Connection(response_1, response_2)
client = _Client(connection)
bucket = _Bucket(client=client)
blob = self._make_one(BLOB_NAME, bucket=bucket)
blob.update_storage_class("NEARLINE")
self.assertEqual(blob.storage_class, "NEARLINE")
def test_update_storage_class_with_custom_timeout(self):
BLOB_NAME = "blob-name"
STORAGE_CLASS = u"NEARLINE"
TOKEN = "TOKEN"
INCOMPLETE_RESPONSE = {
"totalBytesRewritten": 42,
"objectSize": 84,
"done": False,
"rewriteToken": TOKEN,
"resource": {"storageClass": STORAGE_CLASS},
}
COMPLETE_RESPONSE = {
"totalBytesRewritten": 84,
"objectSize": 84,
"done": True,
"resource": {"storageClass": STORAGE_CLASS},
}
response_1 = ({"status": http_client.OK}, INCOMPLETE_RESPONSE)
response_2 = ({"status": http_client.OK}, COMPLETE_RESPONSE)
connection = _Connection(response_1, response_2)
client = _Client(connection)
bucket = _Bucket(client=client)
blob = self._make_one(BLOB_NAME, bucket=bucket)
blob.update_storage_class("NEARLINE", timeout=9.58)
self.assertEqual(blob.storage_class, "NEARLINE")
kw = connection._requested
self.assertEqual(len(kw), 2)
for kw_item in kw:
self.assertIn("timeout", kw_item)
self.assertEqual(kw_item["timeout"], 9.58)
def test_update_storage_class_wo_encryption_key(self):
BLOB_NAME = "blob-name"
STORAGE_CLASS = u"NEARLINE"
RESPONSE = {
"totalBytesRewritten": 42,
"objectSize": 42,
"done": True,
"resource": {"storageClass": STORAGE_CLASS},
}
response = ({"status": http_client.OK}, RESPONSE)
connection = _Connection(response)
client = _Client(connection)
bucket = _Bucket(client=client)
blob = self._make_one(BLOB_NAME, bucket=bucket)
blob.update_storage_class("NEARLINE")
self.assertEqual(blob.storage_class, "NEARLINE")
kw = connection._requested
self.assertEqual(len(kw), 1)
self.assertEqual(kw[0]["method"], "POST")
PATH = "/b/name/o/%s/rewriteTo/b/name/o/%s" % (BLOB_NAME, BLOB_NAME)
self.assertEqual(kw[0]["path"], PATH)
self.assertEqual(kw[0]["query_params"], {})
SENT = {"storageClass": STORAGE_CLASS}
self.assertEqual(kw[0]["data"], SENT)
headers = {key.title(): str(value) for key, value in kw[0]["headers"].items()}
# Blob has no key, and therefore the relevant headers are not sent.
self.assertNotIn("X-Goog-Copy-Source-Encryption-Algorithm", headers)
self.assertNotIn("X-Goog-Copy-Source-Encryption-Key", headers)
self.assertNotIn("X-Goog-Copy-Source-Encryption-Key-Sha256", headers)
self.assertNotIn("X-Goog-Encryption-Algorithm", headers)
self.assertNotIn("X-Goog-Encryption-Key", headers)
self.assertNotIn("X-Goog-Encryption-Key-Sha256", headers)
def test_update_storage_class_w_encryption_key_w_user_project(self):
BLOB_NAME = "blob-name"
BLOB_KEY = b"01234567890123456789012345678901" # 32 bytes
BLOB_KEY_B64 = base64.b64encode(BLOB_KEY).rstrip().decode("ascii")
BLOB_KEY_HASH = hashlib.sha256(BLOB_KEY).digest()
BLOB_KEY_HASH_B64 = base64.b64encode(BLOB_KEY_HASH).rstrip().decode("ascii")
STORAGE_CLASS = u"NEARLINE"
USER_PROJECT = "user-project-123"
RESPONSE = {
"totalBytesRewritten": 42,
"objectSize": 42,
"done": True,
"resource": {"storageClass": STORAGE_CLASS},
}
response = ({"status": http_client.OK}, RESPONSE)
connection = _Connection(response)
client = _Client(connection)
bucket = _Bucket(client=client, user_project=USER_PROJECT)
blob = self._make_one(BLOB_NAME, bucket=bucket, encryption_key=BLOB_KEY)
blob.update_storage_class("NEARLINE")
self.assertEqual(blob.storage_class, "NEARLINE")
kw = connection._requested
self.assertEqual(len(kw), 1)
self.assertEqual(kw[0]["method"], "POST")
PATH = "/b/name/o/%s/rewriteTo/b/name/o/%s" % (BLOB_NAME, BLOB_NAME)
self.assertEqual(kw[0]["path"], PATH)
self.assertEqual(kw[0]["query_params"], {"userProject": USER_PROJECT})
SENT = {"storageClass": STORAGE_CLASS}
self.assertEqual(kw[0]["data"], SENT)
headers = {key.title(): str(value) for key, value in kw[0]["headers"].items()}
# Blob has key, and therefore the relevant headers are sent.
self.assertEqual(headers["X-Goog-Copy-Source-Encryption-Algorithm"], "AES256")
self.assertEqual(headers["X-Goog-Copy-Source-Encryption-Key"], BLOB_KEY_B64)
self.assertEqual(
headers["X-Goog-Copy-Source-Encryption-Key-Sha256"], BLOB_KEY_HASH_B64
)
self.assertEqual(headers["X-Goog-Encryption-Algorithm"], "AES256")
self.assertEqual(headers["X-Goog-Encryption-Key"], BLOB_KEY_B64)
self.assertEqual(headers["X-Goog-Encryption-Key-Sha256"], BLOB_KEY_HASH_B64)
def test_update_storage_class_w_generation_match(self):
BLOB_NAME = "blob-name"
STORAGE_CLASS = u"NEARLINE"
GENERATION_NUMBER = 6
SOURCE_GENERATION_NUMBER = 9
RESPONSE = {
"totalBytesRewritten": 42,
"objectSize": 42,
"done": True,
"resource": {"storageClass": STORAGE_CLASS},
}
response = ({"status": http_client.OK}, RESPONSE)
connection = _Connection(response)
client = _Client(connection)
bucket = _Bucket(client=client)
blob = self._make_one(BLOB_NAME, bucket=bucket)
blob.update_storage_class(
"NEARLINE",
if_generation_match=GENERATION_NUMBER,
if_source_generation_match=SOURCE_GENERATION_NUMBER,
)
self.assertEqual(blob.storage_class, "NEARLINE")
kw = connection._requested
self.assertEqual(len(kw), 1)
self.assertEqual(kw[0]["method"], "POST")
PATH = "/b/name/o/%s/rewriteTo/b/name/o/%s" % (BLOB_NAME, BLOB_NAME)
self.assertEqual(kw[0]["path"], PATH)
self.assertEqual(
kw[0]["query_params"],
{
"ifGenerationMatch": GENERATION_NUMBER,
"ifSourceGenerationMatch": SOURCE_GENERATION_NUMBER,
},
)
SENT = {"storageClass": STORAGE_CLASS}
self.assertEqual(kw[0]["data"], SENT)
def test_cache_control_getter(self):
BLOB_NAME = "blob-name"
bucket = _Bucket()
CACHE_CONTROL = "no-cache"
properties = {"cacheControl": CACHE_CONTROL}
blob = self._make_one(BLOB_NAME, bucket=bucket, properties=properties)
self.assertEqual(blob.cache_control, CACHE_CONTROL)
def test_cache_control_setter(self):
BLOB_NAME = "blob-name"
CACHE_CONTROL = "no-cache"
bucket = _Bucket()
blob = self._make_one(BLOB_NAME, bucket=bucket)
self.assertIsNone(blob.cache_control)
blob.cache_control = CACHE_CONTROL
self.assertEqual(blob.cache_control, CACHE_CONTROL)
def test_component_count(self):
BUCKET = object()
COMPONENT_COUNT = 42
blob = self._make_one(
"blob-name", bucket=BUCKET, properties={"componentCount": COMPONENT_COUNT}
)
self.assertEqual(blob.component_count, COMPONENT_COUNT)
def test_component_count_unset(self):
BUCKET = object()
blob = self._make_one("blob-name", bucket=BUCKET)
self.assertIsNone(blob.component_count)
def test_component_count_string_val(self):
BUCKET = object()
COMPONENT_COUNT = 42
blob = self._make_one(
"blob-name",
bucket=BUCKET,
properties={"componentCount": str(COMPONENT_COUNT)},
)
self.assertEqual(blob.component_count, COMPONENT_COUNT)
def test_content_disposition_getter(self):
BLOB_NAME = "blob-name"
bucket = _Bucket()
CONTENT_DISPOSITION = "Attachment; filename=example.jpg"
properties = {"contentDisposition": CONTENT_DISPOSITION}
blob = self._make_one(BLOB_NAME, bucket=bucket, properties=properties)
self.assertEqual(blob.content_disposition, CONTENT_DISPOSITION)
def test_content_disposition_setter(self):
BLOB_NAME = "blob-name"
CONTENT_DISPOSITION = "Attachment; filename=example.jpg"
bucket = _Bucket()
blob = self._make_one(BLOB_NAME, bucket=bucket)
self.assertIsNone(blob.content_disposition)
blob.content_disposition = CONTENT_DISPOSITION
self.assertEqual(blob.content_disposition, CONTENT_DISPOSITION)
def test_content_encoding_getter(self):
BLOB_NAME = "blob-name"
bucket = _Bucket()
CONTENT_ENCODING = "gzip"
properties = {"contentEncoding": CONTENT_ENCODING}
blob = self._make_one(BLOB_NAME, bucket=bucket, properties=properties)
self.assertEqual(blob.content_encoding, CONTENT_ENCODING)
def test_content_encoding_setter(self):
BLOB_NAME = "blob-name"
CONTENT_ENCODING = "gzip"
bucket = _Bucket()
blob = self._make_one(BLOB_NAME, bucket=bucket)
self.assertIsNone(blob.content_encoding)
blob.content_encoding = CONTENT_ENCODING
self.assertEqual(blob.content_encoding, CONTENT_ENCODING)
def test_content_language_getter(self):
BLOB_NAME = "blob-name"
bucket = _Bucket()
CONTENT_LANGUAGE = "pt-BR"
properties = {"contentLanguage": CONTENT_LANGUAGE}
blob = self._make_one(BLOB_NAME, bucket=bucket, properties=properties)
self.assertEqual(blob.content_language, CONTENT_LANGUAGE)
def test_content_language_setter(self):
BLOB_NAME = "blob-name"
CONTENT_LANGUAGE = "pt-BR"
bucket = _Bucket()
blob = self._make_one(BLOB_NAME, bucket=bucket)
self.assertIsNone(blob.content_language)
blob.content_language = CONTENT_LANGUAGE
self.assertEqual(blob.content_language, CONTENT_LANGUAGE)
def test_content_type_getter(self):
BLOB_NAME = "blob-name"
bucket = _Bucket()
CONTENT_TYPE = "image/jpeg"
properties = {"contentType": CONTENT_TYPE}
blob = self._make_one(BLOB_NAME, bucket=bucket, properties=properties)
self.assertEqual(blob.content_type, CONTENT_TYPE)
def test_content_type_setter(self):
BLOB_NAME = "blob-name"
CONTENT_TYPE = "image/jpeg"
bucket = _Bucket()
blob = self._make_one(BLOB_NAME, bucket=bucket)
self.assertIsNone(blob.content_type)
blob.content_type = CONTENT_TYPE
self.assertEqual(blob.content_type, CONTENT_TYPE)
def test_crc32c_getter(self):
BLOB_NAME = "blob-name"
bucket = _Bucket()
CRC32C = "DEADBEEF"
properties = {"crc32c": CRC32C}
blob = self._make_one(BLOB_NAME, bucket=bucket, properties=properties)
self.assertEqual(blob.crc32c, CRC32C)
def test_crc32c_setter(self):
BLOB_NAME = "blob-name"
CRC32C = "DEADBEEF"
bucket = _Bucket()
blob = self._make_one(BLOB_NAME, bucket=bucket)
self.assertIsNone(blob.crc32c)
blob.crc32c = CRC32C
self.assertEqual(blob.crc32c, CRC32C)
def test_etag(self):
BLOB_NAME = "blob-name"
bucket = _Bucket()
ETAG = "ETAG"
properties = {"etag": ETAG}
blob = self._make_one(BLOB_NAME, bucket=bucket, properties=properties)
self.assertEqual(blob.etag, ETAG)
def test_event_based_hold_getter_missing(self):
BLOB_NAME = "blob-name"
bucket = _Bucket()
properties = {}
blob = self._make_one(BLOB_NAME, bucket=bucket, properties=properties)
self.assertIsNone(blob.event_based_hold)
def test_event_based_hold_getter_false(self):
BLOB_NAME = "blob-name"
bucket = _Bucket()
properties = {"eventBasedHold": False}
blob = self._make_one(BLOB_NAME, bucket=bucket, properties=properties)
self.assertFalse(blob.event_based_hold)
def test_event_based_hold_getter_true(self):
BLOB_NAME = "blob-name"
bucket = _Bucket()
properties = {"eventBasedHold": True}
blob = self._make_one(BLOB_NAME, bucket=bucket, properties=properties)
self.assertTrue(blob.event_based_hold)
def test_event_based_hold_setter(self):
BLOB_NAME = "blob-name"
bucket = _Bucket()
blob = self._make_one(BLOB_NAME, bucket=bucket)
self.assertIsNone(blob.event_based_hold)
blob.event_based_hold = True
self.assertEqual(blob.event_based_hold, True)
def test_generation(self):
BUCKET = object()
GENERATION = 42
blob = self._make_one(
"blob-name", bucket=BUCKET, properties={"generation": GENERATION}
)
self.assertEqual(blob.generation, GENERATION)
def test_generation_unset(self):
BUCKET = object()
blob = self._make_one("blob-name", bucket=BUCKET)
self.assertIsNone(blob.generation)
def test_generation_string_val(self):
BUCKET = object()
GENERATION = 42
blob = self._make_one(
"blob-name", bucket=BUCKET, properties={"generation": str(GENERATION)}
)
self.assertEqual(blob.generation, GENERATION)
def test_id(self):
BLOB_NAME = "blob-name"
bucket = _Bucket()
ID = "ID"
properties = {"id": ID}
blob = self._make_one(BLOB_NAME, bucket=bucket, properties=properties)
self.assertEqual(blob.id, ID)
def test_md5_hash_getter(self):
BLOB_NAME = "blob-name"
bucket = _Bucket()
MD5_HASH = "DEADBEEF"
properties = {"md5Hash": MD5_HASH}
blob = self._make_one(BLOB_NAME, bucket=bucket, properties=properties)
self.assertEqual(blob.md5_hash, MD5_HASH)
def test_md5_hash_setter(self):
BLOB_NAME = "blob-name"
MD5_HASH = "DEADBEEF"
bucket = _Bucket()
blob = self._make_one(BLOB_NAME, bucket=bucket)
self.assertIsNone(blob.md5_hash)
blob.md5_hash = MD5_HASH
self.assertEqual(blob.md5_hash, MD5_HASH)
def test_media_link(self):
BLOB_NAME = "blob-name"
bucket = _Bucket()
MEDIA_LINK = "http://example.com/media/"
properties = {"mediaLink": MEDIA_LINK}
blob = self._make_one(BLOB_NAME, bucket=bucket, properties=properties)
self.assertEqual(blob.media_link, MEDIA_LINK)
def test_metadata_getter(self):
BLOB_NAME = "blob-name"
bucket = _Bucket()
METADATA = {"foo": "Foo"}
properties = {"metadata": METADATA}
blob = self._make_one(BLOB_NAME, bucket=bucket, properties=properties)
self.assertEqual(blob.metadata, METADATA)
def test_metadata_setter(self):
BLOB_NAME = "blob-name"
METADATA = {"foo": "Foo"}
bucket = _Bucket()
blob = self._make_one(BLOB_NAME, bucket=bucket)
self.assertIsNone(blob.metadata)
blob.metadata = METADATA
self.assertEqual(blob.metadata, METADATA)
def test_metadata_setter_w_nan(self):
BLOB_NAME = "blob-name"
METADATA = {"foo": float("nan")}
bucket = _Bucket()
blob = self._make_one(BLOB_NAME, bucket=bucket)
self.assertIsNone(blob.metadata)
blob.metadata = METADATA
value = blob.metadata["foo"]
self.assertIsInstance(value, str)
def test_metageneration(self):
BUCKET = object()
METAGENERATION = 42
blob = self._make_one(
"blob-name", bucket=BUCKET, properties={"metageneration": METAGENERATION}
)
self.assertEqual(blob.metageneration, METAGENERATION)
def test_metageneration_unset(self):
BUCKET = object()
blob = self._make_one("blob-name", bucket=BUCKET)
self.assertIsNone(blob.metageneration)
def test_metageneration_string_val(self):
BUCKET = object()
METAGENERATION = 42
blob = self._make_one(
"blob-name",
bucket=BUCKET,
properties={"metageneration": str(METAGENERATION)},
)
self.assertEqual(blob.metageneration, METAGENERATION)
def test_owner(self):
BLOB_NAME = "blob-name"
bucket = _Bucket()
OWNER = {"entity": "project-owner-12345", "entityId": "23456"}
properties = {"owner": OWNER}
blob = self._make_one(BLOB_NAME, bucket=bucket, properties=properties)
owner = blob.owner
self.assertEqual(owner["entity"], "project-owner-12345")
self.assertEqual(owner["entityId"], "23456")
def test_retention_expiration_time(self):
from google.cloud._helpers import _RFC3339_MICROS
from google.cloud._helpers import UTC
BLOB_NAME = "blob-name"
bucket = _Bucket()
TIMESTAMP = datetime.datetime(2014, 11, 5, 20, 34, 37, tzinfo=UTC)
TIME_CREATED = TIMESTAMP.strftime(_RFC3339_MICROS)
properties = {"retentionExpirationTime": TIME_CREATED}
blob = self._make_one(BLOB_NAME, bucket=bucket, properties=properties)
self.assertEqual(blob.retention_expiration_time, TIMESTAMP)
def test_retention_expiration_time_unset(self):
BUCKET = object()
blob = self._make_one("blob-name", bucket=BUCKET)
self.assertIsNone(blob.retention_expiration_time)
def test_self_link(self):
BLOB_NAME = "blob-name"
bucket = _Bucket()
SELF_LINK = "http://example.com/self/"
properties = {"selfLink": SELF_LINK}
blob = self._make_one(BLOB_NAME, bucket=bucket, properties=properties)
self.assertEqual(blob.self_link, SELF_LINK)
def test_size(self):
BUCKET = object()
SIZE = 42
blob = self._make_one("blob-name", bucket=BUCKET, properties={"size": SIZE})
self.assertEqual(blob.size, SIZE)
def test_size_unset(self):
BUCKET = object()
blob = self._make_one("blob-name", bucket=BUCKET)
self.assertIsNone(blob.size)
def test_size_string_val(self):
BUCKET = object()
SIZE = 42
blob = self._make_one(
"blob-name", bucket=BUCKET, properties={"size": str(SIZE)}
)
self.assertEqual(blob.size, SIZE)
def test_storage_class_getter(self):
blob_name = "blob-name"
bucket = _Bucket()
storage_class = "COLDLINE"
properties = {"storageClass": storage_class}
blob = self._make_one(blob_name, bucket=bucket, properties=properties)
self.assertEqual(blob.storage_class, storage_class)
def test_storage_class_setter(self):
blob_name = "blob-name"
bucket = _Bucket()
storage_class = "COLDLINE"
blob = self._make_one(blob_name, bucket=bucket)
self.assertIsNone(blob.storage_class)
blob.storage_class = storage_class
self.assertEqual(blob.storage_class, storage_class)
self.assertEqual(blob._properties, {"storageClass": storage_class})
def test_temporary_hold_getter_missing(self):
BLOB_NAME = "blob-name"
bucket = _Bucket()
properties = {}
blob = self._make_one(BLOB_NAME, bucket=bucket, properties=properties)
self.assertIsNone(blob.temporary_hold)
def test_temporary_hold_getter_false(self):
BLOB_NAME = "blob-name"
bucket = _Bucket()
properties = {"temporaryHold": False}
blob = self._make_one(BLOB_NAME, bucket=bucket, properties=properties)
self.assertFalse(blob.temporary_hold)
def test_temporary_hold_getter_true(self):
BLOB_NAME = "blob-name"
bucket = _Bucket()
properties = {"temporaryHold": True}
blob = self._make_one(BLOB_NAME, bucket=bucket, properties=properties)
self.assertTrue(blob.temporary_hold)
def test_temporary_hold_setter(self):
BLOB_NAME = "blob-name"
bucket = _Bucket()
blob = self._make_one(BLOB_NAME, bucket=bucket)
self.assertIsNone(blob.temporary_hold)
blob.temporary_hold = True
self.assertEqual(blob.temporary_hold, True)
def test_time_deleted(self):
from google.cloud._helpers import _RFC3339_MICROS
from google.cloud._helpers import UTC
BLOB_NAME = "blob-name"
bucket = _Bucket()
TIMESTAMP = datetime.datetime(2014, 11, 5, 20, 34, 37, tzinfo=UTC)
TIME_DELETED = TIMESTAMP.strftime(_RFC3339_MICROS)
properties = {"timeDeleted": TIME_DELETED}
blob = self._make_one(BLOB_NAME, bucket=bucket, properties=properties)
self.assertEqual(blob.time_deleted, TIMESTAMP)
def test_time_deleted_unset(self):
BUCKET = object()
blob = self._make_one("blob-name", bucket=BUCKET)
self.assertIsNone(blob.time_deleted)
def test_time_created(self):
from google.cloud._helpers import _RFC3339_MICROS
from google.cloud._helpers import UTC
BLOB_NAME = "blob-name"
bucket = _Bucket()
TIMESTAMP = datetime.datetime(2014, 11, 5, 20, 34, 37, tzinfo=UTC)
TIME_CREATED = TIMESTAMP.strftime(_RFC3339_MICROS)
properties = {"timeCreated": TIME_CREATED}
blob = self._make_one(BLOB_NAME, bucket=bucket, properties=properties)
self.assertEqual(blob.time_created, TIMESTAMP)
def test_time_created_unset(self):
BUCKET = object()
blob = self._make_one("blob-name", bucket=BUCKET)
self.assertIsNone(blob.time_created)
def test_updated(self):
from google.cloud._helpers import _RFC3339_MICROS
from google.cloud._helpers import UTC
BLOB_NAME = "blob-name"
bucket = _Bucket()
TIMESTAMP = datetime.datetime(2014, 11, 5, 20, 34, 37, tzinfo=UTC)
UPDATED = TIMESTAMP.strftime(_RFC3339_MICROS)
properties = {"updated": UPDATED}
blob = self._make_one(BLOB_NAME, bucket=bucket, properties=properties)
self.assertEqual(blob.updated, TIMESTAMP)
def test_updated_unset(self):
BUCKET = object()
blob = self._make_one("blob-name", bucket=BUCKET)
self.assertIsNone(blob.updated)
def test_custom_time_getter(self):
from google.cloud._helpers import _RFC3339_MICROS
from google.cloud._helpers import UTC
BLOB_NAME = "blob-name"
bucket = _Bucket()
TIMESTAMP = datetime.datetime(2014, 11, 5, 20, 34, 37, tzinfo=UTC)
TIME_CREATED = TIMESTAMP.strftime(_RFC3339_MICROS)
properties = {"customTime": TIME_CREATED}
blob = self._make_one(BLOB_NAME, bucket=bucket, properties=properties)
self.assertEqual(blob.custom_time, TIMESTAMP)
def test_custom_time_setter(self):
from google.cloud._helpers import UTC
BLOB_NAME = "blob-name"
bucket = _Bucket()
TIMESTAMP = datetime.datetime(2014, 11, 5, 20, 34, 37, tzinfo=UTC)
blob = self._make_one(BLOB_NAME, bucket=bucket)
self.assertIsNone(blob.custom_time)
blob.custom_time = TIMESTAMP
self.assertEqual(blob.custom_time, TIMESTAMP)
def test_custom_time_setter_none_value(self):
from google.cloud._helpers import _RFC3339_MICROS
from google.cloud._helpers import UTC
BLOB_NAME = "blob-name"
bucket = _Bucket()
TIMESTAMP = datetime.datetime(2014, 11, 5, 20, 34, 37, tzinfo=UTC)
TIME_CREATED = TIMESTAMP.strftime(_RFC3339_MICROS)
properties = {"customTime": TIME_CREATED}
blob = self._make_one(BLOB_NAME, bucket=bucket, properties=properties)
self.assertEqual(blob.custom_time, TIMESTAMP)
blob.custom_time = None
self.assertIsNone(blob.custom_time)
def test_custom_time_unset(self):
BUCKET = object()
blob = self._make_one("blob-name", bucket=BUCKET)
self.assertIsNone(blob.custom_time)
def test_from_string_w_valid_uri(self):
from google.cloud.storage.blob import Blob
connection = _Connection()
client = _Client(connection)
uri = "gs://BUCKET_NAME/b"
blob = Blob.from_string(uri, client)
self.assertIsInstance(blob, Blob)
self.assertIs(blob.client, client)
self.assertEqual(blob.name, "b")
self.assertEqual(blob.bucket.name, "BUCKET_NAME")
def test_from_string_w_invalid_uri(self):
from google.cloud.storage.blob import Blob
connection = _Connection()
client = _Client(connection)
with pytest.raises(ValueError, match="URI scheme must be gs"):
Blob.from_string("http://bucket_name/b", client)
def test_from_string_w_domain_name_bucket(self):
from google.cloud.storage.blob import Blob
connection = _Connection()
client = _Client(connection)
uri = "gs://buckets.example.com/b"
blob = Blob.from_string(uri, client)
self.assertIsInstance(blob, Blob)
self.assertIs(blob.client, client)
self.assertEqual(blob.name, "b")
self.assertEqual(blob.bucket.name, "buckets.example.com")
class Test__quote(unittest.TestCase):
@staticmethod
def _call_fut(*args, **kw):
from google.cloud.storage.blob import _quote
return _quote(*args, **kw)
def test_bytes(self):
quoted = self._call_fut(b"\xDE\xAD\xBE\xEF")
self.assertEqual(quoted, "%DE%AD%BE%EF")
def test_unicode(self):
helicopter = u"\U0001f681"
quoted = self._call_fut(helicopter)
self.assertEqual(quoted, "%F0%9F%9A%81")
def test_bad_type(self):
with self.assertRaises(TypeError):
self._call_fut(None)
def test_w_slash_default(self):
with_slash = "foo/bar/baz"
quoted = self._call_fut(with_slash)
self.assertEqual(quoted, "foo%2Fbar%2Fbaz")
def test_w_slash_w_safe(self):
with_slash = "foo/bar/baz"
quoted_safe = self._call_fut(with_slash, safe=b"/")
self.assertEqual(quoted_safe, with_slash)
def test_w_tilde(self):
with_tilde = "bam~qux"
quoted = self._call_fut(with_tilde, safe=b"~")
self.assertEqual(quoted, with_tilde)
class Test__maybe_rewind(unittest.TestCase):
@staticmethod
def _call_fut(*args, **kwargs):
from google.cloud.storage.blob import _maybe_rewind
return _maybe_rewind(*args, **kwargs)
def test_default(self):
stream = mock.Mock(spec=[u"seek"])
ret_val = self._call_fut(stream)
self.assertIsNone(ret_val)
stream.seek.assert_not_called()
def test_do_not_rewind(self):
stream = mock.Mock(spec=[u"seek"])
ret_val = self._call_fut(stream, rewind=False)
self.assertIsNone(ret_val)
stream.seek.assert_not_called()
def test_do_rewind(self):
stream = mock.Mock(spec=[u"seek"])
ret_val = self._call_fut(stream, rewind=True)
self.assertIsNone(ret_val)
stream.seek.assert_called_once_with(0, os.SEEK_SET)
class Test__raise_from_invalid_response(unittest.TestCase):
@staticmethod
def _call_fut(error):
from google.cloud.storage.blob import _raise_from_invalid_response
return _raise_from_invalid_response(error)
def _helper(self, message, code=http_client.BAD_REQUEST, reason=None, args=()):
import requests
from google.resumable_media import InvalidResponse
from google.api_core import exceptions
response = requests.Response()
response.request = requests.Request("GET", "http://example.com").prepare()
response._content = reason
response.status_code = code
error = InvalidResponse(response, message, *args)
with self.assertRaises(exceptions.GoogleAPICallError) as exc_info:
self._call_fut(error)
return exc_info
def test_default(self):
message = "Failure"
exc_info = self._helper(message)
expected = "GET http://example.com/: {}".format(message)
self.assertEqual(exc_info.exception.message, expected)
self.assertEqual(exc_info.exception.errors, [])
def test_w_206_and_args(self):
message = "Failure"
reason = b"Not available"
args = ("one", "two")
exc_info = self._helper(
message, code=http_client.PARTIAL_CONTENT, reason=reason, args=args
)
expected = "GET http://example.com/: {}: {}".format(
reason.decode("utf-8"), (message,) + args
)
self.assertEqual(exc_info.exception.message, expected)
self.assertEqual(exc_info.exception.errors, [])
class Test__add_query_parameters(unittest.TestCase):
@staticmethod
def _call_fut(*args, **kwargs):
from google.cloud.storage.blob import _add_query_parameters
return _add_query_parameters(*args, **kwargs)
def test_w_empty_list(self):
BASE_URL = "https://test.example.com/base"
self.assertEqual(self._call_fut(BASE_URL, []), BASE_URL)
def test_wo_existing_qs(self):
BASE_URL = "https://test.example.com/base"
NV_LIST = [("one", "One"), ("two", "Two")]
expected = "&".join(["{}={}".format(name, value) for name, value in NV_LIST])
self.assertEqual(
self._call_fut(BASE_URL, NV_LIST), "{}?{}".format(BASE_URL, expected)
)
def test_w_existing_qs(self):
BASE_URL = "https://test.example.com/base?one=Three"
NV_LIST = [("one", "One"), ("two", "Two")]
expected = "&".join(["{}={}".format(name, value) for name, value in NV_LIST])
self.assertEqual(
self._call_fut(BASE_URL, NV_LIST), "{}&{}".format(BASE_URL, expected)
)
class _Connection(object):
API_BASE_URL = "http://example.com"
USER_AGENT = "testing 1.2.3"
credentials = object()
def __init__(self, *responses):
self._responses = responses[:]
self._requested = []
self._signed = []
def _respond(self, **kw):
self._requested.append(kw)
response, self._responses = self._responses[0], self._responses[1:]
return response
def api_request(self, **kw):
from google.cloud.exceptions import NotFound
info, content = self._respond(**kw)
if info.get("status") == http_client.NOT_FOUND:
raise NotFound(info)
return content
class _Bucket(object):
def __init__(self, client=None, name="name", user_project=None):
if client is None:
connection = _Connection()
client = _Client(connection)
self.client = client
self._blobs = {}
self._copied = []
self._deleted = []
self.name = name
self.path = "/b/" + name
self.user_project = user_project
def delete_blob(
self,
blob_name,
client=None,
generation=None,
timeout=None,
if_generation_match=None,
if_generation_not_match=None,
if_metageneration_match=None,
if_metageneration_not_match=None,
):
del self._blobs[blob_name]
self._deleted.append(
(
blob_name,
client,
generation,
timeout,
if_generation_match,
if_generation_not_match,
if_metageneration_match,
if_metageneration_not_match,
)
)
class _Client(object):
def __init__(self, connection):
self._base_connection = connection
@property
def _connection(self):
return self._base_connection
@property
def _credentials(self):
return self._base_connection.credentials
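# A minimal sketch of how the test doubles above compose (illustrative only;
# the real tests exercise them through Blob): _Connection replays canned
# (headers, payload) tuples and records every api_request() call in
# `_requested`, which the assertions then inspect.
def _example_test_double_usage():
    connection = _Connection(({"status": http_client.OK}, {"etag": "DEADBEEF"}))
    client = _Client(connection)
    bucket = _Bucket(client=client, name="name")
    payload = client._connection.api_request(method="GET", path=bucket.path)
    assert payload == {"etag": "DEADBEEF"}
    assert connection._requested[0] == {"method": "GET", "path": "/b/name"}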
|
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import unittest
import abc
import os
import enum
import time
import logging
import shutil
import paddle
import paddle.fluid as fluid
from paddle.fluid.initializer import NumpyArrayInitializer
from paddle.fluid.core import PassVersionChecker
import paddle.fluid.core as core
from paddle import compat as cpt
import paddle.inference as paddle_infer
from typing import Optional, List, Callable, Dict, Any, Set
from program_config import TensorConfig, OpConfig, ProgramConfig, create_fake_model, create_quant_model
from itertools import product
from program_config import CxxConfig, TargetType, PrecisionType, DataLayoutType, Place
import hypothesis
from hypothesis import given, settings, seed
import hypothesis.strategies as st
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--target", choices=['Host', 'X86','CUDA','ARM','OpenCL','FPGA','NPU','MLU','RKNPU','APU','HUAWEI_ASCEND_NPU','INTEL_FPGA'], required=True)
logging.basicConfig(level=logging.INFO, format="%(message)s")
settings.register_profile(
"ci",
max_examples=10,
suppress_health_check=hypothesis.HealthCheck.all(),
deadline=None,
print_blob=True,
derandomize=True,
report_multiple_bugs=False)
settings.load_profile("ci")
class IgnoreReasonsBase(enum.Enum):
    # Paddle does not implement this yet, but PaddleLite does; the feature needs to be added to Paddle.
    PADDLE_NOT_IMPLEMENTED = 0
    # PaddleLite does not support this.
PADDLELITE_NOT_SUPPORT = 1
# Accuracy is abnormal after enabling pass.
ACCURACY_ERROR = 2
class AutoScanBaseTest(unittest.TestCase):
def __init__(self, *args, **kwargs):
self.valid_places = []
self.thread_num = [1]
np.random.seed(1024)
paddle.enable_static()
super(AutoScanBaseTest, self).__init__(*args, **kwargs)
self.ignore_cases = []
abs_dir = os.path.abspath(os.path.dirname(__file__))
self.cache_dir = os.path.join(abs_dir,
str(self.__module__) + '_cache_dir')
self.available_passes_in_framework = set()
self.num_ran_programs = 0
self.num_invalid_programs = 0
self.num_ignore_tests = 0
self.num_predictor_kinds = 0
args = parser.parse_args()
self.args = args
@abc.abstractmethod
def sample_program_configs(self, draw):
'''
        Generate all program configs by combining different input tensor shapes
        and attribute values.
'''
raise NotImplementedError
@abc.abstractmethod
def sample_predictor_configs(self):
raise NotImplementedError
@abc.abstractmethod
def add_ignore_check_case(
self,
            teller: Callable[[ProgramConfig, CxxConfig], bool],
reason: IgnoreReasonsBase,
note: str):
self.ignore_cases.append((teller, reason, note))
@abc.abstractmethod
def is_program_valid(self, program_config: ProgramConfig, predictor_config: CxxConfig) -> bool:
return True
def run_test_config(self, model, params, prog_config, pred_config,
feed_data) -> Dict[str, np.ndarray]:
'''
Test a single case.
'''
pred_config.set_model_buffer(model, len(model), params, len(params))
predictor = paddle_infer.create_predictor(pred_config)
self.available_passes_in_framework = self.available_passes_in_framework | set(
pred_config.pass_builder().all_passes())
for name, _ in prog_config.inputs.items():
input_tensor = predictor.get_input_handle(name)
input_tensor.copy_from_cpu(feed_data[name]['data'])
if feed_data[name]['lod'] is not None:
input_tensor.set_lod(feed_data[name]['lod'])
predictor.run()
result = {}
for out_name, o_name in zip(prog_config.outputs,
predictor.get_output_names()):
result[out_name] = predictor.get_output_handle(o_name).copy_to_cpu()
return result
@abc.abstractmethod
def assert_tensors_near(self,
atol: float,
rtol: float,
                            tensor: Dict[str, np.ndarray],
                            baseline: Dict[str, np.ndarray]):
if len(tensor) == 1 and len(baseline) == 1:
tensor_key = list(tensor.keys())
arr = np.array(tensor[tensor_key[0]])
base_key = list(baseline.keys())
base = np.array(baseline[base_key[0]])
self.assertTrue(
base.shape == arr.shape,
"The output shapes are not equal, the baseline shape is " +
str(base.shape) + ', but got ' + str(arr.shape))
self.assertTrue(
np.allclose(
base, arr, atol=atol, rtol=rtol),
"Output has diff. ")
else:
for key in tensor:
opencl_str = "/target_trans"
index = key.rfind(opencl_str)
paddlekey=key
if index > 0:
paddlekey = key[0: index]
if (key == "saved_mean" or key == "saved_variance"):
                        # training-only outputs (batch_norm saved statistics); skip them
continue
arr = np.array(tensor[key])
self.assertTrue(
baseline[paddlekey].shape == arr.shape,
"The output shapes are not equal, the baseline shape is " +
str(baseline[paddlekey].shape) + ', but got ' + str(arr.shape))
self.assertTrue(
np.allclose(
baseline[paddlekey], arr, atol=atol, rtol=rtol),
"Output has diff. ")
def generate_op_config(self,
ops_config: List[Dict[str, Any]]) -> List[OpConfig]:
ops = []
for i in range(len(ops_config)):
op_config = ops_config[i]
ops.append(
OpConfig(
type=op_config['op_type'],
inputs=op_config['op_inputs'],
outputs=op_config['op_outputs'],
attrs=op_config['op_attrs']))
return ops
@abc.abstractmethod
def ignore_log(self, msg: str):
logging.warning("SKIP: " + msg)
@abc.abstractmethod
def fail_log(self, msg: str):
logging.fatal("FAILE: " + msg)
@abc.abstractmethod
def success_log(self, msg: str):
logging.info("SUCCESS: " + msg)
@abc.abstractmethod
def create_inference_config(self,
passes: Optional[List[str]]=None,
use_gpu: bool=False,
use_mkldnn: bool=False,
ir_optim: Optional[bool]=None):
config = paddle_infer.Config()
config.switch_ir_debug(True)
config.disable_glog_info()
if ir_optim is not None:
config.switch_ir_optim(ir_optim)
if use_gpu:
config.enable_use_gpu(100, 0)
if use_mkldnn:
config.enable_mkldnn()
if passes is not None:
config.pass_builder().set_passes(passes)
self.passes = passes
return config
def run_test(self, quant=False, prog_configs=None):
status = True
paddlelite_configs, op_list_, (atol_, rtol_) = self.sample_predictor_configs()
for prog_config in prog_configs:
            # if the program is invalid for every predictor config, skip it
program_valid_ = False
for paddlelite_config in paddlelite_configs:
                # check whether the program is valid for this predictor config
if self.is_program_valid(prog_config, paddlelite_config):
program_valid_ = True
if not program_valid_:
self.num_invalid_programs += 1
continue
self.num_ran_programs += 1
model, params = create_fake_model(prog_config)
if quant:
model, params = create_quant_model(model, params)
feed_data = {}
for name, tensor_config in prog_config.inputs.items():
feed_data[name] = {
'data': tensor_config.data,
'lod': tensor_config.lod
}
results: List[Dict[str, np.ndarray]] = []
# baseline: cpu no ir_optim run
base_config = self.create_inference_config(ir_optim=False)
logging.info('[ProgramConfig]: ' + str(prog_config))
results.append(
self.run_test_config(model, params, prog_config, base_config,
feed_data))
for paddlelite_config in paddlelite_configs:
                # check whether the program is valid for this predictor config
if not self.is_program_valid(prog_config, paddlelite_config):
continue
self.num_predictor_kinds += 1
# ignore info
ignore_flag = False
pred_config = paddlelite_config.value()
for ignore_info in self.ignore_cases:
if ignore_info[0](prog_config, paddlelite_config):
ignore_flag = True
self.num_ignore_tests += 1
if ignore_info[1] == IgnoreReasonsBase.ACCURACY_ERROR:
self.ignore_log("[ACCURACY_ERROR] " +
ignore_info[2] + ' ' + ' vs ' + self.
paddlelite_config_str(pred_config))
else:
raise NotImplementedError
break
if os.path.exists(self.cache_dir):
shutil.rmtree(self.cache_dir)
if not os.path.exists(self.cache_dir):
os.mkdir(self.cache_dir)
try:
result, opt_model_bytes = self.run_lite_config(model, params, feed_data, pred_config)
results.append(result)
self.assert_tensors_near(atol_, rtol_, results[-1],
results[0])
if not ignore_flag and self.passes is not None:
self.assert_op_list(opt_model_bytes, op_list_)
except Exception as e:
self.fail_log(
self.paddlelite_config_str(pred_config) +
'\033[1;31m \nERROR INFO: {}\033[0m'.format(str(e)))
if not ignore_flag:
status = False
continue
self.success_log('PredictorConfig: ' + self.
paddlelite_config_str(pred_config))
self.assertTrue(status)
    def inference_config_str(self, config) -> str:
        dic = {}
        enable_mkldnn = config.mkldnn_enabled()
        dic['use_mkldnn'] = enable_mkldnn
        enable_gpu = config.use_gpu()
        dic['use_gpu'] = enable_gpu
        return str(dic)
    def paddlelite_config_str(self, config) -> str:
return str(config)
    # hook for subclasses to register cases that should be ignored
def add_ignore_pass_case(self):
return
    # check that the optimized program contains exactly the expected op list
def assert_op_list(self, model_bytes, op_list_after_fusion):
if not self.passes:
raise ValueError(
"In PassAutoScan you should give a valid pass name.")
pg = paddle.static.deserialize_program(model_bytes)
main_block = pg.desc.block(0)
after_op_list = list()
for i in range(main_block.op_size()):
if main_block.op(i).type() in ["feed", "fetch"]:
continue
after_op_list.append(main_block.op(i).type())
self.assertTrue(
op_list_after_fusion == after_op_list,
"Expected operator list after fusion is {}, but now it's {}".format(
op_list_after_fusion, after_op_list), )
def run_and_statis(
self,
quant=False,
max_examples=100,
reproduce=None,
min_success_num=25,
max_duration=180,
passes=None ):
if os.getenv('HYPOTHESIS_TEST_PROFILE', 'ci') == "dev":
max_examples *= 10
min_success_num *= 10
            # in the CE (dev) phase there is no time limit
max_duration = -1
start_time = time.time()
settings.register_profile(
"ci",
max_examples=max_examples,
suppress_health_check=hypothesis.HealthCheck.all(),
deadline=None,
print_blob=True,
derandomize=True,
report_multiple_bugs=False, )
settings.load_profile("ci")
self.passes = passes
self.add_ignore_pass_case()
def program_generator(draw):
return self.sample_program_configs(draw)
def run_test(prog_config):
return self.run_test(quant=quant, prog_configs=[prog_config])
        # if the current unittest is not enabled on the given target, exit directly.
        if not self.is_actived():
            logging.info("Error: This test is not activated on " + self.get_target())
return
generator = st.composite(program_generator)
loop_func = given(generator())(run_test)
if reproduce is not None:
loop_func = reproduce(loop_func)
logging.info("Start to running test of {}".format(type(self)))
loop_func()
logging.info(
"===================Statistical Information===================")
logging.info("Number of Generated Programs: {}".format(
self.num_ran_programs + self.num_invalid_programs))
logging.info("Number of Invalid Programs: {}".format(
self.num_invalid_programs))
logging.info("Number of Ran Programs: {}".format(self.num_ran_programs))
logging.info("Number of Ignored Tests: {}".format(
self.num_ignore_tests))
if self.num_predictor_kinds == 0:
successful_ran_programs = int(self.num_ran_programs)
min_success_num = 0
else:
successful_ran_programs = int(self.num_ran_programs -
self.num_ignore_tests /
self.num_predictor_kinds)
logging.info(
"Number of successfully ran programs approximately equal to {}".
format(successful_ran_programs))
if successful_ran_programs < min_success_num:
logging.warning(
"satisfied_programs = ran_programs - num_ignore_tests / num_predictor_kinds"
)
logging.fatal(
"At least {} programs need to ran successfully, but now only about {} programs satisfied.".
format(min_success_num, successful_ran_programs))
assert False
used_time = time.time() - start_time
if max_duration > 0 and used_time > max_duration:
logging.fatal(
"The duration exceeds {} seconds, if this is neccessary, try to set a larger number for parameter `max_duration`.".
format(max_duration))
assert False
@abc.abstractmethod
def run_lite_config(self, model, params, feed_data, pred_config) -> Dict[str, np.ndarray]:
raise NotImplementedError
    # enable testing on a hardware place; predictor configs are generated
    # automatically from the given target/precision/layout (or an explicit places list)
def enable_testing_on_place(self, target=None, precision=None, layout=None, thread=None, places=None) -> None:
# set thread_num
if isinstance(thread,list):
self.thread_num = list(set(self.thread_num + thread))
if isinstance(thread,int):
self.thread_num.append(thread)
self.thread_num = list(self.thread_num)
        # if a list[Place] is passed in, it is used directly
if places is not None:
assert isinstance(places, list)
self.valid_places.append(places)
return
        # otherwise a list[Place] is generated from the given target/precision/layout
assert (target is not None)
target_ = target if isinstance(target,list) else [target]
precision_ = precision if isinstance(precision, list) else [precision]
layout_ = layout if isinstance(layout,list) else [layout]
for tar_, pre_, lay_ in product(target_, precision_, layout_):
self.valid_places.append([Place(tar_, pre_, lay_)])
return
def get_target(self) -> str:
return self.args.target
def is_actived(self) -> bool:
for valid_place_ in self.valid_places:
if self.get_target() in valid_place_[0]:
return True
return False
def get_predictor_configs(self) -> List[CxxConfig]:
return self.target_to_predictor_configs(self, self.get_target())
# get valid test configs
@staticmethod
    def target_to_predictor_configs(self, target: str) -> List[CxxConfig]:
configs_ = []
for elem_ in self.valid_places:
if target in elem_[0]:
for thread_ in self.thread_num:
config_ = CxxConfig()
config_.set_valid_places(elem_)
config_.set_threads(thread_)
configs_.append(config_)
return configs_
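# A minimal sketch of how a concrete test is expected to subclass the base
# above (illustrative only: the enum members TargetType.Host, PrecisionType.FP32
# and DataLayoutType.NCHW are assumed from the program_config import, and the
# program/op details are test-specific and omitted).
class _ExampleAutoScanTest(AutoScanBaseTest):
    def __init__(self, *args, **kwargs):
        super(_ExampleAutoScanTest, self).__init__(*args, **kwargs)
        # register the hardware places this test is valid on; one Place is
        # generated per (target, precision, layout) combination
        self.enable_testing_on_place(
            TargetType.Host, PrecisionType.FP32, DataLayoutType.NCHW, thread=[1, 4])
    def sample_program_configs(self, draw):
        # test-specific: build ProgramConfig objects with hypothesis' `draw`
        raise NotImplementedError
    def sample_predictor_configs(self):
        # returns (predictor configs, expected op list after fusion, (atol, rtol))
        return self.get_predictor_configs(), ["relu"], (1e-5, 1e-5)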
|
from typing import Any
import discord
from redbot.core import commands
from redbot.core.utils.predicates import MessagePredicate
Cog: Any = getattr(commands, "Cog", object)
class BanMe(Cog):
"""
Ridiculous cog for a ridiculous request
"""
def __init__(self, bot):
self.bot = bot
@commands.command()
async def banme(self, ctx):
"""
Does what it says on the tin, bans yourself.
"""
await ctx.send("Are you sure about banning yourself? Answer with a `Yes` or a `No`")
pred = MessagePredicate.yes_or_no(ctx)
await self.bot.wait_for("message", check=pred)
if pred.result is True:
try:
await ctx.guild.ban(ctx.author, reason="You literally banned yourself")
except Exception as e:
await ctx.send(f"Error: ***{e}***")
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functions that wrap both gfile and gcs.
This module is *not* intended to be a general-purpose IO wrapper library; it
only implements the operations that are necessary for loading event files. The
functions either dispatch to the gcs library or to gfile, depending on whether
the path is a GCS 'pseudo-path' (i.e., it satisfies gcs.IsGCSPath) or not.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from tensorflow.python.platform import gfile
from tensorflow.python.summary.impl import event_file_loader
from tensorflow.python.summary.impl import gcs
from tensorflow.python.summary.impl import gcs_file_loader
def CreateFileLoader(path):
"""Creates a file loader for the given path.
Args:
    path: A string representing either a normal path or a GCS pseudo-path.
Returns:
An object with a Load() method that yields event_pb2.Event protos.
"""
if gcs.IsGCSPath(path):
return gcs_file_loader.GCSFileLoader(path)
else:
return event_file_loader.EventFileLoader(path)
def ListDirectoryAbsolute(directory):
"""Yields all files in the given directory. The paths are absolute."""
if gcs.IsGCSPath(directory):
return gcs.ListDirectory(directory)
else:
return (os.path.join(directory, path)
for path in gfile.ListDirectory(directory))
def ListRecursively(top):
"""Walks a directory tree, yielding (dir_path, file_paths) tuples.
For each of `top` and its subdirectories, yields a tuple containing the path
to the directory and the path to each of the contained files. Note that
  unlike os.walk()/gfile.Walk(), this does not list subdirectories and the file
paths are all absolute.
If the directory does not exist, this yields nothing.
Args:
    top: A path to a directory.
Yields:
A list of (dir_path, file_paths) tuples.
"""
if gcs.IsGCSPath(top):
for x in gcs.ListRecursively(top):
yield x
else:
for dir_path, _, filenames in gfile.Walk(top):
yield (dir_path, (os.path.join(dir_path, filename)
for filename in filenames))
def IsDirectory(path):
"""Returns true if path exists and is a directory."""
if gcs.IsGCSPath(path):
return gcs.IsDirectory(path)
else:
return gfile.IsDirectory(path)
def Exists(path):
  """Returns True if the given path exists, dispatching to gcs or gfile."""
if gcs.IsGCSPath(path):
return gcs.Exists(path)
else:
return gfile.Exists(path)
def Size(path):
"""Returns the number of bytes in the given file. Doesn't work on GCS."""
if gcs.IsGCSPath(path):
raise NotImplementedError("io_wrapper.Size doesn't support GCS paths")
else:
return gfile.Open(path).size()
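# Hedged usage sketch, not part of the original module: shows how the wrappers
# above dispatch on the path type. The log directory below is a placeholder.
if __name__ == '__main__':
  logdir = '/tmp/example_logdir'  # hypothetical directory of event files
  if IsDirectory(logdir):
    for dir_path, file_paths in ListRecursively(logdir):
      for file_path in file_paths:
        # CreateFileLoader picks the GCS loader or the local event file loader
        # depending on whether the path is a GCS pseudo-path.
        loader = CreateFileLoader(file_path)
        print(dir_path, file_path, loader)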
|
from rest_framework.serializers import ModelSerializer
from dcim.models import DeviceRole, Device
from extras.models import Tag
class PreDeviceRoleSerializer(ModelSerializer):
class Meta:
model = DeviceRole
fields = ('id', 'name')
class PreTagSerializer(ModelSerializer):
class Meta:
model = Tag
fields = ('id', 'name')
class TopologyDummySerializer(ModelSerializer):
class Meta:
model = Device
fields = ('id', 'name')
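# Hedged example, not part of the original module: a small helper illustrating
# the intended read-only use of these serializers; the function name is
# illustrative only.
def serialize_device_stub(device: Device) -> dict:
    """Return the minimal id/name representation produced by TopologyDummySerializer."""
    return TopologyDummySerializer(device).data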
|
from construct import *
from construct.lib import *
switch_manual_str__opcode__intval = Struct(
'value' / Int8ub,
)
switch_manual_str__opcode__strval = Struct(
'value' / NullTerminated(GreedyString(encoding='ASCII'), term=b'\x00', include=False, consume=True),
)
switch_manual_str__opcode = Struct(
'code' / FixedSized(1, GreedyString(encoding='ASCII')),
'body' / Switch(this.code, {u"I": LazyBound(lambda: switch_manual_str__opcode__intval), u"S": LazyBound(lambda: switch_manual_str__opcode__strval), }),
)
switch_manual_str = Struct(
'opcodes' / GreedyRange(LazyBound(lambda: switch_manual_str__opcode)),
)
_schema = switch_manual_str
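# Hedged demo, not part of the generated schema: parses a small hand-built byte
# string against the structs above. Assumed layout: a 1-byte ASCII opcode
# ('I' or 'S') followed by either a uint8 or a null-terminated ASCII string,
# repeated until end of input.
if __name__ == '__main__':
    parsed = _schema.parse(b'I\x05Sfoo\x00')
    print(parsed.opcodes[0].body.value)  # 5
    print(parsed.opcodes[1].body.value)  # foo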
|
# coding: utf-8
"""
Intersight REST API
This is Intersight REST API
OpenAPI spec version: 1.0.9-262
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class HyperflexClusterNetworkPolicyList(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'count': 'int',
'results': 'list[HyperflexClusterNetworkPolicy]'
}
attribute_map = {
'count': 'Count',
'results': 'Results'
}
def __init__(self, count=None, results=None):
"""
HyperflexClusterNetworkPolicyList - a model defined in Swagger
"""
self._count = None
self._results = None
if count is not None:
self.count = count
if results is not None:
self.results = results
@property
def count(self):
"""
Gets the count of this HyperflexClusterNetworkPolicyList.
The number of hyperflexClusterNetworkPolicies matching your request in total for all pages.
:return: The count of this HyperflexClusterNetworkPolicyList.
:rtype: int
"""
return self._count
@count.setter
def count(self, count):
"""
Sets the count of this HyperflexClusterNetworkPolicyList.
The number of hyperflexClusterNetworkPolicies matching your request in total for all pages.
:param count: The count of this HyperflexClusterNetworkPolicyList.
:type: int
"""
self._count = count
@property
def results(self):
"""
Gets the results of this HyperflexClusterNetworkPolicyList.
The array of hyperflexClusterNetworkPolicies matching your request.
:return: The results of this HyperflexClusterNetworkPolicyList.
:rtype: list[HyperflexClusterNetworkPolicy]
"""
return self._results
@results.setter
def results(self, results):
"""
Sets the results of this HyperflexClusterNetworkPolicyList.
The array of hyperflexClusterNetworkPolicies matching your request.
:param results: The results of this HyperflexClusterNetworkPolicyList.
:type: list[HyperflexClusterNetworkPolicy]
"""
self._results = results
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, HyperflexClusterNetworkPolicyList):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
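# Hedged usage sketch, not part of the generated code: constructs the list
# wrapper directly and exercises to_dict()/__eq__(); the values are placeholders.
if __name__ == '__main__':
    page = HyperflexClusterNetworkPolicyList(count=0, results=[])
    print(page.to_dict())  # e.g. {'count': 0, 'results': []}
    print(page == HyperflexClusterNetworkPolicyList(count=0, results=[]))  # True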
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Training-related part of the Keras engine.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import weakref
import numpy as np
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.ops import iterator_ops
from tensorflow.python.eager import context
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.keras import backend as K
from tensorflow.python.keras import losses
from tensorflow.python.keras import metrics as metrics_module
from tensorflow.python.keras import optimizers
from tensorflow.python.keras.engine import base_layer
from tensorflow.python.keras.engine import distributed_training_utils
from tensorflow.python.keras.engine import training_arrays
from tensorflow.python.keras.engine import training_distributed
from tensorflow.python.keras.engine import training_eager
from tensorflow.python.keras.engine import training_generator
from tensorflow.python.keras.engine import training_utils
from tensorflow.python.keras.engine.network import Network
from tensorflow.python.keras.utils.generic_utils import slice_arrays
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import weights_broadcast_ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import optimizer as tf_optimizer_module
from tensorflow.python.training.checkpointable import base as checkpointable
from tensorflow.python.util.tf_export import tf_export
@tf_export('keras.models.Model', 'keras.Model')
class Model(Network):
"""`Model` groups layers into an object with training and inference features.
There are two ways to instantiate a `Model`:
1 - With the "functional API", where you start from `Input`,
you chain layer calls to specify the model's forward pass,
and finally you create your model from inputs and outputs:
```python
import tensorflow as tf
inputs = tf.keras.Input(shape=(3,))
x = tf.keras.layers.Dense(4, activation=tf.nn.relu)(inputs)
outputs = tf.keras.layers.Dense(5, activation=tf.nn.softmax)(x)
model = tf.keras.Model(inputs=inputs, outputs=outputs)
```
2 - By subclassing the `Model` class: in that case, you should define your
layers in `__init__` and you should implement the model's forward pass
in `call`.
```python
import tensorflow as tf
class MyModel(tf.keras.Model):
def __init__(self):
super(MyModel, self).__init__()
self.dense1 = tf.keras.layers.Dense(4, activation=tf.nn.relu)
self.dense2 = tf.keras.layers.Dense(5, activation=tf.nn.softmax)
def call(self, inputs):
x = self.dense1(inputs)
return self.dense2(x)
model = MyModel()
```
If you subclass `Model`, you can optionally have
a `training` argument (boolean) in `call`, which you can use to specify
a different behavior in training and inference:
```python
import tensorflow as tf
class MyModel(tf.keras.Model):
def __init__(self):
super(MyModel, self).__init__()
self.dense1 = tf.keras.layers.Dense(4, activation=tf.nn.relu)
self.dense2 = tf.keras.layers.Dense(5, activation=tf.nn.softmax)
self.dropout = tf.keras.layers.Dropout(0.5)
def call(self, inputs, training=False):
x = self.dense1(inputs)
if training:
x = self.dropout(x, training=training)
return self.dense2(x)
model = MyModel()
```
"""
def __init__(self, *args, **kwargs):
super(Model, self).__init__(*args, **kwargs)
# Create a cache for iterator get_next op.
self._iterator_get_next = weakref.WeakKeyDictionary()
# Create a cache for dataset - uninitialized iterators
self._dataset_iterator_cache = weakref.WeakKeyDictionary()
# initializing _distribution_strategy here since it is possible to call
# predict on a model without compiling it.
self._distribution_strategy = None
def _set_sample_weight_attributes(self, sample_weight_mode,
skip_target_weighing_indices):
"""Sets sample weight related attributes on the model."""
sample_weights, sample_weight_modes = training_utils.prepare_sample_weights(
self.output_names, sample_weight_mode, skip_target_weighing_indices)
self.sample_weights = sample_weights
self.sample_weight_modes = sample_weight_modes
self._feed_sample_weight_modes = [
sample_weight_modes[i]
for i in range(len(self.outputs))
if i not in skip_target_weighing_indices
]
self._feed_sample_weights = [
sample_weights[i]
for i in range(len(sample_weights))
if i not in skip_target_weighing_indices
]
def _get_metric_name(self, metric, output_index, weighted=False):
"""Returns the metric name corresponding to the given metric input.
Arguments:
metric: Metric function name or reference.
output_index: Index of the current output.
weighted: Boolean indicating if the given metric is weighted.
Returns:
A metric name.
"""
metric_name_prefix = 'weighted_' if weighted else ''
if metric in ('accuracy', 'acc', 'crossentropy', 'ce'):
if metric in ('accuracy', 'acc'):
suffix = 'acc'
elif metric in ('crossentropy', 'ce'):
suffix = 'ce'
else:
metric_fn = metrics_module.get(metric)
# Get metric name as string
if hasattr(metric_fn, 'name'):
suffix = metric_fn.name
else:
suffix = metric_fn.__name__
metric_name = metric_name_prefix + suffix
if len(self.output_names) > 1:
metric_name = '%s_%s' % (self.output_names[output_index], metric_name)
j = 1
base_metric_name = metric_name
while metric_name in self.metrics_names:
metric_name = '%s_%d' % (base_metric_name, j)
j += 1
return metric_name
def _handle_per_output_metrics(self,
metrics,
y_true,
y_pred,
output_index,
output_shape,
loss_fn,
mask,
weights=None):
"""Calls metric functions and sets metric attributes for a single output.
Arguments:
metrics: List of metrics.
y_true: Target output.
y_pred: Predicted output.
output_index: Index of the current output.
output_shape: Shape of the current output.
loss_fn: Loss function corresponding to the current output.
mask: Computed mask value for the current output.
weights: Weights to be applied on the current output.
Returns:
A list of metric result tensors.
"""
metric_results = []
for metric in metrics:
metric_fn = training_utils.get_metric_function(
metric, output_shape=output_shape, loss_fn=loss_fn)
metric_name = self._get_metric_name(
metric, output_index, weighted=weights is not None)
with K.name_scope(metric_name):
# If both outputs and targets are available, call the metric function.
if y_true is not None and y_pred is not None:
if isinstance(metric_fn, metrics_module.Metric):
# Call the stateful metric function.
if mask is not None:
mask = math_ops.cast(mask, y_pred.dtype)
# Update weights with mask.
if weights is None:
weights = mask
else:
                # Update dimensions of weights to match the mask if possible
                # before applying it.
mask, _, weights = metrics_module.squeeze_or_expand_dimensions(
mask, None, weights)
try:
# Broadcast weights if possible.
weights = weights_broadcast_ops.broadcast_weights(
weights, mask)
except ValueError:
pass
# TODO(psv): Handle case when mask and weight shapes are not
# compatible.
weights *= mask
metric_result = metric_fn(y_true, y_pred, weights)
else:
# Call the stateless metric function.
weighted_metric_fn = training_utils.weighted_masked_objective(
metric_fn)
metric_result = weighted_metric_fn(
y_true, y_pred, weights=weights, mask=mask)
if not context.executing_eagerly():
# Keep track of metric result tensor.
self.metrics_tensors.append(metric_result)
metric_results.append(metric_result)
# Keep track of metric name.
self.metrics_names.append(metric_name)
# Keep track of stateful metric attributes (name and metric function).
if isinstance(metric_fn, base_layer.Layer) and metric_fn.stateful:
self.stateful_metric_names.append(metric_name)
self.stateful_metric_functions.append(metric_fn)
if not context.executing_eagerly():
# Keep track of updates created by stateful metrics.
self.metrics_updates += metric_fn.updates
return metric_results
def _handle_metrics(self,
outputs,
skip_target_indices=None,
targets=None,
sample_weights=None,
masks=None):
"""Handles calling metric functions and setting model metric attributes.
Arguments:
outputs: List of outputs (predictions).
skip_target_indices: Optional. List of target ids to skip.
targets: List of targets.
sample_weights: Optional list of sample weight arrays.
masks: List of computed output mask values.
Returns:
A list of metric result tensors.
"""
skip_target_indices = skip_target_indices or []
metric_results = []
with K.name_scope('metrics'):
for i in range(len(outputs)):
if i in skip_target_indices:
continue
output = outputs[i] if outputs else None
target = targets[i] if targets else None
output_shape = None if output is None else output.get_shape().as_list()
output_mask = masks[i] if masks else None
metric_results.extend(
self._handle_per_output_metrics(
self.nested_metrics[i], target, output, i, output_shape,
self.loss_functions[i], output_mask))
metric_results.extend(
self._handle_per_output_metrics(
self.nested_weighted_metrics[i],
target,
output,
i,
output_shape,
self.loss_functions[i],
output_mask,
weights=sample_weights[i]))
return metric_results
@checkpointable.no_automatic_dependency_tracking
def compile(self,
optimizer,
loss=None,
metrics=None,
loss_weights=None,
sample_weight_mode=None,
weighted_metrics=None,
target_tensors=None,
distribute=None,
**kwargs):
"""Configures the model for training.
Arguments:
optimizer: String (name of optimizer) or optimizer instance.
See [optimizers](/api_docs/python/tf/keras/optimizers).
loss: String (name of objective function) or objective function.
See [losses](/api_docs/python/tf/losses).
If the model has multiple outputs, you can use a different loss
on each output by passing a dictionary or a list of losses.
The loss value that will be minimized by the model
will then be the sum of all individual losses.
metrics: List of metrics to be evaluated by the model
during training and testing.
Typically you will use `metrics=['accuracy']`.
To specify different metrics for different outputs of a
multi-output model, you could also pass a dictionary,
such as `metrics={'output_a': 'accuracy'}`.
loss_weights: Optional list or dictionary specifying scalar
coefficients (Python floats) to weight the loss contributions
of different model outputs.
The loss value that will be minimized by the model
will then be the *weighted sum* of all individual losses,
weighted by the `loss_weights` coefficients.
If a list, it is expected to have a 1:1 mapping
        to the model's outputs. If a dict, it is expected to map
        output names (strings) to scalar coefficients.
sample_weight_mode: If you need to do timestep-wise
sample weighting (2D weights), set this to `"temporal"`.
`None` defaults to sample-wise weights (1D).
If the model has multiple outputs, you can use a different
`sample_weight_mode` on each output by passing a
dictionary or a list of modes.
weighted_metrics: List of metrics to be evaluated and weighted
by sample_weight or class_weight during training and testing.
target_tensors: By default, Keras will create placeholders for the
model's target, which will be fed with the target data during
training. If instead you would like to use your own
target tensors (in turn, Keras will not expect external
Numpy data for these targets at training time), you
can specify them via the `target_tensors` argument. It can be
a single tensor (for a single-output model), a list of tensors,
or a dict mapping output names to target tensors.
distribute: The DistributionStrategy instance that we want to use to
distribute the training of the model.
**kwargs: These arguments are passed to `tf.Session.run`.
Raises:
ValueError: In case of invalid arguments for
`optimizer`, `loss`, `metrics` or `sample_weight_mode`.
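    Example:
    A minimal illustrative call; the optimizer and loss names below are common
    choices, not requirements of this method:
    ```python
    model.compile(optimizer='rmsprop',
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])
    ```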
"""
# Validate that arguments passed by the user to `compile` are supported by
# DistributionStrategy.
if distribute and not isinstance(
optimizer, (tf_optimizer_module.Optimizer, optimizers.TFOptimizer)):
raise NotImplementedError('Only TF native optimizers are supported with '
'DistributionStrategy.')
if distribute and context.executing_eagerly():
raise NotImplementedError('DistributionStrategy is not supported in '
'Eager mode.')
if distribute and sample_weight_mode:
raise NotImplementedError('sample_weight_mode is not supported with '
'DistributionStrategy.')
if distribute and weighted_metrics:
raise NotImplementedError('weighted_metrics is not supported with '
'DistributionStrategy.')
if distribute and target_tensors:
raise ValueError('target_tensors is not supported with '
'DistributionStrategy.')
loss = loss or {}
if context.executing_eagerly() and not isinstance(
optimizer, (tf_optimizer_module.Optimizer, optimizers.TFOptimizer)):
raise ValueError('Only TF native optimizers are supported in Eager mode.')
self.optimizer = optimizers.get(optimizer)
# We've disabled automatic dependency tracking for this method, but do want
# to add a checkpoint dependency on the optimizer if it's checkpointable.
if isinstance(self.optimizer, checkpointable.CheckpointableBase):
self._track_checkpointable(
self.optimizer, name='optimizer', overwrite=True)
self.loss = loss
self.metrics = metrics or []
self.loss_weights = loss_weights
self.sample_weight_mode = sample_weight_mode
self.weighted_metrics = weighted_metrics
if context.executing_eagerly() and target_tensors is not None:
raise ValueError('target_tensors is not supported in Eager mode.')
self.target_tensors = target_tensors
# Set DistributionStrategy specific parameters.
self._distribution_strategy = distribute
if self._distribution_strategy is not None:
self._grouped_model = self._compile_distributed_model(
self._distribution_strategy)
with self._distribution_strategy.scope():
first_replicated_model = self._distribution_strategy.unwrap(
self._grouped_model)[0]
# If the specified metrics in `compile` are stateful, raise an error
# since we currently don't support stateful metrics.
if first_replicated_model.stateful_metric_names:
raise NotImplementedError('Stateful metrics are not supported with '
'DistributionStrategy.')
# We initialize the callback model with the first replicated model.
self._replicated_model = DistributedCallbackModel(first_replicated_model)
self._replicated_model.set_original_model(self)
if not self.built:
# Model is not compilable because it does not know its number of inputs
# and outputs, nor their shapes and names. We will compile after the first
# time the model gets called on training data.
return
self._is_compiled = True
# Prepare loss functions.
if isinstance(loss, dict):
for name in loss:
if name not in self.output_names:
raise ValueError(
'Unknown entry in loss '
'dictionary: "' + name + '". '
'Only expected the following keys: ' + str(self.output_names))
loss_functions = []
for name in self.output_names:
if name not in loss:
logging.warning(
'Output "' + name + '" missing from loss dictionary. We assume '
'this was done on purpose. The fit and evaluate APIs will not be '
'expecting any data to be passed to "' + name + '".')
loss_functions.append(losses.get(loss.get(name)))
elif isinstance(loss, list):
if len(loss) != len(self.outputs):
raise ValueError('When passing a list as loss, '
                         'it should have one entry per model output. '
'The model has ' + str(len(self.outputs)) +
' outputs, but you passed loss=' + str(loss))
loss_functions = [losses.get(l) for l in loss]
else:
loss_function = losses.get(loss)
loss_functions = [loss_function for _ in range(len(self.outputs))]
self.loss_functions = loss_functions
weighted_losses = [training_utils.weighted_masked_objective(fn)
for fn in loss_functions]
skip_target_indices = []
skip_target_weighing_indices = []
self._feed_outputs = []
self._feed_output_names = []
self._feed_output_shapes = []
self._feed_loss_fns = []
for i in range(len(weighted_losses)):
if weighted_losses[i] is None:
skip_target_indices.append(i)
skip_target_weighing_indices.append(i)
# Prepare output masks.
if not context.executing_eagerly():
masks = [getattr(x, '_keras_mask', None) for x in self.outputs]
if not isinstance(masks, list):
masks = [masks]
# Prepare loss weights.
if loss_weights is None:
loss_weights_list = [1. for _ in range(len(self.outputs))]
elif isinstance(loss_weights, dict):
for name in loss_weights:
if name not in self.output_names:
raise ValueError(
'Unknown entry in loss_weights '
'dictionary: "' + name + '". '
'Only expected the following keys: ' + str(self.output_names))
loss_weights_list = []
for name in self.output_names:
loss_weights_list.append(loss_weights.get(name, 1.))
elif isinstance(loss_weights, list):
if len(loss_weights) != len(self.outputs):
raise ValueError(
'When passing a list as loss_weights, '
'it should have one entry per model output. '
'The model has ' + str(len(self.outputs)) +
' outputs, but you passed loss_weights=' + str(loss_weights))
loss_weights_list = loss_weights
else:
raise TypeError('Could not interpret loss_weights argument: ' +
                      str(loss_weights) + ' - expected a list or dict.')
self.loss_weights_list = loss_weights_list
# Initialize model metric attributes.
self.metrics_names = ['loss']
self.metrics_tensors = []
self.metrics_updates = []
self.stateful_metric_names = []
self.stateful_metric_functions = []
# Nested metrics is a list of list of metrics.
# One list per output of the model.
self.nested_metrics = training_utils.collect_metrics(
metrics, self.output_names)
self.nested_weighted_metrics = training_utils.collect_metrics(
weighted_metrics, self.output_names)
# Initialization for Eager mode execution.
if context.executing_eagerly():
# Prepare sample weights.
self._set_sample_weight_attributes(sample_weight_mode,
skip_target_weighing_indices)
if target_tensors is not None:
raise ValueError('target_tensors are not currently supported in Eager '
'mode.')
self.total_loss = None
for i in range(len(self.outputs)):
if len(self.outputs) > 1:
self.metrics_names.append(self.output_names[i] + '_loss')
# Set metric attributes on model.
self._handle_metrics(
self.outputs,
skip_target_indices=skip_target_indices,
sample_weights=self.sample_weights)
self.targets = []
for i in range(len(self.outputs)):
self._feed_output_names.append(self.output_names[i])
self._collected_trainable_weights = self.trainable_weights
return
# Prepare targets of model.
self.targets = []
self._feed_targets = []
if target_tensors not in (None, []):
if isinstance(target_tensors, list):
if len(target_tensors) != len(self.outputs):
raise ValueError(
'When passing a list as `target_tensors`, '
'it should have one entry per model output. '
'The model has ' + str(len(self.outputs)) +
' outputs, but you passed target_tensors=' + str(target_tensors))
elif isinstance(target_tensors, dict):
for name in target_tensors:
if name not in self.output_names:
raise ValueError(
'Unknown entry in `target_tensors` '
'dictionary: "' + name + '". '
'Only expected the following keys: ' + str(self.output_names))
tmp_target_tensors = []
for name in self.output_names:
tmp_target_tensors.append(target_tensors.get(name, None))
target_tensors = tmp_target_tensors
else:
raise TypeError('Expected `target_tensors` to be '
'a list or dict, but got:', target_tensors)
for i in range(len(self.outputs)):
if i in skip_target_indices:
self.targets.append(None)
else:
shape = K.int_shape(self.outputs[i])
name = self.output_names[i]
if target_tensors not in (None, []):
target = target_tensors[i]
else:
target = None
if target is None or K.is_placeholder(target):
if target is None:
target = K.placeholder(
ndim=len(shape),
name=name + '_target',
sparse=K.is_sparse(self.outputs[i]),
dtype=K.dtype(self.outputs[i]))
self._feed_targets.append(target)
self._feed_outputs.append(self.outputs[i])
self._feed_output_names.append(name)
self._feed_output_shapes.append(shape)
self._feed_loss_fns.append(self.loss_functions[i])
else:
skip_target_weighing_indices.append(i)
self.targets.append(target)
# Prepare sample weights.
self._set_sample_weight_attributes(sample_weight_mode,
skip_target_weighing_indices)
# Compute total loss.
total_loss = None
with K.name_scope('loss'):
for i in range(len(self.outputs)):
if i in skip_target_indices:
continue
y_true = self.targets[i]
y_pred = self.outputs[i]
weighted_loss = weighted_losses[i]
sample_weight = self.sample_weights[i]
mask = masks[i]
loss_weight = loss_weights_list[i]
with K.name_scope(self.output_names[i] + '_loss'):
output_loss = weighted_loss(y_true, y_pred, sample_weight, mask)
if len(self.outputs) > 1:
self.metrics_tensors.append(output_loss)
self.metrics_names.append(self.output_names[i] + '_loss')
if total_loss is None:
total_loss = loss_weight * output_loss
else:
total_loss += loss_weight * output_loss
if total_loss is None:
if not self.losses:
raise ValueError('The model cannot be compiled '
'because it has no loss to optimize.')
else:
total_loss = 0.
# Add regularization penalties
# and other layer-specific losses.
for loss_tensor in self.losses:
total_loss += loss_tensor
# Invoke metric functions for all the outputs.
self._handle_metrics(
self.outputs,
masks=masks,
targets=self.targets,
skip_target_indices=skip_target_indices,
sample_weights=self.sample_weights)
# Prepare gradient updates and state updates.
self.total_loss = total_loss
# Functions for train, test and predict will
# be compiled lazily when required.
# This saves time when the user is not using all functions.
self._function_kwargs = kwargs
self.train_function = None
self.test_function = None
self.predict_function = None
# Collected trainable weights, sorted in topological order.
trainable_weights = self.trainable_weights
self._collected_trainable_weights = trainable_weights
def _compile_distributed_model(self, distribution_strategy):
# TODO(anjalisridhar): Can we move the clone_and_build_model to outside the
# model?
def _clone_model_per_tower(model):
new_model = training_distributed.clone_and_build_model(model)
return new_model
with distribution_strategy.scope():
# Create a copy of this model on each of the devices.
grouped_models = distribution_strategy.call_for_each_tower(
_clone_model_per_tower, self)
return grouped_models
def _check_trainable_weights_consistency(self):
"""Check trainable weights count consistency.
This will raise a warning if `trainable_weights` and
`_collected_trainable_weights` are inconsistent (i.e. have different
number of parameters).
Inconsistency will typically arise when one modifies `model.trainable`
without calling `model.compile` again.
"""
if not hasattr(self, '_collected_trainable_weights'):
return
if len(self.trainable_weights) != len(self._collected_trainable_weights):
logging.warning(
UserWarning(
'Discrepancy between trainable weights and collected trainable'
' weights, did you set `model.trainable` without calling'
            ' `model.compile` afterwards?'))
def _make_train_function(self):
if not hasattr(self, 'train_function'):
raise RuntimeError('You must compile your model before using it.')
self._check_trainable_weights_consistency()
if self.train_function is None:
inputs = (self._feed_inputs +
self._feed_targets +
self._feed_sample_weights)
if self.uses_learning_phase and not isinstance(K.learning_phase(), int):
inputs += [K.learning_phase()]
with K.name_scope('training'):
with K.name_scope(self.optimizer.__class__.__name__):
# Training updates
updates = self.optimizer.get_updates(
params=self._collected_trainable_weights, loss=self.total_loss)
# Unconditional updates
updates += self.get_updates_for(None)
# Conditional updates relevant to this model
updates += self.get_updates_for(self.inputs)
# Stateful metrics updates
updates += self.metrics_updates
# Gets loss and metrics. Updates weights at each call.
self.train_function = K.function(
inputs, [self.total_loss] + self.metrics_tensors,
updates=updates,
name='train_function',
**self._function_kwargs)
def _make_test_function(self):
if not hasattr(self, 'test_function'):
raise RuntimeError('You must compile your model before using it.')
if self.test_function is None:
inputs = (self._feed_inputs +
self._feed_targets +
self._feed_sample_weights)
if self.uses_learning_phase and not isinstance(K.learning_phase(), int):
inputs += [K.learning_phase()]
# Return loss and metrics, no gradient updates.
# Does update the network states.
self.test_function = K.function(
inputs, [self.total_loss] + self.metrics_tensors,
updates=self.state_updates + self.metrics_updates,
name='test_function',
**self._function_kwargs)
def _make_predict_function(self):
if not hasattr(self, 'predict_function'):
self.predict_function = None
if self.predict_function is None:
if self.uses_learning_phase and not isinstance(K.learning_phase(), int):
inputs = self._feed_inputs + [K.learning_phase()]
else:
inputs = self._feed_inputs
# Gets network outputs. Does not update weights.
# Does update the network states.
kwargs = getattr(self, '_function_kwargs', {})
self.predict_function = K.function(
inputs,
self.outputs,
updates=self.state_updates,
name='predict_function',
**kwargs)
def _get_iterator_get_next_tensors(self, iterator):
get_next_op = self._iterator_get_next.get(iterator, None)
if get_next_op is None:
get_next_op = iterator.get_next()
self._iterator_get_next[iterator] = get_next_op
return get_next_op
def _distribution_standardize_user_data(self,
x,
y=None,
sample_weight=None,
class_weight=None,
batch_size=None,
check_steps=False,
steps_name='steps',
steps=None,
validation_split=0):
"""Runs validation checks on input and target data passed by the user.
This is called when using DistributionStrategy to train, evaluate or serve
the model.
Args:
x: Input data. A `tf.data` dataset.
y: Since `x` is a dataset, `y` should not be specified
(since targets will be obtained from the iterator).
sample_weight: An optional sample-weight array passed by the user to
weight the importance of each sample in `x`.
class_weight: An optional class-weight array by the user to
weight the importance of samples in `x` based on the class they belong
to, as conveyed by `y`.
batch_size: Integer batch size. If provided, it is used to run additional
validation checks on stateful models.
check_steps: boolean, True if we want to check for validity of `steps` and
False, otherwise.
steps_name: The public API's parameter name for `steps`.
steps: Integer or `None`. Total number of steps (batches of samples) to
execute.
validation_split: Float between 0 and 1.
Fraction of the training data to be used as validation data.
Returns:
A tuple of 3 lists: input arrays, target arrays, sample-weight arrays.
If the model's input and targets are symbolic, these lists are empty
(since the model takes no user-provided data, instead the data comes
from the symbolic inputs/targets).
Raises:
ValueError: In case of invalid user-provided data.
RuntimeError: If the model was never compiled.
"""
if sample_weight is not None and sample_weight.all():
raise NotImplementedError('sample_weight is currently not supported when '
'using DistributionStrategy.')
if class_weight:
raise NotImplementedError('class_weight is currently not supported when '
'using DistributionStrategy.')
# TODO(anjalisridhar): Can we use the iterator and getnext op cache?
# We require users to pass Datasets since we distribute the dataset across
# multiple devices.
if not isinstance(x, dataset_ops.Dataset):
raise ValueError('When using DistributionStrategy you must specify a '
'Dataset object instead of a %s.' % type(x))
# TODO(anjalisridhar): We want distribute_dataset() to accept a Dataset or a
# function which returns a Dataset. Currently distribute_dataset() only
# accepts a function that returns a Dataset. Once we add support for being
# able to clone a Dataset on multiple workers we can remove this lambda.
result = self._distribution_strategy.distribute_dataset(lambda: x)
iterator = result.make_initializable_iterator()
K.get_session().run(iterator.initializer)
# Validates `steps` argument based on x's type.
if check_steps:
if steps is None:
raise ValueError('When using a Dataset instance as input to a model, '
'you should specify the `{steps_name}` argument.'
.format(steps_name=steps_name))
training_utils.validate_iterator_input(x, y, sample_weight,
validation_split)
    # x and y may be PerDevice objects with an input and output tensor
# corresponding to each device. For example, x could be
# PerDevice:{device: get_next tensor,...}.
next_element = iterator.get_next()
if not isinstance(next_element, (list, tuple)) or len(next_element) != 2:
raise ValueError('Please provide data as a list or tuple of 2 elements '
' - input and target pair. Received %s' % next_element)
x, y = next_element
# Validate that all the elements in x and y are of the same type and shape.
# We can then pass the first element of x and y to `_standardize_weights`
# below and be confident of the output. We need to reopen the scope since
# we unwrap values when we validate x and y.
with self._distribution_strategy.scope():
x_values, y_values = distributed_training_utils.\
validate_distributed_dataset_inputs(self._distribution_strategy, x, y)
_, _, sample_weights = self._standardize_weights(x_values,
y_values,
sample_weight,
class_weight,
batch_size)
return x, y, sample_weights
def _standardize_user_data(self,
x,
y=None,
sample_weight=None,
class_weight=None,
batch_size=None,
check_steps=False,
steps_name='steps',
steps=None,
validation_split=0):
"""Runs validation checks on input and target data passed by the user.
Also standardizes the data to lists of arrays, in order.
Also builds and compiles the model on the fly if it is a subclassed model
that has never been called before (and thus has no inputs/outputs).
This is a purely internal method, subject to refactoring at any time.
Args:
x: Input data. It could be:
- A Numpy array (or array-like), or a list of arrays
(in case the model has multiple inputs).
- A TensorFlow tensor, or a list of tensors
(in case the model has multiple inputs).
- A dict mapping input names to the corresponding array/tensors,
if the model has named inputs.
- A `tf.data` dataset or a dataset iterator.
y: Target data. Like the input data `x`,
it could be either Numpy array(s) or TensorFlow tensor(s).
It should be consistent with `x` (you cannot have Numpy inputs and
tensor targets, or inversely). If `x` is a dataset or a
dataset iterator, `y` should not be specified
(since targets will be obtained from the iterator).
sample_weight: An optional sample-weight array passed by the user to
weight the importance of each sample in `x`.
class_weight: An optional class-weight array by the user to
weight the importance of samples in `x` based on the class they belong
to, as conveyed by `y`.
batch_size: Integer batch size. If provided, it is used to run additional
validation checks on stateful models.
check_steps: boolean, True if we want to check for validity of `steps` and
False, otherwise. For example, when we are standardizing one batch of
data for train_on_batch/predict_on_batch/test_on_batch APIs, `steps`
value is not required and we should not check for its validity in these
cases.
steps_name: The public API's parameter name for `steps`.
steps: Integer or `None`. Total number of steps (batches of samples) to
execute.
validation_split: Float between 0 and 1.
Fraction of the training data to be used as validation data.
Returns:
A tuple of 3 lists: input arrays, target arrays, sample-weight arrays.
If the model's input and targets are symbolic, these lists are empty
(since the model takes no user-provided data, instead the data comes
from the symbolic inputs/targets).
Raises:
ValueError: In case of invalid user-provided data.
RuntimeError: If the model was never compiled.
"""
if self._distribution_strategy:
return self._distribution_standardize_user_data(
x,
y,
sample_weight=sample_weight,
class_weight=class_weight,
batch_size=batch_size,
check_steps=check_steps,
steps_name=steps_name,
steps=steps,
validation_split=validation_split)
if isinstance(x, dataset_ops.Dataset):
if context.executing_eagerly():
x = x.make_one_shot_iterator()
else:
if x in self._dataset_iterator_cache:
x = self._dataset_iterator_cache[x]
else:
iterator = x.make_initializable_iterator()
self._dataset_iterator_cache[x] = iterator
x = iterator
K.get_session().run(x.initializer)
# Validates `steps` argument based on x's type.
if check_steps:
training_utils.check_steps_argument(x, steps, steps_name)
is_x_eager_iterator = isinstance(x, iterator_ops.EagerIterator)
is_x_iterator = isinstance(x, iterator_ops.Iterator)
# Validate user inputs when data is given as a dataset or dataset iterator.
if is_x_iterator or is_x_eager_iterator:
training_utils.validate_iterator_input(x, y, sample_weight,
validation_split)
# For eager iterators, when we have to process multiple batches of samples,
# we will standardize the data when we actually loop over iterator and get
# the batches. For now, we just return the iterator as is.
if is_x_eager_iterator and steps is not None:
return x, y, sample_weight
# If input data is a dataset iterator in graph mode or if it is an eager
# iterator and only one batch of samples is required, we fetch the data
# tensors from the iterator and then standardize them.
if is_x_iterator or is_x_eager_iterator:
try:
if is_x_iterator:
next_element = self._get_iterator_get_next_tensors(x)
else:
next_element = x.get_next()
except errors.OutOfRangeError:
raise RuntimeError('Your dataset iterator ran out of data; '
'Make sure that your dataset can generate '
'required number of samples.')
if not isinstance(next_element, (list, tuple)) or len(next_element) != 2:
raise ValueError('Please provide data as a list or tuple of 2 elements '
' - input and target pair. Received %s' % next_element)
x, y = next_element
x, y, sample_weights = self._standardize_weights(x, y, sample_weight,
class_weight, batch_size)
return x, y, sample_weights
def _standardize_weights(self, x, y, sample_weight=None, class_weight=None,
batch_size=None,):
# First, we build/compile the model on the fly if necessary.
all_inputs = []
is_build_called = False
is_compile_called = False
if not self.inputs:
# We need to use `x` to set the model inputs.
# We type-check that `x` and `y` are either single arrays
# or lists of arrays.
if isinstance(x, (list, tuple)):
if not all(isinstance(v, np.ndarray) or
tensor_util.is_tensor(v) for v in x):
raise ValueError('Please provide as model inputs either a single '
'array or a list of arrays. You passed: x=' + str(x))
all_inputs += list(x)
elif isinstance(x, dict):
raise ValueError('Please do not pass a dictionary as model inputs.')
else:
if not isinstance(x, np.ndarray) and not tensor_util.is_tensor(x):
raise ValueError('Please provide as model inputs either a single '
'array or a list of arrays. You passed: x=' + str(x))
all_inputs.append(x)
# Build the model using the retrieved inputs (value or symbolic).
# If values, then in symbolic-mode placeholders will be created
# to match the value shapes.
if not self.inputs:
is_build_called = True
self._set_inputs(x)
if y is not None:
if not self.optimizer:
raise RuntimeError('You must compile a model before '
'training/testing. '
'Use `model.compile(optimizer, loss)`.')
if not self._is_compiled:
# On-the-fly compilation of the model.
# We need to use `y` to set the model targets.
if isinstance(y, (list, tuple)):
if not all(isinstance(v, np.ndarray) or
tensor_util.is_tensor(v) for v in y):
raise ValueError('Please provide as model targets either a single '
'array or a list of arrays. '
'You passed: y=' + str(y))
all_inputs += list(y)
elif isinstance(y, dict):
raise ValueError('Please do not pass a dictionary as model targets.')
else:
if not isinstance(y, np.ndarray) and not tensor_util.is_tensor(y):
raise ValueError('Please provide as model targets either a single '
'array or a list of arrays. '
'You passed: y=' + str(y))
all_inputs.append(y)
# Typecheck that all inputs are *either* value *or* symbolic.
# TODO(fchollet): this check could be removed in Eager mode?
if any(tensor_util.is_tensor(v) for v in all_inputs):
if not all(tensor_util.is_tensor(v) for v in all_inputs):
raise ValueError('Do not pass inputs that mix Numpy arrays and '
'TensorFlow tensors. '
'You passed: x=' + str(x) + '; y=' + str(y))
if context.executing_eagerly():
target_tensors = None
else:
# Handle target tensors if any passed.
if not isinstance(y, (list, tuple)):
y = [y]
target_tensors = [v for v in y if tensor_util.is_tensor(v)]
is_compile_called = True
self.compile(optimizer=self.optimizer,
loss=self.loss,
metrics=self.metrics,
loss_weights=self.loss_weights,
target_tensors=target_tensors)
# In graph mode, if we had just set inputs and targets as symbolic tensors
# by invoking build and compile on the model respectively, we do not have to
# feed anything to the model. Model already has input and target data as
# part of the graph.
# Note: in this case, `any` and `all` are equivalent since we disallow
# mixed symbolic/value inputs.
if (not context.executing_eagerly() and is_build_called and
is_compile_called and
any(tensor_util.is_tensor(v) for v in all_inputs)):
return [], [], []
# What follows is input validation and standardization to list format,
# in the case where all inputs are value arrays.
if context.executing_eagerly():
# In eager mode, do not do shape validation
# since the network has no input nodes (placeholders) to be fed.
feed_input_names = self.input_names
feed_input_shapes = None
elif not self._is_graph_network:
# Case: symbolic-mode subclassed network. Do not do shape validation.
feed_input_names = self._feed_input_names
feed_input_shapes = None
else:
# Case: symbolic-mode graph network.
# In this case, we run extensive shape validation checks.
feed_input_names = self._feed_input_names
feed_input_shapes = self._feed_input_shapes
# Standardize the inputs.
x = training_utils.standardize_input_data(
x,
feed_input_names,
feed_input_shapes,
check_batch_axis=False, # Don't enforce the batch size.
exception_prefix='input')
if y is not None:
if not self._is_graph_network:
feed_output_names = self._feed_output_names
feed_output_shapes = None
# Sample weighting not supported in this case.
# TODO(fchollet): consider supporting it.
feed_sample_weight_modes = [None for _ in self.outputs]
else:
feed_output_names = self._feed_output_names
feed_sample_weight_modes = self._feed_sample_weight_modes
feed_output_shapes = []
for output_shape, loss_fn in zip(self._feed_output_shapes,
self._feed_loss_fns):
if loss_fn is losses.sparse_categorical_crossentropy:
if K.image_data_format() == 'channels_first':
feed_output_shapes.append(
(output_shape[0], 1) + output_shape[2:])
else:
feed_output_shapes.append(output_shape[:-1] + (1,))
elif (not hasattr(loss_fn, '__name__') or
getattr(losses, loss_fn.__name__, None) is None):
# If `loss_fn` is not a function (e.g. callable class)
# or if it not in the `losses` module, then
# it is a user-defined loss and we make no assumptions
# about it.
feed_output_shapes.append(None)
else:
feed_output_shapes.append(output_shape)
# Standardize the outputs.
y = training_utils.standardize_input_data(
y,
feed_output_names,
feed_output_shapes,
check_batch_axis=False, # Don't enforce the batch size.
exception_prefix='target')
# Generate sample-wise weight values given the `sample_weight` and
# `class_weight` arguments.
sample_weights = training_utils.standardize_sample_weights(
sample_weight, feed_output_names)
class_weights = training_utils.standardize_class_weights(
class_weight, feed_output_names)
sample_weights = [
training_utils.standardize_weights(ref, sw, cw, mode)
for (ref, sw, cw, mode) in zip(y, sample_weights, class_weights,
feed_sample_weight_modes)
]
# Check that all arrays have the same length.
if not self._distribution_strategy:
training_utils.check_array_lengths(x, y, sample_weights)
if self._is_graph_network and not context.executing_eagerly():
# Additional checks to avoid users mistakenly using improper loss fns.
training_utils.check_loss_and_target_compatibility(
y, self._feed_loss_fns, feed_output_shapes)
else:
y = []
sample_weights = []
if self.stateful and batch_size:
# Check that for stateful networks, number of samples is a multiple
# of the static batch size.
if x[0].shape[0] % batch_size != 0:
raise ValueError('In a stateful network, '
'you should only pass inputs with '
'a number of samples that can be '
'divided by the batch size. Found: ' +
str(x[0].shape[0]) + ' samples')
return x, y, sample_weights
@checkpointable.no_automatic_dependency_tracking
def _set_inputs(self, inputs, training=None):
"""Set model's input and output specs based on the input data received.
This is to be used for Model subclasses, which do not know at instantiation
time what their inputs look like.
Args:
inputs: Single array, or list of arrays. The arrays could be placeholders,
Numpy arrays, or data tensors.
- if placeholders: the model is built on top of these placeholders,
and we expect Numpy data to be fed for them when calling `fit`/etc.
- if Numpy data: we create placeholders matching the shape of the Numpy
arrays. We expect Numpy data to be fed for these placeholders
when calling `fit`/etc.
- if data tensors: the model is built on top of these tensors.
We do not expect any Numpy data to be provided when calling `fit`/etc.
training: Boolean or None. Only relevant in symbolic mode. Specifies
whether to build the model's graph in inference mode (False), training
mode (True), or using the Keras learning phase (None).
"""
call_convention = getattr(
self,
'_call_convention',
base_layer.CallConvention.EXPLICIT_INPUTS_ARGUMENT)
if call_convention not in (
base_layer.CallConvention.EXPLICIT_INPUTS_ARGUMENT,
base_layer.CallConvention.SINGLE_POSITIONAL_ARGUMENT):
raise NotImplementedError(
'Subclassed Models without "inputs" (or single positional arguments) '
'in their call() signatures do not yet support shape inference. File '
'a feature request if this limitation bothers you.')
if self.__class__.__name__ == 'Sequential':
if tensor_util.is_tensor(inputs):
input_shape = (None,) + tuple(inputs.get_shape().as_list()[1:])
self.build(input_shape=input_shape)
else:
input_shape = (None,) + inputs.shape[1:]
self.build(input_shape=input_shape)
if context.executing_eagerly():
self._eager_set_inputs(inputs)
else:
self._symbolic_set_inputs(inputs, training=training)
@checkpointable.no_automatic_dependency_tracking
def _eager_set_inputs(self, inputs):
"""Set model's input and output specs based on the input data received.
This is to be used for Model subclasses, which do not know at instantiation
time what their inputs look like.
We assume the number and ndim of outputs
does not change over different calls.
Args:
inputs: Argument `x` (input data) passed by the user upon first model use.
Raises:
ValueError: If the model's inputs are already set.
"""
assert context.executing_eagerly()
if self.inputs:
raise ValueError('Model inputs are already set.')
# On-the-fly setting of model inputs/outputs as DeferredTensors,
# to keep track of number of inputs and outputs and their ndim.
if isinstance(inputs, (list, tuple)):
if tensor_util.is_tensor(inputs[0]):
dummy_output_values = self.call(
training_utils.cast_if_floating_dtype(inputs))
else:
dummy_output_values = self.call(
[ops.convert_to_tensor(v, dtype=K.floatx()) for v in inputs])
dummy_input_values = list(inputs)
else:
if tensor_util.is_tensor(inputs):
dummy_output_values = self.call(
training_utils.cast_if_floating_dtype(inputs))
else:
dummy_output_values = self.call(
ops.convert_to_tensor(inputs, dtype=K.floatx()))
dummy_input_values = [inputs]
if isinstance(dummy_output_values, (list, tuple)):
dummy_output_values = list(dummy_output_values)
else:
dummy_output_values = [dummy_output_values]
self.outputs = [
base_layer.DeferredTensor(shape=(None for _ in v.shape),
dtype=v.dtype) for v in dummy_output_values]
self.inputs = [
base_layer.DeferredTensor(shape=(None for _ in v.shape),
dtype=v.dtype) for v in dummy_input_values]
self.input_names = [
'input_%d' % (i + 1) for i in range(len(dummy_input_values))]
self.output_names = [
'output_%d' % (i + 1) for i in range(len(dummy_output_values))]
self.built = True
@checkpointable.no_automatic_dependency_tracking
def _symbolic_set_inputs(self, inputs, outputs=None, training=None):
"""Set model's inputs and output specs based.
This is to be used for Model subclasses, which do not know at instantiation
time what their inputs look like.
Args:
inputs: Argument `x` (input data) passed by the user upon first model use.
outputs: None, a data tensor, or a list of data tensors. If None, the
outputs will be determined by invoking self.call(), otherwise the
provided value will be used.
training: Boolean or None. Only relevant in symbolic mode. Specifies
whether to build the model's graph in inference mode (False), training
mode (True), or using the Keras learning phase (None).
Raises:
ValueError: If the model's inputs are already set.
"""
assert not context.executing_eagerly()
if self.inputs:
raise ValueError('Model inputs are already set.')
# On-the-fly setting of symbolic model inputs (either by using the tensor
# provided, or by creating a placeholder if Numpy data was provided).
self.inputs = []
self.input_names = []
self._feed_inputs = []
self._feed_input_names = []
self._feed_input_shapes = []
if isinstance(inputs, (list, tuple)):
inputs = list(inputs)
else:
inputs = [inputs]
for i, v in enumerate(inputs):
name = 'input_%d' % (i + 1)
self.input_names.append(name)
if isinstance(v, list):
v = np.asarray(v)
if v.ndim == 1:
v = np.expand_dims(v, 1)
if isinstance(v, (np.ndarray)):
# We fix the placeholder shape except the batch size.
# This is suboptimal, but it is the best we can do with the info
# we have. The user should call `model._set_inputs(placeholders)`
# to specify custom placeholders if the need arises.
shape = (None,) + v.shape[1:]
placeholder = K.placeholder(shape=shape, name=name)
self.inputs.append(placeholder)
self._feed_inputs.append(placeholder)
self._feed_input_names.append(name)
self._feed_input_shapes.append(shape)
else:
# Assumed tensor - TODO(fchollet) additional type check?
self.inputs.append(v)
if K.is_placeholder(v):
self._feed_inputs.append(v)
self._feed_input_names.append(name)
self._feed_input_shapes.append(K.int_shape(v))
if outputs is None:
# Obtain symbolic outputs by calling the model.
if len(self.inputs) == 1:
if self._expects_training_arg:
outputs = self.call(self.inputs[0], training=training)
else:
outputs = self.call(self.inputs[0])
else:
if self._expects_training_arg:
outputs = self.call(self.inputs, training=training)
else:
outputs = self.call(self.inputs)
if isinstance(outputs, (list, tuple)):
outputs = list(outputs)
else:
outputs = [outputs]
self.outputs = outputs
self.output_names = [
'output_%d' % (i + 1) for i in range(len(self.outputs))]
self.built = True
def fit(self,
x=None,
y=None,
batch_size=None,
epochs=1,
verbose=1,
callbacks=None,
validation_split=0.,
validation_data=None,
shuffle=True,
class_weight=None,
sample_weight=None,
initial_epoch=0,
steps_per_epoch=None,
validation_steps=None,
**kwargs):
"""Trains the model for a fixed number of epochs (iterations on a dataset).
Arguments:
x: Input data. It could be:
- A Numpy array (or array-like), or a list of arrays
(in case the model has multiple inputs).
- A TensorFlow tensor, or a list of tensors
(in case the model has multiple inputs).
- A dict mapping input names to the corresponding array/tensors,
if the model has named inputs.
- A `tf.data` dataset or a dataset iterator.
y: Target data. Like the input data `x`,
it could be either Numpy array(s) or TensorFlow tensor(s).
It should be consistent with `x` (you cannot have Numpy inputs and
tensor targets, or inversely). If `x` is a dataset or dataset
iterator, `y` should not be specified
(since targets will be obtained from the iterator).
batch_size: Integer or `None`.
Number of samples per gradient update.
If unspecified, `batch_size` will default to 32.
Do not specify the `batch_size` if your data is in the
form of symbolic tensors, datasets, or dataset iterators
(since they generate batches).
epochs: Integer. Number of epochs to train the model.
An epoch is an iteration over the entire `x` and `y`
data provided.
Note that in conjunction with `initial_epoch`,
`epochs` is to be understood as "final epoch".
The model is not trained for a number of iterations
given by `epochs`, but merely until the epoch
of index `epochs` is reached.
verbose: Integer. 0, 1, or 2. Verbosity mode.
0 = silent, 1 = progress bar, 2 = one line per epoch.
callbacks: List of `keras.callbacks.Callback` instances.
List of callbacks to apply during training.
See [callbacks](/api_docs/python/tf/keras/callbacks).
validation_split: Float between 0 and 1.
Fraction of the training data to be used as validation data.
The model will set apart this fraction of the training data,
will not train on it, and will evaluate
the loss and any model metrics
on this data at the end of each epoch.
The validation data is selected from the last samples
in the `x` and `y` data provided, before shuffling. This argument is
not supported when `x` is a dataset or a dataset iterator.
validation_data: Data on which to evaluate
the loss and any model metrics at the end of each epoch.
The model will not be trained on this data.
`validation_data` will override `validation_split`.
`validation_data` could be:
- tuple `(x_val, y_val)` of Numpy arrays or tensors
- tuple `(x_val, y_val, val_sample_weights)` of Numpy arrays
- dataset or a dataset iterator
shuffle: Boolean (whether to shuffle the training data
before each epoch) or str (for 'batch').
'batch' is a special option for dealing with the
limitations of HDF5 data; it shuffles in batch-sized chunks.
Has no effect when `steps_per_epoch` is not `None`.
class_weight: Optional dictionary mapping class indices (integers)
to a weight (float) value, used for weighting the loss function
(during training only).
This can be useful to tell the model to
"pay more attention" to samples from
an under-represented class.
sample_weight: Optional Numpy array of weights for
the training samples, used for weighting the loss function
(during training only). You can either pass a flat (1D)
Numpy array with the same length as the input samples
(1:1 mapping between weights and samples),
or in the case of temporal data,
you can pass a 2D array with shape
`(samples, sequence_length)`,
to apply a different weight to every timestep of every sample.
In this case you should make sure to specify
`sample_weight_mode="temporal"` in `compile()`. This argument is not
supported when `x` is a dataset or a dataset iterator.
initial_epoch: Integer.
Epoch at which to start training
(useful for resuming a previous training run).
steps_per_epoch: Integer or `None`.
Total number of steps (batches of samples)
before declaring one epoch finished and starting the
next epoch. When training with input tensors such as
TensorFlow data tensors, the default `None` is equal to
the number of samples in your dataset divided by
the batch size, or 1 if that cannot be determined.
validation_steps: Only relevant if `steps_per_epoch`
is specified. Total number of steps (batches of samples)
to validate before stopping.
**kwargs: Used for backwards compatibility.
Returns:
A `History` object. Its `History.history` attribute is
a record of training loss values and metrics values
at successive epochs, as well as validation loss values
and validation metrics values (if applicable).
Raises:
RuntimeError: If the model was never compiled.
ValueError: In case of mismatch between the provided input data
and what the model expects.
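    Example:
    An illustrative minimal call with in-memory Numpy data; the shapes and
    hyperparameters below are placeholders matching the small model shown in
    the class docstring:
    ```python
    import numpy as np
    data = np.random.random((100, 3)).astype('float32')
    labels = np.random.random((100, 5)).astype('float32')
    model.fit(data, labels, epochs=10, batch_size=32, validation_split=0.2)
    ```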
"""
# TODO(fchollet): this method may be creating reference cycles, which would
# lead to accumulating garbage in memory when called in a loop. Investigate.
# Backwards compatibility
if batch_size is None and steps_per_epoch is None:
batch_size = 32
# Legacy support
if 'nb_epoch' in kwargs:
logging.warning(
'The `nb_epoch` argument in `fit` '
'has been renamed `epochs`.')
epochs = kwargs.pop('nb_epoch')
if kwargs:
raise TypeError('Unrecognized keyword arguments: ' + str(kwargs))
# Validate and standardize user data.
if self._distribution_strategy:
distributed_training_utils.validate_callbacks(callbacks)
x, y, sample_weights = self._standardize_user_data(
x,
y,
sample_weight=sample_weight,
class_weight=class_weight,
batch_size=batch_size,
check_steps=True,
steps_name='steps_per_epoch',
steps=steps_per_epoch,
validation_split=validation_split)
# Prepare validation data.
if validation_data:
if (isinstance(validation_data, iterator_ops.Iterator) or
isinstance(validation_data, iterator_ops.EagerIterator) or
isinstance(validation_data, dataset_ops.Dataset)):
val_x = validation_data
val_y = None
val_sample_weight = None
elif len(validation_data) == 2:
val_x, val_y = validation_data # pylint: disable=unpacking-non-sequence
val_sample_weight = None
elif len(validation_data) == 3:
val_x, val_y, val_sample_weight = validation_data # pylint: disable=unpacking-non-sequence
else:
raise ValueError(
'When passing a `validation_data` argument, '
'it must contain either 2 items (x_val, y_val), '
'or 3 items (x_val, y_val, val_sample_weights), '
'or alternatively it could be a dataset or a '
            'dataset iterator. '
'However we received `validation_data=%s`' % validation_data)
# Validate and standardize validation data.
val_x, val_y, val_sample_weights = self._standardize_user_data(
val_x,
val_y,
sample_weight=val_sample_weight,
batch_size=batch_size,
steps=validation_steps)
elif validation_split and 0. < validation_split < 1.:
if training_utils.has_symbolic_tensors(x):
raise ValueError('If your data is in the form of symbolic tensors, '
'you cannot use `validation_split`.')
if hasattr(x[0], 'shape'):
split_at = int(x[0].shape[0] * (1. - validation_split))
else:
split_at = int(len(x[0]) * (1. - validation_split))
x, val_x = (slice_arrays(x, 0, split_at), slice_arrays(x, split_at))
y, val_y = (slice_arrays(y, 0, split_at), slice_arrays(y, split_at))
sample_weights, val_sample_weights = (slice_arrays(
sample_weights, 0, split_at), slice_arrays(sample_weights, split_at))
elif validation_steps:
val_x = []
val_y = []
val_sample_weights = []
else:
val_x = None
val_y = None
val_sample_weights = None
if context.executing_eagerly():
return training_eager.fit_loop(
self,
inputs=x,
targets=y,
sample_weights=sample_weights,
class_weight=class_weight,
batch_size=batch_size,
epochs=epochs,
verbose=verbose,
callbacks=callbacks,
val_inputs=val_x,
val_targets=val_y,
val_sample_weights=val_sample_weights,
shuffle=shuffle,
initial_epoch=initial_epoch,
steps_per_epoch=steps_per_epoch,
validation_steps=validation_steps)
elif self._distribution_strategy:
return training_distributed.fit_loop(
self, x, y,
epochs=epochs,
verbose=verbose,
callbacks=callbacks,
val_inputs=val_x,
val_targets=val_y,
initial_epoch=initial_epoch,
steps_per_epoch=steps_per_epoch,
validation_steps=validation_steps)
else:
return training_arrays.fit_loop(
self, x, y,
sample_weights=sample_weights,
batch_size=batch_size,
epochs=epochs,
verbose=verbose,
callbacks=callbacks,
val_inputs=val_x,
val_targets=val_y,
val_sample_weights=val_sample_weights,
shuffle=shuffle,
initial_epoch=initial_epoch,
steps_per_epoch=steps_per_epoch,
validation_steps=validation_steps)
def evaluate(self,
x=None,
y=None,
batch_size=None,
verbose=1,
sample_weight=None,
steps=None):
"""Returns the loss value & metrics values for the model in test mode.
Computation is done in batches.
Arguments:
x: Input data. It could be:
- A Numpy array (or array-like), or a list of arrays
(in case the model has multiple inputs).
- A TensorFlow tensor, or a list of tensors
(in case the model has multiple inputs).
- A dict mapping input names to the corresponding array/tensors,
if the model has named inputs.
- A `tf.data` dataset or a dataset iterator.
y: Target data. Like the input data `x`,
it could be either Numpy array(s) or TensorFlow tensor(s).
It should be consistent with `x` (you cannot have Numpy inputs and
tensor targets, or inversely).
If `x` is a dataset or a dataset iterator, `y` should not be specified
(since targets will be obtained from the iterator/dataset).
batch_size: Integer or `None`.
Number of samples per gradient update.
If unspecified, `batch_size` will default to 32.
          Do not specify the `batch_size` if your data is in the
form of symbolic tensors, datasets, or dataset iterators
(since they generate batches).
verbose: 0 or 1. Verbosity mode.
0 = silent, 1 = progress bar.
sample_weight: Optional Numpy array of weights for
the test samples, used for weighting the loss function.
You can either pass a flat (1D)
Numpy array with the same length as the input samples
(1:1 mapping between weights and samples),
or in the case of temporal data,
you can pass a 2D array with shape
`(samples, sequence_length)`,
to apply a different weight to every timestep of every sample.
In this case you should make sure to specify
`sample_weight_mode="temporal"` in `compile()`. This argument is not
supported when `x` is a dataset or a dataset iterator.
steps: Integer or `None`.
Total number of steps (batches of samples)
before declaring the evaluation round finished.
Ignored with the default value of `None`.
Returns:
Scalar test loss (if the model has a single output and no metrics)
or list of scalars (if the model has multiple outputs
and/or metrics). The attribute `model.metrics_names` will give you
the display labels for the scalar outputs.
Raises:
ValueError: in case of invalid arguments.
"""
# Backwards compatibility.
if batch_size is None and steps is None:
batch_size = 32
# Validate and standardize user data.
x, y, sample_weights = self._standardize_user_data(
x,
y,
sample_weight=sample_weight,
batch_size=batch_size,
check_steps=True,
steps_name='steps',
steps=steps)
if context.executing_eagerly():
return training_eager.test_loop(
self,
inputs=x,
targets=y,
sample_weights=sample_weights,
batch_size=batch_size,
verbose=verbose,
steps=steps)
elif self._distribution_strategy:
return training_distributed.test_loop(
self,
inputs=x,
targets=y,
verbose=verbose,
steps=steps)
else:
return training_arrays.test_loop(
self,
inputs=x,
targets=y,
sample_weights=sample_weights,
batch_size=batch_size,
verbose=verbose,
steps=steps)
def predict(self, x, batch_size=None, verbose=0, steps=None):
"""Generates output predictions for the input samples.
Computation is done in batches.
Arguments:
x: Input samples. It could be:
- A Numpy array (or array-like), or a list of arrays
(in case the model has multiple inputs).
- A TensorFlow tensor, or a list of tensors
(in case the model has multiple inputs).
- A `tf.data` dataset or a dataset iterator.
batch_size: Integer or `None`.
Number of samples per gradient update.
If unspecified, `batch_size` will default to 32.
          Do not specify the `batch_size` if your data is in the
          form of symbolic tensors, datasets, or dataset iterators
(since they generate batches).
verbose: Verbosity mode, 0 or 1.
steps: Total number of steps (batches of samples)
before declaring the prediction round finished.
Ignored with the default value of `None`.
Returns:
Numpy array(s) of predictions.
Raises:
ValueError: In case of mismatch between the provided
input data and the model's expectations,
or in case a stateful model receives a number of samples
that is not a multiple of the batch size.
"""
# Backwards compatibility.
if batch_size is None and steps is None:
batch_size = 32
# Validate and standardize user data.
x, _, _ = self._standardize_user_data(
x, check_steps=True, steps_name='steps', steps=steps)
if context.executing_eagerly():
return training_eager.predict_loop(
self, x, batch_size=batch_size, verbose=verbose, steps=steps)
elif self._distribution_strategy:
return training_distributed.predict_loop(
self, x, verbose=verbose, steps=steps)
else:
return training_arrays.predict_loop(
self, x, batch_size=batch_size, verbose=verbose, steps=steps)
def train_on_batch(self, x, y=None, sample_weight=None, class_weight=None):
"""Runs a single gradient update on a single batch of data.
Arguments:
x: Input data. It could be:
- A Numpy array (or array-like), or a list of arrays
(in case the model has multiple inputs).
- A TensorFlow tensor, or a list of tensors
(in case the model has multiple inputs).
- A dict mapping input names to the corresponding array/tensors,
if the model has named inputs.
- A `tf.data` dataset or a dataset iterator.
y: Target data. Like the input data `x`,
it could be either Numpy array(s) or TensorFlow tensor(s).
It should be consistent with `x` (you cannot have Numpy inputs and
tensor targets, or inversely). If `x` is a dataset or a
dataset iterator, `y` should not be specified
(since targets will be obtained from the iterator).
sample_weight: Optional array of the same length as x, containing
weights to apply to the model's loss for each sample.
In the case of temporal data, you can pass a 2D array
with shape (samples, sequence_length),
to apply a different weight to every timestep of every sample.
In this case you should make sure to specify
sample_weight_mode="temporal" in compile(). This argument is not
supported when `x` is a dataset or a dataset iterator.
class_weight: Optional dictionary mapping
class indices (integers) to
a weight (float) to apply to the model's loss for the samples
from this class during training.
This can be useful to tell the model to "pay more attention" to
samples from an under-represented class.
Returns:
Scalar training loss
(if the model has a single output and no metrics)
or list of scalars (if the model has multiple outputs
and/or metrics). The attribute `model.metrics_names` will give you
the display labels for the scalar outputs.
Raises:
ValueError: In case of invalid user-provided arguments.
"""
if self._distribution_strategy:
raise NotImplementedError('`train_on_batch` is not supported for models '
'compiled with DistributionStrategy.')
# Validate and standardize user data.
x, y, sample_weights = self._standardize_user_data(
x, y, sample_weight=sample_weight, class_weight=class_weight)
if context.executing_eagerly():
outputs = training_eager.train_on_batch(
self, x, y, sample_weights=sample_weights)
else:
if self.uses_learning_phase and not isinstance(K.learning_phase(), int):
ins = x + y + sample_weights + [1]
else:
ins = x + y + sample_weights
self._make_train_function()
outputs = self.train_function(ins)
if len(outputs) == 1:
return outputs[0]
return outputs
def test_on_batch(self, x, y=None, sample_weight=None):
"""Test the model on a single batch of samples.
Arguments:
x: Input data. It could be:
- A Numpy array (or array-like), or a list of arrays
(in case the model has multiple inputs).
- A TensorFlow tensor, or a list of tensors
(in case the model has multiple inputs).
- A dict mapping input names to the corresponding array/tensors,
if the model has named inputs.
- A `tf.data` dataset or a dataset iterator.
y: Target data. Like the input data `x`,
it could be either Numpy array(s) or TensorFlow tensor(s).
It should be consistent with `x` (you cannot have Numpy inputs and
tensor targets, or inversely). If `x` is a dataset or a
dataset iterator, `y` should not be specified
(since targets will be obtained from the iterator).
sample_weight: Optional array of the same length as x, containing
weights to apply to the model's loss for each sample.
In the case of temporal data, you can pass a 2D array
with shape (samples, sequence_length),
to apply a different weight to every timestep of every sample.
In this case you should make sure to specify
sample_weight_mode="temporal" in compile(). This argument is not
supported when `x` is a dataset or a dataset iterator.
Returns:
Scalar test loss (if the model has a single output and no metrics)
or list of scalars (if the model has multiple outputs
and/or metrics). The attribute `model.metrics_names` will give you
the display labels for the scalar outputs.
Raises:
ValueError: In case of invalid user-provided arguments.
"""
if self._distribution_strategy:
raise NotImplementedError('`test_on_batch` is not supported for models '
'compiled with DistributionStrategy.')
# Validate and standardize user data.
x, y, sample_weights = self._standardize_user_data(
x, y, sample_weight=sample_weight)
if context.executing_eagerly():
outputs = training_eager.test_on_batch(
self, x, y, sample_weights=sample_weights)
else:
if self.uses_learning_phase and not isinstance(K.learning_phase(), int):
ins = x + y + sample_weights + [0]
else:
ins = x + y + sample_weights
self._make_test_function()
outputs = self.test_function(ins)
if len(outputs) == 1:
return outputs[0]
return outputs
def predict_on_batch(self, x):
"""Returns predictions for a single batch of samples.
Arguments:
x: Input data. It could be:
- A Numpy array (or array-like), or a list of arrays
(in case the model has multiple inputs).
- A TensorFlow tensor, or a list of tensors
(in case the model has multiple inputs).
- A `tf.data` dataset or a dataset iterator.
Returns:
Numpy array(s) of predictions.
Raises:
ValueError: In case of mismatch between given number of inputs and
expectations of the model.
"""
if self._distribution_strategy:
raise NotImplementedError('`predict_on_batch` is not supported for '
'models compiled with DistributionStrategy.')
# Validate and standardize user data.
inputs, _, _ = self._standardize_user_data(x)
if context.executing_eagerly():
if (isinstance(x, iterator_ops.EagerIterator) or
(isinstance(x, dataset_ops.Dataset) and context.executing_eagerly())):
inputs = training_utils.cast_if_floating_dtype(inputs)
else:
inputs = [
ops.convert_to_tensor(val, dtype=K.floatx()) for val in inputs
]
return self(inputs) # pylint: disable=not-callable
if not context.executing_eagerly():
if self.uses_learning_phase and not isinstance(K.learning_phase(), int):
ins = inputs + [0]
else:
ins = inputs
self._make_predict_function()
outputs = self.predict_function(ins)
if len(outputs) == 1:
return outputs[0]
return outputs
def fit_generator(self,
generator,
steps_per_epoch=None,
epochs=1,
verbose=1,
callbacks=None,
validation_data=None,
validation_steps=None,
class_weight=None,
max_queue_size=10,
workers=1,
use_multiprocessing=False,
shuffle=True,
initial_epoch=0):
"""Fits the model on data yielded batch-by-batch by a Python generator.
The generator is run in parallel to the model, for efficiency.
For instance, this allows you to do real-time data augmentation
on images on CPU in parallel to training your model on GPU.
    The use of `keras.utils.Sequence` guarantees the ordering
    and guarantees that every input is used exactly once per epoch
    when using `use_multiprocessing=True`.
Arguments:
generator: A generator or an instance of `Sequence`
(`keras.utils.Sequence`)
object in order to avoid duplicate data
when using multiprocessing.
The output of the generator must be either
- a tuple `(inputs, targets)`
- a tuple `(inputs, targets, sample_weights)`.
This tuple (a single output of the generator) makes a single batch.
Therefore, all arrays in this tuple must have the same length (equal
to the size of this batch). Different batches may have different
sizes.
          For example, the last batch of the epoch is commonly smaller than the
          others, if the size of the dataset is not divisible by the batch size.
The generator is expected to loop over its data
indefinitely. An epoch finishes when `steps_per_epoch`
batches have been seen by the model.
steps_per_epoch: Total number of steps (batches of samples)
to yield from `generator` before declaring one epoch
finished and starting the next epoch. It should typically
be equal to the number of samples of your dataset
divided by the batch size.
Optional for `Sequence`: if unspecified, will use
the `len(generator)` as a number of steps.
epochs: Integer, total number of iterations on the data.
verbose: Verbosity mode, 0, 1, or 2.
callbacks: List of callbacks to be called during training.
validation_data: This can be either
- a generator for the validation data
- a tuple (inputs, targets)
- a tuple (inputs, targets, sample_weights).
validation_steps: Only relevant if `validation_data`
is a generator. Total number of steps (batches of samples)
to yield from `generator` before stopping.
Optional for `Sequence`: if unspecified, will use
the `len(validation_data)` as a number of steps.
class_weight: Dictionary mapping class indices to a weight
for the class.
max_queue_size: Integer. Maximum size for the generator queue.
If unspecified, `max_queue_size` will default to 10.
workers: Integer. Maximum number of processes to spin up
when using process-based threading.
If unspecified, `workers` will default to 1. If 0, will
execute the generator on the main thread.
use_multiprocessing: Boolean.
If `True`, use process-based threading.
If unspecified, `use_multiprocessing` will default to `False`.
Note that because this implementation relies on multiprocessing,
you should not pass non-picklable arguments to the generator
as they can't be passed easily to children processes.
shuffle: Boolean. Whether to shuffle the order of the batches at
the beginning of each epoch. Only used with instances
of `Sequence` (`keras.utils.Sequence`).
Has no effect when `steps_per_epoch` is not `None`.
initial_epoch: Epoch at which to start training
(useful for resuming a previous training run)
Returns:
A `History` object.
Example:
```python
def generate_arrays_from_file(path):
while 1:
f = open(path)
for line in f:
# create numpy arrays of input data
# and labels, from each line in the file
x1, x2, y = process_line(line)
yield ({'input_1': x1, 'input_2': x2}, {'output': y})
f.close()
model.fit_generator(generate_arrays_from_file('/my_file.txt'),
steps_per_epoch=10000, epochs=10)
```
Raises:
ValueError: In case the generator yields data in an invalid format.
"""
if self._distribution_strategy:
raise NotImplementedError('`fit_generator` is not supported for '
'models compiled with DistributionStrategy.')
if not self.built and not self._is_graph_network:
raise NotImplementedError(
'`fit_generator` is not yet enabled for unbuilt Model subclasses')
return training_generator.fit_generator(
self,
generator,
steps_per_epoch=steps_per_epoch,
epochs=epochs,
verbose=verbose,
callbacks=callbacks,
validation_data=validation_data,
validation_steps=validation_steps,
class_weight=class_weight,
max_queue_size=max_queue_size,
workers=workers,
use_multiprocessing=use_multiprocessing,
shuffle=shuffle,
initial_epoch=initial_epoch)
def evaluate_generator(self,
generator,
steps=None,
max_queue_size=10,
workers=1,
use_multiprocessing=False,
verbose=0):
"""Evaluates the model on a data generator.
The generator should return the same kind of data
as accepted by `test_on_batch`.
Arguments:
generator: Generator yielding tuples (inputs, targets)
or (inputs, targets, sample_weights)
or an instance of Sequence (keras.utils.Sequence)
object in order to avoid duplicate data
when using multiprocessing.
steps: Total number of steps (batches of samples)
to yield from `generator` before stopping.
Optional for `Sequence`: if unspecified, will use
the `len(generator)` as a number of steps.
max_queue_size: maximum size for the generator queue
workers: Integer. Maximum number of processes to spin up
when using process-based threading.
If unspecified, `workers` will default to 1. If 0, will
execute the generator on the main thread.
use_multiprocessing: Boolean.
If `True`, use process-based threading.
If unspecified, `use_multiprocessing` will default to `False`.
Note that because this implementation relies on multiprocessing,
you should not pass non-picklable arguments to the generator
as they can't be passed easily to children processes.
verbose: Verbosity mode, 0 or 1.
Returns:
Scalar test loss (if the model has a single output and no metrics)
or list of scalars (if the model has multiple outputs
and/or metrics). The attribute `model.metrics_names` will give you
the display labels for the scalar outputs.
    Raises:
        ValueError: In case of invalid arguments, or if the generator yields
            data in an invalid format.
"""
if self._distribution_strategy:
raise NotImplementedError('`evaluate_generator` is not supported for '
'models compiled with DistributionStrategy.')
if not self.built and not self._is_graph_network:
raise NotImplementedError(
'`evaluate_generator` is not yet enabled for '
'unbuilt Model subclasses')
return training_generator.evaluate_generator(
self,
generator,
steps=steps,
max_queue_size=max_queue_size,
workers=workers,
use_multiprocessing=use_multiprocessing,
verbose=verbose)
def predict_generator(self,
generator,
steps=None,
max_queue_size=10,
workers=1,
use_multiprocessing=False,
verbose=0):
"""Generates predictions for the input samples from a data generator.
The generator should return the same kind of data as accepted by
`predict_on_batch`.
Arguments:
generator: Generator yielding batches of input samples
or an instance of Sequence (keras.utils.Sequence)
object in order to avoid duplicate data
when using multiprocessing.
steps: Total number of steps (batches of samples)
to yield from `generator` before stopping.
Optional for `Sequence`: if unspecified, will use
the `len(generator)` as a number of steps.
max_queue_size: Maximum size for the generator queue.
workers: Integer. Maximum number of processes to spin up
when using process-based threading.
If unspecified, `workers` will default to 1. If 0, will
execute the generator on the main thread.
use_multiprocessing: Boolean.
If `True`, use process-based threading.
If unspecified, `use_multiprocessing` will default to `False`.
Note that because this implementation relies on multiprocessing,
you should not pass non-picklable arguments to the generator
as they can't be passed easily to children processes.
verbose: verbosity mode, 0 or 1.
Returns:
Numpy array(s) of predictions.
Raises:
ValueError: In case the generator yields data in an invalid format.
"""
if self._distribution_strategy:
raise NotImplementedError('`predict_generator` is not supported for '
'models compiled with DistributionStrategy.')
if not self.built and not self._is_graph_network:
raise NotImplementedError(
'`predict_generator` is not yet enabled for unbuilt Model subclasses')
return training_generator.predict_generator(
self,
generator,
steps=steps,
max_queue_size=max_queue_size,
workers=workers,
use_multiprocessing=use_multiprocessing,
verbose=verbose)
def _get_callback_model(self):
"""Returns the Callback Model for this Model."""
if hasattr(self, '_replicated_model') and self._replicated_model:
# When using training_distributed, we set the callback model
# to an instance of the `DistributedModel` that we create in
# the `compile` call. The `DistributedModel` is initialized
# with the first replicated model. We need to set the callback
# model to a DistributedModel to allow us to override saving
# and loading weights when we checkpoint the model during training.
return self._replicated_model
if hasattr(self, 'callback_model') and self.callback_model:
return self.callback_model
return self
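# A minimal manual-loop sketch (illustrative only, not part of the training
# module): the `train_on_batch`/`test_on_batch` docstrings above describe
# single-batch updates; assuming a compiled `tf.keras` model with a 10-feature
# input and a single output, such a loop could look roughly like this.
def _example_train_on_batch_loop(model, num_steps=10):
  """Runs a few manual gradient updates on random data (usage sketch)."""
  import numpy as np  # local import keeps the sketch self-contained
  x_batch = np.random.rand(32, 10).astype('float32')
  y_batch = np.random.rand(32, 1).astype('float32')
  for _ in range(num_steps):
    loss = model.train_on_batch(x_batch, y_batch)   # single gradient update
  val_loss = model.test_on_batch(x_batch, y_batch)  # evaluates without updating weights
  return loss, val_loss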
class DistributedCallbackModel(Model):
"""Model that is used for callbacks with DistributionStrategy."""
def __init__(self, model):
super(DistributedCallbackModel, self).__init__()
# TODO(anjalisridhar): Right now the only attributes set are the layer and
# weights. We may need to set additional attributes as needed since we have
# not called compile on this model.
def set_original_model(self, orig_model):
self._original_model = orig_model
def save_weights(self, filepath, overwrite=True, save_format=None):
self._replicated_model.save_weights(filepath, overwrite=overwrite,
save_format=save_format)
def save(self, filepath, overwrite=True, include_optimizer=True):
# save weights from the distributed model to the original model
distributed_model_weights = self.get_weights()
self._original_model.set_weights(distributed_model_weights)
# TODO(anjalisridhar): Do we need to save the original model here?
# Saving the first replicated model works as well.
self._original_model.save(filepath, overwrite=True, include_optimizer=False)
def load_weights(self, filepath, by_name=False):
self._original_model.load_weights(filepath, by_name=False)
# Copy the weights from the original model to each of the replicated models.
orig_model_weights = self._original_model.get_weights()
distributed_training_utils.set_weights(
self._original_model._distribution_strategy, self, # pylint: disable=protected-access
orig_model_weights)
def __getattr__(self, item):
    # Whitelisted attributes of the model that can be accessed by the user
# during a callback.
if item not in ['_setattr_tracking']:
      logging.warning('You are accessing attribute ' + item + ' of the '
                      'DistributedCallbackModel that may not have been set '
                      'correctly.')
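# A minimal end-to-end sketch (illustrative only): the `fit`/`evaluate`/`predict`
# docstrings above describe the high-level workflow; assuming the public
# `tf.keras` API and small random NumPy data, a run could look roughly like this.
def _example_fit_evaluate_predict():
  import numpy as np
  import tensorflow as tf
  x = np.random.rand(100, 10).astype('float32')
  y = np.random.rand(100, 1).astype('float32')
  model = tf.keras.Sequential([tf.keras.layers.Dense(1, input_shape=(10,))])
  model.compile(optimizer='sgd', loss='mse')
  # Hold out the last 20% of samples as validation data (see `validation_split`).
  history = model.fit(x, y, batch_size=32, epochs=2,
                      validation_split=0.2, verbose=0)
  test_loss = model.evaluate(x, y, verbose=0)    # scalar, since no metrics were configured
  predictions = model.predict(x, batch_size=32)  # NumPy array of shape (100, 1)
  return history.history, test_loss, predictions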
|
# -*- coding: utf-8 -*-
"""
pip_services3_components.auth.MemoryCredentialStore
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Memory credential store implementation
:copyright: Conceptual Vision Consulting LLC 2018-2019, see AUTHORS for more details.
:license: MIT, see LICENSE for more details.
"""
from typing import Optional
from pip_services3_commons.config.ConfigParams import ConfigParams
from pip_services3_commons.config.IReconfigurable import IReconfigurable
from pip_services3_commons.data.StringValueMap import StringValueMap
from .CredentialParams import CredentialParams
from .ICredentialStore import ICredentialStore
class MemoryCredentialStore(ICredentialStore, IReconfigurable):
"""
Credential store that keeps credentials in memory.
### Configuration parameters ###
- [credential key 1]:
- ... credential parameters for key 1
    - [credential key N]:
    - ... credential parameters for key N
- ...
Example:
.. code-block:: python
config = ConfigParams.from_tuples("key1.user", "jdoe",
"key1.pass", "pass123",
"key2.user", "bsmith",
"key2.pass", "mypass")
credentialStore = MemoryCredentialStore()
credentialStore.read_credentials(config)
credentialStore.lookup("123", "key1")
"""
def __init__(self, config: ConfigParams = None):
"""
Creates a new instance of the credential store.
:param config: (optional) configuration with credential parameters.
"""
self.__items: StringValueMap = StringValueMap()
if not (config is None):
self.configure(config)
def configure(self, config: ConfigParams):
"""
Configures component by passing configuration parameters.
:param config: configuration parameters to be set.
"""
self.read_credentials(config)
def read_credentials(self, config: ConfigParams):
"""
Reads credentials from configuration parameters.
Each section represents an individual CredentialParams
:param config: configuration parameters to be read
"""
self.__items.clear()
for key in config.get_keys():
value = config.get_as_nullable_string(key)
self.__items.append(CredentialParams.from_tuples([key, value]))
def store(self, correlation_id: Optional[str], key: str, credential: CredentialParams):
"""
Stores credential parameters into the store.
:param correlation_id: (optional) transaction id to trace execution through call chain.
:param key: a key to uniquely identify the credential parameters.
        :param credential: the credential parameters to be stored.
"""
if not (credential is None):
self.__items.put(key, credential)
else:
self.__items.remove(key)
def lookup(self, correlation_id: Optional[str], key: str) -> CredentialParams:
"""
        Looks up credential parameters by its key.
:param correlation_id: (optional) transaction id to trace execution through call chain.
:param key: a key to uniquely identify the credential.
:return: found credential parameters or None if nothing was found
"""
return self.__items.get_as_object(key)
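# A minimal runtime sketch (illustrative): besides configuring the store from
# ConfigParams as shown in the class docstring, credentials can be stored,
# looked up and removed directly. It assumes CredentialParams.from_tuples
# accepts key/value pairs, mirroring the ConfigParams.from_tuples example above.
def _example_memory_credential_store():
    store = MemoryCredentialStore()
    credential = CredentialParams.from_tuples("user", "jdoe", "pass", "pass123")
    store.store(None, "key1", credential)  # add or replace the credential under "key1"
    found = store.lookup(None, "key1")     # returns the stored CredentialParams
    store.store(None, "key1", None)        # passing None removes the entry
    return found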
|
###########################################################################
#
# Copyright 2019 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
###########################################################################
'''
--------------------------------------------------------------
Before running this Airflow module...
Install StarThinker in cloud composer from open source:
pip install git+https://github.com/google/starthinker
Or push local code to the cloud composer plugins directory:
source install/deploy.sh
4) Composer Menu
l) Install All
--------------------------------------------------------------
CM Report
Create a CM report from a JSON definition.
Add an account as [account_id]@[profile_id].
Fetch the report JSON definition; arguably this could be better.
The account is automatically added to the report definition.
'''
from starthinker_airflow.factory import DAG_Factory
# Add the following credentials to your Airflow configuration.
USER_CONN_ID = "starthinker_user" # The connection to use for user authentication.
GCP_CONN_ID = "starthinker_service" # The connection to use for service authentication.
INPUTS = {
'account': '',
'body': '{}',
'delete': False,
}
TASKS = [
{
'dcm': {
'auth': 'user',
'report': {
'account': {
'field': {
'name': 'account',
'kind': 'string',
'order': 1,
'default': ''
}
},
'body': {
'field': {
'name': 'body',
'kind': 'json',
'order': 2,
'default': '{}'
}
}
},
'delete': {
'field': {
'name': 'delete',
'kind': 'boolean',
'order': 3,
'default': False
}
}
}
}
]
DAG_FACTORY = DAG_Factory('dcm', { 'tasks':TASKS }, INPUTS)
DAG_FACTORY.apply_credentails(USER_CONN_ID, GCP_CONN_ID)
DAG = DAG_FACTORY.execute()
if __name__ == "__main__":
DAG_FACTORY.print_commandline()
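# Illustrative only: a filled-in INPUTS dictionary for a concrete run. The
# account follows the [account_id]@[profile_id] convention from the docstring;
# all values below are hypothetical placeholders, not real identifiers.
EXAMPLE_INPUTS = {
  'account': '1234567@8901234',  # [account_id]@[profile_id]
  'body': '{"name": "Example CM Report", "type": "STANDARD"}',  # report JSON definition
  'delete': False,  # passed through to the dcm task's delete field
}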
|
from common_fixtures import * # NOQA
from selenium import webdriver
from selenium.webdriver.phantomjs.service import Service as PhantomJSService
from requests.auth import AuthBase
# test the github auth workflow
USER_SCOPE = 'github_user'
TEAM_SCOPE = 'github_team'
ORG_SCOPE = 'github_org'
class NewService(PhantomJSService):
def __init__(self, *args, **kwargs):
super(NewService, self).__init__(*args, **kwargs)
webdriver.phantomjs.webdriver.Service = NewService
if_github = pytest.mark.skipif(not os.environ.get('API_AUTH_GITHUB'
'_CLIENT_SECRET'),
reason='API_AUTH_GITHUB'
'_CLIENT_SECRET is not set')
BASE_URL = cattle_url() + '/v1/'
URL = BASE_URL + 'schemas'
@pytest.fixture(scope='session')
def config():
needed_vars = [
'API_AUTH_GITHUB_TEST_USER',
'API_AUTH_GITHUB_TEST_PASS',
'API_AUTH_GITHUB_CLIENT_ID',
'API_AUTH_GITHUB_CLIENT_SECRET',
'API_AUTH_RANCHER_TEST_PASS',
]
for a in needed_vars:
if os.getenv(a, None) is None:
raise Exception('Please set ' + a + ' in the environment')
config = {}
config['username'] = os.getenv('API_AUTH_GITHUB_TEST_USER', None)
config['password'] = os.getenv('API_AUTH_GITHUB_TEST_PASS', None)
config['phantomjs_port'] = int(os.getenv('PHANTOMJS_WEBDRIVER_PORT', 4444))
config['phantomjs_bin'] = os.getenv('PHANTOMJS_BIN',
'/usr/local/bin/phantomjs')
assert config['phantomjs_bin'] is not None
config['client_id'] = os.getenv('API_AUTH_GITHUB_CLIENT_ID', None)
config['client_secret'] = os.getenv('API_AUTH_GITHUB_CLIENT_SECRET', None)
config['users'] = {}
config['users']['1'] = {
'password': os.getenv('API_AUTH_RANCHER_TEST_PASS', None),
'username': os.getenv('API_AUTH_RANCHER_TEST_USER_1', 'ranchertest01')
}
config['users']['2'] = {
'password': os.getenv('API_AUTH_RANCHER_TEST_PASS_2', None),
'username': os.getenv('API_AUTH_RANCHER_TEST_USER_2', 'ranchertest02')
}
return config
@pytest.fixture(scope='module')
def github_request_code(config, cattle_url, admin_client, request, user=None):
def fin():
admin_client.create_githubconfig(enabled=False,
accessMode='restricted')
request.addfinalizer(fin)
username = config['username']
password = config['password']
enabled = False
if user is not None:
username = user['username']
password = user['password']
enabled = True
driver = webdriver.PhantomJS(config['phantomjs_bin'],
port=config['phantomjs_port'])
max_wait = 60
driver.set_page_load_timeout(max_wait)
driver.set_script_timeout(max_wait)
driver.implicitly_wait(10)
# undo monkey patching
webdriver.phantomjs.webdriver.Service = PhantomJSService
driver.set_window_size(1120, 550)
admin_client.create_githubconfig(enabled=enabled,
accessMode='unrestricted',
clientId=config['client_id'],
clientSecret=config['client_secret'])
urlx = "https://github.com/login/oauth/authorize?response_type=code&client_id=" +\
config['client_id'] + "&scope=read:org"
driver.get(urlx)
driver.find_element_by_id('login_field').send_keys(username)
driver.find_element_by_id('password').send_keys(password)
driver.find_element_by_name('commit').submit()
try:
driver.find_element_by_class_name('btn-primary').click()
except:
pass
driver.get('https://github.com')
cookie_dict = dict(driver.get_cookie('_gh_sess'))
cookie_dict = {'_gh_sess': cookie_dict['value']}
cookie_dict['user_session'] = driver.get_cookie('user_session')['value']
r = requests.get(urlx, cookies=cookie_dict, allow_redirects=False)
redirect_url = r.headers['location']
code = redirect_url.rsplit('=')[1]
driver.quit()
return code
@pytest.fixture(scope='module')
def github_request_token(github_request_code):
code = github_request_code
c = requests.post(BASE_URL + 'token', {'code': code})
return c.json()['jwt']
@pytest.fixture(scope='module')
def github_client(request, cattle_url, github_request_token, admin_client):
github_client = from_env(url=cattle_url)
github_client.delete_by_id = delete_by_id
assert github_client.valid()
jwt = github_request_token
github_client._auth = GithubAuth(jwt)
return github_client
def delete_by_id(self, type, id):
url = self.schema.types[type].links.collection
if url.endswith('/'):
url = url + id
else:
url = '/'.join([url, id])
return self._delete(url)
def _create_member(name='rancherio', role='member', type=ORG_SCOPE):
return {
'role': role,
'externalId': name,
'externalIdType': type
}
def diff_members(members, got_members):
assert len(members) == len(got_members)
members_a = set([])
members_b = set([])
for member in members:
members_a.add(member['externalId'] + ' ' + member['externalIdType']
+ ' ' + member['role'])
for member in got_members:
members_b.add(member['externalId'] + ' ' + member['externalIdType']
+ ' ' + member['role'])
assert members_a == members_b
def get_plain_members(members):
plain_members = []
for member in members.data:
plain_members.append({
'role': member.role,
'externalId': member.externalId,
'externalIdType': member.externalIdType
})
return plain_members
class GithubAuth(AuthBase):
def __init__(self, jwt, prj_id=None):
# setup any auth-related data here
self.jwt = jwt
self.prj_id = prj_id
def __call__(self, r):
# modify and return the request
r.headers['Authorization'] = 'Bearer ' + self.jwt
if self.prj_id is not None:
r.headers['X-API-Project-Id'] = self.prj_id
return r
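# Illustrative sketch: GithubAuth is a requests AuthBase, so the same JWT the
# tests attach via raw 'Authorization' headers can also be passed through the
# auth hook (optionally scoping requests to a project via X-API-Project-Id).
def _example_github_auth_request(jwt, prj_id=None):
    # Equivalent to headers={'Authorization': 'Bearer ' + jwt} used in the tests below.
    return requests.get(URL, auth=GithubAuth(jwt, prj_id=prj_id))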
def switch_on_auth(admin_client, request, config):
admin_client.create_githubconfig(enabled=True, accessMode='restricted',
clientId=config['client_id'],
clientSecret=config['client_secret'])
def fin():
admin_client.create_githubconfig(enabled=False,
accessMode='restricted',
allowedUsers=[],
allowedOrganizations=[],
clientId='',
clientSecret='')
request.addfinalizer(fin)
@if_github
def test_github_auth_config_unauth_user(request, admin_client,
config):
switch_on_auth(admin_client, request, config)
# do not set any auth headers
no_auth = requests.get(URL)
# test that auth is switched on
assert no_auth.status_code == 401
@if_github
def test_github_auth_config_invalid_user(request,
admin_client, config):
switch_on_auth(admin_client, request, config)
# set invalid auth headers
bad_auth = requests.get(URL,
headers={'Authorization':
'Bearer some_random_string'})
# test that user does not have access
assert bad_auth.status_code == 401
@if_github
def test_github_auth_config_valid_user(github_request_token,
admin_client, request, config):
switch_on_auth(admin_client, request, config)
jwt = github_request_token
# set valid auth headers
schemas = requests.get(URL, headers={'Authorization': 'Bearer ' + jwt})
# test that user has access
assert schemas.status_code == 200
@if_github
def test_github_auth_config_api_whitelist_users(admin_client, request,
github_client, config):
switch_on_auth(admin_client, request, config)
github_client.create_githubconfig(allowedUsers=[
config['users']['1']['username'],
config['users']['2']['username']
],
clientId=config['client_id'],
clientSecret=config['client_secret']
)
# test that these users were whitelisted
r = github_client.list_githubconfig()
users = r[0]['allowedUsers']
assert len(users) == 2
assert config['users']['1']['username'] in users
assert config['users']['2']['username'] in users
assert 'ranchertest02' in users
@if_github
def test_github_auth_config_api_whitelist_orgs(admin_client, request,
config, github_client):
switch_on_auth(admin_client, request, config)
# set whitelisted org
github_client.create_githubconfig(allowedOrganizations=['rancherio'])
# test that these org was whitelisted
r = github_client.list_githubconfig()
orgs = r[0]['allowedOrganizations']
assert len(orgs) == 1
assert 'rancherio' in orgs
@if_github
def test_github_add_whitelisted_user(admin_client, config, github_client,
request):
switch_on_auth(admin_client, request, config)
github_client.create_githubconfig(allowedUsers=[
config['users']['1']['username']
])
# test that these users were whitelisted
r = github_client.list_githubconfig()
users = r[0]['allowedUsers']
assert config['users']['1']['username'] in users
new_token = github_request_code(config, cattle_url, admin_client, request,
user=config['users']['1'])
new_token = github_request_token(new_token)
assert new_token is not None
@if_github
def test_github_projects(cattle_url, config, request,
admin_client, github_client):
user_client = from_env(url=cattle_url)
switch_on_auth(admin_client, request, config)
github_client.create_githubconfig(allowedUsers=[
config['users']['1']['username']
])
# test that the users is whitelisted
r = github_client.list_githubconfig()
users = r[0]['allowedUsers']
assert config['users']['1']['username'] in users
new_token = github_request_code(config, cattle_url, admin_client, request,
user=config['users']['1'])
new_token = github_request_token(new_token)
user_client._auth = GithubAuth(new_token)
members = [_create_member(
name=config['users']['1']['username'],
type=USER_SCOPE,
role='owner'
), _create_member()]
project = user_client.create_project(members=members)
assert len(project.projectMembers()) == 2
diff_members(get_plain_members(project.projectMembers()), members)
project = user_client.wait_success(project)
project = user_client.wait_success(project.deactivate())
project = user_client.wait_success(project.remove())
project = user_client.wait_success(project.purge())
project = user_client.by_id('project', project.id)
assert project.state == 'purged'
@if_github
def test_github_id_name(config, cattle_url, request,
admin_client, github_client):
user_client = from_env(url=cattle_url)
switch_on_auth(admin_client, request, config)
github_client.create_githubconfig(allowedUsers=[
config['users']['1']['username']
])
new_token = github_request_code(config, cattle_url, admin_client, request,
user=config['users']['1'])
new_token = github_request_token(new_token)
user_client._auth = GithubAuth(new_token)
sent_members = [_create_member(
name=config['users']['1']['username'],
type=USER_SCOPE,
role='owner'
),
_create_member()
]
project = user_client.create_project(members=sent_members)
members = get_plain_members(project.projectMembers())
assert len(members) == 2
diff_members(members, sent_members)
|
from flask import Flask, g
from proxypool.storages.redis import RedisClient
from proxypool.setting import API_HOST, API_PORT, API_THREADED, IS_DEV
__all__ = ['app']
app = Flask(__name__)
if IS_DEV:
app.debug = True
def get_conn():
"""
get redis client object
:return:
"""
if not hasattr(g, 'redis'):
g.redis = RedisClient()
return g.redis
@app.route('/')
def index():
"""
get home page, you can define your own templates
:return:
"""
return '<h2>Welcome to Proxy Pool System</h2>'
@app.route('/random')
def get_proxy():
"""
get a random proxy
:return: get a random proxy
"""
conn = get_conn()
return conn.random().string()
@app.route('/all')
def get_proxy_all():
"""
    get all proxies in the pool
    :return: all proxies, one proxy per line
"""
conn = get_conn()
proxies = conn.all()
proxies_string = ''
if proxies:
for proxy in proxies:
proxies_string += str(proxy) + '\n'
return proxies_string
@app.route('/count')
def get_count():
"""
get the count of proxies
:return: count, int
"""
conn = get_conn()
return str(conn.count())
if __name__ == '__main__':
app.run(host=API_HOST, port=API_PORT, threaded=API_THREADED)
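# Client-side usage sketch (illustrative, not part of the API module). It assumes
# the server above is reachable at http://API_HOST:API_PORT; the base URL default
# below is a placeholder and depends on proxypool.setting.
def _example_query_proxy_pool(base_url='http://127.0.0.1:5555'):
    import requests
    proxy = requests.get(base_url + '/random').text                  # one random proxy, e.g. "1.2.3.4:8080"
    count = int(requests.get(base_url + '/count').text)              # number of proxies in the pool
    all_proxies = requests.get(base_url + '/all').text.splitlines()  # one proxy per line
    return proxy, count, all_proxies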
|