"""SCons.Tool.rmic
Tool-specific initialization for rmic.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "/home/scons/scons/branch.0/baseline/src/engine/SCons/Tool/rmic.py 0.97.D001 2007/05/17 11:35:19 knight"
import os.path
import string
import SCons.Action
import SCons.Builder
import SCons.Node.FS
import SCons.Util
def emit_rmic_classes(target, source, env):
"""Create and return lists of Java RMI stub and skeleton
class files to be created from a set of class files.
"""
class_suffix = env.get('JAVACLASSSUFFIX', '.class')
classdir = env.get('JAVACLASSDIR')
if not classdir:
try:
s = source[0]
except IndexError:
classdir = '.'
else:
try:
classdir = s.attributes.java_classdir
except AttributeError:
classdir = '.'
classdir = env.Dir(classdir).rdir()
if str(classdir) == '.':
c_ = None
else:
c_ = str(classdir) + os.sep
slist = []
for src in source:
try:
classname = src.attributes.java_classname
except AttributeError:
classname = str(src)
if c_ and classname[:len(c_)] == c_:
classname = classname[len(c_):]
            if class_suffix and classname[-len(class_suffix):] == class_suffix:
                classname = classname[:-len(class_suffix)]
s = src.rfile()
s.attributes.java_classdir = classdir
s.attributes.java_classname = classname
slist.append(s)
tlist = []
    for s in slist:
for suff in ['_Skel', '_Stub']:
fname = string.replace(s.attributes.java_classname, '.', os.sep) + \
suff + class_suffix
t = target[0].File(fname)
t.attributes.java_lookupdir = target[0]
tlist.append(t)
return tlist, source
RMICAction = SCons.Action.Action('$RMICCOM', '$RMICCOMSTR')
RMICBuilder = SCons.Builder.Builder(action = RMICAction,
emitter = emit_rmic_classes,
src_suffix = '$JAVACLASSSUFFIX',
target_factory = SCons.Node.FS.Dir,
source_factory = SCons.Node.FS.File)
def generate(env):
"""Add Builders and construction variables for rmic to an Environment."""
env['BUILDERS']['RMIC'] = RMICBuilder
env['RMIC'] = 'rmic'
env['RMICFLAGS'] = SCons.Util.CLVar('')
env['RMICCOM'] = '$RMIC $RMICFLAGS -d ${TARGET.attributes.java_lookupdir} -classpath ${SOURCE.attributes.java_classdir} ${SOURCES.attributes.java_classname}'
env['JAVACLASSSUFFIX'] = '.class'
def exists(env):
return env.Detect('rmic')
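# --- Illustrative sketch (not part of the original SCons module) ---
# A hedged example of how this tool is typically consumed from an SConstruct
# once loaded through SCons.Tool; the target/source names are hypothetical.
#
#   env = Environment(tools=['javac', 'rmic'])
#   classes = env.Java(target='classes', source='src')
#   env.RMIC(target='stubs', source=classes)
#
# For each input class, the emitter above schedules <Class>_Skel.class and
# <Class>_Stub.class under the 'stubs' directory, and $RMICCOM expands to
# something like: rmic -d stubs -classpath <classdir> <classname>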
|
# coding=utf-8
from setuptools import setup, find_packages
with open('README.rst') as readme_file:
readme = readme_file.read()
setup(
name='pubplot',
version='0.2.4',
description='Seamless LaTeX and Matplotlib integration for publication plots',
long_description=readme,
packages=find_packages(),
url='',
download_url='https://github.com/hsadok/pubplot',
license='ISC',
author='Hugo Sadok',
author_email='hugo@sadok.com.br',
keywords=['matplotlib', 'latex', 'pgf'],
include_package_data=True,
install_requires=[
'matplotlib',
'pylatex',
'numpy'
],
setup_requires=['pytest-runner'],
tests_require=['pytest'],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: ISC License (ISCL)',
'Natural Language :: English',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Topic :: Scientific/Engineering :: Visualization',
],
)
|
import converter
converter.convert_to_utf8("splitted/%s.csv" % 'zongheng')
|
from app import app
from flask_script import Manager, Server
manage = Manager(app)
# manage.add_command("runserver", Server(
# host = '0.0.0.0')
# )
if __name__ == "__main__":
manage.run()
|
# Copyright 2022 Cruise LLC
import warnings
from collections import OrderedDict
import logging
import torch.distributed as dist
import torch.distributed.algorithms.model_averaging.utils as utils
logger = logging.getLogger(__name__)
class HierarchicalModelAverager:
r"""
A group of model averagers used for hierarchical model averaging (hierarchical SGD).
    Process groups of different sizes are organized in a hierarchy, and they average parameters
by using different periods concurrently after the warm-up stage.
This is an extension of :class:`~torch.distributed.algorithms.model_averaging.averagers.PeriodicModelAverager`
that supports `post-local SGD <https://arxiv.org/abs/1808.07217>`_, which essentially only supports
a two-level hierarchy: the intra-machine level and the global level, where the intra-machine
level is usually embedded in :meth:`~torch.distributed.algorithms.ddp_comm_hooks.post_localSGD_hook`.
Similarly, the process groups within this class do not have such an intra-machine process
subgroup, which should be embedded by the post-local SGD communication hook instead.
Args:
period_group_size_dict: An ordered dict mapping keys of model averaging period to
process group size, used for initializing process groups of
different sizes in a hierarchy to average parameters concurrently.
Particularly, at each iteration, there will be at most a single
                                process group that runs averaging -- the group used is the one whose
                                period is the largest divisor of the current step.
For example, if the dict has three keys: 2, 4, and 8,
then this means totally three process groups will be created to
average parameters every 2, 4, and 8 iterations, respectively.
At the 4th iteration, only the second process group will run
averaging, because the first process group should be a
subset of the second process group, and no need to execute the first
process group redundantly.
On the other hand, the third process group can only be triggered
every 8 iterations, so it will not be triggered at the 4th iteration.
warmup_steps (int): The number of warm-up steps. During this stage, model averaging is skipped.
        process_group (ProcessGroup, optional): The overall process group containing all the processes that run model averaging.
If ``None``, the default process group, which is created
by :func:`torch.distributed.init_process_group`, will be used.
(default: ``None``)
Example::
>>> from collections import OrderedDict
>>> import torch
>>> import torch.distributed as dist
>>> from torch.distributed.algorithms.ddp_comm_hooks.post_localSGD_hook import (
>>> PostLocalSGDState,
>>> post_localSGD_hook,
>>> )
>>> import torch.distributed.algorithms.model_averaging.hierarchical_model_averager as hierarchicalSGD
>>> import torch.nn as nn
>>>
>>> dist.init_process_group("nccl", rank=rank, world_size=16)
>>> torch.cuda.set_device(rank)
>>> module = nn.Linear(1, 1, bias=False).to(rank)
>>> model = nn.parallel.DistributedDataParallel(
>>> module, device_ids=[rank], output_device=rank
>>> )
>>> # Register a post-localSGD communication hook.
>>> # Assume that each machine has 4 GPUs, then each intra-machine subgroup has a size of 4.
>>> subgroup, _ = dist.new_subgroups()
>>> state = PostLocalSGDState(subgroup=subgroup, start_localSGD_iter=100)
>>> model.register_comm_hook(state, post_localSGD_hook)
>>>
>>> # Average parameters among each group of 8 processes every 4 iterations, and among all
>>> # the 16 processes every 16 iterations.
>>> averager = hierarchicalSGD.HierarchicalModelAverager(
>>> period_group_size_dict=OrderedDict([(4, 8), (16, 16)]), warmup_steps=100)
>>> # Note that ``warmup_steps`` must be the same as ``start_localSGD_iter`` used in ``PostLocalSGDState``.
>>> # In the first 100 steps, run global gradient averaging like normal DDP at every step.
>>> # After 100 steps, run model averaging at two levels.
>>> for step in range(0, 200):
>>> optimizer.zero_grad()
>>> loss = loss_fn(output, labels)
>>> loss.backward()
>>> optimizer.step()
>>> # Average parameters after ``optimizer.step()``.
>>> # Thus, the inter-node communication only occurs periodically after ``warmup_steps``.
>>> averager.average_parameters(model.parameters())
.. warning ::
The last group size in the dict must be the size of the provided ``process_group``,
which indicates model averaging at the highest level of the hierarchy.
If ``process_group`` is not provided, then the last group size should be equal to the world size.
.. warning ::
`HierarchicalModelAverager` is experimental and subject to change.
"""
def __init__(self, period_group_size_dict=None, warmup_steps=0, process_group=None):
if not period_group_size_dict:
raise ValueError("Arg ``period_group_size_dict`` must not be empty.")
self._periods = list(period_group_size_dict.keys())
if self._periods[0] <= 0:
raise ValueError("The minimum period in arg ``period_group_size_dict`` must be a positive value.")
elif self._periods[-1] == 1:
warnings.warn(
"When the maximum period in arg ``period_group_size_dict`` is 1, "
"no need to use model averaging because the communication cost "
"of all-reducing parameters will be no less than the cost of all-reducing gradients "
"by DistributedDataParallel in the backward pass. Therefore, only "
"DistributedDataParallel should be used for this case."
)
        overall_group: dist.ProcessGroup = (
            process_group if process_group is not None else dist.group.WORLD
        )
        overall_group_size = dist.get_world_size(group=overall_group)
        if list(period_group_size_dict.values())[-1] != overall_group_size:
            raise ValueError(
                "The last value in arg ``period_group_size_dict`` "
                "must be equal to the size of arg ``process_group``.")
        self.period_process_group_dict = OrderedDict()
        logger.info("Model averaging hierarchy:")
        for period, group_size in period_group_size_dict.items():
            logger.info(
                f"\tEach group that has {group_size} processes averages parameters every {period} iterations, "
                "if there is no higher-level averaging.")
            if group_size != overall_group_size:
                self.period_process_group_dict[period], _ = dist.new_subgroups(
                    group_size=group_size, group=overall_group)
            else:
                self.period_process_group_dict[period] = overall_group
if warmup_steps < 0:
raise ValueError("Arg ``warmup_steps`` must be a non-negative number.")
self.warmup_steps = warmup_steps
self.step = 0
def _find_process_group(self):
"""
Returns a tuple consisting of whether ``step`` can be divided by
a period in the keys of ``period_process_group_dict`` and the associated process group if any.
If ``step`` can be divided by multiple periods in the keys of ``period_process_group_dict``,
then the returned process group is the one corresponding to the largest period,
since this process group will be used for averaging parameters at this ``step``.
"""
for period in reversed(self._periods):
if self.step % period == 0:
return (True, self.period_process_group_dict[period])
return (False, None)
def average_parameters(self, params):
r"""
Averages parameters if ``step`` is no less than ``warmup_steps``
and it can be divided by a period in the keys of ``period_process_group_dict``,
where ``step`` is increased by 1 at each iteration in the training loop.
If ``step`` can be divided by multiple periods in the keys of ``period_process_group_dict``,
only the largest period is used, and the corresponding process group is used for averaging parameters.
"""
if self.step >= self.warmup_steps:
found, group = self._find_process_group()
if found:
utils.average_parameters(iter(params), group)
self.step += 1
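# --- Illustrative sketch (not part of the original module) ---
# A minimal, hedged demonstration of the period-selection rule implemented by
# ``_find_process_group``: at a given step, the largest registered period that
# divides the step wins. This helper is hypothetical and needs no distributed
# setup.
def _demo_pick_period(step, periods=(4, 16)):
    """Return the largest period dividing ``step``, or None if none does."""
    for period in sorted(periods, reverse=True):
        if step % period == 0:
            return period
    return None

# _demo_pick_period(4) -> 4, _demo_pick_period(8) -> 4,
# _demo_pick_period(16) -> 16, _demo_pick_period(5) -> None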
|
from UnetModel import *

# Standard-library modules used directly below; they may also be re-exported
# by ``UnetModel``, but importing them explicitly keeps this script self-contained.
import logging
import os
import re
import sys
import time
def get_params_dict(logDir):
file = open(logDir, 'r')
logText = file.read()
file.close()
    filterText = re.findall(r'parameters_search : (\w.*)', logText)[2:-2]
splitedText = [item.split(' : ') for item in filterText]
dictParams = dict()
for item in splitedText:
        if item[1] in ['True', 'False']:
            dictParams[str(item[0])] = (item[1] == 'True')
elif float(item[1]) < 1:
dictParams[str(item[0])] = float(item[1])
else:
dictParams[str(item[0])] = int(item[1])
return dictParams
def main_func(number):
logDir = '/Users/royhirsch/Documents/GitHub/runDataFromTheServer/08_05__14_55/bestRes/RunFolder_07_05_18__02_02_iter_num_5 copy/logFile_02_02__07_05_18.log'
restorePath = '/Users/royhirsch/Documents/GitHub/runDataFromTheServer/08_05__14_55/bestRes/RunFolder_07_05_18__02_02_iter_num_5 copy/validation_save_step_3000.ckpt'
createFolder(os.path.realpath(__file__ + "/../"), 'runData')
runFolderStr = time.strftime('RunFolder_restore_%d_%m_%y__%H_%M_iter_num_{}'.format(number))
createFolder(os.path.realpath(__file__ + "/../") + "/runData/", runFolderStr)
runFolderDir = os.path.realpath(__file__ + "/../") + "/runData/" + runFolderStr
logFolder = runFolderDir
startLogging(logFolder, False)
# get permutation of the parameters dict
paramsDict = get_params_dict(logDir)
logging.info('###############################################\n')
    logging.info('Parameters search, iteration num: {}\n'.format(number))
logging.info('Permutation dict values:')
# print for permutation dict for debug
for key, value in paramsDict.items():
logging.info(str(key) + ' : ' + str(value))
logging.info('###############################################\n')
# LOAD DATA
logging.info('Run mode: logging dir: {}'.format(logFolder))
dataPipe = DataPipline(numTrain=1,
numVal=1,
numTest=1,
modalityList=[0, 1, 2, 3],
permotate=False, # if FALSE - load the manual data lists
optionsDict={'zeroPadding': True,
'paddingSize': 240,
'normalize': True,
'normType': 'reg',
'cutPatch': False, # Added option not to cut patched - no filter !
'patchSize': 240,
'binaryLabelsC':True,
'filterSlices': paramsDict['filterSlices'],
'minPerentageLabeledVoxals': paramsDict['min_perentage_labeled_voxals'],
'percentageOfLabeledData': paramsDict['percentage_of_labeled_data']})
# CREATE MODEL
unetModel = UnetModelClass(layers=paramsDict['num_layers'],
num_channels=len(dataPipe.modalityList),
num_labels=1,
image_size=240,
kernel_size=3,
depth=paramsDict['depth'],
pool_size=2,
costStr='sigmoid',
optStr='adam',
argsDict={'layersTodisplay':[1],'weightedSum': paramsDict['weighted_sum'],
'weightVal': paramsDict['weight_val'],
'isBatchNorm': paramsDict['isBatchNorm']})
# TRAIN AND TEST MODEL
trainModel = Trainer(net=unetModel, argsDict={'printValidation': 10})
trainModel.train(dataPipe=dataPipe,
batchSize=2,
numSteps=5,
printInterval=1,
logPath=logFolder,
serialNum=number,
isRestore=True,
restorePath=restorePath)
    logging.info('Summary data for permutation number {}:'.format(number))
# run as main
if __name__ == "__main__":
if len(sys.argv) != 2:
print("Missing argument <iteration_number>")
exit()
main_func(sys.argv[1])
|
from configparser import ConfigParser
from contextlib import contextmanager
import os
from dotenv import load_dotenv
from mop2.utils.atomic_writes import atomic_write
from mop2.utils.files import change_dir
CONFVARIABLES = "app.config.ini"
OPERATIONSPATH = "../../../data"
TESTVARIABLES = "test.app.config.ini"
TESTINGPATH = "../../../data"
def create_baseline_configuration(configuration_file):
"""
    This method creates the API configuration file used for Azure API calls. As Microsoft changes the API,
    the calling methods can be kept in sync by altering the signatures in this generating method or in the
    resulting configuration files.
:return:
"""
load_dotenv()
config = ConfigParser()
# AZURE_SUBSCRIPTION_ID, AZURE_TENANT_ID, etc attempt to align with environment variable names found in most
# Microsoft Examples.
config["DEFAULT"] = {
"subscription_id": os.environ["AZURE_SUBSCRIPTION_ID"],
"management_grp_id": os.environ["AZURE_MANAGEMENT_GROUP_ID"],
"tenant_id": os.environ["AZURE_TENANT_ID"],
"organization": os.environ["ORGANIZATION"],
        # Migrating OS environment variables to Microsoft common naming standards for MS related technologies only
"AZURE_SUBSCRIPTION_ID": os.environ["AZURE_SUBSCRIPTION_ID"],
"AZURE_MANAGEMENT_GROUP_ID": os.environ["AZURE_MANAGEMENT_GROUP_ID"],
"AZURE_TENANT_ID": os.environ["AZURE_TENANT_ID"],
"plugin_root_path": "src/mop/azure/plugins/",
"activeDirectoryEndpointUrl": "https://login.microsoftonline.com",
"resourceManagerEndpointUrl": "https://management.azure.com/",
"activeDirectoryGraphResourceId": "https://graph.windows.net/",
"sqlManagementEndpointUrl": "https://management.core.windows.net:8443/",
"galleryEndpointUrl": "https://gallery.azure.com/",
"managementEndpointUrl": "https://management.core.windows.net/",
"scope": "User.ReadBasic.All",
"test_data_file": "test_data.ini",
}
"""
The configuration file supports multiple database instances
"""
config["SQLSERVER"] = {
"instance01": {
"server": "tcp:172.17.0.1",
"database": "sqlmopbucket",
"username": "robert",
"db_driver": "{ODBC Driver 17 for SQL Server}",
"dialect": "mssql",
}
}
config["LOG_ANALYTICS"] = {
"instance01": {"workspace_id": os.environ["LOG_ANALYTICS_WORKSPACE_ID"]}
}
config["FILTERS"] = {
"policy_definition_category": "Security",
"policy_definition_name_01": "",
}
config["LOGGING"] = {"level": "10"}
config["AZURESDK"] = {
"management_group_scope_policy_assignment": "/providers/Microsoft.Management/managementGroups/{managementGroup}",
'policy_definitions_create_or_update_at_management_group': 'https://management.azure.com/providers/Microsoft.Management/managementgroups/{managementGroupId}/providers/Microsoft.Authorization/policyDefinitions/{policyDefinitionName}?api-version=2019-09-01',
}
config["PRISMACLOUD"] = {
"api2_eu_login": "https://api2.eu.prismacloud.io/login",
"api2_eu": "https://api2.eu.prismacloud.io",
"policy": "{cloud_api}/policy",
"compliance": "{cloud_api}/compliance",
"filter_policy_suggest": "{cloud_api}/filter/policy/suggest",
}
config["COSMOSDB"] = {"URI_01": os.environ["COSMODB_URI"]}
config["PLUGINS"] = {
"plugin_python_policies": "pypolicy/glbl_pr_sec*.py",
"plugin_database": "test_db_plugin",
}
config["GIT"] = {
"azure_project_01": "testproject",
"azure_repository_id_01": "b3e721c7-0a2a-4712-b37a-2df3ce32f4cf",
"azure_repository_name_01": "testrepo",
"azure_scope_path_01": "/cloud/azure/policy/security",
"azure_devops_organization_url": "",
"azure_devops_repositories_list": "https://dev.azure.com/{organization}/{project}/_apis/git/repositories?api-version=5.1",
"azure_devops_repository_get": "https://dev.azure.com/{organization}/{project}/_apis/git/repositories/{repositoryId}?api-version=5.1",
"azure_devops_refs_list": "https://dev.azure.com/{organization}/{project}/_apis/git/repositories/{repositoryId}/refs?filter=heads/&filterContains={filterValue}&api-version=5.1",
"azure_devops_items_list": "https://dev.azure.com/{organization}/{project}/_apis/git/repositories/{repositoryId}/items?scopePath={scopePath}&recursionLevel={recursionLevel}&includeLinks={includeLinks}&versionDescriptor.version={versionDescriptor_version}&api-version=5.1",
}
with atomic_write(configuration_file, "w") as configfile:
config.write(configfile)
def main():
with change_dir(OPERATIONSPATH):
create_baseline_configuration(CONFVARIABLES)
with change_dir(TESTINGPATH):
create_baseline_configuration(TESTVARIABLES)
if __name__ == "__main__":
main()
|
from sqlobject import *
__connection__ = connectionForURI("sqlite:///:memory:")
hub = __connection__
class Genre(SQLObject):
name = StringCol()
artists = RelatedJoin('Artist')
class Artist(SQLObject):
name = StringCol()
genres = RelatedJoin('Genre')
albums = MultipleJoin('Album')
plays_instruments = RelatedJoin('Instrument', addRemoveName='anInstrument',
joinColumn='artist_id',
otherColumn='plays_instrument_id',
intermediateTable='artist_plays_instrument')
class Album(SQLObject):
name = StringCol()
artist = ForeignKey('Artist')
songs = MultipleJoin('Song')
class Instrument(SQLObject):
name = StringCol()
played_by = RelatedJoin( 'Artist', joinColumn='artist_id',
otherColumn='plays_instrument_id',
intermediateTable='artist_plays_instrument')
class Song(SQLObject):
name = StringCol()
album = ForeignKey('Album')
Genre.createTable(ifNotExists=True)
Artist.createTable(ifNotExists=True)
Album.createTable(ifNotExists=True)
Song.createTable(ifNotExists=True)
Instrument.createTable(ifNotExists=True)
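# --- Illustrative sketch (not part of the original schema) ---
# A hedged example of the many-to-many helpers SQLObject generates for the
# RelatedJoin declarations above (add<ClassName>/remove<ClassName>); the row
# data is hypothetical. Guarded so it only runs when executed directly.
if __name__ == '__main__':
    rock = Genre(name='Rock')
    hendrix = Artist(name='Jimi Hendrix')
    rock.addArtist(hendrix)                    # populates the join table
    print([g.name for g in hendrix.genres])    # -> ['Rock']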
|
import numpy as np
from random import randrange
def eval_numerical_gradient(f, x, verbose=True, h=0.00001):
"""
a naive implementation of numerical gradient of f at x
- f should be a function that takes a single argument
- x is the point (numpy array) to evaluate the gradient at
"""
fx = f(x) # evaluate function value at original point
grad = np.zeros_like(x)
# iterate over all indexes in x
it = np.nditer(x, flags=['multi_index'], op_flags=['readwrite'])
while not it.finished:
# evaluate function at x+h
ix = it.multi_index
oldval = x[ix]
x[ix] = oldval + h # increment by h
        fxph = f(x) # evaluate f(x + h)
x[ix] = oldval - h
fxmh = f(x) # evaluate f(x - h)
x[ix] = oldval # restore
# compute the partial derivative with centered formula
grad[ix] = (fxph - fxmh) / (2 * h) # the slope
if verbose:
print(ix, grad[ix])
it.iternext() # step to next dimension
return grad
def eval_numerical_gradient_array(f, x, df, h=1e-5):
"""
Evaluate a numeric gradient for a function that accepts a numpy
array and returns a numpy array.
"""
grad = np.zeros_like(x)
it = np.nditer(x, flags=['multi_index'], op_flags=['readwrite'])
while not it.finished:
ix = it.multi_index
oldval = x[ix]
x[ix] = oldval + h
pos = f(x).copy()
x[ix] = oldval - h
neg = f(x).copy()
x[ix] = oldval
grad[ix] = np.sum((pos - neg) * df) / (2 * h)
it.iternext()
return grad
def eval_numerical_gradient_blobs(f, inputs, output, h=1e-5):
"""
Compute numeric gradients for a function that operates on input
and output blobs.
We assume that f accepts several input blobs as arguments, followed by a
blob where outputs will be written. For example, f might be called like:
f(x, w, out)
where x and w are input Blobs, and the result of f will be written to out.
Inputs:
- f: function
- inputs: tuple of input blobs
- output: output blob
- h: step size
"""
numeric_diffs = []
for input_blob in inputs:
diff = np.zeros_like(input_blob.diffs)
it = np.nditer(input_blob.vals, flags=['multi_index'],
op_flags=['readwrite'])
while not it.finished:
idx = it.multi_index
orig = input_blob.vals[idx]
input_blob.vals[idx] = orig + h
f(*(inputs + (output,)))
pos = np.copy(output.vals)
input_blob.vals[idx] = orig - h
f(*(inputs + (output,)))
neg = np.copy(output.vals)
input_blob.vals[idx] = orig
diff[idx] = np.sum((pos - neg) * output.diffs) / (2.0 * h)
it.iternext()
numeric_diffs.append(diff)
return numeric_diffs
def eval_numerical_gradient_net(net, inputs, output, h=1e-5):
return eval_numerical_gradient_blobs(lambda *args: net.forward(),
inputs, output, h=h)
def grad_check_sparse(f, x, analytic_grad, num_checks=10, h=1e-5):
"""
    sample a few random elements and only return the numerical
    gradient in these dimensions.
"""
for i in range(num_checks):
ix = tuple([randrange(m) for m in x.shape])
oldval = x[ix]
x[ix] = oldval + h # increment by h
fxph = f(x) # evaluate f(x + h)
        x[ix] = oldval - h # decrement by h
fxmh = f(x) # evaluate f(x - h)
x[ix] = oldval # reset
grad_numerical = (fxph - fxmh) / (2 * h)
grad_analytic = analytic_grad[ix]
rel_error = (abs(grad_numerical - grad_analytic) /
(abs(grad_numerical) + abs(grad_analytic)))
print('numerical: %f analytic: %f, relative error: %e'
%(grad_numerical, grad_analytic, rel_error))
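# --- Illustrative sketch (not part of the original module) ---
# A hedged usage example: the centered-difference gradient of
# f(x) = sum(x ** 2) should closely match the analytic gradient 2 * x.
if __name__ == '__main__':
    f = lambda x: np.sum(x ** 2)
    x = np.random.randn(4, 3)
    num_grad = eval_numerical_gradient(f, x, verbose=False)
    print('max abs difference vs 2*x:', np.max(np.abs(num_grad - 2 * x)))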
|
import contextlib
import os
from typing import Optional, cast, Callable, Generator, IO, Any
from pathlib import Path
from pacu import settings
get_active_session: Optional[Callable] = None
class PacuException(Exception):
pass
def strip_lines(text: str) -> str:
out = []
for line in text.splitlines():
out.append(line.strip("\t "))
return " ".join(out)
def home_dir() -> Path:
return settings.home_dir
def pacu_dir() -> Path:
return Path(__file__).parents[1]
def session_dir() -> Path:
if not get_active_session:
raise UserWarning("No session_name set.")
p = (home_dir() / cast(Callable, get_active_session)().name).absolute()
os.makedirs(p, exist_ok=True)
return p
def downloads_dir() -> Path:
p = (session_dir() / "downloads").absolute()
os.makedirs(p, exist_ok=True)
return p
def module_data_dir(module: str) -> Path:
p = (session_dir() / "modules" / module).absolute()
os.makedirs(p, exist_ok=True)
return p
@contextlib.contextmanager
def save(
file_name: str, mode: str = "w", header: Optional[str] = None, **kwargs
) -> Generator[IO[Any], None, None]:
"""Saves the contents of text to {pacu_home}/{session}/downloads/{file_name}.
Use append to avoid overwriting existing content.
Setting the header will write the value to the first line if the file doesn't already exist.
Used for CSV headers.
By default the home directory is ~/.pacu.
"""
p = Path(downloads_dir()) / file_name
p.parent.mkdir(parents=True, exist_ok=True, mode=0o700)
    # Decide whether the header is needed before open() creates the file.
    write_header = bool(header) and not p.exists()
    with open(str(p), mode, **kwargs) as f:
        if write_header:
            f.write(header + "\n")
try:
yield f
finally:
f.close()
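# --- Illustrative sketch (not part of the original module) ---
# A hedged usage example for ``save``: append one CSV row under
# {pacu_home}/{session}/downloads/, writing the header only when the file is
# first created. The file name and columns are hypothetical, and an active
# Pacu session is assumed, so this is left as a comment.
#
#   with save('buckets.csv', mode='a', header='bucket,region') as f:
#       f.write('my-bucket,us-east-1\n')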
|
_base_ = [
'../_base_/datasets/coco_detection.py', '../_base_/default_runtime.py'
]
model = dict(
type='SingleStageDetector',
backbone=dict(
type='MobileNetV2',
out_indices=(4, 7),
norm_cfg=dict(type='BN', eps=0.001, momentum=0.03),
init_cfg=dict(type='TruncNormal', layer='Conv2d', std=0.03)),
neck=dict(
type='SSDNeck',
in_channels=(96, 1280),
out_channels=(96, 1280, 512, 256, 256, 128),
level_strides=(2, 2, 2, 2),
level_paddings=(1, 1, 1, 1),
l2_norm_scale=None,
use_depthwise=True,
norm_cfg=dict(type='BN', eps=0.001, momentum=0.03),
act_cfg=dict(type='ReLU6'),
init_cfg=dict(type='TruncNormal', layer='Conv2d', std=0.03)),
bbox_head=dict(
type='SSDHead',
in_channels=(96, 1280, 512, 256, 256, 128),
num_classes=80,
use_depthwise=True,
norm_cfg=dict(type='BN', eps=0.001, momentum=0.03),
act_cfg=dict(type='ReLU6'),
init_cfg=dict(type='Normal', layer='Conv2d', std=0.001),
# set anchor size manually instead of using the predefined
# SSD300 setting.
anchor_generator=dict(
type='SSDAnchorGenerator',
scale_major=False,
strides=[16, 32, 64, 107, 160, 320],
ratios=[[2, 3], [2, 3], [2, 3], [2, 3], [2, 3], [2, 3]],
min_sizes=[48, 100, 150, 202, 253, 304],
max_sizes=[100, 150, 202, 253, 304, 320]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[0.1, 0.1, 0.2, 0.2])),
# model training and testing settings
train_cfg=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.5,
min_pos_iou=0.,
ignore_iof_thr=-1,
gt_max_assign_all=False),
smoothl1_beta=1.,
allowed_border=-1,
pos_weight=-1,
neg_pos_ratio=3,
debug=False),
test_cfg=dict(
nms_pre=1000,
nms=dict(type='nms', iou_threshold=0.45),
min_bbox_size=0,
score_thr=0.02,
max_per_img=200))
cudnn_benchmark = True
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='Expand',
mean=img_norm_cfg['mean'],
to_rgb=img_norm_cfg['to_rgb'],
ratio_range=(1, 4)),
dict(
type='MinIoURandomCrop',
min_ious=(0.1, 0.3, 0.5, 0.7, 0.9),
min_crop_size=0.3),
dict(type='Resize', img_scale=(320, 320), keep_ratio=False),
dict(type='RandomFlip', flip_ratio=0.5),
dict(
type='PhotoMetricDistortion',
brightness_delta=32,
contrast_range=(0.5, 1.5),
saturation_range=(0.5, 1.5),
hue_delta=18),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=320),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(320, 320),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=False),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=320),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
samples_per_gpu=24,
workers_per_gpu=4,
train=dict(
_delete_=True,
type='RepeatDataset', # use RepeatDataset to speed up training
times=5,
dataset=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_train2017.json',
img_prefix=data_root + 'train2017/',
pipeline=train_pipeline)),
val=dict(pipeline=test_pipeline),
test=dict(pipeline=test_pipeline))
# optimizer
optimizer = dict(type='SGD', lr=0.015, momentum=0.9, weight_decay=4.0e-5)
optimizer_config = dict(grad_clip=None)
# learning policy
lr_config = dict(
policy='CosineAnnealing',
warmup='linear',
warmup_iters=500,
warmup_ratio=0.001,
min_lr=0)
runner = dict(type='EpochBasedRunner', max_epochs=120)
# Avoid evaluation and saving weights too frequently
evaluation = dict(interval=5, metric='bbox')
checkpoint_config = dict(interval=5)
custom_hooks = [
dict(type='NumClassCheckHook'),
dict(type='CheckInvalidLossHook', interval=50, priority='VERY_LOW')
]
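# --- Illustrative sketch (not part of this config) ---
# A hedged example of loading and inspecting the assembled config with mmcv;
# the file path is hypothetical and depends on where this config lives in the
# repository.
#
#   from mmcv import Config
#   cfg = Config.fromfile('configs/ssd/ssdlite_mobilenetv2_coco.py')
#   print(cfg.model.bbox_head.num_classes)   # -> 80
#   print(cfg.data.samples_per_gpu)          # -> 24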
|
# -*- coding: utf-8 -*-
"""
Created on Tue Oct 31 19:06:00 2017
@author: Thomas
"""
#%%
import numpy as np
import scipy.io
mat = scipy.io.loadmat('mnist_all.mat')
print("MAT file loaded. Contains", len(mat), "datasets. Example size:", mat['train1'].shape)
scipy.io.savemat('test.mat', mat)
|
# Generated by Django 2.2.10 on 2020-10-19 16:12
from django.db import migrations
import wagtail.core.fields
class Migration(migrations.Migration):
dependencies = [
('core', '0008_remove_personpage_name'),
]
operations = [
migrations.AddField(
model_name='administrationindexpage',
name='overview',
field=wagtail.core.fields.RichTextField(default=''),
preserve_default=False,
),
]
|
from dataclasses import dataclass
from openpersonen.api.enum import IndicatieGezagMinderjarigeChoices
from .in_onderzoek import GezagsVerhoudingInOnderzoek
@dataclass
class GezagsVerhouding:
indicatieCurateleRegister: bool
indicatieGezagMinderjarige: str
inOnderzoek: GezagsVerhoudingInOnderzoek
def get_indicatieGezagMinderjarige_display(self):
return IndicatieGezagMinderjarigeChoices.values[self.indicatieGezagMinderjarige]
|
# -*- coding: utf-8 -*-
"""
chemdataextractor.tests.test_reader_els.py
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Test reader for Elsevier.
Juraj Mavračić (jm2111@cam.ac.uk)
"""
import unittest
import logging
import io
import os
from chemdataextractor import Document
from chemdataextractor.reader.elsevier import ElsevierXmlReader
log = logging.getLogger(__name__)
log.setLevel(logging.DEBUG)
class TestElsXMLReader(unittest.TestCase):
maxDiff = None
def test_detect(self):
"""Test RscXMLReader can detect an RSC document."""
r = ElsevierXmlReader()
fname = 'j.jnoncrysol.2017.07.006.xml'
f = io.open(os.path.join(os.path.dirname(__file__), 'data', 'elsevier', fname), 'rb')
content = f.read()
f.close()
self.assertEqual(r.detect(content, fname=fname), True)
def test_direct_usage(self):
"""Test RscXMLReader used directly to parse file."""
r = ElsevierXmlReader()
fname = 'j.jnoncrysol.2017.07.006.xml'
f = io.open(os.path.join(os.path.dirname(__file__), 'data', 'elsevier', fname), 'rb')
content = f.read()
d = r.readstring(content)
f.close()
self.assertEqual(len(d.elements), 913)
def test_document_usage(self):
"""Test RscXMLReader used via Document.from_file."""
fname = 'j.jnoncrysol.2017.07.006.xml'
f = io.open(os.path.join(os.path.dirname(__file__), 'data', 'elsevier', fname), 'rb')
d = Document.from_file(f, readers=[ElsevierXmlReader()])
self.assertEqual(len(d.elements), 913)
if __name__ == '__main__':
unittest.main()
|
# Testing code
import numpy as np
import unittest
import subprocess
from .. import netcdf_read_write
class Tests(unittest.TestCase):
def test_pixel_node_writer(self):
"""
See if the writing function for pixel-node files produces a pixel-node file.
The behavior has been finicky for float32 vs float64
Writing a full test for float32 would be good (although the example grd file gets pretty close)
"""
        grid_def = [-120, -114, 32, 37]
        inc = [0.02, 0.02]
        filename = 'test_outfile.nc'
        lons = np.arange(grid_def[0], grid_def[1] + 0.00001, inc[0])
        lats = np.arange(grid_def[2], grid_def[3] + 0.00001, inc[1])
        # Test a write function
        grid = np.zeros((len(lats), len(lons)))
        netcdf_read_write.write_netcdf4(lons, lats, grid, filename)
        netcdf_read_write.parse_pixelnode_registration(filename)
        subprocess.call(['rm', filename], shell=False)
        subprocess.call(['rm', 'gmt.history'], shell=False)
        # Test a read-write cycle on an example grid
        [x, y, z] = netcdf_read_write.read_any_grd("Tectonic_Utils/read_write/test/example_grd.grd")
        netcdf_read_write.write_netcdf4(x, y, z, "Tectonic_Utils/read_write/test/written_example.grd")
        netcdf_read_write.parse_pixelnode_registration("Tectonic_Utils/read_write/test/written_example.grd")
        subprocess.call(['rm', 'gmt.history'], shell=False)
        return
if __name__ == "__main__":
    unittest.main()
|
#!/usr/bin/env python3
#
# Copyright 2021 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from argparse import ArgumentParser
from argparse import Namespace
from pprint import PrettyPrinter
import sys
from urllib.parse import urlparse
from distroinfo import info as di
INFO_FILE = 'osp.yml'
RDOINFO_GIT_URL = 'https://code.engineering.redhat.com/gerrit/ospinfo'
APP_DESCRIPTION = 'Find OSP packages, repositories, components and releases.'
def get_distroinfo():
return di.DistroInfo(info_files=INFO_FILE,
cache_ttl=24*60*60, # 1 day in seconds
remote_git_info=RDOINFO_GIT_URL).get_info()
def get_components(**kwargs):
info = get_distroinfo()
components = info.get('components')
if kwargs.get('name'):
components = [component for component in components
if kwargs.get('name') == component.get('name')]
return components
def get_packages(**kwargs):
info = get_distroinfo()
packages = info.get('packages')
packages = [package for package in packages
if 'osp-name' in package.keys()]
if kwargs.get('component'):
packages = [package for package in packages
if kwargs.get('component') == package.get('component')]
if kwargs.get('name'):
packages = [package for package in packages
if kwargs.get('name') == package.get('name')]
if kwargs.get('tag'):
packages = [package for package in packages
if kwargs.get('tag') in package.get('tags')]
if kwargs.get('upstream'):
packages = [package for package in packages
if kwargs.get('upstream') in str(package.get('upstream'))]
for package in packages:
package['osp-project'] = urlparse(package['osp-patches']).path[1:]
return packages
def get_projects_mapping(**kwargs) -> dict:
    packages = get_packages(**kwargs)
projects_mapping = {}
for package in packages:
if 'upstream' in package.keys() and package['upstream']:
upstream_name = urlparse(package['upstream']).path[1:]
upstream_name = upstream_name.replace("/", "-")
else:
upstream_name = package['name']
if 'osp-patches' in package.keys() and package['osp-patches']:
projects_mapping[upstream_name] = urlparse(
package['osp-patches']).path[1:]
else:
projects_mapping[upstream_name] = upstream_name
return projects_mapping
def get_releases(**kwargs):
info = get_distroinfo()
releases = info.get('osp_releases')
if kwargs.get('tag'):
releases = [release for release in releases
if kwargs.get('tag') in release.get('ospinfo_tag_name')]
return releases
def process_arguments(argv=None) -> Namespace:
parser = ArgumentParser(description=APP_DESCRIPTION)
subparsers = parser.add_subparsers(dest='command', metavar='command')
common = ArgumentParser(add_help=False)
common.add_argument('--debug', dest='debug',
default=False, action='store_true',
help='print all fields in output')
common.add_argument('--header', dest='header',
default=False, action='store_true',
help='print header with output names on top')
common.add_argument('--output', dest='output',
help='comma-separated list of fields to return')
components = subparsers.add_parser('components', help='', parents=[common])
components.add_argument('--name', dest='name')
packages = subparsers.add_parser('packages', help='', parents=[common])
packages.add_argument('--component', dest='component')
packages.add_argument('--name', dest='name')
packages.add_argument('--tag', dest='tag')
packages.add_argument('--upstream', dest='upstream')
releases = subparsers.add_parser('releases', help='', parents=[common])
releases.add_argument('--tag', dest='tag')
arguments = parser.parse_args(argv)
if not arguments.command:
parser.print_help()
sys.exit(1)
return arguments
def main(argv=None) -> None:
args = process_arguments(argv)
if args.command == 'components':
results = get_components(**vars(args))
default_output = ['name']
elif args.command == 'packages':
results = get_packages(**vars(args))
default_output = ['osp-name', 'osp-distgit', 'osp-patches']
elif args.command == 'releases':
results = get_releases(**vars(args))
default_output = ['ospinfo_tag_name', 'git_release_branch']
else:
results = None
if args.debug:
pp = PrettyPrinter()
pp.pprint(results)
return
if args.output:
output = [entry.strip() for entry in args.output.split(',')]
else:
output = default_output
if args.header:
print(' '.join(output))
print(' '.join(['-' * len(field) for field in output]))
for result in results:
print(' '.join([result.get(key, 'None') for key in output]))
if __name__ == '__main__':
main()
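# --- Illustrative sketch (not part of the original script) ---
# A hedged example of driving the CLI programmatically through main(); the
# package name and tag values are hypothetical and depend on what is defined
# in osp.yml.
#
#   main(['packages', '--name', 'openstack-nova', '--header'])
#   main(['releases', '--tag', '17.1'])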
|
from bilibili import bilibili
import datetime
import time
import asyncio
import traceback
import os
import configloader
import utils
from printer import Printer
class Tasks:
def __init__(self):
fileDir = os.path.dirname(os.path.realpath('__file__'))
file_user = fileDir + "/conf/user.conf"
self.dic_user = configloader.load_user(file_user)
    # Claim the daily free gift bag
async def Daily_bag(self):
response = await bilibili().get_dailybag()
json_response = await response.json()
for i in range(0, len(json_response['data']['bag_list'])):
Printer().printer(f"获得-{json_response['data']['bag_list'][i]['bag_name']}-成功", "Info", "green")
def CurrentTime(self):
currenttime = str(int(time.mktime(datetime.datetime.now().timetuple())))
return currenttime
    # Daily check-in
async def DoSign(self):
response = await bilibili().get_dosign()
temp = await response.json(content_type=None)
Printer().printer(f"签到状态:{temp['msg']}", "Info", "green")
    # Claim the daily task reward
async def Daily_Task(self):
response2 = await bilibili().get_dailytask()
json_response2 = await response2.json()
Printer().printer(f"双端观看直播:{json_response2['msg']}", "Info", "green")
    # Fan club (support group) sign-in
async def link_sign(self):
response = await bilibili().get_grouplist()
json_response = await response.json(content_type=None)
check = len(json_response['data']['list'])
group_id_list = []
owner_uid_list = []
for i in range(0, check):
group_id = json_response['data']['list'][i]['group_id']
owner_uid = json_response['data']['list'][i]['owner_uid']
group_id_list.append(group_id)
owner_uid_list.append(owner_uid)
for (i1, i2) in zip(group_id_list, owner_uid_list):
response = await bilibili().assign_group(i1, i2)
json_response = await response.json(content_type=None)
if json_response['code'] == 0:
if (json_response['data']['status']) == 1:
Printer().printer(f"应援团{i1}已应援过", "Info", "green")
if (json_response['data']['status']) == 0:
Printer().printer(f"应援团{i1}应援成功,获得{json_response['data']['add_num']}点亲密度", "Info", "green")
else:
Printer().printer(f"应援团{i1}应援失败,{json_response}", "Error", "red")
async def send_gift(self):
if self.dic_user['gift']['on/off'] == '1':
argvs, x = await utils.fetch_bag_list(printer=False)
for i in range(0, len(argvs)):
giftID = argvs[i][0]
giftNum = argvs[i][1]
bagID = argvs[i][2]
roomID = self.dic_user['gift']['send_to_room']
await utils.send_gift_web(roomID, giftID, giftNum, bagID)
if not argvs:
Printer().printer(f"没有将要过期的礼物~", "Info", "green")
async def auto_send_gift(self):
if self.dic_user['auto-gift']['on/off'] == "1":
a = await utils.fetch_medal(printer=False)
res = await bilibili().gift_list()
json_res = await res.json()
temp_dic = {}
for j in range(0, len(json_res['data'])):
price = json_res['data'][j]['price']
id = json_res['data'][j]['id']
temp_dic[id] = price
x, temp = await utils.fetch_bag_list(printer=False)
roomid = a[0]
today_feed = a[1]
day_limit = a[2]
left_num = int(day_limit) - int(today_feed)
calculate = 0
for i in range(0, len(temp)):
gift_id = int(temp[i][0])
gift_num = int(temp[i][1])
bag_id = int(temp[i][2])
expire = int(temp[i][3])
if (gift_id != 4 and gift_id != 3 and gift_id != 9 and gift_id != 10) and expire != 0:
if (gift_num * (temp_dic[gift_id] / 100) < left_num):
calculate = calculate + temp_dic[gift_id] / 100 * gift_num
tmp2 = temp_dic[gift_id] / 100 * gift_num
await utils.send_gift_web(roomid, gift_id, gift_num, bag_id)
left_num = left_num - tmp2
elif left_num - temp_dic[gift_id] / 100 >= 0:
tmp = (left_num) / (temp_dic[gift_id] / 100)
tmp1 = (temp_dic[gift_id] / 100) * int(tmp)
calculate = calculate + tmp1
await utils.send_gift_web(roomid, gift_id, tmp, bag_id)
left_num = left_num - tmp1
Printer().printer(f"自动送礼共送出亲密度为{int(calculate)}的礼物", "Info", "green")
async def doublegain_coin2silver(self):
if self.dic_user['doublegain_coin2silver']['on/off'] == "1":
response0 = await bilibili().request_doublegain_coin2silver()
json_response0 = await response0.json()
response1 = await bilibili().request_doublegain_coin2silver()
json_response1 = await response1.json()
print(json_response0['msg'], json_response1['msg'])
    async def silver2coin(self):
if self.dic_user['coin']['on/off'] == '1':
response1 = await bilibili().silver2coin_app()
json_response1 = await response1.json()
Printer().printer(f"银瓜子兑换硬币状态:{json_response1['msg']}", "Info", "green")
async def run(self):
while 1:
try:
Printer().printer(f"开始执行每日任务", "Info", "green")
await self.DoSign()
await self.Daily_bag()
await self.Daily_Task()
await self.link_sign()
await self.send_gift()
                await self.silver2coin()
await self.doublegain_coin2silver()
await self.auto_send_gift()
await utils.reconnect()
await asyncio.sleep(21600)
            except Exception:
await asyncio.sleep(10)
Printer().printer(traceback.format_exc(), "Error", "red")
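# --- Illustrative sketch (not part of the original module) ---
# A hedged example of how this task loop would typically be scheduled from the
# project's entry point; the exact wiring depends on the surrounding code, so
# it is left as a comment.
#
#   loop = asyncio.get_event_loop()
#   loop.run_until_complete(Tasks().run())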
|
# -*- coding: utf-8 -*-
"""
Enforce state for SSL/TLS
=========================
"""
# Import Python Libs
from __future__ import absolute_import, print_function, unicode_literals
import datetime
import logging
import time
__virtualname__ = "tls"
log = logging.getLogger(__name__)
def __virtual__():
if "tls.cert_info" not in __salt__:
return False
return __virtualname__
def valid_certificate(name, weeks=0, days=0, hours=0, minutes=0, seconds=0):
"""
Verify that a TLS certificate is valid now and (optionally) will be valid
for the time specified through weeks, days, hours, minutes, and seconds.
"""
ret = {"name": name, "changes": {}, "result": False, "comment": ""}
now = time.time()
try:
cert_info = __salt__["tls.cert_info"](name)
except IOError as exc:
ret["comment"] = "{}".format(exc)
ret["result"] = False
log.error(ret["comment"])
return ret
# verify that the cert is valid *now*
if now < cert_info["not_before"]:
ret["comment"] = "Certificate is not yet valid"
return ret
if now > cert_info["not_after"]:
ret["comment"] = "Certificate is expired"
return ret
# verify the cert will be valid for defined time
delta_remaining = datetime.timedelta(seconds=cert_info["not_after"] - now)
delta_kind_map = {
"weeks": weeks,
"days": days,
"hours": hours,
"minutes": minutes,
"seconds": seconds,
}
delta_min = datetime.timedelta(**delta_kind_map)
    # if there isn't enough time remaining, we consider it a failure
if delta_remaining < delta_min:
ret[
"comment"
] = "Certificate will expire in {0}, which is less than {1}".format(
delta_remaining, delta_min
)
return ret
ret["result"] = True
ret["comment"] = "Certificate is valid for {0}".format(delta_remaining)
return ret
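# --- Illustrative sketch (not part of the original state module) ---
# A hedged example of exercising ``valid_certificate`` outside of Salt by
# injecting a fake ``__salt__`` mapping; the certificate path and lifetime are
# hypothetical, so it is left as a comment.
#
#   __salt__ = {"tls.cert_info": lambda name: {
#       "not_before": time.time() - 3600,        # became valid an hour ago
#       "not_after": time.time() + 90 * 86400,   # expires in ~90 days
#   }}
#   valid_certificate("/etc/ssl/www.example.com.pem", weeks=4)["result"]  # -> True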
|
"""Adding feature vectors
Revision ID: f4249b4ba6fa
Revises: 863114f0c659
Create Date: 2020-11-24 14:43:08.789873
"""
import sqlalchemy as sa
from alembic import op
from mlrun.api.utils.db.sql_collation import SQLCollationUtil
# revision identifiers, used by Alembic.
revision = "f4249b4ba6fa"
down_revision = "863114f0c659"
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table(
"feature_vectors",
sa.Column("id", sa.Integer(), nullable=False),
sa.Column(
"name",
sa.String(255, collation=SQLCollationUtil.collation()),
nullable=True,
),
sa.Column(
"project",
sa.String(255, collation=SQLCollationUtil.collation()),
nullable=True,
),
sa.Column("created", sa.TIMESTAMP(), nullable=True),
sa.Column("updated", sa.TIMESTAMP(), nullable=True),
sa.Column(
"state",
sa.String(255, collation=SQLCollationUtil.collation()),
nullable=True,
),
sa.Column(
"uid", sa.String(255, collation=SQLCollationUtil.collation()), nullable=True
),
sa.Column("object", sa.JSON(), nullable=True),
sa.PrimaryKeyConstraint("id"),
sa.UniqueConstraint("name", "project", "uid", name="_feature_vectors_uc"),
)
op.create_table(
"feature_vectors_labels",
sa.Column("id", sa.Integer(), nullable=False),
sa.Column(
"name",
sa.String(255, collation=SQLCollationUtil.collation()),
nullable=True,
),
sa.Column(
"value",
sa.String(255, collation=SQLCollationUtil.collation()),
nullable=True,
),
sa.Column("parent", sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(
["parent"],
["feature_vectors.id"],
),
sa.PrimaryKeyConstraint("id"),
sa.UniqueConstraint("name", "parent", name="_feature_vectors_labels_uc"),
)
op.create_table(
"feature_vectors_tags",
sa.Column("id", sa.Integer(), nullable=False),
sa.Column(
"project",
sa.String(255, collation=SQLCollationUtil.collation()),
nullable=True,
),
sa.Column(
"name",
sa.String(255, collation=SQLCollationUtil.collation()),
nullable=True,
),
sa.Column("obj_id", sa.Integer(), nullable=True),
sa.Column(
"obj_name",
sa.String(255, collation=SQLCollationUtil.collation()),
nullable=True,
),
sa.ForeignKeyConstraint(
["obj_id"],
["feature_vectors.id"],
),
sa.ForeignKeyConstraint(
["obj_name"],
["feature_vectors.name"],
),
sa.PrimaryKeyConstraint("id"),
sa.UniqueConstraint(
"project", "name", "obj_name", name="_feature_vectors_tags_uc"
),
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table("feature_vectors_tags")
op.drop_table("feature_vectors_labels")
op.drop_table("feature_vectors")
# ### end Alembic commands ###
|
import warnings
import numpy as np
from .utils_moments import moments
from .velocity import velocity, ss_estimation
from .utils import (
get_mapper,
get_valid_bools,
get_data_for_kin_params_estimation,
get_U_S_for_velocity_estimation,
)
from .utils import set_velocity, set_param_ss, set_param_kinetic
from .moments import moment_model
# incorporate the model selection code soon
def _dynamics(
adata,
tkey=None,
filter_gene_mode="final",
mode="moment",
use_smoothed=True,
group=None,
protein_names=None,
experiment_type=None,
assumption_mRNA=None,
assumption_protein="ss",
NTR_vel=True,
concat_data=False,
log_unnormalized=True,
one_shot_method="combined",
):
"""Inclusive model of expression dynamics considers splicing, metabolic labeling and protein translation. It supports
learning high-dimensional velocity vector samples for droplet based (10x, inDrop, drop-seq, etc), scSLAM-seq, NASC-seq
sci-fate, scNT-seq or cite-seq datasets.
Parameters
----------
adata: :class:`~anndata.AnnData`
AnnData object.
tkey: `str` or None (default: None)
The column key for the time label of cells in .obs. Used for either "steady_state" or non-"steady_state" mode or `moment`
mode with labeled data.
filter_gene_mode: `str` (default: `final`)
The string for indicating which mode (one of, {'final', 'basic', 'no'}) of gene filter will be used.
    mode: `str` (default: `moment`)
String indicates which estimation mode will be used. This parameter should be used in conjunction with assumption_mRNA.
* Available options when the `assumption_mRNA` is 'ss' include:
            (1) 'linear_regression': The canonical method from the seminal RNA velocity paper based on deterministic ordinary
differential equations;
(2) 'gmm': The new generalized methods of moments from us that is based on master equations, similar to the
"moment" mode in the excellent scvelo package;
(3) 'negbin': The new method from us that models steady state RNA expression as a negative binomial distribution,
            also built upon master equations.
Note that all those methods require using extreme data points (except negbin) for the estimation. Extreme data points
are defined as the data from cells where the expression of unspliced / spliced or new / total RNA, etc. are in the
top or bottom, 5%, for example. `linear_regression` only considers the mean of RNA species (based on the deterministic
ordinary different equations) while moment based methods (`gmm`, `negbin`) considers both first moment (mean) and
second moment (uncentered variance) of RNA species (based on the stochastic master equations).
* Available options when the `assumption_mRNA` is 'kinetic' include:
(1) 'deterministic': The method based on deterministic ordinary differential equations;
(2) 'stochastic' or `moment`: The new method from us that is based on master equations;
Note that `kinetic` model implicitly assumes the `experiment_type` is not `conventional`. Thus `deterministic`,
`stochastic` (equivalent to `moment`) models are only possible for the labeling experiments.
A "model_selection" mode will be supported soon in which alpha, beta and gamma will be modeled as a function of time.
use_smoothed: `bool` (default: `True`)
Whether to use the smoothed data when calculating velocity for each gene. `use_smoothed` is only relevant when
mode is `linear_regression` (and experiment_type and assumption_mRNA correspond to `conventional` and `ss` implicitly).
group: `str` or None (default: `None`)
The column key/name that identifies the grouping information (for example, clusters that correspond to different cell types)
of cells. This will be used to estimate group-specific (i.e cell-type specific) kinetic parameters.
protein_names: `List`
A list of gene names corresponds to the rows of the measured proteins in the `X_protein` of the `obsm` attribute.
The names have to be included in the adata.var.index.
experiment_type: `str`
single cell RNA-seq experiment type. Available options are:
(1) 'conventional': conventional single-cell RNA-seq experiment;
(2) 'deg': chase/degradation experiment;
(3) 'kin': pulse/synthesis/kinetics experiment;
(4) 'one-shot': one-shot kinetic experiment.
assumption_mRNA: `str`
Parameter estimation assumption for mRNA. Available options are:
(1) 'ss': pseudo steady state;
(2) 'kinetic' or None: degradation and kinetic data without steady state assumption.
        If no labeling data exists, assumption_mRNA will automatically be set to 'ss'. For one-shot experiments, assumption_mRNA
        is set to None. However, we will use the steady state assumption to estimate parameters alpha and gamma, either by a
        deterministic linear regression or by the first-order decay approach in line with the sci-fate paper.
assumption_protein: `str`
Parameter estimation assumption for protein. Available options are:
(1) 'ss': pseudo steady state;
NTR_vel: `bool` (default: `True`)
Whether to use NTR (new/total ratio) velocity for labeling datasets.
concat_data: `bool` (default: `False`)
Whether to concatenate data before estimation. If your data is a list of matrices for each time point, this need to be set as True.
log_unnormalized: `bool` (default: `True`)
Whether to log transform the unnormalized data.
Returns
-------
adata: :class:`~anndata.AnnData`
        An updated AnnData object with estimated kinetic parameters and inferred velocity included.
"""
if (
"use_for_dynamics" not in adata.var.columns
and "pass_basic_filter" not in adata.var.columns
):
filter_gene_mode = "no"
valid_ind = get_valid_bools(adata, filter_gene_mode)
if mode == "moment" or (
use_smoothed and len([i for i in adata.layers.keys() if i.startswith("M_")]) < 2
):
if experiment_type == "kin":
use_smoothed = False
else:
moments(adata)
valid_adata = adata[:, valid_ind].copy()
    if group is not None and group in adata.obs.columns:
_group = adata.obs[group].unique()
else:
_group = ["_all_cells"]
for cur_grp in _group:
if cur_grp == "_all_cells":
kin_param_pre = ""
cur_cells_bools = np.ones(valid_adata.shape[0], dtype=bool)
subset_adata = valid_adata[cur_cells_bools]
else:
kin_param_pre = group + "_" + cur_grp + "_"
cur_cells_bools = (valid_adata.obs[group] == cur_grp).values
subset_adata = valid_adata[cur_cells_bools]
(
U,
Ul,
S,
Sl,
P,
US,
S2,
t,
normalized,
has_splicing,
has_labeling,
has_protein,
ind_for_proteins,
assumption_mRNA,
exp_type,
) = get_data_for_kin_params_estimation(
subset_adata,
mode,
use_smoothed,
tkey,
protein_names,
experiment_type,
log_unnormalized,
NTR_vel,
)
if exp_type is not None:
if experiment_type != exp_type:
warnings.warn(
"dynamo detects the experiment type of your data as {}, but your input experiment_type "
"is {}".format(exp_type, experiment_type)
)
experiment_type = exp_type
assumption_mRNA = (
"ss" if exp_type == "conventional" and mode == "deterministic" else None
)
NTR_vel = False
if mode == "moment" and experiment_type not in ["conventional", "kin"]:
"""
            # temporarily convert to deterministic mode, as moment mode for one-shot,
            degradation and other types of labeling experiments is still ongoing."""
mode = "deterministic"
if mode == "deterministic" or (
experiment_type != "kin" and mode == "moment"
):
est = ss_estimation(
U=U,
Ul=Ul,
S=S,
Sl=Sl,
P=P,
US=US,
S2=S2,
t=t,
ind_for_proteins=ind_for_proteins,
experiment_type=experiment_type,
assumption_mRNA=assumption_mRNA,
assumption_protein=assumption_protein,
concat_data=concat_data,
)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
if experiment_type in ["one-shot", "one_shot"]:
est.fit(one_shot_method=one_shot_method)
else:
est.fit()
alpha, beta, gamma, eta, delta = est.parameters.values()
U, S = get_U_S_for_velocity_estimation(
subset_adata,
use_smoothed,
has_splicing,
has_labeling,
log_unnormalized,
NTR_vel,
)
vel = velocity(estimation=est)
vel_U = vel.vel_u(U)
vel_S = vel.vel_s(U, S)
vel_P = vel.vel_p(S, P)
adata = set_velocity(
adata,
vel_U,
vel_S,
vel_P,
_group,
cur_grp,
cur_cells_bools,
valid_ind,
ind_for_proteins,
)
adata = set_param_ss(
adata,
est,
alpha,
beta,
gamma,
eta,
delta,
experiment_type,
_group,
cur_grp,
kin_param_pre,
valid_ind,
ind_for_proteins,
)
elif mode == "moment":
adata, Est, t_ind = moment_model(
adata, subset_adata, _group, cur_grp, log_unnormalized, tkey
)
t_ind += 1
params, costs = Est.fit()
a, b, alpha_a, alpha_i, beta, gamma = (
params[:, 0],
params[:, 1],
params[:, 2],
params[:, 3],
params[:, 4],
params[:, 5],
)
def fbar(x_a, x_i, a, b):
return b / (a + b) * x_a + a / (a + b) * x_i
alpha = fbar(alpha_a, alpha_i, a, b)[:, None]
params = {"alpha": alpha, "beta": beta, "gamma": gamma, "t": t}
vel = velocity(**params)
U, S = get_U_S_for_velocity_estimation(
subset_adata,
use_smoothed,
has_splicing,
has_labeling,
log_unnormalized,
NTR_vel,
)
vel_U = vel.vel_u(U)
vel_S = vel.vel_s(U, S)
vel_P = vel.vel_p(S, P)
adata = set_velocity(
adata,
vel_U,
vel_S,
vel_P,
_group,
cur_grp,
cur_cells_bools,
valid_ind,
ind_for_proteins,
)
adata = set_param_kinetic(
adata,
alpha,
a,
b,
alpha_a,
alpha_i,
beta,
gamma,
kin_param_pre,
_group,
cur_grp,
valid_ind,
)
# add protein related parameters in the moment model below:
elif mode == "model_selection":
warnings.warn("Not implemented yet.")
    if group is not None and group in adata.obs.columns:
uns_key = group + "_dynamics"
else:
uns_key = "dynamics"
if has_splicing and has_labeling:
adata.layers['X_U'], adata.layers['X_S'] = adata.layers['X_uu'] + adata.layers['X_ul'], adata.layers['X_su'] + adata.layers['X_sl']
adata.uns[uns_key] = {
"t": t,
"group": group,
"asspt_mRNA": assumption_mRNA,
"experiment_type": experiment_type,
"normalized": normalized,
"mode": mode,
"has_splicing": has_splicing,
"has_labeling": has_labeling,
"has_protein": has_protein,
"use_smoothed": use_smoothed,
"NTR_vel": NTR_vel,
"log_unnormalized": log_unnormalized,
}
return adata
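# --- Illustrative sketch (not part of the original module) ---
# A hedged example of a typical call for a conventional scRNA-seq dataset;
# ``adata`` is assumed to be a preprocessed AnnData object whose spliced /
# unspliced layers were prepared upstream, so this is left as a comment.
#
#   adata = _dynamics(adata, mode="deterministic", experiment_type="conventional")
#   adata.uns["dynamics"]["experiment_type"]   # -> "conventional"
#   # velocity layers and kinetic parameters are written back into ``adata``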
|
class DeviceIdentifierType:
FIT = "fit"
TCX = "tcx"
class FITDeviceIdentifier:
def __init__(self, manufacturer, product=None):
self.Type = DeviceIdentifierType.FIT
self.Manufacturer = manufacturer
self.Product = product
class TCXDeviceIdentifier:
def __init__(self, name, productId=None):
self.Type = DeviceIdentifierType.TCX
self.Name = name
self.ProductID = productId
class DeviceIdentifier:
_identifierGroups = []
def AddIdentifierGroup(*identifiers):
DeviceIdentifier._identifierGroups.append(identifiers)
def FindMatchingIdentifierOfType(type, query):
for group in DeviceIdentifier._identifierGroups:
for identifier in group:
if identifier.Type != type:
continue
compareDict = dict(identifier.__dict__)
compareDict.update(query)
if compareDict == identifier.__dict__: # At the time it felt like a better idea than iterating through keys?
return identifier
def FindEquivalentIdentifierOfType(type, identifier):
if not identifier:
return
if identifier.Type == type:
return identifier # We preemptively do this, so international variants have a chance of being preserved
for group in DeviceIdentifier._identifierGroups:
if identifier not in group:
continue
for altIdentifier in group:
if altIdentifier.Type == type:
return altIdentifier
class Device:
def __init__(self, identifier, serial=None, verMaj=None, verMin=None):
self.Identifier = identifier
self.Serial = serial
self.VersionMajor = verMaj
self.VersionMinor = verMin
# I think Garmin devices' TCX ProductIDs match their FIT garmin_product ids
# And, since the FIT SDK is lagging behind:
# - Forerunner 620 is 1623
def _garminIdentifier(name, *fitIds):
return [TCXDeviceIdentifier("Garmin %s" % name, fitIds[0])] + [FITDeviceIdentifier(1, fitId) for fitId in fitIds]
# This list is REGEXed from the FIT SDK - I have no clue what some of the entries are...
# Some products have international variants with different FIT IDs - the first ID given is used for TCX
# DeviceIdentifier.AddIdentifierGroup(*_garminIdentifier("HRM1", 1)) - Garmin Connect reports itself as ID 1 too.
DeviceIdentifier.AddIdentifierGroup(*_garminIdentifier("AXH01", 2))
DeviceIdentifier.AddIdentifierGroup(*_garminIdentifier("AXB01", 3))
DeviceIdentifier.AddIdentifierGroup(*_garminIdentifier("AXB02", 4))
DeviceIdentifier.AddIdentifierGroup(*_garminIdentifier("HRM2SS", 5))
DeviceIdentifier.AddIdentifierGroup(*_garminIdentifier("DSI_ALF02", 6))
DeviceIdentifier.AddIdentifierGroup(*_garminIdentifier("Forerunner 301", 473, 474, 475, 494))
DeviceIdentifier.AddIdentifierGroup(*_garminIdentifier("Forerunner 405", 717, 987))
DeviceIdentifier.AddIdentifierGroup(*_garminIdentifier("Forerunner 50", 782))
DeviceIdentifier.AddIdentifierGroup(*_garminIdentifier("Forerunner 60", 988))
DeviceIdentifier.AddIdentifierGroup(*_garminIdentifier("DSI_ALF01", 1011))
DeviceIdentifier.AddIdentifierGroup(*_garminIdentifier("Forerunner 310XT", 1018, 1446))
DeviceIdentifier.AddIdentifierGroup(*_garminIdentifier("Edge 500", 1036, 1199, 1213, 1387, 1422))
DeviceIdentifier.AddIdentifierGroup(*_garminIdentifier("Forerunner 110", 1124, 1274))
DeviceIdentifier.AddIdentifierGroup(*_garminIdentifier("Edge 800", 1169, 1333, 1334, 1497, 1386))
DeviceIdentifier.AddIdentifierGroup(*_garminIdentifier("Chirp", 1253))
DeviceIdentifier.AddIdentifierGroup(*_garminIdentifier("Edge 200", 1325, 1555))
DeviceIdentifier.AddIdentifierGroup(*_garminIdentifier("Forerunner 910XT", 1328, 1537, 1600, 1664))
DeviceIdentifier.AddIdentifierGroup(*_garminIdentifier("ALF04", 1341))
DeviceIdentifier.AddIdentifierGroup(*_garminIdentifier("Forerunner 610", 1345, 1410))
DeviceIdentifier.AddIdentifierGroup(*_garminIdentifier("Forerunner 210", 1360)) # In the SDK this is marked as "JAPAN" :S
DeviceIdentifier.AddIdentifierGroup(*_garminIdentifier("Forerunner 70", 1436))
DeviceIdentifier.AddIdentifierGroup(*_garminIdentifier("AMX", 1461))
DeviceIdentifier.AddIdentifierGroup(*_garminIdentifier("Forerunner 10", 1482, 1688))
DeviceIdentifier.AddIdentifierGroup(*_garminIdentifier("Swim", 1499))
DeviceIdentifier.AddIdentifierGroup(*_garminIdentifier("Fenix", 1551))
DeviceIdentifier.AddIdentifierGroup(*_garminIdentifier("Edge 510", 1561, 1742, 1821))
DeviceIdentifier.AddIdentifierGroup(*_garminIdentifier("Edge 810", 1567, 1721, 1822, 1823))
DeviceIdentifier.AddIdentifierGroup(*_garminIdentifier("Tempe", 1570))
DeviceIdentifier.AddIdentifierGroup(*_garminIdentifier("VIRB Elite", 1735)) # Where's the VIRB Proletariat?
DeviceIdentifier.AddIdentifierGroup(*_garminIdentifier("Edge Touring", 1736))
DeviceIdentifier.AddIdentifierGroup(*_garminIdentifier("HRM Run", 1752))
DeviceIdentifier.AddIdentifierGroup(*_garminIdentifier("SDM4", 10007))
DeviceIdentifier.AddIdentifierGroup(*_garminIdentifier("Training Center", 20119))
DeviceIdentifier.AddIdentifierGroup(*_garminIdentifier("Forerunner 620", 1623))
|
import random
import json
import pickle
import numpy as np
import nltk
nltk.download('punkt')
nltk.download('wordnet')
from nltk.stem import WordNetLemmatizer
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Activation, Dropout
from tensorflow.keras.optimizers import SGD
lemmatizer = WordNetLemmatizer()
intents = json.loads(open('./intents.json').read())
words = []
classes = []
documents = []
ignore_letters = ['?', '!', '@', ',', ';', '.']
for intent in intents['intents']:
for pattern in intent['patterns']:
word_list = nltk.word_tokenize(pattern)
words.extend(word_list)
documents.append((word_list, intent['tag']))
if intent['tag'] not in classes:
classes.append(intent['tag'])
words = [lemmatizer.lemmatize(word) for word in words if word not in ignore_letters]
words = sorted(set(words))
classes = sorted(set(classes))
pickle.dump(words, open('words.pkl', 'wb'))
pickle.dump(classes, open('classes.pkl', 'wb'))
training = []
output_empty = [0] * len(classes)
for document in documents:
bag = []
word_patterns = document[0]
word_patterns = [lemmatizer.lemmatize(word.lower()) for word in word_patterns]
    for word in words:  # build a fixed-length bag-of-words over the full vocabulary
        bag.append(1 if word in word_patterns else 0)
output_row = list(output_empty)
output_row[classes.index(document[1])] = 1
training.append([bag, output_row])
random.shuffle(training)
training = np.array(training, dtype=object)
train_x = list(training[:, 0])
train_y = list(training[:, 1])
model = Sequential()
model.add(Dense(128, input_shape=(len(train_x[0]),), activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(64, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(len(train_y[0]), activation='softmax'))
sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])
hist = model.fit(np.array(train_x), np.array(train_y), epochs=200, batch_size=5, verbose=1)
model.save('chatbot_model.model.h5')
print('Done')
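# --- Hedged inference sketch (not part of the original script) ----------------
# A minimal illustration of how the artifacts saved above (words.pkl, classes.pkl
# and chatbot_model.model.h5) could be reused to classify a sentence. The helper
# names below are illustrative, not from the original tutorial.
from tensorflow.keras.models import load_model

def bag_of_words(sentence, vocabulary):
    # Tokenize and lemmatize the sentence, then mark which vocabulary words occur.
    tokens = [lemmatizer.lemmatize(w.lower()) for w in nltk.word_tokenize(sentence)]
    return np.array([1 if w in tokens else 0 for w in vocabulary])

def predict_tag(sentence):
    vocabulary = pickle.load(open('words.pkl', 'rb'))
    tags = pickle.load(open('classes.pkl', 'rb'))
    clf = load_model('chatbot_model.model.h5')
    probabilities = clf.predict(np.array([bag_of_words(sentence, vocabulary)]))[0]
    return tags[int(np.argmax(probabilities))]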
|
import os
import json
import numpy as np
import pandas as pd
import datetime
import SAVIZ.situation_awareness_visualization as saviz
with open("tempfile.json", 'r') as f:
    json_file = f.read()
has_type = True
has_time = False
timeRange = [0, 1]
with open("tempconfig.json", 'r') as f:
config = f.readlines()[0]
has_type = json.loads(config)['has_type']
has_time = json.loads(config)['has_time']
if has_time == True:
timeRange[0] = json.loads(config)['time_min']
timeRange[1] = json.loads(config)['time_max']
timeRange[0] = datetime.datetime.strptime(timeRange[0], "%Y-%m-%dT%H:%M:%S")
timeRange[1] = datetime.datetime.strptime(timeRange[1], "%Y-%m-%dT%H:%M:%S")
data = json.loads(json_file)
if "time_value" in data:
for i in range(len(data["time_value"])):
data["time_value"][i] = datetime.datetime.strptime(data["time_value"][i], "%Y-%m-%dT%H:%M:%S")
# convert the json to dataframe
pd_data = pd.DataFrame.from_dict(data)
if "time_value" in data:
pd_data['time_value'] = pd.to_datetime(pd_data['time_value'])
sav = saviz.saviz_visualization(pd_data, has_type, has_time, timeRange)
# build tooltips
tp = sav.set_tooltips()
sp = sav.build()
|
"""
To understand why this file is here, please read:
http://cookiecutter-django.readthedocs.io/en/latest/faq.html#why-is-there-a-django-contrib-sites-directory-in-cookiecutter-django
"""
from django.conf import settings
from django.db import migrations
def update_site_forward(apps, schema_editor):
"""Set site domain and name."""
Site = apps.get_model("sites", "Site")
Site.objects.update_or_create(
id=settings.SITE_ID,
defaults={
"domain": "example.com",
"name": "django-react-hybrid",
},
)
def update_site_backward(apps, schema_editor):
"""Revert site domain and name to default."""
Site = apps.get_model("sites", "Site")
Site.objects.update_or_create(
id=settings.SITE_ID, defaults={"domain": "example.com", "name": "example.com"}
)
class Migration(migrations.Migration):
dependencies = [("sites", "0002_alter_domain_unique")]
operations = [migrations.RunPython(update_site_forward, update_site_backward)]
|
'''
several running examples, run with
python3 runGan.py 1 # the last number is the run case number
runcase == 1 inference a trained model
runcase == 2 calculate the metrics, and save the numbers in csv
runcase == 3 training TecoGAN
runcase == 4 training FRVSR
runcase == ... coming... data preparation and so on...
'''
import os, subprocess, sys, datetime, signal, shutil
runcase = int(sys.argv[1])
print ("Testing test case %d" % runcase)
def preexec(): # Don't forward signals.
os.setpgrp()
def mycall(cmd, block=False):
if not block:
return subprocess.Popen(cmd)
else:
return subprocess.Popen(cmd, preexec_fn = preexec)
def folder_check(path):
try_num = 1
oripath = path[:-1] if path.endswith('/') else path
while os.path.exists(path):
print("Delete existing folder " + path + "?(Y/N)")
decision = input()
if decision == "Y":
shutil.rmtree(path, ignore_errors=True)
break
else:
path = oripath + "_%d/"%try_num
try_num += 1
print(path)
return path
if( runcase == 0 ): # download inference data, trained models
# download the trained model
if(not os.path.exists("./model/")): os.mkdir("./model/")
cmd1 = "wget https://ge.in.tum.de/download/data/TecoGAN/model.zip -O model/model.zip;"
cmd1 += "unzip model/model.zip -d model; rm model/model.zip"
subprocess.call(cmd1, shell=True)
# download some test data
cmd2 = "wget https://ge.in.tum.de/download/data/TecoGAN/vid3_LR.zip -O LR/vid3.zip;"
cmd2 += "unzip LR/vid3.zip -d LR; rm LR/vid3.zip"
subprocess.call(cmd2, shell=True)
cmd2 = "wget https://ge.in.tum.de/download/data/TecoGAN/tos_LR.zip -O LR/tos.zip;"
cmd2 += "unzip LR/tos.zip -d LR; rm LR/tos.zip"
subprocess.call(cmd2, shell=True)
# download the ground-truth data
if(not os.path.exists("./HR/")): os.mkdir("./HR/")
cmd3 = "wget https://ge.in.tum.de/download/data/TecoGAN/vid4_HR.zip -O HR/vid4.zip;"
cmd3 += "unzip HR/vid4.zip -d HR; rm HR/vid4.zip"
subprocess.call(cmd3, shell=True)
cmd3 = "wget https://ge.in.tum.de/download/data/TecoGAN/tos_HR.zip -O HR/tos.zip;"
cmd3 += "unzip HR/tos.zip -d HR; rm HR/tos.zip"
subprocess.call(cmd3, shell=True)
elif( runcase == 1 ): # inference a trained model
dirstr = './results/' # the place to save the results
testpre = ['calendar'] # the test cases
if (not os.path.exists(dirstr)): os.mkdir(dirstr)
# run these test cases one by one:
for nn in range(len(testpre)):
cmd1 = ["python3", "main.py",
"--cudaID", "0", # set the cudaID here to use only one GPU
"--output_dir", dirstr, # Set the place to put the results.
"--summary_dir", os.path.join(dirstr, 'log/'), # Set the place to put the log.
"--mode","inference",
"--input_dir_LR", os.path.join("./LR/", testpre[nn]), # the LR directory
#"--input_dir_HR", os.path.join("./HR/", testpre[nn]), # the HR directory
# one of (input_dir_HR,input_dir_LR) should be given
"--output_pre", testpre[nn], # the subfolder to save current scene, optional
"--num_resblock", "16", # our model has 16 residual blocks,
# the pre-trained FRVSR and TecoGAN mini have 10 residual blocks
"--checkpoint", './model/TecoGAN', # the path of the trained model,
"--output_ext", "png" # png is more accurate, jpg is smaller
]
mycall(cmd1).communicate()
elif( runcase == 2 ): # calculate all metrics, and save the csv files, should use png
testpre = ["calendar"] # just put more scenes to evaluate all of them
dirstr = './results/' # the outputs
tarstr = './HR/' # the GT
tar_list = [(tarstr+_) for _ in testpre]
out_list = [(dirstr+_) for _ in testpre]
cmd1 = ["python3", "metrics.py",
"--output", dirstr+"metric_log/",
"--results", ",".join(out_list),
"--targets", ",".join(tar_list),
]
mycall(cmd1).communicate()
elif( runcase == 3 ): # Train TecoGAN
'''
In order to use the VGG as a perceptual loss,
we download from TensorFlow-Slim image classification model library:
https://github.com/tensorflow/models/tree/master/research/slim
'''
VGGPath = "model/" # the path for the VGG model, there should be a vgg_19.ckpt inside
VGGModelPath = os.path.join(VGGPath, "vgg_19.ckpt")
if(not os.path.exists(VGGPath)): os.mkdir(VGGPath)
if(not os.path.exists(VGGModelPath)):
# Download the VGG 19 model from
print("VGG model not found, downloading to %s"%VGGPath)
cmd0 = "wget http://download.tensorflow.org/models/vgg_19_2016_08_28.tar.gz -O " + os.path.join(VGGPath, "vgg19.tar.gz")
cmd0 += ";tar -xvf " + os.path.join(VGGPath,"vgg19.tar.gz") + " -C " + VGGPath + "; rm "+ os.path.join(VGGPath, "vgg19.tar.gz")
subprocess.call(cmd0, shell=True)
'''
Use our pre-trained FRVSR model. If you want to train one, try runcase 4, and update this path by:
FRVSRModel = "ex_FRVSRmm-dd-hh/model-500000"
'''
FRVSRModel = "model/ourFRVSR"
if(not os.path.exists(FRVSRModel+".data-00000-of-00001")):
# Download our pre-trained FRVSR model
print("pre-trained FRVSR model not found, downloading")
cmd0 = "wget http://ge.in.tum.de/download/2019-TecoGAN/FRVSR_Ours.zip -O model/ofrvsr.zip;"
cmd0 += "unzip model/ofrvsr.zip -d model; rm model/ofrvsr.zip"
subprocess.call(cmd0, shell=True)
TrainingDataPath = "./video_data/"
'''Prepare Training Folder'''
# path appendix, manually define it, or use the current datetime, now_str = "mm-dd-hh"
now_str = datetime.datetime.now().strftime("%m-%d-%H")
train_dir = folder_check("ex_TecoGAN%s/"%now_str)
# train TecoGAN, loss = l2 + VGG54 loss + A spatio-temporal Discriminator
cmd1 = ["python3", "main.py",
"--cudaID", "0", # set the cudaID here to use only one GPU
"--output_dir", train_dir, # Set the place to save the models.
"--summary_dir", os.path.join(train_dir,"log/"), # Set the place to save the log.
"--mode","train",
"--batch_size", "4" , # small, because GPU memory is not big
"--RNN_N", "10" , # train with a sequence of RNN_N frames, >6 is better, >10 is not necessary
"--movingFirstFrame", # a data augmentation
"--random_crop",
"--crop_size", "32",
"--learning_rate", "0.00005",
# -- learning_rate step decay, here it is not used --
"--decay_step", "500000",
"--decay_rate", "1.0", # 1.0 means no decay
"--stair",
"--beta", "0.9", # ADAM training parameter beta
"--max_iter", "500000", # 500k or more, the one we present is trained for 900k
"--save_freq", "10000", # the frequency we save models
# -- network architecture parameters --
"--num_resblock", "16", # FRVSR and TecoGANmini has num_resblock as 10. The TecoGAN has 16.
# -- VGG loss, disable with vgg_scaling < 0
"--vgg_scaling", "0.2",
"--vgg_ckpt", VGGModelPath, # necessary if vgg_scaling > 0
]
'''Video Training data:
    please update the TrainingDataPath according to ReadMe.md
input_video_pre is hard coded as scene in dataPrepare.py at line 142
str_dir is the starting index for training data
end_dir is the ending index for training data
end_dir+1 is the starting index for validation data
end_dir_val is the ending index for validation data
max_frm should be duration (in dataPrepare.py) -1
    queue_thread: how many CPU threads can be used for loading data during training
name_video_queue_capacity, video_queue_capacity: how much memory can be used
'''
cmd1 += [
"--input_video_dir", TrainingDataPath,
"--input_video_pre", "scene",
"--str_dir", "2000",
"--end_dir", "2250",
"--end_dir_val", "2290",
"--max_frm", "119",
# -- cpu memory for data loading --
"--queue_thread", "12",# Cpu threads for the data. >4 to speedup the training
"--name_video_queue_capacity", "1024",
"--video_queue_capacity", "1024",
]
'''
loading the pre-trained model from FRVSR can make the training faster
--checkpoint, path of the model, here our pre-trained FRVSR is given
    --pre_trained_model, to continue an old (maybe accidentally stopped) training,
pre_trained_model should be false, and checkpoint should be the last model such as
ex_TecoGANmm-dd-hh/model-xxxxxxx
To start a new and different training, pre_trained_model is True.
The difference here is
    whether to load the whole graph including ADAM training averages/momentums/ and so on
or just load existing pre-trained weights.
'''
cmd1 += [ # based on a pre-trained FRVSR model. Here we want to train a new adversarial training
"--pre_trained_model", # True
"--checkpoint", FRVSRModel,
]
# the following can be used to train TecoGAN continuously
# old_model = "model/ex_TecoGANmm-dd-hh/model-xxxxxxx"
# cmd1 += [ # Here we want to train continuously
# "--nopre_trained_model", # False
# "--checkpoint", old_model,
# ]
''' parameters for GAN training '''
cmd1 += [
"--ratio", "0.01", # the ratio for the adversarial loss from the Discriminator to the Generator
"--Dt_mergeDs", # if Dt_mergeDs == False, only use temporal inputs, so we have a temporal Discriminator
# else, use both temporal and spatial inputs, then we have a Dst, the spatial and temporal Discriminator
]
''' if the generator is pre-trained, to fade in the discriminator is usually more stable.
    the adversarial loss is scaled by a weight that starts at Dt_ratio_0 and
    increases by Dt_ratio_add per training step until it reaches Dt_ratio_max.
For example, fading Dst in smoothly in the first 4k steps is
"--Dt_ratio_max", "1.0", "--Dt_ratio_0", "0.0", "--Dt_ratio_add", "0.00025"
'''
cmd1 += [ # here, the fading in is disabled
"--Dt_ratio_max", "1.0",
"--Dt_ratio_0", "1.0",
"--Dt_ratio_add", "0.0",
]
''' Other Losses '''
cmd1 += [
"--pingpang", # our Ping-Pang loss
"--pp_scaling", "0.5", # the weight of the our bi-directional loss, 0.0~0.5
"--D_LAYERLOSS", # use feature layer losses from the discriminator
]
pid = mycall(cmd1, block=True)
try: # catch interruption for training
pid.communicate()
    except KeyboardInterrupt: # Ctrl + C to stop the current training; try to save the last model
print("runGAN.py: sending SIGINT signal to the sub process...")
pid.send_signal(signal.SIGINT)
# try to save the last model
pid.communicate()
print("runGAN.py: finished...")
elif( runcase == 4 ): # Train FRVSR, loss = l2 warp + l2 content
now_str = datetime.datetime.now().strftime("%m-%d-%H")
train_dir = folder_check("ex_FRVSR%s/"%now_str)
cmd1 = ["python3", "main.py",
"--cudaID", "0", # set the cudaID here to use only one GPU
"--output_dir", train_dir, # Set the place to save the models.
"--summary_dir", os.path.join(train_dir,"log/"), # Set the place to save the log.
"--mode","train",
"--batch_size", "4" , # small, because GPU memory is not big
"--RNN_N", "10" , # train with a sequence of RNN_N frames, >6 is better, >10 is not necessary
"--movingFirstFrame", # a data augmentation
"--random_crop",
"--crop_size", "32",
"--learning_rate", "0.00005",
# -- learning_rate step decay, here it is not used --
"--decay_step", "500000",
"--decay_rate", "1.0", # 1.0 means no decay
"--stair",
"--beta", "0.9", # ADAM training parameter beta
"--max_iter", "500000", # 500k is usually fine for FRVSR, GAN versions need more to be stable
"--save_freq", "10000", # the frequency we save models
# -- network architecture parameters --
"--num_resblock", "10", # a smaller model
"--ratio", "-0.01", # the ratio for the adversarial loss, negative means disabled
"--nopingpang",
]
'''Video Training data... Same as runcase 3...'''
TrainingDataPath = "./video_data/"
cmd1 += [
"--input_video_dir", TrainingDataPath,
"--input_video_pre", "scene",
"--str_dir", "2000",
"--end_dir", "2250",
"--end_dir_val", "2290",
"--max_frm", "119",
# -- cpu memory for data loading --
"--queue_thread", "12",# Cpu threads for the data. >4 to speedup the training
"--name_video_queue_capacity", "1024",
"--video_queue_capacity", "1024",
]
pid = mycall(cmd1, block=True)
try: # catch interruption for training
pid.communicate()
    except KeyboardInterrupt: # Ctrl + C to stop the current training; try to save the last model
print("runGAN.py: sending SIGINT signal to the sub process...")
pid.send_signal(signal.SIGINT)
# try to save the last model
pid.communicate()
print("runGAN.py: finished...")
|
from django.apps import AppConfig
class MycrudappConfig(AppConfig):
default_auto_field = 'django.db.models.BigAutoField'
name = 'mycrudApp'
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
:mod:`Quantulum` parser.
"""
# Standard library
import re
import logging
from fractions import Fraction
from collections import defaultdict
from math import pow
# Quantulum
from . import load
from . import regex as reg
from . import classes as cls
from . import disambiguate as dis
from . import language
def _get_parser(lang='en_US'):
"""
Get parser module for given language
:param lang:
:return:
"""
return language.get('parser', lang)
###############################################################################
def extract_spellout_values(text, lang='en_US'):
"""
Convert spelled out numbers in a given text to digits.
"""
return _get_parser(lang).extract_spellout_values(text)
###############################################################################
def substitute_values(text, values):
"""
    Substitute the previously extracted spelled-out values into the text and record the resulting index shifts.
"""
shift, final_text, shifts = 0, text, defaultdict(int)
for value in values:
first = value['old_span'][0] + shift
second = value['old_span'][1] + shift
final_text = final_text[0:first] + value['new_surface'] + \
final_text[second:]
shift += len(value['new_surface']) - len(value['old_surface'])
for char in range(first + 1, len(final_text)):
shifts[char] = shift
logging.debug('Text after numeric conversion: "%s"', final_text)
return final_text, shifts
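# Illustrative example of the shift bookkeeping above (the `values` entry is
# hand-built here; real entries come from extract_spellout_values):
#   substitute_values("I ran two km", [{"old_span": (6, 9), "old_surface": "two",
#                                       "new_surface": "2"}])
#   -> ("I ran 2 km", shifts)  where shifts[i] == -2 for every index after the digit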
###############################################################################
def get_values(item, lang='en_US'):
"""
Extract value from regex hit.
"""
def callback(pattern):
return ' %s' % (reg.unicode_fractions()[pattern.group(0)])
fracs = r'|'.join(reg.unicode_fractions())
value = item.group('value')
# Remove grouping operators
value = re.sub(
r'(?<=\d)[%s](?=\d{3})' % reg.grouping_operators_regex(lang), '',
value)
# Replace unusual exponents by e (including e)
value = re.sub(
r'(?<=\d)(%s)(e|E|10)\^?' % reg.multiplication_operators_regex(lang),
'e', value)
# calculate other exponents
value, factors = resolve_exponents(value)
logging.debug("After exponent resolution: {}".format(value))
    value = re.sub(fracs, callback, value, flags=re.IGNORECASE)
range_separator = re.findall(
r'\d+ ?((?:-\ )?(?:%s)) ?\d' % '|'.join(reg.ranges(lang)), value)
uncer_separator = re.findall(
r'\d+ ?(%s) ?\d' % '|'.join(reg.uncertainties(lang)), value)
fract_separator = re.findall(r'\d+/\d+', value)
value = re.sub(' +', ' ', value)
uncertainty = None
if range_separator:
# A range just describes an uncertain quantity
values = value.split(range_separator[0])
values = [
float(re.sub(r'-$', '', v)) * factors[i]
for i, v in enumerate(values)
]
if values[1] < values[0]:
raise ValueError(
"Invalid range, with second item being smaller than the first "
"item"
)
mean = sum(values) / len(values)
uncertainty = mean - min(values)
values = [mean]
elif uncer_separator:
values = [float(i) for i in value.split(uncer_separator[0])]
uncertainty = values[1] * factors[1]
values = [values[0] * factors[0]]
elif fract_separator:
values = value.split()
try:
if len(values) > 1:
values = [
float(values[0]) * factors[0] + float(Fraction(values[1]))
]
else:
values = [float(Fraction(values[0]))]
except ZeroDivisionError as e:
raise ValueError('{} is not a number'.format(values[0]), e)
else:
values = [float(re.sub(r'-$', '', value)) * factors[0]]
logging.debug('\tUncertainty: %s', uncertainty)
logging.debug('\tValues: %s', values)
return uncertainty, values
###############################################################################
def resolve_exponents(value, lang='en_US'):
"""Resolve unusual exponents (like 2^4) and return substituted string and
factor
Params:
value: str, string with only one value
Returns:
str, string with basis and exponent removed
array of float, factors for multiplication
"""
factors = []
matches = re.finditer(
reg.number_pattern_groups(lang), value, re.IGNORECASE | re.VERBOSE)
for item in matches:
if item.group('base') and item.group('exponent'):
base = item.group('base')
exp = item.group('exponent')
if base in ['e', 'E']:
# already handled by float
factors.append(1)
continue
# exp = '10'
# Expect that in a pure decimal base,
# either ^ or superscript notation is used
if re.match(r'\d+\^?', base):
if not ('^' in base or re.match(
r'[%s]' % reg.unicode_superscript_regex(), exp)):
factors.append(1)
continue
for superscript, substitute in reg.unicode_superscript().items():
                exp = exp.replace(superscript, substitute)
exp = float(exp)
base = float(base.replace('^', ''))
factor = pow(base, exp)
stripped = str(value).replace(item.group('scale'), '')
value = stripped
factors.append(factor)
logging.debug("Replaced {} by factor {}".format(
item.group('scale'), factor))
else:
factors.append(1)
continue
return value, factors
###############################################################################
def build_unit_name(dimensions, lang='en_US'):
"""
Build the name of the unit from its dimensions.
"""
name = _get_parser(lang).name_from_dimensions(dimensions)
logging.debug('\tUnit inferred name: %s', name)
return name
###############################################################################
def get_unit_from_dimensions(dimensions, text, lang='en_US'):
"""
Reconcile a unit based on its dimensionality.
"""
key = load.get_key_from_dimensions(dimensions)
try:
unit = load.units(lang).derived[key]
except KeyError:
logging.debug(u'\tCould not find unit for: %s', key)
unit = cls.Unit(
name=build_unit_name(dimensions, lang),
dimensions=dimensions,
entity=get_entity_from_dimensions(dimensions, text, lang))
# Carry on original composition
unit.original_dimensions = dimensions
return unit
def name_from_dimensions(dimensions, lang='en_US'):
"""
Build the name of a unit from its dimensions.
Param:
dimensions: List of dimensions
"""
return _get_parser(lang).name_from_dimensions(dimensions)
def infer_name(unit):
"""
Return unit name based on dimensions
:return: new name of this unit
"""
name = name_from_dimensions(unit.dimensions) if unit.dimensions else None
return name
###############################################################################
def get_entity_from_dimensions(dimensions, text, lang='en_US'):
"""
Infer the underlying entity of a unit (e.g. "volume" for "m^3") based on
its dimensionality.
"""
new_derived = [{
'base': load.units(lang).names[i['base']].entity.name,
'power': i['power']
} for i in dimensions]
final_derived = sorted(new_derived, key=lambda x: x['base'])
key = load.get_key_from_dimensions(final_derived)
ent = dis.disambiguate_entity(key, text, lang)
if ent is None:
logging.debug('\tCould not find entity for: %s', key)
ent = cls.Entity(name='unknown', dimensions=new_derived)
return ent
###############################################################################
def parse_unit(item, unit, slash, lang='en_US'):
"""
Parse surface and power from unit text.
"""
return _get_parser(lang).parse_unit(item, unit, slash)
###############################################################################
def get_unit(item, text, lang='en_US'):
"""
Extract unit from regex hit.
"""
group_units = ['prefix', 'unit1', 'unit2', 'unit3', 'unit4']
group_operators = ['operator1', 'operator2', 'operator3', 'operator4']
# How much of the end is removed because of an "incorrect" regex match
unit_shortening = 0
item_units = [item.group(i) for i in group_units if item.group(i)]
if len(item_units) == 0:
unit = load.units(lang).names['dimensionless']
else:
derived, slash = [], False
multiplication_operator = False
for index in range(0, 5):
unit = item.group(group_units[index])
operator_index = None if index < 1 else group_operators[index - 1]
operator = None if index < 1 else item.group(operator_index)
# disallow spaces as operators in units expressed in their symbols
# Enforce consistency among multiplication and division operators
# Single exceptions are colloquial number abbreviations (5k miles)
if operator in reg.multiplication_operators(lang) or (
operator is None and unit and
not (index == 1 and unit in reg.suffixes(lang))):
if multiplication_operator != operator and not (
index == 1 and str(operator).isspace()):
if multiplication_operator is False:
multiplication_operator = operator
else:
# Cut if inconsistent multiplication operator
# treat the None operator differently - remove the
# whole word of it
if operator is None:
# For this, use the last consistent operator
# (before the current) with a space
# which should always be the preceding operator
derived.pop()
operator_index = group_operators[index - 2]
# Remove (original length - new end) characters
unit_shortening = item.end() - item.start(
operator_index)
logging.debug(
"Because operator inconsistency, cut from "
"operator: '{}', new surface: {}"
.format(
operator, text[item.start():item.end() -
unit_shortening]))
break
# Determine whether a negative power has to be applied to following
# units
if operator and not slash:
slash = any(
i in operator for i in reg.division_operators(lang))
# Determine which unit follows
if unit:
unit_surface, power = parse_unit(item, unit, slash, lang)
base = dis.disambiguate_unit(unit_surface, text, lang)
derived += [{
'base': base,
'power': power,
'surface': unit_surface
}]
unit = get_unit_from_dimensions(derived, text, lang)
logging.debug('\tUnit: %s', unit)
logging.debug('\tEntity: %s', unit.entity)
return unit, unit_shortening
###############################################################################
def get_surface(shifts, orig_text, item, text, unit_shortening=0):
"""
Extract surface from regex hit.
"""
# handle cut end
span = (item.start(), item.end() - unit_shortening)
logging.debug('\tInitial span: %s ("%s")', span, text[span[0]:span[1]])
real_span = (span[0] - shifts[span[0]], span[1] - shifts[span[1] - 1])
surface = orig_text[real_span[0]:real_span[1]]
logging.debug('\tShifted span: %s ("%s")', real_span, surface)
while any(surface.endswith(i) for i in [' ', '-']):
surface = surface[:-1]
real_span = (real_span[0], real_span[1] - 1)
while surface.startswith(' '):
surface = surface[1:]
real_span = (real_span[0] + 1, real_span[1])
logging.debug('\tFinal span: %s ("%s")', real_span, surface)
return surface, real_span
###############################################################################
def is_quote_artifact(orig_text, span):
"""
Distinguish between quotes and units.
"""
res = False
cursor = re.finditer(r'["\'][^ .,:;?!()*+-].*?["\']', orig_text)
for item in cursor:
if span[0] <= item.span()[1] <= span[1]:
res = item
break
return res
###############################################################################
def build_quantity(orig_text,
text,
item,
values,
unit,
surface,
span,
uncert,
lang='en_US'):
"""
Build a Quantity object out of extracted information.
Takes care of caveats and common errors
"""
return _get_parser(lang).build_quantity(orig_text, text, item, values,
unit, surface, span, uncert)
###############################################################################
def clean_text(text, lang='en_US'):
"""
Clean text before parsing.
"""
# Replace a few nasty unicode characters with their ASCII equivalent
maps = {'×': 'x', '–': '-', '−': '-'}
for element in maps:
text = text.replace(element, maps[element])
# Language specific cleaning
text = _get_parser(lang).clean_text(text)
logging.debug('Clean text: "%s"', text)
return text
###############################################################################
def parse(text, lang='en_US', verbose=False):
"""
Extract all quantities from unstructured text.
"""
log_format = '%(asctime)s --- %(message)s'
logging.basicConfig(format=log_format)
root = logging.getLogger()
if verbose: # pragma: no cover
level = root.level
root.setLevel(logging.DEBUG)
logging.debug('Verbose mode')
orig_text = text
logging.debug('Original text: "%s"', orig_text)
text = clean_text(text, lang)
values = extract_spellout_values(text, lang)
text, shifts = substitute_values(text, values)
quantities = []
for item in reg.units_regex(lang).finditer(text):
groups = dict(
[i for i in item.groupdict().items() if i[1] and i[1].strip()])
logging.debug(u'Quantity found: %s', groups)
try:
uncert, values = get_values(item, lang)
unit, unit_shortening = get_unit(item, text)
surface, span = get_surface(shifts, orig_text, item, text,
unit_shortening)
objs = build_quantity(orig_text, text, item, values, unit, surface,
span, uncert, lang)
if objs is not None:
quantities += objs
except ValueError as err:
logging.debug('Could not parse quantity: %s', err)
if verbose: # pragma: no cover
root.level = level
return quantities
###############################################################################
def inline_parse(text, verbose=False): # pragma: no cover
"""
Extract all quantities from unstructured text.
"""
parsed = parse(text, verbose=verbose)
shift = 0
for quantity in parsed:
index = quantity.span[1] + shift
to_add = u' {' + str(quantity) + u'}'
text = text[0:index] + to_add + text[index:]
shift += len(to_add)
return text
###############################################################################
def inline_parse_and_replace(text, lang='en_US',
verbose=False): # pragma: no cover
"""
Parse text and replace with the standardised quantities as string
"""
parsed = parse(text, lang=lang, verbose=verbose)
shift = 0
for quantity in parsed:
index_start = quantity.span[0] + shift
index_end = quantity.span[1] + shift
to_add = str(quantity)
text = text[0:index_start] + to_add + text[index_end:]
shift += len(to_add) - (quantity.span[1] - quantity.span[0])
return text
###############################################################################
def inline_parse_and_expand(text, lang='en_US', verbose=False):
"""
    Parse text and replace quantities with a speakable version
"""
    parsed = parse(text, lang=lang, verbose=verbose)
shift = 0
for quantity in parsed:
index_start = quantity.span[0] + shift
index_end = quantity.span[1] + shift
to_add = quantity.to_spoken()
text = text[0:index_start] + to_add + text[index_end:]
shift += len(to_add) - (quantity.span[1] - quantity.span[0])
return text
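###############################################################################
# Hedged usage sketch (not part of the original module): a minimal driver for
# the top-level API defined above. Only attributes demonstrated in this file
# (quantity.span, str(quantity)) are used.
if __name__ == '__main__':  # pragma: no cover
    for quantity in parse('The tank holds 2.5 litres of water'):
        print(quantity.span, str(quantity))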
|
from Plots import *
#PlotNearRays( measure='SM' )
PlotFarRays( measure='SM', plot_mean=True, uniform=True, save_mean=True )# False )
#PlotFarRays( measure='SM', mean=True, uniform=True, overestimate=True )
PlotFarRays( measure='DM', plot_mean=True, uniform=True, save_mean=True )# False )
#PlotFarRays( measure='DM', plot_mean=True, overestimate=True )
models = ['primordial', 'astrophysical', 'B9b', 'B9.5b', 'B10.0b', 'B10.5b', 'B11b', 'B13b', 'B15b', 'B17b' ][:4]
models=['primordial', 'astrophysical_mean', 'astrophysical_median', 'alpha1-3rd', 'alpha2-3rd', 'alpha3-3rd', 'alpha4-3rd', 'alpha5-3rd', 'alpha6-3rd', 'alpha7-3rd', 'alpha8-3rd', 'alpha9-3rd'] ## models to be considered for the magnetic field, provided as B~rho relations in relations_file
PlotFarRays( model=models[2], measure='RM', plot_mean=True, plot_stddev=False, save_mean=False, linestyle=":" )
PlotFarRays( model=models[1], measure='RM', plot_mean=True, plot_stddev=False, save_mean=False )
for model in models[3::]:
PlotFarRays( model=model, measure='RM', plot_mean=True, plot_stddev=False, save_mean=False , linestyle='--')
#PlotFarRays( measure='RM', mean=True, overestimate=True )
PlotFarRays( model=models[0], measure='RM', plot_mean=True, plot_stddev=False, uniform=True, z_max=6, linestyle=':', save_mean=True )# False )
|
# --------------------------------------------------------
# Compute metrics for trackers using ground-truth data
# Written by Wang Xueyang (wangxuey19@mails.tsinghua.edu.cn), Version 20200321
# Based on motmetrics (https://github.com/cheind/py-motmetrics/)
# --------------------------------------------------------
import argparse
from collections import OrderedDict
import glob
import logging
import os
from pathlib import Path
import motmetrics as mm
from panda_utils import generate_mot_anno
def parse_args():
"""Defines and parses command-line arguments."""
parser = argparse.ArgumentParser(description="""
Compute metrics for trackers using ground-truth data.
Files
-----
All result files have to comply with the
format described in
Milan, Anton, et al.
"Mot16: A benchmark for multi-object tracking."
arXiv preprint arXiv:1603.00831 (2016).
https://motchallenge.net/
Structure
---------
Layout for ground truth data
<GT_ROOT>/<SEQUENCE_1>/tracks.json
<GT_ROOT>/<SEQUENCE_1>/seqinfo.json
<GT_ROOT>/<SEQUENCE_2>/tracks.json
<GT_ROOT>/<SEQUENCE_2>/seqinfo.json
...
Layout for test data
<TEST_ROOT>/<SEQUENCE_1>.txt
<TEST_ROOT>/<SEQUENCE_2>.txt
...
Sequences of ground truth and test will be matched according to the `<SEQUENCE_X>`
string.""", formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('groundtruths', type=str, help='Directory containing ground truth files.')
parser.add_argument('tests', type=str, help='Directory containing tracker result files')
    parser.add_argument('--transfered', type=str, help='Directory containing transferred ground-truth files', default='transfered')
parser.add_argument('--loglevel', type=str, help='Log level', default='info')
parser.add_argument('--fmt', type=str, help='Data format', default='mot15-2D')
parser.add_argument('--solver', type=str, help='LAP solver to use for matching between frames.')
parser.add_argument('--id_solver', type=str, help='LAP solver to use for ID metrics. Defaults to --solver.')
parser.add_argument('--exclude_id', dest='exclude_id', default=False, action='store_true',
help='Disable ID metrics')
return parser.parse_args()
def compare_dataframes(gts, ts):
"""Builds accumulator for each sequence."""
accs = []
names = []
for k, tsacc in ts.items():
if k in gts:
logging.info('Comparing %s...', k)
accs.append(mm.utils.compare_to_groundtruth(gts[k], tsacc, 'iou', distth=0.5))
names.append(k)
else:
logging.warning('No ground truth for %s, skipping.', k)
return accs, names
def main():
# pylint: disable=missing-function-docstring
args = parse_args()
# transfer ground truth file from PANDA format to MOTChallenge format
generate_mot_anno(args.groundtruths, args.transfered)
loglevel = getattr(logging, args.loglevel.upper(), None)
if not isinstance(loglevel, int):
raise ValueError('Invalid log level: {} '.format(args.loglevel))
logging.basicConfig(level=loglevel, format='%(asctime)s %(levelname)s - %(message)s', datefmt='%I:%M:%S')
if args.solver:
mm.lap.default_solver = args.solver
gtfiles = glob.glob(os.path.join(args.transfered, '*.txt'))
tsfiles = [f for f in glob.glob(os.path.join(args.tests, '*.txt')) if not os.path.basename(f).startswith('eval')]
logging.info('Found %d groundtruths and %d test files.', len(gtfiles), len(tsfiles))
logging.info('Available LAP solvers %s', str(mm.lap.available_solvers))
logging.info('Default LAP solver \'%s\'', mm.lap.default_solver)
logging.info('Loading files.')
gt = OrderedDict([(os.path.splitext(Path(f).parts[-1])[0], mm.io.loadtxt(f, fmt=args.fmt, min_confidence=1)) for f in gtfiles])
ts = OrderedDict([(os.path.splitext(Path(f).parts[-1])[0], mm.io.loadtxt(f, fmt=args.fmt)) for f in tsfiles])
mh = mm.metrics.create()
accs, names = compare_dataframes(gt, ts)
metrics = list(mm.metrics.motchallenge_metrics)
if args.exclude_id:
metrics = [x for x in metrics if not x.startswith('id')]
logging.info('Running metrics')
if args.id_solver:
mm.lap.default_solver = args.id_solver
summary = mh.compute_many(accs, names=names, metrics=metrics, generate_overall=True)
print(mm.io.render_summary(summary, formatters=mh.formatters, namemap=mm.io.motchallenge_metric_names))
logging.info('Completed')
'''we use MOTA, MOTP, IDF1, MT, ML, FP, FN, IDs, Frag to evaluate'''
MOTA = summary['mota']['OVERALL']
MOTP = 1 - summary['motp']['OVERALL']
IDF1 = summary['idf1']['OVERALL']
MT = summary['mostly_tracked']['OVERALL']
ML = summary['mostly_lost']['OVERALL']
FP = summary['num_false_positives']['OVERALL']
FN = summary['num_misses']['OVERALL']
IDs = summary['num_switches']['OVERALL']
Frag = summary['num_fragmentations']['OVERALL']
print(MOTA, MOTP, IDF1, MT, ML, FP, FN, IDs, Frag)
if __name__ == '__main__':
main()
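# Example invocation (illustrative; the script name and paths are placeholders):
#   python eval_mot.py <GT_ROOT> <TEST_ROOT> --transfered ./transfered --loglevel info
# where <GT_ROOT> holds <SEQUENCE>/tracks.json + seqinfo.json and <TEST_ROOT> holds
# <SEQUENCE>.txt tracker results, as described in the argparse help text above.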
|
# -*- coding: utf-8 -*-
"""
Created on Thu Nov 14 11:30:55 2019
@author: Mortis Huang
"""
# import the necessary packages
from PIL import Image
import numpy as np
import datetime
import os
import pandas as pd
#%% Set the output file location
run_data = datetime.datetime.now().strftime("%Y_%m_%d")
result_path=r"SFig11_{}/".format(run_data)
if not os.path.exists(result_path):
os.makedirs(result_path)
#%% Read Target Folders' Path
labels=['neutrophyl','lymphocyte']
#base_path = r'E:\DeepLearning\Mikami\Generate\White Cell'
base_path = r'.\Whitecell'
file_list_lym = []
file_list_neu = []
for root, dirs, files in os.walk(base_path):
for file in files:
if file.endswith(".tif"):
filename = os.path.join(root, file)
file_size = os.path.getsize(filename)
category_name = os.path.basename(root)
if category_name == labels[0]:
file_list_neu.append(filename)
else :
file_list_lym.append(filename)
#%% Sort the file list
#file_list_lym = sorted(file_list_lym, key=lambda x:int(x.split('_')[-1].split('.')[0]))
#files_name_lym = sorted(files, key=lambda x:int(x.split('_')[-1].split('.')[0]))
#%% Read image files and put in a list
data_number = 11000
label='lymphocyte' # 'lymphocyte' or 'neutrophyl'
data_of_lym_cell = []
for i, filename in enumerate(file_list_lym[:data_number]):
    # Re-read the image file (to be safe) and compute the nucleus area
    im = Image.open(filename)
    imarray = np.array(im)
    threshold = np.max(imarray) * 0.35
    imarray[imarray < threshold] = 0
    image = imarray[:, :, 0]
    cell_area = np.count_nonzero(imarray)
    # Store the result
data_of_lym_cell.append(cell_area)
label='neutrophyl' # 'lymphocyte' or 'neutrophyl'
data_of_neu_name = []
data_of_neu_cell = []
for i, filename in enumerate(file_list_neu[:data_number]):
    # Re-read the image file (to be safe) and compute the nucleus area
    im = Image.open(filename)
    imarray = np.array(im)
    threshold = np.max(imarray) * 0.35
    imarray[imarray < threshold] = 0
    image = imarray[:, :, 0]
    cell_area = np.count_nonzero(imarray)
    # Store the result
data_of_neu_cell.append(cell_area)
#%% Remove zeros
data_of_lym_cell=np.asarray(data_of_lym_cell)
data_of_neu_cell=np.asarray(data_of_neu_cell)
data_of_lym_cell=data_of_lym_cell[data_of_lym_cell>0]
data_of_neu_cell=data_of_neu_cell[data_of_neu_cell>0]
#%% Save the Results
data = {'lymphocyte':data_of_lym_cell}
df1 = pd.DataFrame(data)
data = {'neutrophyl':data_of_neu_cell}
df2 = pd.DataFrame(data)
df_all = pd.concat([df1,df2], ignore_index=True, axis=1)
df_all.columns = ["Lymphocyte","Neutrophyl"]
writer = pd.ExcelWriter('{}SFig11_35_CellArea.xlsx'.format(result_path))
#writer = pd.ExcelWriter('CellArea.xlsx')
df_all.to_excel(writer,'Sheet 1',float_format='%.2f') # float_format
writer.save()
|
MAXIMUM_ARRAY_LENGTH = 1024
def Main(operation, args):
if operation == 'DynamicListTest':
return DynamicListTest()
return False
def DynamicListTest():
dynamicList = DynamicList()
added = DynamicAppend(dynamicList, 1)
assert(added)
count = len(dynamicList["packed"][0]["array"])
assert(count == 1)
added = DynamicAppend(dynamicList, 2)
assert(added)
count = len(dynamicList["packed"][0]["array"])
assert(count == 2)
removed = DynamicRemove(dynamicList, 1)
assert(removed)
count = len(dynamicList["packed"][0]["array"])
assert(count == 1)
items = dynamicList["items"]
assert(items == 1)
removed = DynamicRemove(dynamicList, 2)
assert(removed)
count = len(dynamicList["packed"][0]["array"])
assert(count == 0)
items = dynamicList["items"]
assert(items == 0)
return True
#### Dynamic/PackedList ####
def DynamicList():
'''
Creates a new DynamicList
'''
dynamic = {
"packed": [],
"items": 0
}
return dynamic
def DynamicAppend(dynamic, itm):
'''
Appends an item to a packed list or creates a new one.
    :param dynamic: The DynamicList
:param itm: The item to add to the DynamicList
'''
packedArr = dynamic["packed"]
maximum = MAXIMUM_ARRAY_LENGTH * 7 - 6
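    # This cap appears to allow up to seven nested layers per PackedList: the top
    # layer holds MAXIMUM_ARRAY_LENGTH items and each wrap adds MAXIMUM_ARRAY_LENGTH - 1,
    # i.e. 1024 + 6 * 1023 == 1024 * 7 - 6 (see get_layers below).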
length = len(packedArr)
for i in range(length):
packed = packedArr[i]
if packed["items"] < maximum:
PackedAppend(packed, itm)
dynamic["packed"][i] = packed
dynamic["items"] += 1
return True
if length < MAXIMUM_ARRAY_LENGTH:
packed = PackedList()
PackedAppend(packed, itm)
packedArr.append(packed)
dynamic["items"] += 1
return True
return False
def DynamicRemove(dynamic, itm):
'''
Removes an item from the DynamicList.
    :param dynamic: The DynamicList
:param itm: The item to remove from the DynamicList
'''
packedArr = dynamic["packed"]
length = len(packedArr)
for i in range(length):
packed = packedArr[i]
if PackedRemove(packed, itm):
dynamic["packed"][i] = packed
dynamic["items"] -= 1
return True
return False
def PackedList():
'''
Creates a new PackedList
'''
packed = {
"array": [],
"items": 0
}
return packed
def PackedAppend(packed, itm):
'''
Appends an item to the PackedList.array or wraps it in a new layer if full.
Increments the PackedList.items count.
:param packed: The PackedList
:param itm: The item to add to the PackedList
'''
array = packed["array"]
length = len(array)
if length == MAXIMUM_ARRAY_LENGTH:
tmp = [array]
tmp.append(itm)
array = tmp
else:
array.append(itm)
packed["array"] = array
packed["items"] += 1
def PackedRemove(packed, itm):
'''
Removes an item from the PackedList.array.
:param packed: The PackedList
:param itm: The item to remove from the PackedList
'''
length = len(packed["array"])
if length == 0:
return False
if not do_swap(packed, itm): # Item not found
return False
if length == 2: # Peel off layer
peel(packed)
else: # Remove last item
packed["array"] = remove_last(length, packed["array"])
packed["items"] -= 1
return True
def peel(packed):
'''
Peels a layer off of the PackedList
:param packed: The PackedList
'''
packed["array"] = packed["array"][0]
def remove_last(length, lst):
'''
Removes the last item from a list.
:param lst: The list to remove the item from
'''
nLst = []
if length > 1:
for i in range(length - 1):
nLst.append(lst[i])
return nLst
def do_swap(packed, itm):
'''
Swaps the last item in the PackedList.array with the item.
:param packed: The PackedList
:param itm: The item to swap
'''
array = packed["array"]
items = packed["items"]
length = len(array)
    if length == 0:
        return False
    if length == 1:
        return True
    last = array[length - 1]
    if last == itm:
        return True
layers = get_layers(items)
if do_find(array, length, layers, itm, last):
array[length - 1] = itm
return True
return False
def do_find(array, length, layers, itm, last):
'''
Finds the item in the array and swaps it with the last item
:param array: The PackedList.array
:param length: The length of the PackedList.array
:param layers: The amount of layers in the PackedList.array
:param itm: The item to swap
:param last: The last item in the PackedList.array
'''
for i in range(length):
item = array[i]
if i == 0 and layers > 1:
if do_find(item, len(item), layers - 1, itm, last):
return True
        elif item == itm:
array[i] = last
return True
return False
def get_layers(items):
'''
    Calculates the number of layers in the PackedList
    :param items: The number of items in the PackedList
'''
x = items
x -= MAXIMUM_ARRAY_LENGTH
layers = 1
while x > 0:
x -= (MAXIMUM_ARRAY_LENGTH - 1)
layers += 1
return layers
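# --- Illustrative check of the layer arithmetic (plain-Python sketch, assuming
# MAXIMUM_ARRAY_LENGTH == 1024; not part of the original contract) -------------
if __name__ == '__main__':
    assert get_layers(1024) == 1   # everything fits in the top-level array
    assert get_layers(1025) == 2   # one more item forces a wrap in PackedAppend
    assert get_layers(7162) == 7   # 1024 * 7 - 6, the cap used in DynamicAppend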
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class NetworkSecurityGroupsOperations:
"""NetworkSecurityGroupsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2018_07_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def _delete_initial(
self,
resource_group_name: str,
network_security_group_name: str,
**kwargs
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-07-01"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkSecurityGroupName': self._serialize.url("network_security_group_name", network_security_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkSecurityGroups/{networkSecurityGroupName}'} # type: ignore
async def begin_delete(
self,
resource_group_name: str,
network_security_group_name: str,
**kwargs
) -> AsyncLROPoller[None]:
"""Deletes the specified network security group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_security_group_name: The name of the network security group.
:type network_security_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
network_security_group_name=network_security_group_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkSecurityGroups/{networkSecurityGroupName}'} # type: ignore
async def get(
self,
resource_group_name: str,
network_security_group_name: str,
expand: Optional[str] = None,
**kwargs
) -> "models.NetworkSecurityGroup":
"""Gets the specified network security group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_security_group_name: The name of the network security group.
:type network_security_group_name: str
:param expand: Expands referenced resources.
:type expand: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: NetworkSecurityGroup, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2018_07_01.models.NetworkSecurityGroup
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.NetworkSecurityGroup"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-07-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkSecurityGroupName': self._serialize.url("network_security_group_name", network_security_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('NetworkSecurityGroup', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkSecurityGroups/{networkSecurityGroupName}'} # type: ignore
async def _create_or_update_initial(
self,
resource_group_name: str,
network_security_group_name: str,
parameters: "models.NetworkSecurityGroup",
**kwargs
) -> "models.NetworkSecurityGroup":
cls = kwargs.pop('cls', None) # type: ClsType["models.NetworkSecurityGroup"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-07-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkSecurityGroupName': self._serialize.url("network_security_group_name", network_security_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'NetworkSecurityGroup')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('NetworkSecurityGroup', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('NetworkSecurityGroup', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkSecurityGroups/{networkSecurityGroupName}'} # type: ignore
async def begin_create_or_update(
self,
resource_group_name: str,
network_security_group_name: str,
parameters: "models.NetworkSecurityGroup",
**kwargs
) -> AsyncLROPoller["models.NetworkSecurityGroup"]:
"""Creates or updates a network security group in the specified resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_security_group_name: The name of the network security group.
:type network_security_group_name: str
:param parameters: Parameters supplied to the create or update network security group
operation.
:type parameters: ~azure.mgmt.network.v2018_07_01.models.NetworkSecurityGroup
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either NetworkSecurityGroup or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2018_07_01.models.NetworkSecurityGroup]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["models.NetworkSecurityGroup"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
network_security_group_name=network_security_group_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('NetworkSecurityGroup', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkSecurityGroups/{networkSecurityGroupName}'} # type: ignore
async def _update_tags_initial(
self,
resource_group_name: str,
network_security_group_name: str,
parameters: "models.TagsObject",
**kwargs
) -> "models.NetworkSecurityGroup":
cls = kwargs.pop('cls', None) # type: ClsType["models.NetworkSecurityGroup"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-07-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._update_tags_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkSecurityGroupName': self._serialize.url("network_security_group_name", network_security_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'TagsObject')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('NetworkSecurityGroup', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_update_tags_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkSecurityGroups/{networkSecurityGroupName}'} # type: ignore
async def begin_update_tags(
self,
resource_group_name: str,
network_security_group_name: str,
parameters: "models.TagsObject",
**kwargs
) -> AsyncLROPoller["models.NetworkSecurityGroup"]:
"""Updates a network security group tags.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_security_group_name: The name of the network security group.
:type network_security_group_name: str
:param parameters: Parameters supplied to update network security group tags.
:type parameters: ~azure.mgmt.network.v2018_07_01.models.TagsObject
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either NetworkSecurityGroup or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2018_07_01.models.NetworkSecurityGroup]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["models.NetworkSecurityGroup"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._update_tags_initial(
resource_group_name=resource_group_name,
network_security_group_name=network_security_group_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('NetworkSecurityGroup', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkSecurityGroups/{networkSecurityGroupName}'} # type: ignore
def list_all(
self,
**kwargs
) -> AsyncIterable["models.NetworkSecurityGroupListResult"]:
"""Gets all network security groups in a subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either NetworkSecurityGroupListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2018_07_01.models.NetworkSecurityGroupListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.NetworkSecurityGroupListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-07-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_all.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('NetworkSecurityGroupListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_all.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/networkSecurityGroups'} # type: ignore
def list(
self,
resource_group_name: str,
**kwargs
) -> AsyncIterable["models.NetworkSecurityGroupListResult"]:
"""Gets all network security groups in a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either NetworkSecurityGroupListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2018_07_01.models.NetworkSecurityGroupListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.NetworkSecurityGroupListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-07-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('NetworkSecurityGroupListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkSecurityGroups'} # type: ignore
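    # Hedged usage sketch (not part of the generated operations class). The names
    # below are assumptions: an aio NetworkManagementClient is expected to expose
    # this class as `client.network_security_groups`.
    #
    # import asyncio
    # from azure.identity.aio import DefaultAzureCredential
    # from azure.mgmt.network.aio import NetworkManagementClient
    #
    # async def example(subscription_id):
    #     async with NetworkManagementClient(DefaultAzureCredential(), subscription_id) as client:
    #         nsg_params = {"location": "westus"}  # or a NetworkSecurityGroup model instance
    #         poller = await client.network_security_groups.begin_create_or_update(
    #             "my-rg", "my-nsg", nsg_params)
    #         nsg = await poller.result()  # wait for the long-running operation to finish
    #         fetched = await client.network_security_groups.get("my-rg", "my-nsg")
    #         async for item in client.network_security_groups.list("my-rg"):
    #             print(item.name)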
|
from agents.common import PLAYER1, PLAYER2, initialize_game_state, evaluate_antidiagonals, is_player_blocking_opponent, \
is_player_winning
def test_evaluate_antidiagonals_uppertriangle_True_Player1_is_player_blocking_opponent():
game = initialize_game_state()
num_rows = game.shape[0]
num_cols = game.shape[1]
last_col = 0
for row in range(0, num_rows):
last_row = 0
for col in range(0, num_cols):
if last_row < 0:
break
if col == 0 and row > 0:
game[0][last_col] = PLAYER2
game[row][col] = PLAYER2
elif col < 3:
game[row - col][col] = PLAYER2
elif col < 4:
game[row - col][col] = PLAYER1
else:
game[row - col + 1][col - 1] = PLAYER2
game[row - col][col] = PLAYER1
last_row = row - col - 1
last_col = col
if col > 2:
assert evaluate_antidiagonals(game, PLAYER1, is_player_blocking_opponent) == True
assert evaluate_antidiagonals(game, PLAYER2, is_player_blocking_opponent) == False
if col < 3:
assert evaluate_antidiagonals(game, PLAYER1, is_player_blocking_opponent) == False
assert evaluate_antidiagonals(game, PLAYER2, is_player_blocking_opponent) == False
def test_evaluate_antidiagonals_uppertriangle_True_Player2_is_player_blocking_opponent():
game = initialize_game_state()
num_rows = game.shape[0]
num_cols = game.shape[1]
last_col = 0
for row in range(0, num_rows):
last_row = 0
for col in range(0, num_cols):
if last_row < 0:
break
if col == 0 and row > 0:
game[0][last_col] = PLAYER1
game[row][col] = PLAYER1
elif col < 3:
game[row - col][col] = PLAYER1
elif col < 4:
game[row - col][col] = PLAYER2
else:
game[row - col + 1][col - 1] = PLAYER1
game[row - col][col] = PLAYER2
last_row = row - col - 1
last_col = col
if col > 2:
assert evaluate_antidiagonals(game, PLAYER1, is_player_blocking_opponent) == False
assert evaluate_antidiagonals(game, PLAYER2, is_player_blocking_opponent) == True
if col < 3:
assert evaluate_antidiagonals(game, PLAYER1, is_player_blocking_opponent) == False
assert evaluate_antidiagonals(game, PLAYER2, is_player_blocking_opponent) == False
def test_evaluate_antidiagonals_uppertriangle_False_is_player_blocking_opponent():
game = initialize_game_state()
num_rows = game.shape[0]
num_cols = game.shape[1]
for row in range(0, num_rows):
last_row = 0
for col in range(0, num_cols):
if last_row < 0:
break
game[row - col][col] = PLAYER2
last_row = row - col - 1
assert evaluate_antidiagonals(game, PLAYER1, is_player_blocking_opponent) == False
assert evaluate_antidiagonals(game, PLAYER2, is_player_blocking_opponent) == False
def test_evaluate_antidiagonals_lowertriangle_True_Player1_is_player_blocking_opponent():
game = initialize_game_state()
num_rows = game.shape[0]
num_cols = game.shape[1]
last_row = num_rows - 1
for col in range(1, num_cols):
last_col = 1
for row in range(num_rows - 1, -1, -1):
if last_col == num_cols - 1:
break
if row == num_rows - 1 and col > 1:
game[last_row][num_cols - 1] = PLAYER2
game[row][col] = PLAYER2
elif row > 2:
game[row][num_cols - row - 1 + col - 1] = PLAYER2
elif row == 2:
game[row][num_cols - row - 1 + col - 1] = PLAYER1
else:
game[row + 1][num_cols - row - 1 + col - 2] = PLAYER2
game[row][num_cols - row - 1 + col - 1] = PLAYER1
last_row = row
last_col = num_cols - row - 1 + col - 1
if row < 3:
assert evaluate_antidiagonals(game, PLAYER1, is_player_blocking_opponent) == True
assert evaluate_antidiagonals(game, PLAYER2, is_player_blocking_opponent) == False
if row > 2:
assert evaluate_antidiagonals(game, PLAYER1, is_player_blocking_opponent) == False
assert evaluate_antidiagonals(game, PLAYER2, is_player_blocking_opponent) == False
def test_evaluate_antidiagonals_lowertriangle_True_Player2_is_player_blocking_opponent():
game = initialize_game_state()
num_rows = game.shape[0]
num_cols = game.shape[1]
last_row = num_rows - 1
for col in range(1, num_cols):
last_col = 1
for row in range(num_rows - 1, -1, -1):
if last_col == num_cols - 1:
break
if row == num_rows - 1 and col > 1:
game[last_row][num_cols - 1] = PLAYER1
game[row][col] = PLAYER1
elif row > 2:
game[row][num_cols - row - 1 + col - 1] = PLAYER1
elif row == 2:
game[row][num_cols - row - 1 + col - 1] = PLAYER2
else:
game[row + 1][num_cols - row - 1 + col - 2] = PLAYER1
game[row][num_cols - row - 1 + col - 1] = PLAYER2
last_row = row
last_col = num_cols - row - 1 + col - 1
if row < 3:
assert evaluate_antidiagonals(game, PLAYER1, is_player_blocking_opponent) == False
assert evaluate_antidiagonals(game, PLAYER2, is_player_blocking_opponent) == True
if row > 2:
assert evaluate_antidiagonals(game, PLAYER1, is_player_blocking_opponent) == False
assert evaluate_antidiagonals(game, PLAYER2, is_player_blocking_opponent) == False
def test_evaluate_antidiagonals_lowertriangle_False_is_player_blocking_opponent():
game = initialize_game_state()
num_rows = game.shape[0]
num_cols = game.shape[1]
for col in range(1, num_cols):
last_col = 1
for row in range(num_rows - 1, -1, -1):
if last_col == num_cols - 1:
break
game[row][num_cols - row - 1 + col - 1] = PLAYER2
last_col = num_cols - row - 1 + col - 1
assert evaluate_antidiagonals(game, PLAYER1, is_player_blocking_opponent) == False
assert evaluate_antidiagonals(game, PLAYER2, is_player_blocking_opponent) == False
def test_evaluate_antidiagonals_uppertriangle_True_Player1_is_player_winning():
game = initialize_game_state()
num_rows = game.shape[0]
num_cols = game.shape[1]
last_col = 0
for row in range(0, num_rows):
last_row = 0
for col in range(0, num_cols):
if last_row < 0:
break
if col == 0 and row > 0:
game[0][last_col] = PLAYER2
game[row][col] = PLAYER1
elif col < 4:
game[row - col][col] = PLAYER1
else:
game[row - col + 4][col - 4] = PLAYER2
game[row - col][col] = PLAYER1
last_row = row - col - 1
last_col = col
if col > 2:
assert evaluate_antidiagonals(game, PLAYER1, is_player_winning) == True
assert evaluate_antidiagonals(game, PLAYER2, is_player_winning) == False
if col < 3:
assert evaluate_antidiagonals(game, PLAYER1, is_player_winning) == False
assert evaluate_antidiagonals(game, PLAYER2, is_player_winning) == False
def test_evaluate_antidiagonals_uppertriangle_True_Player2_is_player_winning():
game = initialize_game_state()
num_rows = game.shape[0]
num_cols = game.shape[1]
last_col = 0
for row in range(0, num_rows):
last_row = 0
for col in range(0, num_cols):
if last_row < 0:
break
if col == 0 and row > 0:
game[0][last_col] = PLAYER1
game[row][col] = PLAYER2
elif col < 4:
game[row - col][col] = PLAYER2
else:
game[row - col + 4][col - 4] = PLAYER1
game[row - col][col] = PLAYER2
last_row = row - col - 1
last_col = col
if col > 2:
assert evaluate_antidiagonals(game, PLAYER1, is_player_winning) == False
assert evaluate_antidiagonals(game, PLAYER2, is_player_winning) == True
if col < 3:
assert evaluate_antidiagonals(game, PLAYER1, is_player_winning) == False
assert evaluate_antidiagonals(game, PLAYER2, is_player_winning) == False
def test_evaluate_antidiagonals_uppertriangle_False_is_player_winning():
game = initialize_game_state()
num_rows = game.shape[0]
num_cols = game.shape[1]
for row in range(0, num_rows):
last_row = 0
for col in range(0, num_cols):
if last_row < 0:
break
if col % 2 == 0:
game[row - col][col] = PLAYER2
else:
game[row - col][col] = PLAYER1
last_row = row - col - 1
assert evaluate_antidiagonals(game, PLAYER1, is_player_winning) == False
assert evaluate_antidiagonals(game, PLAYER2, is_player_winning) == False
def test_evaluate_antidiagonals_lowertriangle_True_Player1_is_player_winning():
game = initialize_game_state()
num_rows = game.shape[0]
num_cols = game.shape[1]
last_row = num_rows - 1
for col in range(1, num_cols):
last_col = 1
for row in range(num_rows - 1, -1, -1):
if last_col == num_cols - 1:
break
if row == num_rows - 1 and col > 1:
game[last_row][num_cols - 1] = PLAYER2
game[row][col] = PLAYER1
elif row > 2:
game[row][num_cols - row - 1 + col - 1] = PLAYER1
elif row == 2:
game[row][num_cols - row - 1 + col - 1] = PLAYER1
else:
game[row + 4][num_cols - row - 1 + col - 5] = PLAYER2
game[row][num_cols - row - 1 + col - 1] = PLAYER1
last_row = row
last_col = num_cols - row - 1 + col - 1
if row < 3:
assert evaluate_antidiagonals(game, PLAYER1, is_player_winning) == True
assert evaluate_antidiagonals(game, PLAYER2, is_player_winning) == False
if row > 2:
assert evaluate_antidiagonals(game, PLAYER1, is_player_winning) == False
assert evaluate_antidiagonals(game, PLAYER2, is_player_winning) == False
def test_evaluate_antidiagonals_lowertriangle_True_Player2_is_player_winning():
game = initialize_game_state()
num_rows = game.shape[0]
num_cols = game.shape[1]
last_row = num_rows - 1
for col in range(1, num_cols):
last_col = 1
for row in range(num_rows - 1, -1, -1):
if last_col == num_cols - 1:
break
if row == num_rows - 1 and col > 1:
game[last_row][num_cols - 1] = PLAYER1
game[row][col] = PLAYER2
elif row > 2:
game[row][num_cols - row - 1 + col - 1] = PLAYER2
elif row == 2:
game[row][num_cols - row - 1 + col - 1] = PLAYER2
else:
game[row + 4][num_cols - row - 1 + col - 5] = PLAYER1
game[row][num_cols - row - 1 + col - 1] = PLAYER2
last_row = row
last_col = num_cols - row - 1 + col - 1
if row < 3:
assert evaluate_antidiagonals(game, PLAYER1, is_player_winning) == False
assert evaluate_antidiagonals(game, PLAYER2, is_player_winning) == True
if row > 2:
assert evaluate_antidiagonals(game, PLAYER1, is_player_winning) == False
assert evaluate_antidiagonals(game, PLAYER2, is_player_winning) == False
def test_evaluate_antidiagonals_lowertriangle_False_is_player_winning():
game = initialize_game_state()
num_rows = game.shape[0]
num_cols = game.shape[1]
for col in range(1, num_cols):
last_col = 1
for row in range(num_rows - 1, -1, -1):
if last_col == num_cols - 1:
break
if row % 2 == 0:
game[row][num_cols - row - 1 + col - 1] = PLAYER2
else:
game[row][num_cols - row - 1 + col - 1] = PLAYER1
last_col = num_cols - row - 1 + col - 1
assert evaluate_antidiagonals(game, PLAYER1, is_player_winning) == False
assert evaluate_antidiagonals(game, PLAYER2, is_player_winning) == False
|
"""This script downloads all of the data located in the AWS S3 bucket, given the proper
access key and secret key. Assumes that this script will be run from the root of the repository.
Usage: get-data.py --access_key=<access_key> --secret_key=<secret_key>
Options:
--access_key=<access_key> The AWS access key providing access to the bucket.
--secret_key=<secret_key> The AWS secret key providing access to the bucket.
"""
import boto3
import os
from docopt import docopt
# Code is largely adapted from user Shan
# on StackOverflow: https://stackoverflow.com/questions/31918960/boto3-to-download-all-files-from-a-s3-bucket/33350380#33350380
opt = docopt(__doc__)
def main(access_key, secret_key):
"""
    This function downloads all of the data in the S3 bucket, given
    an access key and secret key with the appropriate permissions.
Parameters
----------
access_key: str
The AWS access key.
secret_key: str
The AWS secret key.
Returns
---------
None
Examples
---------
main(
access_key=MY_ACCESS_KEY,
secret_key=MY_SECRET_KEY
)
"""
# Initiate S3 client
s3 = boto3.client(
's3',
aws_access_key_id=access_key,
aws_secret_access_key=secret_key
)
for item in s3.list_objects(Bucket='mds-capstone-assurance')['Contents']:
if not item['Key'].endswith("/"):
print("Downloading file:", item['Key'])
s3.download_file(
'mds-capstone-assurance',
item['Key'],
item['Key']
)
else:
if not os.path.exists(item['Key']):
os.makedirs(item['Key'])
return
main(
access_key=opt['--access_key'],
secret_key=opt['--secret_key']
)
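# Hedged note (not part of the original script): s3.list_objects returns at most
# 1000 keys per call, so large buckets are usually walked with a paginator. A
# minimal sketch, reusing the bucket name above:
#
# def download_all(s3, bucket="mds-capstone-assurance"):
#     paginator = s3.get_paginator("list_objects_v2")
#     for page in paginator.paginate(Bucket=bucket):
#         for item in page.get("Contents", []):
#             if item["Key"].endswith("/"):
#                 os.makedirs(item["Key"], exist_ok=True)
#             else:
#                 os.makedirs(os.path.dirname(item["Key"]) or ".", exist_ok=True)
#                 s3.download_file(bucket, item["Key"], item["Key"])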
|
# Generated by Django 3.0.8 on 2020-07-22 12:11
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('Home', '0010_auto_20200722_1738'),
]
operations = [
migrations.RenameField(
model_name='student',
old_name='fathermobileno',
new_name='father_mobile_no',
),
migrations.RenameField(
model_name='student',
old_name='fathername',
new_name='father_name',
),
migrations.RenameField(
model_name='student',
old_name='mobileno',
new_name='mobile_no',
),
migrations.RenameField(
model_name='student',
old_name='rollno',
new_name='roll_no',
),
]
|
import torch
from torch import nn
from torch.nn import functional as F
from torchutils import to_device
class FocalLoss(nn.Module):
"""weighted version of Focal Loss"""
def __init__(self, alpha=.25, gamma=2, device=None):
super(FocalLoss, self).__init__()
self.alpha = torch.tensor([alpha, 1 - alpha])
# self.alpha = to_device(self.alpha, device=device)
self.gamma = gamma
def forward(self, inputs, targets):
BCE_loss = F.binary_cross_entropy(inputs, targets.float(), reduction='none')
targets = targets.long()
at = self.alpha.to(targets.device).gather(0, targets.view(-1))
pt = torch.exp(-BCE_loss)
F_loss = at * (1 - pt) ** self.gamma * BCE_loss
return F_loss.mean()
def binary_cross_entropy_weighted_focal_loss(y_pred, y_true, alpha=0.25, gamma=6, mask=None):
return FocalLoss(alpha=alpha, gamma=gamma, )(y_pred, y_true)
def cross_entropy_focal_loss(y_pred, y_true, weight=None, alpha=0.25, gamma=6, mask=None):
# important to add reduction='none' to keep per-batch-item loss
ce_loss = F.cross_entropy(y_pred, y_true, reduction='none', weight=weight)
pt = torch.exp(-ce_loss)
focal_loss = (alpha * (1 - pt) ** gamma * ce_loss).mean() # mean over the batch
return focal_loss
def binary_cross_entropy_focal_loss___(y_pred, y_true, alpha=0.25, gamma=6, mask=None):
# important to add reduction='none' to keep per-batch-item loss
ce_loss = F.binary_cross_entropy(y_pred, y_true, reduction='none')
pt = torch.exp(-ce_loss)
focal_loss = (alpha * (1 - pt) ** gamma * ce_loss).mean() # mean over the batch
return focal_loss
def bce_focal_loss(alpha=0.25, gamma=6):
def fn(y_pred, y_true, mask=None):
return binary_cross_entropy_focal_loss___(y_pred, y_true, alpha, gamma, mask=mask)
return fn
def ce_focal_loss(alpha=0.25, gamma=6):
def fn(y_pred, y_true, mask=None):
        # pass alpha/gamma by keyword so they do not land in the `weight` slot
        return cross_entropy_focal_loss(y_pred, y_true, alpha=alpha, gamma=gamma, mask=mask)
return fn
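# Hedged usage sketch (illustrative only): FocalLoss above wraps
# F.binary_cross_entropy, so it expects probabilities in (0, 1), not raw logits,
# together with integer binary targets.
#
# if __name__ == "__main__":
#     criterion = FocalLoss(alpha=0.25, gamma=2)
#     logits = torch.randn(8, requires_grad=True)
#     probs = torch.sigmoid(logits)        # squash to (0, 1) before the loss
#     labels = torch.randint(0, 2, (8,))
#     loss = criterion(probs, labels)
#     loss.backward()                      # gradients flow back to `logits`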
|
# Natural Language Toolkit: Aligner Utilities
#
# Copyright (C) 2001-2013 NLTK Project
# Author:
# URL: <http://www.nltk.org/>
# For license information, see LICENSE.TXT
|
from tornado.web import HTTPError
from ddtrace import config
from ...constants import ANALYTICS_SAMPLE_RATE_KEY
from ...constants import SPAN_MEASURED_KEY
from ...ext import SpanTypes
from ...ext import http
from ...propagation.http import HTTPPropagator
from .constants import CONFIG_KEY
from .constants import REQUEST_CONTEXT_KEY
from .constants import REQUEST_SPAN_KEY
from .stack_context import TracerStackContext
def execute(func, handler, args, kwargs):
"""
Wrap the handler execute method so that the entire request is within the same
    ``TracerStackContext``. This simplifies user code when the automatic ``Context``
    retrieval is used via the ``Tracer.trace()`` method.
"""
# retrieve tracing settings
settings = handler.settings[CONFIG_KEY]
tracer = settings["tracer"]
service = settings["default_service"]
distributed_tracing = settings["distributed_tracing"]
with TracerStackContext():
# attach the context to the request
setattr(handler.request, REQUEST_CONTEXT_KEY, tracer.get_call_context())
# Read and use propagated context from HTTP headers
if distributed_tracing:
context = HTTPPropagator.extract(handler.request.headers)
if context.trace_id:
tracer.context_provider.activate(context)
# store the request span in the request so that it can be used later
request_span = tracer.trace(
"tornado.request",
service=service,
span_type=SpanTypes.WEB,
)
request_span.set_tag(SPAN_MEASURED_KEY)
# set analytics sample rate
# DEV: tornado is special case maintains separate configuration from config api
analytics_enabled = settings["analytics_enabled"]
if (config.analytics_enabled and analytics_enabled is not False) or analytics_enabled is True:
request_span.set_tag(ANALYTICS_SAMPLE_RATE_KEY, settings.get("analytics_sample_rate", True))
setattr(handler.request, REQUEST_SPAN_KEY, request_span)
return func(*args, **kwargs)
def on_finish(func, handler, args, kwargs):
"""
Wrap the ``RequestHandler.on_finish`` method. This is the last executed method
after the response has been sent, and it's used to retrieve and close the
current request span (if available).
"""
request = handler.request
request_span = getattr(request, REQUEST_SPAN_KEY, None)
if request_span:
        # use the class name as a resource; if a handler is not available, the
# default handler class will be used so we don't pollute the resource
# space here
klass = handler.__class__
request_span.resource = "{}.{}".format(klass.__module__, klass.__name__)
request_span.set_tag("http.method", request.method)
request_span.set_tag("http.status_code", handler.get_status())
request_span.set_tag(http.URL, request.full_url().rsplit("?", 1)[0])
if config.tornado.trace_query_string:
request_span.set_tag(http.QUERY_STRING, request.query)
request_span.finish()
return func(*args, **kwargs)
def log_exception(func, handler, args, kwargs):
"""
Wrap the ``RequestHandler.log_exception``. This method is called when an
Exception is not handled in the user code. In this case, we save the exception
in the current active span. If the Tornado ``Finish`` exception is raised, this wrapper
will not be called because ``Finish`` is not an exception.
"""
# safe-guard: expected arguments -> log_exception(self, typ, value, tb)
value = args[1] if len(args) == 3 else None
if not value:
return func(*args, **kwargs)
# retrieve the current span
tracer = handler.settings[CONFIG_KEY]["tracer"]
current_span = tracer.current_span()
if not current_span:
return func(*args, **kwargs)
if isinstance(value, HTTPError):
# Tornado uses HTTPError exceptions to stop and return a status code that
# is not a 2xx. In this case we want to check the status code to be sure that
# only 5xx are traced as errors, while any other HTTPError exception is handled as
# usual.
if 500 <= value.status_code <= 599:
current_span.set_exc_info(*args)
else:
# any other uncaught exception should be reported as error
current_span.set_exc_info(*args)
return func(*args, **kwargs)
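# Hedged sketch of how these wrappers are typically activated (assumptions: the
# tornado integration is enabled through ddtrace's patch(), and CONFIG_KEY maps
# to the 'datadog_trace' key of the Tornado application settings, which supplies
# the "default_service"/"distributed_tracing" values read in execute() above;
# SomeHandler is a placeholder).
#
# import tornado.web
# from ddtrace import patch
# patch(tornado=True)
#
# app = tornado.web.Application(
#     [(r"/", SomeHandler)],
#     datadog_trace={
#         "default_service": "my-tornado-app",
#         "distributed_tracing": True,
#     },
# )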
|
import os
import random
import cherrypy
"""
This is a simple Battlesnake server written in Python.
For instructions see https://github.com/BattlesnakeOfficial/starter-snake-python/README.md
"""
class Battlesnake(object):
@cherrypy.expose
@cherrypy.tools.json_out()
def index(self):
# This function is called when you register your Battlesnake on play.battlesnake.com
# It controls your Battlesnake appearance and author permissions.
        # TIP: If you open your Battlesnake URL in a browser you should see this data
return {
"apiversion": "1",
"author": "", # TODO: Your Battlesnake Username
"color": "#888888", # TODO: Personalize
"head": "default", # TODO: Personalize
"tail": "default", # TODO: Personalize
}
@cherrypy.expose
@cherrypy.tools.json_in()
def start(self):
        # This function is called every time your snake is entered into a game.
# cherrypy.request.json contains information about the game that's about to be played.
data = cherrypy.request.json
print("START")
return "ok"
@cherrypy.expose
@cherrypy.tools.json_in()
@cherrypy.tools.json_out()
def move(self):
# This function is called on every turn of a game. It's how your snake decides where to move.
# Valid moves are "up", "down", "left", or "right".
# TODO: Use the information in cherrypy.request.json to decide your next move.
data = cherrypy.request.json
        board = data["board"]  # the parsed request JSON is a dict, not an object
# Choose a random direction to move in
possible_moves = ["up", "down", "left", "right"]
move = random.choice(possible_moves)
print(f"MOVE: {move}")
return {"move": move}
@cherrypy.expose
@cherrypy.tools.json_in()
def end(self):
# This function is called when a game your snake was in ends.
# It's purely for informational purposes, you don't have to make any decisions here.
data = cherrypy.request.json
print("END")
return "ok"
if __name__ == "__main__":
server = Battlesnake()
cherrypy.config.update({"server.socket_host": "0.0.0.0"})
cherrypy.config.update(
{"server.socket_port": int(os.environ.get("PORT", "8080")),}
)
print("Starting Battlesnake Server...")
cherrypy.quickstart(server)
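# Hedged sketch for the TODO in move(): keep only moves that stay on the board,
# using the v1 request payload ("you" -> "head", "board" -> width/height). This
# is an illustration, not part of the starter logic above.
#
# def safe_moves(data):
#     head = data["you"]["head"]
#     width, height = data["board"]["width"], data["board"]["height"]
#     candidates = {
#         "up": (head["x"], head["y"] + 1),
#         "down": (head["x"], head["y"] - 1),
#         "left": (head["x"] - 1, head["y"]),
#         "right": (head["x"] + 1, head["y"]),
#     }
#     return [m for m, (x, y) in candidates.items()
#             if 0 <= x < width and 0 <= y < height]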
|
"""
test the group model from zoom.models
"""
import unittest
import zoom
from zoom.database import setup_test
from zoom.models import Groups
from zoom.utils import Bunch
class TestGroup(unittest.TestCase):
"""Test the Zoom Group and Groups models"""
def setUp(self):
self.db = setup_test()
self.groups = Groups(self.db)
zoom.system.site = zoom.sites.Site()
zoom.system.user = zoom.system.site.users.get(1)
zoom.system.request = Bunch(app=Bunch(name=__name__))
def tearDown(self):
self.db.close()
def test_get_group(self):
group = self.groups.get(1)
self.assertEqual(group._id, 1)
self.assertEqual(group.name, 'administrators')
self.assertEqual(group.type, 'U')
self.assertEqual(group.admin_group_id, 1)
group = self.groups.get(3)
self.assertEqual(group._id, 3)
def test_get_group_admin_group_ids(self):
ids = self.groups.get_group_admin_group_ids()
self.assertEqual(ids, {1})
def test_is_group_admin_group(self):
group = self.groups.get(1)
self.assertTrue(group.is_group_admin_group)
group = self.groups.get(2)
self.assertFalse(group.is_group_admin_group)
def test_get_group_users(self):
group = self.groups.first(name='users')
self.assertSetEqual(group.users, group.get_users())
self.assertTrue(group.users)
def test_group_record_store(self):
group = self.groups.first(name='users')
self.assertTrue(group['__store'])
self.assertIsInstance(group['__store'], Groups)
del group['__store']
self.assertRaises(KeyError, lambda: group.apps, )
self.assertRaises(KeyError, lambda: group.roles, )
self.assertRaises(KeyError, lambda: group.subgroups, )
def test_add_delete_group(self):
groups = self.groups
self.assertFalse(groups.first(name='testgroup1'))
groups.add('testgroup1')
self.assertTrue(groups.first(name='testgroup1'))
groups.delete(name='testgroup1')
self.assertFalse(groups.first(name='testgroup1'))
def test_add_remove_subgroup(self):
users_group = self.groups.first(name='users')
managers_group = self.groups.first(name='managers')
self.assertEqual(managers_group.subgroups, {1})
managers_group.add_subgroup(users_group)
self.assertEqual(managers_group.subgroups, {1, users_group.group_id})
managers_group.remove_subgroup(users_group)
self.assertEqual(managers_group.subgroups, {1})
def test_locate_group(self):
groups = self.groups
group = groups.first(name='users')
group_id = group.group_id
self.assertEqual(groups.locate(group).group_id, group_id)
self.assertEqual(groups.locate(group_id).group_id, group_id)
self.assertEqual(groups.locate(group.name).group_id, group_id)
def test_groups_add_remove_app(self):
groups = self.groups
app_name = 'ping'
self.assertNotIn(
'a_' + app_name,
set(g.name for g in groups)
)
groups.add_app(app_name)
self.assertIn(
'a_' + app_name,
set(g.name for g in groups)
)
groups.remove_app(app_name)
self.assertNotIn(
'a_' + app_name,
set(g.name for g in groups)
)
    def test_groups_add_remove_app_idempotency(self):
groups = self.groups
app_name = 'ping'
self.assertNotIn(
'a_' + app_name,
set(g.name for g in groups)
)
groups.add_app(app_name)
groups.add_app(app_name)
self.assertIn(
'a_' + app_name,
set(g.name for g in groups)
)
groups.remove_app(app_name)
groups.remove_app(app_name)
self.assertNotIn(
'a_' + app_name,
set(g.name for g in groups)
)
def test_add_apps_remove_apps(self):
groups = self.groups
group_name = 'my_new_group'
app_names = {'ping', 'hello'}
if not groups.first(name=group_name):
groups.add(group_name)
group = groups.first(name=group_name)
self.assertEqual(group.app_names, set())
group.add_apps(app_names)
self.assertEqual(group.app_names, app_names)
group.remove_apps(app_names)
group = groups.first(name=group_name)
self.assertEqual(group.app_names, set())
groups.delete(name=group_name)
|
# -*- coding: utf-8 -*-
import util
import sys, decode, datetime, os, json
apikey = '*****'
apisec = '*****'
def date2int(datestr):
date = datetime.datetime.strptime(datestr, "%a %b %d %H:%M:%S %z %Y")
return date
class timeline:
    def __init__(self, tb, te, ht):
        self.time_begin = tb
        self.time_end = te
        self.hashtag = ht
        self.tweetlist = []
    def fetchTweets(self):
        # req() is assumed to come from the imported helper modules (util/decode)
        res = req("https://api.twitter.com/1.1/search/tweets.json", {'q': self.hashtag, 'until': self.time_end}, 'GET')
        res = json.loads(res.decode('utf-8'))
        self.tweetlist += res
        while self.time_end > self.tweetlist[-1]["created_at"]:
            res = req("https://api.twitter.com/1.1/search/tweets.json", {'q': self.hashtag, 'until': self.time_end, 'since_id': self.tweetlist[-1]['id']}, 'GET')
            res = json.loads(res.decode('utf-8'))
            self.tweetlist += res
#def start():
def main():
    authorize_filename = "authorization.txt"
    if not os.path.isfile(authorize_filename):
        # First run: obtain the keys and cache them on disk.
        # authorize_twitter() is assumed to come from the imported helper modules.
        authorize_keys = authorize_twitter(apikey, apisec)
        with open(authorize_filename, 'w') as authorize_file:
            authorize_file.write(authorize_keys)
    else:
        with open(authorize_filename, 'r') as authorize_file:
            authorize_keys = json.load(authorize_file)
    if len(sys.argv) != 5 * 2 + 1 + 1:
        print("Usage: " + sys.argv[0] + " [begin_year] [begin_month] [begin_day] [begin_hour] [begin_minute]"
              " [end_year] [end_month] [end_day] [end_hour] [end_minute] [hashtag]")
        return
    time_begin = datetime.datetime(*(int(arg) for arg in sys.argv[1:6]))
    time_end = datetime.datetime(*(int(arg) for arg in sys.argv[6:11]))
    hashtag = sys.argv[11]
    tl = timeline(time_begin, time_end, hashtag)
    tl.fetchTweets()
    for i in tl.tweetlist:
        print(i["status"])
if __name__ == '__main__':
main()
exit(0)
|
from django.apps import AppConfig
class CranworthSiteConfig(AppConfig):
name = 'cranworth_site'
verbose_name = 'Website'
|
#
#
#
import re
import random
import time
COMMA_DELIMITER_1 = ',(?=([^"]*"[^"]*")*[^"]*$)'
COMMA_DELIMITER_2 = ',(?=([^"\\]*"\\[^"\\]*"\\)*[^"\\]*$)'
#
#
def print_separator():
print(" " * 30)
print(" #" * 30)
print(" " * 30)
#
# line2 = '1;"Goroka";"Goroka";"Papua New Guinea";"GKA";"AYGA";-6.081689;145.391881;5282;10;"U";"Pacific/Port_Moresby"'
# records = commons.split_csv(";", line2)
# print(float(records[6]) > 40)
#
#
def split_csv(d, x):
splits = re.split(r"{}".format(d), x)
return splits
#
# line = '1,"Goroka","Goroka","Papua New Guinea","GKA","AYGA",-6.081689,145.391881,5282,10,"U","Pacific/Port_Moresby"'
# cols = commons.split_csv_line(line)
#
def split_csv_line(line):
cols = re.split(r",(?![^(]*?\))\s*", line)
return cols
def str_time_prop(start, end, time_format, prop):
"""Get a time at a proportion of a range of two formatted times.
start and end should be strings specifying times formatted in the
given format (strftime-style), giving an interval [start, end].
    prop specifies the proportion of the interval to be taken after
    start. The returned time will be in the specified format.
"""
stime = time.mktime(time.strptime(start, time_format))
etime = time.mktime(time.strptime(end, time_format))
ptime = stime + prop * (etime - stime)
return time.strftime(time_format, time.localtime(ptime))
def random_date(start, end, prop):
return str_time_prop(start, end, '%m/%d/%Y %I:%M %p', prop)
# We can test function by calling it.
if __name__ == "__main__":
line = '1,"Goroka","Goroka","Papua New Guinea","GKA","AYGA",-6.081689,145.391881,5282,10,"U","Pacific/Port_Moresby"'
cols = split_csv_line(line)
records = split_csv(",", line)
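    # Hedged illustration of the date helpers above (values are arbitrary):
    # random_date("01/01/2020 09:15 AM", "12/31/2020 11:59 PM", random.random())
    # returns a '%m/%d/%Y %I:%M %p' timestamp somewhere inside that interval.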
|
#!/usr/bin/python3
# encoding='utf-8'
# author:weibk
# @time:2021/9/23 19:10
import pymysql
import random
con = pymysql.connect(host="localhost",
user="root",
password="123456",
database="db",
charset="utf8")
cursor = con.cursor(cursor=pymysql.cursors.DictCursor)
print("*****************************")
print("* 中国工商银行 *")
print("* 账户管理系统 *")
print("* V1.0 *")
print("*****************************")
print("* *")
print("* 1.开户 *")
print("* 2.存款 *")
print("* 3.取款 *")
print("* 4.转账 *")
print("* 5.查询 *")
print("* 6.退出 *")
print("*****************************")
BANK_NAME = "中国工商银行"
MONEY_INIT = 0
# Look up a user's record by account number
def getinfo(account):
cursor.execute('select * from bank_user where account=%s', (account,))
result = cursor.fetchone()
return result
# Add a new user (account opening)
def useradd():
    # Check whether the user table is already full
s = cursor.execute("select * from bank_user")
if s == 100:
return 3
    # Check whether the user already exists
while True:
username = input("请输入您的姓名:")
cursor.execute("select username from bank_user")
uname = cursor.fetchall()
for item in uname:
if username == item['username']:
return 2
break
password = input("请设置一个密码:")
print("请您填写地址:")
country = input("\t请输入您所在的国家:")
province = input("\t请输入您所在的城市:")
street = input("\t请输入您所在的街道:")
house_number = input("\t请输入您的门牌号:")
    # Check whether the generated account number already exists; regenerate if it does
while True:
account = str(random.randint(10, 99)) + str(
random.randint(10, 99)) + str(
random.randint(10, 99)) + str(random.randint(10, 99))
cursor.execute("select account from bank_user")
uname = cursor.fetchall()
for item in uname:
if account == item['account']:
continue
else:
break
cursor.execute("insert into bank_user values "
"(%s, %s, %s, %s, %s, %s, %s, %s, %s)",
(repr(account), repr(username), repr(password),
repr(country), repr(province),
repr(street), repr(house_number),
repr(BANK_NAME), repr(MONEY_INIT)))
con.commit()
cursor.execute("select * from bank_user where account=%s", (account,))
info1 = cursor.fetchone()
return info1
# Login helper
def login():
while True:
acc = int(input("请输入您的账号"))
cursor.execute("select account from bank_user")
uname = cursor.fetchall()
for item in uname:
if acc == item['account']:
while True:
pwd = input("请输入密码:")
cursor.execute("select * from bank_user where "
"account=%s", (acc,))
info1 = cursor.fetchone()
if pwd == info1['password']:
return {"flag": 1, 'info': info1}
else:
return 2
else:
continue
return 3
while True:
step = input("请选择业务:")
if step == "1":
info = useradd()
print(type(info))
        # If account opening succeeded, print the user's details
if isinstance(info, dict):
profile = '''
用户信息
---------------
账号:%s
姓名:%s
密码:%s
地址:%s-%s-%s-%s
余额:%s
开户行:%s
---------------
'''
print("恭喜你开户成功!!,您的信息如下:")
print(profile % (info['account'], info['username'],
info['password'], info['country'],
info['province'], info['street'],
info['house_number'], info['bank'],
info['balance']))
elif info == 2:
print("该用户已存在")
continue
elif info == 3:
print("用户库已满暂不支持开户业务")
continue
elif step == "2":
flag = login()
if isinstance(flag, dict):
bank = flag['info']
yue = bank['balance']
print(f"你好,{bank['username']}登录成功!账户当前余额为{yue}")
            # Deposit after a successful login
while True:
cunkuan = input("请输入您要存的金额:")
if cunkuan == 'Q' or cunkuan == 'q':
break
elif cunkuan.isdigit():
cunkuan = int(cunkuan)
else:
print('存款请输入正数,输入Q/q可退出业务')
continue
yue += cunkuan
print(f"存款成功!余额为{yue}")
cursor.execute("update bank_user set balance=%s where "
"account=%s", (yue, bank['account']))
con.commit()
break
elif flag == 2:
print("密码错误!")
continue
elif flag == 3:
print("账号不存在!")
continue
elif step == "3":
flag = login()
if isinstance(flag, dict):
bank = flag['info']
yue = bank['balance']
            # Check whether the balance is zero
if yue == 0:
print(f"你好,{bank['username']},您的余额为0,不能使用取款业务")
continue
else:
print(f"你好,{bank['username']},登录成功!账户当前余额为{yue}")
while True:
qukuan = input("请输入您要取的金额:")
if qukuan == 'Q' or qukuan == 'q':
break
elif qukuan.isdigit():
qukuan = int(qukuan)
else:
print('取款请输入正数,输入Q/q可退出业务')
                    # Check whether the balance is sufficient
if yue < qukuan:
print('您的余额不足')
break
else:
yue -= qukuan
print(f"取款成功!余额为{yue}")
cursor.execute("update bank_user set balance=%s where "
"account=%s", (yue, bank['account']))
con.commit()
break
elif flag == 2:
print("密码错误!")
continue
elif flag == 3:
print("账号不存在!")
continue
elif step == "4":
flag = login()
if isinstance(flag, dict):
bank = flag['info']
yue = bank['balance']
acc1 = bank['account']
            # A zero balance cannot be used for transfers
if yue == 0:
print(f"你好,{bank['username']},您的余额为0,不能使用转账业务")
continue
else:
print(f"你好,{bank['username']},登录成功!账户当前余额为{yue}")
while True:
acc2 = input("请输入您要转账的账户:")
                    # Check whether the destination account exists
y = cursor.execute(
"select * from bank_user where account=%s", (acc2,))
x = cursor.fetchone()
if y == 1:
                        # Make sure the source and destination accounts differ
if acc2 != acc1:
zhuan = input("请输入您要转的金额:")
if zhuan == 'Q' or zhuan == 'q':
break
elif zhuan.isdigit():
zhuan = int(zhuan)
else:
print('转账请输入正数,输入Q/q可退出业务')
                            # Check the balance
if yue < zhuan:
print("您的余额不足,输入Q/q可退出业务")
break
else:
                                # Debit the source account
yue -= zhuan
print(f"转账成功!您的余额为{yue}")
cursor.execute(
"update bank_user set balance=%s where "
"account=%s", (yue, acc1))
con.commit()
                                # Credit the destination account
x['balance'] += zhuan
cursor.execute(
"update bank_user set balance=%s where "
"account=%s", (x['balance'], acc2))
con.commit()
break
else:
print('不能给自己转账,输入Q/q可退出业务')
continue
else:
print("您输入的账号不存在,输入Q/q可退出业务")
continue
elif flag == 2:
print("密码错误!")
continue
elif flag == 3:
print("账号不存在!")
continue
elif step == "5":
flag = login()
if isinstance(flag, dict):
bank = flag['info']
print(f"登录成功!账户当前信息如下:")
profile = '''
用户信息
---------------
账号:%s
姓名:%s
密码:%s
地址:%s-%s-%s-%s
开户行:%s
余额:%s
---------------
'''
print(profile % (bank['account'], bank['username'],
bank['password'], bank['country'],
bank['province'], bank['street'],
bank['house_number'], bank['bank'],
bank['balance']))
elif flag == 2:
print("密码错误!")
continue
elif flag == 3:
print("账号不存在!")
continue
elif step == "6":
break
con.commit()
cursor.close()
con.close()
|
import zipfile # noqa: F401
from zipfile import ZipFile # noqa: F401
|
from sys import maxsize
class User:
def __init__(self, firstname=None, lastname=None, address=None, email=None, email2=None, email3=None, user_id=None,
homephone=None, workphone=None, mobilephone=None, additionalphone=None,
all_phones_from_home_page=None, all_emails_from_home_page=None, deprecated=None):
self.firstname = firstname
self.lastname = lastname
self.address = address
self.email = email
self.email2 = email2
self.email3 = email3
self.homephone = homephone
self.mobilephone = mobilephone
self.id = user_id
self.workphone = workphone
self.additionalphone = additionalphone
self.all_phones_from_home_page = all_phones_from_home_page
self.all_emails_from_home_page = all_emails_from_home_page
self.deprecated = deprecated
def __repr__(self):
return "%s:%s:%s:%s" % (self.id, self.firstname, self.lastname, self.deprecated)
def __eq__(self, other):
return (self.id is None or other.id is None or self.id == other.id) and self.firstname == other.firstname \
and self.lastname == other.lastname
def id_or_max(self):
if self.id:
return int(self.id)
else:
return maxsize
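    # Hedged usage note (not part of the model): id_or_max() is typically used as
    # a sort key so that objects without a database id sort last, e.g.
    #   sorted(user_list, key=User.id_or_max)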
|
# Copyright (c) 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from webob import exc
from murano.api.v1 import request_statistics
from murano.common.i18n import _
from murano.common import wsgi
from murano.db import models
from murano.db.services import environments as envs
from murano.db.services import sessions
from murano.db import session as db_session
from murano.openstack.common import log as logging
from murano.services import states
LOG = logging.getLogger(__name__)
API_NAME = 'Sessions'
class Controller(object):
def _check_environment(self, request, environment_id):
unit = db_session.get_session()
environment = unit.query(models.Environment).get(environment_id)
if environment is None:
msg = _('Environment <EnvId {0}>'
' is not found').format(environment_id)
LOG.error(msg)
raise exc.HTTPNotFound(explanation=msg)
if environment_id != envs.get_cloud_id():
if environment.tenant_id != request.context.tenant:
msg = _('User is not authorized to access '
'this tenant resources.')
LOG.error(msg)
raise exc.HTTPUnauthorized(explanation=msg)
def _check_session(self, request, environment_id, session, session_id):
if session is None:
msg = _('Session <SessionId {0}> is not found').format(session_id)
LOG.error(msg)
raise exc.HTTPNotFound(explanation=msg)
if session.environment_id != environment_id:
msg = _('Session <SessionId {0}> is not tied with Environment '
'<EnvId {1}>').format(session_id, environment_id)
LOG.error(msg)
raise exc.HTTPNotFound(explanation=msg)
self._check_environment(request, environment_id)
@request_statistics.stats_count(API_NAME, 'Create')
def configure(self, request, environment_id):
LOG.debug('Session:Configure <EnvId: {0}>'.format(environment_id))
self._check_environment(request, environment_id)
# no new session can be opened if environment has deploying status
env_status = envs.EnvironmentServices.get_status(environment_id)
if env_status in (states.EnvironmentStatus.DEPLOYING,
states.EnvironmentStatus.DELETING):
            msg = _('Could not open session for environment <EnvId: {0}>, '
                    'environment has deploying status.').format(environment_id)
LOG.error(msg)
raise exc.HTTPForbidden(explanation=msg)
user_id = request.context.user
session = sessions.SessionServices.create(environment_id, user_id)
return session.to_dict()
@request_statistics.stats_count(API_NAME, 'Index')
def show(self, request, environment_id, session_id):
LOG.debug('Session:Show <SessionId: {0}>'.format(session_id))
unit = db_session.get_session()
session = unit.query(models.Session).get(session_id)
self._check_session(request, environment_id, session, session_id)
user_id = request.context.user
msg = _('User <UserId {0}> is not authorized to access session'
'<SessionId {1}>.').format(user_id, session_id)
if session.user_id != user_id:
LOG.error(msg)
raise exc.HTTPUnauthorized(explanation=msg)
if not sessions.SessionServices.validate(session):
msg = _('Session <SessionId {0}> is invalid').format(session_id)
LOG.error(msg)
raise exc.HTTPForbidden(explanation=msg)
return session.to_dict()
@request_statistics.stats_count(API_NAME, 'Delete')
def delete(self, request, environment_id, session_id):
LOG.debug('Session:Delete <SessionId: {0}>'.format(session_id))
unit = db_session.get_session()
session = unit.query(models.Session).get(session_id)
self._check_session(request, environment_id, session, session_id)
user_id = request.context.user
if session.user_id != user_id:
msg = _('User <UserId {0}> is not authorized to access session'
'<SessionId {1}>.').format(user_id, session_id)
LOG.error(msg)
raise exc.HTTPUnauthorized(explanation=msg)
if session.state == states.SessionState.DEPLOYING:
msg = _('Session <SessionId: {0}> is in deploying state and '
'could not be deleted').format(session_id)
LOG.error(msg)
raise exc.HTTPForbidden(explanation=msg)
with unit.begin():
unit.delete(session)
return None
@request_statistics.stats_count(API_NAME, 'Deploy')
def deploy(self, request, environment_id, session_id):
LOG.debug('Session:Deploy <SessionId: {0}>'.format(session_id))
unit = db_session.get_session()
session = unit.query(models.Session).get(session_id)
session.tenant_id = request.context.tenant
self._check_session(request, environment_id, session, session_id)
if not sessions.SessionServices.validate(session):
msg = _('Session <SessionId {0}> is invalid').format(session_id)
LOG.error(msg)
raise exc.HTTPForbidden(explanation=msg)
if session.state != states.SessionState.OPENED:
msg = _('Session <SessionId {0}> is already deployed or '
'deployment is in progress').format(session_id)
LOG.error(msg)
raise exc.HTTPForbidden(explanation=msg)
envs.EnvironmentServices.deploy(session,
unit,
request.context.auth_token)
def create_resource():
return wsgi.Resource(Controller())
|
'''
This example uses scikit-learn to do a binary classification of images
of nuts vs. bolts. Only the area, height, and width of blobs extracted
from the images are used to classify them.
This is a very crude example that could easily be built upon, but it is
meant to give an introductory example of using machine learning.
The data set should auto download, if not you can get it from:
https://github.com/downloads/sightmachine/SimpleCV/nuts_bolts.zip
'''
print __doc__
from SimpleCV import *
from sklearn.svm import LinearSVC
from sklearn.linear_model import LogisticRegression
import numpy as np
#Download the dataset
machine_learning_data_set = 'https://github.com/downloads/sightmachine/SimpleCV/nuts_bolts.zip'
data_path = download_and_extract(machine_learning_data_set)
print 'Test Images Downloaded at:', data_path
display = Display((800,600)) #Display to show the images
target_names = ['bolt', 'nut']
print 'Loading Bolts for Training'
bolts = ImageSet(data_path + '/data/supervised/bolts') #Load Bolts for training
bolt_blobs = [b.findBlobs()[0] for b in bolts] # extract the blobs for our features
tmp_data = [] #array to store data features
tmp_target = [] #array to store targets
for b in bolt_blobs: #Format Data for SVM
tmp_data.append([b.area(), b.height(), b.width()])
tmp_target.append(0)
print 'Loading Nuts for Training'
nuts = ImageSet(data_path + '/data/supervised/nuts')
nut_blobs = [n.invert().findBlobs()[0] for n in nuts]
for n in nut_blobs:
tmp_data.append([n.area(), n.height(), n.width()])
tmp_target.append(1)
dataset = np.array(tmp_data)
targets = np.array(tmp_target)
print 'Training Machine Learning'
clf = LinearSVC()
clf = clf.fit(dataset, targets)
clf2 = LogisticRegression().fit(dataset, targets)
print 'Running prediction on bolts now'
untrained_bolts = ImageSet(data_path + '/data/unsupervised/bolts')
unbolt_blobs = [b.findBlobs()[0] for b in untrained_bolts]
for b in unbolt_blobs:
ary = [b.area(), b.height(), b.width()]
name = target_names[clf.predict(ary)[0]]
probability = clf2.predict_proba(ary)[0]
img = b.image
img.drawText(name)
img.save(display)
print "Predicted:",name,", Guess:",probability[0], target_names[0],",", probability[1], target_names[1]
print 'Running prediction on nuts now'
untrained_nuts = ImageSet(data_path + '/data/unsupervised/nuts')
unnut_blobs = [n.invert().findBlobs()[0] for n in untrained_nuts]
for n in unnut_blobs:
ary = [n.area(), n.height(), n.width()]
name = target_names[clf.predict(ary)[0]]
probability = clf2.predict_proba(ary)[0]
img = n.image
img.drawText(name)
img.save(display)
print "Predicted:",name,", Guess:",probability[0], target_names[0],",", probability[1], target_names[1]
|
from __future__ import unicode_literals
from collections import OrderedDict
from datetime import datetime
from dateutil.tz import tzstr
import pytest
from javaproperties import PropertiesFile, dumps
INPUT = '''\
# A comment before the timestamp
#Thu Mar 16 17:06:52 EDT 2017
# A comment after the timestamp
foo: first definition
bar=only definition
# Comment between values
key = value
zebra \\
apple
foo : second definition
# Comment at end of file
'''
def test_propfile_empty():
pf = PropertiesFile()
pf._check()
assert len(pf) == 0
assert not bool(pf)
assert dict(pf) == {}
assert list(pf) == []
assert list(reversed(pf)) == []
assert pf.dumps() == ''
@pytest.mark.parametrize('src', [INPUT, INPUT.encode('iso-8859-1')])
def test_propfile_loads(src):
pf = PropertiesFile.loads(src)
pf._check()
assert len(pf) == 4
assert bool(pf)
assert dict(pf) == {
"foo": "second definition",
"bar": "only definition",
"key": "value",
"zebra": "apple",
}
assert list(pf) == ["foo", "bar", "key", "zebra"]
assert list(reversed(pf)) == ["zebra", "key", "bar", "foo"]
def test_propfile_dumps():
pf = PropertiesFile.loads(INPUT)
pf._check()
assert pf.dumps() == INPUT
def test_propfile_getitem():
pf = PropertiesFile.loads(INPUT)
pf._check()
assert pf["key"] == "value"
assert pf["foo"] == "second definition"
with pytest.raises(KeyError):
pf["missing"]
pf._check()
def test_propfile_setitem():
pf = PropertiesFile.loads(INPUT)
pf._check()
pf["key"] = "lock"
pf._check()
assert dict(pf) == {
"foo": "second definition",
"bar": "only definition",
"key": "lock",
"zebra": "apple",
}
assert list(pf) == ["foo", "bar", "key", "zebra"]
assert list(reversed(pf)) == ["zebra", "key", "bar", "foo"]
assert pf.dumps() == '''\
# A comment before the timestamp
#Thu Mar 16 17:06:52 EDT 2017
# A comment after the timestamp
foo: first definition
bar=only definition
# Comment between values
key=lock
zebra \\
apple
foo : second definition
# Comment at end of file
'''
def test_propfile_additem():
pf = PropertiesFile.loads(INPUT)
pf._check()
pf["new"] = "old"
pf._check()
assert dict(pf) == {
"foo": "second definition",
"bar": "only definition",
"key": "value",
"zebra": "apple",
"new": "old",
}
assert list(pf) == ["foo", "bar", "key", "zebra", "new"]
assert list(reversed(pf)) == ["new", "zebra", "key", "bar", "foo"]
assert pf.dumps() == '''\
# A comment before the timestamp
#Thu Mar 16 17:06:52 EDT 2017
# A comment after the timestamp
foo: first definition
bar=only definition
# Comment between values
key = value
zebra \\
apple
foo : second definition
# Comment at end of file
new=old
'''
def test_propfile_delitem():
pf = PropertiesFile.loads(INPUT)
pf._check()
del pf["key"]
pf._check()
assert dict(pf) == {
"foo": "second definition",
"bar": "only definition",
"zebra": "apple",
}
assert list(pf) == ["foo", "bar", "zebra"]
assert list(reversed(pf)) == ["zebra", "bar", "foo"]
assert pf.dumps() == '''\
# A comment before the timestamp
#Thu Mar 16 17:06:52 EDT 2017
# A comment after the timestamp
foo: first definition
bar=only definition
# Comment between values
zebra \\
apple
foo : second definition
# Comment at end of file
'''
def test_propfile_delitem_missing():
pf = PropertiesFile.loads(INPUT)
pf._check()
with pytest.raises(KeyError):
del pf["missing"]
pf._check()
assert len(pf) == 4
assert bool(pf)
assert dict(pf) == {
"foo": "second definition",
"bar": "only definition",
"key": "value",
"zebra": "apple",
}
assert list(pf) == ["foo", "bar", "key", "zebra"]
assert list(reversed(pf)) == ["zebra", "key", "bar", "foo"]
assert pf.dumps() == INPUT
def test_propfile_move_item():
pf = PropertiesFile.loads(INPUT)
pf._check()
del pf["key"]
pf._check()
pf["key"] = "recreated"
pf._check()
assert dict(pf) == {
"foo": "second definition",
"bar": "only definition",
"key": "recreated",
"zebra": "apple",
}
assert list(pf) == ["foo", "bar", "zebra", "key"]
assert list(reversed(pf)) == ["key", "zebra", "bar", "foo"]
assert pf.dumps() == '''\
# A comment before the timestamp
#Thu Mar 16 17:06:52 EDT 2017
# A comment after the timestamp
foo: first definition
bar=only definition
# Comment between values
zebra \\
apple
foo : second definition
# Comment at end of file
key=recreated
'''
def test_propfile_set_nochange():
pf = PropertiesFile.loads(INPUT)
pf._check()
assert pf["key"] == "value"
pf["key"] = "value"
pf._check()
assert dict(pf) == {
"foo": "second definition",
"bar": "only definition",
"key": "value",
"zebra": "apple",
}
assert list(pf) == ["foo", "bar", "key", "zebra"]
assert list(reversed(pf)) == ["zebra", "key", "bar", "foo"]
assert pf.dumps() == '''\
# A comment before the timestamp
#Thu Mar 16 17:06:52 EDT 2017
# A comment after the timestamp
foo: first definition
bar=only definition
# Comment between values
key=value
zebra \\
apple
foo : second definition
# Comment at end of file
'''
def test_propfile_dumps_function():
assert dumps(PropertiesFile.loads(INPUT), timestamp=False) == '''\
foo=second definition
bar=only definition
key=value
zebra=apple
'''
def test_propfile_set_repeated_key():
pf = PropertiesFile.loads(INPUT)
pf._check()
pf["foo"] = "redefinition"
pf._check()
assert dict(pf) == {
"foo": "redefinition",
"bar": "only definition",
"key": "value",
"zebra": "apple",
}
assert list(pf) == ["foo", "bar", "key", "zebra"]
assert list(reversed(pf)) == ["zebra", "key", "bar", "foo"]
assert pf.dumps() == '''\
# A comment before the timestamp
#Thu Mar 16 17:06:52 EDT 2017
# A comment after the timestamp
foo=redefinition
bar=only definition
# Comment between values
key = value
zebra \\
apple
# Comment at end of file
'''
def test_propfile_delete_repeated_key():
pf = PropertiesFile.loads(INPUT)
pf._check()
del pf["foo"]
pf._check()
assert dict(pf) == {
"bar": "only definition",
"key": "value",
"zebra": "apple",
}
assert list(pf) == ["bar", "key", "zebra"]
assert list(reversed(pf)) == ["zebra", "key", "bar"]
assert pf.dumps() == '''\
# A comment before the timestamp
#Thu Mar 16 17:06:52 EDT 2017
# A comment after the timestamp
bar=only definition
# Comment between values
key = value
zebra \\
apple
# Comment at end of file
'''
def test_propfile_from_ordereddict():
pf = PropertiesFile(OrderedDict([('key', 'value'), ('apple', 'zebra')]))
pf._check()
assert len(pf) == 2
assert bool(pf)
assert dict(pf) == {"apple": "zebra", "key": "value"}
assert list(pf) == ["key", "apple"]
assert list(reversed(pf)) == ["apple", "key"]
assert pf.dumps() == 'key=value\napple=zebra\n'
def test_propfile_from_kwarg():
pf = PropertiesFile(key='value')
pf._check()
assert len(pf) == 1
assert bool(pf)
assert dict(pf) == {"key": "value"}
assert list(pf) == ["key"]
assert list(reversed(pf)) == ["key"]
assert pf.dumps() == 'key=value\n'
def test_propfile_from_pairs_list():
pf = PropertiesFile([('key', 'value'), ('apple', 'zebra')])
pf._check()
assert len(pf) == 2
assert bool(pf)
assert dict(pf) == {"apple": "zebra", "key": "value"}
assert list(pf) == ["key", "apple"]
assert list(reversed(pf)) == ["apple", "key"]
assert pf.dumps() == 'key=value\napple=zebra\n'
def test_propfile_from_ordereddict_and_kwarg():
pf = PropertiesFile(OrderedDict([('key', 'value'), ('apple', 'zebra')]),
key='lock')
pf._check()
assert len(pf) == 2
assert bool(pf)
assert dict(pf) == {"apple": "zebra", "key": "lock"}
assert list(pf) == ["key", "apple"]
assert list(reversed(pf)) == ["apple", "key"]
assert pf.dumps() == 'key=lock\napple=zebra\n'
def test_propfile_dumps_separator():
pf = PropertiesFile.loads(INPUT)
pf._check()
assert pf.dumps(separator='\t') == INPUT
def test_propfile_set_dumps_separator():
pf = PropertiesFile.loads(INPUT)
pf._check()
pf["key"] = "lock"
pf._check()
assert pf.dumps(separator='\t') == '''\
# A comment before the timestamp
#Thu Mar 16 17:06:52 EDT 2017
# A comment after the timestamp
foo: first definition
bar=only definition
# Comment between values
key\tlock
zebra \\
apple
foo : second definition
# Comment at end of file
'''
def test_propfile_copy():
pf = PropertiesFile({"Foo": "bar"})
pf2 = pf.copy()
pf._check()
pf2._check()
assert pf is not pf2
assert isinstance(pf2, PropertiesFile)
assert pf == pf2
assert dict(pf) == dict(pf2) == {"Foo": "bar"}
pf2["Foo"] = "gnusto"
pf._check()
pf2._check()
assert dict(pf) == {"Foo": "bar"}
assert dict(pf2) == {"Foo": "gnusto"}
assert pf != pf2
pf2["fOO"] = "quux"
pf._check()
pf2._check()
assert dict(pf) == {"Foo": "bar"}
assert dict(pf2) == {"Foo": "gnusto", "fOO": "quux"}
assert pf != pf2
def test_propfile_copy_more():
pf = PropertiesFile.loads(INPUT)
pf2 = pf.copy()
pf._check()
pf2._check()
assert pf is not pf2
assert isinstance(pf2, PropertiesFile)
assert pf == pf2
assert dict(pf) == dict(pf2) == {
"foo": "second definition",
"bar": "only definition",
"key": "value",
"zebra": "apple",
}
pf2["foo"] = "third definition"
del pf2["bar"]
pf2["key"] = "value"
pf2["zebra"] = "horse"
pf2["new"] = "old"
pf._check()
pf2._check()
assert pf != pf2
assert dict(pf) == {
"foo": "second definition",
"bar": "only definition",
"key": "value",
"zebra": "apple",
}
assert dict(pf2) == {
"foo": "third definition",
"key": "value",
"zebra": "horse",
"new": "old",
}
assert pf.dumps() == INPUT
assert pf2.dumps() == '''\
# A comment before the timestamp
#Thu Mar 16 17:06:52 EDT 2017
# A comment after the timestamp
foo=third definition
# Comment between values
key=value
zebra=horse
# Comment at end of file
new=old
'''
def test_propfile_eq_empty():
pf = PropertiesFile()
pf2 = PropertiesFile()
assert pf is not pf2
assert pf == pf2
def test_propfile_eq_nonempty():
pf = PropertiesFile({"Foo": "bar"})
pf2 = PropertiesFile({"Foo": "bar"})
assert pf is not pf2
assert pf == pf2
def test_propfile_eq_self():
pf = PropertiesFile.loads(INPUT)
assert pf == pf
def test_propfile_neq():
assert PropertiesFile({"Foo": "bar"}) != PropertiesFile({"Foo": "BAR"})
def test_propfile_eq_dict():
pf = PropertiesFile({"Foo": "BAR"})
assert pf == {"Foo": "BAR"}
assert {"Foo": "BAR"} == pf
assert pf != {"Foo": "bar"}
assert {"Foo": "bar"} != pf
def test_propfile_eq_set_nochange():
pf = PropertiesFile.loads(INPUT)
pf2 = PropertiesFile.loads(INPUT)
assert pf == pf2
assert pf["key"] == pf2["key"] == "value"
pf2["key"] = "value"
assert pf == pf2
assert dict(pf) == dict(pf2)
assert pf.dumps() == INPUT
assert pf.dumps() != pf2.dumps()
def test_propfile_neq_one_comment():
pf = PropertiesFile.loads('#This is a comment.\nkey=value\n')
pf2 = PropertiesFile.loads('key=value\n')
assert pf != pf2
assert dict(pf) == dict(pf2)
def test_propfile_neq_different_comments():
pf = PropertiesFile.loads('#This is a comment.\nkey=value\n')
pf2 = PropertiesFile.loads('#This is also a comment.\nkey=value\n')
assert pf != pf2
assert dict(pf) == dict(pf2)
def test_propfile_eq_one_repeated_key():
pf = PropertiesFile.loads('key = value\nkey: other value\n')
pf2 = PropertiesFile.loads('key other value')
assert pf == pf2
assert dict(pf) == dict(pf2) == {"key": "other value"}
def test_propfile_eq_repeated_keys():
pf = PropertiesFile.loads('key = value\nkey: other value\n')
pf2 = PropertiesFile.loads('key: whatever\nkey other value')
assert pf == pf2
assert dict(pf) == dict(pf2) == {"key": "other value"}
def test_propfile_neq_string():
pf = PropertiesFile.loads('key = value\nkey: other value\n')
assert pf != 'key = value\nkey: other value\n'
assert 'key = value\nkey: other value\n' != pf
def test_propfile_preserve_trailing_escape():
pf = PropertiesFile.loads('key = value\\')
pf._check()
assert dict(pf) == {"key": "value"}
assert pf.dumps() == 'key = value\\'
def test_propfile_add_after_trailing_escape():
pf = PropertiesFile.loads('key = value\\')
pf._check()
pf["new"] = "old"
pf._check()
assert dict(pf) == {"key": "value", "new": "old"}
assert pf.dumps() == 'key = value\nnew=old\n'
def test_propfile_preserve_trailing_comment_escape():
pf = PropertiesFile.loads('#key = value\\')
pf._check()
assert dict(pf) == {}
assert pf.dumps() == '#key = value\\'
def test_propfile_add_after_trailing_comment_escape():
pf = PropertiesFile.loads('#key = value\\')
pf._check()
pf["new"] = "old"
pf._check()
assert dict(pf) == {"new": "old"}
assert pf.dumps() == '#key = value\\\nnew=old\n'
def test_propfile_preserve_no_trailing_newline():
pf = PropertiesFile.loads('key = value')
pf._check()
assert dict(pf) == {"key": "value"}
assert pf.dumps() == 'key = value'
def test_propfile_add_after_no_trailing_newline():
pf = PropertiesFile.loads('key = value\\')
pf._check()
pf["new"] = "old"
pf._check()
assert dict(pf) == {"key": "value", "new": "old"}
assert pf.dumps() == 'key = value\nnew=old\n'
def test_propfile_preserve_comment_no_trailing_newline():
pf = PropertiesFile.loads('#key = value')
pf._check()
assert dict(pf) == {}
assert pf.dumps() == '#key = value'
def test_propfile_add_after_comment_no_trailing_newline():
pf = PropertiesFile.loads('#key = value')
pf._check()
pf["new"] = "old"
pf._check()
assert dict(pf) == {"new": "old"}
assert pf.dumps() == '#key = value\nnew=old\n'
def test_propfile_preserve_trailing_escape_nl():
pf = PropertiesFile.loads('key = value\\\n')
pf._check()
assert dict(pf) == {"key": "value"}
assert pf.dumps() == 'key = value\\\n'
def test_propfile_add_after_trailing_escape_nl():
pf = PropertiesFile.loads('key = value\\\n')
pf._check()
pf["new"] = "old"
pf._check()
assert dict(pf) == {"key": "value", "new": "old"}
assert pf.dumps() == 'key = value\nnew=old\n'
def test_propfile_preserve_trailing_comment_escape_nl():
pf = PropertiesFile.loads('#key = value\\\n')
pf._check()
assert dict(pf) == {}
assert pf.dumps() == '#key = value\\\n'
def test_propfile_add_after_trailing_comment_escape_nl():
pf = PropertiesFile.loads('#key = value\\\n')
pf._check()
pf["new"] = "old"
pf._check()
assert dict(pf) == {"new": "old"}
assert pf.dumps() == '#key = value\\\nnew=old\n'
def test_propfile_get_nonstring_key():
pf = PropertiesFile({"key": "value", "apple": "zebra", "foo": "bar"})
with pytest.raises(TypeError) as excinfo:
pf[42]
assert str(excinfo.value) == \
'Keys & values of PropertiesFile instances must be strings'
def test_propfile_set_nonstring_key():
pf = PropertiesFile({"key": "value", "apple": "zebra", "foo": "bar"})
with pytest.raises(TypeError) as excinfo:
pf[42] = 'forty-two'
assert str(excinfo.value) == \
'Keys & values of PropertiesFile instances must be strings'
def test_propfile_set_nonstring_value():
pf = PropertiesFile({"key": "value", "apple": "zebra", "foo": "bar"})
with pytest.raises(TypeError) as excinfo:
pf['forty-two'] = 42
assert str(excinfo.value) == \
'Keys & values of PropertiesFile instances must be strings'
def test_propfile_del_nonstring_key():
pf = PropertiesFile({"key": "value", "apple": "zebra", "foo": "bar"})
with pytest.raises(TypeError) as excinfo:
del pf[42]
assert str(excinfo.value) == \
'Keys & values of PropertiesFile instances must be strings'
def test_propfile_from_nonstring_key():
with pytest.raises(TypeError) as excinfo:
PropertiesFile({"key": "value", 42: "forty-two"})
assert str(excinfo.value) == \
'Keys & values of PropertiesFile instances must be strings'
def test_propfile_from_nonstring_value():
with pytest.raises(TypeError) as excinfo:
PropertiesFile({"key": "value", "forty-two": 42})
assert str(excinfo.value) == \
'Keys & values of PropertiesFile instances must be strings'
def test_propfile_empty_setitem():
pf = PropertiesFile()
pf._check()
pf["key"] = "value"
pf._check()
assert len(pf) == 1
assert bool(pf)
assert dict(pf) == {"key": "value"}
assert list(pf) == ["key"]
assert list(reversed(pf)) == ["key"]
assert pf.dumps() == 'key=value\n'
def test_propfile_to_ordereddict():
pf = PropertiesFile.loads(INPUT)
pf._check()
assert OrderedDict(pf) == OrderedDict([
("foo", "second definition"),
("bar", "only definition"),
("key", "value"),
("zebra", "apple"),
])
@pytest.mark.parametrize('src,ts', [
('', None),
('#Thu Mar 16 17:06:52 EDT 2017\n', 'Thu Mar 16 17:06:52 EDT 2017'),
('!Thu Mar 16 17:06:52 EDT 2017\n', 'Thu Mar 16 17:06:52 EDT 2017'),
('\n \r#Thu Mar 16 17:06:52 EDT 2017\n', 'Thu Mar 16 17:06:52 EDT 2017'),
(INPUT, 'Thu Mar 16 17:06:52 EDT 2017'),
(
'# comment 1\n!comment 2\n# Thu Mar 16 17:06:52 EDT 2017\n',
' Thu Mar 16 17:06:52 EDT 2017',
),
('key=value\n#Thu Mar 16 17:06:52 EDT 2017\n', None),
(
'#Thu Mar 16 17:06:52 EDT 2017\n#Tue Feb 25 19:13:27 EST 2020\n',
'Thu Mar 16 17:06:52 EDT 2017',
),
])
def test_propfile_get_timestamp(src, ts):
pf = PropertiesFile.loads(src)
pf._check()
assert pf.timestamp == ts
@pytest.mark.parametrize('src,ts,ts2,result', [
(
'',
'Thu Mar 16 17:06:52 EDT 2017',
'Thu Mar 16 17:06:52 EDT 2017',
'#Thu Mar 16 17:06:52 EDT 2017\n',
),
('', None, None, ''),
('', False, None, ''),
('', '', None, '#\n'),
(
'key=value\n',
0,
'Wed Dec 31 19:00:00 EST 1969',
'#Wed Dec 31 19:00:00 EST 1969\nkey=value\n',
),
(
'key=value\n',
1234567890,
'Fri Feb 13 18:31:30 EST 2009',
'#Fri Feb 13 18:31:30 EST 2009\nkey=value\n',
),
(
'key=value\n',
datetime(2020, 3, 4, 15, 57, 41),
'Wed Mar 04 15:57:41 EST 2020',
'#Wed Mar 04 15:57:41 EST 2020\nkey=value\n',
),
(
'key=value\n',
        datetime(2020, 3, 4, 12, 57, 41, tzinfo=tzstr('PST8PDT,M4.1.0,M10.5.0')),
'Wed Mar 04 12:57:41 PST 2020',
'#Wed Mar 04 12:57:41 PST 2020\nkey=value\n',
),
('key=value\n', None, None, 'key=value\n'),
('key=value\n', False, None, 'key=value\n'),
('key=value\n', '', None, '#\nkey=value\n'),
('key=value\n', 'Not a timestamp', None, '#Not a timestamp\nkey=value\n'),
('key=value\n', 'Line 1\n', None, '#Line 1\n#\nkey=value\n'),
('key=value\n', 'Line 1\nLine 2', None, '#Line 1\n#Line 2\nkey=value\n'),
('key=value\n', 'Line 1\n#Line 2', None, '#Line 1\n#Line 2\nkey=value\n'),
('key=value\n', 'Line 1\n!Line 2', None, '#Line 1\n!Line 2\nkey=value\n'),
(
'#Comment\n'
'#Thu Mar 16 17:06:52 EDT 2017\n'
'#Comment 2\n'
'#Wed Mar 04 12:57:41 PST 2020\n'
'key=value\n',
1234567890,
'Fri Feb 13 18:31:30 EST 2009',
'#Comment\n'
'#Fri Feb 13 18:31:30 EST 2009\n'
'#Comment 2\n'
'#Wed Mar 04 12:57:41 PST 2020\n'
'key=value\n',
),
(
'#Comment\n'
'#Thu Mar 16 17:06:52 EDT 2017\n'
'#Comment 2\n'
'#Wed Mar 04 12:57:41 PST 2020\n'
'key=value\n',
None,
'Wed Mar 04 12:57:41 PST 2020',
'#Comment\n'
'#Comment 2\n'
'#Wed Mar 04 12:57:41 PST 2020\n'
'key=value\n',
),
(
'#Comment\n'
'#Thu Mar 16 17:06:52 EDT 2017\n'
'#Comment 2\n'
'#Wed Mar 04 12:57:41 PST 2020\n'
'key=value\n',
False,
'Wed Mar 04 12:57:41 PST 2020',
'#Comment\n'
'#Comment 2\n'
'#Wed Mar 04 12:57:41 PST 2020\n'
'key=value\n',
),
(
'#Comment\n'
'#Thu Mar 16 17:06:52 EDT 2017\n'
'#Comment 2\n'
'#Wed Mar 04 12:57:41 PST 2020\n'
'key=value\n',
'',
'Wed Mar 04 12:57:41 PST 2020',
'#Comment\n'
'#\n'
'#Comment 2\n'
'#Wed Mar 04 12:57:41 PST 2020\n'
'key=value\n',
),
(
'#Comment\n'
'#Thu Mar 16 17:06:52 EDT 2017\n'
'#Comment 2\n'
'#Wed Mar 04 12:57:41 PST 2020\n'
'key=value\n',
'Not a timestamp',
'Wed Mar 04 12:57:41 PST 2020',
'#Comment\n'
'#Not a timestamp\n'
'#Comment 2\n'
'#Wed Mar 04 12:57:41 PST 2020\n'
'key=value\n',
),
(
'#Comment\n'
'#Thu Mar 16 17:06:52 EDT 2017\n'
'#Comment 2\n'
'#Wed Mar 04 12:57:41 PST 2020\n'
'key=value\n',
'Line 1\n',
'Wed Mar 04 12:57:41 PST 2020',
'#Comment\n'
'#Line 1\n'
'#\n'
'#Comment 2\n'
'#Wed Mar 04 12:57:41 PST 2020\n'
'key=value\n',
),
(
'#Comment\n'
'#Thu Mar 16 17:06:52 EDT 2017\n'
'#Comment 2\n'
'#Wed Mar 04 12:57:41 PST 2020\n'
'key=value\n',
'Line 1\nLine 2',
'Wed Mar 04 12:57:41 PST 2020',
'#Comment\n'
'#Line 1\n'
'#Line 2\n'
'#Comment 2\n'
'#Wed Mar 04 12:57:41 PST 2020\n'
'key=value\n',
),
(
'#Comment\n'
'#Thu Mar 16 17:06:52 EDT 2017\n'
'#Comment 2\n'
'#Wed Mar 04 12:57:41 PST 2020\n'
'key=value\n',
'Line 1\n#Line 2',
'Wed Mar 04 12:57:41 PST 2020',
'#Comment\n'
'#Line 1\n'
'#Line 2\n'
'#Comment 2\n'
'#Wed Mar 04 12:57:41 PST 2020\n'
'key=value\n',
),
(
'#Comment\n'
'#Thu Mar 16 17:06:52 EDT 2017\n'
'#Comment 2\n'
'#Wed Mar 04 12:57:41 PST 2020\n'
'key=value\n',
'Line 1\n!Line 2',
'Wed Mar 04 12:57:41 PST 2020',
'#Comment\n'
'#Line 1\n'
'!Line 2\n'
'#Comment 2\n'
'#Wed Mar 04 12:57:41 PST 2020\n'
'key=value\n',
),
(
'#Comment\n'
'\n'
'#Comment 2\n'
'key=value\n',
1234567890,
'Fri Feb 13 18:31:30 EST 2009',
'#Comment\n'
'\n'
'#Comment 2\n'
'#Fri Feb 13 18:31:30 EST 2009\n'
'key=value\n',
),
])
def test_propfile_set_timestamp(src, ts, ts2, result):
pf = PropertiesFile.loads(src)
pf._check()
pf.timestamp = ts
pf._check()
assert pf.timestamp == ts2
assert pf.dumps() == result
def test_propfile_set_timestamp_now(fixed_timestamp):
pf = PropertiesFile.loads('key=value\n')
pf._check()
pf.timestamp = True
pf._check()
assert pf.timestamp == fixed_timestamp
assert pf.dumps() == '#' + fixed_timestamp + '\nkey=value\n'
@pytest.mark.parametrize('src,ts2,result', [
('', None, ''),
('#Thu Mar 16 17:06:52 EDT 2017\n', None, ''),
('\n \r#Thu Mar 16 17:06:52 EDT 2017\n', None, '\n \r'),
(
INPUT,
None,
'# A comment before the timestamp\n'
'# A comment after the timestamp\n'
'foo: first definition\n'
'bar=only definition\n'
'\n'
'# Comment between values\n'
'\n'
'key = value\n'
'\n'
'zebra \\\n'
' apple\n'
'foo : second definition\n'
'\n'
'# Comment at end of file\n'
),
(
'# comment 1\n!comment 2\n# Thu Mar 16 17:06:52 EDT 2017\n',
None,
'# comment 1\n!comment 2\n',
),
(
'key=value\n#Thu Mar 16 17:06:52 EDT 2017\n',
None,
'key=value\n#Thu Mar 16 17:06:52 EDT 2017\n',
),
(
'#Thu Mar 16 17:06:52 EDT 2017\n#Tue Feb 25 19:13:27 EST 2020\n',
'Tue Feb 25 19:13:27 EST 2020',
'#Tue Feb 25 19:13:27 EST 2020\n',
),
])
def test_propfile_delete_timestamp(src, ts2, result):
pf = PropertiesFile.loads(src)
pf._check()
del pf.timestamp
pf._check()
assert pf.timestamp == ts2
assert pf.dumps() == result
@pytest.mark.parametrize('src,c', [
('', None),
('#\n', ''),
('#\n#comment\n', '\ncomment'),
('#comment\n#\n', 'comment\n'),
(INPUT, ' A comment before the timestamp'),
(
'# comment 1\n!comment 2\n# Thu Mar 16 17:06:52 EDT 2017\n',
' comment 1\ncomment 2',
),
('# comment 1\n!comment 2\nkey=value\n', ' comment 1\ncomment 2'),
('# comment 1\r\n!comment 2\nkey=value\n', ' comment 1\ncomment 2'),
('# comment 1\r!comment 2\nkey=value\n', ' comment 1\ncomment 2'),
('# comment 1\n\t\r\n !comment 2\nkey=value\n', ' comment 1\ncomment 2'),
('# Thu Mar 16 17:06:52 EDT 2017\n# Comment\n', None),
('key=value\n# Comment\n', None),
])
def test_propfile_get_header_comment(src, c):
pf = PropertiesFile.loads(src)
pf._check()
assert pf.header_comment == c
@pytest.mark.parametrize('c,c2,csrc', [
(None, None, ''),
('', '', '#\n'),
('This is test text.', 'This is test text.', '#This is test text.\n'),
('Line 1\n', 'Line 1\n', '#Line 1\n#\n'),
('Line 1\nLine 2', 'Line 1\nLine 2', '#Line 1\n#Line 2\n'),
('Line 1\n#Line 2', 'Line 1\nLine 2', '#Line 1\n#Line 2\n'),
('Line 1\n!Line 2', 'Line 1\nLine 2', '#Line 1\n!Line 2\n'),
])
@pytest.mark.parametrize('part1', [
'',
'#This comment will be deleted.\n',
'#This will be deleted.\n!This, too\n',
'#This will be deleted.\n \r\n#And also that blank line in between.\n',
'\n\n#This and the blank lines above will be deleted.\n',
'#This and the blank lines below will be deleted.\n\n\n',
])
@pytest.mark.parametrize('part2', [
'',
'key=value\n',
    '#Thu Mar 16 17:06:52 EDT 2017\nkey=value\n',
    'key=value\n#Post-entry comment\n',
])
def test_propfile_set_header_comment(part1, part2, c, c2, csrc):
pf = PropertiesFile.loads(part1 + part2)
pf._check()
pf.header_comment = c
pf._check()
assert pf.header_comment == c2
assert pf.dumps() == csrc + part2
@pytest.mark.parametrize('part1', [
'',
'#This comment will be deleted.\n',
'#This will be deleted.\n!This, too\n',
'#This will be deleted.\n \r\n#And also that blank line in between.\n',
'\n\n#This and the blank lines above will be deleted.\n',
'#This and the blank lines below will be deleted.\n\n\n',
])
@pytest.mark.parametrize('part2', [
'',
'key=value\n',
    '#Thu Mar 16 17:06:52 EDT 2017\nkey=value\n',
    'key=value\n#Post-entry comment\n',
])
def test_propfile_delete_header_comment(part1, part2):
pf = PropertiesFile.loads(part1 + part2)
pf._check()
del pf.header_comment
pf._check()
assert pf.header_comment is None
assert pf.dumps() == part2
# preserving mixtures of line endings
|
import strawberry
from typing import Callable, List, Optional, Dict
import dataclasses
from . import utils, queries
@dataclasses.dataclass
class DjangoField:
resolver: Callable
field_name: Optional[str]
kwargs: dict
def resolve(self, is_relation, is_m2m):
resolver = queries.resolvers.get_resolver(self.resolver, self.field_name, is_relation, is_m2m)
field = strawberry.field(resolver, **self.kwargs)
return field
def field(resolver=None, field_name=None, **kwargs):
if resolver:
resolver = queries.resolvers.get_resolver(resolver)
return strawberry.field(resolver)
return DjangoField(resolver, field_name, kwargs)
mutation = field
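# Minimal usage sketch (hypothetical names: a strawberry type wrapping a Django model
# called Fruit, and a resolver function resolve_ripe_fruits defined elsewhere):
#
#   @strawberry.type
#   class Query:
#       fruits: List["Fruit"] = field(field_name="fruit_set")
#       ripe_fruits: List["Fruit"] = field(resolver=resolve_ripe_fruits)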
|
# Authored by Tiantian Liu, Taichi Graphics.
import math
import taichi as ti
ti.init(arch=ti.cpu)
# global control
paused = ti.field(ti.i32, ())
# gravitational constant 6.67408e-11, using 1 for simplicity
G = 1
# number of planets
N = 3000
# unit mass
m = 1
# galaxy size
galaxy_size = 0.4
# planet radius (for rendering)
planet_radius = 2
# init vel
init_vel = 120
# time-step size
h = 1e-4
# substepping
substepping = 10
# center of the screen
center = ti.Vector.field(2, ti.f32, ())
# pos, vel and force of the planets
# Nx2 vectors
pos = ti.Vector.field(2, ti.f32, N)
vel = ti.Vector.field(2, ti.f32, N)
force = ti.Vector.field(2, ti.f32, N)
@ti.kernel
def initialize():
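    # Scatter the planets over an annulus around the screen center and give each one
    # a tangential (counter-clockwise) initial velocity.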
center[None] = [0.5, 0.5]
for i in range(N):
theta = ti.random() * 2 * math.pi
r = (ti.sqrt(ti.random()) * 0.6 + 0.4) * galaxy_size
offset = r * ti.Vector([ti.cos(theta), ti.sin(theta)])
pos[i] = center[None] + offset
vel[i] = [-offset.y, offset.x]
vel[i] *= init_vel
@ti.kernel
def compute_force():
# clear force
for i in range(N):
force[i] = [0.0, 0.0]
# compute gravitational force
for i in range(N):
p = pos[i]
for j in range(N):
if i != j: # double the computation for a better memory footprint and load balance
diff = p - pos[j]
r = diff.norm(1e-5)
# gravitational force -(GMm / r^2) * (diff/r) for i
f = -G * m * m * (1.0 / r)**3 * diff
# assign to each particle
force[i] += f
@ti.kernel
def update():
dt = h / substepping
for i in range(N):
#symplectic euler
vel[i] += dt * force[i] / m
pos[i] += dt * vel[i]
gui = ti.GUI('N-body problem', (800, 800))
initialize()
while gui.running:
for e in gui.get_events(ti.GUI.PRESS):
if e.key in [ti.GUI.ESCAPE, ti.GUI.EXIT]:
exit()
elif e.key == 'r':
initialize()
elif e.key == ti.GUI.SPACE:
paused[None] = not paused[None]
if not paused[None]:
for i in range(substepping):
compute_force()
update()
gui.circles(pos.to_numpy(), color=0xffffff, radius=planet_radius)
gui.show()
|
import torch
from torch_geometric.nn.functional import gini
def test_gini():
w = torch.tensor(
[
[0., 0., 0., 0.],
[0., 0., 0., 1000.0]
]
)
assert torch.isclose(gini(w), torch.tensor(0.5))
|
import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
REQUIREMENTS = [i.strip() for i in open("requirements.txt").readlines()]
setuptools.setup(
name="saltbox",
version="0.1.3",
author="Björn Orri Saemundsson",
author_email="bjornorri@gmail.com",
description="Interface with your Salt Fiber Box router in python.",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/bjornorri/pysaltbox",
packages=setuptools.find_packages(),
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
install_requires=REQUIREMENTS,
python_requires='>=3.6',
)
|
#!/usr/bin/env python
import sys
import base64
def jwt_base64url_decode(s: str) -> bytes:
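    # base64url input may omit '=' padding; '=' * (-len(s) % 4) restores the length
    # to a multiple of four before decoding.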
return base64.urlsafe_b64decode(s + '='*(-len(s)%4))
def jwt_decode(jwt_str: str, pos: int = None,
               hex_sig: bool = False, verbose: bool = False) -> list:
def _decode(s: str, pos: int) -> str:
r = jwt_base64url_decode(s)
if pos == 2 and hex_sig is True:
return r.hex()
else:
return r.decode(errors="ignore")
#
tidy_str = jwt_str.replace(" ","").replace("\n","")
if pos is None:
ret = []
for i,s in enumerate(tidy_str.strip().split(".")):
if verbose:
print(f"INPUT: {s}")
ret.append(_decode(s, i))
return ret
else:
s = tidy_str.strip().split(".")[pos]
if verbose:
print(f"INPUT: {s} at #{pos}")
return [_decode(s, pos)]
if __name__ == "__main__":
from argparse import ArgumentParser
from argparse import ArgumentDefaultsHelpFormatter
ap = ArgumentParser(
description="JWT decoder.",
formatter_class=ArgumentDefaultsHelpFormatter)
ap.add_argument("jwt", nargs="*",
help="specify the JWT.")
ap.add_argument("-s", action="store_true", dest="single_shot",
help="specify the input is one JWT.")
ap.add_argument("-p", action="store", dest="position",
type=int, default=None,
help="specify the position to print binary mode. "
"e.g. -p1 means the header.")
ap.add_argument("-X", action="store_true", dest="raw_sig",
help="specify to show the result of signature in raw.")
ap.add_argument("-v", action="store_true", dest="verbose",
help="enable verbose mode.")
opt = ap.parse_args()
if opt.jwt:
for jwt_str in opt.jwt:
            for r in jwt_decode(jwt_str, pos=opt.position, hex_sig=opt.raw_sig,
                                verbose=opt.verbose):
print(r)
elif opt.single_shot:
        print(jwt_decode(sys.stdin.read(), pos=opt.position, hex_sig=opt.raw_sig,
                         verbose=opt.verbose))
else:
for jwt_str in sys.stdin:
            for r in jwt_decode(jwt_str, pos=opt.position, hex_sig=opt.raw_sig,
                                verbose=opt.verbose):
print(r)
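# Example invocations (the file name and the token text are placeholders, not values
# from this project):
#   $ python jwt_decode.py '<b64url-header>.<b64url-payload>.<b64url-signature>'
#   $ cat token.txt | python jwt_decode.py -s -p 0 -v   # decode only the header from stdin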
|
__author__ = 'Pavel Ageyev'
class Groups:
    def __init__(self, name, header, footer):
self.name=name
self.header=header
self.footer=footer
class Formfields:
def __init__(self, firstName, lastName, companyName, email, mobile):
self.firstName=firstName
self.lastName=lastName
self.companyName=companyName
self.email=email
self.mobile=mobile
|
from __future__ import unicode_literals
import frappe
from frappe import _
from frappe.utils import nowdate
def update_last(doc, method):
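    # Set age='old' on existing 'Vehicle Income' rows that match this document's vehicle and age.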
frappe.db.sql("""update `tabVehicle Income` set age='old' where vehicle=%s
and age=%s""", (doc.vehicle, doc.age))
|
__author__ = 'dstogsdill'
_name_ = 'pypalo'
"""A Python library for interacting with Palo Alto devices"""
#: Version info (major, minor, maintenance)
VERSION = (0, 0, 7)
__version__ = '%d.%d.%d' % VERSION[0:3]
from .pan import Panorama
__all__ = ['Panorama']
|
import numpy as np
import json
prefixes = ['softmax', 'fc', 'conv', 'max_pool', 'avg_pool', 'relu'] # TODO: ADD CONCAT
# Validate that every dictionary key is the name of a valid layer format
def validate_prefixes(names):
for name in names:
index = name.rfind('/')
if index != -1: section = name[index + 1:]
else: section = name
hasPrefix = False
for prefix in prefixes:
if (section.startswith(prefix)):
hasPrefix = True
break
if not hasPrefix:
return False
return True
# Prefix of the namespaces in the dictionary
prefix = '/home/peter/Desktop/'
# Max pool must have entry in dict mapped to a list of the format [windowHeight, windowWidth, strideHeight, strideWidth]
# Also must be named 'max_pool' + etc.
# Average pool must have entry in dict mapped to a list of the format [windowHeight, windowWidth, strideHeight, strideWidth].
# Also must be named 'avg_pool' + etc.
# Softmax must be named 'softmax' + etc.
# Fully connected layers must be named 'fc' + etc.
# Convolutional layers must have an entry in dict mapped to a list of the format [strideHeight, strideWidth, padding].
# Padding is an optional entry for when you want custom padding instead of 'SAME' padding.
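# Example dictionary (hypothetical namescopes; the values are illustrative only):
#   layer_params = {
#       'conv1': [1, 1],            # strideHeight, strideWidth (default padding)
#       'conv2': [2, 2, 0],         # strideHeight, strideWidth, explicit padding
#       'max_pool1': [2, 2, 2, 2],  # windowHeight, windowWidth, strideHeight, strideWidth
#       'avg_pool1': [2, 2, 2, 2],  # windowHeight, windowWidth, strideHeight, strideWidth
#   }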
def convert_separate(graph, namescopes, dict, session, channels, height, width):
if not validate_prefixes(namescopes):
return None
# Create a model specification file named "input" that specifies input tensor parameters
json_object = {}
json_object['num_input_channels'] = channels
json_object['input_height'] = height
json_object['input_width'] = width
with open(prefix + 'input', 'w') as outfile:
json.dump(json_object, outfile)
outfile.close()
counter = 0
# Create a model specification file for each layer in the network
for namescope in namescopes:
counter += 1
index = namescope.rfind('/')
if index != -1: section = namescope[index + 1:]
else: section = namescope
print(section)
layer = {}
if section.startswith(prefixes[0]):
# If layer is softmax
layer['name'] = 'softmax'
elif section.startswith(prefixes[1]) and namescope not in dict:
# If layer is fully connected
layer['name'] = 'fc'
for variable in graph.get_collection('trainable_variables', namescope):
name = variable.name[len(namescope) + 1:]
if name.startswith('weight'):
weight = session.run(variable)
layer['weights'] = weight.tolist()
if name.startswith('bias'):
bias = session.run(variable)
layer['biases'] = bias.tolist()
layer['num_outputs'] = len(bias)
elif section.startswith(prefixes[2]) or (namescope in dict and (len(dict[namescope]) == 2 or len(dict[namescope]) == 3)):
# If layer is convolutional
layer['name'] = 'conv'
for variable in graph.get_collection('trainable_variables', namescope):
name = variable.name[len(namescope) + 1:]
if name.startswith('weight'):
weight = session.run(variable)
shape = weight.shape
layer['weights_hwio'] = np.transpose(weight, (3,2,0,1)).tolist() # Rearrange order to be compatible with TensorRT
layer['filter_height'] = shape[0]
layer['filter_width'] = shape[1]
layer['out_maps'] = shape[3]
if name.startswith('bias'):
bias = session.run(variable)
layer['biases'] = bias.tolist()
layer['num_outputs'] = len(bias)
properties = dict[namescope]
layer['stride_height'] = properties[0]
layer['stride_width'] = properties[1]
if (len(properties) == 3): layer['padding'] = properties[2]
else: layer['padding'] = -1
print(layer['padding'])
elif section.startswith(prefixes[3]):
# If layer is max pool
layer['name'] = 'max_pool'
properties = dict[namescope]
layer['window_height'] = properties[0]
layer['window_width'] = properties[1]
layer['stride_height'] = properties[2]
layer['stride_width'] = properties[3]
elif section.startswith(prefixes[4]):
# If layer is average pool
layer['name'] = 'avg_pool'
properties = dict[namescope]
layer['window_height'] = properties[0]
layer['window_width'] = properties[1]
layer['stride_height'] = properties[2]
layer['stride_width'] = properties[3]
elif section.startswith(prefixes[5]):
# If layer is a ReLU activation
layer['name'] = 'relu'
with open(prefix + str(counter), 'w') as outfile:
json.dump(layer, outfile)
outfile.close()
def convert_entire(graph, namescopes, dict, session, channels, height, width):
if not validate_prefixes(namescopes):
return None
    # Record the input tensor parameters in the combined model specification object
json_object = {}
json_object['num_input_channels'] = channels
json_object['input_height'] = height
json_object['input_width'] = width
json_object['layers'] = []
    # Append a specification for each layer in the network to the combined object
for namescope in namescopes:
index = namescope.rfind('/')
if index != -1: section = namescope[index + 1:]
else: section = namescope
print(section)
layer = {}
if section.startswith(prefixes[0]):
# If layer is softmax
layer['name'] = 'softmax'
elif section.startswith(prefixes[1]) and namescope not in dict:
# If layer is fully connected
layer['name'] = 'fc'
for variable in graph.get_collection('trainable_variables', namescope):
name = variable.name[len(namescope) + 1:]
if name.startswith('weight'):
weight = session.run(variable)
layer['weights'] = weight.tolist()
if name.startswith('bias'):
bias = session.run(variable)
layer['biases'] = bias.tolist()
layer['num_outputs'] = len(bias)
elif section.startswith(prefixes[2]) or (namescope in dict and (len(dict[namescope]) == 2 or len(dict[namescope]) == 3)):
# If layer is convolutional
layer['name'] = 'conv'
for variable in graph.get_collection('trainable_variables', namescope):
name = variable.name[len(namescope) + 1:]
if name.startswith('weight'):
weight = session.run(variable)
shape = weight.shape
layer['weights_hwio'] = np.transpose(weight, (3,2,0,1)).tolist() # Rearrange order to be compatible with TensorRT
layer['filter_height'] = shape[0]
layer['filter_width'] = shape[1]
layer['out_maps'] = shape[3]
if name.startswith('bias'):
bias = session.run(variable)
layer['biases'] = bias.tolist()
layer['num_outputs'] = len(bias)
properties = dict[namescope]
layer['stride_height'] = properties[0]
layer['stride_width'] = properties[1]
if (len(properties) == 3): layer['padding'] = properties[2]
else: layer['padding'] = -1
print(layer['padding'])
elif section.startswith(prefixes[3]):
# If layer is max pool
layer['name'] = 'max_pool'
properties = dict[namescope]
layer['window_height'] = properties[0]
layer['window_width'] = properties[1]
layer['stride_height'] = properties[2]
layer['stride_width'] = properties[3]
elif section.startswith(prefixes[4]):
# If layer is average pool
layer['name'] = 'avg_pool'
properties = dict[namescope]
layer['window_height'] = properties[0]
layer['window_width'] = properties[1]
layer['stride_height'] = properties[2]
layer['stride_width'] = properties[3]
elif section.startswith(prefixes[5]):
# If layer is a ReLU activation
layer['name'] = 'relu'
json_object['layers'].append(layer)
with open("mnist_final", 'w') as outfile:
json.dump(json_object, outfile)
outfile.close()
|
"""Unit test package for multi_notifier."""
|
default_app_config = 'glitter.blocks.form.apps.FormConfig'
|
from StringIO import StringIO
try:
import json
except ImportError:
try:
import simplejson as json
except ImportError:
json = None
import unittest
from robot.utils.asserts import assert_equals, assert_raises
from robot.htmldata.jsonwriter import JsonDumper
class TestJsonDumper(unittest.TestCase):
def _dump(self, data):
output = StringIO()
JsonDumper(output).dump(data)
return output.getvalue()
def _test(self, data, expected):
assert_equals(self._dump(data), expected)
def test_dump_string(self):
self._test('', '""')
self._test('hello world', '"hello world"')
self._test('123', '"123"')
def test_dump_non_ascii_string(self):
self._test(u'hyv\xe4', u'"hyv\xe4"'.encode('UTF-8'))
def test_escape_string(self):
self._test('"-\\-\n-\t-\r', '"\\"-\\\\-\\n-\\t-\\r"')
def test_escape_closing_tags(self):
self._test('<script><></script>', '"<script><>\\x3c/script>"')
def test_dump_boolean(self):
self._test(True, 'true')
self._test(False, 'false')
def test_dump_integer(self):
self._test(12, '12')
self._test(-12312, '-12312')
self._test(0, '0')
self._test(1, '1')
def test_dump_long(self):
self._test(12345678901234567890L, '12345678901234567890')
self._test(0L, '0')
def test_dump_list(self):
self._test([1, 2, True, 'hello', 'world'], '[1,2,true,"hello","world"]')
self._test(['*nes"ted', [1, 2, [4]]], '["*nes\\"ted",[1,2,[4]]]')
def test_dump_tuple(self):
self._test(('hello', '*world'), '["hello","*world"]')
self._test((1, 2, (3, 4)), '[1,2,[3,4]]')
def test_dump_dictionary(self):
self._test({'key': 1}, '{"key":1}')
self._test({'nested': [-1L, {42: None}]}, '{"nested":[-1,{42:null}]}')
def test_dictionaries_are_sorted(self):
self._test({'key': 1, 'hello': ['wor', 'ld'], 'z': 'a', 'a': 'z'},
'{"a":"z","hello":["wor","ld"],"key":1,"z":"a"}')
def test_dump_none(self):
self._test(None, 'null')
def test_json_dump_mapping(self):
output = StringIO()
dumper = JsonDumper(output)
mapped1 = object()
mapped2 = 'string'
dumper.dump([mapped1, [mapped2, {mapped2: mapped1}]],
mapping={mapped1: '1', mapped2: 'a'})
assert_equals(output.getvalue(), '[1,[a,{a:1}]]')
assert_raises(ValueError, dumper.dump, [mapped1])
if json:
def test_against_standard_json(self):
data = ['\\\'\"\r\t\n' + ''.join(chr(i) for i in xrange(32, 127)),
{'A': 1, 'b': 2, 'C': ()}, None, (1, 2, 3)]
try:
expected = json.dumps(data, sort_keys=True,
separators=(',', ':'))
except UnicodeError:
return # http://ironpython.codeplex.com/workitem/32331
self._test(data, expected)
if __name__ == '__main__':
unittest.main()
|
"""A module for defining WORKSPACE dependencies required for rules_foreign_cc"""
load("//for_workspace:repositories.bzl", "repositories")
load("//toolchains:toolchains.bzl", "prebuilt_toolchains", "preinstalled_toolchains")
load(
"//tools/build_defs/shell_toolchain/toolchains:ws_defs.bzl",
shell_toolchain_workspace_initalization = "workspace_part",
)
# buildifier: disable=unnamed-macro
def rules_foreign_cc_dependencies(
native_tools_toolchains = [],
register_default_tools = True,
cmake_version = "3.19.6",
ninja_version = "1.10.2",
register_preinstalled_tools = True,
additional_shell_toolchain_mappings = [],
additional_shell_toolchain_package = None):
"""Call this function from the WORKSPACE file to initialize rules_foreign_cc \
    dependencies and let necessary code generation happen \
(Code generation is needed to support different variants of the C++ Starlark API.).
Args:
native_tools_toolchains: pass the toolchains for toolchain types
'@rules_foreign_cc//tools/build_defs:cmake_toolchain' and
'@rules_foreign_cc//tools/build_defs:ninja_toolchain' with the needed platform constraints.
If you do not pass anything, registered default toolchains will be selected (see below).
register_default_tools: If True, the cmake and ninja toolchains, calling corresponding
preinstalled binaries by name (cmake, ninja) will be registered after
'native_tools_toolchains' without any platform constraints. The default is True.
cmake_version: The target version of the default cmake toolchain if `register_default_tools`
is set to `True`.
ninja_version: The target version of the default ninja toolchain if `register_default_tools`
is set to `True`.
        register_preinstalled_tools: If True, toolchains will be registered for the build tools
            natively installed on the exec host
additional_shell_toolchain_mappings: Mappings of the shell toolchain functions to
execution and target platforms constraints. Similar to what defined in
@rules_foreign_cc//tools/build_defs/shell_toolchain/toolchains:toolchain_mappings.bzl
in the TOOLCHAIN_MAPPINGS list. Please refer to example in @rules_foreign_cc//toolchain_examples.
additional_shell_toolchain_package: A package under which additional toolchains, referencing
            the generated data for the passed additional_shell_toolchain_mappings, will be defined.
This value is needed since register_toolchains() is called for these toolchains.
Please refer to example in @rules_foreign_cc//toolchain_examples.
"""
repositories()
shell_toolchain_workspace_initalization(
additional_shell_toolchain_mappings,
additional_shell_toolchain_package,
)
native.register_toolchains(*native_tools_toolchains)
if register_default_tools:
prebuilt_toolchains(cmake_version, ninja_version)
if register_preinstalled_tools:
preinstalled_toolchains()
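# Typical WORKSPACE usage (a sketch; load this function from wherever this .bzl file
# lives in your setup, after the rules_foreign_cc repository itself has been declared):
#
#   load("<label of this file>", "rules_foreign_cc_dependencies")
#   rules_foreign_cc_dependencies()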
|
# Generated by Django 2.1.1 on 2018-09-16 04:36
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('partners', '0002_auto_20180915_2328'),
]
operations = [
migrations.AlterField(
model_name='communitypartner',
name='college',
field=models.CharField(blank=True, max_length=50),
),
migrations.AlterField(
model_name='communitypartner',
name='department',
field=models.CharField(blank=True, max_length=30),
),
migrations.AlterField(
model_name='communitypartner',
name='k12_level',
field=models.CharField(blank=True, max_length=20),
),
]
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from ._enums import *
__all__ = [
'AKSArgs',
'AKSPropertiesArgs',
'AksNetworkingConfigurationArgs',
'AmlComputeArgs',
'AmlComputePropertiesArgs',
'AssignedUserArgs',
'ComputeInstanceArgs',
'ComputeInstancePropertiesArgs',
'ComputeInstanceSshSettingsArgs',
'ContainerResourceRequirementsArgs',
'CosmosDbSettingsArgs',
'CreateServiceRequestEnvironmentImageRequestArgs',
'CreateServiceRequestKeysArgs',
'DataFactoryArgs',
'DataLakeAnalyticsArgs',
'DataLakeAnalyticsPropertiesArgs',
'DatabricksArgs',
'DatabricksPropertiesArgs',
'DatasetReferenceArgs',
'EncryptionPropertyArgs',
'EnvironmentImageRequestEnvironmentArgs',
'EnvironmentImageRequestEnvironmentReferenceArgs',
'HDInsightArgs',
'HDInsightPropertiesArgs',
'IdentityArgs',
'IdentityForCmkArgs',
'ImageAssetArgs',
'KeyVaultPropertiesArgs',
'ModelArgs',
'ModelDockerSectionBaseImageRegistryArgs',
'ModelEnvironmentDefinitionDockerArgs',
'ModelEnvironmentDefinitionPythonArgs',
'ModelEnvironmentDefinitionRArgs',
'ModelEnvironmentDefinitionSparkArgs',
'PersonalComputeInstanceSettingsArgs',
'PrivateLinkServiceConnectionStateArgs',
'RCranPackageArgs',
'RGitHubPackageArgs',
'ResourceIdArgs',
'ScaleSettingsArgs',
'ScriptReferenceArgs',
'ScriptsToExecuteArgs',
'ServiceManagedResourcesSettingsArgs',
'SetupScriptsArgs',
'SharedPrivateLinkResourceArgs',
'SkuArgs',
'SparkMavenPackageArgs',
'SslConfigurationArgs',
'UserAccountCredentialsArgs',
'VirtualMachineArgs',
'VirtualMachineImageArgs',
'VirtualMachinePropertiesArgs',
'VirtualMachineSshCredentialsArgs',
]
@pulumi.input_type
class AKSArgs:
def __init__(__self__, *,
compute_type: pulumi.Input[str],
compute_location: Optional[pulumi.Input[str]] = None,
description: Optional[pulumi.Input[str]] = None,
properties: Optional[pulumi.Input['AKSPropertiesArgs']] = None,
resource_id: Optional[pulumi.Input[str]] = None):
"""
A Machine Learning compute based on AKS.
:param pulumi.Input[str] compute_type: The type of compute
Expected value is 'AKS'.
:param pulumi.Input[str] compute_location: Location for the underlying compute
:param pulumi.Input[str] description: The description of the Machine Learning compute.
:param pulumi.Input['AKSPropertiesArgs'] properties: AKS properties
:param pulumi.Input[str] resource_id: ARM resource id of the underlying compute
"""
pulumi.set(__self__, "compute_type", 'AKS')
if compute_location is not None:
pulumi.set(__self__, "compute_location", compute_location)
if description is not None:
pulumi.set(__self__, "description", description)
if properties is not None:
pulumi.set(__self__, "properties", properties)
if resource_id is not None:
pulumi.set(__self__, "resource_id", resource_id)
@property
@pulumi.getter(name="computeType")
def compute_type(self) -> pulumi.Input[str]:
"""
The type of compute
Expected value is 'AKS'.
"""
return pulumi.get(self, "compute_type")
@compute_type.setter
def compute_type(self, value: pulumi.Input[str]):
pulumi.set(self, "compute_type", value)
@property
@pulumi.getter(name="computeLocation")
def compute_location(self) -> Optional[pulumi.Input[str]]:
"""
Location for the underlying compute
"""
return pulumi.get(self, "compute_location")
@compute_location.setter
def compute_location(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "compute_location", value)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
"""
The description of the Machine Learning compute.
"""
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@property
@pulumi.getter
def properties(self) -> Optional[pulumi.Input['AKSPropertiesArgs']]:
"""
AKS properties
"""
return pulumi.get(self, "properties")
@properties.setter
def properties(self, value: Optional[pulumi.Input['AKSPropertiesArgs']]):
pulumi.set(self, "properties", value)
@property
@pulumi.getter(name="resourceId")
def resource_id(self) -> Optional[pulumi.Input[str]]:
"""
ARM resource id of the underlying compute
"""
return pulumi.get(self, "resource_id")
@resource_id.setter
def resource_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "resource_id", value)
@pulumi.input_type
class AKSPropertiesArgs:
def __init__(__self__, *,
agent_count: Optional[pulumi.Input[int]] = None,
agent_vm_size: Optional[pulumi.Input[str]] = None,
aks_networking_configuration: Optional[pulumi.Input['AksNetworkingConfigurationArgs']] = None,
cluster_fqdn: Optional[pulumi.Input[str]] = None,
cluster_purpose: Optional[pulumi.Input[Union[str, 'ClusterPurpose']]] = None,
ssl_configuration: Optional[pulumi.Input['SslConfigurationArgs']] = None):
"""
AKS properties
:param pulumi.Input[int] agent_count: Number of agents
:param pulumi.Input[str] agent_vm_size: Agent virtual machine size
:param pulumi.Input['AksNetworkingConfigurationArgs'] aks_networking_configuration: AKS networking configuration for vnet
        :param pulumi.Input[str] cluster_fqdn: Cluster fully qualified domain name
:param pulumi.Input[Union[str, 'ClusterPurpose']] cluster_purpose: Intended usage of the cluster
:param pulumi.Input['SslConfigurationArgs'] ssl_configuration: SSL configuration
"""
if agent_count is not None:
pulumi.set(__self__, "agent_count", agent_count)
if agent_vm_size is not None:
pulumi.set(__self__, "agent_vm_size", agent_vm_size)
if aks_networking_configuration is not None:
pulumi.set(__self__, "aks_networking_configuration", aks_networking_configuration)
if cluster_fqdn is not None:
pulumi.set(__self__, "cluster_fqdn", cluster_fqdn)
if cluster_purpose is None:
cluster_purpose = 'FastProd'
if cluster_purpose is not None:
pulumi.set(__self__, "cluster_purpose", cluster_purpose)
if ssl_configuration is not None:
pulumi.set(__self__, "ssl_configuration", ssl_configuration)
@property
@pulumi.getter(name="agentCount")
def agent_count(self) -> Optional[pulumi.Input[int]]:
"""
Number of agents
"""
return pulumi.get(self, "agent_count")
@agent_count.setter
def agent_count(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "agent_count", value)
@property
@pulumi.getter(name="agentVmSize")
def agent_vm_size(self) -> Optional[pulumi.Input[str]]:
"""
Agent virtual machine size
"""
return pulumi.get(self, "agent_vm_size")
@agent_vm_size.setter
def agent_vm_size(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "agent_vm_size", value)
@property
@pulumi.getter(name="aksNetworkingConfiguration")
def aks_networking_configuration(self) -> Optional[pulumi.Input['AksNetworkingConfigurationArgs']]:
"""
AKS networking configuration for vnet
"""
return pulumi.get(self, "aks_networking_configuration")
@aks_networking_configuration.setter
def aks_networking_configuration(self, value: Optional[pulumi.Input['AksNetworkingConfigurationArgs']]):
pulumi.set(self, "aks_networking_configuration", value)
@property
@pulumi.getter(name="clusterFqdn")
def cluster_fqdn(self) -> Optional[pulumi.Input[str]]:
"""
        Cluster fully qualified domain name
"""
return pulumi.get(self, "cluster_fqdn")
@cluster_fqdn.setter
def cluster_fqdn(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "cluster_fqdn", value)
@property
@pulumi.getter(name="clusterPurpose")
def cluster_purpose(self) -> Optional[pulumi.Input[Union[str, 'ClusterPurpose']]]:
"""
Intended usage of the cluster
"""
return pulumi.get(self, "cluster_purpose")
@cluster_purpose.setter
def cluster_purpose(self, value: Optional[pulumi.Input[Union[str, 'ClusterPurpose']]]):
pulumi.set(self, "cluster_purpose", value)
@property
@pulumi.getter(name="sslConfiguration")
def ssl_configuration(self) -> Optional[pulumi.Input['SslConfigurationArgs']]:
"""
SSL configuration
"""
return pulumi.get(self, "ssl_configuration")
@ssl_configuration.setter
def ssl_configuration(self, value: Optional[pulumi.Input['SslConfigurationArgs']]):
pulumi.set(self, "ssl_configuration", value)
@pulumi.input_type
class AksNetworkingConfigurationArgs:
def __init__(__self__, *,
dns_service_ip: Optional[pulumi.Input[str]] = None,
docker_bridge_cidr: Optional[pulumi.Input[str]] = None,
service_cidr: Optional[pulumi.Input[str]] = None,
subnet_id: Optional[pulumi.Input[str]] = None):
"""
Advance configuration for AKS networking
:param pulumi.Input[str] dns_service_ip: An IP address assigned to the Kubernetes DNS service. It must be within the Kubernetes service address range specified in serviceCidr.
:param pulumi.Input[str] docker_bridge_cidr: A CIDR notation IP range assigned to the Docker bridge network. It must not overlap with any Subnet IP ranges or the Kubernetes service address range.
:param pulumi.Input[str] service_cidr: A CIDR notation IP range from which to assign service cluster IPs. It must not overlap with any Subnet IP ranges.
:param pulumi.Input[str] subnet_id: Virtual network subnet resource ID the compute nodes belong to
"""
if dns_service_ip is not None:
pulumi.set(__self__, "dns_service_ip", dns_service_ip)
if docker_bridge_cidr is not None:
pulumi.set(__self__, "docker_bridge_cidr", docker_bridge_cidr)
if service_cidr is not None:
pulumi.set(__self__, "service_cidr", service_cidr)
if subnet_id is not None:
pulumi.set(__self__, "subnet_id", subnet_id)
@property
@pulumi.getter(name="dnsServiceIP")
def dns_service_ip(self) -> Optional[pulumi.Input[str]]:
"""
An IP address assigned to the Kubernetes DNS service. It must be within the Kubernetes service address range specified in serviceCidr.
"""
return pulumi.get(self, "dns_service_ip")
@dns_service_ip.setter
def dns_service_ip(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "dns_service_ip", value)
@property
@pulumi.getter(name="dockerBridgeCidr")
def docker_bridge_cidr(self) -> Optional[pulumi.Input[str]]:
"""
A CIDR notation IP range assigned to the Docker bridge network. It must not overlap with any Subnet IP ranges or the Kubernetes service address range.
"""
return pulumi.get(self, "docker_bridge_cidr")
@docker_bridge_cidr.setter
def docker_bridge_cidr(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "docker_bridge_cidr", value)
@property
@pulumi.getter(name="serviceCidr")
def service_cidr(self) -> Optional[pulumi.Input[str]]:
"""
A CIDR notation IP range from which to assign service cluster IPs. It must not overlap with any Subnet IP ranges.
"""
return pulumi.get(self, "service_cidr")
@service_cidr.setter
def service_cidr(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "service_cidr", value)
@property
@pulumi.getter(name="subnetId")
def subnet_id(self) -> Optional[pulumi.Input[str]]:
"""
Virtual network subnet resource ID the compute nodes belong to
"""
return pulumi.get(self, "subnet_id")
@subnet_id.setter
def subnet_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "subnet_id", value)
@pulumi.input_type
class AmlComputeArgs:
def __init__(__self__, *,
compute_type: pulumi.Input[str],
compute_location: Optional[pulumi.Input[str]] = None,
description: Optional[pulumi.Input[str]] = None,
properties: Optional[pulumi.Input['AmlComputePropertiesArgs']] = None,
resource_id: Optional[pulumi.Input[str]] = None):
"""
An Azure Machine Learning compute.
:param pulumi.Input[str] compute_type: The type of compute
Expected value is 'AmlCompute'.
:param pulumi.Input[str] compute_location: Location for the underlying compute
:param pulumi.Input[str] description: The description of the Machine Learning compute.
:param pulumi.Input['AmlComputePropertiesArgs'] properties: AML Compute properties
:param pulumi.Input[str] resource_id: ARM resource id of the underlying compute
"""
pulumi.set(__self__, "compute_type", 'AmlCompute')
if compute_location is not None:
pulumi.set(__self__, "compute_location", compute_location)
if description is not None:
pulumi.set(__self__, "description", description)
if properties is not None:
pulumi.set(__self__, "properties", properties)
if resource_id is not None:
pulumi.set(__self__, "resource_id", resource_id)
@property
@pulumi.getter(name="computeType")
def compute_type(self) -> pulumi.Input[str]:
"""
The type of compute
Expected value is 'AmlCompute'.
"""
return pulumi.get(self, "compute_type")
@compute_type.setter
def compute_type(self, value: pulumi.Input[str]):
pulumi.set(self, "compute_type", value)
@property
@pulumi.getter(name="computeLocation")
def compute_location(self) -> Optional[pulumi.Input[str]]:
"""
Location for the underlying compute
"""
return pulumi.get(self, "compute_location")
@compute_location.setter
def compute_location(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "compute_location", value)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
"""
The description of the Machine Learning compute.
"""
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@property
@pulumi.getter
def properties(self) -> Optional[pulumi.Input['AmlComputePropertiesArgs']]:
"""
AML Compute properties
"""
return pulumi.get(self, "properties")
@properties.setter
def properties(self, value: Optional[pulumi.Input['AmlComputePropertiesArgs']]):
pulumi.set(self, "properties", value)
@property
@pulumi.getter(name="resourceId")
def resource_id(self) -> Optional[pulumi.Input[str]]:
"""
ARM resource id of the underlying compute
"""
return pulumi.get(self, "resource_id")
@resource_id.setter
def resource_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "resource_id", value)
@pulumi.input_type
class AmlComputePropertiesArgs:
def __init__(__self__, *,
enable_node_public_ip: Optional[pulumi.Input[bool]] = None,
isolated_network: Optional[pulumi.Input[bool]] = None,
os_type: Optional[pulumi.Input[Union[str, 'OsType']]] = None,
remote_login_port_public_access: Optional[pulumi.Input[Union[str, 'RemoteLoginPortPublicAccess']]] = None,
scale_settings: Optional[pulumi.Input['ScaleSettingsArgs']] = None,
subnet: Optional[pulumi.Input['ResourceIdArgs']] = None,
user_account_credentials: Optional[pulumi.Input['UserAccountCredentialsArgs']] = None,
virtual_machine_image: Optional[pulumi.Input['VirtualMachineImageArgs']] = None,
vm_priority: Optional[pulumi.Input[Union[str, 'VmPriority']]] = None,
vm_size: Optional[pulumi.Input[str]] = None):
"""
AML Compute properties
        :param pulumi.Input[bool] enable_node_public_ip: Enable or disable node public IP address provisioning. Possible values are: true - Indicates that the compute nodes will have public IPs provisioned. false - Indicates that the compute nodes will have a private endpoint and no public IPs.
:param pulumi.Input[bool] isolated_network: Network is isolated or not
:param pulumi.Input[Union[str, 'OsType']] os_type: Compute OS Type
        :param pulumi.Input[Union[str, 'RemoteLoginPortPublicAccess']] remote_login_port_public_access: State of the public SSH port. Possible values are: Disabled - Indicates that the public ssh port is closed on all nodes of the cluster. Enabled - Indicates that the public ssh port is open on all nodes of the cluster. NotSpecified - Indicates that the public ssh port is closed on all nodes of the cluster if VNet is defined, else is open on all public nodes. It can be default only during cluster creation time; after creation it will be either enabled or disabled.
:param pulumi.Input['ScaleSettingsArgs'] scale_settings: Scale settings for AML Compute
:param pulumi.Input['ResourceIdArgs'] subnet: Virtual network subnet resource ID the compute nodes belong to.
:param pulumi.Input['UserAccountCredentialsArgs'] user_account_credentials: Credentials for an administrator user account that will be created on each compute node.
:param pulumi.Input['VirtualMachineImageArgs'] virtual_machine_image: Virtual Machine image for AML Compute - windows only
:param pulumi.Input[Union[str, 'VmPriority']] vm_priority: Virtual Machine priority
:param pulumi.Input[str] vm_size: Virtual Machine Size
"""
if enable_node_public_ip is None:
enable_node_public_ip = True
if enable_node_public_ip is not None:
pulumi.set(__self__, "enable_node_public_ip", enable_node_public_ip)
if isolated_network is not None:
pulumi.set(__self__, "isolated_network", isolated_network)
if os_type is None:
os_type = 'Linux'
if os_type is not None:
pulumi.set(__self__, "os_type", os_type)
if remote_login_port_public_access is None:
remote_login_port_public_access = 'NotSpecified'
if remote_login_port_public_access is not None:
pulumi.set(__self__, "remote_login_port_public_access", remote_login_port_public_access)
if scale_settings is not None:
pulumi.set(__self__, "scale_settings", scale_settings)
if subnet is not None:
pulumi.set(__self__, "subnet", subnet)
if user_account_credentials is not None:
pulumi.set(__self__, "user_account_credentials", user_account_credentials)
if virtual_machine_image is not None:
pulumi.set(__self__, "virtual_machine_image", virtual_machine_image)
if vm_priority is not None:
pulumi.set(__self__, "vm_priority", vm_priority)
if vm_size is not None:
pulumi.set(__self__, "vm_size", vm_size)
@property
@pulumi.getter(name="enableNodePublicIp")
def enable_node_public_ip(self) -> Optional[pulumi.Input[bool]]:
"""
        Enable or disable node public IP address provisioning. Possible values are: true - Indicates that the compute nodes will have public IPs provisioned. false - Indicates that the compute nodes will have a private endpoint and no public IPs.
"""
return pulumi.get(self, "enable_node_public_ip")
@enable_node_public_ip.setter
def enable_node_public_ip(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "enable_node_public_ip", value)
@property
@pulumi.getter(name="isolatedNetwork")
def isolated_network(self) -> Optional[pulumi.Input[bool]]:
"""
Network is isolated or not
"""
return pulumi.get(self, "isolated_network")
@isolated_network.setter
def isolated_network(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "isolated_network", value)
@property
@pulumi.getter(name="osType")
def os_type(self) -> Optional[pulumi.Input[Union[str, 'OsType']]]:
"""
Compute OS Type
"""
return pulumi.get(self, "os_type")
@os_type.setter
def os_type(self, value: Optional[pulumi.Input[Union[str, 'OsType']]]):
pulumi.set(self, "os_type", value)
@property
@pulumi.getter(name="remoteLoginPortPublicAccess")
def remote_login_port_public_access(self) -> Optional[pulumi.Input[Union[str, 'RemoteLoginPortPublicAccess']]]:
"""
        State of the public SSH port. Possible values are: Disabled - Indicates that the public ssh port is closed on all nodes of the cluster. Enabled - Indicates that the public ssh port is open on all nodes of the cluster. NotSpecified - Indicates that the public ssh port is closed on all nodes of the cluster if VNet is defined, else is open on all public nodes. It can be default only during cluster creation time; after creation it will be either enabled or disabled.
"""
return pulumi.get(self, "remote_login_port_public_access")
@remote_login_port_public_access.setter
def remote_login_port_public_access(self, value: Optional[pulumi.Input[Union[str, 'RemoteLoginPortPublicAccess']]]):
pulumi.set(self, "remote_login_port_public_access", value)
@property
@pulumi.getter(name="scaleSettings")
def scale_settings(self) -> Optional[pulumi.Input['ScaleSettingsArgs']]:
"""
Scale settings for AML Compute
"""
return pulumi.get(self, "scale_settings")
@scale_settings.setter
def scale_settings(self, value: Optional[pulumi.Input['ScaleSettingsArgs']]):
pulumi.set(self, "scale_settings", value)
@property
@pulumi.getter
def subnet(self) -> Optional[pulumi.Input['ResourceIdArgs']]:
"""
Virtual network subnet resource ID the compute nodes belong to.
"""
return pulumi.get(self, "subnet")
@subnet.setter
def subnet(self, value: Optional[pulumi.Input['ResourceIdArgs']]):
pulumi.set(self, "subnet", value)
@property
@pulumi.getter(name="userAccountCredentials")
def user_account_credentials(self) -> Optional[pulumi.Input['UserAccountCredentialsArgs']]:
"""
Credentials for an administrator user account that will be created on each compute node.
"""
return pulumi.get(self, "user_account_credentials")
@user_account_credentials.setter
def user_account_credentials(self, value: Optional[pulumi.Input['UserAccountCredentialsArgs']]):
pulumi.set(self, "user_account_credentials", value)
@property
@pulumi.getter(name="virtualMachineImage")
def virtual_machine_image(self) -> Optional[pulumi.Input['VirtualMachineImageArgs']]:
"""
Virtual Machine image for AML Compute - windows only
"""
return pulumi.get(self, "virtual_machine_image")
@virtual_machine_image.setter
def virtual_machine_image(self, value: Optional[pulumi.Input['VirtualMachineImageArgs']]):
pulumi.set(self, "virtual_machine_image", value)
@property
@pulumi.getter(name="vmPriority")
def vm_priority(self) -> Optional[pulumi.Input[Union[str, 'VmPriority']]]:
"""
Virtual Machine priority
"""
return pulumi.get(self, "vm_priority")
@vm_priority.setter
def vm_priority(self, value: Optional[pulumi.Input[Union[str, 'VmPriority']]]):
pulumi.set(self, "vm_priority", value)
@property
@pulumi.getter(name="vmSize")
def vm_size(self) -> Optional[pulumi.Input[str]]:
"""
Virtual Machine Size
"""
return pulumi.get(self, "vm_size")
@vm_size.setter
def vm_size(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "vm_size", value)
@pulumi.input_type
class AssignedUserArgs:
def __init__(__self__, *,
object_id: pulumi.Input[str],
tenant_id: pulumi.Input[str]):
"""
A user that can be assigned to a compute instance.
:param pulumi.Input[str] object_id: User’s AAD Object Id.
:param pulumi.Input[str] tenant_id: User’s AAD Tenant Id.
"""
pulumi.set(__self__, "object_id", object_id)
pulumi.set(__self__, "tenant_id", tenant_id)
@property
@pulumi.getter(name="objectId")
def object_id(self) -> pulumi.Input[str]:
"""
User’s AAD Object Id.
"""
return pulumi.get(self, "object_id")
@object_id.setter
def object_id(self, value: pulumi.Input[str]):
pulumi.set(self, "object_id", value)
@property
@pulumi.getter(name="tenantId")
def tenant_id(self) -> pulumi.Input[str]:
"""
User’s AAD Tenant Id.
"""
return pulumi.get(self, "tenant_id")
@tenant_id.setter
def tenant_id(self, value: pulumi.Input[str]):
pulumi.set(self, "tenant_id", value)
@pulumi.input_type
class ComputeInstanceArgs:
def __init__(__self__, *,
compute_type: pulumi.Input[str],
compute_location: Optional[pulumi.Input[str]] = None,
description: Optional[pulumi.Input[str]] = None,
properties: Optional[pulumi.Input['ComputeInstancePropertiesArgs']] = None,
resource_id: Optional[pulumi.Input[str]] = None):
"""
An Azure Machine Learning compute instance.
:param pulumi.Input[str] compute_type: The type of compute
Expected value is 'ComputeInstance'.
:param pulumi.Input[str] compute_location: Location for the underlying compute
:param pulumi.Input[str] description: The description of the Machine Learning compute.
:param pulumi.Input['ComputeInstancePropertiesArgs'] properties: Compute Instance properties
:param pulumi.Input[str] resource_id: ARM resource id of the underlying compute
"""
pulumi.set(__self__, "compute_type", 'ComputeInstance')
if compute_location is not None:
pulumi.set(__self__, "compute_location", compute_location)
if description is not None:
pulumi.set(__self__, "description", description)
if properties is not None:
pulumi.set(__self__, "properties", properties)
if resource_id is not None:
pulumi.set(__self__, "resource_id", resource_id)
@property
@pulumi.getter(name="computeType")
def compute_type(self) -> pulumi.Input[str]:
"""
The type of compute
Expected value is 'ComputeInstance'.
"""
return pulumi.get(self, "compute_type")
@compute_type.setter
def compute_type(self, value: pulumi.Input[str]):
pulumi.set(self, "compute_type", value)
@property
@pulumi.getter(name="computeLocation")
def compute_location(self) -> Optional[pulumi.Input[str]]:
"""
Location for the underlying compute
"""
return pulumi.get(self, "compute_location")
@compute_location.setter
def compute_location(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "compute_location", value)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
"""
The description of the Machine Learning compute.
"""
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@property
@pulumi.getter
def properties(self) -> Optional[pulumi.Input['ComputeInstancePropertiesArgs']]:
"""
Compute Instance properties
"""
return pulumi.get(self, "properties")
@properties.setter
def properties(self, value: Optional[pulumi.Input['ComputeInstancePropertiesArgs']]):
pulumi.set(self, "properties", value)
@property
@pulumi.getter(name="resourceId")
def resource_id(self) -> Optional[pulumi.Input[str]]:
"""
ARM resource id of the underlying compute
"""
return pulumi.get(self, "resource_id")
@resource_id.setter
def resource_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "resource_id", value)
@pulumi.input_type
class ComputeInstancePropertiesArgs:
def __init__(__self__, *,
application_sharing_policy: Optional[pulumi.Input[Union[str, 'ApplicationSharingPolicy']]] = None,
compute_instance_authorization_type: Optional[pulumi.Input[Union[str, 'ComputeInstanceAuthorizationType']]] = None,
personal_compute_instance_settings: Optional[pulumi.Input['PersonalComputeInstanceSettingsArgs']] = None,
setup_scripts: Optional[pulumi.Input['SetupScriptsArgs']] = None,
ssh_settings: Optional[pulumi.Input['ComputeInstanceSshSettingsArgs']] = None,
subnet: Optional[pulumi.Input['ResourceIdArgs']] = None,
vm_size: Optional[pulumi.Input[str]] = None):
"""
Compute Instance properties
        :param pulumi.Input[Union[str, 'ApplicationSharingPolicy']] application_sharing_policy: Policy for sharing applications on this compute instance among users of the parent workspace. If Personal, only the creator can access applications on this compute instance. If Shared, any workspace user can access applications on this instance depending on their assigned role.
:param pulumi.Input[Union[str, 'ComputeInstanceAuthorizationType']] compute_instance_authorization_type: The Compute Instance Authorization type. Available values are personal (default).
:param pulumi.Input['PersonalComputeInstanceSettingsArgs'] personal_compute_instance_settings: Settings for a personal compute instance.
:param pulumi.Input['SetupScriptsArgs'] setup_scripts: Details of customized scripts to execute for setting up the cluster.
:param pulumi.Input['ComputeInstanceSshSettingsArgs'] ssh_settings: Specifies policy and settings for SSH access.
:param pulumi.Input['ResourceIdArgs'] subnet: Virtual network subnet resource ID the compute nodes belong to.
:param pulumi.Input[str] vm_size: Virtual Machine Size
"""
if application_sharing_policy is None:
application_sharing_policy = 'Shared'
if application_sharing_policy is not None:
pulumi.set(__self__, "application_sharing_policy", application_sharing_policy)
if compute_instance_authorization_type is None:
compute_instance_authorization_type = 'personal'
if compute_instance_authorization_type is not None:
pulumi.set(__self__, "compute_instance_authorization_type", compute_instance_authorization_type)
if personal_compute_instance_settings is not None:
pulumi.set(__self__, "personal_compute_instance_settings", personal_compute_instance_settings)
if setup_scripts is not None:
pulumi.set(__self__, "setup_scripts", setup_scripts)
if ssh_settings is not None:
pulumi.set(__self__, "ssh_settings", ssh_settings)
if subnet is not None:
pulumi.set(__self__, "subnet", subnet)
if vm_size is not None:
pulumi.set(__self__, "vm_size", vm_size)
@property
@pulumi.getter(name="applicationSharingPolicy")
def application_sharing_policy(self) -> Optional[pulumi.Input[Union[str, 'ApplicationSharingPolicy']]]:
"""
        Policy for sharing applications on this compute instance among users of the parent workspace. If Personal, only the creator can access applications on this compute instance. If Shared, any workspace user can access applications on this instance depending on their assigned role.
"""
return pulumi.get(self, "application_sharing_policy")
@application_sharing_policy.setter
def application_sharing_policy(self, value: Optional[pulumi.Input[Union[str, 'ApplicationSharingPolicy']]]):
pulumi.set(self, "application_sharing_policy", value)
@property
@pulumi.getter(name="computeInstanceAuthorizationType")
def compute_instance_authorization_type(self) -> Optional[pulumi.Input[Union[str, 'ComputeInstanceAuthorizationType']]]:
"""
The Compute Instance Authorization type. Available values are personal (default).
"""
return pulumi.get(self, "compute_instance_authorization_type")
@compute_instance_authorization_type.setter
def compute_instance_authorization_type(self, value: Optional[pulumi.Input[Union[str, 'ComputeInstanceAuthorizationType']]]):
pulumi.set(self, "compute_instance_authorization_type", value)
@property
@pulumi.getter(name="personalComputeInstanceSettings")
def personal_compute_instance_settings(self) -> Optional[pulumi.Input['PersonalComputeInstanceSettingsArgs']]:
"""
Settings for a personal compute instance.
"""
return pulumi.get(self, "personal_compute_instance_settings")
@personal_compute_instance_settings.setter
def personal_compute_instance_settings(self, value: Optional[pulumi.Input['PersonalComputeInstanceSettingsArgs']]):
pulumi.set(self, "personal_compute_instance_settings", value)
@property
@pulumi.getter(name="setupScripts")
def setup_scripts(self) -> Optional[pulumi.Input['SetupScriptsArgs']]:
"""
Details of customized scripts to execute for setting up the cluster.
"""
return pulumi.get(self, "setup_scripts")
@setup_scripts.setter
def setup_scripts(self, value: Optional[pulumi.Input['SetupScriptsArgs']]):
pulumi.set(self, "setup_scripts", value)
@property
@pulumi.getter(name="sshSettings")
def ssh_settings(self) -> Optional[pulumi.Input['ComputeInstanceSshSettingsArgs']]:
"""
Specifies policy and settings for SSH access.
"""
return pulumi.get(self, "ssh_settings")
@ssh_settings.setter
def ssh_settings(self, value: Optional[pulumi.Input['ComputeInstanceSshSettingsArgs']]):
pulumi.set(self, "ssh_settings", value)
@property
@pulumi.getter
def subnet(self) -> Optional[pulumi.Input['ResourceIdArgs']]:
"""
Virtual network subnet resource ID the compute nodes belong to.
"""
return pulumi.get(self, "subnet")
@subnet.setter
def subnet(self, value: Optional[pulumi.Input['ResourceIdArgs']]):
pulumi.set(self, "subnet", value)
@property
@pulumi.getter(name="vmSize")
def vm_size(self) -> Optional[pulumi.Input[str]]:
"""
Virtual Machine Size
"""
return pulumi.get(self, "vm_size")
@vm_size.setter
def vm_size(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "vm_size", value)
@pulumi.input_type
class ComputeInstanceSshSettingsArgs:
def __init__(__self__, *,
admin_public_key: Optional[pulumi.Input[str]] = None,
ssh_public_access: Optional[pulumi.Input[Union[str, 'SshPublicAccess']]] = None):
"""
Specifies policy and settings for SSH access.
:param pulumi.Input[str] admin_public_key: Specifies the SSH rsa public key file as a string. Use "ssh-keygen -t rsa -b 2048" to generate your SSH key pairs.
:param pulumi.Input[Union[str, 'SshPublicAccess']] ssh_public_access: State of the public SSH port. Possible values are: Disabled - Indicates that the public ssh port is closed on this instance. Enabled - Indicates that the public ssh port is open and accessible according to the VNet/subnet policy if applicable.
"""
if admin_public_key is not None:
pulumi.set(__self__, "admin_public_key", admin_public_key)
if ssh_public_access is None:
ssh_public_access = 'Disabled'
if ssh_public_access is not None:
pulumi.set(__self__, "ssh_public_access", ssh_public_access)
@property
@pulumi.getter(name="adminPublicKey")
def admin_public_key(self) -> Optional[pulumi.Input[str]]:
"""
Specifies the SSH rsa public key file as a string. Use "ssh-keygen -t rsa -b 2048" to generate your SSH key pairs.
"""
return pulumi.get(self, "admin_public_key")
@admin_public_key.setter
def admin_public_key(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "admin_public_key", value)
@property
@pulumi.getter(name="sshPublicAccess")
def ssh_public_access(self) -> Optional[pulumi.Input[Union[str, 'SshPublicAccess']]]:
"""
State of the public SSH port. Possible values are: Disabled - Indicates that the public ssh port is closed on this instance. Enabled - Indicates that the public ssh port is open and accessible according to the VNet/subnet policy if applicable.
"""
return pulumi.get(self, "ssh_public_access")
@ssh_public_access.setter
def ssh_public_access(self, value: Optional[pulumi.Input[Union[str, 'SshPublicAccess']]]):
pulumi.set(self, "ssh_public_access", value)
@pulumi.input_type
class ContainerResourceRequirementsArgs:
def __init__(__self__, *,
cpu: Optional[pulumi.Input[float]] = None,
cpu_limit: Optional[pulumi.Input[float]] = None,
fpga: Optional[pulumi.Input[int]] = None,
gpu: Optional[pulumi.Input[int]] = None,
memory_in_gb: Optional[pulumi.Input[float]] = None,
memory_in_gb_limit: Optional[pulumi.Input[float]] = None):
"""
The resource requirements for the container (cpu and memory).
:param pulumi.Input[float] cpu: The minimum amount of CPU cores to be used by the container. More info:
https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/
:param pulumi.Input[float] cpu_limit: The maximum amount of CPU cores allowed to be used by the container. More info:
https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/
        :param pulumi.Input[int] fpga: The number of FPGA PCIE devices exposed to the container. Must be a multiple of 2.
:param pulumi.Input[int] gpu: The number of GPU cores in the container.
:param pulumi.Input[float] memory_in_gb: The minimum amount of memory (in GB) to be used by the container. More info:
https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/
:param pulumi.Input[float] memory_in_gb_limit: The maximum amount of memory (in GB) allowed to be used by the container. More info:
https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/
"""
if cpu is not None:
pulumi.set(__self__, "cpu", cpu)
if cpu_limit is not None:
pulumi.set(__self__, "cpu_limit", cpu_limit)
if fpga is not None:
pulumi.set(__self__, "fpga", fpga)
if gpu is not None:
pulumi.set(__self__, "gpu", gpu)
if memory_in_gb is not None:
pulumi.set(__self__, "memory_in_gb", memory_in_gb)
if memory_in_gb_limit is not None:
pulumi.set(__self__, "memory_in_gb_limit", memory_in_gb_limit)
@property
@pulumi.getter
def cpu(self) -> Optional[pulumi.Input[float]]:
"""
The minimum amount of CPU cores to be used by the container. More info:
https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/
"""
return pulumi.get(self, "cpu")
@cpu.setter
def cpu(self, value: Optional[pulumi.Input[float]]):
pulumi.set(self, "cpu", value)
@property
@pulumi.getter(name="cpuLimit")
def cpu_limit(self) -> Optional[pulumi.Input[float]]:
"""
The maximum amount of CPU cores allowed to be used by the container. More info:
https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/
"""
return pulumi.get(self, "cpu_limit")
@cpu_limit.setter
def cpu_limit(self, value: Optional[pulumi.Input[float]]):
pulumi.set(self, "cpu_limit", value)
@property
@pulumi.getter
def fpga(self) -> Optional[pulumi.Input[int]]:
"""
        The number of FPGA PCIE devices exposed to the container. Must be a multiple of 2.
"""
return pulumi.get(self, "fpga")
@fpga.setter
def fpga(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "fpga", value)
@property
@pulumi.getter
def gpu(self) -> Optional[pulumi.Input[int]]:
"""
The number of GPU cores in the container.
"""
return pulumi.get(self, "gpu")
@gpu.setter
def gpu(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "gpu", value)
@property
@pulumi.getter(name="memoryInGB")
def memory_in_gb(self) -> Optional[pulumi.Input[float]]:
"""
The minimum amount of memory (in GB) to be used by the container. More info:
https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/
"""
return pulumi.get(self, "memory_in_gb")
@memory_in_gb.setter
def memory_in_gb(self, value: Optional[pulumi.Input[float]]):
pulumi.set(self, "memory_in_gb", value)
@property
@pulumi.getter(name="memoryInGBLimit")
def memory_in_gb_limit(self) -> Optional[pulumi.Input[float]]:
"""
The maximum amount of memory (in GB) allowed to be used by the container. More info:
https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/
"""
return pulumi.get(self, "memory_in_gb_limit")
@memory_in_gb_limit.setter
def memory_in_gb_limit(self, value: Optional[pulumi.Input[float]]):
pulumi.set(self, "memory_in_gb_limit", value)
@pulumi.input_type
class CosmosDbSettingsArgs:
def __init__(__self__, *,
collections_throughput: Optional[pulumi.Input[int]] = None):
"""
:param pulumi.Input[int] collections_throughput: The throughput of the collections in cosmosdb database
"""
if collections_throughput is not None:
pulumi.set(__self__, "collections_throughput", collections_throughput)
@property
@pulumi.getter(name="collectionsThroughput")
def collections_throughput(self) -> Optional[pulumi.Input[int]]:
"""
The throughput of the collections in cosmosdb database
"""
return pulumi.get(self, "collections_throughput")
@collections_throughput.setter
def collections_throughput(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "collections_throughput", value)
@pulumi.input_type
class CreateServiceRequestEnvironmentImageRequestArgs:
def __init__(__self__, *,
assets: Optional[pulumi.Input[Sequence[pulumi.Input['ImageAssetArgs']]]] = None,
driver_program: Optional[pulumi.Input[str]] = None,
environment: Optional[pulumi.Input['EnvironmentImageRequestEnvironmentArgs']] = None,
environment_reference: Optional[pulumi.Input['EnvironmentImageRequestEnvironmentReferenceArgs']] = None,
model_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
models: Optional[pulumi.Input[Sequence[pulumi.Input['ModelArgs']]]] = None):
"""
The Environment, models and assets needed for inferencing.
:param pulumi.Input[Sequence[pulumi.Input['ImageAssetArgs']]] assets: The list of assets.
:param pulumi.Input[str] driver_program: The name of the driver file.
        :param pulumi.Input['EnvironmentImageRequestEnvironmentArgs'] environment: The details of the Azure ML environment.
        :param pulumi.Input['EnvironmentImageRequestEnvironmentReferenceArgs'] environment_reference: The unique identifying details of the Azure ML environment.
:param pulumi.Input[Sequence[pulumi.Input[str]]] model_ids: The list of model Ids.
:param pulumi.Input[Sequence[pulumi.Input['ModelArgs']]] models: The list of models.
"""
if assets is not None:
pulumi.set(__self__, "assets", assets)
if driver_program is not None:
pulumi.set(__self__, "driver_program", driver_program)
if environment is not None:
pulumi.set(__self__, "environment", environment)
if environment_reference is not None:
pulumi.set(__self__, "environment_reference", environment_reference)
if model_ids is not None:
pulumi.set(__self__, "model_ids", model_ids)
if models is not None:
pulumi.set(__self__, "models", models)
@property
@pulumi.getter
def assets(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ImageAssetArgs']]]]:
"""
The list of assets.
"""
return pulumi.get(self, "assets")
@assets.setter
def assets(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ImageAssetArgs']]]]):
pulumi.set(self, "assets", value)
@property
@pulumi.getter(name="driverProgram")
def driver_program(self) -> Optional[pulumi.Input[str]]:
"""
The name of the driver file.
"""
return pulumi.get(self, "driver_program")
@driver_program.setter
def driver_program(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "driver_program", value)
@property
@pulumi.getter
def environment(self) -> Optional[pulumi.Input['EnvironmentImageRequestEnvironmentArgs']]:
"""
        The details of the Azure ML environment.
"""
return pulumi.get(self, "environment")
@environment.setter
def environment(self, value: Optional[pulumi.Input['EnvironmentImageRequestEnvironmentArgs']]):
pulumi.set(self, "environment", value)
@property
@pulumi.getter(name="environmentReference")
def environment_reference(self) -> Optional[pulumi.Input['EnvironmentImageRequestEnvironmentReferenceArgs']]:
"""
        The unique identifying details of the Azure ML environment.
"""
return pulumi.get(self, "environment_reference")
@environment_reference.setter
def environment_reference(self, value: Optional[pulumi.Input['EnvironmentImageRequestEnvironmentReferenceArgs']]):
pulumi.set(self, "environment_reference", value)
@property
@pulumi.getter(name="modelIds")
def model_ids(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
The list of model Ids.
"""
return pulumi.get(self, "model_ids")
@model_ids.setter
def model_ids(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "model_ids", value)
@property
@pulumi.getter
def models(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ModelArgs']]]]:
"""
The list of models.
"""
return pulumi.get(self, "models")
@models.setter
def models(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ModelArgs']]]]):
pulumi.set(self, "models", value)
@pulumi.input_type
class CreateServiceRequestKeysArgs:
def __init__(__self__, *,
primary_key: Optional[pulumi.Input[str]] = None,
secondary_key: Optional[pulumi.Input[str]] = None):
"""
The authentication keys.
:param pulumi.Input[str] primary_key: The primary key.
:param pulumi.Input[str] secondary_key: The secondary key.
"""
if primary_key is not None:
pulumi.set(__self__, "primary_key", primary_key)
if secondary_key is not None:
pulumi.set(__self__, "secondary_key", secondary_key)
@property
@pulumi.getter(name="primaryKey")
def primary_key(self) -> Optional[pulumi.Input[str]]:
"""
The primary key.
"""
return pulumi.get(self, "primary_key")
@primary_key.setter
def primary_key(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "primary_key", value)
@property
@pulumi.getter(name="secondaryKey")
def secondary_key(self) -> Optional[pulumi.Input[str]]:
"""
The secondary key.
"""
return pulumi.get(self, "secondary_key")
@secondary_key.setter
def secondary_key(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "secondary_key", value)
@pulumi.input_type
class DataFactoryArgs:
def __init__(__self__, *,
compute_type: pulumi.Input[str],
compute_location: Optional[pulumi.Input[str]] = None,
description: Optional[pulumi.Input[str]] = None,
resource_id: Optional[pulumi.Input[str]] = None):
"""
A DataFactory compute.
:param pulumi.Input[str] compute_type: The type of compute
Expected value is 'DataFactory'.
:param pulumi.Input[str] compute_location: Location for the underlying compute
:param pulumi.Input[str] description: The description of the Machine Learning compute.
:param pulumi.Input[str] resource_id: ARM resource id of the underlying compute
"""
pulumi.set(__self__, "compute_type", 'DataFactory')
if compute_location is not None:
pulumi.set(__self__, "compute_location", compute_location)
if description is not None:
pulumi.set(__self__, "description", description)
if resource_id is not None:
pulumi.set(__self__, "resource_id", resource_id)
@property
@pulumi.getter(name="computeType")
def compute_type(self) -> pulumi.Input[str]:
"""
The type of compute
Expected value is 'DataFactory'.
"""
return pulumi.get(self, "compute_type")
@compute_type.setter
def compute_type(self, value: pulumi.Input[str]):
pulumi.set(self, "compute_type", value)
@property
@pulumi.getter(name="computeLocation")
def compute_location(self) -> Optional[pulumi.Input[str]]:
"""
Location for the underlying compute
"""
return pulumi.get(self, "compute_location")
@compute_location.setter
def compute_location(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "compute_location", value)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
"""
The description of the Machine Learning compute.
"""
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@property
@pulumi.getter(name="resourceId")
def resource_id(self) -> Optional[pulumi.Input[str]]:
"""
ARM resource id of the underlying compute
"""
return pulumi.get(self, "resource_id")
@resource_id.setter
def resource_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "resource_id", value)
@pulumi.input_type
class DataLakeAnalyticsArgs:
def __init__(__self__, *,
compute_type: pulumi.Input[str],
compute_location: Optional[pulumi.Input[str]] = None,
description: Optional[pulumi.Input[str]] = None,
properties: Optional[pulumi.Input['DataLakeAnalyticsPropertiesArgs']] = None,
resource_id: Optional[pulumi.Input[str]] = None):
"""
A DataLakeAnalytics compute.
:param pulumi.Input[str] compute_type: The type of compute
Expected value is 'DataLakeAnalytics'.
:param pulumi.Input[str] compute_location: Location for the underlying compute
:param pulumi.Input[str] description: The description of the Machine Learning compute.
:param pulumi.Input[str] resource_id: ARM resource id of the underlying compute
"""
pulumi.set(__self__, "compute_type", 'DataLakeAnalytics')
if compute_location is not None:
pulumi.set(__self__, "compute_location", compute_location)
if description is not None:
pulumi.set(__self__, "description", description)
if properties is not None:
pulumi.set(__self__, "properties", properties)
if resource_id is not None:
pulumi.set(__self__, "resource_id", resource_id)
@property
@pulumi.getter(name="computeType")
def compute_type(self) -> pulumi.Input[str]:
"""
The type of compute
Expected value is 'DataLakeAnalytics'.
"""
return pulumi.get(self, "compute_type")
@compute_type.setter
def compute_type(self, value: pulumi.Input[str]):
pulumi.set(self, "compute_type", value)
@property
@pulumi.getter(name="computeLocation")
def compute_location(self) -> Optional[pulumi.Input[str]]:
"""
Location for the underlying compute
"""
return pulumi.get(self, "compute_location")
@compute_location.setter
def compute_location(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "compute_location", value)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
"""
The description of the Machine Learning compute.
"""
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@property
@pulumi.getter
def properties(self) -> Optional[pulumi.Input['DataLakeAnalyticsPropertiesArgs']]:
return pulumi.get(self, "properties")
@properties.setter
def properties(self, value: Optional[pulumi.Input['DataLakeAnalyticsPropertiesArgs']]):
pulumi.set(self, "properties", value)
@property
@pulumi.getter(name="resourceId")
def resource_id(self) -> Optional[pulumi.Input[str]]:
"""
ARM resource id of the underlying compute
"""
return pulumi.get(self, "resource_id")
@resource_id.setter
def resource_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "resource_id", value)
@pulumi.input_type
class DataLakeAnalyticsPropertiesArgs:
def __init__(__self__, *,
data_lake_store_account_name: Optional[pulumi.Input[str]] = None):
"""
:param pulumi.Input[str] data_lake_store_account_name: DataLake Store Account Name
"""
if data_lake_store_account_name is not None:
pulumi.set(__self__, "data_lake_store_account_name", data_lake_store_account_name)
@property
@pulumi.getter(name="dataLakeStoreAccountName")
def data_lake_store_account_name(self) -> Optional[pulumi.Input[str]]:
"""
DataLake Store Account Name
"""
return pulumi.get(self, "data_lake_store_account_name")
@data_lake_store_account_name.setter
def data_lake_store_account_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "data_lake_store_account_name", value)
@pulumi.input_type
class DatabricksArgs:
def __init__(__self__, *,
compute_type: pulumi.Input[str],
compute_location: Optional[pulumi.Input[str]] = None,
description: Optional[pulumi.Input[str]] = None,
properties: Optional[pulumi.Input['DatabricksPropertiesArgs']] = None,
resource_id: Optional[pulumi.Input[str]] = None):
"""
        A Databricks compute.
:param pulumi.Input[str] compute_type: The type of compute
Expected value is 'Databricks'.
:param pulumi.Input[str] compute_location: Location for the underlying compute
:param pulumi.Input[str] description: The description of the Machine Learning compute.
:param pulumi.Input[str] resource_id: ARM resource id of the underlying compute
"""
pulumi.set(__self__, "compute_type", 'Databricks')
if compute_location is not None:
pulumi.set(__self__, "compute_location", compute_location)
if description is not None:
pulumi.set(__self__, "description", description)
if properties is not None:
pulumi.set(__self__, "properties", properties)
if resource_id is not None:
pulumi.set(__self__, "resource_id", resource_id)
@property
@pulumi.getter(name="computeType")
def compute_type(self) -> pulumi.Input[str]:
"""
The type of compute
Expected value is 'Databricks'.
"""
return pulumi.get(self, "compute_type")
@compute_type.setter
def compute_type(self, value: pulumi.Input[str]):
pulumi.set(self, "compute_type", value)
@property
@pulumi.getter(name="computeLocation")
def compute_location(self) -> Optional[pulumi.Input[str]]:
"""
Location for the underlying compute
"""
return pulumi.get(self, "compute_location")
@compute_location.setter
def compute_location(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "compute_location", value)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
"""
The description of the Machine Learning compute.
"""
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@property
@pulumi.getter
def properties(self) -> Optional[pulumi.Input['DatabricksPropertiesArgs']]:
return pulumi.get(self, "properties")
@properties.setter
def properties(self, value: Optional[pulumi.Input['DatabricksPropertiesArgs']]):
pulumi.set(self, "properties", value)
@property
@pulumi.getter(name="resourceId")
def resource_id(self) -> Optional[pulumi.Input[str]]:
"""
ARM resource id of the underlying compute
"""
return pulumi.get(self, "resource_id")
@resource_id.setter
def resource_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "resource_id", value)
@pulumi.input_type
class DatabricksPropertiesArgs:
def __init__(__self__, *,
databricks_access_token: Optional[pulumi.Input[str]] = None,
workspace_url: Optional[pulumi.Input[str]] = None):
"""
:param pulumi.Input[str] databricks_access_token: Databricks access token
:param pulumi.Input[str] workspace_url: Workspace Url
"""
if databricks_access_token is not None:
pulumi.set(__self__, "databricks_access_token", databricks_access_token)
if workspace_url is not None:
pulumi.set(__self__, "workspace_url", workspace_url)
@property
@pulumi.getter(name="databricksAccessToken")
def databricks_access_token(self) -> Optional[pulumi.Input[str]]:
"""
Databricks access token
"""
return pulumi.get(self, "databricks_access_token")
@databricks_access_token.setter
def databricks_access_token(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "databricks_access_token", value)
@property
@pulumi.getter(name="workspaceUrl")
def workspace_url(self) -> Optional[pulumi.Input[str]]:
"""
Workspace Url
"""
return pulumi.get(self, "workspace_url")
@workspace_url.setter
def workspace_url(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "workspace_url", value)
@pulumi.input_type
class DatasetReferenceArgs:
def __init__(__self__, *,
id: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None):
"""
The dataset reference object.
:param pulumi.Input[str] id: The id of the dataset reference.
:param pulumi.Input[str] name: The name of the dataset reference.
"""
if id is not None:
pulumi.set(__self__, "id", id)
if name is not None:
pulumi.set(__self__, "name", name)
@property
@pulumi.getter
def id(self) -> Optional[pulumi.Input[str]]:
"""
The id of the dataset reference.
"""
return pulumi.get(self, "id")
@id.setter
def id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "id", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the dataset reference.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@pulumi.input_type
class EncryptionPropertyArgs:
def __init__(__self__, *,
key_vault_properties: pulumi.Input['KeyVaultPropertiesArgs'],
status: pulumi.Input[Union[str, 'EncryptionStatus']],
identity: Optional[pulumi.Input['IdentityForCmkArgs']] = None):
"""
:param pulumi.Input['KeyVaultPropertiesArgs'] key_vault_properties: Customer Key vault properties.
:param pulumi.Input[Union[str, 'EncryptionStatus']] status: Indicates whether or not the encryption is enabled for the workspace.
:param pulumi.Input['IdentityForCmkArgs'] identity: The identity that will be used to access the key vault for encryption at rest.
"""
pulumi.set(__self__, "key_vault_properties", key_vault_properties)
pulumi.set(__self__, "status", status)
if identity is not None:
pulumi.set(__self__, "identity", identity)
@property
@pulumi.getter(name="keyVaultProperties")
def key_vault_properties(self) -> pulumi.Input['KeyVaultPropertiesArgs']:
"""
Customer Key vault properties.
"""
return pulumi.get(self, "key_vault_properties")
@key_vault_properties.setter
def key_vault_properties(self, value: pulumi.Input['KeyVaultPropertiesArgs']):
pulumi.set(self, "key_vault_properties", value)
@property
@pulumi.getter
def status(self) -> pulumi.Input[Union[str, 'EncryptionStatus']]:
"""
Indicates whether or not the encryption is enabled for the workspace.
"""
return pulumi.get(self, "status")
@status.setter
def status(self, value: pulumi.Input[Union[str, 'EncryptionStatus']]):
pulumi.set(self, "status", value)
@property
@pulumi.getter
def identity(self) -> Optional[pulumi.Input['IdentityForCmkArgs']]:
"""
The identity that will be used to access the key vault for encryption at rest.
"""
return pulumi.get(self, "identity")
@identity.setter
def identity(self, value: Optional[pulumi.Input['IdentityForCmkArgs']]):
pulumi.set(self, "identity", value)
@pulumi.input_type
class EnvironmentImageRequestEnvironmentArgs:
def __init__(__self__, *,
docker: Optional[pulumi.Input['ModelEnvironmentDefinitionDockerArgs']] = None,
environment_variables: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
inferencing_stack_version: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
python: Optional[pulumi.Input['ModelEnvironmentDefinitionPythonArgs']] = None,
r: Optional[pulumi.Input['ModelEnvironmentDefinitionRArgs']] = None,
spark: Optional[pulumi.Input['ModelEnvironmentDefinitionSparkArgs']] = None,
version: Optional[pulumi.Input[str]] = None):
"""
        The details of the Azure ML environment.
:param pulumi.Input['ModelEnvironmentDefinitionDockerArgs'] docker: The definition of a Docker container.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] environment_variables: Definition of environment variables to be defined in the environment.
:param pulumi.Input[str] inferencing_stack_version: The inferencing stack version added to the image. To avoid adding an inferencing stack, do not set this value. Valid values: "latest".
:param pulumi.Input[str] name: The name of the environment.
:param pulumi.Input['ModelEnvironmentDefinitionPythonArgs'] python: Settings for a Python environment.
        :param pulumi.Input['ModelEnvironmentDefinitionRArgs'] r: Settings for an R environment.
:param pulumi.Input['ModelEnvironmentDefinitionSparkArgs'] spark: The configuration for a Spark environment.
:param pulumi.Input[str] version: The environment version.
"""
if docker is not None:
pulumi.set(__self__, "docker", docker)
if environment_variables is not None:
pulumi.set(__self__, "environment_variables", environment_variables)
if inferencing_stack_version is not None:
pulumi.set(__self__, "inferencing_stack_version", inferencing_stack_version)
if name is not None:
pulumi.set(__self__, "name", name)
if python is not None:
pulumi.set(__self__, "python", python)
if r is not None:
pulumi.set(__self__, "r", r)
if spark is not None:
pulumi.set(__self__, "spark", spark)
if version is not None:
pulumi.set(__self__, "version", version)
@property
@pulumi.getter
def docker(self) -> Optional[pulumi.Input['ModelEnvironmentDefinitionDockerArgs']]:
"""
The definition of a Docker container.
"""
return pulumi.get(self, "docker")
@docker.setter
def docker(self, value: Optional[pulumi.Input['ModelEnvironmentDefinitionDockerArgs']]):
pulumi.set(self, "docker", value)
@property
@pulumi.getter(name="environmentVariables")
def environment_variables(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
Definition of environment variables to be defined in the environment.
"""
return pulumi.get(self, "environment_variables")
@environment_variables.setter
def environment_variables(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "environment_variables", value)
@property
@pulumi.getter(name="inferencingStackVersion")
def inferencing_stack_version(self) -> Optional[pulumi.Input[str]]:
"""
The inferencing stack version added to the image. To avoid adding an inferencing stack, do not set this value. Valid values: "latest".
"""
return pulumi.get(self, "inferencing_stack_version")
@inferencing_stack_version.setter
def inferencing_stack_version(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "inferencing_stack_version", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the environment.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def python(self) -> Optional[pulumi.Input['ModelEnvironmentDefinitionPythonArgs']]:
"""
Settings for a Python environment.
"""
return pulumi.get(self, "python")
@python.setter
def python(self, value: Optional[pulumi.Input['ModelEnvironmentDefinitionPythonArgs']]):
pulumi.set(self, "python", value)
@property
@pulumi.getter
def r(self) -> Optional[pulumi.Input['ModelEnvironmentDefinitionRArgs']]:
"""
        Settings for an R environment.
"""
return pulumi.get(self, "r")
@r.setter
def r(self, value: Optional[pulumi.Input['ModelEnvironmentDefinitionRArgs']]):
pulumi.set(self, "r", value)
@property
@pulumi.getter
def spark(self) -> Optional[pulumi.Input['ModelEnvironmentDefinitionSparkArgs']]:
"""
The configuration for a Spark environment.
"""
return pulumi.get(self, "spark")
@spark.setter
def spark(self, value: Optional[pulumi.Input['ModelEnvironmentDefinitionSparkArgs']]):
pulumi.set(self, "spark", value)
@property
@pulumi.getter
def version(self) -> Optional[pulumi.Input[str]]:
"""
The environment version.
"""
return pulumi.get(self, "version")
@version.setter
def version(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "version", value)
@pulumi.input_type
class EnvironmentImageRequestEnvironmentReferenceArgs:
def __init__(__self__, *,
name: Optional[pulumi.Input[str]] = None,
version: Optional[pulumi.Input[str]] = None):
"""
        The unique identifying details of the Azure ML environment.
:param pulumi.Input[str] name: Name of the environment.
:param pulumi.Input[str] version: Version of the environment.
"""
if name is not None:
pulumi.set(__self__, "name", name)
if version is not None:
pulumi.set(__self__, "version", version)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
Name of the environment.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def version(self) -> Optional[pulumi.Input[str]]:
"""
Version of the environment.
"""
return pulumi.get(self, "version")
@version.setter
def version(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "version", value)
@pulumi.input_type
class HDInsightArgs:
def __init__(__self__, *,
compute_type: pulumi.Input[str],
compute_location: Optional[pulumi.Input[str]] = None,
description: Optional[pulumi.Input[str]] = None,
properties: Optional[pulumi.Input['HDInsightPropertiesArgs']] = None,
resource_id: Optional[pulumi.Input[str]] = None):
"""
        An HDInsight compute.
:param pulumi.Input[str] compute_type: The type of compute
Expected value is 'HDInsight'.
:param pulumi.Input[str] compute_location: Location for the underlying compute
:param pulumi.Input[str] description: The description of the Machine Learning compute.
:param pulumi.Input[str] resource_id: ARM resource id of the underlying compute
"""
pulumi.set(__self__, "compute_type", 'HDInsight')
if compute_location is not None:
pulumi.set(__self__, "compute_location", compute_location)
if description is not None:
pulumi.set(__self__, "description", description)
if properties is not None:
pulumi.set(__self__, "properties", properties)
if resource_id is not None:
pulumi.set(__self__, "resource_id", resource_id)
@property
@pulumi.getter(name="computeType")
def compute_type(self) -> pulumi.Input[str]:
"""
The type of compute
Expected value is 'HDInsight'.
"""
return pulumi.get(self, "compute_type")
@compute_type.setter
def compute_type(self, value: pulumi.Input[str]):
pulumi.set(self, "compute_type", value)
@property
@pulumi.getter(name="computeLocation")
def compute_location(self) -> Optional[pulumi.Input[str]]:
"""
Location for the underlying compute
"""
return pulumi.get(self, "compute_location")
@compute_location.setter
def compute_location(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "compute_location", value)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
"""
The description of the Machine Learning compute.
"""
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@property
@pulumi.getter
def properties(self) -> Optional[pulumi.Input['HDInsightPropertiesArgs']]:
return pulumi.get(self, "properties")
@properties.setter
def properties(self, value: Optional[pulumi.Input['HDInsightPropertiesArgs']]):
pulumi.set(self, "properties", value)
@property
@pulumi.getter(name="resourceId")
def resource_id(self) -> Optional[pulumi.Input[str]]:
"""
ARM resource id of the underlying compute
"""
return pulumi.get(self, "resource_id")
@resource_id.setter
def resource_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "resource_id", value)
@pulumi.input_type
class HDInsightPropertiesArgs:
def __init__(__self__, *,
address: Optional[pulumi.Input[str]] = None,
administrator_account: Optional[pulumi.Input['VirtualMachineSshCredentialsArgs']] = None,
ssh_port: Optional[pulumi.Input[int]] = None):
"""
:param pulumi.Input[str] address: Public IP address of the master node of the cluster.
:param pulumi.Input['VirtualMachineSshCredentialsArgs'] administrator_account: Admin credentials for master node of the cluster
:param pulumi.Input[int] ssh_port: Port open for ssh connections on the master node of the cluster.
"""
if address is not None:
pulumi.set(__self__, "address", address)
if administrator_account is not None:
pulumi.set(__self__, "administrator_account", administrator_account)
if ssh_port is not None:
pulumi.set(__self__, "ssh_port", ssh_port)
@property
@pulumi.getter
def address(self) -> Optional[pulumi.Input[str]]:
"""
Public IP address of the master node of the cluster.
"""
return pulumi.get(self, "address")
@address.setter
def address(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "address", value)
@property
@pulumi.getter(name="administratorAccount")
def administrator_account(self) -> Optional[pulumi.Input['VirtualMachineSshCredentialsArgs']]:
"""
Admin credentials for master node of the cluster
"""
return pulumi.get(self, "administrator_account")
@administrator_account.setter
def administrator_account(self, value: Optional[pulumi.Input['VirtualMachineSshCredentialsArgs']]):
pulumi.set(self, "administrator_account", value)
@property
@pulumi.getter(name="sshPort")
def ssh_port(self) -> Optional[pulumi.Input[int]]:
"""
Port open for ssh connections on the master node of the cluster.
"""
return pulumi.get(self, "ssh_port")
@ssh_port.setter
def ssh_port(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "ssh_port", value)
@pulumi.input_type
class IdentityArgs:
def __init__(__self__, *,
type: Optional[pulumi.Input['ResourceIdentityType']] = None,
user_assigned_identities: Optional[pulumi.Input[Mapping[str, Any]]] = None):
"""
Identity for the resource.
:param pulumi.Input['ResourceIdentityType'] type: The identity type.
:param pulumi.Input[Mapping[str, Any]] user_assigned_identities: The user assigned identities associated with the resource.
"""
if type is not None:
pulumi.set(__self__, "type", type)
if user_assigned_identities is not None:
pulumi.set(__self__, "user_assigned_identities", user_assigned_identities)
@property
@pulumi.getter
def type(self) -> Optional[pulumi.Input['ResourceIdentityType']]:
"""
The identity type.
"""
return pulumi.get(self, "type")
@type.setter
def type(self, value: Optional[pulumi.Input['ResourceIdentityType']]):
pulumi.set(self, "type", value)
@property
@pulumi.getter(name="userAssignedIdentities")
def user_assigned_identities(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:
"""
The user assigned identities associated with the resource.
"""
return pulumi.get(self, "user_assigned_identities")
@user_assigned_identities.setter
def user_assigned_identities(self, value: Optional[pulumi.Input[Mapping[str, Any]]]):
pulumi.set(self, "user_assigned_identities", value)
@pulumi.input_type
class IdentityForCmkArgs:
def __init__(__self__, *,
user_assigned_identity: pulumi.Input[str]):
"""
Identity that will be used to access key vault for encryption at rest
:param pulumi.Input[str] user_assigned_identity: The ArmId of the user assigned identity that will be used to access the customer managed key vault
"""
pulumi.set(__self__, "user_assigned_identity", user_assigned_identity)
@property
@pulumi.getter(name="userAssignedIdentity")
def user_assigned_identity(self) -> pulumi.Input[str]:
"""
The ArmId of the user assigned identity that will be used to access the customer managed key vault
"""
return pulumi.get(self, "user_assigned_identity")
@user_assigned_identity.setter
def user_assigned_identity(self, value: pulumi.Input[str]):
pulumi.set(self, "user_assigned_identity", value)
@pulumi.input_type
class ImageAssetArgs:
def __init__(__self__, *,
id: Optional[pulumi.Input[str]] = None,
mime_type: Optional[pulumi.Input[str]] = None,
unpack: Optional[pulumi.Input[bool]] = None,
url: Optional[pulumi.Input[str]] = None):
"""
An Image asset.
:param pulumi.Input[str] id: The Asset Id.
:param pulumi.Input[str] mime_type: The mime type.
:param pulumi.Input[bool] unpack: Whether the Asset is unpacked.
:param pulumi.Input[str] url: The Url of the Asset.
"""
if id is not None:
pulumi.set(__self__, "id", id)
if mime_type is not None:
pulumi.set(__self__, "mime_type", mime_type)
if unpack is not None:
pulumi.set(__self__, "unpack", unpack)
if url is not None:
pulumi.set(__self__, "url", url)
@property
@pulumi.getter
def id(self) -> Optional[pulumi.Input[str]]:
"""
The Asset Id.
"""
return pulumi.get(self, "id")
@id.setter
def id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "id", value)
@property
@pulumi.getter(name="mimeType")
def mime_type(self) -> Optional[pulumi.Input[str]]:
"""
The mime type.
"""
return pulumi.get(self, "mime_type")
@mime_type.setter
def mime_type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "mime_type", value)
@property
@pulumi.getter
def unpack(self) -> Optional[pulumi.Input[bool]]:
"""
Whether the Asset is unpacked.
"""
return pulumi.get(self, "unpack")
@unpack.setter
def unpack(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "unpack", value)
@property
@pulumi.getter
def url(self) -> Optional[pulumi.Input[str]]:
"""
The Url of the Asset.
"""
return pulumi.get(self, "url")
@url.setter
def url(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "url", value)
@pulumi.input_type
class KeyVaultPropertiesArgs:
def __init__(__self__, *,
key_identifier: pulumi.Input[str],
key_vault_arm_id: pulumi.Input[str],
identity_client_id: Optional[pulumi.Input[str]] = None):
"""
:param pulumi.Input[str] key_identifier: Key vault uri to access the encryption key.
:param pulumi.Input[str] key_vault_arm_id: The ArmId of the keyVault where the customer owned encryption key is present.
:param pulumi.Input[str] identity_client_id: For future use - The client id of the identity which will be used to access key vault.
"""
pulumi.set(__self__, "key_identifier", key_identifier)
pulumi.set(__self__, "key_vault_arm_id", key_vault_arm_id)
if identity_client_id is not None:
pulumi.set(__self__, "identity_client_id", identity_client_id)
@property
@pulumi.getter(name="keyIdentifier")
def key_identifier(self) -> pulumi.Input[str]:
"""
Key vault uri to access the encryption key.
"""
return pulumi.get(self, "key_identifier")
@key_identifier.setter
def key_identifier(self, value: pulumi.Input[str]):
pulumi.set(self, "key_identifier", value)
@property
@pulumi.getter(name="keyVaultArmId")
def key_vault_arm_id(self) -> pulumi.Input[str]:
"""
The ArmId of the keyVault where the customer owned encryption key is present.
"""
return pulumi.get(self, "key_vault_arm_id")
@key_vault_arm_id.setter
def key_vault_arm_id(self, value: pulumi.Input[str]):
pulumi.set(self, "key_vault_arm_id", value)
@property
@pulumi.getter(name="identityClientId")
def identity_client_id(self) -> Optional[pulumi.Input[str]]:
"""
For future use - The client id of the identity which will be used to access key vault.
"""
return pulumi.get(self, "identity_client_id")
@identity_client_id.setter
def identity_client_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "identity_client_id", value)
@pulumi.input_type
class ModelArgs:
def __init__(__self__, *,
mime_type: pulumi.Input[str],
name: pulumi.Input[str],
url: pulumi.Input[str],
created_time: Optional[pulumi.Input[str]] = None,
datasets: Optional[pulumi.Input[Sequence[pulumi.Input['DatasetReferenceArgs']]]] = None,
derived_model_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
description: Optional[pulumi.Input[str]] = None,
experiment_name: Optional[pulumi.Input[str]] = None,
framework: Optional[pulumi.Input[str]] = None,
framework_version: Optional[pulumi.Input[str]] = None,
id: Optional[pulumi.Input[str]] = None,
kv_tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
modified_time: Optional[pulumi.Input[str]] = None,
parent_model_id: Optional[pulumi.Input[str]] = None,
properties: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
resource_requirements: Optional[pulumi.Input['ContainerResourceRequirementsArgs']] = None,
run_id: Optional[pulumi.Input[str]] = None,
sample_input_data: Optional[pulumi.Input[str]] = None,
sample_output_data: Optional[pulumi.Input[str]] = None,
unpack: Optional[pulumi.Input[bool]] = None,
version: Optional[pulumi.Input[float]] = None):
"""
An Azure Machine Learning Model.
:param pulumi.Input[str] mime_type: The MIME type of Model content. For more details about MIME type, please open https://www.iana.org/assignments/media-types/media-types.xhtml
:param pulumi.Input[str] name: The Model name.
:param pulumi.Input[str] url: The URL of the Model. Usually a SAS URL.
:param pulumi.Input[str] created_time: The Model creation time (UTC).
:param pulumi.Input[Sequence[pulumi.Input['DatasetReferenceArgs']]] datasets: The list of datasets associated with the model.
:param pulumi.Input[Sequence[pulumi.Input[str]]] derived_model_ids: Models derived from this model
:param pulumi.Input[str] description: The Model description text.
:param pulumi.Input[str] experiment_name: The name of the experiment where this model was created.
:param pulumi.Input[str] framework: The Model framework.
:param pulumi.Input[str] framework_version: The Model framework version.
:param pulumi.Input[str] id: The Model Id.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] kv_tags: The Model tag dictionary. Items are mutable.
:param pulumi.Input[str] modified_time: The Model last modified time (UTC).
:param pulumi.Input[str] parent_model_id: The Parent Model Id.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] properties: The Model property dictionary. Properties are immutable.
:param pulumi.Input['ContainerResourceRequirementsArgs'] resource_requirements: Resource requirements for the model
:param pulumi.Input[str] run_id: The RunId that created this model.
:param pulumi.Input[str] sample_input_data: Sample Input Data for the Model. A reference to a dataset in the workspace in the format aml://dataset/{datasetId}
:param pulumi.Input[str] sample_output_data: Sample Output Data for the Model. A reference to a dataset in the workspace in the format aml://dataset/{datasetId}
:param pulumi.Input[bool] unpack: Indicates whether we need to unpack the Model during docker Image creation.
:param pulumi.Input[float] version: The Model version assigned by Model Management Service.
"""
pulumi.set(__self__, "mime_type", mime_type)
pulumi.set(__self__, "name", name)
pulumi.set(__self__, "url", url)
if created_time is not None:
pulumi.set(__self__, "created_time", created_time)
if datasets is not None:
pulumi.set(__self__, "datasets", datasets)
if derived_model_ids is not None:
pulumi.set(__self__, "derived_model_ids", derived_model_ids)
if description is not None:
pulumi.set(__self__, "description", description)
if experiment_name is not None:
pulumi.set(__self__, "experiment_name", experiment_name)
if framework is not None:
pulumi.set(__self__, "framework", framework)
if framework_version is not None:
pulumi.set(__self__, "framework_version", framework_version)
if id is not None:
pulumi.set(__self__, "id", id)
if kv_tags is not None:
pulumi.set(__self__, "kv_tags", kv_tags)
if modified_time is not None:
pulumi.set(__self__, "modified_time", modified_time)
if parent_model_id is not None:
pulumi.set(__self__, "parent_model_id", parent_model_id)
if properties is not None:
pulumi.set(__self__, "properties", properties)
if resource_requirements is not None:
pulumi.set(__self__, "resource_requirements", resource_requirements)
if run_id is not None:
pulumi.set(__self__, "run_id", run_id)
if sample_input_data is not None:
pulumi.set(__self__, "sample_input_data", sample_input_data)
if sample_output_data is not None:
pulumi.set(__self__, "sample_output_data", sample_output_data)
if unpack is not None:
pulumi.set(__self__, "unpack", unpack)
if version is not None:
pulumi.set(__self__, "version", version)
@property
@pulumi.getter(name="mimeType")
def mime_type(self) -> pulumi.Input[str]:
"""
The MIME type of Model content. For more details about MIME type, please open https://www.iana.org/assignments/media-types/media-types.xhtml
"""
return pulumi.get(self, "mime_type")
@mime_type.setter
def mime_type(self, value: pulumi.Input[str]):
pulumi.set(self, "mime_type", value)
@property
@pulumi.getter
def name(self) -> pulumi.Input[str]:
"""
The Model name.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: pulumi.Input[str]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def url(self) -> pulumi.Input[str]:
"""
The URL of the Model. Usually a SAS URL.
"""
return pulumi.get(self, "url")
@url.setter
def url(self, value: pulumi.Input[str]):
pulumi.set(self, "url", value)
@property
@pulumi.getter(name="createdTime")
def created_time(self) -> Optional[pulumi.Input[str]]:
"""
The Model creation time (UTC).
"""
return pulumi.get(self, "created_time")
@created_time.setter
def created_time(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "created_time", value)
@property
@pulumi.getter
def datasets(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['DatasetReferenceArgs']]]]:
"""
The list of datasets associated with the model.
"""
return pulumi.get(self, "datasets")
@datasets.setter
def datasets(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['DatasetReferenceArgs']]]]):
pulumi.set(self, "datasets", value)
@property
@pulumi.getter(name="derivedModelIds")
def derived_model_ids(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
Models derived from this model
"""
return pulumi.get(self, "derived_model_ids")
@derived_model_ids.setter
def derived_model_ids(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "derived_model_ids", value)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
"""
The Model description text.
"""
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@property
@pulumi.getter(name="experimentName")
def experiment_name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the experiment where this model was created.
"""
return pulumi.get(self, "experiment_name")
@experiment_name.setter
def experiment_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "experiment_name", value)
@property
@pulumi.getter
def framework(self) -> Optional[pulumi.Input[str]]:
"""
The Model framework.
"""
return pulumi.get(self, "framework")
@framework.setter
def framework(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "framework", value)
@property
@pulumi.getter(name="frameworkVersion")
def framework_version(self) -> Optional[pulumi.Input[str]]:
"""
The Model framework version.
"""
return pulumi.get(self, "framework_version")
@framework_version.setter
def framework_version(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "framework_version", value)
@property
@pulumi.getter
def id(self) -> Optional[pulumi.Input[str]]:
"""
The Model Id.
"""
return pulumi.get(self, "id")
@id.setter
def id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "id", value)
@property
@pulumi.getter(name="kvTags")
def kv_tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
The Model tag dictionary. Items are mutable.
"""
return pulumi.get(self, "kv_tags")
@kv_tags.setter
def kv_tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "kv_tags", value)
@property
@pulumi.getter(name="modifiedTime")
def modified_time(self) -> Optional[pulumi.Input[str]]:
"""
The Model last modified time (UTC).
"""
return pulumi.get(self, "modified_time")
@modified_time.setter
def modified_time(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "modified_time", value)
@property
@pulumi.getter(name="parentModelId")
def parent_model_id(self) -> Optional[pulumi.Input[str]]:
"""
The Parent Model Id.
"""
return pulumi.get(self, "parent_model_id")
@parent_model_id.setter
def parent_model_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "parent_model_id", value)
@property
@pulumi.getter
def properties(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
The Model property dictionary. Properties are immutable.
"""
return pulumi.get(self, "properties")
@properties.setter
def properties(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "properties", value)
@property
@pulumi.getter(name="resourceRequirements")
def resource_requirements(self) -> Optional[pulumi.Input['ContainerResourceRequirementsArgs']]:
"""
Resource requirements for the model
"""
return pulumi.get(self, "resource_requirements")
@resource_requirements.setter
def resource_requirements(self, value: Optional[pulumi.Input['ContainerResourceRequirementsArgs']]):
pulumi.set(self, "resource_requirements", value)
@property
@pulumi.getter(name="runId")
def run_id(self) -> Optional[pulumi.Input[str]]:
"""
The RunId that created this model.
"""
return pulumi.get(self, "run_id")
@run_id.setter
def run_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "run_id", value)
@property
@pulumi.getter(name="sampleInputData")
def sample_input_data(self) -> Optional[pulumi.Input[str]]:
"""
Sample Input Data for the Model. A reference to a dataset in the workspace in the format aml://dataset/{datasetId}
"""
return pulumi.get(self, "sample_input_data")
@sample_input_data.setter
def sample_input_data(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "sample_input_data", value)
@property
@pulumi.getter(name="sampleOutputData")
def sample_output_data(self) -> Optional[pulumi.Input[str]]:
"""
Sample Output Data for the Model. A reference to a dataset in the workspace in the format aml://dataset/{datasetId}
"""
return pulumi.get(self, "sample_output_data")
@sample_output_data.setter
def sample_output_data(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "sample_output_data", value)
@property
@pulumi.getter
def unpack(self) -> Optional[pulumi.Input[bool]]:
"""
Indicates whether we need to unpack the Model during docker Image creation.
"""
return pulumi.get(self, "unpack")
@unpack.setter
def unpack(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "unpack", value)
@property
@pulumi.getter
def version(self) -> Optional[pulumi.Input[float]]:
"""
The Model version assigned by Model Management Service.
"""
return pulumi.get(self, "version")
@version.setter
def version(self, value: Optional[pulumi.Input[float]]):
pulumi.set(self, "version", value)
@pulumi.input_type
class ModelDockerSectionBaseImageRegistryArgs:
def __init__(__self__, *,
address: Optional[pulumi.Input[str]] = None,
password: Optional[pulumi.Input[str]] = None,
username: Optional[pulumi.Input[str]] = None):
"""
Image registry that contains the base image.
"""
if address is not None:
pulumi.set(__self__, "address", address)
if password is not None:
pulumi.set(__self__, "password", password)
if username is not None:
pulumi.set(__self__, "username", username)
@property
@pulumi.getter
def address(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "address")
@address.setter
def address(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "address", value)
@property
@pulumi.getter
def password(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "password")
@password.setter
def password(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "password", value)
@property
@pulumi.getter
def username(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "username")
@username.setter
def username(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "username", value)
@pulumi.input_type
class ModelEnvironmentDefinitionDockerArgs:
def __init__(__self__, *,
base_dockerfile: Optional[pulumi.Input[str]] = None,
base_image: Optional[pulumi.Input[str]] = None,
base_image_registry: Optional[pulumi.Input['ModelDockerSectionBaseImageRegistryArgs']] = None):
"""
The definition of a Docker container.
:param pulumi.Input[str] base_dockerfile: Base Dockerfile used for Docker-based runs. Mutually exclusive with BaseImage.
:param pulumi.Input[str] base_image: Base image used for Docker-based runs. Mutually exclusive with BaseDockerfile.
:param pulumi.Input['ModelDockerSectionBaseImageRegistryArgs'] base_image_registry: Image registry that contains the base image.
"""
if base_dockerfile is not None:
pulumi.set(__self__, "base_dockerfile", base_dockerfile)
if base_image is not None:
pulumi.set(__self__, "base_image", base_image)
if base_image_registry is not None:
pulumi.set(__self__, "base_image_registry", base_image_registry)
@property
@pulumi.getter(name="baseDockerfile")
def base_dockerfile(self) -> Optional[pulumi.Input[str]]:
"""
Base Dockerfile used for Docker-based runs. Mutually exclusive with BaseImage.
"""
return pulumi.get(self, "base_dockerfile")
@base_dockerfile.setter
def base_dockerfile(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "base_dockerfile", value)
@property
@pulumi.getter(name="baseImage")
def base_image(self) -> Optional[pulumi.Input[str]]:
"""
Base image used for Docker-based runs. Mutually exclusive with BaseDockerfile.
"""
return pulumi.get(self, "base_image")
@base_image.setter
def base_image(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "base_image", value)
@property
@pulumi.getter(name="baseImageRegistry")
def base_image_registry(self) -> Optional[pulumi.Input['ModelDockerSectionBaseImageRegistryArgs']]:
"""
Image registry that contains the base image.
"""
return pulumi.get(self, "base_image_registry")
@base_image_registry.setter
def base_image_registry(self, value: Optional[pulumi.Input['ModelDockerSectionBaseImageRegistryArgs']]):
pulumi.set(self, "base_image_registry", value)
@pulumi.input_type
class ModelEnvironmentDefinitionPythonArgs:
def __init__(__self__, *,
base_conda_environment: Optional[pulumi.Input[str]] = None,
conda_dependencies: Optional[Any] = None,
interpreter_path: Optional[pulumi.Input[str]] = None,
user_managed_dependencies: Optional[pulumi.Input[bool]] = None):
"""
Settings for a Python environment.
:param Any conda_dependencies: A JObject containing Conda dependencies.
:param pulumi.Input[str] interpreter_path: The python interpreter path to use if an environment build is not required. The path specified gets used to call the user script.
:param pulumi.Input[bool] user_managed_dependencies: True means that AzureML reuses an existing python environment; False means that AzureML will create a python environment based on the Conda dependencies specification.
"""
if base_conda_environment is not None:
pulumi.set(__self__, "base_conda_environment", base_conda_environment)
if conda_dependencies is not None:
pulumi.set(__self__, "conda_dependencies", conda_dependencies)
if interpreter_path is not None:
pulumi.set(__self__, "interpreter_path", interpreter_path)
if user_managed_dependencies is not None:
pulumi.set(__self__, "user_managed_dependencies", user_managed_dependencies)
@property
@pulumi.getter(name="baseCondaEnvironment")
def base_conda_environment(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "base_conda_environment")
@base_conda_environment.setter
def base_conda_environment(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "base_conda_environment", value)
@property
@pulumi.getter(name="condaDependencies")
def conda_dependencies(self) -> Optional[Any]:
"""
A JObject containing Conda dependencies.
"""
return pulumi.get(self, "conda_dependencies")
@conda_dependencies.setter
def conda_dependencies(self, value: Optional[Any]):
pulumi.set(self, "conda_dependencies", value)
@property
@pulumi.getter(name="interpreterPath")
def interpreter_path(self) -> Optional[pulumi.Input[str]]:
"""
The python interpreter path to use if an environment build is not required. The path specified gets used to call the user script.
"""
return pulumi.get(self, "interpreter_path")
@interpreter_path.setter
def interpreter_path(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "interpreter_path", value)
@property
@pulumi.getter(name="userManagedDependencies")
def user_managed_dependencies(self) -> Optional[pulumi.Input[bool]]:
"""
True means that AzureML reuses an existing python environment; False means that AzureML will create a python environment based on the Conda dependencies specification.
"""
return pulumi.get(self, "user_managed_dependencies")
@user_managed_dependencies.setter
def user_managed_dependencies(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "user_managed_dependencies", value)
@pulumi.input_type
class ModelEnvironmentDefinitionRArgs:
def __init__(__self__, *,
bio_conductor_packages: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
cran_packages: Optional[pulumi.Input[Sequence[pulumi.Input['RCranPackageArgs']]]] = None,
custom_url_packages: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
git_hub_packages: Optional[pulumi.Input[Sequence[pulumi.Input['RGitHubPackageArgs']]]] = None,
r_version: Optional[pulumi.Input[str]] = None,
rscript_path: Optional[pulumi.Input[str]] = None,
snapshot_date: Optional[pulumi.Input[str]] = None,
user_managed: Optional[pulumi.Input[bool]] = None):
"""
        Settings for an R environment.
:param pulumi.Input[Sequence[pulumi.Input[str]]] bio_conductor_packages: The packages from Bioconductor.
:param pulumi.Input[Sequence[pulumi.Input['RCranPackageArgs']]] cran_packages: The CRAN packages to use.
:param pulumi.Input[Sequence[pulumi.Input[str]]] custom_url_packages: The packages from custom urls.
:param pulumi.Input[Sequence[pulumi.Input['RGitHubPackageArgs']]] git_hub_packages: The packages directly from GitHub.
:param pulumi.Input[str] r_version: The version of R to be installed
:param pulumi.Input[str] rscript_path: The Rscript path to use if an environment build is not required.
The path specified gets used to call the user script.
:param pulumi.Input[str] snapshot_date: Date of MRAN snapshot to use in YYYY-MM-DD format, e.g. "2019-04-17"
:param pulumi.Input[bool] user_managed: Indicates whether the environment is managed by user or by AzureML.
"""
if bio_conductor_packages is not None:
pulumi.set(__self__, "bio_conductor_packages", bio_conductor_packages)
if cran_packages is not None:
pulumi.set(__self__, "cran_packages", cran_packages)
if custom_url_packages is not None:
pulumi.set(__self__, "custom_url_packages", custom_url_packages)
if git_hub_packages is not None:
pulumi.set(__self__, "git_hub_packages", git_hub_packages)
if r_version is not None:
pulumi.set(__self__, "r_version", r_version)
if rscript_path is not None:
pulumi.set(__self__, "rscript_path", rscript_path)
if snapshot_date is not None:
pulumi.set(__self__, "snapshot_date", snapshot_date)
if user_managed is not None:
pulumi.set(__self__, "user_managed", user_managed)
@property
@pulumi.getter(name="bioConductorPackages")
def bio_conductor_packages(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
The packages from Bioconductor.
"""
return pulumi.get(self, "bio_conductor_packages")
@bio_conductor_packages.setter
def bio_conductor_packages(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "bio_conductor_packages", value)
@property
@pulumi.getter(name="cranPackages")
def cran_packages(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['RCranPackageArgs']]]]:
"""
The CRAN packages to use.
"""
return pulumi.get(self, "cran_packages")
@cran_packages.setter
def cran_packages(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['RCranPackageArgs']]]]):
pulumi.set(self, "cran_packages", value)
@property
@pulumi.getter(name="customUrlPackages")
def custom_url_packages(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
The packages from custom urls.
"""
return pulumi.get(self, "custom_url_packages")
@custom_url_packages.setter
def custom_url_packages(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "custom_url_packages", value)
@property
@pulumi.getter(name="gitHubPackages")
def git_hub_packages(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['RGitHubPackageArgs']]]]:
"""
The packages directly from GitHub.
"""
return pulumi.get(self, "git_hub_packages")
@git_hub_packages.setter
def git_hub_packages(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['RGitHubPackageArgs']]]]):
pulumi.set(self, "git_hub_packages", value)
@property
@pulumi.getter(name="rVersion")
def r_version(self) -> Optional[pulumi.Input[str]]:
"""
The version of R to be installed
"""
return pulumi.get(self, "r_version")
@r_version.setter
def r_version(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "r_version", value)
@property
@pulumi.getter(name="rscriptPath")
def rscript_path(self) -> Optional[pulumi.Input[str]]:
"""
The Rscript path to use if an environment build is not required.
The path specified gets used to call the user script.
"""
return pulumi.get(self, "rscript_path")
@rscript_path.setter
def rscript_path(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "rscript_path", value)
@property
@pulumi.getter(name="snapshotDate")
def snapshot_date(self) -> Optional[pulumi.Input[str]]:
"""
Date of MRAN snapshot to use in YYYY-MM-DD format, e.g. "2019-04-17"
"""
return pulumi.get(self, "snapshot_date")
@snapshot_date.setter
def snapshot_date(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "snapshot_date", value)
@property
@pulumi.getter(name="userManaged")
def user_managed(self) -> Optional[pulumi.Input[bool]]:
"""
Indicates whether the environment is managed by user or by AzureML.
"""
return pulumi.get(self, "user_managed")
@user_managed.setter
def user_managed(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "user_managed", value)
@pulumi.input_type
class ModelEnvironmentDefinitionSparkArgs:
def __init__(__self__, *,
packages: Optional[pulumi.Input[Sequence[pulumi.Input['SparkMavenPackageArgs']]]] = None,
precache_packages: Optional[pulumi.Input[bool]] = None,
repositories: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):
"""
The configuration for a Spark environment.
:param pulumi.Input[Sequence[pulumi.Input['SparkMavenPackageArgs']]] packages: The Spark packages to use.
:param pulumi.Input[bool] precache_packages: Whether to precache the packages.
:param pulumi.Input[Sequence[pulumi.Input[str]]] repositories: The list of spark repositories.
"""
if packages is not None:
pulumi.set(__self__, "packages", packages)
if precache_packages is not None:
pulumi.set(__self__, "precache_packages", precache_packages)
if repositories is not None:
pulumi.set(__self__, "repositories", repositories)
@property
@pulumi.getter
def packages(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['SparkMavenPackageArgs']]]]:
"""
The Spark packages to use.
"""
return pulumi.get(self, "packages")
@packages.setter
def packages(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['SparkMavenPackageArgs']]]]):
pulumi.set(self, "packages", value)
@property
@pulumi.getter(name="precachePackages")
def precache_packages(self) -> Optional[pulumi.Input[bool]]:
"""
Whether to precache the packages.
"""
return pulumi.get(self, "precache_packages")
@precache_packages.setter
def precache_packages(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "precache_packages", value)
@property
@pulumi.getter
def repositories(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
The list of spark repositories.
"""
return pulumi.get(self, "repositories")
@repositories.setter
def repositories(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "repositories", value)
@pulumi.input_type
class PersonalComputeInstanceSettingsArgs:
def __init__(__self__, *,
assigned_user: Optional[pulumi.Input['AssignedUserArgs']] = None):
"""
Settings for a personal compute instance.
:param pulumi.Input['AssignedUserArgs'] assigned_user: A user explicitly assigned to a personal compute instance.
"""
if assigned_user is not None:
pulumi.set(__self__, "assigned_user", assigned_user)
@property
@pulumi.getter(name="assignedUser")
def assigned_user(self) -> Optional[pulumi.Input['AssignedUserArgs']]:
"""
A user explicitly assigned to a personal compute instance.
"""
return pulumi.get(self, "assigned_user")
@assigned_user.setter
def assigned_user(self, value: Optional[pulumi.Input['AssignedUserArgs']]):
pulumi.set(self, "assigned_user", value)
@pulumi.input_type
class PrivateLinkServiceConnectionStateArgs:
def __init__(__self__, *,
actions_required: Optional[pulumi.Input[str]] = None,
description: Optional[pulumi.Input[str]] = None,
status: Optional[pulumi.Input[Union[str, 'PrivateEndpointServiceConnectionStatus']]] = None):
"""
A collection of information about the state of the connection between service consumer and provider.
:param pulumi.Input[str] actions_required: A message indicating if changes on the service provider require any updates on the consumer.
:param pulumi.Input[str] description: The reason for approval/rejection of the connection.
:param pulumi.Input[Union[str, 'PrivateEndpointServiceConnectionStatus']] status: Indicates whether the connection has been Approved/Rejected/Removed by the owner of the service.
"""
if actions_required is not None:
pulumi.set(__self__, "actions_required", actions_required)
if description is not None:
pulumi.set(__self__, "description", description)
if status is not None:
pulumi.set(__self__, "status", status)
@property
@pulumi.getter(name="actionsRequired")
def actions_required(self) -> Optional[pulumi.Input[str]]:
"""
A message indicating if changes on the service provider require any updates on the consumer.
"""
return pulumi.get(self, "actions_required")
@actions_required.setter
def actions_required(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "actions_required", value)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
"""
The reason for approval/rejection of the connection.
"""
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@property
@pulumi.getter
def status(self) -> Optional[pulumi.Input[Union[str, 'PrivateEndpointServiceConnectionStatus']]]:
"""
Indicates whether the connection has been Approved/Rejected/Removed by the owner of the service.
"""
return pulumi.get(self, "status")
@status.setter
def status(self, value: Optional[pulumi.Input[Union[str, 'PrivateEndpointServiceConnectionStatus']]]):
pulumi.set(self, "status", value)
@pulumi.input_type
class RCranPackageArgs:
def __init__(__self__, *,
name: Optional[pulumi.Input[str]] = None,
repository: Optional[pulumi.Input[str]] = None):
"""
:param pulumi.Input[str] name: The package name.
:param pulumi.Input[str] repository: The repository name.
"""
if name is not None:
pulumi.set(__self__, "name", name)
if repository is not None:
pulumi.set(__self__, "repository", repository)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
The package name.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def repository(self) -> Optional[pulumi.Input[str]]:
"""
The repository name.
"""
return pulumi.get(self, "repository")
@repository.setter
def repository(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "repository", value)
@pulumi.input_type
class RGitHubPackageArgs:
def __init__(__self__, *,
auth_token: Optional[pulumi.Input[str]] = None,
repository: Optional[pulumi.Input[str]] = None):
"""
:param pulumi.Input[str] auth_token: Personal access token to install from a private repo
:param pulumi.Input[str] repository: Repository address in the format username/repo[/subdir][@ref|#pull].
"""
if auth_token is not None:
pulumi.set(__self__, "auth_token", auth_token)
if repository is not None:
pulumi.set(__self__, "repository", repository)
@property
@pulumi.getter(name="authToken")
def auth_token(self) -> Optional[pulumi.Input[str]]:
"""
Personal access token to install from a private repo
"""
return pulumi.get(self, "auth_token")
@auth_token.setter
def auth_token(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "auth_token", value)
@property
@pulumi.getter
def repository(self) -> Optional[pulumi.Input[str]]:
"""
Repository address in the format username/repo[/subdir][@ref|#pull].
"""
return pulumi.get(self, "repository")
@repository.setter
def repository(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "repository", value)
@pulumi.input_type
class ResourceIdArgs:
def __init__(__self__, *,
id: pulumi.Input[str]):
"""
Represents a resource ID. For example, for a subnet, it is the resource URL for the subnet.
:param pulumi.Input[str] id: The ID of the resource
"""
pulumi.set(__self__, "id", id)
@property
@pulumi.getter
def id(self) -> pulumi.Input[str]:
"""
The ID of the resource
"""
return pulumi.get(self, "id")
@id.setter
def id(self, value: pulumi.Input[str]):
pulumi.set(self, "id", value)
@pulumi.input_type
class ScaleSettingsArgs:
def __init__(__self__, *,
max_node_count: pulumi.Input[int],
min_node_count: Optional[pulumi.Input[int]] = None,
node_idle_time_before_scale_down: Optional[pulumi.Input[str]] = None):
"""
scale settings for AML Compute
:param pulumi.Input[int] max_node_count: Max number of nodes to use
:param pulumi.Input[int] min_node_count: Min number of nodes to use
:param pulumi.Input[str] node_idle_time_before_scale_down: Node Idle Time before scaling down amlCompute. This string needs to be in the RFC Format.
"""
pulumi.set(__self__, "max_node_count", max_node_count)
if min_node_count is None:
min_node_count = 0
if min_node_count is not None:
pulumi.set(__self__, "min_node_count", min_node_count)
if node_idle_time_before_scale_down is not None:
pulumi.set(__self__, "node_idle_time_before_scale_down", node_idle_time_before_scale_down)
@property
@pulumi.getter(name="maxNodeCount")
def max_node_count(self) -> pulumi.Input[int]:
"""
Max number of nodes to use
"""
return pulumi.get(self, "max_node_count")
@max_node_count.setter
def max_node_count(self, value: pulumi.Input[int]):
pulumi.set(self, "max_node_count", value)
@property
@pulumi.getter(name="minNodeCount")
def min_node_count(self) -> Optional[pulumi.Input[int]]:
"""
Min number of nodes to use
"""
return pulumi.get(self, "min_node_count")
@min_node_count.setter
def min_node_count(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "min_node_count", value)
@property
@pulumi.getter(name="nodeIdleTimeBeforeScaleDown")
def node_idle_time_before_scale_down(self) -> Optional[pulumi.Input[str]]:
"""
Node Idle Time before scaling down amlCompute. This string needs to be in the RFC Format.
"""
return pulumi.get(self, "node_idle_time_before_scale_down")
@node_idle_time_before_scale_down.setter
def node_idle_time_before_scale_down(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "node_idle_time_before_scale_down", value)
@pulumi.input_type
class ScriptReferenceArgs:
def __init__(__self__, *,
script_arguments: Optional[pulumi.Input[str]] = None,
script_data: Optional[pulumi.Input[str]] = None,
script_source: Optional[pulumi.Input[str]] = None,
timeout: Optional[pulumi.Input[str]] = None):
"""
Script reference
:param pulumi.Input[str] script_arguments: Optional command line arguments passed to the script to run.
:param pulumi.Input[str] script_data: The location of scripts in the mounted volume.
:param pulumi.Input[str] script_source: The storage source of the script: inline, workspace.
:param pulumi.Input[str] timeout: Optional time period passed to timeout command.
"""
if script_arguments is not None:
pulumi.set(__self__, "script_arguments", script_arguments)
if script_data is not None:
pulumi.set(__self__, "script_data", script_data)
if script_source is not None:
pulumi.set(__self__, "script_source", script_source)
if timeout is not None:
pulumi.set(__self__, "timeout", timeout)
@property
@pulumi.getter(name="scriptArguments")
def script_arguments(self) -> Optional[pulumi.Input[str]]:
"""
Optional command line arguments passed to the script to run.
"""
return pulumi.get(self, "script_arguments")
@script_arguments.setter
def script_arguments(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "script_arguments", value)
@property
@pulumi.getter(name="scriptData")
def script_data(self) -> Optional[pulumi.Input[str]]:
"""
The location of scripts in the mounted volume.
"""
return pulumi.get(self, "script_data")
@script_data.setter
def script_data(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "script_data", value)
@property
@pulumi.getter(name="scriptSource")
def script_source(self) -> Optional[pulumi.Input[str]]:
"""
The storage source of the script: inline, workspace.
"""
return pulumi.get(self, "script_source")
@script_source.setter
def script_source(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "script_source", value)
@property
@pulumi.getter
def timeout(self) -> Optional[pulumi.Input[str]]:
"""
Optional time period passed to timeout command.
"""
return pulumi.get(self, "timeout")
@timeout.setter
def timeout(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "timeout", value)
@pulumi.input_type
class ScriptsToExecuteArgs:
def __init__(__self__, *,
creation_script: Optional[pulumi.Input['ScriptReferenceArgs']] = None,
startup_script: Optional[pulumi.Input['ScriptReferenceArgs']] = None):
"""
Customized setup scripts
:param pulumi.Input['ScriptReferenceArgs'] creation_script: Script that's run only once during provision of the compute.
:param pulumi.Input['ScriptReferenceArgs'] startup_script: Script that's run every time the machine starts.
"""
if creation_script is not None:
pulumi.set(__self__, "creation_script", creation_script)
if startup_script is not None:
pulumi.set(__self__, "startup_script", startup_script)
@property
@pulumi.getter(name="creationScript")
def creation_script(self) -> Optional[pulumi.Input['ScriptReferenceArgs']]:
"""
Script that's run only once during provision of the compute.
"""
return pulumi.get(self, "creation_script")
@creation_script.setter
def creation_script(self, value: Optional[pulumi.Input['ScriptReferenceArgs']]):
pulumi.set(self, "creation_script", value)
@property
@pulumi.getter(name="startupScript")
def startup_script(self) -> Optional[pulumi.Input['ScriptReferenceArgs']]:
"""
Script that's run every time the machine starts.
"""
return pulumi.get(self, "startup_script")
@startup_script.setter
def startup_script(self, value: Optional[pulumi.Input['ScriptReferenceArgs']]):
pulumi.set(self, "startup_script", value)
@pulumi.input_type
class ServiceManagedResourcesSettingsArgs:
def __init__(__self__, *,
cosmos_db: Optional[pulumi.Input['CosmosDbSettingsArgs']] = None):
"""
:param pulumi.Input['CosmosDbSettingsArgs'] cosmos_db: The settings for the service managed cosmosdb account.
"""
if cosmos_db is not None:
pulumi.set(__self__, "cosmos_db", cosmos_db)
@property
@pulumi.getter(name="cosmosDb")
def cosmos_db(self) -> Optional[pulumi.Input['CosmosDbSettingsArgs']]:
"""
The settings for the service managed cosmosdb account.
"""
return pulumi.get(self, "cosmos_db")
@cosmos_db.setter
def cosmos_db(self, value: Optional[pulumi.Input['CosmosDbSettingsArgs']]):
pulumi.set(self, "cosmos_db", value)
@pulumi.input_type
class SetupScriptsArgs:
def __init__(__self__, *,
scripts: Optional[pulumi.Input['ScriptsToExecuteArgs']] = None):
"""
Details of customized scripts to execute for setting up the cluster.
:param pulumi.Input['ScriptsToExecuteArgs'] scripts: Customized setup scripts
"""
if scripts is not None:
pulumi.set(__self__, "scripts", scripts)
@property
@pulumi.getter
def scripts(self) -> Optional[pulumi.Input['ScriptsToExecuteArgs']]:
"""
Customized setup scripts
"""
return pulumi.get(self, "scripts")
@scripts.setter
def scripts(self, value: Optional[pulumi.Input['ScriptsToExecuteArgs']]):
pulumi.set(self, "scripts", value)
@pulumi.input_type
class SharedPrivateLinkResourceArgs:
def __init__(__self__, *,
group_id: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
private_link_resource_id: Optional[pulumi.Input[str]] = None,
request_message: Optional[pulumi.Input[str]] = None,
status: Optional[pulumi.Input[Union[str, 'PrivateEndpointServiceConnectionStatus']]] = None):
"""
:param pulumi.Input[str] group_id: The private link resource group id.
:param pulumi.Input[str] name: Unique name of the private link.
:param pulumi.Input[str] private_link_resource_id: The resource id that private link links to.
:param pulumi.Input[str] request_message: Request message.
:param pulumi.Input[Union[str, 'PrivateEndpointServiceConnectionStatus']] status: Indicates whether the connection has been Approved/Rejected/Removed by the owner of the service.
"""
if group_id is not None:
pulumi.set(__self__, "group_id", group_id)
if name is not None:
pulumi.set(__self__, "name", name)
if private_link_resource_id is not None:
pulumi.set(__self__, "private_link_resource_id", private_link_resource_id)
if request_message is not None:
pulumi.set(__self__, "request_message", request_message)
if status is not None:
pulumi.set(__self__, "status", status)
@property
@pulumi.getter(name="groupId")
def group_id(self) -> Optional[pulumi.Input[str]]:
"""
The private link resource group id.
"""
return pulumi.get(self, "group_id")
@group_id.setter
def group_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "group_id", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
Unique name of the private link.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter(name="privateLinkResourceId")
def private_link_resource_id(self) -> Optional[pulumi.Input[str]]:
"""
The resource id that private link links to.
"""
return pulumi.get(self, "private_link_resource_id")
@private_link_resource_id.setter
def private_link_resource_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "private_link_resource_id", value)
@property
@pulumi.getter(name="requestMessage")
def request_message(self) -> Optional[pulumi.Input[str]]:
"""
Request message.
"""
return pulumi.get(self, "request_message")
@request_message.setter
def request_message(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "request_message", value)
@property
@pulumi.getter
def status(self) -> Optional[pulumi.Input[Union[str, 'PrivateEndpointServiceConnectionStatus']]]:
"""
Indicates whether the connection has been Approved/Rejected/Removed by the owner of the service.
"""
return pulumi.get(self, "status")
@status.setter
def status(self, value: Optional[pulumi.Input[Union[str, 'PrivateEndpointServiceConnectionStatus']]]):
pulumi.set(self, "status", value)
@pulumi.input_type
class SkuArgs:
def __init__(__self__, *,
name: Optional[pulumi.Input[str]] = None,
tier: Optional[pulumi.Input[str]] = None):
"""
Sku of the resource
:param pulumi.Input[str] name: Name of the sku
:param pulumi.Input[str] tier: Tier of the sku like Basic or Enterprise
"""
if name is not None:
pulumi.set(__self__, "name", name)
if tier is not None:
pulumi.set(__self__, "tier", tier)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
Name of the sku
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def tier(self) -> Optional[pulumi.Input[str]]:
"""
Tier of the sku like Basic or Enterprise
"""
return pulumi.get(self, "tier")
@tier.setter
def tier(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "tier", value)
@pulumi.input_type
class SparkMavenPackageArgs:
def __init__(__self__, *,
artifact: Optional[pulumi.Input[str]] = None,
group: Optional[pulumi.Input[str]] = None,
version: Optional[pulumi.Input[str]] = None):
if artifact is not None:
pulumi.set(__self__, "artifact", artifact)
if group is not None:
pulumi.set(__self__, "group", group)
if version is not None:
pulumi.set(__self__, "version", version)
@property
@pulumi.getter
def artifact(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "artifact")
@artifact.setter
def artifact(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "artifact", value)
@property
@pulumi.getter
def group(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "group")
@group.setter
def group(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "group", value)
@property
@pulumi.getter
def version(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "version")
@version.setter
def version(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "version", value)
@pulumi.input_type
class SslConfigurationArgs:
def __init__(__self__, *,
cert: Optional[pulumi.Input[str]] = None,
cname: Optional[pulumi.Input[str]] = None,
key: Optional[pulumi.Input[str]] = None,
status: Optional[pulumi.Input[str]] = None):
"""
The ssl configuration for scoring
:param pulumi.Input[str] cert: Cert data
:param pulumi.Input[str] cname: CNAME of the cert
:param pulumi.Input[str] key: Key data
:param pulumi.Input[str] status: Enable or disable ssl for scoring
"""
if cert is not None:
pulumi.set(__self__, "cert", cert)
if cname is not None:
pulumi.set(__self__, "cname", cname)
if key is not None:
pulumi.set(__self__, "key", key)
if status is not None:
pulumi.set(__self__, "status", status)
@property
@pulumi.getter
def cert(self) -> Optional[pulumi.Input[str]]:
"""
Cert data
"""
return pulumi.get(self, "cert")
@cert.setter
def cert(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "cert", value)
@property
@pulumi.getter
def cname(self) -> Optional[pulumi.Input[str]]:
"""
CNAME of the cert
"""
return pulumi.get(self, "cname")
@cname.setter
def cname(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "cname", value)
@property
@pulumi.getter
def key(self) -> Optional[pulumi.Input[str]]:
"""
Key data
"""
return pulumi.get(self, "key")
@key.setter
def key(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "key", value)
@property
@pulumi.getter
def status(self) -> Optional[pulumi.Input[str]]:
"""
Enable or disable ssl for scoring
"""
return pulumi.get(self, "status")
@status.setter
def status(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "status", value)
@pulumi.input_type
class UserAccountCredentialsArgs:
def __init__(__self__, *,
admin_user_name: pulumi.Input[str],
admin_user_password: Optional[pulumi.Input[str]] = None,
admin_user_ssh_public_key: Optional[pulumi.Input[str]] = None):
"""
        Settings for the user account that gets created on each of the nodes of a compute.
:param pulumi.Input[str] admin_user_name: Name of the administrator user account which can be used to SSH to nodes.
:param pulumi.Input[str] admin_user_password: Password of the administrator user account.
:param pulumi.Input[str] admin_user_ssh_public_key: SSH public key of the administrator user account.
"""
pulumi.set(__self__, "admin_user_name", admin_user_name)
if admin_user_password is not None:
pulumi.set(__self__, "admin_user_password", admin_user_password)
if admin_user_ssh_public_key is not None:
pulumi.set(__self__, "admin_user_ssh_public_key", admin_user_ssh_public_key)
@property
@pulumi.getter(name="adminUserName")
def admin_user_name(self) -> pulumi.Input[str]:
"""
Name of the administrator user account which can be used to SSH to nodes.
"""
return pulumi.get(self, "admin_user_name")
@admin_user_name.setter
def admin_user_name(self, value: pulumi.Input[str]):
pulumi.set(self, "admin_user_name", value)
@property
@pulumi.getter(name="adminUserPassword")
def admin_user_password(self) -> Optional[pulumi.Input[str]]:
"""
Password of the administrator user account.
"""
return pulumi.get(self, "admin_user_password")
@admin_user_password.setter
def admin_user_password(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "admin_user_password", value)
@property
@pulumi.getter(name="adminUserSshPublicKey")
def admin_user_ssh_public_key(self) -> Optional[pulumi.Input[str]]:
"""
SSH public key of the administrator user account.
"""
return pulumi.get(self, "admin_user_ssh_public_key")
@admin_user_ssh_public_key.setter
def admin_user_ssh_public_key(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "admin_user_ssh_public_key", value)
@pulumi.input_type
class VirtualMachineArgs:
def __init__(__self__, *,
compute_type: pulumi.Input[str],
compute_location: Optional[pulumi.Input[str]] = None,
description: Optional[pulumi.Input[str]] = None,
properties: Optional[pulumi.Input['VirtualMachinePropertiesArgs']] = None,
resource_id: Optional[pulumi.Input[str]] = None):
"""
A Machine Learning compute based on Azure Virtual Machines.
:param pulumi.Input[str] compute_type: The type of compute
Expected value is 'VirtualMachine'.
:param pulumi.Input[str] compute_location: Location for the underlying compute
:param pulumi.Input[str] description: The description of the Machine Learning compute.
:param pulumi.Input[str] resource_id: ARM resource id of the underlying compute
"""
pulumi.set(__self__, "compute_type", 'VirtualMachine')
if compute_location is not None:
pulumi.set(__self__, "compute_location", compute_location)
if description is not None:
pulumi.set(__self__, "description", description)
if properties is not None:
pulumi.set(__self__, "properties", properties)
if resource_id is not None:
pulumi.set(__self__, "resource_id", resource_id)
@property
@pulumi.getter(name="computeType")
def compute_type(self) -> pulumi.Input[str]:
"""
The type of compute
Expected value is 'VirtualMachine'.
"""
return pulumi.get(self, "compute_type")
@compute_type.setter
def compute_type(self, value: pulumi.Input[str]):
pulumi.set(self, "compute_type", value)
@property
@pulumi.getter(name="computeLocation")
def compute_location(self) -> Optional[pulumi.Input[str]]:
"""
Location for the underlying compute
"""
return pulumi.get(self, "compute_location")
@compute_location.setter
def compute_location(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "compute_location", value)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
"""
The description of the Machine Learning compute.
"""
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@property
@pulumi.getter
def properties(self) -> Optional[pulumi.Input['VirtualMachinePropertiesArgs']]:
return pulumi.get(self, "properties")
@properties.setter
def properties(self, value: Optional[pulumi.Input['VirtualMachinePropertiesArgs']]):
pulumi.set(self, "properties", value)
@property
@pulumi.getter(name="resourceId")
def resource_id(self) -> Optional[pulumi.Input[str]]:
"""
ARM resource id of the underlying compute
"""
return pulumi.get(self, "resource_id")
@resource_id.setter
def resource_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "resource_id", value)
@pulumi.input_type
class VirtualMachineImageArgs:
def __init__(__self__, *,
id: pulumi.Input[str]):
"""
Virtual Machine image for Windows AML Compute
:param pulumi.Input[str] id: Virtual Machine image path
"""
pulumi.set(__self__, "id", id)
@property
@pulumi.getter
def id(self) -> pulumi.Input[str]:
"""
Virtual Machine image path
"""
return pulumi.get(self, "id")
@id.setter
def id(self, value: pulumi.Input[str]):
pulumi.set(self, "id", value)
@pulumi.input_type
class VirtualMachinePropertiesArgs:
def __init__(__self__, *,
address: Optional[pulumi.Input[str]] = None,
administrator_account: Optional[pulumi.Input['VirtualMachineSshCredentialsArgs']] = None,
ssh_port: Optional[pulumi.Input[int]] = None,
virtual_machine_size: Optional[pulumi.Input[str]] = None):
"""
:param pulumi.Input[str] address: Public IP address of the virtual machine.
:param pulumi.Input['VirtualMachineSshCredentialsArgs'] administrator_account: Admin credentials for virtual machine
:param pulumi.Input[int] ssh_port: Port open for ssh connections.
:param pulumi.Input[str] virtual_machine_size: Virtual Machine size
"""
if address is not None:
pulumi.set(__self__, "address", address)
if administrator_account is not None:
pulumi.set(__self__, "administrator_account", administrator_account)
if ssh_port is not None:
pulumi.set(__self__, "ssh_port", ssh_port)
if virtual_machine_size is not None:
pulumi.set(__self__, "virtual_machine_size", virtual_machine_size)
@property
@pulumi.getter
def address(self) -> Optional[pulumi.Input[str]]:
"""
Public IP address of the virtual machine.
"""
return pulumi.get(self, "address")
@address.setter
def address(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "address", value)
@property
@pulumi.getter(name="administratorAccount")
def administrator_account(self) -> Optional[pulumi.Input['VirtualMachineSshCredentialsArgs']]:
"""
Admin credentials for virtual machine
"""
return pulumi.get(self, "administrator_account")
@administrator_account.setter
def administrator_account(self, value: Optional[pulumi.Input['VirtualMachineSshCredentialsArgs']]):
pulumi.set(self, "administrator_account", value)
@property
@pulumi.getter(name="sshPort")
def ssh_port(self) -> Optional[pulumi.Input[int]]:
"""
Port open for ssh connections.
"""
return pulumi.get(self, "ssh_port")
@ssh_port.setter
def ssh_port(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "ssh_port", value)
@property
@pulumi.getter(name="virtualMachineSize")
def virtual_machine_size(self) -> Optional[pulumi.Input[str]]:
"""
Virtual Machine size
"""
return pulumi.get(self, "virtual_machine_size")
@virtual_machine_size.setter
def virtual_machine_size(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "virtual_machine_size", value)
@pulumi.input_type
class VirtualMachineSshCredentialsArgs:
def __init__(__self__, *,
password: Optional[pulumi.Input[str]] = None,
private_key_data: Optional[pulumi.Input[str]] = None,
public_key_data: Optional[pulumi.Input[str]] = None,
username: Optional[pulumi.Input[str]] = None):
"""
Admin credentials for virtual machine
:param pulumi.Input[str] password: Password of admin account
:param pulumi.Input[str] private_key_data: Private key data
:param pulumi.Input[str] public_key_data: Public key data
:param pulumi.Input[str] username: Username of admin account
"""
if password is not None:
pulumi.set(__self__, "password", password)
if private_key_data is not None:
pulumi.set(__self__, "private_key_data", private_key_data)
if public_key_data is not None:
pulumi.set(__self__, "public_key_data", public_key_data)
if username is not None:
pulumi.set(__self__, "username", username)
@property
@pulumi.getter
def password(self) -> Optional[pulumi.Input[str]]:
"""
Password of admin account
"""
return pulumi.get(self, "password")
@password.setter
def password(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "password", value)
@property
@pulumi.getter(name="privateKeyData")
def private_key_data(self) -> Optional[pulumi.Input[str]]:
"""
Private key data
"""
return pulumi.get(self, "private_key_data")
@private_key_data.setter
def private_key_data(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "private_key_data", value)
@property
@pulumi.getter(name="publicKeyData")
def public_key_data(self) -> Optional[pulumi.Input[str]]:
"""
Public key data
"""
return pulumi.get(self, "public_key_data")
@public_key_data.setter
def public_key_data(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "public_key_data", value)
@property
@pulumi.getter
def username(self) -> Optional[pulumi.Input[str]]:
"""
Username of admin account
"""
return pulumi.get(self, "username")
@username.setter
def username(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "username", value)
|
import pandas as pd
import geopandas
import json
import altair as alt
def make_metrics_df():
    """Collect the mm_gap, SL_index and efficiency_gap columns from each of the 83 plan geojson files."""
GEOJSON = 'geojson/wi_map_plan_{}.geojson'
mm_gaps = []
sl_indices = []
efficiency_gaps = []
plan_number = [i for i in range(1,84)]
for i in range(1,84):
plan = geopandas.read_file(GEOJSON.format(i))
mm_gaps.append(plan['mm_gap'].iloc[0])
sl_indices.append(plan['SL_index'].iloc[0])
efficiency_gaps.append(plan['efficiency_gap'].iloc[0])
metrics_dict = {'plan_number':plan_number,'mm_gap':mm_gaps,'sl_index':sl_indices,'efficiency_gap':efficiency_gaps}
metrics_df = pd.DataFrame(metrics_dict, columns = ['plan_number','mm_gap','sl_index','efficiency_gap'])
return metrics_df
def make_metrics_plot(metric_df, variable, variable_title, plot_title, scale):
    """Return an Altair line chart of one metric column against plan number, with a fixed y-axis domain."""
plot = alt.Chart(metric_df).mark_line(interpolate = 'basis').encode(
alt.X('plan_number', title = "Plan Number"),
alt.Y(variable, title = variable_title, scale = alt.Scale(domain = scale))).properties(
title = plot_title,
width = 300,
height = 300
)
return plot
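# A minimal usage sketch, assuming the geojson/wi_map_plan_{}.geojson files are present;
# the axis titles and scale domains below are illustrative placeholders.
def make_all_metric_plots():
    metrics_df = make_metrics_df()
    return alt.hconcat(
        make_metrics_plot(metrics_df, 'mm_gap', 'Mean-Median Gap', 'Mean-Median Gap by Plan', [-0.1, 0.1]),
        make_metrics_plot(metrics_df, 'sl_index', 'SL Index', 'SL Index by Plan', [0, 1]),
        make_metrics_plot(metrics_df, 'efficiency_gap', 'Efficiency Gap', 'Efficiency Gap by Plan', [-0.2, 0.2]),
    )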
|
# The following comment should be removed at some point in the future.
# mypy: disallow-untyped-defs=False
from __future__ import absolute_import
import hashlib
import logging
import sys
from pip._internal.cli.base_command import Command
from pip._internal.cli.status_codes import ERROR
from pip._internal.utils.hashes import FAVORITE_HASH, STRONG_HASHES
from pip._internal.utils.misc import read_chunks, write_output
logger = logging.getLogger(__name__)
class HashCommand(Command):
"""
Compute a hash of a local package archive.
These can be used with --hash in a requirements file to do repeatable
installs.
"""
usage = '%prog [options] <file> ...'
ignore_require_venv = True
def __init__(self, *args, **kw):
super(HashCommand, self).__init__(*args, **kw)
self.cmd_opts.add_option(
'-a', '--algorithm',
dest='algorithm',
choices=STRONG_HASHES,
action='store',
default=FAVORITE_HASH,
help='The hash algorithm to use: one of %s' %
', '.join(STRONG_HASHES))
self.parser.insert_option_group(0, self.cmd_opts)
def run(self, options, args):
if not args:
self.parser.print_usage(sys.stderr)
return ERROR
algorithm = options.algorithm
for path in args:
write_output('%s:\n--hash=%s:%s',
path, algorithm, _hash_of_file(path, algorithm))
def _hash_of_file(path, algorithm):
"""Return the hash digest of a file."""
with open(path, 'rb') as archive:
hash = hashlib.new(algorithm)
for chunk in read_chunks(archive):
hash.update(chunk)
return hash.hexdigest()
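# A hedged usage sketch (illustrative, not part of pip): the helper above can be called
# directly to build a requirements-file hash pin; the archive path is a placeholder.
def _example_hash_pin(path='dist/example-1.0.tar.gz'):
    return '--hash={}:{}'.format(FAVORITE_HASH, _hash_of_file(path, FAVORITE_HASH))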
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2018-10-08 15:32
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('gram', '0011_auto_20181008_1505'),
]
operations = [
migrations.AlterField(
model_name='profile',
name='profilepic',
field=models.ImageField(blank=True, upload_to='picture/'),
),
]
|
import sys
import os
import json
import azure.functions as func
import google.protobuf as proto
import grpc
# Load dependency manager from the customer's context
from azure_functions_worker.utils.dependency import DependencyManager as dm
def main(req: func.HttpRequest) -> func.HttpResponse:
"""This function is an HttpTrigger to check if the modules are loaded from
customer's dependencies. We have mock a .python_packages/ folder in
this e2e test function app which contains the following stub package:
azure.functions==1.2.1
protobuf==3.9.0
grpc==1.35.0
If the version we check is the same as the one in local .python_packages/,
that means the isolate worker dependencies are working as expected.
"""
result = {
"sys.path": list(sys.path),
"dependency_manager": {
"cx_deps_path": dm._get_cx_deps_path(),
"cx_working_dir": dm._get_cx_working_dir(),
"worker_deps_path": dm._get_worker_deps_path(),
},
"libraries": {
"func.expected.version": "1.2.1",
"func.version": func.__version__,
"func.file": func.__file__,
"proto.expected.version": "3.9.0",
"proto.version": proto.__version__,
"proto.file": proto.__file__,
"grpc.expected.version": "1.35.0",
"grpc.version": grpc.__version__,
"grpc.file": grpc.__file__,
},
"environments": {
"PYTHON_ISOLATE_WORKER_DEPENDENCIES": (
os.getenv('PYTHON_ISOLATE_WORKER_DEPENDENCIES')
),
"AzureWebJobsScriptRoot": os.getenv('AzureWebJobsScriptRoot'),
"PYTHONPATH": os.getenv('PYTHONPATH'),
"HOST_VERSION": os.getenv('HOST_VERSION')
}
}
return func.HttpResponse(json.dumps(result))
|
# coding: utf-8
"""
"""
import logging
from .utils import find_migrations, should_skip_by_index, update_migration_index
def run(db):
logger = logging.getLogger('sampledb.migrations')
for index, name, function in find_migrations():
logger.info('Migration #{} "{}":'.format(index, name))
# Skip migration by migration index
if should_skip_by_index(db, index):
logger.info("Skipped (index).")
continue
try:
# Perform migration
if function(db):
logger.info("Done.")
else:
logger.info("Skipped (condition).")
# Update migration index to skip this migration by index in the future
update_migration_index(db, index)
db.session.commit()
except Exception:
db.session.rollback()
raise
|
import sys, fileinput, json
import numpy as np
fir_p = {} # log-probability of a character appearing at the start of a sentence {str: float}
dou_count = {} # bigram occurrence counts of character pairs {str: {str: int}}
tri_count = {} # trigram occurrence counts of characters {str: {str: {str: int}}}
sin_count = {} # unigram occurrence counts of characters {str: int}
pch = {} # pinyin-to-characters mapping {pinyin: [chs]}
sin_total = 396468407
def preload3():
def add3(dict, ch1, ch2, ch3):
if ch1 in dict:
d2 = dict[ch1]
if ch2 in d2:
d3 = d2[ch2]
if ch3 in d3:
d3[ch3] += 1
else:
d3[ch3] = 1
else:
d2[ch2] = {ch3: 1}
else:
dict[ch1] = {ch2: {ch3: 1}}
count = 0
for line in fileinput.input(['../data/sentences.txt']):
if count % 100000 == 0:
print('line:', count)
if count > 31000000: break
count += 1
for i in range(len(line) - 3):
add3(tri_count, line[i], line[i+1], line[i+2])
with open('../data/tri_count.json', 'w') as f:
json.dump(tri_count, f)
def load3():
global pch
global fir_p
global sin_count
global dou_count
global tri_count
with open('../data/pch.txt') as f:
pch = eval(f.read())
with open('../data/fir_p.txt') as f:
fir_p = eval(f.read())
with open('../data/sin_count.txt') as f:
sin_count = eval(f.read())
with open('../data/dou_count.json') as f:
dou_count = json.load(fp=f)
with open('../data/tri_count.json') as f:
tri_count = json.load(fp=f)
class node():
def __init__(self, ch, pr, prev):
self.ch = ch
self.pr = pr
self.prev = prev
def getpr(ch1, ch2, lam):
dd = {}
douc = dou_count.get(ch1, dd).get(ch2, 0)
sinc1 = sin_count.get(ch1, 0)
if sinc1 > 0:
sinc2 = sin_count.get(ch2, 0)
res = np.log(lam * douc / sinc1 + (1 - lam) * sinc2 / sin_total)
else:
res = -50
return res
def getpr3(ch1, ch2, ch3, lam):
lam2 = 0.99
dd = {}
tric = tri_count.get(ch1, dd).get(ch2, dd).get(ch3, 0)
douc = dou_count.get(ch1, dd).get(ch2, 0)
if douc > 0:
sinc3 = sin_count.get(ch3, 0)
res = np.log(lam2 * tric / douc + (1 - lam2) * sinc3 / sin_total)
else:
res = -20
res += getpr(ch2, ch3, lam)
return res
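# A note on the smoothing used above: getpr scores a bigram with the linear
# interpolation log(lam * c(c1,c2)/c(c1) + (1-lam) * c(c2)/sin_total), and
# getpr3 layers a trigram term log(lam2 * c(c1,c2,c3)/c(c1,c2) +
# (1-lam2) * c(c3)/sin_total) on top of that bigram score, falling back to a
# fixed log-probability penalty (-50 or -20) whenever the conditioning count
# is zero.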
def run3(pylist, lam=0.99):
for py in pylist:
if py not in pch:
return ['Wrong pinyin format.']
nodes = []
# first layer
nodes.append([node(x, fir_p.get(x, -20.0), None) for x in pch[pylist[0]]])
# second layer
if len(pylist) > 1:
nodes.append([node(x, 0, None) for x in pch[pylist[1]]])
for nd in nodes[1]:
nd.pr = nodes[0][0].pr + getpr(nodes[0][0].ch, nd.ch, lam)
nd.prev = nodes[0][0]
for prend in nodes[0]:
pr = getpr(prend.ch, nd.ch, lam)
if prend.pr + pr > nd.pr:
nd.pr = prend.pr + pr
nd.prev = prend
# middle layers
for i in range(len(pylist)):
if i < 2:
continue
nodes.append([node(x, 0, None) for x in pch[pylist[i]]])
for nd in nodes[i]:
nd.pr = nodes[i - 1][0].pr + getpr3(nodes[i - 1][0].prev.ch, nodes[i - 1][0].ch, nd.ch, lam)
nd.prev = nodes[i - 1][0]
for prend in nodes[i - 1]:
pr3 = getpr3(prend.prev.ch, prend.ch, nd.ch, lam)
if prend.pr + pr3 > nd.pr:
nd.pr = prend.pr + pr3
nd.prev = prend
# backtrack along the prev pointers to recover the best path
nd = max(nodes[-1], key=lambda x: x.pr)
chs = []
while nd is not None:
chs.append(nd.ch)
nd = nd.prev
return list(reversed(chs))
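# run3 above is essentially a Viterbi search over the pinyin lattice: nodes[i]
# holds one node per candidate character for the i-th syllable, each node keeps
# the best accumulated log-probability (pr) and a back-pointer (prev), and the
# final loop walks the back-pointers from the best node in the last layer to
# recover the most likely character sequence.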
def pinyin2hanzi3(str):
return ''.join(run3(str.lower().split()))
# for local self-testing
def test3(input, output='../data/output.txt'):
chcount = 0
chcorrect = 0
sencount = 0
sencorrect = 0
with open(input) as f:
lines = [line for line in f]
pys = ''
chs = ''
mychs = ''
f = open(output, 'w')
for i in range(len(lines)):
if i % 2 == 0:
pys = lines[i]
else:
chs = lines[i]
mychs = pinyin2hanzi3(pys)
f.write(pys+mychs+'\n')
if chs[: len(mychs)] == mychs:
sencorrect += 1
sencount += 1
for j in range(len(mychs)):
if chs[j] == mychs[j]:
chcorrect += 1
chcount += 1
print('Sentences:{}, Correct sentences:{}, Correct rate:{}%'
.format(sencount, sencorrect, round(100.0 * sencorrect / sencount, 2)))
print('Characters:{},Correct characters:{}, Correct rate:{}%'
.format(chcount, chcorrect, round(100.0 * chcorrect / chcount, 2)))
f.close()
# for course testing
def test3_class(input, output='../data/output.txt'):
with open(input) as f:
lines = [line for line in f]
f = open(output, 'w')
for i in range(len(lines)):
pys = lines[i]
mychs = pinyin2hanzi3(pys)
f.write(mychs+'\n')
f.close()
if __name__ == '__main__':
# preload3()
print('Pinyin(3-gram) is loading data...٩(๑>◡<๑)۶')
load3()
print('Begin testヾ(=・ω・=)o')
if len(sys.argv) == 3:
test3_class(sys.argv[1], sys.argv[2])
else:
print('Wrong usage: expected an input file and an output file.')
|
#!/usr/bin/env python3
# Copyright (c) 2014-2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the rawtransaction RPCs.
Test the following RPCs:
- createrawtransaction
- signrawtransactionwithwallet
- sendrawtransaction
- decoderawtransaction
- getrawtransaction
"""
from collections import OrderedDict
from decimal import Decimal
from io import BytesIO
from test_framework.messages import CTransaction, ToHex
from test_framework.test_framework import BazCoinTestFramework
from test_framework.util import (
assert_equal,
assert_raises_rpc_error,
find_vout_for_address,
hex_str_to_bytes,
)
class multidict(dict):
"""Dictionary that allows duplicate keys.
Constructed with a list of (key, value) tuples. When dumped by the json module,
will output invalid json with repeated keys, eg:
>>> json.dumps(multidict([(1,2),(1,2)]))
'{"1": 2, "1": 2}'
Used to test calls to rpc methods with repeated keys in the json object."""
def __init__(self, x):
dict.__init__(self, x)
self.x = x
def items(self):
return self.x
# Create one-input, one-output, no-fee transaction:
class RawTransactionsTest(BazCoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 3
self.extra_args = [
["-txindex"],
["-txindex"],
["-txindex"],
]
self.supports_cli = False
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def setup_network(self):
super().setup_network()
self.connect_nodes(0, 2)
def run_test(self):
self.log.info('prepare some coins for multiple *rawtransaction commands')
self.nodes[2].generate(1)
self.sync_all()
self.nodes[0].generate(101)
self.sync_all()
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(),1.5)
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(),1.0)
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(),5.0)
self.sync_all()
self.nodes[0].generate(5)
self.sync_all()
self.log.info('Test getrawtransaction on genesis block coinbase returns an error')
block = self.nodes[0].getblock(self.nodes[0].getblockhash(0))
assert_raises_rpc_error(-5, "The genesis block coinbase is not considered an ordinary transaction", self.nodes[0].getrawtransaction, block['merkleroot'])
self.log.info('Check parameter types and required parameters of createrawtransaction')
# Test `createrawtransaction` required parameters
assert_raises_rpc_error(-1, "createrawtransaction", self.nodes[0].createrawtransaction)
assert_raises_rpc_error(-1, "createrawtransaction", self.nodes[0].createrawtransaction, [])
# Test `createrawtransaction` invalid extra parameters
assert_raises_rpc_error(-1, "createrawtransaction", self.nodes[0].createrawtransaction, [], {}, 0, False, 'foo')
# Test `createrawtransaction` invalid `inputs`
txid = '1d1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000'
assert_raises_rpc_error(-3, "Expected type array", self.nodes[0].createrawtransaction, 'foo', {})
assert_raises_rpc_error(-1, "JSON value is not an object as expected", self.nodes[0].createrawtransaction, ['foo'], {})
assert_raises_rpc_error(-1, "JSON value is not a string as expected", self.nodes[0].createrawtransaction, [{}], {})
assert_raises_rpc_error(-8, "txid must be of length 64 (not 3, for 'foo')", self.nodes[0].createrawtransaction, [{'txid': 'foo'}], {})
assert_raises_rpc_error(-8, "txid must be hexadecimal string (not 'ZZZ7bb8b1697ea987f3b223ba7819250cae33efacb068d23dc24859824a77844')", self.nodes[0].createrawtransaction, [{'txid': 'ZZZ7bb8b1697ea987f3b223ba7819250cae33efacb068d23dc24859824a77844'}], {})
assert_raises_rpc_error(-8, "Invalid parameter, missing vout key", self.nodes[0].createrawtransaction, [{'txid': txid}], {})
assert_raises_rpc_error(-8, "Invalid parameter, missing vout key", self.nodes[0].createrawtransaction, [{'txid': txid, 'vout': 'foo'}], {})
assert_raises_rpc_error(-8, "Invalid parameter, vout cannot be negative", self.nodes[0].createrawtransaction, [{'txid': txid, 'vout': -1}], {})
assert_raises_rpc_error(-8, "Invalid parameter, sequence number is out of range", self.nodes[0].createrawtransaction, [{'txid': txid, 'vout': 0, 'sequence': -1}], {})
# Test `createrawtransaction` invalid `outputs`
address = self.nodes[0].getnewaddress()
address2 = self.nodes[0].getnewaddress()
assert_raises_rpc_error(-1, "JSON value is not an array as expected", self.nodes[0].createrawtransaction, [], 'foo')
self.nodes[0].createrawtransaction(inputs=[], outputs={}) # Should not throw for backwards compatibility
self.nodes[0].createrawtransaction(inputs=[], outputs=[])
assert_raises_rpc_error(-8, "Data must be hexadecimal string", self.nodes[0].createrawtransaction, [], {'data': 'foo'})
assert_raises_rpc_error(-5, "Invalid BazCoin address", self.nodes[0].createrawtransaction, [], {'foo': 0})
assert_raises_rpc_error(-3, "Invalid amount", self.nodes[0].createrawtransaction, [], {address: 'foo'})
assert_raises_rpc_error(-3, "Amount out of range", self.nodes[0].createrawtransaction, [], {address: -1})
assert_raises_rpc_error(-8, "Invalid parameter, duplicated address: %s" % address, self.nodes[0].createrawtransaction, [], multidict([(address, 1), (address, 1)]))
assert_raises_rpc_error(-8, "Invalid parameter, duplicated address: %s" % address, self.nodes[0].createrawtransaction, [], [{address: 1}, {address: 1}])
assert_raises_rpc_error(-8, "Invalid parameter, duplicate key: data", self.nodes[0].createrawtransaction, [], [{"data": 'aa'}, {"data": "bb"}])
assert_raises_rpc_error(-8, "Invalid parameter, duplicate key: data", self.nodes[0].createrawtransaction, [], multidict([("data", 'aa'), ("data", "bb")]))
assert_raises_rpc_error(-8, "Invalid parameter, key-value pair must contain exactly one key", self.nodes[0].createrawtransaction, [], [{'a': 1, 'b': 2}])
assert_raises_rpc_error(-8, "Invalid parameter, key-value pair not an object as expected", self.nodes[0].createrawtransaction, [], [['key-value pair1'], ['2']])
# Test `createrawtransaction` invalid `locktime`
assert_raises_rpc_error(-3, "Expected type number", self.nodes[0].createrawtransaction, [], {}, 'foo')
assert_raises_rpc_error(-8, "Invalid parameter, locktime out of range", self.nodes[0].createrawtransaction, [], {}, -1)
assert_raises_rpc_error(-8, "Invalid parameter, locktime out of range", self.nodes[0].createrawtransaction, [], {}, 4294967296)
# Test `createrawtransaction` invalid `replaceable`
assert_raises_rpc_error(-3, "Expected type bool", self.nodes[0].createrawtransaction, [], {}, 0, 'foo')
self.log.info('Check that createrawtransaction accepts an array and object as outputs')
tx = CTransaction()
# One output
tx.deserialize(BytesIO(hex_str_to_bytes(self.nodes[2].createrawtransaction(inputs=[{'txid': txid, 'vout': 9}], outputs={address: 99}))))
assert_equal(len(tx.vout), 1)
assert_equal(
tx.serialize().hex(),
self.nodes[2].createrawtransaction(inputs=[{'txid': txid, 'vout': 9}], outputs=[{address: 99}]),
)
# Two outputs
tx.deserialize(BytesIO(hex_str_to_bytes(self.nodes[2].createrawtransaction(inputs=[{'txid': txid, 'vout': 9}], outputs=OrderedDict([(address, 99), (address2, 99)])))))
assert_equal(len(tx.vout), 2)
assert_equal(
tx.serialize().hex(),
self.nodes[2].createrawtransaction(inputs=[{'txid': txid, 'vout': 9}], outputs=[{address: 99}, {address2: 99}]),
)
# Multiple mixed outputs
tx.deserialize(BytesIO(hex_str_to_bytes(self.nodes[2].createrawtransaction(inputs=[{'txid': txid, 'vout': 9}], outputs=multidict([(address, 99), (address2, 99), ('data', '99')])))))
assert_equal(len(tx.vout), 3)
assert_equal(
tx.serialize().hex(),
self.nodes[2].createrawtransaction(inputs=[{'txid': txid, 'vout': 9}], outputs=[{address: 99}, {address2: 99}, {'data': '99'}]),
)
for type in ["bech32", "p2sh-segwit", "legacy"]:
addr = self.nodes[0].getnewaddress("", type)
addrinfo = self.nodes[0].getaddressinfo(addr)
pubkey = addrinfo["scriptPubKey"]
self.log.info('sendrawtransaction with missing prevtx info (%s)' %(type))
# Test `signrawtransactionwithwallet` invalid `prevtxs`
inputs = [ {'txid' : txid, 'vout' : 3, 'sequence' : 1000}]
outputs = { self.nodes[0].getnewaddress() : 1 }
rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
prevtx = dict(txid=txid, scriptPubKey=pubkey, vout=3, amount=1)
succ = self.nodes[0].signrawtransactionwithwallet(rawtx, [prevtx])
assert succ["complete"]
if type == "legacy":
del prevtx["amount"]
succ = self.nodes[0].signrawtransactionwithwallet(rawtx, [prevtx])
assert succ["complete"]
if type != "legacy":
assert_raises_rpc_error(-3, "Missing amount", self.nodes[0].signrawtransactionwithwallet, rawtx, [
{
"txid": txid,
"scriptPubKey": pubkey,
"vout": 3,
}
])
assert_raises_rpc_error(-3, "Missing vout", self.nodes[0].signrawtransactionwithwallet, rawtx, [
{
"txid": txid,
"scriptPubKey": pubkey,
"amount": 1,
}
])
assert_raises_rpc_error(-3, "Missing txid", self.nodes[0].signrawtransactionwithwallet, rawtx, [
{
"scriptPubKey": pubkey,
"vout": 3,
"amount": 1,
}
])
assert_raises_rpc_error(-3, "Missing scriptPubKey", self.nodes[0].signrawtransactionwithwallet, rawtx, [
{
"txid": txid,
"vout": 3,
"amount": 1
}
])
#########################################
# sendrawtransaction with missing input #
#########################################
self.log.info('sendrawtransaction with missing input')
inputs = [ {'txid' : "1d1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000", 'vout' : 1}] # won't exist
outputs = { self.nodes[0].getnewaddress() : 4.998 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
rawtx = self.nodes[2].signrawtransactionwithwallet(rawtx)
# This will raise an exception since there are missing inputs
assert_raises_rpc_error(-25, "bad-txns-inputs-missingorspent", self.nodes[2].sendrawtransaction, rawtx['hex'])
#####################################
# getrawtransaction with block hash #
#####################################
# make a tx by sending then generate 2 blocks; block1 has the tx in it
tx = self.nodes[2].sendtoaddress(self.nodes[1].getnewaddress(), 1)
block1, block2 = self.nodes[2].generate(2)
self.sync_all()
# We should be able to get the raw transaction by providing the correct block
gottx = self.nodes[0].getrawtransaction(tx, True, block1)
assert_equal(gottx['txid'], tx)
assert_equal(gottx['in_active_chain'], True)
# We should not have the 'in_active_chain' flag when we don't provide a block
gottx = self.nodes[0].getrawtransaction(tx, True)
assert_equal(gottx['txid'], tx)
assert 'in_active_chain' not in gottx
# We should not get the tx if we provide an unrelated block
assert_raises_rpc_error(-5, "No such transaction found", self.nodes[0].getrawtransaction, tx, True, block2)
# An invalid block hash should raise the correct errors
assert_raises_rpc_error(-1, "JSON value is not a string as expected", self.nodes[0].getrawtransaction, tx, True, True)
assert_raises_rpc_error(-8, "parameter 3 must be of length 64 (not 6, for 'foobar')", self.nodes[0].getrawtransaction, tx, True, "foobar")
assert_raises_rpc_error(-8, "parameter 3 must be of length 64 (not 8, for 'abcd1234')", self.nodes[0].getrawtransaction, tx, True, "abcd1234")
assert_raises_rpc_error(-8, "parameter 3 must be hexadecimal string (not 'ZZZ0000000000000000000000000000000000000000000000000000000000000')", self.nodes[0].getrawtransaction, tx, True, "ZZZ0000000000000000000000000000000000000000000000000000000000000")
assert_raises_rpc_error(-5, "Block hash not found", self.nodes[0].getrawtransaction, tx, True, "0000000000000000000000000000000000000000000000000000000000000000")
# Undo the blocks and check in_active_chain
self.nodes[0].invalidateblock(block1)
gottx = self.nodes[0].getrawtransaction(txid=tx, verbose=True, blockhash=block1)
assert_equal(gottx['in_active_chain'], False)
self.nodes[0].reconsiderblock(block1)
assert_equal(self.nodes[0].getbestblockhash(), block2)
if not self.options.descriptors:
# The traditional multisig workflow does not work with descriptor wallets so these are legacy only.
# The multisig workflow with descriptor wallets uses PSBTs and is tested elsewhere, no need to do them here.
#########################
# RAW TX MULTISIG TESTS #
#########################
# 2of2 test
addr1 = self.nodes[2].getnewaddress()
addr2 = self.nodes[2].getnewaddress()
addr1Obj = self.nodes[2].getaddressinfo(addr1)
addr2Obj = self.nodes[2].getaddressinfo(addr2)
# Tests for createmultisig and addmultisigaddress
assert_raises_rpc_error(-5, "Invalid public key", self.nodes[0].createmultisig, 1, ["01020304"])
self.nodes[0].createmultisig(2, [addr1Obj['pubkey'], addr2Obj['pubkey']]) # createmultisig can only take public keys
assert_raises_rpc_error(-5, "Invalid public key", self.nodes[0].createmultisig, 2, [addr1Obj['pubkey'], addr1]) # addmultisigaddress can take both pubkeys and addresses so long as they are in the wallet, which is tested here.
mSigObj = self.nodes[2].addmultisigaddress(2, [addr1Obj['pubkey'], addr1])['address']
#use balance deltas instead of absolute values
bal = self.nodes[2].getbalance()
# send 1.2 BAZ to msig adr
txId = self.nodes[0].sendtoaddress(mSigObj, 1.2)
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
assert_equal(self.nodes[2].getbalance(), bal+Decimal('1.20000000')) #node2 has both keys of the 2of2 ms addr., tx should affect the balance
# 2of3 test from different nodes
bal = self.nodes[2].getbalance()
addr1 = self.nodes[1].getnewaddress()
addr2 = self.nodes[2].getnewaddress()
addr3 = self.nodes[2].getnewaddress()
addr1Obj = self.nodes[1].getaddressinfo(addr1)
addr2Obj = self.nodes[2].getaddressinfo(addr2)
addr3Obj = self.nodes[2].getaddressinfo(addr3)
mSigObj = self.nodes[2].addmultisigaddress(2, [addr1Obj['pubkey'], addr2Obj['pubkey'], addr3Obj['pubkey']])['address']
txId = self.nodes[0].sendtoaddress(mSigObj, 2.2)
decTx = self.nodes[0].gettransaction(txId)
rawTx = self.nodes[0].decoderawtransaction(decTx['hex'])
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
#THIS IS AN INCOMPLETE FEATURE
#NODE2 HAS TWO OF THREE KEYS AND THE FUNDS SHOULD BE SPENDABLE AND COUNT TOWARDS THE BALANCE CALCULATION
assert_equal(self.nodes[2].getbalance(), bal) #for now, assume the funds of a 2of3 multisig tx are not marked as spendable
txDetails = self.nodes[0].gettransaction(txId, True)
rawTx = self.nodes[0].decoderawtransaction(txDetails['hex'])
vout = next(o for o in rawTx['vout'] if o['value'] == Decimal('2.20000000'))
bal = self.nodes[0].getbalance()
inputs = [{ "txid" : txId, "vout" : vout['n'], "scriptPubKey" : vout['scriptPubKey']['hex'], "amount" : vout['value']}]
outputs = { self.nodes[0].getnewaddress() : 2.19 }
rawTx = self.nodes[2].createrawtransaction(inputs, outputs)
rawTxPartialSigned = self.nodes[1].signrawtransactionwithwallet(rawTx, inputs)
assert_equal(rawTxPartialSigned['complete'], False) #node1 only has one key, can't comp. sign the tx
rawTxSigned = self.nodes[2].signrawtransactionwithwallet(rawTx, inputs)
assert_equal(rawTxSigned['complete'], True) #node2 can sign the tx compl., own two of three keys
self.nodes[2].sendrawtransaction(rawTxSigned['hex'])
rawTx = self.nodes[0].decoderawtransaction(rawTxSigned['hex'])
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
assert_equal(self.nodes[0].getbalance(), bal+Decimal('50.00000000')+Decimal('2.19000000')) #block reward + tx
# 2of2 test for combining transactions
bal = self.nodes[2].getbalance()
addr1 = self.nodes[1].getnewaddress()
addr2 = self.nodes[2].getnewaddress()
addr1Obj = self.nodes[1].getaddressinfo(addr1)
addr2Obj = self.nodes[2].getaddressinfo(addr2)
self.nodes[1].addmultisigaddress(2, [addr1Obj['pubkey'], addr2Obj['pubkey']])['address']
mSigObj = self.nodes[2].addmultisigaddress(2, [addr1Obj['pubkey'], addr2Obj['pubkey']])['address']
mSigObjValid = self.nodes[2].getaddressinfo(mSigObj)
txId = self.nodes[0].sendtoaddress(mSigObj, 2.2)
decTx = self.nodes[0].gettransaction(txId)
rawTx2 = self.nodes[0].decoderawtransaction(decTx['hex'])
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
assert_equal(self.nodes[2].getbalance(), bal) # the funds of a 2of2 multisig tx should not be marked as spendable
txDetails = self.nodes[0].gettransaction(txId, True)
rawTx2 = self.nodes[0].decoderawtransaction(txDetails['hex'])
vout = next(o for o in rawTx2['vout'] if o['value'] == Decimal('2.20000000'))
bal = self.nodes[0].getbalance()
inputs = [{ "txid" : txId, "vout" : vout['n'], "scriptPubKey" : vout['scriptPubKey']['hex'], "redeemScript" : mSigObjValid['hex'], "amount" : vout['value']}]
outputs = { self.nodes[0].getnewaddress() : 2.19 }
rawTx2 = self.nodes[2].createrawtransaction(inputs, outputs)
rawTxPartialSigned1 = self.nodes[1].signrawtransactionwithwallet(rawTx2, inputs)
self.log.debug(rawTxPartialSigned1)
assert_equal(rawTxPartialSigned1['complete'], False) #node1 only has one key, can't comp. sign the tx
rawTxPartialSigned2 = self.nodes[2].signrawtransactionwithwallet(rawTx2, inputs)
self.log.debug(rawTxPartialSigned2)
assert_equal(rawTxPartialSigned2['complete'], False) #node2 only has one key, can't comp. sign the tx
rawTxComb = self.nodes[2].combinerawtransaction([rawTxPartialSigned1['hex'], rawTxPartialSigned2['hex']])
self.log.debug(rawTxComb)
self.nodes[2].sendrawtransaction(rawTxComb)
rawTx2 = self.nodes[0].decoderawtransaction(rawTxComb)
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
assert_equal(self.nodes[0].getbalance(), bal+Decimal('50.00000000')+Decimal('2.19000000')) #block reward + tx
# decoderawtransaction tests
# witness transaction
encrawtx = "010000000001010000000000000072c1a6a246ae63f74f931e8365e15a089c68d61900000000000000000000ffffffff0100e1f50500000000000102616100000000"
decrawtx = self.nodes[0].decoderawtransaction(encrawtx, True) # decode as witness transaction
assert_equal(decrawtx['vout'][0]['value'], Decimal('1.00000000'))
assert_raises_rpc_error(-22, 'TX decode failed', self.nodes[0].decoderawtransaction, encrawtx, False) # force decode as non-witness transaction
# non-witness transaction
encrawtx = "01000000010000000000000072c1a6a246ae63f74f931e8365e15a089c68d61900000000000000000000ffffffff0100e1f505000000000000000000"
decrawtx = self.nodes[0].decoderawtransaction(encrawtx, False) # decode as non-witness transaction
assert_equal(decrawtx['vout'][0]['value'], Decimal('1.00000000'))
# known ambiguous transaction in the chain (see https://github.com/bazcoin/bazcoin/issues/20579)
encrawtx = "020000000001010000000000000000000000000000000000000000000000000000000000000000ffffffff4b03c68708046ff8415c622f4254432e434f4d2ffabe6d6de1965d02c68f928e5b244ab1965115a36f56eb997633c7f690124bbf43644e23080000000ca3d3af6d005a65ff0200fd00000000ffffffff03f4c1fb4b0000000016001497cfc76442fe717f2a3f0cc9c175f7561b6619970000000000000000266a24aa21a9ed957d1036a80343e0d1b659497e1b48a38ebe876a056d45965fac4a85cda84e1900000000000000002952534b424c4f434b3a8e092581ab01986cbadc84f4b43f4fa4bb9e7a2e2a0caf9b7cf64d939028e22c0120000000000000000000000000000000000000000000000000000000000000000000000000"
decrawtx = self.nodes[0].decoderawtransaction(encrawtx)
decrawtx_wit = self.nodes[0].decoderawtransaction(encrawtx, True)
assert_raises_rpc_error(-22, 'TX decode failed', self.nodes[0].decoderawtransaction, encrawtx, False) # fails to decode as non-witness transaction
assert_equal(decrawtx, decrawtx_wit) # the witness interpretation should be chosen
assert_equal(decrawtx['vin'][0]['coinbase'], "03c68708046ff8415c622f4254432e434f4d2ffabe6d6de1965d02c68f928e5b244ab1965115a36f56eb997633c7f690124bbf43644e23080000000ca3d3af6d005a65ff0200fd00000000")
# Basic signrawtransaction test
addr = self.nodes[1].getnewaddress()
txid = self.nodes[0].sendtoaddress(addr, 10)
self.nodes[0].generate(1)
self.sync_all()
vout = find_vout_for_address(self.nodes[1], txid, addr)
rawTx = self.nodes[1].createrawtransaction([{'txid': txid, 'vout': vout}], {self.nodes[1].getnewaddress(): 9.999})
rawTxSigned = self.nodes[1].signrawtransactionwithwallet(rawTx)
txId = self.nodes[1].sendrawtransaction(rawTxSigned['hex'])
self.nodes[0].generate(1)
self.sync_all()
# getrawtransaction tests
# 1. valid parameters - only supply txid
assert_equal(self.nodes[0].getrawtransaction(txId), rawTxSigned['hex'])
# 2. valid parameters - supply txid and 0 for non-verbose
assert_equal(self.nodes[0].getrawtransaction(txId, 0), rawTxSigned['hex'])
# 3. valid parameters - supply txid and False for non-verbose
assert_equal(self.nodes[0].getrawtransaction(txId, False), rawTxSigned['hex'])
# 4. valid parameters - supply txid and 1 for verbose.
# We only check the "hex" field of the output so we don't need to update this test every time the output format changes.
assert_equal(self.nodes[0].getrawtransaction(txId, 1)["hex"], rawTxSigned['hex'])
# 5. valid parameters - supply txid and True for verbose
assert_equal(self.nodes[0].getrawtransaction(txId, True)["hex"], rawTxSigned['hex'])
# 6. invalid parameters - supply txid and string "Flase"
assert_raises_rpc_error(-1, "not a boolean", self.nodes[0].getrawtransaction, txId, "Flase")
# 7. invalid parameters - supply txid and empty array
assert_raises_rpc_error(-1, "not a boolean", self.nodes[0].getrawtransaction, txId, [])
# 8. invalid parameters - supply txid and empty dict
assert_raises_rpc_error(-1, "not a boolean", self.nodes[0].getrawtransaction, txId, {})
inputs = [ {'txid' : "1d1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000", 'vout' : 1, 'sequence' : 1000}]
outputs = { self.nodes[0].getnewaddress() : 1 }
rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
decrawtx= self.nodes[0].decoderawtransaction(rawtx)
assert_equal(decrawtx['vin'][0]['sequence'], 1000)
# 9. invalid parameters - sequence number out of range
inputs = [ {'txid' : "1d1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000", 'vout' : 1, 'sequence' : -1}]
outputs = { self.nodes[0].getnewaddress() : 1 }
assert_raises_rpc_error(-8, 'Invalid parameter, sequence number is out of range', self.nodes[0].createrawtransaction, inputs, outputs)
# 10. invalid parameters - sequence number out of range
inputs = [ {'txid' : "1d1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000", 'vout' : 1, 'sequence' : 4294967296}]
outputs = { self.nodes[0].getnewaddress() : 1 }
assert_raises_rpc_error(-8, 'Invalid parameter, sequence number is out of range', self.nodes[0].createrawtransaction, inputs, outputs)
inputs = [ {'txid' : "1d1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000", 'vout' : 1, 'sequence' : 4294967294}]
outputs = { self.nodes[0].getnewaddress() : 1 }
rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
decrawtx= self.nodes[0].decoderawtransaction(rawtx)
assert_equal(decrawtx['vin'][0]['sequence'], 4294967294)
####################################
# TRANSACTION VERSION NUMBER TESTS #
####################################
# Test the minimum transaction version number that fits in a signed 32-bit integer.
# As transaction version is unsigned, this should convert to its unsigned equivalent.
tx = CTransaction()
tx.nVersion = -0x80000000
rawtx = ToHex(tx)
decrawtx = self.nodes[0].decoderawtransaction(rawtx)
assert_equal(decrawtx['version'], 0x80000000)
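# (Worked check of the wrap-around above: the version field is serialised as an
# unsigned 32-bit integer, and -0x80000000 & 0xffffffff == 0x80000000, which is
# the value decoderawtransaction reports.)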
# Test the maximum transaction version number that fits in a signed 32-bit integer.
tx = CTransaction()
tx.nVersion = 0x7fffffff
rawtx = ToHex(tx)
decrawtx = self.nodes[0].decoderawtransaction(rawtx)
assert_equal(decrawtx['version'], 0x7fffffff)
self.log.info('sendrawtransaction/testmempoolaccept with maxfeerate')
# Test a transaction with a small fee.
txId = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 1.0)
rawTx = self.nodes[0].getrawtransaction(txId, True)
vout = next(o for o in rawTx['vout'] if o['value'] == Decimal('1.00000000'))
self.sync_all()
inputs = [{ "txid" : txId, "vout" : vout['n'] }]
# Fee 10,000 satoshis, (1 - (10000 sat * 0.00000001 BAZ/sat)) = 0.9999
outputs = { self.nodes[0].getnewaddress() : Decimal("0.99990000") }
rawTx = self.nodes[2].createrawtransaction(inputs, outputs)
rawTxSigned = self.nodes[2].signrawtransactionwithwallet(rawTx)
assert_equal(rawTxSigned['complete'], True)
# Fee 10,000 satoshis, ~100 b transaction, fee rate should land around 100 sat/byte = 0.00100000 BAZ/kB
# Thus, testmempoolaccept should reject
testres = self.nodes[2].testmempoolaccept([rawTxSigned['hex']], 0.00001000)[0]
assert_equal(testres['allowed'], False)
assert_equal(testres['reject-reason'], 'max-fee-exceeded')
# and sendrawtransaction should throw
assert_raises_rpc_error(-25, 'Fee exceeds maximum configured by user (e.g. -maxtxfee, maxfeerate)', self.nodes[2].sendrawtransaction, rawTxSigned['hex'], 0.00001000)
# and the following calls should both succeed
testres = self.nodes[2].testmempoolaccept(rawtxs=[rawTxSigned['hex']])[0]
assert_equal(testres['allowed'], True)
self.nodes[2].sendrawtransaction(hexstring=rawTxSigned['hex'])
# Test a transaction with a large fee.
txId = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 1.0)
rawTx = self.nodes[0].getrawtransaction(txId, True)
vout = next(o for o in rawTx['vout'] if o['value'] == Decimal('1.00000000'))
self.sync_all()
inputs = [{ "txid" : txId, "vout" : vout['n'] }]
# Fee 2,000,000 satoshis, (1 - (2000000 sat * 0.00000001 BAZ/sat)) = 0.98
outputs = { self.nodes[0].getnewaddress() : Decimal("0.98000000") }
rawTx = self.nodes[2].createrawtransaction(inputs, outputs)
rawTxSigned = self.nodes[2].signrawtransactionwithwallet(rawTx)
assert_equal(rawTxSigned['complete'], True)
# Fee 2,000,000 satoshis, ~100 b transaction, fee rate should land around 20,000 sat/byte = 0.20000000 BAZ/kB
# Thus, testmempoolaccept should reject
testres = self.nodes[2].testmempoolaccept([rawTxSigned['hex']])[0]
assert_equal(testres['allowed'], False)
assert_equal(testres['reject-reason'], 'max-fee-exceeded')
# and sendrawtransaction should throw
assert_raises_rpc_error(-25, 'Fee exceeds maximum configured by user (e.g. -maxtxfee, maxfeerate)', self.nodes[2].sendrawtransaction, rawTxSigned['hex'])
# and the following calls should both succeed
testres = self.nodes[2].testmempoolaccept(rawtxs=[rawTxSigned['hex']], maxfeerate='0.20000000')[0]
assert_equal(testres['allowed'], True)
self.nodes[2].sendrawtransaction(hexstring=rawTxSigned['hex'], maxfeerate='0.20000000')
if __name__ == '__main__':
RawTransactionsTest().main()
|
# Generated by Django 2.1.15 on 2020-12-12 13:14
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
('auth', '0009_alter_user_last_name_max_length'),
]
operations = [
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
('email', models.EmailField(max_length=255, unique=True)),
('name', models.CharField(max_length=255)),
('is_active', models.BooleanField(default=True)),
('is_staff', models.BooleanField(default=False)),
('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
],
options={
'abstract': False,
},
),
]
|
from typing import List, Optional
import aiosqlite
from btcgreen.util.db_wrapper import DBWrapper
from btcgreen.util.ints import uint32
from btcgreen.wallet.util.wallet_types import WalletType
from btcgreen.wallet.wallet_action import WalletAction
class WalletActionStore:
"""
WalletActionStore keeps track of all wallet actions that require persistence.
Used by Colored coins, Atomic swaps, Rate Limited, and Authorized payee wallets
"""
db_connection: aiosqlite.Connection
cache_size: uint32
db_wrapper: DBWrapper
@classmethod
async def create(cls, db_wrapper: DBWrapper):
self = cls()
self.db_wrapper = db_wrapper
self.db_connection = db_wrapper.db
await self.db_connection.execute(
(
"CREATE TABLE IF NOT EXISTS action_queue("
"id INTEGER PRIMARY KEY AUTOINCREMENT,"
" name text,"
" wallet_id int,"
" wallet_type int,"
" wallet_callback text,"
" done int,"
" data text)"
)
)
await self.db_connection.execute("CREATE INDEX IF NOT EXISTS name on action_queue(name)")
await self.db_connection.execute("CREATE INDEX IF NOT EXISTS wallet_id on action_queue(wallet_id)")
await self.db_connection.execute("CREATE INDEX IF NOT EXISTS wallet_type on action_queue(wallet_type)")
await self.db_connection.commit()
return self
async def _clear_database(self):
cursor = await self.db_connection.execute("DELETE FROM action_queue")
await cursor.close()
await self.db_connection.commit()
async def get_wallet_action(self, id: int) -> Optional[WalletAction]:
"""
Return a wallet action by id
"""
cursor = await self.db_connection.execute("SELECT * from action_queue WHERE id=?", (id,))
row = await cursor.fetchone()
await cursor.close()
if row is None:
return None
return WalletAction(row[0], row[1], row[2], WalletType(row[3]), row[4], bool(row[5]), row[6])
async def create_action(
self, name: str, wallet_id: int, type: int, callback: str, done: bool, data: str, in_transaction: bool
):
"""
Creates Wallet Action
"""
if not in_transaction:
await self.db_wrapper.lock.acquire()
try:
cursor = await self.db_connection.execute(
"INSERT INTO action_queue VALUES(?, ?, ?, ?, ?, ?, ?)",
(None, name, wallet_id, type, callback, done, data),
)
await cursor.close()
finally:
if not in_transaction:
await self.db_connection.commit()
self.db_wrapper.lock.release()
async def action_done(self, action_id: int):
"""
Marks action as done
"""
action: Optional[WalletAction] = await self.get_wallet_action(action_id)
assert action is not None
async with self.db_wrapper.lock:
cursor = await self.db_connection.execute(
"Replace INTO action_queue VALUES(?, ?, ?, ?, ?, ?, ?)",
(
action.id,
action.name,
action.wallet_id,
action.type.value,
action.wallet_callback,
True,
action.data,
),
)
await cursor.close()
await self.db_connection.commit()
async def get_all_pending_actions(self) -> List[WalletAction]:
"""
Returns a list of all pending actions
"""
result: List[WalletAction] = []
cursor = await self.db_connection.execute("SELECT * from action_queue WHERE done=?", (0,))
rows = await cursor.fetchall()
await cursor.close()
if rows is None:
return result
for row in rows:
action = WalletAction(row[0], row[1], row[2], WalletType(row[3]), row[4], bool(row[5]), row[6])
result.append(action)
return result
async def get_action_by_id(self, id) -> Optional[WalletAction]:
"""
Return a wallet action by id
"""
cursor = await self.db_connection.execute("SELECT * from action_queue WHERE id=?", (id,))
row = await cursor.fetchone()
await cursor.close()
if row is None:
return None
return WalletAction(row[0], row[1], row[2], WalletType(row[3]), row[4], bool(row[5]), row[6])
|
import math
import random
import geocoder
import gpxpy.geo
from geopy import Point, distance
from s2sphere import CellId, LatLng
from .custom_exceptions import GeneralPogoException
from .util import is_float
DEFAULT_RADIUS = 70
# Wrapper for location
class Location(object):
def __init__(self, locationLookup, geo_key, api):
self.geo_key = geo_key
self.api = api
self.setLocation(locationLookup)
def __str__(self):
s = 'Coordinates: {} {} {}'.format(
self.latitude,
self.longitude,
self.altitude
)
return s
@staticmethod
def getDistance(*coords):
return gpxpy.geo.haversine_distance(*coords)
def getFortDistance(self, fort):
lat, lng, alt = self.getCoordinates()
return self.getDistance(lat, lng, fort.latitude, fort.longitude)
def setLocation(self, search):
if len(search.split(" ")) == 2:
f, s = [i.replace(',','') for i in search.split(" ")]
# Input location is coordinates
if is_float(f) and is_float(s):
self.latitude = float(f)
self.longitude = float(s)
self.altitude = 8
return self.latitude, self.longitude, self.altitude
providers = ['google', 'osm', 'arcgis', 'freegeoip']
for p in providers:
geo = getattr(geocoder, p)(search)
if geo.lat is not None and geo.lng is not None:
elev = geocoder.elevation(geo.latlng)
self.latitude, self.longitude, self.altitude = geo.lat, geo.lng, elev.meters or 8
return self.latitude, self.longitude, self.altitude
raise GeneralPogoException("Location could not be found")
def setCoordinates(self, latitude, longitude, override=True):
self.latitude = latitude
self.longitude = longitude
self.altitude = random.randint(0,10)
self.api.set_position(latitude, longitude, self.altitude)
def getCoordinates(self):
return self.latitude, self.longitude, self.altitude
def getNeighbors(self, lat, lng):
origin = CellId.from_lat_lng(LatLng.from_degrees(lat, lng)).parent(15)
neighbors = {origin.id()}
edge_neighbors = origin.get_edge_neighbors()
surrounding_neighbors = [
edge_neighbors[0], # North neighbor
edge_neighbors[0].get_edge_neighbors()[1], # North-east neighbor
edge_neighbors[1], # East neighbor
edge_neighbors[2].get_edge_neighbors()[1], # South-east neighbor
edge_neighbors[2], # South neighbor
edge_neighbors[2].get_edge_neighbors()[3], # South-west neighbor
edge_neighbors[3], # West neighbor
edge_neighbors[0].get_edge_neighbors()[3], # North-west neighbor
]
for cell in surrounding_neighbors:
neighbors.add(cell.id())
for cell2 in cell.get_edge_neighbors():
neighbors.add(cell2.id())
return list(neighbors)
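# getNeighbors above gathers the level-15 S2 cell containing (lat, lng) together
# with the surrounding ring of cells: the four edge neighbours, the diagonal
# neighbours derived from them, and each ring cell's own edge neighbours, all
# de-duplicated through a set before being returned as a list of cell ids.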
def getCells(self, lat=0, lon=0):
if not lat: lat = self.latitude
if not lon: lon = self.longitude
return self.getNeighbors(lat, lon)
def getAllSteps(self, radius=140):
start = list(self.getCoordinates()[:2])
allSteps = [start]
if radius <= DEFAULT_RADIUS: return allSteps
distPerStep = 140
steps = math.ceil(radius/distPerStep)
lat, lon = start
origin = Point(lat, lon)
angleBetween = 60
for s in range(1, steps + 1):
for d in range(0, 360, int(angleBetween/min(s, 2))):
destination = distance.VincentyDistance(meters=s*distPerStep).destination(origin, d)
allSteps.append([destination.latitude, destination.longitude])
return allSteps
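# getAllSteps above lays out concentric rings of waypoints around the current
# position: ring s (for s = 1 .. ceil(radius / 140)) places points every 60
# degrees (every 30 degrees from the second ring outwards) at a distance of
# s * 140 metres, computed with geopy's Vincenty destination calculation, so a
# scan radius larger than DEFAULT_RADIUS can be covered step by step.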
|
# -*- coding: utf-8 -*-
from __future__ import print_function
import os
from pygcgen import ChangelogGenerator
base_options = [
"--quiet",
# "-h",
# "-v",
# "-vv", # or "-v", "-v",
# "-vvv",
# "--options-file", ".pygcgen_example",
# "-u", "topic2k",
# "-p", "pygcgen",
# '-s', "**Questions:**", "question", "Question",
# '-s', "**Future-Requests:**", "feature-request",
# '--section', '**Important changes:**', 'notice',
# '-s', "**Implemented enhancements:**", "enhancement", "Enhancement",
# '-s', "**Fixed bugs:**", "bug", "Bug",
# "-v",
# "--no-overwrite",
# "--between-tags", "v0.1.1",
# "--include-labels", "bug",
# "--no-issues-wo-labels",
# "--future-release", "v0.2.0",
# "--tag-separator", " ---\n\n",
]
on_travis = os.environ.get('TRAVIS', 'false') == 'true'
if not on_travis:
ChangelogGenerator(base_options + ["-v"]).run()
else:
tests = [
[ # Test #01
"--no-overwrite",
"--max-simultaneous-requests", "25",
"--section", '**Important changes:**', 'notice',
"--since-tag", "v0.1.0",
"--between-tags", "v0.1.1", "v0.2.1",
"--due-tag", "v0.2.0",
"--exclude-tags-regex", "v0\.0\..*",
"--exclude-tags", "v0.1.2",
"--with-unreleased",
"--include-labels", "notice", "enhancement", "bug",
"--exclude-labels",
"duplicate", "Duplicate",
"invalid", "Invalid",
"wontfix", "Wontfix",
"question", "Question",
"hide in changelog",
]
]
for nr, options in enumerate(tests, start=1):
print("starting test {} ...".format(nr), end="")
ChangelogGenerator(base_options + options).run()
print(" done.")
|
import numpy as np
import pandas as pd
def tanhderiv(K):
"""
used to calculate the derivative of the tanh function.
"""
return 1- (np.tanh(K)**2)
def initialisetheta(m,n,nodes,yn):
"""
used to randomly initialise the weights matrix and store it in the
form of a list. Note that the bias term has been directly added to each weight matrix.
"""
theta=[]
theta.append(np.random.randn(n+1,nodes[0]))
t=len(nodes)
for i in range(t-1):
theta.append(np.random.randn(nodes[i]+1,nodes[i+1]))
theta.append(np.random.randn(nodes[len(nodes)-1]+1,yn))
return theta
def hiddenlayer(m,nodes):
"""
used to initialise the hidden layers with ones.
Bias term has been included here as well.
"""
hidden=[]
for i in nodes:
hidden.append(np.ones((m,i+1)))
return hidden
def forwardpropogation(x,theta,hidden):
"""
performs a pass of forward propagation and predicts the output
of the pass.
"""
hidden[0][:,1:]= np.tanh(np.dot(x,theta[0]))
for i in range (len(hidden)-1):
hidden[i+1][:,1:]= np.tanh(np.dot(hidden[i],theta[i+1]))
output=np.tanh(np.dot(hidden[len(hidden)-1],theta[-1]))
return (output,hidden)
def delta_function(m,hidden,output,y,theta):
"""
performs a pass of back propagation and calculates the value
of delta for each layer.
"""
delta=[]
delta.append(output-y)
delta.insert(0, np.dot(delta[0], theta[len(hidden) ].T ))
for i in range(len(hidden)-1):
p = delta[0] * tanhderiv(hidden[len(hidden)-i-1])
delta.insert(0, np.dot(p[:,1:],theta[len(hidden)-i-1].T))
return delta
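# In delta_function above, delta[-1] is the output-layer error (output - y);
# each earlier entry is the next layer's error pushed back through the
# transposed weights, with the hidden-layer terms additionally scaled by
# tanhderiv of the stored activations and the bias column dropped via the
# [:, 1:] slice, which is the usual backpropagation recursion for a tanh
# network.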
def update_theta(x,delta,hidden,theta):
"""
the theta matrices in the list "theta" are updated on each pass
"""
print(np.size(x.T,0))
theta[0]+= np.dot(x.T,delta[0][:,1:])
for i in range(len(hidden)-1 ):
theta[i+1]+=np.dot(hidden[i].T,delta[i+1][:,1:])
theta[len(hidden)]+=np.dot(hidden[len(hidden)-1].T,delta[len(hidden)])
return theta
def main():
"""
sample run
"""
x = np.array([[1, 2, 3], [2, 3, 4], [3, 4, 5], [4, 5, 6], [5, 6, 7], [6, 7, 8], [7, 8, 9]]) # Test data
y = np.array([[0, 1], [1, 0], [1, 0], [0, 1], [0, 1], [1, 0], [0, 1]]) #Test Labels
m = np.size(x, 0)
n = np.size(x, 1)
yn = np.size(y, 1)
nodes = [30, 20, 40]  # number of hidden layers and the number of nodes in each layer
X = np.ones((m,n+1))
X[:,1:]=x
"""
theta and hidden layer initialisation
"""
theta = initialisetheta(m,n,nodes,yn)
hidden = hiddenlayer(m,nodes)
for i in range(10000):
output,hidden= forwardpropogation(X,theta,hidden)
delta=delta_function(m,hidden, output, y,theta)
theta=update_theta(X,delta,hidden,theta)
main()
|
"""
Logistic Regression
"""
# Author: Gael Varoquaux <gael.varoquaux@normalesup.org>
# Fabian Pedregosa <f@bianp.net>
# Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Manoj Kumar <manojkumarsivaraj334@gmail.com>
# Lars Buitinck
# Simon Wu <s8wu@uwaterloo.ca>
# Arthur Mensch <arthur.mensch@m4x.org>
import numbers
import warnings
import numpy as np
from scipy import optimize, sparse
from scipy.special import expit, logsumexp
from joblib import Parallel, effective_n_jobs
from ._base import LinearClassifierMixin, SparseCoefMixin, BaseEstimator
from ._sag import sag_solver
from ..preprocessing import LabelEncoder, LabelBinarizer
from ..svm._base import _fit_liblinear
from ..utils import check_array, check_consistent_length, compute_class_weight
from ..utils import check_random_state
from ..utils.extmath import log_logistic, safe_sparse_dot, softmax, squared_norm
from ..utils.extmath import row_norms
from ..utils.optimize import _newton_cg, _check_optimize_result
from ..utils.validation import check_is_fitted, _check_sample_weight
from ..utils.multiclass import check_classification_targets
from ..utils.fixes import _joblib_parallel_args
from ..utils.fixes import delayed
from ..model_selection import check_cv
from ..metrics import get_scorer
_LOGISTIC_SOLVER_CONVERGENCE_MSG = (
"Please also refer to the documentation for alternative solver options:\n"
" https://scikit-learn.org/stable/modules/linear_model.html"
"#logistic-regression"
)
# .. some helper functions for logistic_regression_path ..
def _intercept_dot(w, X, y):
"""Computes y * np.dot(X, w).
It takes into consideration if the intercept should be fit or not.
Parameters
----------
w : ndarray of shape (n_features,) or (n_features + 1,)
Coefficient vector.
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training data.
y : ndarray of shape (n_samples,)
Array of labels.
Returns
-------
w : ndarray of shape (n_features,)
Coefficient vector without the intercept weight (w[-1]) if the
intercept should be fit. Unchanged otherwise.
c : float
The intercept.
yz : float
y * np.dot(X, w).
"""
c = 0.0
if w.size == X.shape[1] + 1:
c = w[-1]
w = w[:-1]
z = safe_sparse_dot(X, w) + c
yz = y * z
return w, c, yz
def _logistic_loss_and_grad(w, X, y, alpha, sample_weight=None):
"""Computes the logistic loss and gradient.
Parameters
----------
w : ndarray of shape (n_features,) or (n_features + 1,)
Coefficient vector.
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training data.
y : ndarray of shape (n_samples,)
Array of labels.
alpha : float
Regularization parameter. alpha is equal to 1 / C.
sample_weight : array-like of shape (n_samples,), default=None
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Returns
-------
out : float
Logistic loss.
grad : ndarray of shape (n_features,) or (n_features + 1,)
Logistic gradient.
"""
n_samples, n_features = X.shape
grad = np.empty_like(w)
w, c, yz = _intercept_dot(w, X, y)
if sample_weight is None:
sample_weight = np.ones(n_samples)
# Logistic loss is the negative of the log of the logistic function.
out = -np.sum(sample_weight * log_logistic(yz)) + 0.5 * alpha * np.dot(w, w)
z = expit(yz)
z0 = sample_weight * (z - 1) * y
grad[:n_features] = safe_sparse_dot(X.T, z0) + alpha * w
# Case where we fit the intercept.
if grad.shape[0] > n_features:
grad[-1] = z0.sum()
return out, grad
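# Restating the computation above in formulas: with z_i = y_i * (x_i . w + c)
# the loss is -sum_i s_i * log(sigmoid(z_i)) + 0.5 * alpha * ||w||^2, its
# gradient with respect to w is X.T @ (s * (sigmoid(z) - 1) * y) + alpha * w,
# and the gradient with respect to the intercept is
# sum_i s_i * (sigmoid(z_i) - 1) * y_i, which is exactly what the
# expit/log_logistic expressions evaluate.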
def _logistic_loss(w, X, y, alpha, sample_weight=None):
"""Computes the logistic loss.
Parameters
----------
w : ndarray of shape (n_features,) or (n_features + 1,)
Coefficient vector.
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training data.
y : ndarray of shape (n_samples,)
Array of labels.
alpha : float
Regularization parameter. alpha is equal to 1 / C.
sample_weight : array-like of shape (n_samples,) default=None
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Returns
-------
out : float
Logistic loss.
"""
w, c, yz = _intercept_dot(w, X, y)
if sample_weight is None:
sample_weight = np.ones(y.shape[0])
# Logistic loss is the negative of the log of the logistic function.
out = -np.sum(sample_weight * log_logistic(yz)) + 0.5 * alpha * np.dot(w, w)
return out
def _logistic_grad_hess(w, X, y, alpha, sample_weight=None):
"""Computes the gradient and the Hessian, in the case of a logistic loss.
Parameters
----------
w : ndarray of shape (n_features,) or (n_features + 1,)
Coefficient vector.
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training data.
y : ndarray of shape (n_samples,)
Array of labels.
alpha : float
Regularization parameter. alpha is equal to 1 / C.
sample_weight : array-like of shape (n_samples,) default=None
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Returns
-------
grad : ndarray of shape (n_features,) or (n_features + 1,)
Logistic gradient.
Hs : callable
Function that takes the gradient as a parameter and returns the
matrix product of the Hessian and gradient.
"""
n_samples, n_features = X.shape
grad = np.empty_like(w)
fit_intercept = grad.shape[0] > n_features
w, c, yz = _intercept_dot(w, X, y)
if sample_weight is None:
sample_weight = np.ones(y.shape[0])
z = expit(yz)
z0 = sample_weight * (z - 1) * y
grad[:n_features] = safe_sparse_dot(X.T, z0) + alpha * w
# Case where we fit the intercept.
if fit_intercept:
grad[-1] = z0.sum()
# The mat-vec product of the Hessian
d = sample_weight * z * (1 - z)
if sparse.issparse(X):
dX = safe_sparse_dot(sparse.dia_matrix((d, 0), shape=(n_samples, n_samples)), X)
else:
# Precompute as much as possible
dX = d[:, np.newaxis] * X
if fit_intercept:
# Calculate the double derivative with respect to intercept
# In the case of sparse matrices this returns a matrix object.
dd_intercept = np.squeeze(np.array(dX.sum(axis=0)))
def Hs(s):
ret = np.empty_like(s)
if sparse.issparse(X):
ret[:n_features] = X.T.dot(dX.dot(s[:n_features]))
else:
ret[:n_features] = np.linalg.multi_dot([X.T, dX, s[:n_features]])
ret[:n_features] += alpha * s[:n_features]
# For the fit intercept case.
if fit_intercept:
ret[:n_features] += s[-1] * dd_intercept
ret[-1] = dd_intercept.dot(s[:n_features])
ret[-1] += d.sum() * s[-1]
return ret
return grad, Hs
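# The closure Hs above is a Hessian-vector product that never forms the Hessian
# explicitly: with D = diag(s * sigma(yz) * (1 - sigma(yz))) the coefficient
# block is H @ v = X.T @ (D @ (X @ v)) + alpha * v, and when an intercept is
# fitted the extra row and column of the Hessian are reconstructed from
# dd_intercept (the column sums of D @ X) and d.sum().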
def _multinomial_loss(w, X, Y, alpha, sample_weight):
"""Computes multinomial loss and class probabilities.
Parameters
----------
w : ndarray of shape (n_classes * n_features,) or
(n_classes * (n_features + 1),)
Coefficient vector.
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training data.
Y : ndarray of shape (n_samples, n_classes)
Transformed labels according to the output of LabelBinarizer.
alpha : float
Regularization parameter. alpha is equal to 1 / C.
sample_weight : array-like of shape (n_samples,)
Array of weights that are assigned to individual samples.
Returns
-------
loss : float
Multinomial loss.
p : ndarray of shape (n_samples, n_classes)
Estimated class probabilities.
w : ndarray of shape (n_classes, n_features)
Reshaped param vector excluding intercept terms.
Reference
---------
Bishop, C. M. (2006). Pattern recognition and machine learning.
Springer. (Chapter 4.3.4)
"""
n_classes = Y.shape[1]
n_features = X.shape[1]
fit_intercept = w.size == (n_classes * (n_features + 1))
w = w.reshape(n_classes, -1)
sample_weight = sample_weight[:, np.newaxis]
if fit_intercept:
intercept = w[:, -1]
w = w[:, :-1]
else:
intercept = 0
p = safe_sparse_dot(X, w.T)
p += intercept
p -= logsumexp(p, axis=1)[:, np.newaxis]
loss = -(sample_weight * Y * p).sum()
loss += 0.5 * alpha * squared_norm(w)
p = np.exp(p, p)
return loss, p, w
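# _multinomial_loss above computes the weighted, L2-penalised cross-entropy of
# a softmax model: p starts as the logits X @ w.T + intercept, subtracting
# logsumexp turns each row into log-probabilities, the loss is
# -sum(sample_weight * Y * p) + 0.5 * alpha * ||w||^2, and np.exp(p, p)
# exponentiates in place so the returned p holds the class probabilities.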
def _multinomial_loss_grad(w, X, Y, alpha, sample_weight):
"""Computes the multinomial loss, gradient and class probabilities.
Parameters
----------
w : ndarray of shape (n_classes * n_features,) or
(n_classes * (n_features + 1),)
Coefficient vector.
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training data.
Y : ndarray of shape (n_samples, n_classes)
Transformed labels according to the output of LabelBinarizer.
alpha : float
Regularization parameter. alpha is equal to 1 / C.
sample_weight : array-like of shape (n_samples,)
Array of weights that are assigned to individual samples.
Returns
-------
loss : float
Multinomial loss.
grad : ndarray of shape (n_classes * n_features,) or \
(n_classes * (n_features + 1),)
Ravelled gradient of the multinomial loss.
p : ndarray of shape (n_samples, n_classes)
Estimated class probabilities
Reference
---------
Bishop, C. M. (2006). Pattern recognition and machine learning.
Springer. (Chapter 4.3.4)
"""
n_classes = Y.shape[1]
n_features = X.shape[1]
fit_intercept = w.size == n_classes * (n_features + 1)
grad = np.zeros((n_classes, n_features + bool(fit_intercept)), dtype=X.dtype)
loss, p, w = _multinomial_loss(w, X, Y, alpha, sample_weight)
sample_weight = sample_weight[:, np.newaxis]
diff = sample_weight * (p - Y)
grad[:, :n_features] = safe_sparse_dot(diff.T, X)
grad[:, :n_features] += alpha * w
if fit_intercept:
grad[:, -1] = diff.sum(axis=0)
return loss, grad.ravel(), p
def _multinomial_grad_hess(w, X, Y, alpha, sample_weight):
"""
Computes the gradient and the Hessian, in the case of a multinomial loss.
Parameters
----------
w : ndarray of shape (n_classes * n_features,) or
(n_classes * (n_features + 1),)
Coefficient vector.
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training data.
Y : ndarray of shape (n_samples, n_classes)
Transformed labels according to the output of LabelBinarizer.
alpha : float
Regularization parameter. alpha is equal to 1 / C.
sample_weight : array-like of shape (n_samples,)
Array of weights that are assigned to individual samples.
Returns
-------
grad : ndarray of shape (n_classes * n_features,) or \
(n_classes * (n_features + 1),)
Ravelled gradient of the multinomial loss.
hessp : callable
Function that takes in a vector input of shape (n_classes * n_features)
or (n_classes * (n_features + 1)) and returns matrix-vector product
with hessian.
References
----------
Barak A. Pearlmutter (1993). Fast Exact Multiplication by the Hessian.
http://www.bcl.hamilton.ie/~barak/papers/nc-hessian.pdf
"""
n_features = X.shape[1]
n_classes = Y.shape[1]
fit_intercept = w.size == (n_classes * (n_features + 1))
# `loss` is unused. Refactoring to avoid computing it does not
# significantly speed up the computation and decreases readability
loss, grad, p = _multinomial_loss_grad(w, X, Y, alpha, sample_weight)
sample_weight = sample_weight[:, np.newaxis]
# Hessian-vector product derived by applying the R-operator on the gradient
# of the multinomial loss function.
def hessp(v):
v = v.reshape(n_classes, -1)
if fit_intercept:
inter_terms = v[:, -1]
v = v[:, :-1]
else:
inter_terms = 0
# r_yhat holds the result of applying the R-operator on the multinomial
# estimator.
r_yhat = safe_sparse_dot(X, v.T)
r_yhat += inter_terms
r_yhat += (-p * r_yhat).sum(axis=1)[:, np.newaxis]
r_yhat *= p
r_yhat *= sample_weight
hessProd = np.zeros((n_classes, n_features + bool(fit_intercept)))
hessProd[:, :n_features] = safe_sparse_dot(r_yhat.T, X)
hessProd[:, :n_features] += v * alpha
if fit_intercept:
hessProd[:, -1] = r_yhat.sum(axis=0)
return hessProd.ravel()
return grad, hessp
def _check_solver(solver, penalty, dual):
all_solvers = ["liblinear", "newton-cg", "lbfgs", "sag", "saga"]
if solver not in all_solvers:
raise ValueError(
"Logistic Regression supports only solvers in %s, got %s."
% (all_solvers, solver)
)
all_penalties = ["l1", "l2", "elasticnet", "none"]
if penalty not in all_penalties:
raise ValueError(
"Logistic Regression supports only penalties in %s, got %s."
% (all_penalties, penalty)
)
if solver not in ["liblinear", "saga"] and penalty not in ("l2", "none"):
raise ValueError(
"Solver %s supports only 'l2' or 'none' penalties, got %s penalty."
% (solver, penalty)
)
if solver != "liblinear" and dual:
raise ValueError(
"Solver %s supports only dual=False, got dual=%s" % (solver, dual)
)
if penalty == "elasticnet" and solver != "saga":
raise ValueError(
"Only 'saga' solver supports elasticnet penalty, got solver={}.".format(
solver
)
)
if solver == "liblinear" and penalty == "none":
raise ValueError("penalty='none' is not supported for the liblinear solver")
return solver
def _check_multi_class(multi_class, solver, n_classes):
if multi_class == "auto":
if solver == "liblinear":
multi_class = "ovr"
elif n_classes > 2:
multi_class = "multinomial"
else:
multi_class = "ovr"
if multi_class not in ("multinomial", "ovr"):
raise ValueError(
"multi_class should be 'multinomial', 'ovr' or 'auto'. Got %s."
% multi_class
)
if multi_class == "multinomial" and solver == "liblinear":
raise ValueError("Solver %s does not support a multinomial backend." % solver)
return multi_class
def _logistic_regression_path(
X,
y,
pos_class=None,
Cs=10,
fit_intercept=True,
max_iter=100,
tol=1e-4,
verbose=0,
solver="lbfgs",
coef=None,
class_weight=None,
dual=False,
penalty="l2",
intercept_scaling=1.0,
multi_class="auto",
random_state=None,
check_input=True,
max_squared_sum=None,
sample_weight=None,
l1_ratio=None,
):
"""Compute a Logistic Regression model for a list of regularization
parameters.
This is an implementation that uses the result of the previous model
to speed up computations along the set of solutions, making it faster
than sequentially calling LogisticRegression for the different parameters.
Note that there will be no speedup with liblinear solver, since it does
not handle warm-starting.
Read more in the :ref:`User Guide <logistic_regression>`.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Input data.
y : array-like of shape (n_samples,) or (n_samples, n_targets)
Input data, target values.
pos_class : int, default=None
The class with respect to which we perform a one-vs-all fit.
If None, then it is assumed that the given problem is binary.
Cs : int or array-like of shape (n_cs,), default=10
List of values for the regularization parameter or integer specifying
the number of regularization parameters that should be used. In this
case, the parameters will be chosen in a logarithmic scale between
1e-4 and 1e4.
fit_intercept : bool, default=True
Whether to fit an intercept for the model. In this case the shape of
the returned array is (n_cs, n_features + 1).
max_iter : int, default=100
Maximum number of iterations for the solver.
tol : float, default=1e-4
Stopping criterion. For the newton-cg and lbfgs solvers, the iteration
will stop when ``max{|g_i | i = 1, ..., n} <= tol``
where ``g_i`` is the i-th component of the gradient.
verbose : int, default=0
For the liblinear and lbfgs solvers set verbose to any positive
number for verbosity.
solver : {'lbfgs', 'newton-cg', 'liblinear', 'sag', 'saga'}, \
default='lbfgs'
Numerical solver to use.
coef : array-like of shape (n_features,), default=None
Initialization value for coefficients of logistic regression.
Useless for liblinear solver.
class_weight : dict or 'balanced', default=None
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``.
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
dual : bool, default=False
Dual or primal formulation. Dual formulation is only implemented for
l2 penalty with liblinear solver. Prefer dual=False when
n_samples > n_features.
penalty : {'l1', 'l2', 'elasticnet'}, default='l2'
Used to specify the norm used in the penalization. The 'newton-cg',
'sag' and 'lbfgs' solvers support only l2 penalties. 'elasticnet' is
only supported by the 'saga' solver.
intercept_scaling : float, default=1.
Useful only when the solver 'liblinear' is used
and self.fit_intercept is set to True. In this case, x becomes
[x, self.intercept_scaling],
i.e. a "synthetic" feature with constant value equal to
intercept_scaling is appended to the instance vector.
The intercept becomes ``intercept_scaling * synthetic_feature_weight``.
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
multi_class : {'ovr', 'multinomial', 'auto'}, default='auto'
If the option chosen is 'ovr', then a binary problem is fit for each
label. For 'multinomial' the loss minimised is the multinomial loss fit
across the entire probability distribution, *even when the data is
binary*. 'multinomial' is unavailable when solver='liblinear'.
'auto' selects 'ovr' if the data is binary, or if solver='liblinear',
and otherwise selects 'multinomial'.
.. versionadded:: 0.18
Stochastic Average Gradient descent solver for 'multinomial' case.
.. versionchanged:: 0.22
Default changed from 'ovr' to 'auto' in 0.22.
random_state : int, RandomState instance, default=None
Used when ``solver`` == 'sag', 'saga' or 'liblinear' to shuffle the
data. See :term:`Glossary <random_state>` for details.
check_input : bool, default=True
If False, the input arrays X and y will not be checked.
max_squared_sum : float, default=None
Maximum squared sum of X over samples. Used only in SAG solver.
If None, it will be computed, going through all the samples.
The value should be precomputed to speed up cross validation.
sample_weight : array-like of shape (n_samples,), default=None
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
l1_ratio : float, default=None
The Elastic-Net mixing parameter, with ``0 <= l1_ratio <= 1``. Only
used if ``penalty='elasticnet'``. Setting ``l1_ratio=0`` is equivalent
to using ``penalty='l2'``, while setting ``l1_ratio=1`` is equivalent
to using ``penalty='l1'``. For ``0 < l1_ratio < 1``, the penalty is a
combination of L1 and L2.
Returns
-------
coefs : ndarray of shape (n_cs, n_features) or (n_cs, n_features + 1)
List of coefficients for the Logistic Regression model. If
fit_intercept is set to True then the second dimension will be
n_features + 1, where the last item represents the intercept. For
``multi_class='multinomial'``, the shape is (n_classes, n_cs,
n_features) or (n_classes, n_cs, n_features + 1).
Cs : ndarray
Grid of Cs used for cross-validation.
n_iter : array of shape (n_cs,)
Actual number of iteration for each Cs.
Notes
-----
You might get slightly different results with the solver liblinear than
with the others since this uses LIBLINEAR which penalizes the intercept.
.. versionchanged:: 0.19
The "copy" parameter was removed.
"""
if isinstance(Cs, numbers.Integral):
Cs = np.logspace(-4, 4, Cs)
solver = _check_solver(solver, penalty, dual)
# Preprocessing.
if check_input:
X = check_array(
X,
accept_sparse="csr",
dtype=np.float64,
accept_large_sparse=solver not in ["liblinear", "sag", "saga"],
)
y = check_array(y, ensure_2d=False, dtype=None)
check_consistent_length(X, y)
_, n_features = X.shape
classes = np.unique(y)
random_state = check_random_state(random_state)
multi_class = _check_multi_class(multi_class, solver, len(classes))
if pos_class is None and multi_class != "multinomial":
if classes.size > 2:
raise ValueError("To fit OvR, use the pos_class argument")
# np.unique(y) gives labels in sorted order.
pos_class = classes[1]
# If sample weights exist, convert them to array (support for lists)
# and check length
# Otherwise set them to 1 for all examples
sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype, copy=True)
# If class_weights is a dict (provided by the user), the weights
# are assigned to the original labels. If it is "balanced", then
# the class_weights are assigned after masking the labels with a OvR.
le = LabelEncoder()
if isinstance(class_weight, dict) or multi_class == "multinomial":
class_weight_ = compute_class_weight(class_weight, classes=classes, y=y)
sample_weight *= class_weight_[le.fit_transform(y)]
# For doing a ovr, we need to mask the labels first. for the
# multinomial case this is not necessary.
if multi_class == "ovr":
w0 = np.zeros(n_features + int(fit_intercept), dtype=X.dtype)
mask_classes = np.array([-1, 1])
mask = y == pos_class
y_bin = np.ones(y.shape, dtype=X.dtype)
y_bin[~mask] = -1.0
# for compute_class_weight
if class_weight == "balanced":
class_weight_ = compute_class_weight(
class_weight, classes=mask_classes, y=y_bin
)
sample_weight *= class_weight_[le.fit_transform(y_bin)]
else:
if solver not in ["sag", "saga"]:
lbin = LabelBinarizer()
Y_multi = lbin.fit_transform(y)
if Y_multi.shape[1] == 1:
Y_multi = np.hstack([1 - Y_multi, Y_multi])
else:
# SAG multinomial solver needs LabelEncoder, not LabelBinarizer
le = LabelEncoder()
Y_multi = le.fit_transform(y).astype(X.dtype, copy=False)
w0 = np.zeros(
(classes.size, n_features + int(fit_intercept)), order="F", dtype=X.dtype
)
if coef is not None:
# it must work both giving the bias term and not
if multi_class == "ovr":
if coef.size not in (n_features, w0.size):
raise ValueError(
"Initialization coef is of shape %d, expected shape %d or %d"
% (coef.size, n_features, w0.size)
)
w0[: coef.size] = coef
else:
# For binary problems coef.shape[0] should be 1, otherwise it
# should be classes.size.
n_classes = classes.size
if n_classes == 2:
n_classes = 1
if coef.shape[0] != n_classes or coef.shape[1] not in (
n_features,
n_features + 1,
):
raise ValueError(
"Initialization coef is of shape (%d, %d), expected "
"shape (%d, %d) or (%d, %d)"
% (
coef.shape[0],
coef.shape[1],
classes.size,
n_features,
classes.size,
n_features + 1,
)
)
if n_classes == 1:
w0[0, : coef.shape[1]] = -coef
w0[1, : coef.shape[1]] = coef
else:
w0[:, : coef.shape[1]] = coef
if multi_class == "multinomial":
# scipy.optimize.minimize and newton-cg accepts only
# ravelled parameters.
if solver in ["lbfgs", "newton-cg"]:
w0 = w0.ravel()
target = Y_multi
if solver == "lbfgs":
def func(x, *args):
return _multinomial_loss_grad(x, *args)[0:2]
elif solver == "newton-cg":
def func(x, *args):
return _multinomial_loss(x, *args)[0]
def grad(x, *args):
return _multinomial_loss_grad(x, *args)[1]
hess = _multinomial_grad_hess
warm_start_sag = {"coef": w0.T}
else:
target = y_bin
if solver == "lbfgs":
func = _logistic_loss_and_grad
elif solver == "newton-cg":
func = _logistic_loss
def grad(x, *args):
return _logistic_loss_and_grad(x, *args)[1]
hess = _logistic_grad_hess
warm_start_sag = {"coef": np.expand_dims(w0, axis=1)}
coefs = list()
n_iter = np.zeros(len(Cs), dtype=np.int32)
for i, C in enumerate(Cs):
if solver == "lbfgs":
iprint = [-1, 50, 1, 100, 101][
np.searchsorted(np.array([0, 1, 2, 3]), verbose)
]
opt_res = optimize.minimize(
func,
w0,
method="L-BFGS-B",
jac=True,
args=(X, target, 1.0 / C, sample_weight),
options={"iprint": iprint, "gtol": tol, "maxiter": max_iter},
)
n_iter_i = _check_optimize_result(
solver,
opt_res,
max_iter,
extra_warning_msg=_LOGISTIC_SOLVER_CONVERGENCE_MSG,
)
w0, loss = opt_res.x, opt_res.fun
elif solver == "newton-cg":
args = (X, target, 1.0 / C, sample_weight)
w0, n_iter_i = _newton_cg(
hess, func, grad, w0, args=args, maxiter=max_iter, tol=tol
)
elif solver == "liblinear":
coef_, intercept_, n_iter_i, = _fit_liblinear(
X,
target,
C,
fit_intercept,
intercept_scaling,
None,
penalty,
dual,
verbose,
max_iter,
tol,
random_state,
sample_weight=sample_weight,
)
if fit_intercept:
w0 = np.concatenate([coef_.ravel(), intercept_])
else:
w0 = coef_.ravel()
elif solver in ["sag", "saga"]:
if multi_class == "multinomial":
target = target.astype(X.dtype, copy=False)
loss = "multinomial"
else:
loss = "log"
# alpha is for L2-norm, beta is for L1-norm
if penalty == "l1":
alpha = 0.0
beta = 1.0 / C
elif penalty == "l2":
alpha = 1.0 / C
beta = 0.0
else: # Elastic-Net penalty
alpha = (1.0 / C) * (1 - l1_ratio)
beta = (1.0 / C) * l1_ratio
w0, n_iter_i, warm_start_sag = sag_solver(
X,
target,
sample_weight,
loss,
alpha,
beta,
max_iter,
tol,
verbose,
random_state,
False,
max_squared_sum,
warm_start_sag,
is_saga=(solver == "saga"),
)
else:
raise ValueError(
"solver must be one of {'liblinear', 'lbfgs', "
"'newton-cg', 'sag', 'saga'}, got '%s' instead" % solver
)
if multi_class == "multinomial":
n_classes = max(2, classes.size)
multi_w0 = np.reshape(w0, (n_classes, -1))
if n_classes == 2:
multi_w0 = multi_w0[1][np.newaxis, :]
coefs.append(multi_w0.copy())
else:
coefs.append(w0.copy())
n_iter[i] = n_iter_i
return np.array(coefs), np.array(Cs), n_iter
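# --- Illustrative sketch (editorial addition, not part of the original
# module): a minimal call to `_logistic_regression_path` on synthetic binary
# data, checking the documented output shapes. The helper name
# `_example_logistic_regression_path` is hypothetical.
def _example_logistic_regression_path():
    rng = np.random.RandomState(0)
    X = rng.randn(20, 3)
    y = (X[:, 0] > 0).astype(int)
    coefs, Cs, n_iter = _logistic_regression_path(
        X, y, Cs=[0.1, 1.0, 10.0], solver="lbfgs"
    )
    # One coefficient vector per C; the trailing column is the intercept
    # because fit_intercept defaults to True.
    assert coefs.shape == (3, X.shape[1] + 1)
    assert Cs.shape == (3,) and n_iter.shape == (3,)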
# helper function for LogisticCV
def _log_reg_scoring_path(
X,
y,
train,
test,
pos_class=None,
Cs=10,
scoring=None,
fit_intercept=False,
max_iter=100,
tol=1e-4,
class_weight=None,
verbose=0,
solver="lbfgs",
penalty="l2",
dual=False,
intercept_scaling=1.0,
multi_class="auto",
random_state=None,
max_squared_sum=None,
sample_weight=None,
l1_ratio=None,
):
"""Computes scores across logistic_regression_path
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training data.
y : array-like of shape (n_samples,) or (n_samples, n_targets)
Target labels.
train : list of indices
The indices of the train set.
test : list of indices
The indices of the test set.
pos_class : int, default=None
The class with respect to which we perform a one-vs-all fit.
If None, then it is assumed that the given problem is binary.
Cs : int or list of floats, default=10
Each of the values in Cs describes the inverse of
regularization strength. If Cs is an int, then a grid of Cs
values is chosen in a logarithmic scale between 1e-4 and 1e4.
If not provided, then a fixed set of values for Cs are used.
scoring : callable, default=None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``. For a list of scoring functions
that can be used, look at :mod:`sklearn.metrics`. The
default scoring option used is accuracy_score.
fit_intercept : bool, default=False
If False, then the bias term is set to zero. Else the last
term of each coef_ gives us the intercept.
max_iter : int, default=100
Maximum number of iterations for the solver.
tol : float, default=1e-4
Tolerance for stopping criteria.
class_weight : dict or 'balanced', default=None
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
verbose : int, default=0
For the liblinear and lbfgs solvers set verbose to any positive
number for verbosity.
solver : {'lbfgs', 'newton-cg', 'liblinear', 'sag', 'saga'}, \
default='lbfgs'
Decides which solver to use.
penalty : {'l1', 'l2', 'elasticnet'}, default='l2'
Used to specify the norm used in the penalization. The 'newton-cg',
'sag' and 'lbfgs' solvers support only l2 penalties. 'elasticnet' is
only supported by the 'saga' solver.
dual : bool, default=False
Dual or primal formulation. Dual formulation is only implemented for
l2 penalty with liblinear solver. Prefer dual=False when
n_samples > n_features.
intercept_scaling : float, default=1.
Useful only when the solver 'liblinear' is used
and self.fit_intercept is set to True. In this case, x becomes
[x, self.intercept_scaling],
i.e. a "synthetic" feature with constant value equal to
intercept_scaling is appended to the instance vector.
The intercept becomes ``intercept_scaling * synthetic_feature_weight``.
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
multi_class : {'auto', 'ovr', 'multinomial'}, default='auto'
If the option chosen is 'ovr', then a binary problem is fit for each
label. For 'multinomial' the loss minimised is the multinomial loss fit
across the entire probability distribution, *even when the data is
binary*. 'multinomial' is unavailable when solver='liblinear'.
random_state : int, RandomState instance, default=None
Used when ``solver`` == 'sag', 'saga' or 'liblinear' to shuffle the
data. See :term:`Glossary <random_state>` for details.
max_squared_sum : float, default=None
Maximum squared sum of X over samples. Used only in SAG solver.
If None, it will be computed, going through all the samples.
The value should be precomputed to speed up cross validation.
sample_weight : array-like of shape (n_samples,), default=None
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
l1_ratio : float, default=None
The Elastic-Net mixing parameter, with ``0 <= l1_ratio <= 1``. Only
used if ``penalty='elasticnet'``. Setting ``l1_ratio=0`` is equivalent
to using ``penalty='l2'``, while setting ``l1_ratio=1`` is equivalent
to using ``penalty='l1'``. For ``0 < l1_ratio < 1``, the penalty is a
combination of L1 and L2.
Returns
-------
coefs : ndarray of shape (n_cs, n_features) or (n_cs, n_features + 1)
List of coefficients for the Logistic Regression model. If
fit_intercept is set to True then the second dimension will be
n_features + 1, where the last item represents the intercept.
Cs : ndarray
Grid of Cs used for cross-validation.
scores : ndarray of shape (n_cs,)
Scores obtained for each Cs.
n_iter : ndarray of shape (n_cs,)
Actual number of iteration for each Cs.
"""
X_train = X[train]
X_test = X[test]
y_train = y[train]
y_test = y[test]
if sample_weight is not None:
sample_weight = _check_sample_weight(sample_weight, X)
sample_weight = sample_weight[train]
coefs, Cs, n_iter = _logistic_regression_path(
X_train,
y_train,
Cs=Cs,
l1_ratio=l1_ratio,
fit_intercept=fit_intercept,
solver=solver,
max_iter=max_iter,
class_weight=class_weight,
pos_class=pos_class,
multi_class=multi_class,
tol=tol,
verbose=verbose,
dual=dual,
penalty=penalty,
intercept_scaling=intercept_scaling,
random_state=random_state,
check_input=False,
max_squared_sum=max_squared_sum,
sample_weight=sample_weight,
)
log_reg = LogisticRegression(solver=solver, multi_class=multi_class)
# The score method of Logistic Regression has a classes_ attribute.
if multi_class == "ovr":
log_reg.classes_ = np.array([-1, 1])
elif multi_class == "multinomial":
log_reg.classes_ = np.unique(y_train)
else:
raise ValueError(
"multi_class should be either multinomial or ovr, got %s" % multi_class
)
if pos_class is not None:
mask = y_test == pos_class
y_test = np.ones(y_test.shape, dtype=np.float64)
y_test[~mask] = -1.0
scores = list()
scoring = get_scorer(scoring)
for w in coefs:
if multi_class == "ovr":
w = w[np.newaxis, :]
if fit_intercept:
log_reg.coef_ = w[:, :-1]
log_reg.intercept_ = w[:, -1]
else:
log_reg.coef_ = w
log_reg.intercept_ = 0.0
if scoring is None:
scores.append(log_reg.score(X_test, y_test))
else:
scores.append(scoring(log_reg, X_test, y_test))
return coefs, Cs, np.array(scores), n_iter
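# --- Illustrative sketch (editorial addition, not part of the original
# module): scores two candidate C values on a held-out slice via
# `_log_reg_scoring_path`. `multi_class` and `pos_class` are set explicitly
# because this private helper expects an already-resolved configuration.
# The helper name `_example_log_reg_scoring_path` is hypothetical.
def _example_log_reg_scoring_path():
    rng = np.random.RandomState(0)
    X = rng.randn(30, 4)
    y = (X[:, 0] + X[:, 1] > 0).astype(int)
    train, test = np.arange(20), np.arange(20, 30)
    coefs, Cs, scores, n_iter = _log_reg_scoring_path(
        X, y, train, test, pos_class=1, Cs=[1.0, 10.0],
        fit_intercept=True, multi_class="ovr",
    )
    # One held-out score per candidate C.
    assert coefs.shape == (2, X.shape[1] + 1)
    assert scores.shape == (2,)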
class LogisticRegression(LinearClassifierMixin, SparseCoefMixin, BaseEstimator):
"""
Logistic Regression (aka logit, MaxEnt) classifier.
In the multiclass case, the training algorithm uses the one-vs-rest (OvR)
scheme if the 'multi_class' option is set to 'ovr', and uses the
cross-entropy loss if the 'multi_class' option is set to 'multinomial'.
(Currently the 'multinomial' option is supported only by the 'lbfgs',
'sag', 'saga' and 'newton-cg' solvers.)
This class implements regularized logistic regression using the
'liblinear' library, 'newton-cg', 'sag', 'saga' and 'lbfgs' solvers. **Note
that regularization is applied by default**. It can handle both dense
and sparse input. Use C-ordered arrays or CSR matrices containing 64-bit
floats for optimal performance; any other input format will be converted
(and copied).
The 'newton-cg', 'sag', and 'lbfgs' solvers support only L2 regularization
with primal formulation, or no regularization. The 'liblinear' solver
supports both L1 and L2 regularization, with a dual formulation only for
the L2 penalty. The Elastic-Net regularization is only supported by the
'saga' solver.
Read more in the :ref:`User Guide <logistic_regression>`.
Parameters
----------
penalty : {'l1', 'l2', 'elasticnet', 'none'}, default='l2'
Specify the norm of the penalty:
- `'none'`: no penalty is added;
- `'l2'`: add a L2 penalty term and it is the default choice;
- `'l1'`: add a L1 penalty term;
- `'elasticnet'`: both L1 and L2 penalty terms are added.
.. warning::
Some penalties may not work with some solvers. See the parameter
`solver` below, to know the compatibility between the penalty and
solver.
.. versionadded:: 0.19
l1 penalty with SAGA solver (allowing 'multinomial' + L1)
dual : bool, default=False
Dual or primal formulation. Dual formulation is only implemented for
l2 penalty with liblinear solver. Prefer dual=False when
n_samples > n_features.
tol : float, default=1e-4
Tolerance for stopping criteria.
C : float, default=1.0
Inverse of regularization strength; must be a positive float.
Like in support vector machines, smaller values specify stronger
regularization.
fit_intercept : bool, default=True
Specifies if a constant (a.k.a. bias or intercept) should be
added to the decision function.
intercept_scaling : float, default=1
Useful only when the solver 'liblinear' is used
and self.fit_intercept is set to True. In this case, x becomes
[x, self.intercept_scaling],
i.e. a "synthetic" feature with constant value equal to
intercept_scaling is appended to the instance vector.
The intercept becomes ``intercept_scaling * synthetic_feature_weight``.
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
class_weight : dict or 'balanced', default=None
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``.
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
.. versionadded:: 0.17
*class_weight='balanced'*
random_state : int, RandomState instance, default=None
Used when ``solver`` == 'sag', 'saga' or 'liblinear' to shuffle the
data. See :term:`Glossary <random_state>` for details.
solver : {'newton-cg', 'lbfgs', 'liblinear', 'sag', 'saga'}, \
default='lbfgs'
Algorithm to use in the optimization problem. Default is 'lbfgs'.
To choose a solver, you might want to consider the following aspects:
- For small datasets, 'liblinear' is a good choice, whereas 'sag'
and 'saga' are faster for large ones;
- For multiclass problems, only 'newton-cg', 'sag', 'saga' and
'lbfgs' handle multinomial loss;
- 'liblinear' is limited to one-versus-rest schemes.
.. warning::
The choice of the algorithm depends on the penalty chosen:
Supported penalties by solver:
- 'newton-cg' - ['l2', 'none']
- 'lbfgs' - ['l2', 'none']
- 'liblinear' - ['l1', 'l2']
- 'sag' - ['l2', 'none']
- 'saga' - ['elasticnet', 'l1', 'l2', 'none']
.. note::
'sag' and 'saga' fast convergence is only guaranteed on
features with approximately the same scale. You can
preprocess the data with a scaler from :mod:`sklearn.preprocessing`.
.. seealso::
Refer to the User Guide for more information regarding
:class:`LogisticRegression` and more specifically the
`Table <https://scikit-learn.org/dev/modules/linear_model.html#logistic-regression>`_
summarizing solver/penalty supports.
<!--
# noqa: E501
-->
.. versionadded:: 0.17
Stochastic Average Gradient descent solver.
.. versionadded:: 0.19
SAGA solver.
.. versionchanged:: 0.22
The default solver changed from 'liblinear' to 'lbfgs' in 0.22.
max_iter : int, default=100
Maximum number of iterations taken for the solvers to converge.
multi_class : {'auto', 'ovr', 'multinomial'}, default='auto'
If the option chosen is 'ovr', then a binary problem is fit for each
label. For 'multinomial' the loss minimised is the multinomial loss fit
across the entire probability distribution, *even when the data is
binary*. 'multinomial' is unavailable when solver='liblinear'.
'auto' selects 'ovr' if the data is binary, or if solver='liblinear',
and otherwise selects 'multinomial'.
.. versionadded:: 0.18
Stochastic Average Gradient descent solver for 'multinomial' case.
.. versionchanged:: 0.22
Default changed from 'ovr' to 'auto' in 0.22.
verbose : int, default=0
For the liblinear and lbfgs solvers set verbose to any positive
number for verbosity.
warm_start : bool, default=False
When set to True, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
Useless for liblinear solver. See :term:`the Glossary <warm_start>`.
.. versionadded:: 0.17
*warm_start* to support *lbfgs*, *newton-cg*, *sag*, *saga* solvers.
n_jobs : int, default=None
Number of CPU cores used when parallelizing over classes if
multi_class='ovr'. This parameter is ignored when the ``solver`` is
set to 'liblinear' regardless of whether 'multi_class' is specified or
not. ``None`` means 1 unless in a :obj:`joblib.parallel_backend`
context. ``-1`` means using all processors.
See :term:`Glossary <n_jobs>` for more details.
l1_ratio : float, default=None
The Elastic-Net mixing parameter, with ``0 <= l1_ratio <= 1``. Only
used if ``penalty='elasticnet'``. Setting ``l1_ratio=0`` is equivalent
to using ``penalty='l2'``, while setting ``l1_ratio=1`` is equivalent
to using ``penalty='l1'``. For ``0 < l1_ratio < 1``, the penalty is a
combination of L1 and L2.
Attributes
----------
classes_ : ndarray of shape (n_classes, )
A list of class labels known to the classifier.
coef_ : ndarray of shape (1, n_features) or (n_classes, n_features)
Coefficient of the features in the decision function.
`coef_` is of shape (1, n_features) when the given problem is binary.
In particular, when `multi_class='multinomial'`, `coef_` corresponds
to outcome 1 (True) and `-coef_` corresponds to outcome 0 (False).
intercept_ : ndarray of shape (1,) or (n_classes,)
Intercept (a.k.a. bias) added to the decision function.
If `fit_intercept` is set to False, the intercept is set to zero.
`intercept_` is of shape (1,) when the given problem is binary.
In particular, when `multi_class='multinomial'`, `intercept_`
corresponds to outcome 1 (True) and `-intercept_` corresponds to
outcome 0 (False).
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Defined only when `X`
has feature names that are all strings.
.. versionadded:: 1.0
n_iter_ : ndarray of shape (n_classes,) or (1, )
Actual number of iterations for all classes. If binary or multinomial,
it returns only 1 element. For liblinear solver, only the maximum
number of iteration across all classes is given.
.. versionchanged:: 0.20
In SciPy <= 1.0.0 the number of lbfgs iterations may exceed
``max_iter``. ``n_iter_`` will now report at most ``max_iter``.
See Also
--------
SGDClassifier : Incrementally trained logistic regression (when given
the parameter ``loss="log"``).
LogisticRegressionCV : Logistic regression with built-in cross validation.
Notes
-----
The underlying C implementation uses a random number generator to
select features when fitting the model. It is thus not uncommon,
to have slightly different results for the same input data. If
that happens, try with a smaller tol parameter.
Predict output may not match that of standalone liblinear in certain
cases. See :ref:`differences from liblinear <liblinear_differences>`
in the narrative documentation.
References
----------
L-BFGS-B -- Software for Large-scale Bound-constrained Optimization
Ciyou Zhu, Richard Byrd, Jorge Nocedal and Jose Luis Morales.
http://users.iems.northwestern.edu/~nocedal/lbfgsb.html
LIBLINEAR -- A Library for Large Linear Classification
https://www.csie.ntu.edu.tw/~cjlin/liblinear/
SAG -- Mark Schmidt, Nicolas Le Roux, and Francis Bach
Minimizing Finite Sums with the Stochastic Average Gradient
https://hal.inria.fr/hal-00860051/document
SAGA -- Defazio, A., Bach F. & Lacoste-Julien S. (2014).
:arxiv:`"SAGA: A Fast Incremental Gradient Method With Support
for Non-Strongly Convex Composite Objectives" <1407.0202>`
Hsiang-Fu Yu, Fang-Lan Huang, Chih-Jen Lin (2011). Dual coordinate descent
methods for logistic regression and maximum entropy models.
Machine Learning 85(1-2):41-75.
https://www.csie.ntu.edu.tw/~cjlin/papers/maxent_dual.pdf
Examples
--------
>>> from sklearn.datasets import load_iris
>>> from sklearn.linear_model import LogisticRegression
>>> X, y = load_iris(return_X_y=True)
>>> clf = LogisticRegression(random_state=0).fit(X, y)
>>> clf.predict(X[:2, :])
array([0, 0])
>>> clf.predict_proba(X[:2, :])
array([[9.8...e-01, 1.8...e-02, 1.4...e-08],
[9.7...e-01, 2.8...e-02, ...e-08]])
>>> clf.score(X, y)
0.97...
"""
def __init__(
self,
penalty="l2",
*,
dual=False,
tol=1e-4,
C=1.0,
fit_intercept=True,
intercept_scaling=1,
class_weight=None,
random_state=None,
solver="lbfgs",
max_iter=100,
multi_class="auto",
verbose=0,
warm_start=False,
n_jobs=None,
l1_ratio=None,
):
self.penalty = penalty
self.dual = dual
self.tol = tol
self.C = C
self.fit_intercept = fit_intercept
self.intercept_scaling = intercept_scaling
self.class_weight = class_weight
self.random_state = random_state
self.solver = solver
self.max_iter = max_iter
self.multi_class = multi_class
self.verbose = verbose
self.warm_start = warm_start
self.n_jobs = n_jobs
self.l1_ratio = l1_ratio
def fit(self, X, y, sample_weight=None):
"""
Fit the model according to the given training data.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training vector, where `n_samples` is the number of samples and
`n_features` is the number of features.
y : array-like of shape (n_samples,)
Target vector relative to X.
sample_weight : array-like of shape (n_samples,), default=None
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
.. versionadded:: 0.17
*sample_weight* support to LogisticRegression.
Returns
-------
self
Fitted estimator.
Notes
-----
The SAGA solver supports both float64 and float32 bit arrays.
"""
solver = _check_solver(self.solver, self.penalty, self.dual)
if not isinstance(self.C, numbers.Number) or self.C < 0:
raise ValueError("Penalty term must be positive; got (C=%r)" % self.C)
if self.penalty == "elasticnet":
if (
not isinstance(self.l1_ratio, numbers.Number)
or self.l1_ratio < 0
or self.l1_ratio > 1
):
raise ValueError(
"l1_ratio must be between 0 and 1; got (l1_ratio=%r)"
% self.l1_ratio
)
elif self.l1_ratio is not None:
warnings.warn(
"l1_ratio parameter is only used when penalty is "
"'elasticnet'. Got "
"(penalty={})".format(self.penalty)
)
if self.penalty == "none":
if self.C != 1.0: # default values
warnings.warn(
"Setting penalty='none' will ignore the C and l1_ratio parameters"
)
# Note that check for l1_ratio is done right above
C_ = np.inf
penalty = "l2"
else:
C_ = self.C
penalty = self.penalty
if not isinstance(self.max_iter, numbers.Number) or self.max_iter < 0:
raise ValueError(
"Maximum number of iteration must be positive; got (max_iter=%r)"
% self.max_iter
)
if not isinstance(self.tol, numbers.Number) or self.tol < 0:
raise ValueError(
"Tolerance for stopping criteria must be positive; got (tol=%r)"
% self.tol
)
if solver == "lbfgs":
_dtype = np.float64
else:
_dtype = [np.float64, np.float32]
X, y = self._validate_data(
X,
y,
accept_sparse="csr",
dtype=_dtype,
order="C",
accept_large_sparse=solver not in ["liblinear", "sag", "saga"],
)
check_classification_targets(y)
self.classes_ = np.unique(y)
multi_class = _check_multi_class(self.multi_class, solver, len(self.classes_))
if solver == "liblinear":
if effective_n_jobs(self.n_jobs) != 1:
warnings.warn(
"'n_jobs' > 1 does not have any effect when"
" 'solver' is set to 'liblinear'. Got 'n_jobs'"
" = {}.".format(effective_n_jobs(self.n_jobs))
)
self.coef_, self.intercept_, n_iter_ = _fit_liblinear(
X,
y,
self.C,
self.fit_intercept,
self.intercept_scaling,
self.class_weight,
self.penalty,
self.dual,
self.verbose,
self.max_iter,
self.tol,
self.random_state,
sample_weight=sample_weight,
)
self.n_iter_ = np.array([n_iter_])
return self
if solver in ["sag", "saga"]:
max_squared_sum = row_norms(X, squared=True).max()
else:
max_squared_sum = None
n_classes = len(self.classes_)
classes_ = self.classes_
if n_classes < 2:
raise ValueError(
"This solver needs samples of at least 2 classes"
" in the data, but the data contains only one"
" class: %r"
% classes_[0]
)
if len(self.classes_) == 2:
n_classes = 1
classes_ = classes_[1:]
if self.warm_start:
warm_start_coef = getattr(self, "coef_", None)
else:
warm_start_coef = None
if warm_start_coef is not None and self.fit_intercept:
warm_start_coef = np.append(
warm_start_coef, self.intercept_[:, np.newaxis], axis=1
)
# Hack so that we iterate only once for the multinomial case.
if multi_class == "multinomial":
classes_ = [None]
warm_start_coef = [warm_start_coef]
if warm_start_coef is None:
warm_start_coef = [None] * n_classes
path_func = delayed(_logistic_regression_path)
# The SAG solver releases the GIL so it's more efficient to use
# threads for this solver.
if solver in ["sag", "saga"]:
prefer = "threads"
else:
prefer = "processes"
fold_coefs_ = Parallel(
n_jobs=self.n_jobs,
verbose=self.verbose,
**_joblib_parallel_args(prefer=prefer),
)(
path_func(
X,
y,
pos_class=class_,
Cs=[C_],
l1_ratio=self.l1_ratio,
fit_intercept=self.fit_intercept,
tol=self.tol,
verbose=self.verbose,
solver=solver,
multi_class=multi_class,
max_iter=self.max_iter,
class_weight=self.class_weight,
check_input=False,
random_state=self.random_state,
coef=warm_start_coef_,
penalty=penalty,
max_squared_sum=max_squared_sum,
sample_weight=sample_weight,
)
for class_, warm_start_coef_ in zip(classes_, warm_start_coef)
)
fold_coefs_, _, n_iter_ = zip(*fold_coefs_)
self.n_iter_ = np.asarray(n_iter_, dtype=np.int32)[:, 0]
n_features = X.shape[1]
if multi_class == "multinomial":
self.coef_ = fold_coefs_[0][0]
else:
self.coef_ = np.asarray(fold_coefs_)
self.coef_ = self.coef_.reshape(
n_classes, n_features + int(self.fit_intercept)
)
if self.fit_intercept:
self.intercept_ = self.coef_[:, -1]
self.coef_ = self.coef_[:, :-1]
else:
self.intercept_ = np.zeros(n_classes)
return self
def predict_proba(self, X):
"""
Probability estimates.
The returned estimates for all classes are ordered by the
label of classes.
For a multi_class problem, if multi_class is set to be "multinomial"
the softmax function is used to find the predicted probability of
each class.
Else use a one-vs-rest approach, i.e., calculate the probability
of each class assuming it to be positive using the logistic function,
and normalize these values across all the classes.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Vector to be scored, where `n_samples` is the number of samples and
`n_features` is the number of features.
Returns
-------
T : array-like of shape (n_samples, n_classes)
Returns the probability of the sample for each class in the model,
where classes are ordered as they are in ``self.classes_``.
"""
check_is_fitted(self)
ovr = self.multi_class in ["ovr", "warn"] or (
self.multi_class == "auto"
and (self.classes_.size <= 2 or self.solver == "liblinear")
)
if ovr:
return super()._predict_proba_lr(X)
else:
decision = self.decision_function(X)
if decision.ndim == 1:
# Workaround for multi_class="multinomial" and binary outcomes
# which requires softmax prediction with only a 1D decision.
decision_2d = np.c_[-decision, decision]
else:
decision_2d = decision
return softmax(decision_2d, copy=False)
def predict_log_proba(self, X):
"""
Predict logarithm of probability estimates.
The returned estimates for all classes are ordered by the
label of classes.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Vector to be scored, where `n_samples` is the number of samples and
`n_features` is the number of features.
Returns
-------
T : array-like of shape (n_samples, n_classes)
Returns the log-probability of the sample for each class in the
model, where classes are ordered as they are in ``self.classes_``.
"""
return np.log(self.predict_proba(X))
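# --- Illustrative sketch (editorial addition, not part of the original
# module): the elastic-net penalty is only available with the 'saga' solver
# and requires `l1_ratio`, as described in the class docstring above. The
# helper name `_example_elasticnet_logistic_regression` is hypothetical.
def _example_elasticnet_logistic_regression():
    rng = np.random.RandomState(0)
    X = rng.randn(50, 5)
    y = (X[:, 0] > 0).astype(int)
    clf = LogisticRegression(
        penalty="elasticnet", solver="saga", l1_ratio=0.5, max_iter=5000
    ).fit(X, y)
    # Binary problems store a single coefficient row of shape (1, n_features).
    assert clf.coef_.shape == (1, X.shape[1])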
class LogisticRegressionCV(LogisticRegression, LinearClassifierMixin, BaseEstimator):
"""Logistic Regression CV (aka logit, MaxEnt) classifier.
See glossary entry for :term:`cross-validation estimator`.
This class implements logistic regression using liblinear, newton-cg, sag
or lbfgs optimizer. The newton-cg, sag and lbfgs solvers support only L2
regularization with primal formulation. The liblinear solver supports both
L1 and L2 regularization, with a dual formulation only for the L2 penalty.
Elastic-Net penalty is only supported by the saga solver.
For the grid of `Cs` values and `l1_ratios` values, the best hyperparameter
is selected by the cross-validator
:class:`~sklearn.model_selection.StratifiedKFold`, but it can be changed
using the :term:`cv` parameter. The 'newton-cg', 'sag', 'saga' and 'lbfgs'
solvers can warm-start the coefficients (see :term:`Glossary<warm_start>`).
Read more in the :ref:`User Guide <logistic_regression>`.
Parameters
----------
Cs : int or list of floats, default=10
Each of the values in Cs describes the inverse of regularization
strength. If Cs is an int, then a grid of Cs values is chosen
in a logarithmic scale between 1e-4 and 1e4.
Like in support vector machines, smaller values specify stronger
regularization.
fit_intercept : bool, default=True
Specifies if a constant (a.k.a. bias or intercept) should be
added to the decision function.
cv : int or cross-validation generator, default=None
The default cross-validation generator used is Stratified K-Folds.
If an integer is provided, then it is the number of folds used.
See the module :mod:`sklearn.model_selection` module for the
list of possible cross-validation objects.
.. versionchanged:: 0.22
``cv`` default value if None changed from 3-fold to 5-fold.
dual : bool, default=False
Dual or primal formulation. Dual formulation is only implemented for
l2 penalty with liblinear solver. Prefer dual=False when
n_samples > n_features.
penalty : {'l1', 'l2', 'elasticnet'}, default='l2'
Specify the norm of the penalty:
- `'l2'`: add a L2 penalty term (used by default);
- `'l1'`: add a L1 penalty term;
- `'elasticnet'`: both L1 and L2 penalty terms are added.
.. warning::
Some penalties may not work with some solvers. See the parameter
`solver` below, to know the compatibility between the penalty and
solver.
scoring : str or callable, default=None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``. For a list of scoring functions
that can be used, look at :mod:`sklearn.metrics`. The
default scoring option used is 'accuracy'.
solver : {'newton-cg', 'lbfgs', 'liblinear', 'sag', 'saga'}, \
default='lbfgs'
Algorithm to use in the optimization problem. Default is 'lbfgs'.
To choose a solver, you might want to consider the following aspects:
- For small datasets, 'liblinear' is a good choice, whereas 'sag'
and 'saga' are faster for large ones;
- For multiclass problems, only 'newton-cg', 'sag', 'saga' and
'lbfgs' handle multinomial loss;
- 'liblinear' might be slower in :class:`LogisticRegressionCV`
because it does not handle warm-starting. 'liblinear' is
limited to one-versus-rest schemes.
.. warning::
The choice of the algorithm depends on the penalty chosen:
- 'newton-cg' - ['l2']
- 'lbfgs' - ['l2']
- 'liblinear' - ['l1', 'l2']
- 'sag' - ['l2']
- 'saga' - ['elasticnet', 'l1', 'l2']
.. note::
'sag' and 'saga' fast convergence is only guaranteed on features
with approximately the same scale. You can preprocess the data with
a scaler from :mod:`sklearn.preprocessing`.
.. versionadded:: 0.17
Stochastic Average Gradient descent solver.
.. versionadded:: 0.19
SAGA solver.
tol : float, default=1e-4
Tolerance for stopping criteria.
max_iter : int, default=100
Maximum number of iterations of the optimization algorithm.
class_weight : dict or 'balanced', default=None
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``.
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
.. versionadded:: 0.17
class_weight == 'balanced'
n_jobs : int, default=None
Number of CPU cores used during the cross-validation loop.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
verbose : int, default=0
For the 'liblinear', 'sag' and 'lbfgs' solvers set verbose to any
positive number for verbosity.
refit : bool, default=True
If set to True, the scores are averaged across all folds, and the
coefs and the C that correspond to the best score are taken, and a
final refit is done using these parameters.
Otherwise the coefs, intercepts and C that correspond to the
best scores across folds are averaged.
intercept_scaling : float, default=1
Useful only when the solver 'liblinear' is used
and self.fit_intercept is set to True. In this case, x becomes
[x, self.intercept_scaling],
i.e. a "synthetic" feature with constant value equal to
intercept_scaling is appended to the instance vector.
The intercept becomes ``intercept_scaling * synthetic_feature_weight``.
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
multi_class : {'auto', 'ovr', 'multinomial'}, default='auto'
If the option chosen is 'ovr', then a binary problem is fit for each
label. For 'multinomial' the loss minimised is the multinomial loss fit
across the entire probability distribution, *even when the data is
binary*. 'multinomial' is unavailable when solver='liblinear'.
'auto' selects 'ovr' if the data is binary, or if solver='liblinear',
and otherwise selects 'multinomial'.
.. versionadded:: 0.18
Stochastic Average Gradient descent solver for 'multinomial' case.
.. versionchanged:: 0.22
Default changed from 'ovr' to 'auto' in 0.22.
random_state : int, RandomState instance, default=None
Used when `solver='sag'`, 'saga' or 'liblinear' to shuffle the data.
Note that this only applies to the solver and not the cross-validation
generator. See :term:`Glossary <random_state>` for details.
l1_ratios : list of float, default=None
The list of Elastic-Net mixing parameter, with ``0 <= l1_ratio <= 1``.
Only used if ``penalty='elasticnet'``. A value of 0 is equivalent to
using ``penalty='l2'``, while 1 is equivalent to using
``penalty='l1'``. For ``0 < l1_ratio < 1``, the penalty is a combination
of L1 and L2.
Attributes
----------
classes_ : ndarray of shape (n_classes, )
A list of class labels known to the classifier.
coef_ : ndarray of shape (1, n_features) or (n_classes, n_features)
Coefficient of the features in the decision function.
`coef_` is of shape (1, n_features) when the given problem
is binary.
intercept_ : ndarray of shape (1,) or (n_classes,)
Intercept (a.k.a. bias) added to the decision function.
If `fit_intercept` is set to False, the intercept is set to zero.
`intercept_` is of shape (1,) when the problem is binary.
Cs_ : ndarray of shape (n_cs)
Array of C i.e. inverse of regularization parameter values used
for cross-validation.
l1_ratios_ : ndarray of shape (n_l1_ratios)
Array of l1_ratios used for cross-validation. If no l1_ratio is used
(i.e. penalty is not 'elasticnet'), this is set to ``[None]``
coefs_paths_ : ndarray of shape (n_folds, n_cs, n_features) or \
(n_folds, n_cs, n_features + 1)
dict with classes as the keys, and the path of coefficients obtained
during cross-validating across each fold and then across each Cs
after doing an OvR for the corresponding class as values.
If the 'multi_class' option is set to 'multinomial', then
the coefs_paths are the coefficients corresponding to each class.
Each dict value has shape ``(n_folds, n_cs, n_features)`` or
``(n_folds, n_cs, n_features + 1)`` depending on whether the
intercept is fit or not. If ``penalty='elasticnet'``, the shape is
``(n_folds, n_cs, n_l1_ratios_, n_features)`` or
``(n_folds, n_cs, n_l1_ratios_, n_features + 1)``.
scores_ : dict
dict with classes as the keys, and the values as the
grid of scores obtained during cross-validating each fold, after doing
an OvR for the corresponding class. If the 'multi_class' option
given is 'multinomial' then the same scores are repeated across
all classes, since this is the multinomial class. Each dict value
has shape ``(n_folds, n_cs)`` or ``(n_folds, n_cs, n_l1_ratios)`` if
``penalty='elasticnet'``.
C_ : ndarray of shape (n_classes,) or (n_classes - 1,)
Array of C that maps to the best scores across every class. If refit is
set to False, then for each class, the best C is the average of the
C's that correspond to the best scores for each fold.
`C_` is of shape (n_classes,) when the problem is binary.
l1_ratio_ : ndarray of shape (n_classes,) or (n_classes - 1,)
Array of l1_ratio that maps to the best scores across every class. If
refit is set to False, then for each class, the best l1_ratio is the
average of the l1_ratio's that correspond to the best scores for each
fold. `l1_ratio_` is of shape (n_classes,) when the problem is binary.
n_iter_ : ndarray of shape (n_classes, n_folds, n_cs) or (1, n_folds, n_cs)
Actual number of iterations for all classes, folds and Cs.
In the binary or multinomial cases, the first dimension is equal to 1.
If ``penalty='elasticnet'``, the shape is ``(n_classes, n_folds,
n_cs, n_l1_ratios)`` or ``(1, n_folds, n_cs, n_l1_ratios)``.
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Defined only when `X`
has feature names that are all strings.
.. versionadded:: 1.0
See Also
--------
LogisticRegression : Logistic regression without tuning the
hyperparameter `C`.
Examples
--------
>>> from sklearn.datasets import load_iris
>>> from sklearn.linear_model import LogisticRegressionCV
>>> X, y = load_iris(return_X_y=True)
>>> clf = LogisticRegressionCV(cv=5, random_state=0).fit(X, y)
>>> clf.predict(X[:2, :])
array([0, 0])
>>> clf.predict_proba(X[:2, :]).shape
(2, 3)
>>> clf.score(X, y)
0.98...
"""
def __init__(
self,
*,
Cs=10,
fit_intercept=True,
cv=None,
dual=False,
penalty="l2",
scoring=None,
solver="lbfgs",
tol=1e-4,
max_iter=100,
class_weight=None,
n_jobs=None,
verbose=0,
refit=True,
intercept_scaling=1.0,
multi_class="auto",
random_state=None,
l1_ratios=None,
):
self.Cs = Cs
self.fit_intercept = fit_intercept
self.cv = cv
self.dual = dual
self.penalty = penalty
self.scoring = scoring
self.tol = tol
self.max_iter = max_iter
self.class_weight = class_weight
self.n_jobs = n_jobs
self.verbose = verbose
self.solver = solver
self.refit = refit
self.intercept_scaling = intercept_scaling
self.multi_class = multi_class
self.random_state = random_state
self.l1_ratios = l1_ratios
def fit(self, X, y, sample_weight=None):
"""Fit the model according to the given training data.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training vector, where `n_samples` is the number of samples and
`n_features` is the number of features.
y : array-like of shape (n_samples,)
Target vector relative to X.
sample_weight : array-like of shape (n_samples,), default=None
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Returns
-------
self : object
Fitted LogisticRegressionCV estimator.
"""
solver = _check_solver(self.solver, self.penalty, self.dual)
if not isinstance(self.max_iter, numbers.Number) or self.max_iter < 0:
raise ValueError(
"Maximum number of iteration must be positive; got (max_iter=%r)"
% self.max_iter
)
if not isinstance(self.tol, numbers.Number) or self.tol < 0:
raise ValueError(
"Tolerance for stopping criteria must be positive; got (tol=%r)"
% self.tol
)
if self.penalty == "elasticnet":
if (
self.l1_ratios is None
or len(self.l1_ratios) == 0
or any(
(
not isinstance(l1_ratio, numbers.Number)
or l1_ratio < 0
or l1_ratio > 1
)
for l1_ratio in self.l1_ratios
)
):
raise ValueError(
"l1_ratios must be a list of numbers between "
"0 and 1; got (l1_ratios=%r)"
% self.l1_ratios
)
l1_ratios_ = self.l1_ratios
else:
if self.l1_ratios is not None:
warnings.warn(
"l1_ratios parameter is only used when penalty "
"is 'elasticnet'. Got (penalty={})".format(self.penalty)
)
l1_ratios_ = [None]
if self.penalty == "none":
raise ValueError(
"penalty='none' is not useful and not supported by "
"LogisticRegressionCV."
)
X, y = self._validate_data(
X,
y,
accept_sparse="csr",
dtype=np.float64,
order="C",
accept_large_sparse=solver not in ["liblinear", "sag", "saga"],
)
check_classification_targets(y)
class_weight = self.class_weight
# Encode for string labels
label_encoder = LabelEncoder().fit(y)
y = label_encoder.transform(y)
if isinstance(class_weight, dict):
class_weight = {
label_encoder.transform([cls])[0]: v for cls, v in class_weight.items()
}
# The original class labels
classes = self.classes_ = label_encoder.classes_
encoded_labels = label_encoder.transform(label_encoder.classes_)
multi_class = _check_multi_class(self.multi_class, solver, len(classes))
if solver in ["sag", "saga"]:
max_squared_sum = row_norms(X, squared=True).max()
else:
max_squared_sum = None
# init cross-validation generator
cv = check_cv(self.cv, y, classifier=True)
folds = list(cv.split(X, y))
# Use the label encoded classes
n_classes = len(encoded_labels)
if n_classes < 2:
raise ValueError(
"This solver needs samples of at least 2 classes"
" in the data, but the data contains only one"
" class: %r"
% classes[0]
)
if n_classes == 2:
# OvR in case of binary problems is as good as fitting
# the higher label
n_classes = 1
encoded_labels = encoded_labels[1:]
classes = classes[1:]
# We need this hack to iterate only once over labels, in the case of
# multi_class = multinomial, without changing the value of the labels.
if multi_class == "multinomial":
iter_encoded_labels = iter_classes = [None]
else:
iter_encoded_labels = encoded_labels
iter_classes = classes
# compute the class weights for the entire dataset y
if class_weight == "balanced":
class_weight = compute_class_weight(
class_weight, classes=np.arange(len(self.classes_)), y=y
)
class_weight = dict(enumerate(class_weight))
path_func = delayed(_log_reg_scoring_path)
# The SAG solver releases the GIL so it's more efficient to use
# threads for this solver.
if self.solver in ["sag", "saga"]:
prefer = "threads"
else:
prefer = "processes"
fold_coefs_ = Parallel(
n_jobs=self.n_jobs,
verbose=self.verbose,
**_joblib_parallel_args(prefer=prefer),
)(
path_func(
X,
y,
train,
test,
pos_class=label,
Cs=self.Cs,
fit_intercept=self.fit_intercept,
penalty=self.penalty,
dual=self.dual,
solver=solver,
tol=self.tol,
max_iter=self.max_iter,
verbose=self.verbose,
class_weight=class_weight,
scoring=self.scoring,
multi_class=multi_class,
intercept_scaling=self.intercept_scaling,
random_state=self.random_state,
max_squared_sum=max_squared_sum,
sample_weight=sample_weight,
l1_ratio=l1_ratio,
)
for label in iter_encoded_labels
for train, test in folds
for l1_ratio in l1_ratios_
)
# _log_reg_scoring_path will output different shapes depending on the
# multi_class param, so we need to reshape the outputs accordingly.
# Cs is of shape (n_classes . n_folds . n_l1_ratios, n_Cs) and all the
# rows are equal, so we just take the first one.
# After reshaping,
# - scores is of shape (n_classes, n_folds, n_Cs . n_l1_ratios)
# - coefs_paths is of shape
# (n_classes, n_folds, n_Cs . n_l1_ratios, n_features)
# - n_iter is of shape
# (n_classes, n_folds, n_Cs . n_l1_ratios) or
# (1, n_folds, n_Cs . n_l1_ratios)
coefs_paths, Cs, scores, n_iter_ = zip(*fold_coefs_)
self.Cs_ = Cs[0]
if multi_class == "multinomial":
coefs_paths = np.reshape(
coefs_paths,
(len(folds), len(l1_ratios_) * len(self.Cs_), n_classes, -1),
)
# equiv to coefs_paths = np.moveaxis(coefs_paths, (0, 1, 2, 3),
# (1, 2, 0, 3))
coefs_paths = np.swapaxes(coefs_paths, 0, 1)
coefs_paths = np.swapaxes(coefs_paths, 0, 2)
self.n_iter_ = np.reshape(
n_iter_, (1, len(folds), len(self.Cs_) * len(l1_ratios_))
)
# repeat same scores across all classes
scores = np.tile(scores, (n_classes, 1, 1))
else:
coefs_paths = np.reshape(
coefs_paths,
(n_classes, len(folds), len(self.Cs_) * len(l1_ratios_), -1),
)
self.n_iter_ = np.reshape(
n_iter_, (n_classes, len(folds), len(self.Cs_) * len(l1_ratios_))
)
scores = np.reshape(scores, (n_classes, len(folds), -1))
self.scores_ = dict(zip(classes, scores))
self.coefs_paths_ = dict(zip(classes, coefs_paths))
self.C_ = list()
self.l1_ratio_ = list()
self.coef_ = np.empty((n_classes, X.shape[1]))
self.intercept_ = np.zeros(n_classes)
for index, (cls, encoded_label) in enumerate(
zip(iter_classes, iter_encoded_labels)
):
if multi_class == "ovr":
scores = self.scores_[cls]
coefs_paths = self.coefs_paths_[cls]
else:
# For multinomial, all scores are the same across classes
scores = scores[0]
# coefs_paths will keep its original shape because
# logistic_regression_path expects it this way
if self.refit:
# best_index is between 0 and (n_Cs . n_l1_ratios - 1)
# for example, with n_cs=2 and n_l1_ratios=3
# the layout of scores is
# [c1, c2, c1, c2, c1, c2]
# l1_1 , l1_2 , l1_3
best_index = scores.sum(axis=0).argmax()
best_index_C = best_index % len(self.Cs_)
C_ = self.Cs_[best_index_C]
self.C_.append(C_)
best_index_l1 = best_index // len(self.Cs_)
l1_ratio_ = l1_ratios_[best_index_l1]
self.l1_ratio_.append(l1_ratio_)
if multi_class == "multinomial":
coef_init = np.mean(coefs_paths[:, :, best_index, :], axis=1)
else:
coef_init = np.mean(coefs_paths[:, best_index, :], axis=0)
# Note that y is label encoded and hence pos_class must be
# the encoded label / None (for 'multinomial')
w, _, _ = _logistic_regression_path(
X,
y,
pos_class=encoded_label,
Cs=[C_],
solver=solver,
fit_intercept=self.fit_intercept,
coef=coef_init,
max_iter=self.max_iter,
tol=self.tol,
penalty=self.penalty,
class_weight=class_weight,
multi_class=multi_class,
verbose=max(0, self.verbose - 1),
random_state=self.random_state,
check_input=False,
max_squared_sum=max_squared_sum,
sample_weight=sample_weight,
l1_ratio=l1_ratio_,
)
w = w[0]
else:
# Take the best scores across every fold and the average of
# all coefficients corresponding to the best scores.
best_indices = np.argmax(scores, axis=1)
if multi_class == "ovr":
w = np.mean(
[coefs_paths[i, best_indices[i], :] for i in range(len(folds))],
axis=0,
)
else:
w = np.mean(
[
coefs_paths[:, i, best_indices[i], :]
for i in range(len(folds))
],
axis=0,
)
best_indices_C = best_indices % len(self.Cs_)
self.C_.append(np.mean(self.Cs_[best_indices_C]))
if self.penalty == "elasticnet":
best_indices_l1 = best_indices // len(self.Cs_)
self.l1_ratio_.append(np.mean(l1_ratios_[best_indices_l1]))
else:
self.l1_ratio_.append(None)
if multi_class == "multinomial":
self.C_ = np.tile(self.C_, n_classes)
self.l1_ratio_ = np.tile(self.l1_ratio_, n_classes)
self.coef_ = w[:, : X.shape[1]]
if self.fit_intercept:
self.intercept_ = w[:, -1]
else:
self.coef_[index] = w[: X.shape[1]]
if self.fit_intercept:
self.intercept_[index] = w[-1]
self.C_ = np.asarray(self.C_)
self.l1_ratio_ = np.asarray(self.l1_ratio_)
self.l1_ratios_ = np.asarray(l1_ratios_)
# if elasticnet was used, add the l1_ratios dimension to some
# attributes
if self.l1_ratios is not None:
# with n_cs=2 and n_l1_ratios=3
# the layout of scores is
# [c1, c2, c1, c2, c1, c2]
# l1_1 , l1_2 , l1_3
# To get a 2d array with the following layout
# l1_1, l1_2, l1_3
# c1 [[ . , . , . ],
# c2 [ . , . , . ]]
# We need to first reshape and then transpose.
# The same goes for the other arrays
for cls, coefs_path in self.coefs_paths_.items():
self.coefs_paths_[cls] = coefs_path.reshape(
(len(folds), self.l1_ratios_.size, self.Cs_.size, -1)
)
self.coefs_paths_[cls] = np.transpose(
self.coefs_paths_[cls], (0, 2, 1, 3)
)
for cls, score in self.scores_.items():
self.scores_[cls] = score.reshape(
(len(folds), self.l1_ratios_.size, self.Cs_.size)
)
self.scores_[cls] = np.transpose(self.scores_[cls], (0, 2, 1))
self.n_iter_ = self.n_iter_.reshape(
(-1, len(folds), self.l1_ratios_.size, self.Cs_.size)
)
self.n_iter_ = np.transpose(self.n_iter_, (0, 1, 3, 2))
return self
def score(self, X, y, sample_weight=None):
"""Score using the `scoring` option on the given test data and labels.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Test samples.
y : array-like of shape (n_samples,)
True labels for X.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
Returns
-------
score : float
Score of self.predict(X) wrt. y.
"""
scoring = self.scoring or "accuracy"
scoring = get_scorer(scoring)
return scoring(self, X, y, sample_weight=sample_weight)
def _more_tags(self):
return {
"_xfail_checks": {
"check_sample_weights_invariance": (
"zero sample_weight is not equivalent to removing samples"
),
}
}
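# --- Illustrative sketch (editorial addition, not part of the original
# module): shows the shape of the cross-validation artifacts produced by
# `LogisticRegressionCV.fit` for a binary problem. The helper name
# `_example_logistic_regression_cv` is hypothetical.
def _example_logistic_regression_cv():
    rng = np.random.RandomState(0)
    X = rng.randn(60, 4)
    y = (X[:, 0] > 0).astype(int)
    clf = LogisticRegressionCV(Cs=3, cv=4, solver="lbfgs", max_iter=1000).fit(X, y)
    # Only the positive class is stored for binary problems; its scores have
    # shape (n_folds, n_Cs).
    assert clf.scores_[1].shape == (4, 3)
    # One best C per stored class.
    assert clf.C_.shape == (1,)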
|
# Generated by Django 2.1.4 on 2019-02-09 15:20
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('society_bureau', '0002_sitesettings'),
]
operations = [
migrations.AlterField(
model_name='societybureau',
name='user',
field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='society_bureau', to=settings.AUTH_USER_MODEL),
),
]
|
"""The dagster-airflow operators."""
from dagster_airflow.operators.util import invoke_steps_within_python_operator
from dagster_airflow.vendor.python_operator import PythonOperator
class DagsterPythonOperator(PythonOperator):
def __init__(self, dagster_operator_parameters, *args, **kwargs):
def python_callable(ts, dag_run, **kwargs): # pylint: disable=unused-argument
return invoke_steps_within_python_operator(
dagster_operator_parameters.invocation_args, ts, dag_run, **kwargs
)
super(DagsterPythonOperator, self).__init__(
task_id=dagster_operator_parameters.task_id,
provide_context=True,
python_callable=python_callable,
dag=dagster_operator_parameters.dag,
*args,
**kwargs,
)
|
# Copyright(C) 2011, 2015, 2018 by
# Ben Edwards <bedwards@cs.unm.edu>
# Aric Hagberg <hagberg@lanl.gov>
# Konstantinos Karakatsanis <dinoskarakas@gmail.com>
# All rights reserved.
# BSD license.
#
# Authors: Ben Edwards (bedwards@cs.unm.edu)
# Aric Hagberg (hagberg@lanl.gov)
# Konstantinos Karakatsanis (dinoskarakas@gmail.com)
# Jean-Gabriel Young (jean.gabriel.young@gmail.com)
"""Generators for classes of graphs used in studying social networks."""
from __future__ import division
import itertools
import math
import networkx as nx
from networkx.utils import py_random_state
__all__ = ['caveman_graph', 'connected_caveman_graph',
'relaxed_caveman_graph', 'random_partition_graph',
'planted_partition_graph', 'gaussian_random_partition_graph',
'ring_of_cliques', 'windmill_graph', 'stochastic_block_model']
def caveman_graph(l, k):
"""Returns a caveman graph of `l` cliques of size `k`.
Parameters
----------
l : int
Number of cliques
k : int
Size of cliques
Returns
-------
G : NetworkX Graph
caveman graph
Notes
-----
    This returns an undirected graph; it can be converted to a directed
graph using :func:`nx.to_directed`, or a multigraph using
``nx.MultiGraph(nx.caveman_graph(l, k))``. Only the undirected version is
described in [1]_ and it is unclear which of the directed
generalizations is most useful.
Examples
--------
>>> G = nx.caveman_graph(3, 3)
See also
--------
connected_caveman_graph
References
----------
.. [1] Watts, D. J. 'Networks, Dynamics, and the Small-World Phenomenon.'
Amer. J. Soc. 105, 493-527, 1999.
"""
# l disjoint cliques of size k
G = nx.empty_graph(l * k)
if k > 1:
for start in range(0, l * k, k):
edges = itertools.combinations(range(start, start + k), 2)
G.add_edges_from(edges)
return G
def connected_caveman_graph(l, k):
"""Returns a connected caveman graph of `l` cliques of size `k`.
    The connected caveman graph is formed by creating `l` cliques of size
`k`, then a single edge in each clique is rewired to a node in an
adjacent clique.
Parameters
----------
l : int
number of cliques
k : int
size of cliques
Returns
-------
G : NetworkX Graph
connected caveman graph
Notes
-----
    This returns an undirected graph; it can be converted to a directed
graph using :func:`nx.to_directed`, or a multigraph using
    ``nx.MultiGraph(nx.connected_caveman_graph(l, k))``. Only the undirected version is
described in [1]_ and it is unclear which of the directed
generalizations is most useful.
Examples
--------
>>> G = nx.connected_caveman_graph(3, 3)
References
----------
.. [1] Watts, D. J. 'Networks, Dynamics, and the Small-World Phenomenon.'
Amer. J. Soc. 105, 493-527, 1999.
"""
G = nx.caveman_graph(l, k)
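    # Rewire one edge per clique: remove the edge between each clique's first
    # two nodes and connect the clique's first node to the last node of the
    # previous clique (wrapping around), chaining the cliques into a ring.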
for start in range(0, l * k, k):
G.remove_edge(start, start + 1)
G.add_edge(start, (start - 1) % (l * k))
return G
@py_random_state(3)
def relaxed_caveman_graph(l, k, p, seed=None):
"""Returns a relaxed caveman graph.
A relaxed caveman graph starts with `l` cliques of size `k`. Edges are
then randomly rewired with probability `p` to link different cliques.
Parameters
----------
l : int
Number of groups
k : int
Size of cliques
p : float
        Probability of rewiring each edge.
seed : integer, random_state, or None (default)
Indicator of random number generation state.
See :ref:`Randomness<randomness>`.
Returns
-------
G : NetworkX Graph
Relaxed Caveman Graph
Raises
------
NetworkXError:
If p is not in [0,1]
Examples
--------
>>> G = nx.relaxed_caveman_graph(2, 3, 0.1, seed=42)
References
----------
.. [1] Santo Fortunato, Community Detection in Graphs,
Physics Reports Volume 486, Issues 3-5, February 2010, Pages 75-174.
https://arxiv.org/abs/0906.0612
"""
G = nx.caveman_graph(l, k)
nodes = list(G)
for (u, v) in G.edges():
if seed.random() < p: # rewire the edge
x = seed.choice(nodes)
if G.has_edge(u, x):
continue
G.remove_edge(u, v)
G.add_edge(u, x)
return G
@py_random_state(3)
def random_partition_graph(sizes, p_in, p_out, seed=None, directed=False):
"""Returns the random partition graph with a partition of sizes.
A partition graph is a graph of communities with sizes defined by
s in sizes. Nodes in the same group are connected with probability
p_in and nodes of different groups are connected with probability
p_out.
Parameters
----------
sizes : list of ints
Sizes of groups
p_in : float
        probability of edges within groups
p_out : float
probability of edges between groups
directed : boolean optional, default=False
Whether to create a directed graph
seed : integer, random_state, or None (default)
Indicator of random number generation state.
See :ref:`Randomness<randomness>`.
Returns
-------
G : NetworkX Graph or DiGraph
        random partition graph of size sum(sizes)
Raises
------
NetworkXError
If p_in or p_out is not in [0,1]
Examples
--------
>>> G = nx.random_partition_graph([10,10,10],.25,.01)
>>> len(G)
30
>>> partition = G.graph['partition']
>>> len(partition)
3
Notes
-----
This is a generalization of the planted-l-partition described in
[1]_. It allows for the creation of groups of any size.
    The partition is stored as a graph attribute 'partition'.
References
----------
.. [1] Santo Fortunato 'Community Detection in Graphs' Physical Reports
Volume 486, Issue 3-5 p. 75-174. https://arxiv.org/abs/0906.0612
"""
# Use geometric method for O(n+m) complexity algorithm
# partition = nx.community_sets(nx.get_node_attributes(G, 'affiliation'))
if not 0.0 <= p_in <= 1.0:
raise nx.NetworkXError("p_in must be in [0,1]")
if not 0.0 <= p_out <= 1.0:
raise nx.NetworkXError("p_out must be in [0,1]")
# create connection matrix
num_blocks = len(sizes)
p = [[p_out for s in range(num_blocks)] for r in range(num_blocks)]
for r in range(num_blocks):
p[r][r] = p_in
return stochastic_block_model(sizes, p, nodelist=None, seed=seed,
directed=directed, selfloops=False,
sparse=True)
@py_random_state(4)
def planted_partition_graph(l, k, p_in, p_out, seed=None, directed=False):
"""Returns the planted l-partition graph.
    This model partitions a graph with n=l*k vertices into
l groups with k vertices each. Vertices of the same
group are linked with a probability p_in, and vertices
of different groups are linked with probability p_out.
Parameters
----------
l : int
Number of groups
k : int
Number of vertices in each group
p_in : float
probability of connecting vertices within a group
p_out : float
        probability of connecting vertices between groups
seed : integer, random_state, or None (default)
Indicator of random number generation state.
See :ref:`Randomness<randomness>`.
directed : bool,optional (default=False)
If True return a directed graph
Returns
-------
G : NetworkX Graph or DiGraph
planted l-partition graph
Raises
------
    NetworkXError:
        If p_in or p_out is not in [0,1]
Examples
--------
>>> G = nx.planted_partition_graph(4, 3, 0.5, 0.1, seed=42)
See Also
--------
    random_partition_graph
References
----------
.. [1] A. Condon, R.M. Karp, Algorithms for graph partitioning
on the planted partition model,
Random Struct. Algor. 18 (2001) 116-140.
.. [2] Santo Fortunato 'Community Detection in Graphs' Physical Reports
Volume 486, Issue 3-5 p. 75-174. https://arxiv.org/abs/0906.0612
"""
return random_partition_graph([k] * l, p_in, p_out, seed, directed)
@py_random_state(6)
def gaussian_random_partition_graph(n, s, v, p_in, p_out, directed=False,
seed=None):
"""Generate a Gaussian random partition graph.
A Gaussian random partition graph is created by creating k partitions
each with a size drawn from a normal distribution with mean s and variance
s/v. Nodes are connected within clusters with probability p_in and
    between clusters with probability p_out [1]_.
Parameters
----------
n : int
Number of nodes in the graph
s : float
Mean cluster size
v : float
Shape parameter. The variance of cluster size distribution is s/v.
p_in : float
        Probability of intra-cluster connection.
    p_out : float
        Probability of inter-cluster connection.
directed : boolean, optional default=False
Whether to create a directed graph or not
seed : integer, random_state, or None (default)
Indicator of random number generation state.
See :ref:`Randomness<randomness>`.
Returns
-------
G : NetworkX Graph or DiGraph
gaussian random partition graph
Raises
------
NetworkXError
If s is > n
If p_in or p_out is not in [0,1]
Notes
-----
    Note that the number of partitions depends on `s`, `v` and `n`, and that
    the last partition may be considerably smaller, as it is sized to simply
    fill out the remaining nodes [1]_.
See Also
--------
random_partition_graph
Examples
--------
>>> G = nx.gaussian_random_partition_graph(100,10,10,.25,.1)
>>> len(G)
100
References
----------
.. [1] Ulrik Brandes, Marco Gaertler, Dorothea Wagner,
Experiments on Graph Clustering Algorithms,
In the proceedings of the 11th Europ. Symp. Algorithms, 2003.
"""
if s > n:
raise nx.NetworkXError("s must be <= n")
assigned = 0
sizes = []
while True:
size = int(seed.gauss(s, float(s) / v + 0.5))
if size < 1: # how to handle 0 or negative sizes?
continue
if assigned + size >= n:
sizes.append(n - assigned)
break
assigned += size
sizes.append(size)
    return random_partition_graph(sizes, p_in, p_out, seed=seed, directed=directed)
def ring_of_cliques(num_cliques, clique_size):
"""Defines a "ring of cliques" graph.
    A ring of cliques graph consists of cliques connected through single
    links. Each clique is a complete graph.
Parameters
----------
num_cliques : int
Number of cliques
clique_size : int
Size of cliques
Returns
-------
G : NetworkX Graph
ring of cliques graph
Raises
------
NetworkXError
If the number of cliques is lower than 2 or
if the size of cliques is smaller than 2.
Examples
--------
>>> G = nx.ring_of_cliques(8, 4)
See Also
--------
connected_caveman_graph
Notes
-----
The `connected_caveman_graph` graph removes a link from each clique to
connect it with the next clique. Instead, the `ring_of_cliques` graph
simply adds the link without removing any link from the cliques.
"""
if num_cliques < 2:
raise nx.NetworkXError('A ring of cliques must have at least '
'two cliques')
if clique_size < 2:
raise nx.NetworkXError('The cliques must have at least two nodes')
G = nx.Graph()
for i in range(num_cliques):
edges = itertools.combinations(range(i * clique_size, i * clique_size +
clique_size), 2)
G.add_edges_from(edges)
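        # Connect node 1 of this clique to node 0 of the next clique; the
        # modulo wraps the last clique back to the first, closing the ring.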
G.add_edge(i * clique_size + 1, (i + 1) * clique_size %
(num_cliques * clique_size))
return G
def windmill_graph(n, k):
"""Generate a windmill graph.
A windmill graph is a graph of `n` cliques each of size `k` that are all
joined at one node.
It can be thought of as taking a disjoint union of `n` cliques of size `k`,
selecting one point from each, and contracting all of the selected points.
Alternatively, one could generate `n` cliques of size `k-1` and one node
that is connected to all other nodes in the graph.
Parameters
----------
n : int
Number of cliques
k : int
Size of cliques
Returns
-------
G : NetworkX Graph
windmill graph with n cliques of size k
Raises
------
NetworkXError
If the number of cliques is less than two
        If the size of the cliques is less than two
Examples
--------
>>> G = nx.windmill_graph(4, 5)
Notes
-----
The node labeled `0` will be the node connected to all other nodes.
Note that windmill graphs are usually denoted `Wd(k,n)`, so the parameters
    are given in the opposite order from the parameters of this method.
"""
if n < 2:
msg = 'A windmill graph must have at least two cliques'
raise nx.NetworkXError(msg)
if k < 2:
raise nx.NetworkXError('The cliques must have at least two nodes')
G = nx.disjoint_union_all(itertools.chain([nx.complete_graph(k)],
(nx.complete_graph(k - 1)
for _ in range(n - 1))))
G.add_edges_from((0, i) for i in range(k, G.number_of_nodes()))
return G
@py_random_state(3)
def stochastic_block_model(sizes, p, nodelist=None, seed=None,
directed=False, selfloops=False, sparse=True):
"""Returns a stochastic block model graph.
This model partitions the nodes in blocks of arbitrary sizes, and places
edges between pairs of nodes independently, with a probability that depends
on the blocks.
Parameters
----------
sizes : list of ints
Sizes of blocks
p : list of list of floats
Element (r,s) gives the density of edges going from the nodes
of group r to nodes of group s.
p must match the number of groups (len(sizes) == len(p)),
and it must be symmetric if the graph is undirected.
nodelist : list, optional
The block tags are assigned according to the node identifiers
in nodelist. If nodelist is None, then the ordering is the
range [0,sum(sizes)-1].
seed : integer, random_state, or None (default)
Indicator of random number generation state.
See :ref:`Randomness<randomness>`.
directed : boolean optional, default=False
Whether to create a directed graph or not.
selfloops : boolean optional, default=False
Whether to include self-loops or not.
    sparse : boolean optional, default=True
Use the sparse heuristic to speed up the generator.
Returns
-------
g : NetworkX Graph or DiGraph
Stochastic block model graph of size sum(sizes)
Raises
------
NetworkXError
If probabilities are not in [0,1].
If the probability matrix is not square (directed case).
If the probability matrix is not symmetric (undirected case).
If the sizes list does not match nodelist or the probability matrix.
        If nodelist contains duplicates.
Examples
--------
>>> sizes = [75, 75, 300]
>>> probs = [[0.25, 0.05, 0.02],
... [0.05, 0.35, 0.07],
... [0.02, 0.07, 0.40]]
>>> g = nx.stochastic_block_model(sizes, probs, seed=0)
>>> len(g)
450
>>> H = nx.quotient_graph(g, g.graph['partition'], relabel=True)
>>> for v in H.nodes(data=True):
... print(round(v[1]['density'], 3))
...
0.245
0.348
0.405
>>> for v in H.edges(data=True):
... print(round(1.0 * v[2]['weight'] / (sizes[v[0]] * sizes[v[1]]), 3))
...
0.051
0.022
0.07
See Also
--------
random_partition_graph
planted_partition_graph
gaussian_random_partition_graph
gnp_random_graph
References
----------
.. [1] Holland, P. W., Laskey, K. B., & Leinhardt, S.,
"Stochastic blockmodels: First steps",
Social networks, 5(2), 109-137, 1983.
"""
# Check if dimensions match
if len(sizes) != len(p):
raise nx.NetworkXException("'sizes' and 'p' do not match.")
# Check for probability symmetry (undirected) and shape (directed)
for row in p:
if len(p) != len(row):
raise nx.NetworkXException("'p' must be a square matrix.")
if not directed:
p_transpose = [list(i) for i in zip(*p)]
for i in zip(p, p_transpose):
for j in zip(i[0], i[1]):
if abs(j[0] - j[1]) > 1e-08:
raise nx.NetworkXException("'p' must be symmetric.")
# Check for probability range
for row in p:
for prob in row:
if prob < 0 or prob > 1:
raise nx.NetworkXException("Entries of 'p' not in [0,1].")
# Check for nodelist consistency
if nodelist is not None:
if len(nodelist) != sum(sizes):
raise nx.NetworkXException("'nodelist' and 'sizes' do not match.")
if len(nodelist) != len(set(nodelist)):
raise nx.NetworkXException("nodelist contains duplicate.")
else:
nodelist = range(0, sum(sizes))
# Setup the graph conditionally to the directed switch.
block_range = range(len(sizes))
if directed:
g = nx.DiGraph()
block_iter = itertools.product(block_range, block_range)
else:
g = nx.Graph()
block_iter = itertools.combinations_with_replacement(block_range, 2)
# Split nodelist in a partition (list of sets).
size_cumsum = [sum(sizes[0:x]) for x in range(0, len(sizes) + 1)]
g.graph['partition'] = [set(nodelist[size_cumsum[x]:size_cumsum[x + 1]])
for x in range(0, len(size_cumsum) - 1)]
# Setup nodes and graph name
for block_id, nodes in enumerate(g.graph['partition']):
for node in nodes:
g.add_node(node, block=block_id)
g.name = "stochastic_block_model"
# Test for edge existence
parts = g.graph['partition']
for i, j in block_iter:
if i == j:
if directed:
if selfloops:
edges = itertools.product(parts[i], parts[i])
else:
edges = itertools.permutations(parts[i], 2)
else:
edges = itertools.combinations(parts[i], 2)
if selfloops:
edges = itertools.chain(edges, zip(parts[i], parts[i]))
for e in edges:
if seed.random() < p[i][j]:
g.add_edge(*e)
else:
edges = itertools.product(parts[i], parts[j])
if sparse:
                if p[i][j] == 1:  # Test edge cases p_ij = 0 or 1
for e in edges:
g.add_edge(*e)
elif p[i][j] > 0:
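                    # Geometric skipping: instead of testing every candidate
                    # edge with probability p, draw the gap to the next
                    # accepted edge as floor(log(U) / log(1 - p)) and skip
                    # that many candidates, giving expected O(n + m) work for
                    # sparse blocks instead of testing all pairs.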
while True:
try:
logrand = math.log(seed.random())
skip = math.floor(logrand / math.log(1 - p[i][j]))
# consume "skip" edges
next(itertools.islice(edges, skip, skip), None)
e = next(edges)
g.add_edge(*e) # __safe
except StopIteration:
break
else:
for e in edges:
if seed.random() < p[i][j]:
g.add_edge(*e) # __safe
return g
|
import os
import sys
path = os.environ.get('TRAVIS_BUILD_DIR')
sys.path.insert(0, path+'/protlearn')
import numpy as np
from preprocessing import txt_to_df
from feature_engineering import length
def test_lengths():
"Test sequence lengths"
# load data
df = txt_to_df(path+'/tests/docs/test_seq.txt', 0)
# test integer lengths
len_int = length(df, 'int')
assert np.array_equal(len_int, np.array([6, 9, 7, 6]))
# test one-hot-encoded lengths
len_ohe = length(df, 'ohe')
# columns: [6, 7, 9]
assert np.array_equal(len_ohe, np.array([[1., 0., 0.],
[0., 0., 1.],
[0., 1., 0.],
[1., 0., 0.]]))
|
import gym
import matplotlib.pyplot as plt
import numpy as np
import time
import brs_envs
env = gym.make('RocketLanderBRSEnv-v0',
render=True,
max_lateral_offset=0,
max_pitch_offset=0,
max_roll_offset=0,
max_yaw_offset=0,
mean_robot_start_height=100)
s = env.reset()
zero_action = np.zeros(env.action_space.shape)
vels = []
input("Press ENTER to start...")
target = 80
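# Crude altitude heuristic (intent inferred from the code below): below the
# 80 m target the first action component ramps up with the squared altitude
# error (capped at 1.0), otherwise it idles at 0.3; a sinusoid and a constant
# offset are then added to the remaining components, whose meaning depends on
# the environment's action space.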
for i in range(1800):
time.sleep(1/60)
if s[2] < target:
a = np.array([min(1.0, (s[2] - target)**2), 0.0, 0.0])
else:
a = np.array([0.3, 0.0, 0.0])
a += np.array([0.0, 0.15 * np.sin(0.1 * i), 0.8])
s, r, done, _ = env.step(a)
# vels.append(np.linalg.norm(s[7:10]))
vels.append(s[2])
# s, r, done, _ = env.step(env.action_space.sample())
if done:
print("Reward in final frame: {}".format(r))
break
plt.plot(np.arange(len(vels)) / 60, vels)
plt.xlabel('Time (s)')
plt.ylabel('Position (m)')
plt.show()
|
from classes.Humanoid import Humanoid
class Player(Humanoid):
def __init__(self, name, room, dmg=1, hp=10):
super().__init__(name, room, dmg, hp)
self.equipped = None
def __str__(self):
        return f'{self.name}: ' + '{\n' + f'\t[\n\t\thp: {self.hp}/{self.max_hp},\n\t\tdmg: {self.dmg}\n\tequipped: {self.equipped}\n]'
def use_item(self, item):
item.use(self)
def print_inventory(self):
if len(self.inventory):
print(f'\n{self.name}\'s Inventory:')
for inventory_item in self.inventory:
print(f'\t{inventory_item.name}: {inventory_item.description}')
else:
print(
'\nYou have no items in your inventory.\n\nTry roaming around to find some items.')
def take_damage(self, attacker):
if not self.blocking:
self.hp -= attacker.dmg
if self.hp <= 0:
print('You have died. Better luck next time!')
else:
print(
f'You were hit with {attacker.dmg} damage. ({self.hp}/{self.max_hp})')
else:
self.blocking = False
            print(f"You blocked {attacker.name}'s attack!")
def equip(self, item):
if item.equippable:
item.use(self)
|
from ravenrpc import Ravencoin
import ipfshttpclient
from credentials import USER, PASSWORD
rvn = Ravencoin(USER, PASSWORD)
ipfs = ipfshttpclient.connect()
ASSETNAME = "POLITICOIN"
IPFSDIRPATH = "/opt/squawker/ipfs"
|
from django.contrib import admin
# Register your models here.
from library.models import Book, Author
@admin.register(Book)
class BookAdmin(admin.ModelAdmin):
list_display = ('name', 'id', 'author', 'publication_date', 'is_active')
@admin.register(Author)
class AuthorAdmin(admin.ModelAdmin):
list_display = ('name', 'id', 'is_active')
|
# -*- coding: utf-8 -*-
import random
import re
from client import jasperpath
import RPi.GPIO as GPIO
import time
import sys
import vibrate
WORDS = ["HELLO"]
def handle(text, mic, profile):
vibrate.retrieve_from_DOA('low')
print("hello module")
mic.say('hello')
def isValid(text):
"""
        Returns True if the input contains a greeting.
Arguments:
text -- user-input, typically transcribed speech
"""
return bool(re.search(r'\bhello\b', text, re.IGNORECASE))
"""
Responds to user-input, typically speech text, by telling a joke.
Arguments:
text -- user-input, typically transcribed speech
mic -- used to interact with the user (for both input and output)
profile -- contains information related to the user (e.g., phone
number)
try:
global motorLeft
global motorRight
GPIO.setmode(GPIO.BCM)
#GPIO.setwarnings(False)
GPIO.setup(18,GPIO.OUT)
GPIO.setup(23,GPIO.OUT)
motorLeft = GPIO.PWM(18,100) #motor left = yellow
motorLeft.start(0)
motorRight = GPIO.PWM(23,100) #motor right = blue
motorRight.start(0)
print("motor vibrating")
mic.say("vibration")
retrieve_from_DOA()
#direction = retrieve_from_DOA()
#print("direction: " + direction)
#if direction == 'left':
#print("left motor running")
#for x in range(num):
#motorLeft.ChangeDutyCycle(80)
#time.sleep(0.5)
#motorLeft.ChangeDutyCycle(0)
#time.sleep(0.2)
#if direction == 'right':
#print("right motor running")
except KeyboardInterrupt: # If CTRL+C is pressed, exit cleanly:
print("Keyboard interrupt")
finally:
motorLeft.stop()
motorRight.stop()
motorLeft = None
motorRight = None
GPIO.cleanup() # cleanup all GPIO
print("clean up")
def retrieve_from_DOA():
sys.path.append('/home/pi/reverb/usb_4_mic_array')
import DOA
doa = DOA.main()
if (doa < 90 and doa >= 0) or (doa >= 270):
print("motor DOA <90 or >= 270: " + str(doa))
#return ("right")
#vibrate_motor("right")
vibrate.start_vibrate('right', 50, 3)
elif (doa >= 90 and doa < 270):
print ("motor DOA >90: " + str(doa))
#return("left")
#vibrate_motor("left")
vibrate.start_vibrate('left', 50, 3)
def vibrate_motor(direction):
if direction == 'left':
print("left motor running")
#GPIO.output(18,True)
#time.sleep(2)
motorLeft_Pulse(50,3)
if direction == 'right':
print("right motor running")
#GPIO.output(23,True)
#time.sleep(2)
motorRight_Pulse(100,2)
def motorLeft_Pulse(intensity, num):
for x in range(num):
print("motor left pulse")
motorLeft.ChangeDutyCycle(intensity)
time.sleep(1)
motorLeft.ChangeDutyCycle(0)
time.sleep(1)
def motorRight_Pulse(intensity, num):
for x in range(num):
print("motor right pulse")
motorRight.ChangeDutyCycle(intensity)
time.sleep(1)
motorRight.ChangeDutyCycle(0)
time.sleep(1)
"""
|
from unittest import mock
import pytest
from django.http import Http404
from know_me import serializers, views
def test_get_queryset(api_rf, km_user_accessor_factory, km_user_factory):
"""
The queryset for the view should include all accessors granting
access to the requesting user's Know Me user.
"""
km_user = km_user_factory()
api_rf.user = km_user.user
km_user_accessor_factory(km_user=km_user)
km_user_accessor_factory(km_user=km_user)
view = views.AccessorListView()
view.request = api_rf.get("/")
assert list(view.get_queryset()) == list(km_user.km_user_accessors.all())
def test_get_queryset_no_km_user(api_rf, user_factory):
"""
If the requesting user has no associated Know Me user, a 404 error
should be raised.
"""
user = user_factory()
api_rf.user = user
view = views.AccessorListView()
view.request = api_rf.get("/")
with pytest.raises(Http404):
view.get_queryset()
def test_get_serializer_class():
"""
Test the serializer class used by the view.
"""
view = views.AccessorListView()
assert view.get_serializer_class() == serializers.KMUserAccessorSerializer
def test_perform_create(api_rf, km_user_factory):
"""
If the requesting user has an associated Know Me user, that Know Me
user should be passed to the serializer being saved.
"""
km_user = km_user_factory()
api_rf.user = km_user.user
serializer = mock.Mock(name="Mock Serializer")
view = views.AccessorListView()
view.request = api_rf.post("/")
view.perform_create(serializer)
assert serializer.save.call_args[1] == {"km_user": km_user}
def test_perform_create_no_km_user(api_rf, user_factory):
"""
If the requesting user does not have an associated Know Me user, the
method should throw a 404 exception.
"""
user = user_factory()
api_rf.user = user
serializer = mock.Mock(name="Mock Serializer")
view = views.AccessorListView()
view.request = api_rf.post("/")
with pytest.raises(Http404):
view.perform_create(serializer)
|
"""Tests for 1-Wire sensor platform."""
from unittest.mock import patch
from pyownet.protocol import Error as ProtocolError
import pytest
from homeassistant.components.onewire.const import (
DEFAULT_SYSBUS_MOUNT_DIR,
DOMAIN,
PLATFORMS,
)
from homeassistant.components.sensor import ATTR_STATE_CLASS, DOMAIN as SENSOR_DOMAIN
from homeassistant.const import (
ATTR_DEVICE_CLASS,
ATTR_MANUFACTURER,
ATTR_MODEL,
ATTR_NAME,
ATTR_UNIT_OF_MEASUREMENT,
)
from homeassistant.setup import async_setup_component
from . import (
setup_onewire_patched_owserver_integration,
setup_onewire_sysbus_integration,
setup_owproxy_mock_devices,
setup_sysbus_mock_devices,
)
from .const import MOCK_OWPROXY_DEVICES, MOCK_SYSBUS_DEVICES
from tests.common import assert_setup_component, mock_device_registry, mock_registry
MOCK_COUPLERS = {
key: value for (key, value) in MOCK_OWPROXY_DEVICES.items() if "branches" in value
}
async def test_setup_minimum(hass):
"""Test old platform setup with minimum configuration."""
config = {"sensor": {"platform": "onewire"}}
with assert_setup_component(1, "sensor"):
assert await async_setup_component(hass, SENSOR_DOMAIN, config)
await hass.async_block_till_done()
async def test_setup_sysbus(hass):
"""Test old platform setup with SysBus configuration."""
config = {
"sensor": {
"platform": "onewire",
"mount_dir": DEFAULT_SYSBUS_MOUNT_DIR,
}
}
with assert_setup_component(1, "sensor"):
assert await async_setup_component(hass, SENSOR_DOMAIN, config)
await hass.async_block_till_done()
async def test_setup_owserver(hass):
"""Test old platform setup with OWServer configuration."""
config = {"sensor": {"platform": "onewire", "host": "localhost"}}
with assert_setup_component(1, "sensor"):
assert await async_setup_component(hass, SENSOR_DOMAIN, config)
await hass.async_block_till_done()
async def test_setup_owserver_with_port(hass):
"""Test old platform setup with OWServer configuration."""
config = {"sensor": {"platform": "onewire", "host": "localhost", "port": "1234"}}
with assert_setup_component(1, "sensor"):
assert await async_setup_component(hass, SENSOR_DOMAIN, config)
await hass.async_block_till_done()
@pytest.mark.parametrize("device_id", ["1F.111111111111"])
@patch("homeassistant.components.onewire.onewirehub.protocol.proxy")
async def test_sensors_on_owserver_coupler(owproxy, hass, device_id):
"""Test for 1-Wire sensors connected to DS2409 coupler."""
entity_registry = mock_registry(hass)
mock_coupler = MOCK_COUPLERS[device_id]
dir_side_effect = [] # List of lists of string
read_side_effect = [] # List of byte arrays
dir_side_effect.append([f"/{device_id}/"]) # dir on root
read_side_effect.append(device_id[0:2].encode()) # read family on root
if "inject_reads" in mock_coupler:
read_side_effect += mock_coupler["inject_reads"]
expected_sensors = []
for branch, branch_details in mock_coupler["branches"].items():
dir_side_effect.append(
[ # dir on branch
f"/{device_id}/{branch}/{sub_device_id}/"
for sub_device_id in branch_details
]
)
for sub_device_id, sub_device in branch_details.items():
read_side_effect.append(sub_device_id[0:2].encode())
if "inject_reads" in sub_device:
read_side_effect.extend(sub_device["inject_reads"])
expected_sensors += sub_device[SENSOR_DOMAIN]
for expected_sensor in sub_device[SENSOR_DOMAIN]:
read_side_effect.append(expected_sensor["injected_value"])
# Ensure enough read side effect
read_side_effect.extend([ProtocolError("Missing injected value")] * 10)
owproxy.return_value.dir.side_effect = dir_side_effect
owproxy.return_value.read.side_effect = read_side_effect
with patch("homeassistant.components.onewire.PLATFORMS", [SENSOR_DOMAIN]):
await setup_onewire_patched_owserver_integration(hass)
await hass.async_block_till_done()
assert len(entity_registry.entities) == len(expected_sensors)
for expected_sensor in expected_sensors:
entity_id = expected_sensor["entity_id"]
registry_entry = entity_registry.entities.get(entity_id)
assert registry_entry is not None
assert registry_entry.unique_id == expected_sensor["unique_id"]
assert registry_entry.disabled == expected_sensor.get("disabled", False)
state = hass.states.get(entity_id)
assert state.state == expected_sensor["result"]
for attr in (ATTR_DEVICE_CLASS, ATTR_STATE_CLASS, ATTR_UNIT_OF_MEASUREMENT):
assert state.attributes.get(attr) == expected_sensor[attr]
assert state.attributes["device_file"] == expected_sensor["device_file"]
@pytest.mark.parametrize("device_id", MOCK_OWPROXY_DEVICES.keys())
@pytest.mark.parametrize("platform", PLATFORMS)
@patch("homeassistant.components.onewire.onewirehub.protocol.proxy")
async def test_owserver_setup_valid_device(owproxy, hass, device_id, platform):
"""Test for 1-Wire device.
As they would be on a clean setup: all binary-sensors and switches disabled.
"""
entity_registry = mock_registry(hass)
device_registry = mock_device_registry(hass)
setup_owproxy_mock_devices(owproxy, platform, [device_id])
mock_device = MOCK_OWPROXY_DEVICES[device_id]
expected_entities = mock_device.get(platform, [])
with patch("homeassistant.components.onewire.PLATFORMS", [platform]):
await setup_onewire_patched_owserver_integration(hass)
await hass.async_block_till_done()
assert len(entity_registry.entities) == len(expected_entities)
if len(expected_entities) > 0:
device_info = mock_device["device_info"]
assert len(device_registry.devices) == 1
registry_entry = device_registry.async_get_device({(DOMAIN, device_id)})
assert registry_entry is not None
assert registry_entry.identifiers == {(DOMAIN, device_id)}
assert registry_entry.manufacturer == device_info[ATTR_MANUFACTURER]
assert registry_entry.name == device_info[ATTR_NAME]
assert registry_entry.model == device_info[ATTR_MODEL]
for expected_entity in expected_entities:
entity_id = expected_entity["entity_id"]
registry_entry = entity_registry.entities.get(entity_id)
assert registry_entry is not None
assert registry_entry.unique_id == expected_entity["unique_id"]
assert registry_entry.disabled == expected_entity.get("disabled", False)
state = hass.states.get(entity_id)
if registry_entry.disabled:
assert state is None
else:
assert state.state == expected_entity["result"]
for attr in (ATTR_DEVICE_CLASS, ATTR_STATE_CLASS, ATTR_UNIT_OF_MEASUREMENT):
assert state.attributes.get(attr) == expected_entity[attr]
assert state.attributes["device_file"] == expected_entity.get(
"device_file", registry_entry.unique_id
)
@pytest.mark.parametrize("device_id", MOCK_SYSBUS_DEVICES.keys())
async def test_onewiredirect_setup_valid_device(hass, device_id):
"""Test that sysbus config entry works correctly."""
entity_registry = mock_registry(hass)
device_registry = mock_device_registry(hass)
glob_result, read_side_effect = setup_sysbus_mock_devices(
SENSOR_DOMAIN, [device_id]
)
mock_device = MOCK_SYSBUS_DEVICES[device_id]
expected_entities = mock_device.get(SENSOR_DOMAIN, [])
with patch("pi1wire._finder.glob.glob", return_value=glob_result,), patch(
"pi1wire.OneWire.get_temperature",
side_effect=read_side_effect,
):
assert await setup_onewire_sysbus_integration(hass)
await hass.async_block_till_done()
assert len(entity_registry.entities) == len(expected_entities)
if len(expected_entities) > 0:
device_info = mock_device["device_info"]
assert len(device_registry.devices) == 1
registry_entry = device_registry.async_get_device({(DOMAIN, device_id)})
assert registry_entry is not None
assert registry_entry.identifiers == {(DOMAIN, device_id)}
assert registry_entry.manufacturer == device_info[ATTR_MANUFACTURER]
assert registry_entry.name == device_info[ATTR_NAME]
assert registry_entry.model == device_info[ATTR_MODEL]
for expected_sensor in expected_entities:
entity_id = expected_sensor["entity_id"]
registry_entry = entity_registry.entities.get(entity_id)
assert registry_entry is not None
assert registry_entry.unique_id == expected_sensor["unique_id"]
state = hass.states.get(entity_id)
assert state.state == expected_sensor["result"]
for attr in (ATTR_DEVICE_CLASS, ATTR_STATE_CLASS, ATTR_UNIT_OF_MEASUREMENT):
assert state.attributes.get(attr) == expected_sensor[attr]
|
import uuid
from django.test import TestCase, SimpleTestCase
from casexml.apps.case.exceptions import IllegalCaseId
from casexml.apps.case.mock import CaseBlock
from casexml.apps.case.models import CommCareCase
from casexml.apps.case.xform import CaseDbCache
from casexml.apps.case.xml import V2
from corehq.form_processor.interfaces import FormProcessorInterface
class CaseDbCacheTest(TestCase):
"""
Tests the functionality of the CaseDbCache object
"""
def testDomainCheck(self):
id = uuid.uuid4().hex
FormProcessorInterface.post_case_blocks([
CaseBlock(
create=True, case_id=id,
user_id='some-user'
).as_xml()
], {'domain': 'good-domain'}
)
bad_cache = CaseDbCache(domain='bad-domain')
try:
bad_cache.get(id)
self.fail('domain security check failed to raise exception')
except IllegalCaseId:
pass
good_cache = CaseDbCache(domain='good-domain')
case = good_cache.get(id)
self.assertEqual('some-user', case.user_id) # just sanity check it's the right thing
def testDocTypeCheck(self):
id = uuid.uuid4().hex
CommCareCase.get_db().save_doc({
"_id": id,
"doc_type": "AintNoCasesHere"
})
cache = CaseDbCache()
try:
cache.get(id)
self.fail('doc type security check failed to raise exception')
except IllegalCaseId:
pass
doc_back = CommCareCase.get_db().get(id)
self.assertEqual("AintNoCasesHere", doc_back['doc_type'])
def testGetPopulatesCache(self):
case_ids = _make_some_cases(3)
cache = CaseDbCache()
for id in case_ids:
self.assertFalse(cache.in_cache(id))
for i, id in enumerate(case_ids):
case = cache.get(id)
self.assertEqual(str(i), case.my_index)
for id in case_ids:
self.assertTrue(cache.in_cache(id))
def testSetPopulatesCache(self):
case_ids = _make_some_cases(3)
cache = CaseDbCache()
for id in case_ids:
self.assertFalse(cache.in_cache(id))
for id in case_ids:
cache.set(id, CommCareCase.get(id))
for i, id in enumerate(case_ids):
self.assertTrue(cache.in_cache(id))
case = cache.get(id)
self.assertEqual(str(i), case.my_index)
def testPopulate(self):
case_ids = _make_some_cases(3)
cache = CaseDbCache()
for id in case_ids:
self.assertFalse(cache.in_cache(id))
cache.populate(case_ids)
for id in case_ids:
self.assertTrue(cache.in_cache(id))
# sanity check
for i, id in enumerate(case_ids):
case = cache.get(id)
self.assertEqual(str(i), case.my_index)
def testStripHistory(self):
case_ids = _make_some_cases(3)
history_cache = CaseDbCache()
for i, id in enumerate(case_ids):
self.assertFalse(history_cache.in_cache(id))
case = history_cache.get(id)
self.assertEqual(str(i), case.my_index)
self.assertTrue(len(case.actions) > 0)
nohistory_cache = CaseDbCache(strip_history=True)
for i, id in enumerate(case_ids):
self.assertFalse(nohistory_cache.in_cache(id))
case = nohistory_cache.get(id)
self.assertEqual(str(i), case.my_index)
self.assertTrue(len(case.actions) == 0)
more_case_ids = _make_some_cases(3)
history_cache.populate(more_case_ids)
nohistory_cache.populate(more_case_ids)
for i, id in enumerate(more_case_ids):
self.assertTrue(history_cache.in_cache(id))
case = history_cache.get(id)
self.assertEqual(str(i), case.my_index)
self.assertTrue(len(case.actions) > 0)
for i, id in enumerate(more_case_ids):
self.assertTrue(nohistory_cache.in_cache(id))
case = nohistory_cache.get(id)
self.assertEqual(str(i), case.my_index)
self.assertTrue(len(case.actions) == 0)
def test_nowrap(self):
case_ids = _make_some_cases(1)
cache = CaseDbCache(wrap=False)
case = cache.get(case_ids[0])
self.assertTrue(isinstance(case, dict))
self.assertFalse(isinstance(case, CommCareCase))
class CaseDbCacheNoDbTest(SimpleTestCase):
def test_wrap_lock_dependency(self):
# valid combinations
CaseDbCache(domain='some-domain', lock=False, wrap=True)
CaseDbCache(domain='some-domain', lock=False, wrap=False)
CaseDbCache(domain='some-domain', lock=True, wrap=True)
with self.assertRaises(ValueError):
# invalid
CaseDbCache(domain='some-domain', lock=True, wrap=False)
def _make_some_cases(howmany, domain='dbcache-test'):
ids = [uuid.uuid4().hex for i in range(howmany)]
FormProcessorInterface.post_case_blocks([
CaseBlock(
create=True,
case_id=ids[i],
user_id='some-user',
update={
'my_index': i,
}
).as_xml() for i in range(howmany)
], {'domain': domain})
return ids
|
import unittest
import os
import sys
import subprocess
from dimod import sym, BINARY, INTEGER, ConstrainedQuadraticModel
from job_shop_scheduler import JSSCQM
from data import Data
import utils.plot_schedule as job_plotter
project_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
class TestSmoke(unittest.TestCase):
@unittest.skipIf(os.getenv('SKIP_INT_TESTS'), "Skipping integration test.")
def test_smoke(self):
"""Run job_shop_scheduler.py and check that nothing crashes"""
demo_file = os.path.join(project_dir, 'job_shop_scheduler.py')
subprocess.check_output([sys.executable, demo_file])
class TestData(unittest.TestCase):
def test_data(self):
"""Test input data name, size and max completion time"""
input_file = "tests/instance_test.txt"
test_data = Data(input_file)
self.assertEqual(test_data.instance_name, 'instance_test')
test_data.read_input_data()
self.assertEqual(test_data.num_jobs, 3)
self.assertEqual(test_data.num_machines, 3)
self.assertEqual(test_data.max_makespan, 24)
def test_prep_solution_for_plotting(self):
"""Test if data is formatted correctly for plotting"""
input_file = "tests/instance_test.txt"
test_data = Data(input_file)
test_data.read_input_data()
solution = {(0, 0): (1, 8.0, 2), (1, 0): (1, 11.0, 3),
(2, 0): (1, 14.0, 0), (0, 1): (2, 0.0, 3),
(1, 1): (2, 3.0, 2), (2, 1): (2, 5.0, 3),
(0, 2): (0, 3.0, 4), (1, 2): (0, 7.0, 2),
(2, 2): (0, 9.0, 5)}
job_start_time, processing_time = \
job_plotter.plot_solution(test_data, solution)
self.assertEqual({0: [8.0, 0.0, 3.0],
1: [11.0, 3.0, 7.0],
2: [14.0, 5.0, 9.0]}, job_start_time)
self.assertEqual({0: [2, 3, 4], 1: [3, 2, 2], 2: [0, 3, 5]},
processing_time)
def test_jss_cqm_size(self):
"Testing size of CQM model built for an JSS instance"""
input_file = "tests/instance_test.txt"
test_data = Data(input_file)
test_data.read_input_data()
model = JSSCQM()
model.define_cqm_model()
model.define_variables(test_data)
model.add_precedence_constraints(test_data)
model.add_quadratic_overlap_constraint(test_data)
model.add_makespan_constraint(test_data)
model.define_objective_function()
cqm = model.cqm
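        # Expected counts for the 3-job x 3-machine test instance (assuming
        # the usual disjunctive JSS formulation): one integer start time per
        # (job, machine) plus the makespan -> 10 integers, and one binary
        # ordering variable per job pair per machine -> 9 binaries.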
num_binaries = sum(cqm.vartype(v) is BINARY for v in cqm.variables)
self.assertEqual(num_binaries, 9)
num_integers = sum(cqm.vartype(v) is INTEGER for v in cqm.variables)
self.assertEqual(num_integers, 10)
num_linear_constraints = sum(
constraint.lhs.is_linear() for constraint in
cqm.constraints.values())
self.assertEqual(num_linear_constraints, 9)
num_quadratic_constraints = sum(
not constraint.lhs.is_linear() for constraint in
cqm.constraints.values())
self.assertEqual(num_quadratic_constraints, 9)
num_ge_inequality_constraints = sum(
constraint.sense is sym.Sense.Ge for constraint in
cqm.constraints.values())
self.assertEqual(num_ge_inequality_constraints, 18)
class CQM_model(unittest.TestCase):
def test_model(self):
"""Test if the cqm gives correct energy for a given sample"""
        # Check violations for a feasible sample
sample = {'x0_2': 3.0, 'x0_1': 0.0, 'x0_0': 10.0, 'x1_2': 10.0,
'x1_1': 6.0, 'x1_0': 14.0, 'x2_2': 14.0, 'x2_1': 8.0,
'x2_0': 20.0, 'y0_1_0': 1.0, 'y0_1_1': 1.0, 'y0_1_2': 1.0,
'y0_2_0': 1.0, 'y0_2_1': 1.0, 'y0_2_2': 1.0, 'y1_2_0': 1.0,
'y1_2_1': 1.0, 'y1_2_2': 1.0, 'makespan': 20.0}
expected_violations = {
'pj0_m1': -0.0, 'pj0_m2': -3.0, 'pj1_m1': -2.0,
'pj1_m2': -2.0,
'pj2_m1': -3.0, 'pj2_m2': -1.0, 'OneJobj0_j1_m0': -2.0,
'OneJobj0_j1_m1': -3.0, 'OneJobj0_j1_m2': -3.0,
'OneJobj0_j2_m0': -8.0,
'OneJobj0_j2_m1': -5.0, 'OneJobj0_j2_m2': -7.0,
'OneJobj1_j2_m0': -3.0,
'OneJobj1_j2_m1': -0.0, 'OneJobj1_j2_m2': -2.0,
'makespan_ctr0': -8.0,
'makespan_ctr1': -3.0, 'makespan_ctr2': -0.0}
input_file = "tests/instance_test.txt"
test_data = Data(input_file)
test_data.read_input_data()
model = JSSCQM()
model.define_cqm_model()
model.define_variables(test_data)
model.add_precedence_constraints(test_data)
model.add_quadratic_overlap_constraint(test_data)
model.add_makespan_constraint(test_data)
model.define_objective_function()
violations = {label: violation for (label, violation)
in ConstrainedQuadraticModel.iter_violations(model.cqm,
sample)}
self.assertEqual(violations, expected_violations)
self.assertTrue(model.cqm.check_feasible(sample))
        # Check violations for an infeasible sample
infeasible_sample = sample.copy()
infeasible_sample['x0_2'] = 7.0
infeasible_sample['make_span'] = 16.0
violations = {label: violation for (label, violation)
in ConstrainedQuadraticModel.iter_violations(
model.cqm, infeasible_sample)}
expected_violations = {
'pj0_m1': -4.0, 'pj0_m2': 1.0, 'pj1_m1': -2.0, 'pj1_m2': -2.0,
'pj2_m1': -3.0, 'pj2_m2': -1.0, 'OneJobj0_j1_m0': -2.0,
'OneJobj0_j1_m1': -3.0, 'OneJobj0_j1_m2': 1.0,
'OneJobj0_j2_m0': -8.0, 'OneJobj0_j2_m1': -5.0,
'OneJobj0_j2_m2': -3.0, 'OneJobj1_j2_m0': -3.0,
'OneJobj1_j2_m1': -0.0, 'OneJobj1_j2_m2': -2.0,
'makespan_ctr0': -8.0, 'makespan_ctr1': -3.0,
'makespan_ctr2': -0.0}
self.assertEqual(violations, expected_violations)
self.assertFalse(model.cqm.check_feasible(infeasible_sample))
|
from IPython.display import Image
#%matplotlib inline
from distutils.version import LooseVersion as Version
from sklearn import __version__ as sklearn_version
#Image(filename='./images/10_01.png', width=500)
import pandas as pd
df = pd.read_csv('https://archive.ics.uci.edu/ml/machine-learning-databases/'
'housing/housing.data',
header=None,
                 sep=r'\s+')
df.columns = ['CRIM', 'ZN', 'INDUS', 'CHAS',
'NOX', 'RM', 'AGE', 'DIS', 'RAD',
'TAX', 'PTRATIO', 'B', 'LSTAT', 'MEDV']
df.head()
df = pd.read_csv('https://raw.githubusercontent.com/rasbt/python-machine-learning-book/master/code/datasets/housing/housing.data',
                 header=None, sep=r'\s+')
df.columns = ['CRIM', 'ZN', 'INDUS', 'CHAS',
'NOX', 'RM', 'AGE', 'DIS', 'RAD',
'TAX', 'PTRATIO', 'B', 'LSTAT', 'MEDV']
df.head()
import matplotlib.pyplot as plt
import seaborn as sns
sns.set(style='whitegrid', context='notebook')
cols = ['LSTAT', 'INDUS', 'NOX', 'RM', 'MEDV']
sns.pairplot(df[cols], size=2.5)
plt.tight_layout()
# plt.savefig('./figures/scatter.png', dpi=300)
plt.show()
import numpy as np
cm = np.corrcoef(df[cols].values.T)
sns.set(font_scale=1.5)
hm = sns.heatmap(cm,
cbar=True,
annot=True,
square=True,
fmt='.2f',
annot_kws={'size': 15},
yticklabels=cols,
xticklabels=cols)
# plt.tight_layout()
# plt.savefig('./figures/corr_mat.png', dpi=300)
plt.show()
sns.reset_orig()
#%matplotlib inline
class LinearRegressionGD(object):
def __init__(self, eta=0.001, n_iter=20):
self.eta = eta
self.n_iter = n_iter
def fit(self, X, y):
self.w_ = np.zeros(1 + X.shape[1])
self.cost_ = []
for i in range(self.n_iter):
output = self.net_input(X)
errors = (y - output)
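            # Batch gradient-descent step for the SSE cost J = 0.5 * sum(errors**2):
            # the gradient w.r.t. the weights is -X.T @ errors, so the weights
            # move by eta * X.T @ errors and the bias by eta * errors.sum().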
self.w_[1:] += self.eta * X.T.dot(errors)
self.w_[0] += self.eta * errors.sum()
cost = (errors**2).sum() / 2.0
self.cost_.append(cost)
return self
def net_input(self, X):
return np.dot(X, self.w_[1:]) + self.w_[0]
def predict(self, X):
return self.net_input(X)
X = df[['RM']].values
y = df['MEDV'].values
from sklearn.preprocessing import StandardScaler
sc_x = StandardScaler()
sc_y = StandardScaler()
X_std = sc_x.fit_transform(X)
y_std = sc_y.fit_transform(y[:, np.newaxis]).flatten()
lr = LinearRegressionGD()
lr.fit(X_std, y_std)
plt.plot(range(1, lr.n_iter+1), lr.cost_)
plt.ylabel('SSE')
plt.xlabel('Epoch')
plt.tight_layout()
# plt.savefig('./figures/cost.png', dpi=300)
plt.show()
def lin_regplot(X, y, model):
plt.scatter(X, y, c='lightblue')
plt.plot(X, model.predict(X), color='red', linewidth=2)
return
lin_regplot(X_std, y_std, lr)
plt.xlabel('Average number of rooms [RM] (standardized)')
plt.ylabel('Price in $1000\'s [MEDV] (standardized)')
plt.tight_layout()
# plt.savefig('./figures/gradient_fit.png', dpi=300)
plt.show()
|
import numpy as np
import pandas as pd
from weatherClass import weatherClass
from IdentifierClass import identifierClass
from eventsClass import eventsClass
import datetime
### load some data:
#read the ticket+complaint data, combined for location:
# events fields: date, lat, lng, address, identifier, index
temp = pd.read_csv('/Users/nbechor/Insight/noslipwalk/noslipwalk/features/negative_labels_5_d_15_with_identifier.csv')
events = eventsClass(temp)
# read the identifier to weather data:
# this is the result of the nearest-neighbor match for weather. Each
# key address has an identifier, that identifier is tied to the different
# lat longs of a given address, and to the closest weather data grid point
# fields: lat, lon, identifier as index
temp = pd.read_csv('/Users/nbechor/Insight/noslipwalk/noslipwalk/features/identifier2weatherloc.csv')
identifier2weatherloc = identifierClass(temp)
# weather_features fields:
# fields: time, lat, lon, frost indicator,thaw indicator, rain indicator,
# snow indicator, rain amount, snow amount
temp = pd.read_csv('/Users/nbechor/Insight/noslipwalk/noslipwalk/features/weather_features.csv')
weather_features = weatherClass(temp)
weather_features.df = weather_features.df.fillna(0)
print(weather_features.df)
newPointEvents = pd.DataFrame() # we'll add to this in the loop (the main output)
# going over all identifiers, and events for each:
identifiers = events.df['identifier'].unique().astype('int').tolist()
new_events = pd.DataFrame()
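# For each identifier (unique key address), look up its weather grid point and
# attach the matching weather record to every event at that address, tagging it
# with the event's exact lat/lon, address, label and date-derived features.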
for identifier in identifiers:
pointEvents = events.df[events.df['identifier'] == identifier]
lat,lon,errFlag = identifierClass.latLonFromRecord(identifier2weatherloc,identifier)
    if not errFlag:
pointWeather = weatherClass.weatherByLatLon(weather_features,lat,lon)
# now need to go over events and get weather for each of them:
for i in range(0,pointEvents.shape[0]):
date = pointEvents['date'].iloc[i]
time_struct = date.timetuple()
year = time_struct.tm_year
doy = time_struct.tm_yday
weather = pointWeather[pointWeather['date']==date]
            if not weather.empty:
# switch the lat lon in the weather for the lat lon of the event:
try:
weather['lat'] = pointEvents['lat'].iloc[i]
weather['lon'] = pointEvents['lng'].iloc[i]
weather['address'] = pointEvents['address'].iloc[i]
weather['label'] = 0
weather['year'] = year
weather['day of year'] = doy
weather['year + day of year'] = year+doy
new_events = new_events.append(weather)
except:
print(weather.shape)
print('something off for date',date,'identifier',identifier)
print(new_events)
new_events.to_csv('/Users/nbechor/Insight/noslipwalk/noslipwalk/features/features_label0.csv')
|
from datetime import datetime
from unittest.mock import ANY, patch
from django.urls import reverse
from django.utils.dateparse import parse_datetime
from rest_framework import status
from internal.tests.base_test import tz
from playlist.date_stop import KARAOKE_JOB_NAME, clear_date_stop
from playlist.models import Karaoke, PlayerError, PlaylistEntry
from playlist.tests.base_test import PlaylistAPITestCase
class KaraokeViewTestCase(PlaylistAPITestCase):
url = reverse("playlist-karaoke")
url_digest = reverse("playlist-digest")
def setUp(self):
self.create_test_data()
def test_get_karaoke(self):
"""Test an authenticated user can access the karaoke."""
# set stop date
karaoke = Karaoke.objects.get_object()
date_stop = datetime.now(tz)
karaoke.date_stop = date_stop
karaoke.save()
# login as simple user
self.authenticate(self.user)
# get karaoke
response = self.client.get(self.url)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertTrue(response.data["ongoing"])
self.assertTrue(response.data["can_add_to_playlist"])
self.assertTrue(response.data["player_play_next_song"])
self.assertEqual(parse_datetime(response.data["date_stop"]), date_stop)
# Get karaoke again but through digest route
response = self.client.get(self.url_digest)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertTrue(response.data["karaoke"]["ongoing"])
self.assertTrue(response.data["karaoke"]["can_add_to_playlist"])
self.assertTrue(response.data["karaoke"]["player_play_next_song"])
self.assertEqual(
parse_datetime(response.data["karaoke"]["date_stop"]), date_stop
)
def test_get_karaoke_forbidden(self):
"""Test an unauthenticated user cannot access the karaoke."""
# get karaoke
response = self.client.get(self.url)
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
@patch("playlist.views.send_to_channel")
def test_patch_karaoke_status_booleans(self, mocked_send_to_channel):
"""Test a manager can modify the karaoke status booleans."""
# login as manager
self.authenticate(self.manager)
# set can add to playlist to false
response = self.client.patch(self.url, {"can_add_to_playlist": False})
self.assertEqual(response.status_code, status.HTTP_200_OK)
karaoke = Karaoke.objects.get_object()
self.assertFalse(karaoke.can_add_to_playlist)
# set player play next song to false
response = self.client.patch(self.url, {"player_play_next_song": False})
self.assertEqual(response.status_code, status.HTTP_200_OK)
karaoke = Karaoke.objects.get_object()
self.assertFalse(karaoke.player_play_next_song)
# set karaoke ongoing to false
response = self.client.patch(self.url, {"ongoing": False})
self.assertEqual(response.status_code, status.HTTP_200_OK)
karaoke = Karaoke.objects.get_object()
self.assertFalse(karaoke.ongoing)
mocked_send_to_channel.assert_called_with(ANY, "send_idle")
def test_patch_karaoke_forbidden(self):
"""Test a simple user or an unauthenticated user cannot modify the karaoke."""
# login as user
self.authenticate(self.user)
        # set karaoke ongoing to false
response = self.client.patch(self.url, {"ongoing": False})
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
@patch("playlist.views.send_to_channel")
def test_patch_ongoing_false(self, mocked_send_to_channel):
"""Test the playlist has been emptied when the kara is not ongoing.
And empty the player errors pool.
"""
url_player_status = reverse("playlist-player-status")
# the player is playing
self.player_play_next_song()
# there is a player error
PlayerError.objects.create(
playlist_entry=self.pe3, error_message="error message"
)
# login as manager
self.authenticate(self.manager)
# pre-assertion
# the playlist is not empty
self.assertTrue(PlaylistEntry.objects.all())
# the player errors list is not empty
self.assertTrue(PlayerError.objects.all())
# the player is currently playing
response = self.client.get(url_player_status)
self.assertTrue(response.data["playlist_entry"])
# set kara ongoing to false
response = self.client.patch(self.url, {"ongoing": False})
self.assertEqual(response.status_code, status.HTTP_200_OK)
# post-assertion
# the playlist is empty now
self.assertFalse(PlaylistEntry.objects.all())
# the player errors list is empty now
self.assertFalse(PlayerError.objects.all())
# the device was requested to be idle
mocked_send_to_channel.assert_called_with(ANY, "send_idle")
# the player is not playing anything
response = self.client.get(url_player_status)
self.assertFalse(response.data["playlist_entry"])
def test_put_player_play_next_song_false(self):
"""Test the playlist has not been emptied when can't add to playlist."""
url_player_status = reverse("playlist-player-status")
# the player is playing
self.player_play_next_song()
# there is a player error
PlayerError.objects.create(
playlist_entry=self.pe3, error_message="error message"
)
# login as manager
self.authenticate(self.manager)
# pre-assertion
# the playlist is not empty
self.assertTrue(PlaylistEntry.objects.all())
# the player errors list is not empty
self.assertTrue(PlayerError.objects.all())
# the player is currently playing
response = self.client.get(url_player_status)
self.assertTrue(response.data["playlist_entry"])
# set can't add to playlist
response = self.client.put(self.url, {"can_add_to_playlist": False})
self.assertEqual(response.status_code, status.HTTP_200_OK)
# post-assertion
# the playlist is not empty
self.assertTrue(PlaylistEntry.objects.all())
# the player errors list is not empty
self.assertTrue(PlayerError.objects.all())
        # the player is still playing
response = self.client.get(url_player_status)
self.assertTrue(response.data["playlist_entry"])
@patch("playlist.views.send_to_channel")
def test_put_resume_kara_player_idle(self, mocked_send_to_channel):
"""Test idle player is requested to play after play next song.
Player play next song was false and the player idle.
When player play next song switch to true,
the player should be requested to play the next
song of the playlist.
"""
url_player_status = reverse("playlist-player-status")
# set player play next song to false
self.set_karaoke(player_play_next_song=False)
# login as manager
self.authenticate(self.manager)
# the player is not currently playing
response = self.client.get(url_player_status)
self.assertIsNone(response.data["playlist_entry"])
# resume the kara
response = self.client.put(self.url, {"player_play_next_song": True})
self.assertEqual(response.status_code, status.HTTP_200_OK)
# post-assertion
# the player is requested to start
mocked_send_to_channel.assert_called_with(
ANY, "send_playlist_entry", data={"playlist_entry": self.pe1}
)
@patch("playlist.views.send_to_channel")
def test_put_resume_kara_not_idle(self, mocked_send_to_channel):
"""Test not idle player is not requested after play next song.
Player play next song was false and the player not idle.
When play next song is switched to true,
the player should not be requested to do anything.
"""
url_player_status = reverse("playlist-player-status")
# set player play next song to false
self.set_karaoke(player_play_next_song=False)
# the player is playing
self.player_play_next_song()
# login as manager
self.authenticate(self.manager)
# the player is currently playing
response = self.client.get(url_player_status)
self.assertTrue(response.data["playlist_entry"])
# reset the mock
mocked_send_to_channel.reset_mock()
# resume the kara
response = self.client.put(self.url, {"player_play_next_song": True})
self.assertEqual(response.status_code, status.HTTP_200_OK)
# post-assertion
# the player is not requested to do anything
mocked_send_to_channel.assert_not_called()
@patch("playlist.views.send_to_channel")
def test_patch_resume_kara_playlist_empty(self, mocked_send_to_channel):
"""Test send_playlist_entry is not sent when there is nothing to play."""
url_player_status = reverse("playlist-player-status")
# login as manager
self.authenticate(self.manager)
# empty the playlist
PlaylistEntry.objects.all().delete()
# the player is not playing anything
response = self.client.get(url_player_status)
self.assertFalse(response.data["playlist_entry"])
# resume the kara
response = self.client.put(self.url, {"player_play_next_song": True})
self.assertEqual(response.status_code, status.HTTP_200_OK)
# post-assertion
# no command was sent to device
mocked_send_to_channel.assert_not_called()
@patch("playlist.views.scheduler")
def test_patch_karaoke_date_stop(self, mocked_scheduler):
"""Test a manager can modify the kara date stop and scheduler is called."""
# Mock return value of add_job
mocked_scheduler.add_job.return_value.id = "job_id"
# login as manager
self.authenticate(self.manager)
# set karaoke date stop
date_stop = datetime.now(tz)
response = self.client.patch(self.url, {"date_stop": date_stop.isoformat()})
self.assertEqual(response.status_code, status.HTTP_200_OK)
# Check karaoke was updated
karaoke = Karaoke.objects.get_object()
self.assertEqual(karaoke.date_stop, date_stop)
# Check job was added
mocked_scheduler.add_job.assert_called_with(
clear_date_stop, "date", run_date=date_stop
)
@patch("playlist.views.scheduler")
@patch("playlist.views.cache")
def test_patch_karaoke_clear_date_stop(self, mocked_cache, mocked_scheduler):
"""Test a manager can clear the kara date stop and job is cancelled."""
# set karaoke date stop
karaoke = Karaoke.objects.get_object()
date_stop = datetime.now(tz)
karaoke.date_stop = date_stop
karaoke.save()
# login as manager
self.authenticate(self.manager)
# clear karaoke date stop
response = self.client.patch(self.url, {"date_stop": None})
self.assertEqual(response.status_code, status.HTTP_200_OK)
# Check karaoke was updated
karaoke = Karaoke.objects.get_object()
self.assertIsNone(karaoke.date_stop)
# Check remove was called
mocked_cache.get.assert_called_with(KARAOKE_JOB_NAME)
mocked_scheduler.get_job.return_value.remove.assert_called_with()
@patch("playlist.views.scheduler")
@patch("playlist.views.cache")
def test_patch_karaoke_clear_date_stop_existing_job_id(
self, mocked_cache, mocked_scheduler
):
"""Test a manager can clear existing date stop."""
# create existing job in cache
mocked_cache.get.return_value = "job_id"
# login as manager
self.authenticate(self.manager)
# clear karaoke date stop
response = self.client.patch(self.url, {"date_stop": None})
self.assertEqual(response.status_code, status.HTTP_200_OK)
# Check remove was called
mocked_cache.get.assert_called_with(KARAOKE_JOB_NAME)
mocked_scheduler.get_job.return_value.remove.assert_called_with()
mocked_cache.delete.assert_not_called()
@patch("playlist.views.scheduler")
@patch("playlist.views.cache")
def test_patch_karaoke_clear_date_stop_existing_job_id_no_job(
self, mocked_cache, mocked_scheduler
):
"""Test a manager can clear existing date stop without job."""
# create existing job in cache
mocked_cache.get.return_value = "job_id"
mocked_scheduler.get_job.return_value = None
# login as manager
self.authenticate(self.manager)
# clear karaoke date stop
response = self.client.patch(self.url, {"date_stop": None})
self.assertEqual(response.status_code, status.HTTP_200_OK)
# Check remove was called
mocked_cache.get.assert_called_with(KARAOKE_JOB_NAME)
mocked_cache.delete.assert_called_with(KARAOKE_JOB_NAME)
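    # A rough sketch (comments only, not the project's actual view code) of the
    # scheduler wiring these tests exercise, assuming an APScheduler-style scheduler,
    # Django's cache, and the clear_date_stop / KARAOKE_JOB_NAME names used above:
    #
    #   def set_date_stop(date_stop):
    #       job_id = cache.get(KARAOKE_JOB_NAME)
    #       if job_id is not None:
    #           job = scheduler.get_job(job_id)
    #           if job is not None:
    #               job.remove()
    #           else:
    #               cache.delete(KARAOKE_JOB_NAME)
    #       if date_stop is not None:
    #           job = scheduler.add_job(clear_date_stop, "date", run_date=date_stop)
    #           cache.set(KARAOKE_JOB_NAME, job.id)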
|
# -*- coding: utf-8 -*-
"""
Salt compatibility code
"""
# pylint: disable=import-error,unused-import,invalid-name,W0231,W0233
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
import binascii
import logging
import sys
# Import 3rd-party libs
from salt.exceptions import SaltException
from salt.ext.six import binary_type, integer_types, string_types, text_type
from salt.ext.six.moves import StringIO, cStringIO
log = logging.getLogger(__name__)
try:
# Python >2.5
import xml.etree.cElementTree as ElementTree
except Exception: # pylint: disable=broad-except
try:
# Python >2.5
import xml.etree.ElementTree as ElementTree
except Exception: # pylint: disable=broad-except
try:
# normal cElementTree install
import elementtree.cElementTree as ElementTree
except Exception: # pylint: disable=broad-except
try:
# normal ElementTree install
import elementtree.ElementTree as ElementTree
except Exception: # pylint: disable=broad-except
ElementTree = None
# True if we are running on Python 3.
PY3 = sys.version_info.major == 3
if PY3:
import builtins
exceptions = builtins
else:
import exceptions
if ElementTree is not None:
if not hasattr(ElementTree, "ParseError"):
class ParseError(Exception):
"""
older versions of ElementTree do not have ParseError
"""
ElementTree.ParseError = ParseError
def text_(s, encoding="latin-1", errors="strict"):
"""
If ``s`` is an instance of ``binary_type``, return
``s.decode(encoding, errors)``, otherwise return ``s``
"""
return s.decode(encoding, errors) if isinstance(s, binary_type) else s
def bytes_(s, encoding="latin-1", errors="strict"):
"""
If ``s`` is an instance of ``text_type``, return
``s.encode(encoding, errors)``, otherwise return ``s``
"""
return s.encode(encoding, errors) if isinstance(s, text_type) else s
def ascii_native_(s):
"""
Python 3: If ``s`` is an instance of ``text_type``, return
``s.encode('ascii')``, otherwise return ``str(s, 'ascii', 'strict')``
Python 2: If ``s`` is an instance of ``text_type``, return
``s.encode('ascii')``, otherwise return ``str(s)``
"""
if isinstance(s, text_type):
s = s.encode("ascii")
return str(s, "ascii", "strict") if PY3 else s
def native_(s, encoding="latin-1", errors="strict"):
"""
Python 3: If ``s`` is an instance of ``text_type``, return ``s``, otherwise
return ``str(s, encoding, errors)``
Python 2: If ``s`` is an instance of ``text_type``, return
``s.encode(encoding, errors)``, otherwise return ``str(s)``
"""
if PY3:
out = s if isinstance(s, text_type) else str(s, encoding, errors)
else:
out = s.encode(encoding, errors) if isinstance(s, text_type) else str(s)
return out
def string_io(data=None): # cStringIO can't handle unicode
"""
Pass data through to stringIO module and return result
"""
try:
return cStringIO(bytes(data))
except (UnicodeEncodeError, TypeError):
return StringIO(data)
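# For orientation, expected behaviour of the helpers above (latin-1 is the default codec):
#   text_(b"sp\xe4m")   -> u"späm"      (bytes are decoded)
#   text_(u"späm")      -> u"späm"      (text passes through unchanged)
#   bytes_(u"späm")     -> b"sp\xe4m"   (text is encoded)
#   string_io(u"späm")  -> cStringIO when bytes() succeeds, StringIO otherwise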
try:
if PY3:
import ipaddress
else:
import salt.ext.ipaddress as ipaddress
except ImportError:
ipaddress = None
class IPv6AddressScoped(ipaddress.IPv6Address):
"""
Represent and manipulate single IPv6 Addresses.
Scope-aware version
"""
def __init__(self, address):
"""
Instantiate a new IPv6 address object. Scope is moved to an attribute 'scope'.
Args:
address: A string or integer representing the IP
Additionally, an integer can be passed, so
IPv6Address('2001:db8::') == IPv6Address(42540766411282592856903984951653826560)
or, more generally
IPv6Address(int(IPv6Address('2001:db8::'))) == IPv6Address('2001:db8::')
Raises:
AddressValueError: If address isn't a valid IPv6 address.
:param address:
"""
# pylint: disable-all
if not hasattr(self, "_is_packed_binary"):
# This method (below) won't be around for some Python 3 versions
            # and we need to check this differently anyway
self._is_packed_binary = lambda p: isinstance(p, bytes)
# pylint: enable-all
if isinstance(address, string_types) and "%" in address:
buff = address.split("%")
if len(buff) != 2:
raise SaltException('Invalid IPv6 address: "{}"'.format(address))
address, self.__scope = buff
else:
self.__scope = None
if sys.version_info.major == 2:
ipaddress._BaseAddress.__init__(self, address)
ipaddress._BaseV6.__init__(self, address)
else:
# Python 3.4 fix. Versions higher are simply not affected
# https://github.com/python/cpython/blob/3.4/Lib/ipaddress.py#L543-L544
self._version = 6
self._max_prefixlen = ipaddress.IPV6LENGTH
# Efficient constructor from integer.
if isinstance(address, integer_types):
self._check_int_address(address)
self._ip = address
elif self._is_packed_binary(address):
self._check_packed_address(address, 16)
self._ip = int(binascii.hexlify(address), 16)
else:
address = str(address)
if "/" in address:
raise ipaddress.AddressValueError(
"Unexpected '/' in {}".format(address)
)
self._ip = self._ip_int_from_string(address)
def _is_packed_binary(self, data):
"""
Check if data is hexadecimal packed
:param data:
:return:
"""
packed = False
if isinstance(data, bytes) and len(data) == 16 and b":" not in data:
try:
packed = bool(int(binascii.hexlify(data), 16))
except (ValueError, TypeError):
pass
return packed
@property
def scope(self):
"""
Return scope of IPv6 address.
:return:
"""
return self.__scope
def __str__(self):
return text_type(
self._string_from_ip_int(self._ip)
+ ("%" + self.scope if self.scope is not None else "")
)
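# Example of the scope handling implemented above:
#   IPv6AddressScoped("fe80::1%eth0").scope   -> "eth0"
#   str(IPv6AddressScoped("fe80::1%eth0"))    -> "fe80::1%eth0"
#   IPv6AddressScoped("2001:db8::1").scope    -> None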
class IPv6InterfaceScoped(ipaddress.IPv6Interface, IPv6AddressScoped):
"""
Update
"""
def __init__(self, address):
if (
PY3
and isinstance(address, (bytes, int))
or not PY3
and isinstance(address, int)
):
IPv6AddressScoped.__init__(self, address)
self.network = ipaddress.IPv6Network(self._ip)
self._prefixlen = self._max_prefixlen
return
addr = ipaddress._split_optional_netmask(address)
IPv6AddressScoped.__init__(self, addr[0])
self.network = ipaddress.IPv6Network(address, strict=False)
self.netmask = self.network.netmask
self._prefixlen = self.network._prefixlen
self.hostmask = self.network.hostmask
if ipaddress:
ipaddress.IPv6Address = IPv6AddressScoped
if sys.version_info.major == 2:
ipaddress.IPv6Interface = IPv6InterfaceScoped
|
import pygame
class ControlManager(object):
@classmethod
def up(cls):
raise NotImplementedError('Error: Abstract class')
@classmethod
def down(cls):
raise NotImplementedError('Error: Abstract class')
@classmethod
def left(cls):
raise NotImplementedError('Error: Abstract class')
@classmethod
def right(cls):
raise NotImplementedError('Error: Abstract class')
@classmethod
def angle(cls, pos):
raise NotImplementedError('Error: Abstract class')
@classmethod
def prim_button(cls):
raise NotImplementedError('Error: Abstract class')
@classmethod
def sec_button(cls):
raise NotImplementedError('Error: Abstract class')
@classmethod
def select_button(cls):
raise NotImplementedError('Error: Abstract class')
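# A hypothetical concrete control scheme (not part of the original module) showing one
# way the abstract interface above could be backed by pygame's keyboard and mouse
# state; the class name and key bindings are illustrative only.
import math
class KeyboardControls(ControlManager):
    @classmethod
    def up(cls):
        return pygame.key.get_pressed()[pygame.K_UP]
    @classmethod
    def down(cls):
        return pygame.key.get_pressed()[pygame.K_DOWN]
    @classmethod
    def left(cls):
        return pygame.key.get_pressed()[pygame.K_LEFT]
    @classmethod
    def right(cls):
        return pygame.key.get_pressed()[pygame.K_RIGHT]
    @classmethod
    def angle(cls, pos):
        # angle from pos to the mouse cursor, in radians
        mouse_x, mouse_y = pygame.mouse.get_pos()
        return math.atan2(mouse_y - pos[1], mouse_x - pos[0])
    @classmethod
    def prim_button(cls):
        return pygame.mouse.get_pressed()[0]
    @classmethod
    def sec_button(cls):
        return pygame.mouse.get_pressed()[2]
    @classmethod
    def select_button(cls):
        return pygame.key.get_pressed()[pygame.K_RETURN]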
|
import random
import mysql.connector
import yaml
from os import path as os_path
config_path = os_path.abspath(os_path.join(os_path.dirname(__file__), 'config.yml'))
data = yaml.safe_load(open(config_path))
def extract_fact(user_id):
mydb = mysql.connector.connect(
host=data['DB_HOST'],
user=data['DB_USERNAME'],
password=data['DB_PASSWORD'],
database=data['DB_NAME'])
mycursor = mydb.cursor(buffered=True)
mycursor.execute(f"SELECT * FROM facts WHERE user_id = {user_id} OR privacy = 'Public' ORDER BY RAND() LIMIT 1 ")
myresult = mycursor.fetchone()
mycursor.close()
mydb.close()
    # row layout: index 2 = note, 3 = code, 5 = title, 6 = link
    if myresult is None:
        # no fact matched this user or the public filter
        return ''
    title, link, note, code = '', '', '', ''
    if myresult[5]:
        title = f'<b>{myresult[5]}</b>\n\n'
    if myresult[6]:
        link = f'<a href="{myresult[6]}">Источник</a>'  # "Источник" = "Source"
    if myresult[2]:
        note = f'<i>{myresult[2]}</i>\n\n'
    if myresult[3]:
        code = f'<pre><code class="language-python">{myresult[3]}</code></pre>\n\n'
    message = f'{title}{code}{note}{link}'
    return message
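# Example of the config.yml this module reads (keys taken from the lookups above;
# the values shown are placeholders):
#   DB_HOST: "localhost"
#   DB_USERNAME: "facts_user"
#   DB_PASSWORD: "change-me"
#   DB_NAME: "facts"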
|
import json
import logging
import random
import requests
from hashlib import sha1 as sha_constructor
from django.conf import settings
from gluu_ecommerce.connectors.uma_access import obtain_authorized_rpt_token
logger = logging.getLogger('idp')
SCIM_CREATE_USER_ENDPOINT = 'https://idp.gluu.org/identity/seam/resource/restv1/scim/v2/Users/'
SCIM_UPDATE_USER_ENDPOINT = 'https://idp.gluu.org/identity/seam/resource/restv1/scim/v2/Users/{}/'
def create_user(user, password, active=False):
headers = {'Content-Type': 'application/json'}
params = {}
payload = {
'schemas': ['urn:ietf:params:scim:schemas:core:2.0:User'],
        # sha1 requires bytes on Python 3
        'userName': sha_constructor(str(random.random()).encode()).hexdigest()[:12],
'name': {'givenName': user.first_name, 'familyName': user.last_name},
        'displayName': u'{} {}'.format(user.first_name, user.last_name),
'password': password,
'emails': [
{'value': user.email, 'primary': True, 'type': 'Work'}
],
'phoneNumbers': [
{'value': user.phone_number, 'primary': True, 'type': 'Work'}
],
}
if active:
payload['active'] = True
url = SCIM_CREATE_USER_ENDPOINT
if settings.SCIM_TEST_MODE:
params['access_token'] = settings.SCIM_TEST_MODE_ACCESS_TOKEN
else:
rpt = obtain_authorized_rpt_token(resource_uri=url)
headers['Authorization'] = 'Bearer {}'.format(rpt)
response = requests.post(
url,
data=json.dumps(payload),
verify=settings.VERIFY_SSL,
headers=headers,
params=params
)
if response.status_code != 201:
message = 'Error writing to idp: {} {}'.format(response.status_code, response.text)
logger.error(message)
raise Exception(message)
else:
response = response.json()
return response['id']
def activate_user(user):
headers = {'Content-Type': 'application/json'}
params = {}
url = SCIM_UPDATE_USER_ENDPOINT.format(user.idp_uuid)
if settings.SCIM_TEST_MODE:
params['access_token'] = settings.SCIM_TEST_MODE_ACCESS_TOKEN
else:
rpt = obtain_authorized_rpt_token(resource_uri=url)
headers['Authorization'] = 'Bearer {}'.format(rpt)
payload = {'active': True}
response = requests.put(
url,
data=json.dumps(payload),
verify=settings.VERIFY_SSL,
headers=headers,
params=params
)
if response.status_code != 200:
message = 'Error writing to idp: {} {}'.format(response.status_code, response.text)
logger.error(message)
raise Exception(message)
def update_user(user):
headers = {'Content-Type': 'application/json'}
params = {}
if not user.idp_uuid:
logger.error('Error writing to idp, missing uid: {}'.format(user.email))
return
url = SCIM_UPDATE_USER_ENDPOINT.format(user.idp_uuid)
if settings.SCIM_TEST_MODE:
params['access_token'] = settings.SCIM_TEST_MODE_ACCESS_TOKEN
else:
rpt = obtain_authorized_rpt_token(resource_uri=url)
headers['Authorization'] = 'Bearer {}'.format(rpt)
payload = {
'name': {'givenName': user.first_name, 'familyName': user.last_name},
        'displayName': u'{} {}'.format(user.first_name, user.last_name),
'phoneNumbers': [
{'value': user.mobile_number, 'primary': True, 'type': 'Work'}
],
'timezone': user.timezone,
'title': user.job_title
}
response = requests.put(
url,
data=json.dumps(payload),
verify=settings.VERIFY_SSL,
headers=headers,
params=params
)
if response.status_code != 200:
message = 'Error writing to idp: {} {}'.format(response.status_code, response.text)
logger.error(message)
raise Exception(message)
else:
logger.info('Successfully updated {}'.format(user.email))
def get_user(user):
if not user.idp_uuid:
logger.error('Error writing to idp, missing uid: {}'.format(user.email))
return
headers = {'Content-Type': 'application/json'}
params = {}
url = SCIM_UPDATE_USER_ENDPOINT.format(user.idp_uuid)
if settings.SCIM_TEST_MODE:
params['access_token'] = settings.SCIM_TEST_MODE_ACCESS_TOKEN
else:
rpt = obtain_authorized_rpt_token(resource_uri=url)
headers['Authorization'] = 'Bearer {}'.format(rpt)
    # pass params so the test-mode access token is honoured, as in the other requests
    response = requests.get(url, verify=settings.VERIFY_SSL, headers=headers, params=params)
if response.status_code != 200:
message = 'Error retrieving idp: {} {}'.format(response.status_code, response.text)
logger.error(message)
raise Exception(message)
else:
return response.json()
def email_exists(email):
headers = {'Content-Type': 'application/json'}
url = SCIM_CREATE_USER_ENDPOINT
params = {'filter': 'emails.value eq "{}"'.format(email)}
if settings.SCIM_TEST_MODE:
params['access_token'] = settings.SCIM_TEST_MODE_ACCESS_TOKEN
else:
rpt = obtain_authorized_rpt_token(resource_uri=url)
headers['Authorization'] = 'Bearer {}'.format(rpt)
response = requests.get(url, verify=settings.VERIFY_SSL, headers=headers, params=params)
if response.status_code != 200:
message = 'Error retrieving from idp: {} {}'.format(response.status_code, response.text)
logger.error(message)
raise Exception(message)
else:
no_records = int(response.json()['totalResults'])
if no_records not in [0, 1]:
            message = 'Unexpected number of records found for {}'.format(email)
logger.error(message)
raise Exception(message)
return no_records == 1
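# Illustrative call sequence (not part of this module); `user` is any object exposing
# the attributes referenced above (first_name, last_name, email, phone_number,
# idp_uuid, ...), e.g. a Django user/profile model:
#   if not email_exists(user.email):
#       user.idp_uuid = create_user(user, password, active=False)
#   activate_user(user)
#   update_user(user)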
|
"""
CryptoAPIs
Crypto APIs 2.0 is a complex and innovative infrastructure layer that radically simplifies the development of any Blockchain and Crypto related applications. Organized around REST, Crypto APIs 2.0 can assist both novice Bitcoin/Ethereum enthusiasts and crypto experts with the development of their blockchain applications. Crypto APIs 2.0 provides unified endpoints and data, raw data, automatic tokens and coins forwardings, callback functionalities, and much more. # noqa: E501
The version of the OpenAPI document: 2.0.0
Contact: developers@cryptoapis.io
Generated by: https://openapi-generator.tech
"""
import sys
import unittest
import cryptoapis
from cryptoapis.model.banned_ip_address_details import BannedIpAddressDetails
from cryptoapis.model.invalid_api_key import InvalidApiKey
from cryptoapis.model.missing_api_key import MissingApiKey
globals()['BannedIpAddressDetails'] = BannedIpAddressDetails
globals()['InvalidApiKey'] = InvalidApiKey
globals()['MissingApiKey'] = MissingApiKey
from cryptoapis.model.get_xrp_ripple_transaction_details_by_transaction_ide401 import GetXRPRippleTransactionDetailsByTransactionIDE401
class TestGetXRPRippleTransactionDetailsByTransactionIDE401(unittest.TestCase):
"""GetXRPRippleTransactionDetailsByTransactionIDE401 unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testGetXRPRippleTransactionDetailsByTransactionIDE401(self):
"""Test GetXRPRippleTransactionDetailsByTransactionIDE401"""
# FIXME: construct object with mandatory attributes with example values
# model = GetXRPRippleTransactionDetailsByTransactionIDE401() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
|
"""Standalone Authenticator."""
import collections
import errno
import logging
import socket
from typing import Any
from typing import Callable
from typing import DefaultDict
from typing import Dict
from typing import Iterable
from typing import List
from typing import Mapping
from typing import Set
from typing import Tuple
from typing import Type
from typing import TYPE_CHECKING
from OpenSSL import crypto
from acme import challenges
from acme import standalone as acme_standalone
from certbot import achallenges
from certbot import errors
from certbot import interfaces
from certbot.display import util as display_util
from certbot.plugins import common
logger = logging.getLogger(__name__)
if TYPE_CHECKING:
ServedType = DefaultDict[
acme_standalone.BaseDualNetworkedServers,
Set[achallenges.KeyAuthorizationAnnotatedChallenge]
]
class ServerManager:
"""Standalone servers manager.
Manager for `ACMEServer` and `ACMETLSServer` instances.
`certs` and `http_01_resources` correspond to
`acme.crypto_util.SSLSocket.certs` and
`acme.crypto_util.SSLSocket.http_01_resources` respectively. All
created servers share the same certificates and resources, so if
you're running both TLS and non-TLS instances, HTTP01 handlers
will serve the same URLs!
"""
def __init__(self, certs: Mapping[bytes, Tuple[crypto.PKey, crypto.X509]],
http_01_resources: Set[acme_standalone.HTTP01RequestHandler.HTTP01Resource]
) -> None:
self._instances: Dict[int, acme_standalone.HTTP01DualNetworkedServers] = {}
self.certs = certs
self.http_01_resources = http_01_resources
def run(self, port: int, challenge_type: Type[challenges.Challenge],
listenaddr: str = "") -> acme_standalone.HTTP01DualNetworkedServers:
"""Run ACME server on specified ``port``.
This method is idempotent, i.e. all calls with the same pair of
``(port, challenge_type)`` will reuse the same server.
:param int port: Port to run the server on.
:param challenge_type: Subclass of `acme.challenges.Challenge`,
            currently only `acme.challenges.HTTP01`.
:param str listenaddr: (optional) The address to listen on. Defaults to all addrs.
:returns: DualNetworkedServers instance.
        :rtype: acme_standalone.HTTP01DualNetworkedServers
"""
assert challenge_type == challenges.HTTP01
if port in self._instances:
return self._instances[port]
address = (listenaddr, port)
try:
servers = acme_standalone.HTTP01DualNetworkedServers(
address, self.http_01_resources)
except socket.error as error:
raise errors.StandaloneBindError(error, port)
servers.serve_forever()
# if port == 0, then random free port on OS is taken
# both servers, if they exist, have the same port
real_port = servers.getsocknames()[0][1]
self._instances[real_port] = servers
return servers
def stop(self, port: int) -> None:
"""Stop ACME server running on the specified ``port``.
:param int port:
"""
instance = self._instances[port]
for sockname in instance.getsocknames():
logger.debug("Stopping server at %s:%d...",
*sockname[:2])
instance.shutdown_and_server_close()
del self._instances[port]
def running(self) -> Dict[int, acme_standalone.HTTP01DualNetworkedServers]:
"""Return all running instances.
Once the server is stopped using `stop`, it will not be
returned.
:returns: Mapping from ``port`` to ``servers``.
        :rtype: dict
"""
return self._instances.copy()
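# Rough usage sketch of ServerManager (for orientation only; in practice the
# Authenticator below owns the instance). Port 0 asks the OS for a free port,
# as noted in `run`:
#   manager = ServerManager(certs={}, http_01_resources=set())
#   servers = manager.run(0, challenges.HTTP01)
#   port = next(iter(manager.running()))
#   manager.stop(port)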
class Authenticator(common.Plugin, interfaces.Authenticator):
"""Standalone Authenticator.
This authenticator creates its own ephemeral TCP listener on the
necessary port in order to respond to incoming http-01
challenges from the certificate authority. Therefore, it does not
rely on any existing server program.
"""
description = "Spin up a temporary webserver"
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
self.served: ServedType = collections.defaultdict(set)
# Stuff below is shared across threads (i.e. servers read
# values, main thread writes). Due to the nature of CPython's
# GIL, the operations are safe, c.f.
# https://docs.python.org/2/faq/library.html#what-kinds-of-global-value-mutation-are-thread-safe
self.certs: Mapping[bytes, Tuple[crypto.PKey, crypto.X509]] = {}
self.http_01_resources: Set[acme_standalone.HTTP01RequestHandler.HTTP01Resource] = set()
self.servers = ServerManager(self.certs, self.http_01_resources)
@classmethod
def add_parser_arguments(cls, add: Callable[..., None]) -> None:
pass # No additional argument for the standalone plugin parser
def more_info(self) -> str: # pylint: disable=missing-function-docstring
return("This authenticator creates its own ephemeral TCP listener "
"on the necessary port in order to respond to incoming "
"http-01 challenges from the certificate authority. Therefore, "
"it does not rely on any existing server program.")
def prepare(self) -> None: # pylint: disable=missing-function-docstring
pass
def get_chall_pref(self, domain: str) -> Iterable[Type[challenges.Challenge]]:
# pylint: disable=unused-argument,missing-function-docstring
return [challenges.HTTP01]
def perform(self, achalls: Iterable[achallenges.AnnotatedChallenge]
) -> List[challenges.ChallengeResponse]: # pylint: disable=missing-function-docstring
return [self._try_perform_single(achall) for achall in achalls]
def _try_perform_single(self,
achall: achallenges.AnnotatedChallenge) -> challenges.ChallengeResponse:
while True:
try:
return self._perform_single(achall)
except errors.StandaloneBindError as error:
_handle_perform_error(error)
def _perform_single(self,
achall: achallenges.AnnotatedChallenge) -> challenges.ChallengeResponse:
servers, response = self._perform_http_01(achall)
self.served[servers].add(achall)
return response
def _perform_http_01(self, achall: achallenges.AnnotatedChallenge
) -> Tuple[acme_standalone.HTTP01DualNetworkedServers,
challenges.ChallengeResponse]:
port = self.config.http01_port
addr = self.config.http01_address
servers = self.servers.run(port, challenges.HTTP01, listenaddr=addr)
response, validation = achall.response_and_validation()
resource = acme_standalone.HTTP01RequestHandler.HTTP01Resource(
chall=achall.chall, response=response, validation=validation)
self.http_01_resources.add(resource)
return servers, response
def cleanup(self, achalls: Iterable[achallenges.AnnotatedChallenge]) -> None: # pylint: disable=missing-function-docstring
# reduce self.served and close servers if no challenges are served
for unused_servers, server_achalls in self.served.items():
for achall in achalls:
if achall in server_achalls:
server_achalls.remove(achall)
for port, servers in self.servers.running().items():
if not self.served[servers]:
self.servers.stop(port)
def auth_hint(self, failed_achalls: List[achallenges.AnnotatedChallenge]) -> str:
port, addr = self.config.http01_port, self.config.http01_address
neat_addr = f"{addr}:{port}" if addr else f"port {port}"
return ("The Certificate Authority failed to download the challenge files from "
f"the temporary standalone webserver started by Certbot on {neat_addr}. "
"Ensure that the listed domains point to this machine and that it can "
"accept inbound connections from the internet.")
def _handle_perform_error(error: errors.StandaloneBindError) -> None:
if error.socket_error.errno == errno.EACCES:
raise errors.PluginError(
"Could not bind TCP port {0} because you don't have "
"the appropriate permissions (for example, you "
"aren't running this program as "
"root).".format(error.port))
if error.socket_error.errno == errno.EADDRINUSE:
msg = (
"Could not bind TCP port {0} because it is already in "
"use by another process on this system (such as a web "
"server). Please stop the program in question and "
"then try again.".format(error.port))
should_retry = display_util.yesno(msg, "Retry", "Cancel", default=False)
if not should_retry:
raise errors.PluginError(msg)
else:
raise error
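# Typical invocation that exercises this plugin, for orientation (flag names assumed
# from the config attributes used above; see `certbot --help standalone`):
#   certbot certonly --standalone -d example.com --http-01-port 80 --http-01-address 0.0.0.0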
|