import logging
from django.conf import settings
from django_elasticsearch_dsl import DocType, Index, fields
from elasticsearch import Elasticsearch
from readthedocs.projects.models import HTMLFile, Project
project_conf = settings.ES_INDEXES['project']
project_index = Index(project_conf['name'])
project_index.settings(**project_conf['settings'])
page_conf = settings.ES_INDEXES['page']
page_index = Index(page_conf['name'])
page_index.settings(**page_conf['settings'])
log = logging.getLogger(__name__)
class RTDDocTypeMixin:
def update(self, *args, **kwargs):
# Hack a fix to our broken connection pooling
# This creates a new connection on every request,
# but actually works :)
log.info('Hacking Elastic indexing to fix connection pooling')
self.using = Elasticsearch(**settings.ELASTICSEARCH_DSL['default'])
super().update(*args, **kwargs)
@project_index.doc_type
class ProjectDocument(RTDDocTypeMixin, DocType):
# Metadata
url = fields.TextField(attr='get_absolute_url')
users = fields.NestedField(
properties={
'username': fields.TextField(),
'id': fields.IntegerField(),
}
)
language = fields.KeywordField()
modified_model_field = 'modified_date'
class Meta:
model = Project
fields = ('name', 'slug', 'description')
ignore_signals = True
@page_index.doc_type
class PageDocument(RTDDocTypeMixin, DocType):
# Metadata
project = fields.KeywordField(attr='project.slug')
version = fields.KeywordField(attr='version.slug')
path = fields.KeywordField(attr='processed_json.path')
full_path = fields.KeywordField(attr='path')
rank = fields.IntegerField()
# Searchable content
title = fields.TextField(attr='processed_json.title')
sections = fields.NestedField(
attr='processed_json.sections',
properties={
'id': fields.KeywordField(),
'title': fields.TextField(),
'content': fields.TextField(),
}
)
domains = fields.NestedField(
properties={
'role_name': fields.KeywordField(),
# For linking to the URL
'anchor': fields.KeywordField(),
# For showing in the search result
'type_display': fields.TextField(),
'docstrings': fields.TextField(),
# Simple analyzer breaks on `.`,
# otherwise search results are too strict for this use case
'name': fields.TextField(analyzer='simple'),
}
)
modified_model_field = 'modified_date'
class Meta:
model = HTMLFile
fields = ('commit', 'build')
ignore_signals = True
def prepare_rank(self, html_file):
if not (-10 <= html_file.rank <= 10):
return 0
return html_file.rank
def prepare_domains(self, html_file):
"""Prepares and returns the values for domains field."""
if not html_file.version.is_sphinx_type:
return []
all_domains = []
try:
domains_qs = html_file.sphinx_domains.exclude(
domain='std',
type__in=['doc', 'label']
).iterator()
all_domains = [
{
'role_name': domain.role_name,
'anchor': domain.anchor,
'type_display': domain.type_display,
'docstrings': html_file.processed_json.get(
'domain_data', {}
).get(domain.anchor, ''),
'name': domain.name,
}
for domain in domains_qs
]
log.debug(
"[%s] [%s] Total domains for file %s are: %s",
html_file.project.slug,
html_file.version.slug,
html_file.path,
len(all_domains)
)
except Exception:
log.exception(
"[%s] [%s] Error preparing domain data for file %s",
html_file.project.slug,
html_file.version.slug,
html_file.path
)
return all_domains
def get_queryset(self):
"""
Ignore certain files from indexing.
- Files from external versions
- Ignored files
"""
queryset = super().get_queryset()
queryset = (
queryset
.internal()
.exclude(ignore=True)
)
return queryset
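# Illustrative only (not part of the original module): a document indexed with the
# mapping above would roughly have the following shape; all field values here are
# made up.
_example_page_payload = {
    'project': 'docs',
    'version': 'latest',
    'path': 'index.html',
    'full_path': 'index.html',
    'rank': 0,
    'title': 'Welcome',
    'sections': [
        {'id': 'installation', 'title': 'Installation', 'content': '...'},
    ],
    'domains': [
        {
            'role_name': 'py:function',
            'anchor': 'module.func',
            'type_display': 'function',
            'docstrings': '...',
            'name': 'module.func',
        },
    ],
    'commit': 'abc123',
    'build': 1,
}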
|
import argparse
from packaging.version import Version
from pdm import termui
from pdm.cli.commands.base import BaseCommand
from pdm.exceptions import PdmUsageError
from pdm.models.candidates import Candidate
from pdm.models.project_info import ProjectInfo
from pdm.models.requirements import parse_requirement
from pdm.project import Project
from pdm.utils import normalize_name
def filter_stable(candidate: Candidate) -> bool:
assert candidate.version
return not Version(candidate.version).is_prerelease
class Command(BaseCommand):
"""Show the package information"""
metadata_keys = ["name", "version", "summary", "license", "platform", "keywords"]
def add_arguments(self, parser: argparse.ArgumentParser) -> None:
parser.add_argument(
"package",
type=normalize_name,
nargs=argparse.OPTIONAL,
help="Specify the package name, or show this package if not given",
)
for option in self.metadata_keys:
parser.add_argument(
f"--{option}", action="store_true", help=f"Show {option}"
)
def handle(self, project: Project, options: argparse.Namespace) -> None:
package = options.package
if package:
req = parse_requirement(package)
repository = project.get_repository()
# reverse the result so that the latest version is first.
matches = repository.find_candidates(req, True, True)
latest = next(iter(matches), None)
if not latest:
project.core.ui.echo(
termui.yellow(f"No match found for the package {package!r}"),
err=True,
)
return
latest_stable = next(filter(filter_stable, matches), None)
metadata = latest.prepare(project.environment).metadata
else:
if not project.meta.name:
raise PdmUsageError("This project is not a package")
metadata = project.meta
package = normalize_name(metadata.name)
latest_stable = None
assert metadata
project_info = ProjectInfo(metadata)
if any(getattr(options, key, None) for key in self.metadata_keys):
for key in self.metadata_keys:
if getattr(options, key, None):
project.core.ui.echo(project_info[key])
return
installed = project.environment.get_working_set().get(package)
if latest_stable:
project_info.latest_stable_version = str(latest_stable.version)
if installed:
project_info.installed_version = str(installed.version)
project.core.ui.display_columns(list(project_info.generate_rows()))
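# Illustrative CLI usage (assuming this command is registered as `pdm show`):
#   pdm show requests            # full info table for the best matching candidate
#   pdm show requests --version  # print only the version field
#   pdm show                     # metadata of the current project itself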
|
"""
Deprecated. Use types.bundle instead.
"""
from .types import CreateCollectionArg, CollectionMetadata, MintCollectionArg
|
from robot import Robot
from robot.collector.shortcut import *
collector = pipe(
const('http://www.dataversity.net/category/education/daily-data/'),
get(),
css('#primary article'),
foreach(dict(
pipe(
css('a[href]'), attr('href'), any(), url(),
get(),
dict(
body=pipe(css('.entry-content p'), as_text())
)
),
title=pipe(css('.entry-title'), as_text()),
url=pipe(css('a[href]'), attr('href'), any(), url()),
))
)
with Robot() as robot:
result = robot.sync_run(collector)
for r in result:
print(r)
|
cadena = input("\33[0mEnter the string to separate: \33[34m")
separador = input("\33[0mEnter the separator character: \33[34m")[0]
print("\33[0m")
print("Result:\33[33m", cadena.replace(' ', separador), "\33[0m")
|
from .LagrangePolynomial import LagrangeExpand
from pytorch_lightning import LightningModule, Trainer
from high_order_layers_torch.PolynomialLayers import *
from torch.nn import Conv2d
import torch.nn as nn
import torch
from .utils import *
def conv2d_wrapper(
in_channels: int,
out_channels: int,
kernel_size: int,
stride: int = 1,
padding: int = 0,
dilation: int = 1,
groups: int = 1,
padding_mode: str = 'zeros',
weight_magnitude: float = 1.0,
rescale_output: bool = False,
verbose: bool = False,
** kwargs
):
"""
Inputs need to be an exact clone of those in torch conv2d including
defaults. The function allows you to pass extra arguments without breaking
conv2d.
"""
conv = Conv2d(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=kernel_size,
stride=stride,
padding=padding,
dilation=dilation,
groups=groups,
# Bias should always be false as the bias is already included in these methods.
bias=False,
padding_mode=padding_mode,
)
in_features = in_channels*kernel_size*kernel_size
if verbose is True:
print('in_channels', in_channels, 'out_channels', out_channels)
print('conv.weight.shape', conv.weight.shape)
# We don't want to use the standard conv initialization
# since this is a bit different.
if rescale_output is False:
conv.weight.data.uniform_(-weight_magnitude/in_features,
weight_magnitude/in_features)
elif rescale_output is True:
conv.weight.data.uniform_(-weight_magnitude, weight_magnitude)
else:
print('Using kaiming for weight initialization')
return conv
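# Illustrative sketch (not part of the original module): with the default
# rescale_output=False, conv2d_wrapper draws its initial weights uniformly from
# [-weight_magnitude / in_features, weight_magnitude / in_features], where
# in_features = in_channels * kernel_size * kernel_size.
def _conv2d_wrapper_demo():
    conv = conv2d_wrapper(in_channels=6, out_channels=4, kernel_size=3)
    bound = 1.0 / (6 * 3 * 3)
    assert conv.bias is None  # bias is always disabled by the wrapper
    assert conv.weight.abs().max().item() <= bound
    return conv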
class Expansion2d(nn.Module):
def __init__(self, basis=None):
"""
Expand an input by a function defined by basis.
Args :
- basis: function to expand input by.
"""
super().__init__()
if basis is None:
    raise Exception(
        'You must define the basis function in Expansion2d')
self.basis = basis
def build(self, input_shape):
pass
def __call__(self, inputs):
"""
Expand input
Args :
inputs : Tensor of shape [batches, channels, height, width]
Return :
Tensor of shape [batches, channels*(basis size), height, width]
"""
res = self.basis(
inputs) # outputs [basis_size, batches, channels, height, width]
res = res.permute(1, 3, 4, 2, 0)
res = torch.reshape(
res, [res.shape[0], res.shape[1],
res.shape[2], res.shape[3]*res.shape[4]]
)
res = res.permute(0, 3, 1, 2)
return res
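# Illustrative sketch (not part of the original module): with a toy basis that
# stacks [1, x, x**2] along a new leading dimension, Expansion2d maps a
# [batches, channels, height, width] tensor to [batches, channels*3, height, width].
def _expansion2d_shape_demo():
    toy_basis = lambda t: torch.stack([torch.ones_like(t), t, t ** 2])
    expand = Expansion2d(basis=toy_basis)
    out = expand(torch.randn(8, 3, 5, 5))
    assert out.shape == (8, 9, 5, 5)
    return out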
class Expansion1d(nn.Module):
def __init__(self, basis=None):
"""
Expand an input by a function defined by basis.
Args :
- basis: function to expand input by.
"""
super().__init__()
if basis is None:
    raise Exception(
        'You must define the basis function in Expansion1d')
self.basis = basis
def build(self, input_shape):
pass
def __call__(self, inputs):
"""
Expand input
Args :
inputs : Tensor of shape [batches, channels, width]
Return :
Tensor of shape [batches, channels*(basis size), width]
"""
res = self.basis(
inputs) # outputs [basis_size, batches, channels, width]
res = res.permute(1, 3, 2, 0)
res = torch.reshape(
res, [res.shape[0], res.shape[1], res.shape[2]*res.shape[3]]
)
res = res.permute(0, 2, 1) # batches, basis_size*channels, width
return res
class FourierConvolution2d(nn.Module):
def __init__(self, n: int, in_channels: int, kernel_size: int, length: float = 2.0, rescale_output=False, *args, **kwargs):
"""
Fourier series convolutional layer.
Args :
- n : number of Fourier series components. n=1 is a constant, n=3 contains both the first sine and cosine components.
- in_channels : number of input channels
- kernel_size : size of the kernel
- length : Range of the polynomial interpolation points. length = 2 implies [-1, 1] so the interpolation points
are in that range. Anything outside that range could grow.
- rescale_output: If rescale output is True then the output is divided by the number of inputs for each output,
in effect taking the average. This is generally not necessary for the fourier series.
"""
super().__init__()
self.poly = Expansion2d(FourierExpand(n, length))
self._channels = n*in_channels
self.conv = conv2d_wrapper(in_channels=self._channels,
kernel_size=kernel_size, **kwargs)
self._total_in = in_channels*kernel_size*kernel_size
self._rescale = 1.0
if rescale_output is True:
self._rescale = 1.0/self._total_in
def forward(self, x):
x = self.poly(x)
out = self.conv(x)
return out*self._rescale
class PolynomialConvolution2d(nn.Module):
def __init__(self, n: int, in_channels: int, kernel_size: int, length: float = 2.0, rescale_output=False, periodicity: float = None, *args, **kwargs):
"""
Polynomial convolutional layer.
Args :
- n : number of weights or nodes. Polynomial order is n-1 so quadratic would be n=3.
- in_channels : number of input channels
- kernel_size : size of the kernel
- length : Range of the polynomial interpolation points. length = 2 implies [-1, 1] so the interpolation points
are in that range. Anything outside that range could grow.
- rescale_output: If rescale output is True then the output is divided by the number of inputs for each output,
in effect taking the average.
"""
super().__init__()
self.poly = Expansion2d(LagrangeExpand(n, length=length))
self._channels = n*in_channels
self.periodicity = periodicity
self.conv = conv2d_wrapper(in_channels=self._channels,
kernel_size=kernel_size, **kwargs)
self._total_in = in_channels*kernel_size*kernel_size
self._rescale = 1.0
if rescale_output is True:
self._rescale = 1.0/self._total_in
def forward(self, x):
periodicity = self.periodicity
if periodicity is not None:
x = make_periodic(x, periodicity)
x = self.poly(x)
out = self.conv(x)
return out*self._rescale
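# Illustrative sketch (not part of the original module), assuming the LagrangeExpand
# basis behaves as used above: a degree-2 polynomial layer (n=3) expands 3 input
# channels to 9 before the bias-free conv, so a [2, 3, 32, 32] input with
# kernel_size=3 and no padding yields [2, 8, 30, 30].
def _polynomial_conv2d_demo():
    layer = PolynomialConvolution2d(n=3, in_channels=3, kernel_size=3, out_channels=8)
    out = layer(torch.randn(2, 3, 32, 32))
    assert out.shape == (2, 8, 30, 30)
    return out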
class PiecewisePolynomialConvolution2d(nn.Module):
def __init__(self, n: int, segments: int, in_channels: int, kernel_size: int, length: float = 2.0, rescale_output: bool = False, periodicity: float = None, *args, **kwargs):
"""
Piecewise continuous polynomial convolutional layer. The boundary between each polynomial are continuous.
Args :
- n : number of weights or nodes. Polynomial order is n-1 so quadratic would be n=3.
- segments: The number of segments in the piecewise polynomial.
- in_channels : number of input channels
- kernel_size : size of the kernel
- length : Range of the piecewise polynomial interpolation points. length = 2 implies [-1, 1] so the interpolation points
are in that range.
- rescale_output: If rescale output is True then the output is divided by the number of inputs for each output,
in effect taking the average.
"""
super().__init__()
self.poly = Expansion2d(
PiecewisePolynomialExpand(n=n, segments=segments, length=length))
self._channels = ((n-1)*segments+1)*in_channels
self.periodicity = periodicity
self.conv = conv2d_wrapper(in_channels=self._channels,
kernel_size=kernel_size, **kwargs)
self._total_in = in_channels*kernel_size*kernel_size
self._rescale = 1.0
if rescale_output is True:
self._rescale = 1.0/self._total_in
def forward(self, x):
periodicity = self.periodicity
if periodicity is not None:
x = make_periodic(x, periodicity)
x = self.poly(x)
out = self.conv(x)
return out*self._rescale
class PiecewiseDiscontinuousPolynomialConvolution2d(nn.Module):
def __init__(self, n: int, segments: int, in_channels: int, kernel_size: int, length: float = 2.0, rescale_output: bool = False, periodicity: float = None, *args, **kwargs):
"""
Discontinuous piecewise polynomial convolutional layer. The boundary between each polynomial can be discontinuous.
Args :
- n : number of weights or nodes. Polynomial order is n-1 so quadratic would be n=3.
- segments: The number of segments in the piecewise polynomial.
- in_channels : number of input channels
- kernel_size : size of the kernel
- length : Range of the piecewise polynomial interpolation points. length = 2 implies [-1, 1] so the interpolation points
are in that range.
- rescale_output: If rescale output is True then the output is divided by the number of inputs for each output,
in effect taking the average.
"""
super().__init__()
self.poly = Expansion2d(
PiecewiseDiscontinuousPolynomialExpand(n=n, segments=segments, length=length))
self._channels = n*segments*in_channels
self.periodicity = periodicity
self.conv = conv2d_wrapper(in_channels=self._channels,
kernel_size=kernel_size, **kwargs)
self._total_in = in_channels*kernel_size*kernel_size
self._rescale = 1.0
if rescale_output is True:
self._rescale = 1.0/self._total_in
def forward(self, x):
periodicity = self.periodicity
if periodicity is not None:
x = make_periodic(x, periodicity)
x = self.poly(x)
out = self.conv(x)
return out*self._rescale
|
import _plotly_utils.basevalidators
class ColorValidator(_plotly_utils.basevalidators.ColorValidator):
def __init__(
self,
plotly_name="color",
parent_name="parcats.line.colorbar.title.font",
**kwargs,
):
super(ColorValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "colorbars"),
**kwargs,
)
|
import numbers # noqa: E402
try:
basestring # basestring was removed in Python 3
except NameError:
basestring = str
def test_trade(exchange, trade, symbol, now):
assert trade
sampleTrade = {
'info': {'a': 1, 'b': 2, 'c': 3}, # the original decoded JSON as is
'id': '12345-67890:09876/54321', # string trade id
'timestamp': 1502962946216, # Unix timestamp in milliseconds
'datetime': '2017-08-17 12:42:48.000', # ISO8601 datetime with milliseconds
'symbol': 'ETH/BTC', # symbol
'order': '12345-67890:09876/54321', # string order id or None
'type': 'limit', # order type, 'market', 'limit' or None
'side': 'buy', # direction of the trade, 'buy' or 'sell'
'takerOrMaker': 'taker', # string, 'taker' or 'maker'
'price': 0.06917684, # float price in quote currency
'amount': 1.5, # amount of base currency
'cost': 0.10376526, # total cost (including fees), `price * amount`
}
keys = list(sampleTrade.keys())
for i in range(0, len(keys)):
key = keys[i]
assert key in trade
fee = trade['fee'] if ('fee' in trade) else None
fees = trade['fees'] if ('fees' in trade) else None
# logical XOR
if fee or fees:
assert not (fee and fees)
if fee:
assert('cost' in fee) and ('currency' in fee)
if fees:
assert isinstance(fees, list)
for i in range(0, len(fees)):
fee = fees[i]
assert('cost' in fee) and ('currency' in fee)
id = trade['id']
assert(id is None) or (isinstance(id, basestring))
timestamp = trade['timestamp']
assert isinstance(timestamp, numbers.Real) or timestamp is None
if timestamp:
assert timestamp > 1230940800000 # 03 Jan 2009 - first block
assert timestamp < 2147483648000 # 19 Jan 2038 - int32 overflows
adjustedNow = now + 60000
assert timestamp < adjustedNow, 'trade.timestamp is greater than or equal to current time: trade: ' + exchange.iso8601(timestamp) + ' now: ' + exchange.iso8601(now)
assert trade['datetime'] == exchange.iso8601(timestamp)
assert trade['symbol'] == symbol, 'trade symbol is not equal to requested symbol: trade: ' + trade['symbol'] + ' requested: ' + symbol
assert trade['type'] is None or isinstance(trade['type'], basestring)
assert trade['side'] is None or trade['side'] == 'buy' or trade['side'] == 'sell', 'unexpected trade side ' + trade['side']
assert trade['order'] is None or isinstance(trade['order'], basestring)
assert isinstance(trade['price'], numbers.Real), 'trade.price is not a number'
assert trade['price'] > 0
assert isinstance(trade['amount'], numbers.Real), 'trade.amount is not a number'
assert trade['amount'] >= 0
assert trade['cost'] is None or isinstance(trade['cost'], numbers.Real), 'trade.cost is not a number'
assert trade['cost'] is None or trade['cost'] >= 0
takerOrMaker = trade['takerOrMaker']
assert takerOrMaker is None or takerOrMaker == 'taker' or takerOrMaker == 'maker'
|
from pathlib import Path
from pprint import pprint
from hesiod import get_cfg_copy, hmain
template_file = Path("tests/configs/templates/complex.yaml")
base_cfg_dir = Path("tests/configs/bases")
@hmain(base_cfg_dir, template_cfg_file=template_file)
def test() -> None:
cfg = get_cfg_copy()
pprint(cfg)
test()
|
import os
import yaml
import asyncio
import platform
from functools import lru_cache
from typing import List, Dict, Coroutine, Iterator, Union
from . import info
from . import common
def get_path_fname() -> str:
"""
Return the file name that stores the repo locations.
"""
root = common.get_config_dir()
return os.path.join(root, 'repo_path')
@lru_cache()
def get_repos() -> Dict[str, str]:
"""
Return a `dict` of repo name to repo absolute path
"""
path_file = get_path_fname()
repos = {}
# Each line is a repo path and repo name separated by ,
if os.path.isfile(path_file) and os.stat(path_file).st_size > 0:
with open(path_file) as f:
for line in f:
line = line.rstrip()
if not line: # blank line
continue
path, name = line.split(',')
if not is_git(path):
continue
if name not in repos:
repos[name] = path
else: # repo name collision for different paths: include parent path name
par_name = os.path.basename(os.path.dirname(path))
repos[os.path.join(par_name, name)] = path
return repos
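# Illustrative example of the repo_path file parsed above: one "path,name" pair per
# line. The second entry below collides on the name "proj" and would be stored under
# the key "work/proj".
#
#   /home/user/code/proj,proj
#   /home/user/work/proj,proj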
def get_choices() -> List[Union[str, None]]:
"""
Return all repo names plus an additional empty list. This is a workaround for
argparse's problem with coexisting nargs='*' and choices.
See https://utcc.utoronto.ca/~cks/space/blog/python/ArgparseNargsChoicesLimitation
and
https://bugs.python.org/issue27227
"""
repos = list(get_repos())
repos.append([])
return repos
def is_git(path: str) -> bool:
"""
Return True if the path is a git repo.
"""
# An alternative is to call `git rev-parse --is-inside-work-tree`
# I don't see why that one is better yet.
# For a regular git repo, .git is a folder, for a worktree repo, .git is a file.
# However, git submodule repo also has .git as a file.
# A more reliable way to differentiate regular and worktree repos is to
# compare the result of `git rev-parse --git-dir` and
# `git rev-parse --git-common-dir`
loc = os.path.join(path, '.git')
# TODO: we can display the worktree repos in a different font.
return os.path.exists(loc)
def rename_repo(repos: Dict[str, str], repo: str, new_name: str):
"""
Write new repo name to file
"""
path = repos[repo]
del repos[repo]
repos[new_name] = path
write_to_repo_file(repos, 'w')
def write_to_repo_file(repos: Dict[str, str], mode: str):
"""
"""
data = ''.join(f'{path},{name}\n' for name, path in repos.items())
fname = get_path_fname()
os.makedirs(os.path.dirname(fname), exist_ok=True)
with open(fname, mode) as f:
f.write(data)
def add_repos(repos: Dict[str, str], new_paths: List[str]):
"""
Write new repo paths to file
"""
existing_paths = set(repos.values())
new_paths = set(os.path.abspath(p) for p in new_paths if is_git(p))
new_paths = new_paths - existing_paths
if new_paths:
print(f"Found {len(new_paths)} new repo(s).")
new_repos = {
os.path.basename(os.path.normpath(path)): path
for path in new_paths}
write_to_repo_file(new_repos, 'a+')
else:
print('No new repos found!')
async def run_async(repo_name: str, path: str, cmds: List[str]) -> Union[None, str]:
"""
Run `cmds` asynchronously in `path` directory. Return the `path` if
execution fails.
"""
process = await asyncio.create_subprocess_exec(
*cmds,
stdin=asyncio.subprocess.DEVNULL,
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.PIPE,
start_new_session=True,
cwd=path)
stdout, stderr = await process.communicate()
for pipe in (stdout, stderr):
if pipe:
print(format_output(pipe.decode(), f'{repo_name}: '))
# The existence of stderr is not a good indicator since git sometimes writes
# to stderr even when the execution is successful, e.g. git fetch
if process.returncode != 0:
return path
def format_output(s: str, prefix: str):
"""
Prepend every line in the given string with the given prefix.
"""
return ''.join([f'{prefix}{line}' for line in s.splitlines(keepends=True)])
def exec_async_tasks(tasks: List[Coroutine]) -> List[Union[None, str]]:
"""
Execute tasks asynchronously
"""
# TODO: asyncio API is nicer in python 3.7
if platform.system() == 'Windows':
loop = asyncio.ProactorEventLoop()
asyncio.set_event_loop(loop)
else:
loop = asyncio.get_event_loop()
try:
errors = loop.run_until_complete(asyncio.gather(*tasks))
finally:
loop.close()
return errors
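# Illustrative sketch (not part of the original module): run a git command
# concurrently in every registered repo and collect the paths where it failed.
def _run_in_all_repos_demo(cmds=None) -> List[str]:
    cmds = cmds or ['git', 'fetch']
    repos = get_repos()
    tasks = [run_async(name, path, cmds) for name, path in repos.items()]
    return [path for path in exec_async_tasks(tasks) if path]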
def describe(repos: Dict[str, str]) -> Iterator[str]:
"""
Return the status of all repos
"""
if repos:
name_width = max(len(n) for n in repos) + 1
funcs = info.get_info_funcs()
for name in sorted(repos):
path = repos[name]
display_items = ' '.join(f(path) for f in funcs)
yield f'{name:<{name_width}}{display_items}'
def get_cmds_from_files() -> Dict[str, Dict[str, str]]:
"""
Parse delegated git commands from default config file
and custom config file.
Example return
{
'branch': {'help': 'show local branches'},
'clean': {'cmd': 'clean -dfx',
'help': 'remove all untracked files/folders'},
}
"""
# default config file
fname = os.path.join(os.path.dirname(__file__), "cmds.yml")
with open(fname, 'r') as stream:
cmds = yaml.load(stream, Loader=yaml.FullLoader)
# custom config file
root = common.get_config_dir()
fname = os.path.join(root, 'cmds.yml')
custom_cmds = {}
if os.path.isfile(fname) and os.path.getsize(fname):
with open(fname, 'r') as stream:
custom_cmds = yaml.load(stream, Loader=yaml.FullLoader)
# custom commands shadow default ones
cmds.update(custom_cmds)
return cmds
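# Illustrative custom cmds.yml (placed in the config dir returned by
# common.get_config_dir()) that would shadow the default `clean` command:
#
#   clean:
#     cmd: clean -dfx
#     help: remove all untracked files/folders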
|
#!/usr/bin/env python3
# Requires PyAudio and PySpeech and more.
import speech_recognition as sr
from time import ctime
import time
import os
from gtts import gTTS
import random
from pygame import mixer
from pyicloud import PyiCloudService
from datetime import date
import re
from re import findall, finditer
from urllib.request import urlopen
# iCloud stuff. You have to add your iCloud login details here.
iCloudService = PyiCloudService('icloudemail.com', 'icloudPassword')
# Speech recognition recognizer used to send recorded audio to Google's speech recognition
r = sr.Recognizer()
## A phrase used to awaken Jarvis
awaken = ["Jarvis"]
awake = False
#mixer is used to play the saved audio file which is Jarvis 'speaking'
mixer.init()
##Opening phrases
welcome_phrases = ['What can I do for you?', 'What\'s up?', 'How can I be of assistance?']
greeting = random.randint(0, len(welcome_phrases)-1)
def speak(audioString):
print(audioString)
tts = gTTS(text=audioString, lang='en')
tts.save("audio.mp3")
os.system("mpg321 audio.mp3")
mixer.music.load('audio.mp3')
mixer.music.play()
def recordAudio():
# Record Audio
with sr.Microphone() as source:
print("Say something!")
audio = r.listen(source)
# Speech recognition using Google Speech Recognition
data = ""
try:
# Uses the default API key
# To use another API key: `r.recognize_google(audio, key="GOOGLE_SPEECH_RECOGNITION_API_KEY")`
data = r.recognize_google(audio)
print("You said: " + data)
except sr.UnknownValueError:
print("Google Speech Recognition could not understand audio")
except sr.RequestError as e:
print("Could not request results from Google Speech Recognition service; {0}".format(e))
return data
def awakenAlarm():
# Record Audio
with sr.Microphone() as source:
audio = r.listen(source)
# Speech recognition using Google Speech Recognition
data = ""
try:
# Uses the default API key
# To use another API key: `r.recognize_google(audio, key="GOOGLE_SPEECH_RECOGNITION_API_KEY")`
speak('Processing')
data = r.recognize_google(audio)
print("You said: " + data)
for i in range(0, len(awaken)):
if awaken[i] in data:
awake = True
speak(welcome_phrases[greeting])
except sr.UnknownValueError:
print("Google Speech Recognition could not understand audio")
except sr.RequestError as e:
print("Could not request results from Google Speech Recognition service; {0}".format(e))
return data
def jarvis(data):
if "weather" in data:
weather = 'http://api.openweathermap.org/data/2.5/weather?q=Brisbane,AU&appid=eccbc53293f9233984b66fc892ee71fe'
weather_data = urlopen(weather).read()
weather_data = str(weather_data)
minimal_temp = findall('"temp_min":(.*),"temp_max"', weather_data)
minimal_temp = float(minimal_temp[0])
maximum_temp = findall('"temp_max":(.*)},"vis', weather_data)
maximum_temp = float(maximum_temp[0])
minimal_temp = minimal_temp - 273.15
maximum_temp = maximum_temp - 273.15
avg_temp = (minimal_temp + maximum_temp) / 2
speak(str(avg_temp))
if "events for today" in data:
from_dt = date.today()
to_dt = date.today()
iCalEvents = iCloudService.calendar.events(from_dt, to_dt)
iCalEvents = str(iCalEvents)
iCalEvent_titles = findall("'title': '(.*)', 'location", iCalEvents)
iCalEvent_location = findall("'location': (.*), 'startDate", iCalEvents)
#iCalEvent = str(iCalEvents[0])
#iCaltitle = findall("'title': '([ A-Za-z]*)'", iCalEvent)
print(iCalEvents)
for i in iCalEvent_titles:
print(iCalEvent_titles)
print(iCalEvent_location)
if "how are you" in data:
speak("I am fine")
if "what time is it" in data:
speak(ctime())
if "where is" in data:
data = data.split(" ")
location = data[2]
speak("Hold on Frank, I will show you where " + location + " is.")
os.system("chromium-browser https://www.google.nl/maps/place/" + location + "/&")
# initialization
#while(awake == False):
# data = awakenAlarm()
while 1:
data = recordAudio()
jarvis(data)
|
""" Bring-Your-Own-Blocks Network
A flexible network w/ dataclass based config for stacking those NN blocks.
This model is currently used to implement the following networks:
GPU Efficient (ResNets) - gernet_l/m/s (original versions called genet, but this was already used (by SENet author)).
Paper: `Neural Architecture Design for GPU-Efficient Networks` - https://arxiv.org/abs/2006.14090
Code and weights: https://github.com/idstcv/GPU-Efficient-Networks, licensed Apache 2.0
RepVGG - repvgg_*
Paper: `Making VGG-style ConvNets Great Again` - https://arxiv.org/abs/2101.03697
Code and weights: https://github.com/DingXiaoH/RepVGG, licensed MIT
In all cases the models have been modified to fit within the design of ByobNet. I've remapped
the original weights and verified accuracies.
For GPU Efficient nets, I used the original names for the blocks since they were for the most part
the same as the original residual blocks in ResNe(X)t, DarkNet, and other existing models. Note that some
changes introduced in RegNet were also present in the stem and bottleneck blocks for this model.
A significant number of different network archs can be implemented here, including variants of the
above nets that include attention.
Hacked together by / copyright Ross Wightman, 2021.
"""
import math
from dataclasses import dataclass, field, replace
from typing import Tuple, List, Dict, Optional, Union, Any, Callable, Sequence
from functools import partial
import torch
import torch.nn as nn
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from .helpers import build_model_with_cfg, named_apply
from .layers import ClassifierHead, ConvBnAct, BatchNormAct2d, DropPath, AvgPool2dSame, \
create_conv2d, get_act_layer, convert_norm_act, get_attn, make_divisible, to_2tuple
from .registry import register_model
__all__ = ['ByobNet', 'ByoModelCfg', 'ByoBlockCfg', 'create_byob_stem', 'create_block']
def _cfg(url='', **kwargs):
return {
'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7),
'crop_pct': 0.875, 'interpolation': 'bilinear',
'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,
'first_conv': 'stem.conv', 'classifier': 'head.fc',
**kwargs
}
default_cfgs = {
# GPU-Efficient (ResNet) weights
'gernet_s': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-ger-weights/gernet_s-756b4751.pth'),
'gernet_m': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-ger-weights/gernet_m-0873c53a.pth'),
'gernet_l': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-ger-weights/gernet_l-f31e2e8d.pth',
input_size=(3, 256, 256), pool_size=(8, 8)),
# RepVGG weights
'repvgg_a2': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-repvgg-weights/repvgg_a2-c1ee6d2b.pth',
first_conv=('stem.conv_kxk.conv', 'stem.conv_1x1.conv')),
'repvgg_b0': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-repvgg-weights/repvgg_b0-80ac3f1b.pth',
first_conv=('stem.conv_kxk.conv', 'stem.conv_1x1.conv')),
'repvgg_b1': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-repvgg-weights/repvgg_b1-77ca2989.pth',
first_conv=('stem.conv_kxk.conv', 'stem.conv_1x1.conv')),
'repvgg_b1g4': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-repvgg-weights/repvgg_b1g4-abde5d92.pth',
first_conv=('stem.conv_kxk.conv', 'stem.conv_1x1.conv')),
'repvgg_b2': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-repvgg-weights/repvgg_b2-25b7494e.pth',
first_conv=('stem.conv_kxk.conv', 'stem.conv_1x1.conv')),
'repvgg_b2g4': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-repvgg-weights/repvgg_b2g4-165a85f2.pth',
first_conv=('stem.conv_kxk.conv', 'stem.conv_1x1.conv')),
'repvgg_b3': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-repvgg-weights/repvgg_b3-199bc50d.pth',
first_conv=('stem.conv_kxk.conv', 'stem.conv_1x1.conv')),
'repvgg_b3g4': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-repvgg-weights/repvgg_b3g4-73c370bf.pth',
first_conv=('stem.conv_kxk.conv', 'stem.conv_1x1.conv')),
# experimental configs
'resnet51q': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnet51q_ra2-d47dcc76.pth',
first_conv='stem.conv1', input_size=(3, 256, 256), pool_size=(8, 8),
test_input_size=(3, 288, 288), crop_pct=1.0),
'resnet61q': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnet61q_ra2-6afc536c.pth',
first_conv='stem.conv1.conv', input_size=(3, 256, 256), pool_size=(8, 8),
test_input_size=(3, 288, 288), crop_pct=1.0, interpolation='bicubic'),
'resnext26ts': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/resnext26ts_256_ra2-8bbd9106.pth',
first_conv='stem.conv1.conv', input_size=(3, 256, 256), pool_size=(8, 8), interpolation='bicubic'),
'gcresnext26ts': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/gcresnext26ts_256-e414378b.pth',
first_conv='stem.conv1.conv', input_size=(3, 256, 256), pool_size=(8, 8), interpolation='bicubic'),
'seresnext26ts': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/seresnext26ts_256-6f0d74a3.pth',
first_conv='stem.conv1.conv', input_size=(3, 256, 256), pool_size=(8, 8), interpolation='bicubic'),
'eca_resnext26ts': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/eca_resnext26ts_256-5a1d030f.pth',
first_conv='stem.conv1.conv', input_size=(3, 256, 256), pool_size=(8, 8), interpolation='bicubic'),
'bat_resnext26ts': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/bat_resnext26ts_256-fa6fd595.pth',
first_conv='stem.conv1.conv', input_size=(3, 256, 256), pool_size=(8, 8), interpolation='bicubic',
min_input_size=(3, 256, 256)),
'resnet32ts': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/resnet32ts_256-aacf5250.pth',
first_conv='stem.conv1.conv', input_size=(3, 256, 256), pool_size=(8, 8), interpolation='bicubic'),
'resnet33ts': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/resnet33ts_256-e91b09a4.pth',
first_conv='stem.conv1.conv', input_size=(3, 256, 256), pool_size=(8, 8), interpolation='bicubic'),
'gcresnet33ts': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/gcresnet33ts_256-0e0cd345.pth',
first_conv='stem.conv1.conv', input_size=(3, 256, 256), pool_size=(8, 8), interpolation='bicubic'),
'seresnet33ts': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/seresnet33ts_256-f8ad44d9.pth',
first_conv='stem.conv1.conv', input_size=(3, 256, 256), pool_size=(8, 8), interpolation='bicubic'),
'eca_resnet33ts': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/eca_resnet33ts_256-8f98face.pth',
first_conv='stem.conv1.conv', input_size=(3, 256, 256), pool_size=(8, 8), interpolation='bicubic'),
'gcresnet50t': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/gcresnet50t_256-96374d1c.pth',
first_conv='stem.conv1.conv', input_size=(3, 256, 256), pool_size=(8, 8), interpolation='bicubic'),
'gcresnext50ts': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/gcresnext50ts_256-3e0f515e.pth',
first_conv='stem.conv1.conv', input_size=(3, 256, 256), pool_size=(8, 8), interpolation='bicubic'),
}
@dataclass
class ByoBlockCfg:
type: Union[str, nn.Module]
d: int # block depth (number of block repeats in stage)
c: int # number of output channels for each block in stage
s: int = 2 # stride of stage (first block)
gs: Optional[Union[int, Callable]] = None # group-size of blocks in stage, conv is depthwise if gs == 1
br: float = 1. # bottleneck-ratio of blocks in stage
# NOTE: these config items override the model cfgs that are applied to all blocks by default
attn_layer: Optional[str] = None
attn_kwargs: Optional[Dict[str, Any]] = None
self_attn_layer: Optional[str] = None
self_attn_kwargs: Optional[Dict[str, Any]] = None
block_kwargs: Optional[Dict[str, Any]] = None
@dataclass
class ByoModelCfg:
blocks: Tuple[Union[ByoBlockCfg, Tuple[ByoBlockCfg, ...]], ...]
downsample: str = 'conv1x1'
stem_type: str = '3x3'
stem_pool: Optional[str] = 'maxpool'
stem_chs: int = 32
width_factor: float = 1.0
num_features: int = 0 # num out_channels for final conv, no final 1x1 conv if 0
zero_init_last: bool = True # zero init last weight (usually bn) in residual path
fixed_input_size: bool = False # model constrained to a fixed-input size / img_size must be provided on creation
act_layer: str = 'relu'
norm_layer: str = 'batchnorm'
# NOTE: these config items will be overridden by the block cfg (per-block) if they are set there
attn_layer: Optional[str] = None
attn_kwargs: dict = field(default_factory=lambda: dict())
self_attn_layer: Optional[str] = None
self_attn_kwargs: dict = field(default_factory=lambda: dict())
block_kwargs: Dict[str, Any] = field(default_factory=lambda: dict())
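# Illustrative sketch (not one of the registered configs below): a minimal two-stage
# config showing how ByoBlockCfg and ByoModelCfg compose.
_tiny_demo_cfg = ByoModelCfg(
    blocks=(
        ByoBlockCfg(type='basic', d=2, c=64, s=1),
        ByoBlockCfg(type='bottle', d=2, c=128, s=2, gs=32, br=0.25),
    ),
    stem_chs=32,
    stem_pool='maxpool',
    act_layer='relu',
)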
def _rep_vgg_bcfg(d=(4, 6, 16, 1), wf=(1., 1., 1., 1.), groups=0):
c = (64, 128, 256, 512)
group_size = 0
if groups > 0:
group_size = lambda chs, idx: chs // groups if (idx + 1) % 2 == 0 else 0
bcfg = tuple([ByoBlockCfg(type='rep', d=d, c=c * wf, gs=group_size) for d, c, wf in zip(d, c, wf)])
return bcfg
def interleave_blocks(
types: Tuple[str, str], d, every: Union[int, List[int]] = 1, first: bool = False, **kwargs
) -> Tuple[ByoBlockCfg]:
""" interleave 2 block types in stack
"""
assert len(types) == 2
if isinstance(every, int):
every = list(range(0 if first else every, d, every + 1))
if not every:
every = [d - 1]
every = set(every)
blocks = []
for i in range(d):
block_type = types[1] if i in every else types[0]
blocks += [ByoBlockCfg(type=block_type, d=1, **kwargs)]
return tuple(blocks)
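# Illustrative sketch (not a config used below): interleave two of the block types
# defined in this file within a 3-deep stage; the result is three single-repeat
# cfgs of types ('basic', 'bottle', 'basic').
def _interleave_demo():
    return interleave_blocks(('basic', 'bottle'), d=3, every=1, c=256, s=2, gs=32, br=0.25)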
model_cfgs = dict(
gernet_l=ByoModelCfg(
blocks=(
ByoBlockCfg(type='basic', d=1, c=128, s=2, gs=0, br=1.),
ByoBlockCfg(type='basic', d=2, c=192, s=2, gs=0, br=1.),
ByoBlockCfg(type='bottle', d=6, c=640, s=2, gs=0, br=1 / 4),
ByoBlockCfg(type='bottle', d=5, c=640, s=2, gs=1, br=3.),
ByoBlockCfg(type='bottle', d=4, c=640, s=1, gs=1, br=3.),
),
stem_chs=32,
stem_pool=None,
num_features=2560,
),
gernet_m=ByoModelCfg(
blocks=(
ByoBlockCfg(type='basic', d=1, c=128, s=2, gs=0, br=1.),
ByoBlockCfg(type='basic', d=2, c=192, s=2, gs=0, br=1.),
ByoBlockCfg(type='bottle', d=6, c=640, s=2, gs=0, br=1 / 4),
ByoBlockCfg(type='bottle', d=4, c=640, s=2, gs=1, br=3.),
ByoBlockCfg(type='bottle', d=1, c=640, s=1, gs=1, br=3.),
),
stem_chs=32,
stem_pool=None,
num_features=2560,
),
gernet_s=ByoModelCfg(
blocks=(
ByoBlockCfg(type='basic', d=1, c=48, s=2, gs=0, br=1.),
ByoBlockCfg(type='basic', d=3, c=48, s=2, gs=0, br=1.),
ByoBlockCfg(type='bottle', d=7, c=384, s=2, gs=0, br=1 / 4),
ByoBlockCfg(type='bottle', d=2, c=560, s=2, gs=1, br=3.),
ByoBlockCfg(type='bottle', d=1, c=256, s=1, gs=1, br=3.),
),
stem_chs=13,
stem_pool=None,
num_features=1920,
),
repvgg_a2=ByoModelCfg(
blocks=_rep_vgg_bcfg(d=(2, 4, 14, 1), wf=(1.5, 1.5, 1.5, 2.75)),
stem_type='rep',
stem_chs=64,
),
repvgg_b0=ByoModelCfg(
blocks=_rep_vgg_bcfg(wf=(1., 1., 1., 2.5)),
stem_type='rep',
stem_chs=64,
),
repvgg_b1=ByoModelCfg(
blocks=_rep_vgg_bcfg(wf=(2., 2., 2., 4.)),
stem_type='rep',
stem_chs=64,
),
repvgg_b1g4=ByoModelCfg(
blocks=_rep_vgg_bcfg(wf=(2., 2., 2., 4.), groups=4),
stem_type='rep',
stem_chs=64,
),
repvgg_b2=ByoModelCfg(
blocks=_rep_vgg_bcfg(wf=(2.5, 2.5, 2.5, 5.)),
stem_type='rep',
stem_chs=64,
),
repvgg_b2g4=ByoModelCfg(
blocks=_rep_vgg_bcfg(wf=(2.5, 2.5, 2.5, 5.), groups=4),
stem_type='rep',
stem_chs=64,
),
repvgg_b3=ByoModelCfg(
blocks=_rep_vgg_bcfg(wf=(3., 3., 3., 5.)),
stem_type='rep',
stem_chs=64,
),
repvgg_b3g4=ByoModelCfg(
blocks=_rep_vgg_bcfg(wf=(3., 3., 3., 5.), groups=4),
stem_type='rep',
stem_chs=64,
),
# 4 x conv stem w/ 2 act, no maxpool, 2,4,6,4 repeats, group size 32 in first 3 blocks
# DW convs in last block, 2048 pre-FC, silu act
resnet51q=ByoModelCfg(
blocks=(
ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=32, br=0.25),
ByoBlockCfg(type='bottle', d=4, c=512, s=2, gs=32, br=0.25),
ByoBlockCfg(type='bottle', d=6, c=1536, s=2, gs=32, br=0.25),
ByoBlockCfg(type='bottle', d=4, c=1536, s=2, gs=1, br=1.0),
),
stem_chs=128,
stem_type='quad2',
stem_pool=None,
num_features=2048,
act_layer='silu',
),
# 4 x conv stem w/ 4 act, no maxpool, 1,4,6,4 repeats, edge block first, group size 32 in next 2 blocks
# DW convs in last block, 4 conv for each bottle block, 2048 pre-FC, silu act
resnet61q=ByoModelCfg(
blocks=(
ByoBlockCfg(type='edge', d=1, c=256, s=1, gs=0, br=1.0, block_kwargs=dict()),
ByoBlockCfg(type='bottle', d=4, c=512, s=2, gs=32, br=0.25),
ByoBlockCfg(type='bottle', d=6, c=1536, s=2, gs=32, br=0.25),
ByoBlockCfg(type='bottle', d=4, c=1536, s=2, gs=1, br=1.0),
),
stem_chs=128,
stem_type='quad',
stem_pool=None,
num_features=2048,
act_layer='silu',
block_kwargs=dict(extra_conv=True),
),
# A series of ResNeXt-26 models w/ one of none, GC, SE, ECA, BAT attn, group size 32, SiLU act,
# and a tiered stem w/ maxpool
resnext26ts=ByoModelCfg(
blocks=(
ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=32, br=0.25),
ByoBlockCfg(type='bottle', d=2, c=512, s=2, gs=32, br=0.25),
ByoBlockCfg(type='bottle', d=2, c=1024, s=2, gs=32, br=0.25),
ByoBlockCfg(type='bottle', d=2, c=2048, s=2, gs=32, br=0.25),
),
stem_chs=64,
stem_type='tiered',
stem_pool='maxpool',
act_layer='silu',
),
gcresnext26ts=ByoModelCfg(
blocks=(
ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=32, br=0.25),
ByoBlockCfg(type='bottle', d=2, c=512, s=2, gs=32, br=0.25),
ByoBlockCfg(type='bottle', d=2, c=1024, s=2, gs=32, br=0.25),
ByoBlockCfg(type='bottle', d=2, c=2048, s=2, gs=32, br=0.25),
),
stem_chs=64,
stem_type='tiered',
stem_pool='maxpool',
act_layer='silu',
attn_layer='gca',
),
seresnext26ts=ByoModelCfg(
blocks=(
ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=32, br=0.25),
ByoBlockCfg(type='bottle', d=2, c=512, s=2, gs=32, br=0.25),
ByoBlockCfg(type='bottle', d=2, c=1024, s=2, gs=32, br=0.25),
ByoBlockCfg(type='bottle', d=2, c=2048, s=2, gs=32, br=0.25),
),
stem_chs=64,
stem_type='tiered',
stem_pool='maxpool',
act_layer='silu',
attn_layer='se',
),
eca_resnext26ts=ByoModelCfg(
blocks=(
ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=32, br=0.25),
ByoBlockCfg(type='bottle', d=2, c=512, s=2, gs=32, br=0.25),
ByoBlockCfg(type='bottle', d=2, c=1024, s=2, gs=32, br=0.25),
ByoBlockCfg(type='bottle', d=2, c=2048, s=2, gs=32, br=0.25),
),
stem_chs=64,
stem_type='tiered',
stem_pool='maxpool',
act_layer='silu',
attn_layer='eca',
),
bat_resnext26ts=ByoModelCfg(
blocks=(
ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=32, br=0.25),
ByoBlockCfg(type='bottle', d=2, c=512, s=2, gs=32, br=0.25),
ByoBlockCfg(type='bottle', d=2, c=1024, s=2, gs=32, br=0.25),
ByoBlockCfg(type='bottle', d=2, c=2048, s=2, gs=32, br=0.25),
),
stem_chs=64,
stem_type='tiered',
stem_pool='maxpool',
act_layer='silu',
attn_layer='bat',
attn_kwargs=dict(block_size=8)
),
# ResNet-32 (2, 3, 3, 2) models w/ no attn, no groups, SiLU act, no pre-fc feat layer, tiered stem w/o maxpool
resnet32ts=ByoModelCfg(
blocks=(
ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=0, br=0.25),
ByoBlockCfg(type='bottle', d=3, c=512, s=2, gs=0, br=0.25),
ByoBlockCfg(type='bottle', d=3, c=1536, s=2, gs=0, br=0.25),
ByoBlockCfg(type='bottle', d=2, c=1536, s=2, gs=0, br=0.25),
),
stem_chs=64,
stem_type='tiered',
stem_pool='',
num_features=0,
act_layer='silu',
),
# ResNet-33 (2, 3, 3, 2) models w/ no attn, no groups, SiLU act, 1280 pre-FC feat, tiered stem w/o maxpool
resnet33ts=ByoModelCfg(
blocks=(
ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=0, br=0.25),
ByoBlockCfg(type='bottle', d=3, c=512, s=2, gs=0, br=0.25),
ByoBlockCfg(type='bottle', d=3, c=1536, s=2, gs=0, br=0.25),
ByoBlockCfg(type='bottle', d=2, c=1536, s=2, gs=0, br=0.25),
),
stem_chs=64,
stem_type='tiered',
stem_pool='',
num_features=1280,
act_layer='silu',
),
# A series of ResNet-33 (2, 3, 3, 2) models w/ one of GC, SE, ECA attn, no groups, SiLU act, 1280 pre-FC feat
# and a tiered stem w/ no maxpool
gcresnet33ts=ByoModelCfg(
blocks=(
ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=0, br=0.25),
ByoBlockCfg(type='bottle', d=3, c=512, s=2, gs=0, br=0.25),
ByoBlockCfg(type='bottle', d=3, c=1536, s=2, gs=0, br=0.25),
ByoBlockCfg(type='bottle', d=2, c=1536, s=2, gs=0, br=0.25),
),
stem_chs=64,
stem_type='tiered',
stem_pool='',
num_features=1280,
act_layer='silu',
attn_layer='gca',
),
seresnet33ts=ByoModelCfg(
blocks=(
ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=0, br=0.25),
ByoBlockCfg(type='bottle', d=3, c=512, s=2, gs=0, br=0.25),
ByoBlockCfg(type='bottle', d=3, c=1536, s=2, gs=0, br=0.25),
ByoBlockCfg(type='bottle', d=2, c=1536, s=2, gs=0, br=0.25),
),
stem_chs=64,
stem_type='tiered',
stem_pool='',
num_features=1280,
act_layer='silu',
attn_layer='se',
),
eca_resnet33ts=ByoModelCfg(
blocks=(
ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=0, br=0.25),
ByoBlockCfg(type='bottle', d=3, c=512, s=2, gs=0, br=0.25),
ByoBlockCfg(type='bottle', d=3, c=1536, s=2, gs=0, br=0.25),
ByoBlockCfg(type='bottle', d=2, c=1536, s=2, gs=0, br=0.25),
),
stem_chs=64,
stem_type='tiered',
stem_pool='',
num_features=1280,
act_layer='silu',
attn_layer='eca',
),
gcresnet50t=ByoModelCfg(
blocks=(
ByoBlockCfg(type='bottle', d=3, c=256, s=1, br=0.25),
ByoBlockCfg(type='bottle', d=4, c=512, s=2, br=0.25),
ByoBlockCfg(type='bottle', d=6, c=1024, s=2, br=0.25),
ByoBlockCfg(type='bottle', d=3, c=2048, s=2, br=0.25),
),
stem_chs=64,
stem_type='tiered',
stem_pool='',
attn_layer='gca',
),
gcresnext50ts=ByoModelCfg(
blocks=(
ByoBlockCfg(type='bottle', d=3, c=256, s=1, gs=32, br=0.25),
ByoBlockCfg(type='bottle', d=4, c=512, s=2, gs=32, br=0.25),
ByoBlockCfg(type='bottle', d=6, c=1024, s=2, gs=32, br=0.25),
ByoBlockCfg(type='bottle', d=3, c=2048, s=2, gs=32, br=0.25),
),
stem_chs=64,
stem_type='tiered',
stem_pool='maxpool',
# stem_pool=None,
act_layer='silu',
attn_layer='gca',
),
)
@register_model
def gernet_l(pretrained=False, **kwargs):
""" GEResNet-Large (GENet-Large from official impl)
`Neural Architecture Design for GPU-Efficient Networks` - https://arxiv.org/abs/2006.14090
"""
return _create_byobnet('gernet_l', pretrained=pretrained, **kwargs)
@register_model
def gernet_m(pretrained=False, **kwargs):
""" GEResNet-Medium (GENet-Normal from official impl)
`Neural Architecture Design for GPU-Efficient Networks` - https://arxiv.org/abs/2006.14090
"""
return _create_byobnet('gernet_m', pretrained=pretrained, **kwargs)
@register_model
def gernet_s(pretrained=False, **kwargs):
""" EResNet-Small (GENet-Small from official impl)
`Neural Architecture Design for GPU-Efficient Networks` - https://arxiv.org/abs/2006.14090
"""
return _create_byobnet('gernet_s', pretrained=pretrained, **kwargs)
@register_model
def repvgg_a2(pretrained=False, **kwargs):
""" RepVGG-A2
`Making VGG-style ConvNets Great Again` - https://arxiv.org/abs/2101.03697
"""
return _create_byobnet('repvgg_a2', pretrained=pretrained, **kwargs)
@register_model
def repvgg_b0(pretrained=False, **kwargs):
""" RepVGG-B0
`Making VGG-style ConvNets Great Again` - https://arxiv.org/abs/2101.03697
"""
return _create_byobnet('repvgg_b0', pretrained=pretrained, **kwargs)
@register_model
def repvgg_b1(pretrained=False, **kwargs):
""" RepVGG-B1
`Making VGG-style ConvNets Great Again` - https://arxiv.org/abs/2101.03697
"""
return _create_byobnet('repvgg_b1', pretrained=pretrained, **kwargs)
@register_model
def repvgg_b1g4(pretrained=False, **kwargs):
""" RepVGG-B1g4
`Making VGG-style ConvNets Great Again` - https://arxiv.org/abs/2101.03697
"""
return _create_byobnet('repvgg_b1g4', pretrained=pretrained, **kwargs)
@register_model
def repvgg_b2(pretrained=False, **kwargs):
""" RepVGG-B2
`Making VGG-style ConvNets Great Again` - https://arxiv.org/abs/2101.03697
"""
return _create_byobnet('repvgg_b2', pretrained=pretrained, **kwargs)
@register_model
def repvgg_b2g4(pretrained=False, **kwargs):
""" RepVGG-B2g4
`Making VGG-style ConvNets Great Again` - https://arxiv.org/abs/2101.03697
"""
return _create_byobnet('repvgg_b2g4', pretrained=pretrained, **kwargs)
@register_model
def repvgg_b3(pretrained=False, **kwargs):
""" RepVGG-B3
`Making VGG-style ConvNets Great Again` - https://arxiv.org/abs/2101.03697
"""
return _create_byobnet('repvgg_b3', pretrained=pretrained, **kwargs)
@register_model
def repvgg_b3g4(pretrained=False, **kwargs):
""" RepVGG-B3g4
`Making VGG-style ConvNets Great Again` - https://arxiv.org/abs/2101.03697
"""
return _create_byobnet('repvgg_b3g4', pretrained=pretrained, **kwargs)
@register_model
def resnet51q(pretrained=False, **kwargs):
"""
"""
return _create_byobnet('resnet51q', pretrained=pretrained, **kwargs)
@register_model
def resnet61q(pretrained=False, **kwargs):
"""
"""
return _create_byobnet('resnet61q', pretrained=pretrained, **kwargs)
@register_model
def resnext26ts(pretrained=False, **kwargs):
"""
"""
return _create_byobnet('resnext26ts', pretrained=pretrained, **kwargs)
@register_model
def gcresnext26ts(pretrained=False, **kwargs):
"""
"""
return _create_byobnet('gcresnext26ts', pretrained=pretrained, **kwargs)
@register_model
def seresnext26ts(pretrained=False, **kwargs):
"""
"""
return _create_byobnet('seresnext26ts', pretrained=pretrained, **kwargs)
@register_model
def eca_resnext26ts(pretrained=False, **kwargs):
"""
"""
return _create_byobnet('eca_resnext26ts', pretrained=pretrained, **kwargs)
@register_model
def bat_resnext26ts(pretrained=False, **kwargs):
"""
"""
return _create_byobnet('bat_resnext26ts', pretrained=pretrained, **kwargs)
@register_model
def resnet32ts(pretrained=False, **kwargs):
"""
"""
return _create_byobnet('resnet32ts', pretrained=pretrained, **kwargs)
@register_model
def resnet33ts(pretrained=False, **kwargs):
"""
"""
return _create_byobnet('resnet33ts', pretrained=pretrained, **kwargs)
@register_model
def gcresnet33ts(pretrained=False, **kwargs):
"""
"""
return _create_byobnet('gcresnet33ts', pretrained=pretrained, **kwargs)
@register_model
def seresnet33ts(pretrained=False, **kwargs):
"""
"""
return _create_byobnet('seresnet33ts', pretrained=pretrained, **kwargs)
@register_model
def eca_resnet33ts(pretrained=False, **kwargs):
"""
"""
return _create_byobnet('eca_resnet33ts', pretrained=pretrained, **kwargs)
@register_model
def gcresnet50t(pretrained=False, **kwargs):
"""
"""
return _create_byobnet('gcresnet50t', pretrained=pretrained, **kwargs)
@register_model
def gcresnext50ts(pretrained=False, **kwargs):
"""
"""
return _create_byobnet('gcresnext50ts', pretrained=pretrained, **kwargs)
def expand_blocks_cfg(stage_blocks_cfg: Union[ByoBlockCfg, Sequence[ByoBlockCfg]]) -> List[ByoBlockCfg]:
if not isinstance(stage_blocks_cfg, Sequence):
stage_blocks_cfg = (stage_blocks_cfg,)
block_cfgs = []
for i, cfg in enumerate(stage_blocks_cfg):
block_cfgs += [replace(cfg, d=1) for _ in range(cfg.d)]
return block_cfgs
def num_groups(group_size, channels):
if not group_size: # 0 or None
return 1 # normal conv with 1 group
else:
# NOTE group_size == 1 -> depthwise conv
assert channels % group_size == 0
return channels // group_size
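# e.g. num_groups(None, 256) -> 1 (regular conv), num_groups(32, 256) -> 8 groups,
# num_groups(1, 256) -> 256 (a depthwise conv).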
@dataclass
class LayerFn:
conv_norm_act: Callable = ConvBnAct
norm_act: Callable = BatchNormAct2d
act: Callable = nn.ReLU
attn: Optional[Callable] = None
self_attn: Optional[Callable] = None
class DownsampleAvg(nn.Module):
def __init__(self, in_chs, out_chs, stride=1, dilation=1, apply_act=False, layers: LayerFn = None):
""" AvgPool Downsampling as in 'D' ResNet variants."""
super(DownsampleAvg, self).__init__()
layers = layers or LayerFn()
avg_stride = stride if dilation == 1 else 1
if stride > 1 or dilation > 1:
avg_pool_fn = AvgPool2dSame if avg_stride == 1 and dilation > 1 else nn.AvgPool2d
self.pool = avg_pool_fn(2, avg_stride, ceil_mode=True, count_include_pad=False)
else:
self.pool = nn.Identity()
self.conv = layers.conv_norm_act(in_chs, out_chs, 1, apply_act=apply_act)
def forward(self, x):
return self.conv(self.pool(x))
def create_downsample(downsample_type, layers: LayerFn, **kwargs):
if downsample_type == 'avg':
return DownsampleAvg(**kwargs)
else:
return layers.conv_norm_act(kwargs.pop('in_chs'), kwargs.pop('out_chs'), kernel_size=1, **kwargs)
class BasicBlock(nn.Module):
""" ResNet Basic Block - kxk + kxk
"""
def __init__(
self, in_chs, out_chs, kernel_size=3, stride=1, dilation=(1, 1), group_size=None, bottle_ratio=1.0,
downsample='avg', attn_last=True, linear_out=False, layers: LayerFn = None, drop_block=None,
drop_path_rate=0.):
super(BasicBlock, self).__init__()
layers = layers or LayerFn()
mid_chs = make_divisible(out_chs * bottle_ratio)
groups = num_groups(group_size, mid_chs)
if in_chs != out_chs or stride != 1 or dilation[0] != dilation[1]:
self.shortcut = create_downsample(
downsample, in_chs=in_chs, out_chs=out_chs, stride=stride, dilation=dilation[0],
apply_act=False, layers=layers)
else:
self.shortcut = nn.Identity()
self.conv1_kxk = layers.conv_norm_act(in_chs, mid_chs, kernel_size, stride=stride, dilation=dilation[0])
self.attn = nn.Identity() if attn_last or layers.attn is None else layers.attn(mid_chs)
self.conv2_kxk = layers.conv_norm_act(
mid_chs, out_chs, kernel_size, dilation=dilation[1], groups=groups, drop_block=drop_block, apply_act=False)
self.attn_last = nn.Identity() if not attn_last or layers.attn is None else layers.attn(out_chs)
self.drop_path = DropPath(drop_path_rate) if drop_path_rate > 0. else nn.Identity()
self.act = nn.Identity() if linear_out else layers.act(inplace=True)
def init_weights(self, zero_init_last: bool = False):
if zero_init_last:
nn.init.zeros_(self.conv2_kxk.bn.weight)
for attn in (self.attn, self.attn_last):
if hasattr(attn, 'reset_parameters'):
attn.reset_parameters()
def forward(self, x):
shortcut = self.shortcut(x)
# residual path
x = self.conv1_kxk(x)
x = self.conv2_kxk(x)
x = self.attn(x)
x = self.drop_path(x)
x = self.act(x + shortcut)
return x
class BottleneckBlock(nn.Module):
""" ResNet-like Bottleneck Block - 1x1 - kxk - 1x1
"""
def __init__(self, in_chs, out_chs, kernel_size=3, stride=1, dilation=(1, 1), bottle_ratio=1., group_size=None,
downsample='avg', attn_last=False, linear_out=False, extra_conv=False, layers: LayerFn = None,
drop_block=None, drop_path_rate=0.):
super(BottleneckBlock, self).__init__()
layers = layers or LayerFn()
mid_chs = make_divisible(out_chs * bottle_ratio)
groups = num_groups(group_size, mid_chs)
if in_chs != out_chs or stride != 1 or dilation[0] != dilation[1]:
self.shortcut = create_downsample(
downsample, in_chs=in_chs, out_chs=out_chs, stride=stride, dilation=dilation[0],
apply_act=False, layers=layers)
else:
self.shortcut = nn.Identity()
self.conv1_1x1 = layers.conv_norm_act(in_chs, mid_chs, 1)
self.conv2_kxk = layers.conv_norm_act(
mid_chs, mid_chs, kernel_size, stride=stride, dilation=dilation[0],
groups=groups, drop_block=drop_block)
if extra_conv:
self.conv2b_kxk = layers.conv_norm_act(
mid_chs, mid_chs, kernel_size, dilation=dilation[1], groups=groups, drop_block=drop_block)
else:
self.conv2b_kxk = nn.Identity()
self.attn = nn.Identity() if attn_last or layers.attn is None else layers.attn(mid_chs)
self.conv3_1x1 = layers.conv_norm_act(mid_chs, out_chs, 1, apply_act=False)
self.attn_last = nn.Identity() if not attn_last or layers.attn is None else layers.attn(out_chs)
self.drop_path = DropPath(drop_path_rate) if drop_path_rate > 0. else nn.Identity()
self.act = nn.Identity() if linear_out else layers.act(inplace=True)
def init_weights(self, zero_init_last: bool = False):
if zero_init_last:
nn.init.zeros_(self.conv3_1x1.bn.weight)
for attn in (self.attn, self.attn_last):
if hasattr(attn, 'reset_parameters'):
attn.reset_parameters()
def forward(self, x):
shortcut = self.shortcut(x)
x = self.conv1_1x1(x)
x = self.conv2_kxk(x)
x = self.conv2b_kxk(x)
x = self.attn(x)
x = self.conv3_1x1(x)
x = self.attn_last(x)
x = self.drop_path(x)
x = self.act(x + shortcut)
return x
class DarkBlock(nn.Module):
""" DarkNet-like (1x1 + 3x3 w/ stride) block
The GE-Net impl included a 1x1 + 3x3 block in their search space. It was not used in the feature models.
This block is pretty much a DarkNet block (also DenseNet), hence the name. Neither DarkNet nor DenseNet
uses strides within the block (external 3x3 or maxpool downsampling is done in front of the block repeats).
If one does want to use a lot of these blocks w/ stride, I'd recommend using the EdgeBlock (3x3 /w stride + 1x1)
for more optimal compute.
"""
def __init__(self, in_chs, out_chs, kernel_size=3, stride=1, dilation=(1, 1), bottle_ratio=1.0, group_size=None,
downsample='avg', attn_last=True, linear_out=False, layers: LayerFn = None, drop_block=None,
drop_path_rate=0.):
super(DarkBlock, self).__init__()
layers = layers or LayerFn()
mid_chs = make_divisible(out_chs * bottle_ratio)
groups = num_groups(group_size, mid_chs)
if in_chs != out_chs or stride != 1 or dilation[0] != dilation[1]:
self.shortcut = create_downsample(
downsample, in_chs=in_chs, out_chs=out_chs, stride=stride, dilation=dilation[0],
apply_act=False, layers=layers)
else:
self.shortcut = nn.Identity()
self.conv1_1x1 = layers.conv_norm_act(in_chs, mid_chs, 1)
self.attn = nn.Identity() if attn_last or layers.attn is None else layers.attn(mid_chs)
self.conv2_kxk = layers.conv_norm_act(
mid_chs, out_chs, kernel_size, stride=stride, dilation=dilation[0],
groups=groups, drop_block=drop_block, apply_act=False)
self.attn_last = nn.Identity() if not attn_last or layers.attn is None else layers.attn(out_chs)
self.drop_path = DropPath(drop_path_rate) if drop_path_rate > 0. else nn.Identity()
self.act = nn.Identity() if linear_out else layers.act(inplace=True)
def init_weights(self, zero_init_last: bool = False):
if zero_init_last:
nn.init.zeros_(self.conv2_kxk.bn.weight)
for attn in (self.attn, self.attn_last):
if hasattr(attn, 'reset_parameters'):
attn.reset_parameters()
def forward(self, x):
shortcut = self.shortcut(x)
x = self.conv1_1x1(x)
x = self.attn(x)
x = self.conv2_kxk(x)
x = self.attn_last(x)
x = self.drop_path(x)
x = self.act(x + shortcut)
return x
class EdgeBlock(nn.Module):
""" EdgeResidual-like (3x3 + 1x1) block
A two layer block like DarkBlock, but with the order of the 3x3 and 1x1 convs reversed.
    Very similar to the EfficientNet Edge-Residual block, but this block ends with activations, is
intended to be used with either expansion or bottleneck contraction, and can use DW/group/non-grouped convs.
FIXME is there a more common 3x3 + 1x1 conv block to name this after?
"""
def __init__(self, in_chs, out_chs, kernel_size=3, stride=1, dilation=(1, 1), bottle_ratio=1.0, group_size=None,
downsample='avg', attn_last=False, linear_out=False, layers: LayerFn = None,
drop_block=None, drop_path_rate=0.):
super(EdgeBlock, self).__init__()
layers = layers or LayerFn()
mid_chs = make_divisible(out_chs * bottle_ratio)
groups = num_groups(group_size, mid_chs)
if in_chs != out_chs or stride != 1 or dilation[0] != dilation[1]:
self.shortcut = create_downsample(
downsample, in_chs=in_chs, out_chs=out_chs, stride=stride, dilation=dilation[0],
apply_act=False, layers=layers)
else:
self.shortcut = nn.Identity()
self.conv1_kxk = layers.conv_norm_act(
in_chs, mid_chs, kernel_size, stride=stride, dilation=dilation[0],
groups=groups, drop_block=drop_block)
self.attn = nn.Identity() if attn_last or layers.attn is None else layers.attn(mid_chs)
self.conv2_1x1 = layers.conv_norm_act(mid_chs, out_chs, 1, apply_act=False)
self.attn_last = nn.Identity() if not attn_last or layers.attn is None else layers.attn(out_chs)
self.drop_path = DropPath(drop_path_rate) if drop_path_rate > 0. else nn.Identity()
self.act = nn.Identity() if linear_out else layers.act(inplace=True)
def init_weights(self, zero_init_last: bool = False):
if zero_init_last:
nn.init.zeros_(self.conv2_1x1.bn.weight)
for attn in (self.attn, self.attn_last):
if hasattr(attn, 'reset_parameters'):
attn.reset_parameters()
def forward(self, x):
shortcut = self.shortcut(x)
x = self.conv1_kxk(x)
x = self.attn(x)
x = self.conv2_1x1(x)
x = self.attn_last(x)
x = self.drop_path(x)
x = self.act(x + shortcut)
return x
class RepVggBlock(nn.Module):
""" RepVGG Block.
Adapted from impl at https://github.com/DingXiaoH/RepVGG
    This version does not currently support the deploy optimization; it is fixed in 'train' mode.
"""
def __init__(self, in_chs, out_chs, kernel_size=3, stride=1, dilation=(1, 1), bottle_ratio=1.0, group_size=None,
downsample='', layers: LayerFn = None, drop_block=None, drop_path_rate=0.):
super(RepVggBlock, self).__init__()
layers = layers or LayerFn()
groups = num_groups(group_size, in_chs)
use_ident = in_chs == out_chs and stride == 1 and dilation[0] == dilation[1]
self.identity = layers.norm_act(out_chs, apply_act=False) if use_ident else None
self.conv_kxk = layers.conv_norm_act(
in_chs, out_chs, kernel_size, stride=stride, dilation=dilation[0],
groups=groups, drop_block=drop_block, apply_act=False)
self.conv_1x1 = layers.conv_norm_act(in_chs, out_chs, 1, stride=stride, groups=groups, apply_act=False)
self.attn = nn.Identity() if layers.attn is None else layers.attn(out_chs)
self.drop_path = DropPath(drop_path_rate) if drop_path_rate > 0. and use_ident else nn.Identity()
self.act = layers.act(inplace=True)
def init_weights(self, zero_init_last: bool = False):
        # NOTE this init overrides the base model init with specific changes for the block type
for m in self.modules():
if isinstance(m, nn.BatchNorm2d):
nn.init.normal_(m.weight, .1, .1)
nn.init.normal_(m.bias, 0, .1)
if hasattr(self.attn, 'reset_parameters'):
self.attn.reset_parameters()
def forward(self, x):
if self.identity is None:
x = self.conv_1x1(x) + self.conv_kxk(x)
else:
identity = self.identity(x)
x = self.conv_1x1(x) + self.conv_kxk(x)
x = self.drop_path(x) # not in the paper / official impl, experimental
x = x + identity
x = self.attn(x) # no attn in the paper / official impl, experimental
x = self.act(x)
return x
class SelfAttnBlock(nn.Module):
""" ResNet-like Bottleneck Block - 1x1 - optional kxk - self attn - 1x1
"""
def __init__(self, in_chs, out_chs, kernel_size=3, stride=1, dilation=(1, 1), bottle_ratio=1., group_size=None,
downsample='avg', extra_conv=False, linear_out=False, post_attn_na=True, feat_size=None,
layers: LayerFn = None, drop_block=None, drop_path_rate=0.):
super(SelfAttnBlock, self).__init__()
assert layers is not None
mid_chs = make_divisible(out_chs * bottle_ratio)
groups = num_groups(group_size, mid_chs)
if in_chs != out_chs or stride != 1 or dilation[0] != dilation[1]:
self.shortcut = create_downsample(
downsample, in_chs=in_chs, out_chs=out_chs, stride=stride, dilation=dilation[0],
apply_act=False, layers=layers)
else:
self.shortcut = nn.Identity()
self.conv1_1x1 = layers.conv_norm_act(in_chs, mid_chs, 1)
if extra_conv:
self.conv2_kxk = layers.conv_norm_act(
mid_chs, mid_chs, kernel_size, stride=stride, dilation=dilation[0],
groups=groups, drop_block=drop_block)
stride = 1 # striding done via conv if enabled
else:
self.conv2_kxk = nn.Identity()
opt_kwargs = {} if feat_size is None else dict(feat_size=feat_size)
# FIXME need to dilate self attn to have dilated network support, moop moop
self.self_attn = layers.self_attn(mid_chs, stride=stride, **opt_kwargs)
self.post_attn = layers.norm_act(mid_chs) if post_attn_na else nn.Identity()
self.conv3_1x1 = layers.conv_norm_act(mid_chs, out_chs, 1, apply_act=False)
self.drop_path = DropPath(drop_path_rate) if drop_path_rate > 0. else nn.Identity()
self.act = nn.Identity() if linear_out else layers.act(inplace=True)
def init_weights(self, zero_init_last: bool = False):
if zero_init_last:
nn.init.zeros_(self.conv3_1x1.bn.weight)
if hasattr(self.self_attn, 'reset_parameters'):
self.self_attn.reset_parameters()
def forward(self, x):
shortcut = self.shortcut(x)
x = self.conv1_1x1(x)
x = self.conv2_kxk(x)
x = self.self_attn(x)
x = self.post_attn(x)
x = self.conv3_1x1(x)
x = self.drop_path(x)
x = self.act(x + shortcut)
return x
_block_registry = dict(
basic=BasicBlock,
bottle=BottleneckBlock,
dark=DarkBlock,
edge=EdgeBlock,
rep=RepVggBlock,
self_attn=SelfAttnBlock,
)
def register_block(block_type: str, block_fn: nn.Module):
_block_registry[block_type] = block_fn
def create_block(block: Union[str, nn.Module], **kwargs):
if isinstance(block, (nn.Module, partial)):
return block(**kwargs)
    assert block in _block_registry, f'Unknown block type ({block})'
return _block_registry[block](**kwargs)
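# Usage sketch (hypothetical `MyBlock`; assumed to accept the same constructor
# kwargs as the blocks above, e.g. in_chs/out_chs/stride/layers):
#   register_block('my_block', MyBlock)
#   blk = create_block('my_block', in_chs=64, out_chs=128, stride=2, layers=LayerFn())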
class Stem(nn.Sequential):
def __init__(self, in_chs, out_chs, kernel_size=3, stride=4, pool='maxpool',
num_rep=3, num_act=None, chs_decay=0.5, layers: LayerFn = None):
super().__init__()
assert stride in (2, 4)
layers = layers or LayerFn()
if isinstance(out_chs, (list, tuple)):
num_rep = len(out_chs)
stem_chs = out_chs
else:
stem_chs = [round(out_chs * chs_decay ** i) for i in range(num_rep)][::-1]
self.stride = stride
self.feature_info = [] # track intermediate features
prev_feat = ''
stem_strides = [2] + [1] * (num_rep - 1)
if stride == 4 and not pool:
# set last conv in stack to be strided if stride == 4 and no pooling layer
stem_strides[-1] = 2
num_act = num_rep if num_act is None else num_act
# if num_act < num_rep, first convs in stack won't have bn + act
stem_norm_acts = [False] * (num_rep - num_act) + [True] * num_act
prev_chs = in_chs
curr_stride = 1
for i, (ch, s, na) in enumerate(zip(stem_chs, stem_strides, stem_norm_acts)):
layer_fn = layers.conv_norm_act if na else create_conv2d
conv_name = f'conv{i + 1}'
if i > 0 and s > 1:
self.feature_info.append(dict(num_chs=prev_chs, reduction=curr_stride, module=prev_feat))
self.add_module(conv_name, layer_fn(prev_chs, ch, kernel_size=kernel_size, stride=s))
prev_chs = ch
curr_stride *= s
prev_feat = conv_name
if pool and 'max' in pool.lower():
self.feature_info.append(dict(num_chs=prev_chs, reduction=curr_stride, module=prev_feat))
self.add_module('pool', nn.MaxPool2d(3, 2, 1))
curr_stride *= 2
prev_feat = 'pool'
self.feature_info.append(dict(num_chs=prev_chs, reduction=curr_stride, module=prev_feat))
assert curr_stride == stride
def create_byob_stem(in_chs, out_chs, stem_type='', pool_type='', feat_prefix='stem', layers: LayerFn = None):
layers = layers or LayerFn()
assert stem_type in ('', 'quad', 'quad2', 'tiered', 'deep', 'rep', '7x7', '3x3')
if 'quad' in stem_type:
# based on NFNet stem, stack of 4 3x3 convs
num_act = 2 if 'quad2' in stem_type else None
stem = Stem(in_chs, out_chs, num_rep=4, num_act=num_act, pool=pool_type, layers=layers)
elif 'tiered' in stem_type:
# 3x3 stack of 3 convs as in my ResNet-T
stem = Stem(in_chs, (3 * out_chs // 8, out_chs // 2, out_chs), pool=pool_type, layers=layers)
elif 'deep' in stem_type:
# 3x3 stack of 3 convs as in ResNet-D
stem = Stem(in_chs, out_chs, num_rep=3, chs_decay=1.0, pool=pool_type, layers=layers)
elif 'rep' in stem_type:
stem = RepVggBlock(in_chs, out_chs, stride=2, layers=layers)
elif '7x7' in stem_type:
# 7x7 stem conv as in ResNet
if pool_type:
stem = Stem(in_chs, out_chs, 7, num_rep=1, pool=pool_type, layers=layers)
else:
stem = layers.conv_norm_act(in_chs, out_chs, 7, stride=2)
else:
# 3x3 stem conv as in RegNet is the default
if pool_type:
stem = Stem(in_chs, out_chs, 3, num_rep=1, pool=pool_type, layers=layers)
else:
stem = layers.conv_norm_act(in_chs, out_chs, 3, stride=2)
if isinstance(stem, Stem):
feature_info = [dict(f, module='.'.join([feat_prefix, f['module']])) for f in stem.feature_info]
else:
feature_info = [dict(num_chs=out_chs, reduction=2, module=feat_prefix)]
return stem, feature_info
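# Sketch (channel counts derived from the branches above, not from upstream tests):
# a 'tiered' stem with out_chs=64 builds a three-conv stack of 24 -> 32 -> 64
# channels (3 * 64 // 8, 64 // 2, 64), followed by a max-pool only when
# pool_type contains 'max'.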
def reduce_feat_size(feat_size, stride=2):
return None if feat_size is None else tuple([s // stride for s in feat_size])
def override_kwargs(block_kwargs, model_kwargs):
""" Override model level attn/self-attn/block kwargs w/ block level
NOTE: kwargs are NOT merged across levels, block_kwargs will fully replace model_kwargs
for the block if set to anything that isn't None.
i.e. an empty block_kwargs dict will remove kwargs set at model level for that block
"""
out_kwargs = block_kwargs if block_kwargs is not None else model_kwargs
return out_kwargs or {} # make sure None isn't returned
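# Illustrative behaviour (hypothetical 'rd_ratio' kwarg, not from the upstream tests):
#   override_kwargs({'rd_ratio': 0.25}, {'rd_ratio': 0.5})  -> {'rd_ratio': 0.25}
#   override_kwargs(None, {'rd_ratio': 0.5})                -> {'rd_ratio': 0.5}
#   override_kwargs({}, {'rd_ratio': 0.5})                  -> {}   (model-level kwargs dropped)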
def update_block_kwargs(block_kwargs: Dict[str, Any], block_cfg: ByoBlockCfg, model_cfg: ByoModelCfg, ):
layer_fns = block_kwargs['layers']
# override attn layer / args with block local config
attn_set = block_cfg.attn_layer is not None
if attn_set or block_cfg.attn_kwargs is not None:
# override attn layer config
if attn_set and not block_cfg.attn_layer:
# empty string for attn_layer type will disable attn for this block
attn_layer = None
else:
attn_kwargs = override_kwargs(block_cfg.attn_kwargs, model_cfg.attn_kwargs)
attn_layer = block_cfg.attn_layer or model_cfg.attn_layer
attn_layer = partial(get_attn(attn_layer), **attn_kwargs) if attn_layer is not None else None
layer_fns = replace(layer_fns, attn=attn_layer)
# override self-attn layer / args with block local cfg
self_attn_set = block_cfg.self_attn_layer is not None
if self_attn_set or block_cfg.self_attn_kwargs is not None:
        # override self-attn layer config
if self_attn_set and not block_cfg.self_attn_layer: # attn_layer == ''
# empty string for self_attn_layer type will disable attn for this block
self_attn_layer = None
else:
self_attn_kwargs = override_kwargs(block_cfg.self_attn_kwargs, model_cfg.self_attn_kwargs)
self_attn_layer = block_cfg.self_attn_layer or model_cfg.self_attn_layer
self_attn_layer = partial(get_attn(self_attn_layer), **self_attn_kwargs) \
if self_attn_layer is not None else None
layer_fns = replace(layer_fns, self_attn=self_attn_layer)
block_kwargs['layers'] = layer_fns
# add additional block_kwargs specified in block_cfg or model_cfg, precedence to block if set
block_kwargs.update(override_kwargs(block_cfg.block_kwargs, model_cfg.block_kwargs))
def create_byob_stages(
cfg: ByoModelCfg, drop_path_rate: float, output_stride: int, stem_feat: Dict[str, Any],
feat_size: Optional[int] = None,
layers: Optional[LayerFn] = None,
block_kwargs_fn: Optional[Callable] = update_block_kwargs):
layers = layers or LayerFn()
feature_info = []
block_cfgs = [expand_blocks_cfg(s) for s in cfg.blocks]
depths = [sum([bc.d for bc in stage_bcs]) for stage_bcs in block_cfgs]
dpr = [x.tolist() for x in torch.linspace(0, drop_path_rate, sum(depths)).split(depths)]
dilation = 1
net_stride = stem_feat['reduction']
prev_chs = stem_feat['num_chs']
prev_feat = stem_feat
stages = []
for stage_idx, stage_block_cfgs in enumerate(block_cfgs):
stride = stage_block_cfgs[0].s
if stride != 1 and prev_feat:
feature_info.append(prev_feat)
if net_stride >= output_stride and stride > 1:
dilation *= stride
stride = 1
net_stride *= stride
first_dilation = 1 if dilation in (1, 2) else 2
blocks = []
for block_idx, block_cfg in enumerate(stage_block_cfgs):
out_chs = make_divisible(block_cfg.c * cfg.width_factor)
group_size = block_cfg.gs
if isinstance(group_size, Callable):
group_size = group_size(out_chs, block_idx)
block_kwargs = dict( # Blocks used in this model must accept these arguments
in_chs=prev_chs,
out_chs=out_chs,
stride=stride if block_idx == 0 else 1,
dilation=(first_dilation, dilation),
group_size=group_size,
bottle_ratio=block_cfg.br,
downsample=cfg.downsample,
drop_path_rate=dpr[stage_idx][block_idx],
layers=layers,
)
if block_cfg.type in ('self_attn',):
# add feat_size arg for blocks that support/need it
block_kwargs['feat_size'] = feat_size
block_kwargs_fn(block_kwargs, block_cfg=block_cfg, model_cfg=cfg)
blocks += [create_block(block_cfg.type, **block_kwargs)]
first_dilation = dilation
prev_chs = out_chs
if stride > 1 and block_idx == 0:
feat_size = reduce_feat_size(feat_size, stride)
stages += [nn.Sequential(*blocks)]
prev_feat = dict(num_chs=prev_chs, reduction=net_stride, module=f'stages.{stage_idx}')
feature_info.append(prev_feat)
return nn.Sequential(*stages), feature_info
def get_layer_fns(cfg: ByoModelCfg):
act = get_act_layer(cfg.act_layer)
norm_act = convert_norm_act(norm_layer=cfg.norm_layer, act_layer=act)
conv_norm_act = partial(ConvBnAct, norm_layer=cfg.norm_layer, act_layer=act)
attn = partial(get_attn(cfg.attn_layer), **cfg.attn_kwargs) if cfg.attn_layer else None
self_attn = partial(get_attn(cfg.self_attn_layer), **cfg.self_attn_kwargs) if cfg.self_attn_layer else None
layer_fn = LayerFn(conv_norm_act=conv_norm_act, norm_act=norm_act, act=act, attn=attn, self_attn=self_attn)
return layer_fn
class ByobNet(nn.Module):
""" 'Bring-your-own-blocks' Net
A flexible network backbone that allows building model stem + blocks via
dataclass cfg definition w/ factory functions for module instantiation.
Current assumption is that both stem and blocks are in conv-bn-act order (w/ block ending in act).
"""
def __init__(self, cfg: ByoModelCfg, num_classes=1000, in_chans=3, global_pool='avg', output_stride=32,
zero_init_last=True, img_size=None, drop_rate=0., drop_path_rate=0.):
super().__init__()
self.num_classes = num_classes
self.drop_rate = drop_rate
layers = get_layer_fns(cfg)
if cfg.fixed_input_size:
assert img_size is not None, 'img_size argument is required for fixed input size model'
feat_size = to_2tuple(img_size) if img_size is not None else None
self.feature_info = []
stem_chs = int(round((cfg.stem_chs or cfg.blocks[0].c) * cfg.width_factor))
self.stem, stem_feat = create_byob_stem(in_chans, stem_chs, cfg.stem_type, cfg.stem_pool, layers=layers)
self.feature_info.extend(stem_feat[:-1])
feat_size = reduce_feat_size(feat_size, stride=stem_feat[-1]['reduction'])
self.stages, stage_feat = create_byob_stages(
cfg, drop_path_rate, output_stride, stem_feat[-1], layers=layers, feat_size=feat_size)
self.feature_info.extend(stage_feat[:-1])
prev_chs = stage_feat[-1]['num_chs']
if cfg.num_features:
self.num_features = int(round(cfg.width_factor * cfg.num_features))
self.final_conv = layers.conv_norm_act(prev_chs, self.num_features, 1)
else:
self.num_features = prev_chs
self.final_conv = nn.Identity()
self.feature_info += [
dict(num_chs=self.num_features, reduction=stage_feat[-1]['reduction'], module='final_conv')]
self.head = ClassifierHead(self.num_features, num_classes, pool_type=global_pool, drop_rate=self.drop_rate)
# init weights
named_apply(partial(_init_weights, zero_init_last=zero_init_last), self)
def get_classifier(self):
return self.head.fc
def reset_classifier(self, num_classes, global_pool='avg'):
self.head = ClassifierHead(self.num_features, num_classes, pool_type=global_pool, drop_rate=self.drop_rate)
def forward_features(self, x):
x = self.stem(x)
x = self.stages(x)
x = self.final_conv(x)
return x
def forward(self, x):
x = self.forward_features(x)
x = self.head(x)
return x
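# Minimal usage sketch (assumes the module-level `model_cfgs` dict defined elsewhere
# in this file, and a variant key such as 'repvgg_a2', which is an assumption here):
#   model = ByobNet(model_cfgs['repvgg_a2'], num_classes=10, drop_path_rate=0.05)
#   logits = model(torch.randn(1, 3, 224, 224))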
def _init_weights(module, name='', zero_init_last=False):
if isinstance(module, nn.Conv2d):
fan_out = module.kernel_size[0] * module.kernel_size[1] * module.out_channels
fan_out //= module.groups
module.weight.data.normal_(0, math.sqrt(2.0 / fan_out))
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.Linear):
nn.init.normal_(module.weight, mean=0.0, std=0.01)
if module.bias is not None:
nn.init.zeros_(module.bias)
elif isinstance(module, nn.BatchNorm2d):
nn.init.ones_(module.weight)
nn.init.zeros_(module.bias)
elif hasattr(module, 'init_weights'):
module.init_weights(zero_init_last=zero_init_last)
def _create_byobnet(variant, pretrained=False, **kwargs):
return build_model_with_cfg(
ByobNet, variant, pretrained,
default_cfg=default_cfgs[variant],
model_cfg=model_cfgs[variant],
feature_cfg=dict(flatten_sequential=True),
**kwargs)
|
"""
14682. Shifty Sum
Author: xCrypt0r
Language: Python 3
Memory used: 29,380 KB
Runtime: 60 ms
Solved: September 20, 2020
"""
def main():
N, k = [int(input()) for _ in range(2)]
res = N
for _ in range(k):
N *= 10
res += N
print(res)
if __name__ == '__main__':
main()
|
# -*- coding: utf-8 -*-
"""
The graphics header element definition.
"""
from .base import NITFElement, UserHeaderType, _IntegerDescriptor,\
_StringDescriptor, _StringEnumDescriptor, _NITFElementDescriptor
from .security import NITFSecurityTags
__classification__ = "UNCLASSIFIED"
__author__ = "Thomas McCullough"
class GraphicsSegmentHeader(NITFElement):
"""
Graphics segment subheader - see standards document MIL-STD-2500C for more
information.
"""
_ordering = (
'SY', 'SID', 'SNAME', 'Security', 'ENCRYP', 'SFMT',
'SSTRUCT', 'SDLVL', 'SALVL', 'SLOC', 'SBND1',
'SCOLOR', 'SBND2', 'SRES2', 'UserHeader')
_lengths = {
'SY': 2, 'SID': 10, 'SNAME': 20, 'ENCRYP': 1,
'SFMT': 1, 'SSTRUCT': 13, 'SDLVL': 3, 'SALVL': 3,
'SLOC': 10, 'SBND1': 10, 'SCOLOR': 1, 'SBND2': 10,
'SRES2': 2}
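    # Note: the fixed-width fields listed above total 86 bytes; the Security tags
    # and UserHeader elements are sized by their own classes and are not listed here.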
SY = _StringEnumDescriptor(
'SY', True, 2, {'SY', }, default_value='SY',
docstring='File part type.') # type: str
SID = _StringDescriptor(
'SID', True, 10, default_value='',
docstring='Graphic Identifier. This field shall contain a valid alphanumeric identification code '
'associated with the graphic. The valid codes are determined by the application.') # type: str
SNAME = _StringDescriptor(
'SNAME', True, 20, default_value='',
docstring='Graphic name. This field shall contain an alphanumeric name for the graphic.') # type: str
Security = _NITFElementDescriptor(
'Security', True, NITFSecurityTags, default_args={},
docstring='The security tags.') # type: NITFSecurityTags
ENCRYP = _StringEnumDescriptor(
'ENCRYP', True, 1, {'0'}, default_value='0',
docstring='Encryption.') # type: str
SFMT = _StringDescriptor(
'SFMT', True, 1, default_value='C',
docstring='Graphic Type. This field shall contain a valid indicator of the '
'representation type of the graphic.') # type: str
SSTRUCT = _IntegerDescriptor(
'SSTRUCT', True, 13, default_value=0,
docstring='Reserved for Future Use.') # type: int
SDLVL = _IntegerDescriptor(
'SDLVL', True, 3, default_value=1,
docstring='Graphic Display Level. This field shall contain a valid value that indicates '
'the graphic display level of the graphic relative to other displayed file '
'components in a composite display. The valid values are :code:`1-999`. '
'The display level of each displayable file component (image or graphic) '
'within a file shall be unique.') # type: int
SALVL = _IntegerDescriptor(
'SALVL', True, 3, default_value=0,
docstring='Graphic Attachment Level. This field shall contain a valid value '
'that indicates the attachment level of the graphic. Valid values for '
'this field are 0 and the display level value of any other '
'image or graphic in the file.') # type: int
SLOC = _IntegerDescriptor(
'SLOC', True, 10, default_value=0,
docstring='Graphic Location. The graphics location is specified by providing the location '
                  'of the graphic’s origin point relative to the position (location) of the CCS, image, '
'or graphic to which it is attached. This field shall contain the graphic location '
'offset from the `ILOC` or `SLOC` value of the CCS, image, or graphic to which the graphic '
'is attached or from the origin of the CCS when the graphic is unattached (`SALVL = 0`). '
'A row and column value of :code:`0` indicates no offset. Positive row and column values indicate '
'offsets down and to the right, while negative row and column values indicate '
'offsets up and to the left.') # type: int
SBND1 = _IntegerDescriptor(
'SBND1', True, 10, default_value=0,
docstring='First Graphic Bound Location. This field shall contain an ordered pair of '
'integers defining a location in Cartesian coordinates for use with CGM graphics. It is '
'the upper left corner of the bounding box for the CGM graphic.') # type: int
SCOLOR = _StringEnumDescriptor(
'SCOLOR', True, 1, {'C', 'M'}, default_value='M',
docstring='Graphic Color. If `SFMT = C`, this field shall contain a :code:`C` if the CGM contains any '
'color pieces or an :code:`M` if it is monochrome (i.e., black, '
'white, or levels of grey).') # type: str
SBND2 = _IntegerDescriptor(
'SBND2', True, 10, default_value=0,
docstring='Second Graphic Bound Location. This field shall contain an ordered pair of '
'integers defining a location in Cartesian coordinates for use with CGM graphics. '
'It is the lower right corner of the bounding box for the CGM graphic.') # type: int
SRES2 = _IntegerDescriptor(
'SRES2', True, 2, default_value=0,
docstring='Reserved for Future Use.') # type: int
UserHeader = _NITFElementDescriptor(
'UserHeader', True, UserHeaderType, default_args={},
docstring='User defined header.') # type: UserHeaderType
|
# -*- coding: utf-8 -*-
"""
pyrseas.column
~~~~~~~~~~~~~~
This module defines two classes: Column derived from
DbSchemaObject and ColumnDict derived from DbObjectDict.
"""
from pyrseas.dbobject import DbObjectDict, DbSchemaObject, quote_id
class Column(DbSchemaObject):
"A table column definition"
keylist = ['schema', 'table']
def to_map(self):
"""Convert a column to a YAML-suitable format
:return: dictionary
"""
if hasattr(self, 'dropped'):
return None
dct = self._base_map()
del dct['number'], dct['name'], dct['_table']
if hasattr(self, 'inherited'):
dct['inherited'] = (self.inherited != 0)
return {self.name: dct}
def add(self):
"""Return a string to specify the column in a CREATE or ALTER TABLE
:return: partial SQL statement
"""
stmt = "%s %s" % (quote_id(self.name), self.type)
if hasattr(self, 'not_null'):
stmt += ' NOT NULL'
if hasattr(self, 'default'):
if not self.default.startswith('nextval'):
stmt += ' DEFAULT ' + self.default
return (stmt, '' if not hasattr(self, 'description')
else self.comment())
def comment(self):
"""Return a SQL COMMENT statement for the column
:return: SQL statement
"""
return "COMMENT ON COLUMN %s.%s IS %s" % (
self._table.qualname(), self.name, self._comment_text())
def drop(self):
"""Return string to drop the column via ALTER TABLE
:return: SQL statement
"""
if hasattr(self, 'dropped'):
return ""
if hasattr(self, '_table'):
(comptype, objtype) = (self._table.objtype, 'COLUMN')
compname = self._table.qualname()
else:
# TODO: this is only a PG 9.1 feature, so more is required
(comptype, objtype) = ('TYPE', 'ATTRIBUTE')
compname = self.table
return "ALTER %s %s DROP %s %s" % (comptype, compname, objtype,
self.name)
def rename(self, newname):
"""Return SQL statement to RENAME the column
:param newname: the new name of the object
:return: SQL statement
"""
stmt = "ALTER TABLE %s RENAME COLUMN %s TO %s" % (
self._table.qualname(), self.name, newname)
self.name = newname
return stmt
def set_sequence_default(self):
"""Return SQL statements to set a nextval() DEFAULT
:return: list of SQL statements
"""
stmts = []
pth = self.set_search_path()
if pth:
stmts.append(pth)
stmts.append("ALTER TABLE %s ALTER COLUMN %s SET DEFAULT %s" % (
quote_id(self.table), quote_id(self.name), self.default))
return stmts
def diff_map(self, incol):
"""Generate SQL to transform an existing column
        :param incol: a YAML map defining the new column
:return: list of partial SQL statements
Compares the column to an input column and generates partial
SQL statements to transform it into the one represented by the
input.
"""
stmts = []
base = "ALTER COLUMN %s " % self.name
# check NOT NULL
if not hasattr(self, 'not_null') and hasattr(incol, 'not_null'):
stmts.append(base + "SET NOT NULL")
if hasattr(self, 'not_null') and not hasattr(incol, 'not_null'):
stmts.append(base + "DROP NOT NULL")
# check data types
if not hasattr(self, 'type'):
raise ValueError("Column '%s' missing datatype" % self.name)
if not hasattr(incol, 'type'):
raise ValueError("Input column '%s' missing datatype" % incol.name)
if self.type != incol.type:
# validate type conversion?
stmts.append(base + "TYPE %s" % incol.type)
# check DEFAULTs
if not hasattr(self, 'default') and hasattr(incol, 'default'):
stmts.append(base + "SET DEFAULT %s" % incol.default)
if hasattr(self, 'default') and not hasattr(incol, 'default'):
stmts.append(base + "DROP DEFAULT")
return (", ".join(stmts), self.diff_description(incol))
class ColumnDict(DbObjectDict):
"The collection of columns in tables in a database"
cls = Column
query = \
"""SELECT nspname AS schema, relname AS table, attname AS name,
attnum AS number, format_type(atttypid, atttypmod) AS type,
attnotnull AS not_null, attinhcount AS inherited,
pg_get_expr(adbin, adrelid) AS default,
attisdropped AS dropped,
col_description(c.oid, attnum) AS description
FROM pg_attribute JOIN pg_class c ON (attrelid = c.oid)
JOIN pg_namespace ON (relnamespace = pg_namespace.oid)
LEFT JOIN pg_attrdef ON (attrelid = pg_attrdef.adrelid
AND attnum = pg_attrdef.adnum)
WHERE relkind in ('c', 'r', 'f')
AND (nspname != 'pg_catalog'
AND nspname != 'information_schema')
AND attnum > 0
ORDER BY nspname, relname, attnum"""
def _from_catalog(self):
"""Initialize the dictionary of columns by querying the catalogs"""
for col in self.fetch():
sch, tbl = col.key()
if (sch, tbl) not in self:
self[(sch, tbl)] = []
self[(sch, tbl)].append(col)
def from_map(self, table, incols):
"""Initialize the dictionary of columns by converting the input list
:param table: table or type owning the columns/attributes
:param incols: YAML list defining the columns
"""
if not incols:
raise ValueError("Table '%s' has no columns" % table.name)
cols = self[(table.schema, table.name)] = []
for col in incols:
for key in list(col.keys()):
if isinstance(col[key], dict):
arg = col[key]
else:
arg = {'type': col[key]}
cols.append(Column(schema=table.schema, table=table.name,
name=key, **arg))
def diff_map(self, incols):
"""Generate SQL to transform existing columns
:param incols: a YAML map defining the new columns
:return: list of SQL statements
Compares the existing column definitions, as fetched from the
catalogs, to the input map and generates SQL statements to
transform the columns accordingly.
This takes care of dropping columns that are not present in
the input map. It's separate so that it can be done last,
after other table, constraint and index changes.
"""
stmts = []
if not incols or not self:
return stmts
for (sch, tbl) in list(incols.keys()):
if (sch, tbl) in list(self.keys()):
for col in self[(sch, tbl)]:
if col.name not in [c.name for c in incols[(sch, tbl)]] \
and not hasattr(col, 'dropped'):
stmts.append(col.drop())
return stmts
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
""" Support level3 operator test cases.
"""
import numpy as np
import pytest
import tvm
from tvm import te
from tvm import relay
from tvm.error import TVMError
from tvm.relay import create_executor, transform
from tvm.relay.testing import check_grad, run_infer_type
import tvm.testing
def test_zeros_ones():
for op, ref in [(relay.zeros, np.zeros), (relay.ones, np.ones)]:
y = op(shape=(124, 50), dtype="float64")
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType((124, 50), "float64")
intrp = create_executor()
intrp_res = intrp.evaluate(y).asnumpy()
np.testing.assert_allclose(intrp_res, ref((124, 50), 'float64'))
def test_unary_identity():
for op, ref in [(relay.zeros_like, np.zeros_like),
(relay.ones_like, np.ones_like),
(relay.ceil, np.ceil),
(relay.floor, np.floor),
(relay.trunc, np.trunc),
(relay.round, np.round),
(relay.abs, np.abs),
(relay.copy, None), # np.copy
(relay.negative, np.negative),
(relay.sign, np.sign)]:
shape = (8, 9, 4)
x = relay.var("x", relay.TensorType(shape, "float32"))
y = op(x)
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType(shape, "float32")
if ref is not None:
data = np.random.rand(*shape).astype('float32')
intrp = create_executor()
op_res = intrp.evaluate(y, { x: relay.const(data) })
ref_res = ref(data)
np.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=0.01)
def test_cast():
x = relay.var("x", relay.TensorType((8, 9, 4), "float32"))
y = x.astype("int32")
yy = run_infer_type(y)
assert "dtype=" in yy.astext()
assert yy.checked_type == relay.TensorType((8, 9, 4), "int32")
x = relay.var("x", relay.TensorType((8, 9, 4), "float32"))
y = relay.cast(x, "int32")
yy = run_infer_type(y)
assert "dtype=" in yy.astext()
assert yy.checked_type == relay.TensorType((8, 9, 4), "int32")
def test_clip():
a = relay.var("a", relay.TensorType((10, 4), "float32"))
y = relay.clip(a, 1., 4.)
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType((10, 4), "float32")
data = np.random.rand(10, 4).astype('float32')
intrp = create_executor()
op_res = intrp.evaluate(y, { a: relay.const(data) })
ref_res = np.clip(data, 1., 4.)
np.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=0.01)
def test_fixed_point_multiply():
# Test 23 * 1/16
# [m,s] = [0.5, -3] = frexp(1/16)
# M = 0.5*2^31 = 1073741824
# so M = 1073741824 and s = -3
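    # Numerically (restating the constants above): 23 * (1073741824 / 2**31) * 2**-3
    # = 23 / 16 = 1.4375, so each int32 output element is expected to round to 1
    # (hence atol=1 on the check below).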
a = relay.var("a", relay.TensorType((10, 4), "int32"))
y = relay.fixed_point_multiply(a, 1073741824, -3)
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType((10, 4), "int32")
data = 23*np.ones((10, 4)).astype('int32')
intrp = create_executor()
op_res = intrp.evaluate(y, { a: relay.const(data) })
ref_res = np.ones((10, 4)).astype('int32')
np.testing.assert_allclose(op_res.asnumpy(), ref_res, atol=1)
def test_reinterpret():
a = relay.var("a", relay.TensorType((1000, 4), "float32"))
y = relay.reinterpret(a, "int32")
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType((1000, 4), "int32")
data = np.random.randn(1000, 4).astype('float32') * 1000
intrp = create_executor()
op_res = intrp.evaluate(y, {a: relay.const(data)})
ref_res = data.view("int32")
np.testing.assert_equal(op_res.asnumpy(), ref_res)
def test_approximate_transcendental():
def C(x):
return relay.expr.const(x, "float32")
def approx_exp(x):
# An approximation derived from Opus,
# https://github.com/xiph/opus/blob/c1c247/celt/mathops.h#L147-L165
x = relay.minimum(relay.maximum(x, C(-88.0)), C(88.0))
x = C(127.0) + x * C(1.44269504)
xf = relay.floor(x)
i = relay.cast(xf, "int32")
x = x - xf
Y = C(0.99992522) + x * (C(0.69583354) + x * (C(0.22606716) + x * C(0.078024523)))
exponent = relay.left_shift(i, relay.expr.const(23, "int32"))
exponent = relay.reinterpret(exponent, "float32")
return exponent * Y
def approximate_sigmoid(x):
y = approx_exp(x)
return y / (y + C(1.0))
def approximate_tanh(x):
x = x * C(2.0)
y = approx_exp(x)
return (y - C(1.0)) / (y + C(1.0))
a = relay.var("a", relay.TensorType((1000,), "float32"))
y = approximate_sigmoid(a)
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType((1000,), "float32")
data = np.linspace(-5, 5, 1000).astype("float32")
intrp = create_executor()
op_res = intrp.evaluate(y, {a: relay.const(data)})
def reference_sigmoid(x):
return np.exp(-np.logaddexp(0, -x))
np.testing.assert_allclose(op_res.asnumpy(), reference_sigmoid(data), atol=2e-5, rtol=1e-9)
y = approximate_tanh(a)
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType((1000,), "float32")
data = np.linspace(-5, 5, 1000).astype("float32")
intrp = create_executor()
op_res = intrp.evaluate(y, {a: relay.const(data)})
def reference_tanh(x):
return np.tanh(x)
np.testing.assert_allclose(op_res.asnumpy(), reference_tanh(data), atol=4e-5, rtol=1e-9)
def test_squeeze():
def verify_squeeze(shape, dtype, axis):
x = relay.var("x", relay.TensorType(shape, dtype))
squeeze = relay.squeeze(x, axis=axis)
np_axis = tuple(axis) if axis is not None else None
data = np.random.random_sample(shape).astype(dtype)
intrp = create_executor()
op_res = intrp.evaluate(squeeze, { x : relay.const(data) })
ref_res = np.squeeze(data, axis=np_axis)
np.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=0.01)
verify_squeeze((1, 3, 2, 5), "float32", None)
verify_squeeze((1, 3, 1), "float32", [0])
verify_squeeze((1, 2, 1, 2, 1), "float32", [0, 2])
def test_transpose_infer_type():
n, t, d = te.size_var("n"), te.size_var("t"), 100
x = relay.var("x", relay.TensorType((n, t, d), "float32"))
y = relay.transpose(x, axes=(1, 0, 2))
assert "axes=" in y.astext()
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType(
(t, n, 100), "float32")
y = relay.transpose(x)
assert "axes=" in y.astext()
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType(
(100, t, n), "float32")
@tvm.testing.uses_gpu
def test_transpose():
def verify_transpose(dshape, axes):
x = relay.var("x", relay.TensorType(dshape, "float32"))
z = relay.transpose(x, axes=axes)
func = relay.Function([x], z)
x_data = np.random.uniform(low=-1, high=1, size=dshape).astype("float32")
ref_res = np.transpose(x_data, axes=axes)
for target, ctx in tvm.testing.enabled_targets():
for kind in ["graph", "debug"]:
intrp = relay.create_executor(kind, ctx=ctx, target=target)
op_res = intrp.evaluate(func)(x_data)
tvm.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=1e-5)
verify_transpose((2, 3, 4), (0, 2, 1))
def test_squeeze_infer_type():
n, t, d = 1, 4, 1
x = relay.var("x", relay.TensorType((n, t, d), "float32"))
y = relay.squeeze(x, axis=(2,))
assert "axis=" in y.astext()
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType(
(1, 4), "float32")
n, t, d = 1, 4, 1
x = relay.var("x", relay.TensorType((n, t, d), "float32"))
y = relay.squeeze(x)
assert "axis=" not in y.astext()
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType(
(4,), "float32")
@pytest.mark.xfail(raises=tvm._ffi.base.TVMError)
def test_squeeze_bad_axes_infer_type():
n, t, d = 1, 4, 1
x = relay.var("x", relay.TensorType((n, t, d), "float32"))
y = relay.squeeze(x, axis=(1,))
yy = run_infer_type(y)
def test_reshape_infer_type():
n, t, d1, d2 = 10, 20, 100, 20
x = relay.var("x", relay.TensorType((n, t, d1, d2), "float32"))
y = relay.reshape(x, newshape=(n, t, 2000))
assert "newshape=" in y.astext()
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType(
(n, t, 2000), "float32")
@tvm.testing.uses_gpu
def test_reshape():
def verify_reshape(shape, newshape, oshape):
x = relay.var("x", relay.TensorType(shape, "float32"))
z = relay.reshape(x, newshape=newshape)
zz = run_infer_type(z)
assert "newshape=" in z.astext()
assert zz.checked_type == relay.ty.TensorType(oshape, "float32")
func = relay.Function([x], z)
check_grad(func)
x_data = np.random.uniform(low=-1, high=1, size=shape).astype("float32")
ref_res = np.reshape(x_data, oshape)
for target, ctx in tvm.testing.enabled_targets():
for kind in ["graph", "debug"]:
intrp = relay.create_executor(kind, ctx=ctx, target=target)
op_res = intrp.evaluate(func)(x_data)
tvm.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=1e-5)
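    # Note (inferred from the cases below, not quoted from TVM docs): newshape entries
    # follow MXNet-style codes -- 0 copies the input dim, -1 infers one dim, -2 copies
    # all remaining dims, -3 merges two consecutive dims, and -4 splits one dim into
    # the two values that follow it.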
verify_reshape((2, 3, 4), (8, 3), (8, 3))
verify_reshape((4, 7), (2, 7, 2), (2, 7, 2))
verify_reshape((2, 3, 4), (4, 0, 2), (4, 3, 2))
verify_reshape((2, 3, 4), (2, 0, 0), (2, 3, 4))
verify_reshape((2, 3, 4), (0, -1), (2, 12))
verify_reshape((2, 3, 4), (-1, 0), (8, 3))
verify_reshape((2, 3, 4), (2, -2), (2, 3, 4))
verify_reshape((2, 3, 4), (-2, 1, 1), (2, 3, 4, 1, 1))
verify_reshape((2, 3, 4), (-3, 4), (6, 4))
verify_reshape((2, 3, 4, 5), (-3, -3), (6, 20))
verify_reshape((2, 3, 4), (0, -3), (2, 12))
verify_reshape((2, 3, 4), (-3, -2), (6, 4))
verify_reshape((2, 3, 4), (-4, 1, 2, -2), (1, 2, 3, 4))
verify_reshape((2, 3, 4), (2, -4, -1, 3, -2), (2, 1, 3, 4))
def test_reshape_fail():
with pytest.raises(TVMError) as reshape_err:
x = relay.var("x", relay.TensorType([2,3], "float32"))
z = relay.reshape(x, [7])
zz = run_infer_type(z)
def test_reshape_like_infer_type():
# concrete shape
x = relay.var("x", relay.TensorType((1, 2, 3), "float32"))
y = relay.var("y", relay.TensorType((1,6), "float32"))
z = relay.reshape_like(x, y)
zz = run_infer_type(z)
assert zz.checked_type == relay.TensorType((1, 6), "float32")
# symbolic shape
n, c, h, w = te.size_var("n"), 2, 3, te.size_var("w")
x = relay.var("x", relay.TensorType((n, c, h, w), "float32"))
y = relay.var("y", relay.TensorType((1, 8, 8), "float32"))
z = relay.reshape_like(x, y)
zz = run_infer_type(z)
assert zz.checked_type == relay.TensorType((1, 8, 8), "float32")
@tvm.testing.uses_gpu
def test_reshape_like():
def verify_reshape_like(shape, oshape):
x_data = np.random.uniform(low=-1, high=1, size=shape).astype("float32")
y_data = np.random.uniform(low=-1, high=1, size=oshape).astype("float32")
ref_res = np.reshape(x_data, y_data.shape)
x = relay.var("x", relay.TensorType(shape, "float32"))
y = relay.var("x", relay.TensorType(oshape, "float32"))
z = relay.reshape_like(x, y)
zz = run_infer_type(z)
assert zz.checked_type == relay.ty.TensorType(ref_res.shape, "float32")
func = relay.Function([x, y], z)
for target, ctx in tvm.testing.enabled_targets():
for kind in ["graph", "debug"]:
intrp = relay.create_executor(kind, ctx=ctx, target=target)
op_res = intrp.evaluate(func)(x_data, y_data)
tvm.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=1e-5)
verify_reshape_like((2, 3, 4), (1, 8, 3))
verify_reshape_like((4, 7), (2, 7, 2))
def test_take_infer_type():
def verify_take(dshape, indices_shape, oshape, axis=None):
x = relay.var("x", relay.TensorType(dshape, "float32"))
indices = relay.var("indices", relay.TensorType(indices_shape, "int32"))
y = relay.take(x, indices, axis=axis)
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType(oshape, "float32")
d1, d2, d3 = te.var("d1"), te.var("d2"), te.var("d3")
d4, d5, d6 = te.var("d4"), te.var("d5"), te.var("d6")
verify_take((d1,), (1,), (1,), 0)
verify_take((4,), (d1, d2), (d1, d2))
verify_take((3, 3, 3), (1, d2), (1, d2))
verify_take((d1, d2), (d3, d4, d5), (d3, d4, d5, d2), 0)
verify_take((d1, d2), (d3, d4, d5), (d1, d3, d4, d5), 1)
verify_take((d1, d2, d3, d4), (d5, d6), (d1, d2, d5, d6, d4), -2)
@tvm.testing.uses_gpu
def test_take():
def verify_take(src_shape, indices_src, axis=None, mode="clip"):
src_dtype = "float32"
indices_dtype = "int32"
indices_src = np.array(indices_src, dtype=indices_dtype)
x = relay.var("x", relay.TensorType(src_shape, src_dtype))
indices = relay.var("indices", relay.TensorType(indices_src.shape, indices_dtype))
z = relay.take(x, indices, axis=axis, mode=mode)
func = relay.Function([x, indices], z)
x_data = np.random.uniform(low=-1, high=1, size=src_shape).astype(src_dtype)
np_mode = "raise" if mode == "fast" else mode
ref_res = np.take(x_data, indices=indices_src, axis=axis, mode=np_mode)
for target, ctx in tvm.testing.enabled_targets():
for kind in ["graph", "debug"]:
intrp = relay.create_executor(kind, ctx=ctx, target=target)
op_res = intrp.evaluate(func)(x_data, indices_src)
tvm.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=1e-5)
verify_take((4,), [1])
verify_take((4,), [[0,1,2,3]])
verify_take((3,3,3), [[11,25]])
verify_take((4,), [[0,1],[2,3]])
verify_take((4,), [1], 0)
verify_take((2,2), [[[1,0],[0,1]]], 0)
verify_take((2,2), [[[1,0],[0,1]]], 1)
verify_take((4,3,5,6), [[2,1,0,0]], -2)
verify_take((3,4), [-5, 20])
verify_take((3,4), [-5, 20], mode="wrap")
verify_take((3,4), [-1, 2], axis=0)
verify_take((3,4), [-1, 2], axis=0, mode="wrap")
verify_take((3,4), [-1, 2], axis=1)
verify_take((3,4), [-1, 2], axis=1, mode="wrap")
verify_take((3,3,3), [[11,25]], mode="fast")
verify_take((3,4), [0, 2], axis=0, mode="fast")
verify_take((3,4), [0, 2], axis=1, mode="fast")
def test_split_infer_type():
def verify_split(dshape, indices_or_sections, ret_type, axis=None):
x = relay.var("x", relay.ty.TensorType(dshape, "float32"))
y = relay.split(x, indices_or_sections, axis=axis)
yy = run_infer_type(y.astuple())
assert yy.checked_type == ret_type
idxd = tvm.tir.indexdiv
d1, d2, d3, d4 = te.var("d1"), te.var("d2"), te.var("d3"), te.var("d4")
axis = te.var("axis")
verify_split((5, 5, 2, 2), 5,
relay.ty.TupleType(tvm.runtime.convert([
relay.ty.TensorType((5, 1, 2, 2), "float32"),
relay.ty.TensorType((5, 1, 2, 2), "float32"),
relay.ty.TensorType((5, 1, 2, 2), "float32"),
relay.ty.TensorType((5, 1, 2, 2), "float32"),
relay.ty.TensorType((5, 1, 2, 2), "float32")])),
axis=1)
verify_split((5, 5, 2, 2), 5,
relay.ty.TupleType(tvm.runtime.convert([
relay.ty.TensorType((1, 5, 2, 2), "float32"),
relay.ty.TensorType((1, 5, 2, 2), "float32"),
relay.ty.TensorType((1, 5, 2, 2), "float32"),
relay.ty.TensorType((1, 5, 2, 2), "float32"),
relay.ty.TensorType((1, 5, 2, 2), "float32")])),
axis=0)
verify_split((d1, d2, d3, d4), 4,
relay.ty.TupleType(tvm.runtime.convert([
relay.ty.TensorType((d1, d2, idxd(d3, 4), d4), "float32"),
relay.ty.TensorType((d1, d2, idxd(d3, 4), d4), "float32"),
relay.ty.TensorType((d1, d2, idxd(d3, 4), d4), "float32"),
relay.ty.TensorType((d1, d2, idxd(d3, 4), d4), "float32")])),
axis=2)
verify_split((d1, d2, d3, d4), 2,
relay.ty.TupleType(tvm.runtime.convert([
relay.ty.TensorType((idxd(d1, 2), d2, d3, d4), "float32"),
relay.ty.TensorType((idxd(d1, 2), d2, d3, d4), "float32")])),
axis=0)
verify_split((d1, d2, d3, d4), (2, 4, 7),
relay.ty.TupleType(tvm.runtime.convert([
relay.ty.TensorType((d1, 2, d3, d4), "float32"),
relay.ty.TensorType((d1, 2, d3, d4), "float32"),
relay.ty.TensorType((d1, 3, d3, d4), "float32"),
relay.ty.TensorType((d1, (d2-7), d3, d4), "float32")])),
axis=1)
def test_full_infer_type():
# default settings: match input dtype
x = relay.var("x", relay.TensorType((), "int8"))
y = relay.full(x, ())
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType((), "int8")
# change the shape and dtype
x = relay.var("x", relay.TensorType((), "float32"))
y = relay.full(x, (1, 2), "int8")
"shape=" in y.astext()
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType((1, 2), "int8")
@tvm.testing.uses_gpu
def test_full():
def verify_full(fill_value, src_shape, dtype):
x = relay.var("x", relay.scalar_type(dtype))
z = relay.full(x, src_shape, dtype)
func = relay.Function([x], z)
ref_res = np.full(src_shape, fill_value)
for target, ctx in tvm.testing.enabled_targets():
for kind in ["graph", "debug"]:
intrp = relay.create_executor(kind, ctx=ctx, target=target)
op_res = intrp.evaluate(func)(np.array(fill_value, dtype))
tvm.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=1e-5)
verify_full(4, (1, 3, 4, 4), "int32")
#verify_full(4, (1, 3, 4, 4), "int64") # This does not pass, python int32 is not upcast to int64, not sure how to fix it.
verify_full(4.0, (1, 4), "float32")
def test_full_like_infer_type():
# concrete shape
base = relay.var("base", relay.TensorType((1, 2, 3), "float32"))
fill = relay.var("fill", relay.TensorType((), "float32"))
y = relay.full_like(base, fill)
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType((1, 2, 3), "float32")
# symbolic shape
n, c, h, w = te.size_var("n"), 2, 3, te.size_var("w")
base = relay.var("base", relay.TensorType((n, c, h, w), "float32"))
fill = relay.var("fill", relay.TensorType((), "float32"))
y = relay.full_like(base, fill)
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType((n, c, h, w), "float32")
@tvm.testing.uses_gpu
def test_full_like():
def verify_full_like(base, fill_value, dtype):
x_data = np.random.uniform(low=-1, high=1, size=base).astype(dtype)
x = relay.var("x", relay.TensorType(base, dtype))
y = relay.var("y", relay.scalar_type(dtype))
z = relay.full_like(x, y)
func = relay.Function([x, y], z)
ref_res = np.full_like(x_data, fill_value)
for target, ctx in tvm.testing.enabled_targets():
for kind in ["graph", "debug"]:
intrp = relay.create_executor(kind, ctx=ctx, target=target)
op_res = intrp.evaluate(func)(x_data, np.array(fill_value, dtype))
tvm.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=1e-5)
verify_full_like((1, 3, 4, 4), 4, "int32")
verify_full_like((1, 1), 44.0, "float32")
@tvm.testing.uses_gpu
def test_infer_type_leaky_relu():
n, c , h, w = te.size_var("n"), te.size_var("c"), te.size_var("h"), te.size_var("w")
x = relay.var("x", relay.TensorType((n, c, h, w), "float32"))
y = relay.nn.leaky_relu(x, alpha=0.1)
"alpha=0.1" in y.astext()
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType((n, c, h, w), "float32")
shape = (1, 5, 10, 10)
dtype = "float32"
x = relay.var("x", relay.TensorType(shape, dtype))
z = relay.nn.leaky_relu(x, alpha=0.1)
assert "alpha=0.1" in z.astext()
zz = run_infer_type(z)
assert zz.checked_type == relay.TensorType(shape, dtype)
func = relay.Function([x], z)
x_data = np.random.uniform(low=-1, high=1, size=shape).astype(dtype)
ref_res = np.where(x_data > 0, x_data, x_data * 0.1)
for target, ctx in tvm.testing.enabled_targets():
intrp1 = relay.create_executor("graph", ctx=ctx, target=target)
intrp2 = relay.create_executor("debug", ctx=ctx, target=target)
op_res1 = intrp1.evaluate(func)(x_data)
tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5)
op_res2 = intrp2.evaluate(func)(x_data)
tvm.testing.assert_allclose(op_res2.asnumpy(), ref_res, rtol=1e-5)
def verify_infer_type_prelu(data, alpha, axis, output, dtype="float32"):
x = relay.var("data", relay.TensorType(data, dtype))
if alpha:
y = relay.var("alpha", relay.TensorType(alpha, dtype))
else:
y = relay.var("alpha", relay.IncompleteType())
z = relay.nn.prelu(x, y, axis=axis)
zz = run_infer_type(z)
if axis != 1:
assert "axis" in z.astext()
assert zz.checked_type == relay.ty.TensorType(output, dtype)
if not alpha:
axis = axis if axis else 1
alpha_shape = (data[axis],)
assert zz.args[1].checked_type == relay.TensorType(alpha_shape, "float32")
    if all(isinstance(v, tvm.tir.Var) for v in data) or not alpha:
return
func = relay.Function([x, y], z)
x_data = np.random.uniform(low=-1, high=1, size=data).astype(dtype)
a_data = np.random.uniform(low=-1, high=1, size=alpha).astype(dtype)
if axis == 1:
ref_res = (x_data < 0) * (x_data * a_data.reshape(3, 1, 1)) + (x_data>=0) * x_data
else:
ref_res = (x_data < 0) * (x_data * a_data.reshape(1, 1, 3)) + (x_data>=0) * x_data
for target, ctx in tvm.testing.enabled_targets():
intrp1 = relay.create_executor("graph", ctx=ctx, target=target)
intrp2 = relay.create_executor("debug", ctx=ctx, target=target)
op_res1 = intrp1.evaluate(func)(x_data, a_data)
tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5)
op_res2 = intrp2.evaluate(func)(x_data, a_data)
tvm.testing.assert_allclose(op_res2.asnumpy(), ref_res, rtol=1e-5)
@tvm.testing.uses_gpu
def test_infer_type_prelu():
n, c , h, w = te.size_var("n"), te.size_var("c"), te.size_var("h"), te.size_var("w")
verify_infer_type_prelu((n, c, h, w), (c,), 1, (n, c, h, w))
verify_infer_type_prelu((n, h, w, c), (c,), 3, (n, h, w, c))
verify_infer_type_prelu((n, c, h, w), None, 1, (n, c, h, w))
verify_infer_type_prelu((n, h, w, c), None, 3, (n, h, w, c))
verify_infer_type_prelu((1, 3, 2, 2), (3,), 1, (1, 3, 2, 2))
verify_infer_type_prelu((1, 2, 2, 3), (3,), 3, (1, 2, 2, 3))
verify_infer_type_prelu((1, 3, 2, 2), None, 1, (1, 3, 2, 2))
verify_infer_type_prelu((1, 2, 2, 3), None, 3, (1, 2, 2, 3))
@tvm.testing.uses_gpu
def test_arange():
def verify_arange(start, stop, step):
dtype = "float32"
if start is None and step is None:
x = relay.arange(relay.const(stop, dtype=dtype))
ref_res = np.arange(stop).astype(dtype)
elif start is None:
x = relay.arange(relay.const(stop, dtype=dtype), step=relay.const(step, dtype=dtype))
ref_res = np.arange(stop, step=step).astype(dtype)
elif step is None:
x = relay.arange(relay.const(start, dtype=dtype), relay.const(stop, dtype=dtype))
ref_res = np.arange(start, stop).astype(dtype)
else:
x = relay.arange(
relay.const(start, dtype=dtype),
relay.const(stop, dtype=dtype),
relay.const(step, dtype=dtype))
ref_res = np.arange(start, stop, step).astype(dtype)
func = relay.Function([], x)
for target, ctx in tvm.testing.enabled_targets():
for kind in ["graph", "debug"]:
intrp = relay.create_executor(kind, ctx=ctx, target=target)
op_res = intrp.evaluate(func)()
tvm.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=1e-5)
verify_arange(None, 20, None)
verify_arange(None, 20, 2)
verify_arange(1, 20, None)
verify_arange(1, 20, 2)
    # arange doesn't support floating point right now, see type relation
# verify_arange(1, 20, 1.5)
verify_arange(1, 20.5, None)
verify_arange(1, 20, 3)
verify_arange(20, 1, -1)
    # arange doesn't support floating point right now, see type relation
# verify_arange(20, 1, -1.5)
@tvm.testing.uses_gpu
def test_meshgrid():
def verify_meshgrid(lengths, indexing="ij"):
input_vars = []
input_data = []
for i, length in enumerate(lengths):
input_name = "x_{}".format(i)
if length == 0:
# Scalar
input_vars.append(relay.var(input_name, relay.scalar_type("float32")))
input_data.append(np.array(1, "float32"))
else:
input_vars.append(relay.var(input_name, relay.TensorType((length,), "float32")))
input_data.append(np.arange(length).astype("float32"))
z = relay.meshgrid(input_vars, indexing=indexing).astuple()
func = relay.Function(input_vars, z)
# Get ref
ref_res = np.meshgrid(*input_data, indexing=indexing)
for target, ctx in tvm.testing.enabled_targets():
for kind in ["graph", "debug"]:
intrp = relay.create_executor(kind, ctx=ctx, target=target)
op_res = intrp.evaluate(func)(*input_data)
assert len(op_res) == len(ref_res)
for i in range(len(op_res)):
tvm.testing.assert_allclose(op_res[i].asnumpy(), ref_res[i], rtol=1e-5)
verify_meshgrid([3, 5])
verify_meshgrid([4, 2], indexing="xy")
verify_meshgrid([3, 5, 2])
verify_meshgrid([3, 1, 5], indexing="xy")
# Length 0 signifies scalar.
verify_meshgrid([3, 5, 0])
@tvm.testing.uses_gpu
def test_tile():
def verify_tile(dshape, reps):
x = relay.var("x", relay.TensorType(dshape, "float32"))
z = relay.tile(x, reps=reps)
func = relay.Function([x], z)
x_data = np.random.uniform(low=-1, high=1, size=dshape).astype("float32")
ref_res = np.tile(x_data, reps=reps)
for target, ctx in tvm.testing.enabled_targets():
for kind in ["graph", "debug"]:
intrp = relay.create_executor(kind, ctx=ctx, target=target)
op_res = intrp.evaluate(func)(x_data)
tvm.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=1e-5)
verify_tile((2, 3, 4), (3, 2, 1))
verify_tile((2, 3, 4), (1, 2))
verify_tile((2, 3), (3, 2, 1))
@tvm.testing.uses_gpu
def test_repeat():
def verify_repeat(dshape, repeats, axis):
x = relay.Var("x", relay.TensorType(dshape, "float32"))
func = relay.Function([x], relay.repeat(x, repeats, axis))
data = np.random.uniform(size=dshape).astype("float32")
ref_res = np.repeat(data, repeats, axis)
for target, ctx in tvm.testing.enabled_targets():
for kind in ["graph", "debug"]:
intrp = relay.create_executor(kind, ctx=ctx, target=target)
op_res = intrp.evaluate(func)(data)
tvm.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=1e-5)
verify_repeat((3,), 2, 0)
verify_repeat((3, 10), 2, -1)
verify_repeat((3, 2, 4), 3, 1)
@tvm.testing.uses_gpu
def test_stack():
def verify_stack(dshapes, axis):
y = []
for shape in dshapes:
y.append(relay.var("input", relay.TensorType(shape, "float32")))
x = relay.Tuple(y)
z = relay.stack(x, axis=axis)
func = relay.Function(y, z)
x_data = [np.random.normal(size=shape).astype("float32") for shape in dshapes]
ref_res = np.stack(x_data, axis=axis)
for target, ctx in tvm.testing.enabled_targets():
for kind in ["graph", "debug"]:
intrp = relay.create_executor(kind, ctx=ctx, target=target)
op_res = intrp.evaluate(func)(*x_data)
tvm.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=1e-5)
verify_stack([(2,), (2,), (2,)], -1)
verify_stack([(2,), (2,), (2,)], 0)
verify_stack([(2, 2, 4), (2, 2, 4), (2, 2, 4)], 1)
verify_stack([(2, 2, 3, 4), (2, 2, 3, 4), (2, 2, 3, 4), (2, 2, 3, 4)], -1)
verify_stack([(2, 2, 3, 4), (2, 2, 3, 4), (2, 2, 3, 4), (2, 2, 3, 4)], 4)
@tvm.testing.uses_gpu
def test_reverse():
def verify_reverse(dshape, axis):
x = relay.var("x", relay.TensorType(dshape, "float32"))
z = relay.reverse(x, axis=axis)
zz = run_infer_type(z)
func = relay.Function([x], z)
x_data = np.random.uniform(low=-1, high=1, size=dshape).astype("float32")
ref_res = np.flip(x_data, axis)
for target, ctx in tvm.testing.enabled_targets():
for kind in ["graph", "debug"]:
intrp = relay.create_executor(kind, ctx=ctx, target=target)
op_res = intrp.evaluate(func)(x_data)
tvm.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=1e-5)
verify_reverse((2, 3, 4), 1)
verify_reverse((4, 7), 0)
verify_reverse((2, 3, 4), -1)
@tvm.testing.uses_gpu
def test_reverse_sequence():
def verify_reverse_sequence(x_data, seq_lengths, batch_axis, seq_axis, ref_res):
seq_lengths_data = np.array(seq_lengths).astype("int32")
x = relay.var("x", relay.TensorType(x_data.shape, str(x_data.dtype)))
z = relay.reverse_sequence(x, relay.const(seq_lengths_data), seq_axis, batch_axis)
zz = run_infer_type(z)
assert zz.checked_type == x.type_annotation
func = relay.Function([x], z)
for target, ctx in tvm.testing.enabled_targets():
for kind in ["graph", "debug"]:
intrp = relay.create_executor(kind, ctx=ctx, target=target)
op_res = intrp.evaluate(func)(x_data)
tvm.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=1e-5)
indata = np.array(np.arange(0, 16)).reshape([4, 4]).astype("int32")
result = [[0, 5, 10, 15],
[4, 1, 6, 11],
[8, 9, 2, 7],
[12, 13, 14, 3]]
verify_reverse_sequence(indata, [1, 2, 3, 4], 1, 0, np.array(result))
verify_reverse_sequence(indata, [1, 2, 3, 4], -1, 0, np.array(result))
verify_reverse_sequence(indata.astype("float32"), [1, 2, 3, 4], 1, 0, np.array(result).astype("float32"))
indata = np.array(np.arange(0, 16)).reshape([4, 4]).astype("int32")
result = [[0, 1, 2, 3],
[5, 4, 6, 7],
[10, 9, 8, 11],
[15, 14, 13, 12]]
verify_reverse_sequence(indata, [1, 2, 3, 4], 0, 1, np.array(result))
verify_reverse_sequence(indata, [1, 2, 3, 4], 0, -1, np.array(result))
verify_reverse_sequence(indata.astype("float32"), [1, 2, 3, 4], 0, 1, np.array(result).astype("float32"))
indata = np.array(np.arange(0, 16)).reshape([4, 4]).astype("int32")
result = [[0, 1, 2, 3],
[4, 5, 6, 7],
[8, 9, 10, 11],
[15, 14, 13, 12]]
verify_reverse_sequence(indata, [-1, 0, 1, 5], 0, 1, np.array(result))
indata = np.array(np.arange(0, 54)).reshape([2, 3, 3, 3]).astype("int32")
result = [[[[18, 19, 20], [21, 22, 23], [24, 25, 26]],
[[9, 10, 11], [12, 13, 14], [15, 16, 17]],
[[0, 1, 2], [3, 4, 5], [6, 7, 8]]],
[[[45, 46, 47], [48, 49, 50], [51, 52, 53]],
[[36, 37, 38], [39, 40, 41], [42, 43, 44]],
[[27, 28, 29], [30, 31, 32], [33, 34, 35]]]]
verify_reverse_sequence(indata, [3, 3], 0, 1, np.array(result))
indata = np.array(np.arange(0, 54)).reshape([2, 3, 3, 3]).astype("int32")
result = [[[[9, 10, 11], [21, 22, 23], [15, 16, 17]],
[[0, 1, 2], [12, 13, 14], [6, 7, 8]],
[[18, 19, 20], [3, 4, 5], [24, 25, 26]]],
[[[36, 37, 38], [48, 49, 50], [42, 43, 44]],
[[27, 28, 29], [39, 40, 41], [33, 34, 35]],
[[45, 46, 47], [30, 31, 32], [51, 52, 53]]]]
verify_reverse_sequence(indata, [2, 3, 2], 2, 1, np.array(result))
indata = np.array(np.arange(0, 16)).reshape([4, 4]).astype("int32")
result = []
with pytest.raises(Exception) as execinfo:
verify_reverse_sequence(indata, [2, 3, 2, 4, 5], 1, 0, np.array(result))
assert "For reverse_sequnece seq_lengths size should match with dimension of batch axis," \
" but got dimension of batch_axis = 4, and seq_length size = 5" in execinfo.value.args[0]
def test_scatter():
def ref_scatter(data, indices, updates, axis=0):
idx = np.indices(indices.shape).reshape(indices.ndim, -1)
updated_idx = np.copy(idx)
indices = indices.reshape(-1)
for i in range(len(indices)):
updated_idx[axis, i] = indices[i]
scattered = np.copy(data)
scattered[tuple(updated_idx)] = updates[tuple(idx)]
return scattered
def verify_scatter(dshape, ishape, axis=0):
d = relay.var("d", relay.TensorType(dshape, "float32"))
i = relay.var("i", relay.TensorType(ishape, "int64"))
u = relay.var("u", relay.TensorType(ishape, "float32"))
z = relay.op.scatter(d, i, u, axis)
func = relay.Function([d, i, u], z)
data_np = np.random.uniform(size=dshape).astype("float32")
updates_np = np.random.uniform(size=ishape).astype("float32")
indices_np = np.random.randint(-dshape[axis], dshape[axis] - 1, ishape).astype("int64")
ref_res = ref_scatter(data_np, indices_np, updates_np, axis)
# TODO(mbrookhart): expand testing when adding more backend schedules
for target, ctx in [("llvm", tvm.cpu())]:
for kind in ["graph", "debug"]:
intrp = relay.create_executor(kind, ctx=ctx, target=target)
op_res = intrp.evaluate(func)(data_np, indices_np, updates_np)
tvm.testing.assert_allclose(
op_res.asnumpy(), ref_res, rtol=1e-5)
verify_scatter((10, ), (10, ), 0)
verify_scatter((10, 5), (10, 5), -2)
verify_scatter((10, 5), (10, 5), -1)
verify_scatter((10, 5), (3, 5), 0)
verify_scatter((12, 4), (7, 2), 1)
verify_scatter((2, 3, 4), (1, 3, 4), 0)
verify_scatter((2, 3, 4), (2, 1, 4), 1)
verify_scatter((2, 3, 4), (2, 3, 1), 2)
verify_scatter((2, 3, 4, 5), (1, 3, 4, 5), 0)
verify_scatter((6, 3, 4, 5), (2, 3, 4, 5), 1)
verify_scatter((2, 3, 8, 5), (2, 3, 1, 1), 2)
verify_scatter((16, 16, 4, 5), (16, 16, 4, 5), 3)
def test_scatter_add():
def ref_scatter_add(data, indices, updates, axis=0):
output = np.copy(data)
for index in np.ndindex(*indices.shape):
new_index = list(index)
new_index[axis] = indices[index]
output[tuple(new_index)] += updates[index]
return output
def verify_scatter_add(dshape, ishape, axis=0):
d = relay.var("d", relay.TensorType(dshape, "float32"))
i = relay.var("i", relay.TensorType(ishape, "int64"))
u = relay.var("u", relay.TensorType(ishape, "float32"))
z = relay.op.scatter_add(d, i, u, axis)
func = relay.Function([d, i, u], z)
data_np = np.random.uniform(size=dshape).astype("float32")
updates_np = np.random.uniform(size=ishape).astype("float32")
indices_np = np.random.randint(-dshape[axis], dshape[axis] - 1, ishape).astype("int64")
ref_res = ref_scatter_add(data_np, indices_np, updates_np, axis)
# TODO(mbrookhart): expand testing when adding more backend schedules
for target, ctx in [("llvm", tvm.cpu())]:
for kind in ["graph", "debug"]:
intrp = relay.create_executor(kind, ctx=ctx, target=target)
op_res = intrp.evaluate(func)(data_np, indices_np, updates_np)
tvm.testing.assert_allclose(
op_res.asnumpy(), ref_res, rtol=1e-5)
verify_scatter_add((10, ), (10, ), 0)
verify_scatter_add((10, 5), (10, 5), -2)
verify_scatter_add((10, 5), (10, 5), -1)
verify_scatter_add((10, 5), (3, 5), 0)
verify_scatter_add((12, 4), (7, 2), 1)
verify_scatter_add((2, 3, 4), (1, 3, 4), 0)
verify_scatter_add((2, 3, 4), (2, 1, 4), 1)
verify_scatter_add((2, 3, 4), (2, 3, 1), 2)
verify_scatter_add((2, 3, 4, 5), (1, 3, 4, 5), 0)
verify_scatter_add((6, 3, 4, 5), (2, 3, 4, 5), 1)
verify_scatter_add((2, 3, 8, 5), (2, 3, 1, 1), 2)
verify_scatter_add((16, 16, 4, 5), (16, 16, 4, 5), 3)
@tvm.testing.uses_gpu
def test_gather():
def verify_gather(data, axis, indices, ref_res):
data = np.asarray(data, dtype='float32')
indices = np.asarray(indices, dtype='int32')
ref_res = np.asarray(ref_res)
d = relay.var("x", relay.TensorType(data.shape, "float32"))
i = relay.var("y", relay.TensorType(indices.shape, "int32"))
z = relay.gather(d, axis, i)
func = relay.Function([d, i], z)
for target, ctx in tvm.testing.enabled_targets():
for kind in ["graph", "debug"]:
intrp = relay.create_executor(kind, ctx=ctx, target=target)
op_res = intrp.evaluate(func)(data, indices)
tvm.testing.assert_allclose(op_res.asnumpy(), ref_res,
rtol=1e-5)
verify_gather([[1, 2], [3, 4]],
1,
[[0, 0], [1, 0]],
[[1, 1], [4, 3]])
verify_gather([[[0, 1, 2], [3, 4, 5]], [[6, 7, 8], [9, 10, 11]]],
0,
[[[1, 0, 1], [1, 1, 0]]],
[[[6, 1, 8], [9, 10, 5]]])
verify_gather([[[-0.2321, -0.2024, -1.7624], [-0.3829, -0.4246, 0.2448],
[0.1822, 0.2360, -0.8965], [0.4497, -0.2224, 0.6103]],
[[0.0408, -0.7667, -0.4303], [-0.3216, 0.7489, -0.1502],
[0.0144, -0.4699, -0.0064], [-0.0768, -1.6064, 1.3390]]],
1,
[[[2, 2, 0], [1, 0, 3]], [[3, 2, 0], [1, 0, 0]]],
[[[0.1822, 0.2360, -1.7624], [-0.3829, -0.2024, 0.6103]],
[[-0.0768, -0.4699, -0.4303], [-0.3216, -0.7667, -0.4303]]])
verify_gather([[[0.3050, 1.6986, 1.1034], [0.7020, -0.6960, -2.1818],
[0.3116, -0.5773, -0.9912], [0.0835, -1.3915, -1.0720]],
[[0.1694, -0.6091, -0.6539], [-0.5234, -0.1218, 0.5084],
[0.2374, -1.9537, -2.0078], [-0.5700, -1.0302, 0.1558]]],
2,
[[[1, 1, 0, 1], [0, 0, 2, 2], [1, 2, 1, 2], [2, 2, 1, 0]],
[[0, 0, 1, 2], [2, 2, 1, 0], [1, 2, 0, 0], [0, 2, 0, 2]]],
[[[1.6986, 1.6986, 0.3050, 1.6986],
[0.7020, 0.7020, -2.1818, -2.1818],
[-0.5773, -0.9912, -0.5773, -0.9912],
[-1.0720, -1.0720, -1.3915, 0.0835]],
[[0.1694, 0.1694, -0.6091, -0.6539],
[0.5084, 0.5084, -0.1218, -0.5234],
[-1.9537, -2.0078, 0.2374, 0.2374],
[-0.5700, 0.1558, -0.5700, 0.1558]]])
@tvm.testing.uses_gpu
def test_gather_nd():
def verify_gather_nd(xshape, yshape, y_data):
x = relay.var("x", relay.TensorType(xshape, "float32"))
y = relay.var("y", relay.TensorType(yshape, "int32"))
z = relay.gather_nd(x, y)
func = relay.Function([x, y], z)
x_data = np.random.uniform(size=xshape).astype("float32")
ref_res = x_data[tuple(y_data)]
for target, ctx in tvm.testing.enabled_targets():
for kind in ["graph", "debug"]:
intrp = relay.create_executor(kind, ctx=ctx, target=target)
op_res = intrp.evaluate(func)(x_data, y_data)
tvm.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=1e-5)
verify_gather_nd((2, 2), (2, 3), [[1, 1, 0], [0, 1, 0]])
verify_gather_nd((2, 2, 2), (2, 2), [[0, 1], [1, 0]])
verify_gather_nd((3, 2, 2), (2, 2), [[0, 1], [1, 0]])
verify_gather_nd((3, 2), (2, 2, 3), [[[0, 1, 2], [2, 0, 1]], [[0, 0, 0], [1, 1, 1]]])
def _verify_infiniteness_ops(relay_op, ref_op):
for dtype in ['float32', 'float16', 'int32', 'int16']:
shape = (2, 8, 8)
x = relay.var("x", relay.TensorType(shape, dtype))
y = relay_op(x)
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType(shape, "bool")
data = np.random.uniform(size=shape).astype(dtype)
if dtype.startswith('float'):
data.ravel()[np.random.choice(data.size, int(data.size * 0.5), replace=False)] = np.infty
data.ravel()[np.random.choice(data.size, int(data.size * 0.5), replace=False)] = np.nan
intrp = create_executor()
op_res = intrp.evaluate(y, {x: data})
ref_res = ref_op(data)
np.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=0.01)
def test_isfinite():
_verify_infiniteness_ops(relay.isfinite, np.isfinite)
def test_isinf():
_verify_infiniteness_ops(relay.isinf, np.isinf)
@tvm.testing.uses_gpu
def test_unravel_index():
def verify_unravel_index(indices, shape, dtype):
x_data = np.array(indices).astype(dtype)
y_data = np.array(shape).astype(dtype)
x = relay.var("x", relay.TensorType(x_data.shape, dtype))
y = relay.var("y", relay.TensorType(y_data.shape, dtype))
z = relay.unravel_index(x, y)
zz = run_infer_type(z)
if len(x_data.shape) == 1:
out_shape = [y_data.shape[0], x_data.shape[0]]
else:
out_shape = [y_data.shape[0]]
assert zz.checked_type == relay.ty.TensorType(out_shape, dtype)
func = relay.Function([x, y], z)
ref_res = np.unravel_index(x_data, y_data)
for target, ctx in tvm.testing.enabled_targets():
for kind in ["graph", "debug"]:
intrp = relay.create_executor(kind, ctx=ctx, target=target)
op_res = intrp.evaluate(func)(x_data, y_data)
tvm.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=1e-5)
for dtype in ["int64", "int32"]:
verify_unravel_index([0, 1, 2, 3], [2, 2], dtype)
verify_unravel_index([144], [5, 5, 5, 2], dtype)
verify_unravel_index(144, [5, 5, 5, 2], dtype)
verify_unravel_index([100, 13, 5], [5, 5, 5, 2], dtype)
# In the example below, 5 is out of bounds for an array of size 4.
# The NumPy implementation throws an error for it, while the TVM
# implementation does not throw an error; instead it produces
# output that is in line with TensorFlow.
# verify_unravel_index([0, 1, 2, 5], [2, 2], dtype)
@tvm.testing.uses_gpu
def test_sparse_to_dense():
def verify_sparse_to_dense(sparse_indices, sparse_values, default_value, output_shape, expected):
sparse_indices_data = np.array(sparse_indices)
sparse_values_data = np.array(sparse_values)
default_value_data = np.array(default_value)
a = relay.var("a", relay.TensorType(sparse_indices_data.shape, str(sparse_indices_data.dtype)))
b = relay.var("b", relay.TensorType(sparse_values_data.shape, str(sparse_values_data.dtype)))
if default_value is None:
args = [a, b]
d = relay.sparse_to_dense(a, output_shape, b)
else:
c = relay.var("c", relay.TensorType(default_value_data.shape, str(default_value_data.dtype)))
args = [a, b, c]
d = relay.sparse_to_dense(a, output_shape, b, c)
zz = run_infer_type(d)
assert zz.checked_type == relay.ty.TensorType(output_shape, str(sparse_values_data.dtype))
func = relay.Function(args, d)
for target, ctx in tvm.testing.enabled_targets():
for kind in ["graph", "debug"]:
intrp = relay.create_executor(kind, ctx=ctx, target=target)
if default_value is None:
op_res = intrp.evaluate(func)(sparse_indices_data, sparse_values_data)
else:
op_res = intrp.evaluate(func)(
sparse_indices_data, sparse_values_data, default_value_data
)
tvm.testing.assert_allclose(op_res.asnumpy(), expected, rtol=1e-5)
verify_sparse_to_dense(1, 3, 0, [5], [0, 3, 0, 0, 0]) # scalar
verify_sparse_to_dense([0, 1, 4], [3, 3, 3], 0, [5], [3, 3, 0, 0, 3]) # vector
verify_sparse_to_dense([[0, 0], [1, 2]], [1, 2], 0, [3, 4], [[1, 0, 0, 0], [0, 0, 2, 0], [0, 0, 0, 0]]) # nXd
verify_sparse_to_dense(
[[0, 0, 0], [1, 2, 3]],
[1, 2],
4,
[2, 3, 4],
[[[1, 4, 4, 4], [4, 4, 4, 4], [4, 4, 4, 4]], [[4, 4, 4, 4], [4, 4, 4, 4], [4, 4, 4, 2]]]
) # nXd
verify_sparse_to_dense([0, 1, 4], [3.1, 3.1, 3.1], 3.5, [5], [3.1, 3.1, 3.5, 3.5, 3.1]) # floats
verify_sparse_to_dense(1, 3, None, [5], [0, 3, 0, 0, 0]) # default value not specified
# negative test cases
# sparse indices should be ints
# verify_sparse_to_dense([[0.1, 1.1, 4.1], [0, 2, 4]], [3.1, 3.1, 3.1], 3.5, [5], [3.1, 3.1, 3.5, 3.5, 3.1])
# sparse_values should be 0d or 1d only
# verify_sparse_to_dense([[0, 1, 4], [0, 2, 4]], [[[3.1, 3.1, 3.1]]], 3.5, [5], [3.1, 3.1, 3.5, 3.5, 3.1])
# sparse_indices should not be > 2d tensor
# verify_sparse_to_dense([[[[0, 1, 4], [0, 2, 4]]]], [[[[3.1, 3.1, 3.1]]]], 3.5, [5], [3.1, 3.1, 3.5, 3.5, 3.1])
if __name__ == "__main__":
test_cast()
test_zeros_ones()
test_unary_identity()
test_clip()
test_transpose_infer_type()
test_transpose()
test_reshape_infer_type()
test_reshape()
test_reshape_fail()
test_reshape_like_infer_type()
test_reshape_like()
test_take_infer_type()
test_take()
test_full_infer_type()
test_full()
test_full_like_infer_type()
test_full_like()
test_infer_type_leaky_relu()
test_infer_type_prelu()
test_squeeze()
test_squeeze_infer_type()
test_squeeze_bad_axes_infer_type()
test_split_infer_type()
test_arange()
test_meshgrid()
test_reverse()
test_stack()
test_tile()
test_repeat()
test_gather_nd()
test_isfinite()
test_isinf()
test_unravel_index()
test_sparse_to_dense()
test_fixed_point_multiply()
|
"""In this module we provide services for working with fit files.
Resources
- fitparse package: [GitHub](https://github.com/dtcooper/python-fitparse) and \
[Docs](http://dtcooper.github.io/python-fitparse/)
- fitdecode package: [GitHub](https://github.com/polyvertex/fitdecode) and \
[Read the Docs](https://fitdecode.readthedocs.io/en/latest/)
- [FIT on the OpenStreetMap wiki](https://wiki.openstreetmap.org/wiki/FIT)
- [Download FIT SDK](https://www.thisisant.com/resources/fit).
"""
from typing import Union
import fitparse
import pandas as pd
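# Conversion factors applied to the raw FIT record values below. Latitude and
# longitude are stored as 32-bit semicircles, so multiplying by 180 / 2**31
# yields degrees (a FIT SDK convention). The speed and altitude factors are
# assumed to match the recording device and file at hand and are not universal.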
UNIT_CONVERSION = {
"speed": {"from": "10*6m/s", "to": "km/h", "factor": 0.0036,},
"enhanced_speed": {"from": "10*6m/s", "to": "km/h", "factor": 3.6,},
"altitude": {"from": "unknown", "to": "m", "factor": 0.03855343881175331,},
"position_long": {"from": "semicircles", "to": "degrees", "factor": (180.0 / 2 ** 31),},
"position_lat": {"from": "semicircles", "to": "degrees", "factor": (180.0 / 2 ** 31),},
}
def parse_fit_file(file: Union[fitparse.base.FitFile, bytes, str,]) -> pd.DataFrame:
"""Converts a fit_file to a dataframe
Args:
file (Union[fitparse.base.FitFile, bytes, str]): The fit file to parse
Raises:
ValueError: If the file is not in a supported format
Returns:
pd.DataFrame: A DataFrame with the data
"""
if isinstance(file, (bytes, str,),):
fit_file = fitparse.FitFile(file)
elif isinstance(file, fitparse.base.FitFile,):
fit_file = file
else:
raise ValueError(f"{type(file)} is not supported!")
return _parse_records(fit_file.get_messages("record"))
def _parse_records(records,):
data = [record.get_values() for record in records]
training_data = pd.DataFrame(data)
_convert_units(training_data)
return training_data
def _convert_units(training_data: pd.DataFrame,):
    columns = set(UNIT_CONVERSION.keys()).intersection(set(training_data.columns))
    for column in columns:
        training_data[column] *= UNIT_CONVERSION[column]["factor"]
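# Illustrative usage sketch (not part of the module's public surface). The
# file name below is a hypothetical example and must point to a real .fit
# file on disk for this to run.
if __name__ == "__main__":
    example_path = "activity.fit"  # hypothetical sample file
    frame = parse_fit_file(example_path)
    print(frame.columns.tolist())
    print(frame.head())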
|
import io
import unittest
from contextlib import redirect_stdout
from unittest.mock import patch
class TestQ(unittest.TestCase):
@patch('builtins.input', side_effect=[
'3',
'1 0',
'2 $',
'3 1',
])
def test_case_0(self, input_mock=None):
text_trap = io.StringIO()
with redirect_stdout(text_trap):
import solution
self.assertEqual(text_trap.getvalue(),
"Error Code: integer division or modulo by zero\n"
+ "Error Code: invalid literal for int() with base 10: '$'\n"
+ "3\n")
if __name__ == '__main__':
unittest.main()
|
import FWCore.ParameterSet.Config as cms
regressionModifier106XUL = cms.PSet(
modifierName = cms.string('EGRegressionModifierV3'),
rhoTag = cms.InputTag('fixedGridRhoFastjetAllTmp'),
useClosestToCentreSeedCrysDef = cms.bool(False),
maxRawEnergyForLowPtEBSigma = cms.double(-1),
maxRawEnergyForLowPtEESigma = cms.double(1200.),
eleRegs = cms.PSet(
ecalOnlyMean = cms.PSet(
rangeMinLowEt = cms.double(0.2),
rangeMaxLowEt = cms.double(2.0),
rangeMinHighEt = cms.double(-1.),
rangeMaxHighEt = cms.double(3.0),
forceHighEnergyTrainingIfSaturated = cms.bool(True),
lowEtHighEtBoundary = cms.double(999999.),
ebLowEtForestName = cms.string("electron_eb_ecalOnly_1To300_0p2To2_mean"),
ebHighEtForestName = cms.string("electron_eb_ECALonly"),
eeLowEtForestName = cms.string("electron_ee_ecalOnly_1To300_0p2To2_mean"),
eeHighEtForestName = cms.string("electron_ee_ECALonly"),
),
ecalOnlySigma = cms.PSet(
rangeMinLowEt = cms.double(0.0002),
rangeMaxLowEt = cms.double(0.5),
rangeMinHighEt = cms.double(0.0002),
rangeMaxHighEt = cms.double(0.5),
forceHighEnergyTrainingIfSaturated = cms.bool(True),
lowEtHighEtBoundary = cms.double(999999.),
ebLowEtForestName = cms.string("electron_eb_ecalOnly_1To300_0p0002To0p5_sigma"),
ebHighEtForestName = cms.string("electron_eb_ECALonly_var"),
eeLowEtForestName = cms.string("electron_ee_ecalOnly_1To300_0p0002To0p5_sigma"),
eeHighEtForestName = cms.string("electron_ee_ECALonly_var"),
),
epComb = cms.PSet(
ecalTrkRegressionConfig = cms.PSet(
rangeMinLowEt = cms.double(0.2),
rangeMaxLowEt = cms.double(2.0),
rangeMinHighEt = cms.double(0.2),
rangeMaxHighEt = cms.double(2.0),
lowEtHighEtBoundary = cms.double(999999.),
forceHighEnergyTrainingIfSaturated = cms.bool(False),
ebLowEtForestName = cms.string('electron_eb_ecalTrk_1To300_0p2To2_mean'),
ebHighEtForestName = cms.string('electron_eb_ecalTrk_1To300_0p2To2_mean'),
eeLowEtForestName = cms.string('electron_ee_ecalTrk_1To300_0p2To2_mean'),
eeHighEtForestName = cms.string('electron_ee_ecalTrk_1To300_0p2To2_mean'),
),
ecalTrkRegressionUncertConfig = cms.PSet(
rangeMinLowEt = cms.double(0.0002),
rangeMaxLowEt = cms.double(0.5),
rangeMinHighEt = cms.double(0.0002),
rangeMaxHighEt = cms.double(0.5),
lowEtHighEtBoundary = cms.double(999999.),
forceHighEnergyTrainingIfSaturated = cms.bool(False),
ebLowEtForestName = cms.string('electron_eb_ecalTrk_1To300_0p0002To0p5_sigma'),
ebHighEtForestName = cms.string('electron_eb_ecalTrk_1To300_0p0002To0p5_sigma'),
eeLowEtForestName = cms.string('electron_ee_ecalTrk_1To300_0p0002To0p5_sigma'),
eeHighEtForestName = cms.string('electron_ee_ecalTrk_1To300_0p0002To0p5_sigma'),
),
maxEcalEnergyForComb=cms.double(200.),
minEOverPForComb=cms.double(0.025),
maxEPDiffInSigmaForComb=cms.double(15.),
maxRelTrkMomErrForComb=cms.double(10.),
)
),
phoRegs = cms.PSet(
ecalOnlyMean = cms.PSet(
rangeMinLowEt = cms.double(0.2),
rangeMaxLowEt = cms.double(2.0),
rangeMinHighEt = cms.double(-1.),
rangeMaxHighEt = cms.double(3.0),
forceHighEnergyTrainingIfSaturated = cms.bool(True),
lowEtHighEtBoundary = cms.double(999999.),
ebLowEtForestName = cms.string("photon_eb_ecalOnly_5To300_0p2To2_mean"),
ebHighEtForestName = cms.string("photon_eb_ECALonly"),
eeLowEtForestName = cms.string("photon_ee_ecalOnly_5To300_0p2To2_mean"),
eeHighEtForestName = cms.string("photon_ee_ECALonly"),
),
ecalOnlySigma = cms.PSet(
rangeMinLowEt = cms.double(0.0002),
rangeMaxLowEt = cms.double(0.5),
rangeMinHighEt = cms.double(0.0002),
rangeMaxHighEt = cms.double(0.5),
forceHighEnergyTrainingIfSaturated = cms.bool(True),
lowEtHighEtBoundary = cms.double(999999.),
ebLowEtForestName = cms.string("photon_eb_ecalOnly_5To300_0p0002To0p5_sigma"),
ebHighEtForestName = cms.string("photon_eb_ECALonly_var"),
eeLowEtForestName = cms.string("photon_ee_ecalOnly_5To300_0p0002To0p5_sigma"),
eeHighEtForestName = cms.string("photon_ee_ECALonly_var"),
),
)
)
regressionModifier103XLowPtPho = cms.PSet(
modifierName = cms.string('EGRegressionModifierV3'),
rhoTag = cms.InputTag('fixedGridRhoFastjetAllTmp'),
useClosestToCentreSeedCrysDef = cms.bool(False),
maxRawEnergyForLowPtEBSigma = cms.double(-1),
maxRawEnergyForLowPtEESigma = cms.double(1200.),
eleRegs = cms.PSet(
ecalOnlyMean = cms.PSet(
rangeMinLowEt = cms.double(0.2),
rangeMaxLowEt = cms.double(2.0),
rangeMinHighEt = cms.double(-1.),
rangeMaxHighEt = cms.double(3.0),
forceHighEnergyTrainingIfSaturated = cms.bool(True),
lowEtHighEtBoundary = cms.double(999999.),
ebLowEtForestName = cms.string("electron_eb_ecalOnly_1To20_0p2To2_mean"),
ebHighEtForestName = cms.string("electron_eb_ECALonly"),
eeLowEtForestName = cms.string("electron_ee_ecalOnly_1To20_0p2To2_mean"),
eeHighEtForestName = cms.string("electron_ee_ECALonly"),
),
ecalOnlySigma = cms.PSet(
rangeMinLowEt = cms.double(0.0002),
rangeMaxLowEt = cms.double(0.5),
rangeMinHighEt = cms.double(0.0002),
rangeMaxHighEt = cms.double(0.5),
forceHighEnergyTrainingIfSaturated = cms.bool(True),
lowEtHighEtBoundary = cms.double(999999.),
ebLowEtForestName = cms.string("electron_eb_ecalOnly_1To20_0p0002To0p5_sigma"),
ebHighEtForestName = cms.string("electron_eb_ECALonly_var"),
eeLowEtForestName = cms.string("electron_ee_ecalOnly_1To20_0p0002To0p5_sigma"),
eeHighEtForestName = cms.string("electron_ee_ECALonly_var"),
),
epComb = cms.PSet(
ecalTrkRegressionConfig = cms.PSet(
rangeMinLowEt = cms.double(0.2),
rangeMaxLowEt = cms.double(2.0),
rangeMinHighEt = cms.double(0.2),
rangeMaxHighEt = cms.double(2.0),
lowEtHighEtBoundary = cms.double(999999.),
forceHighEnergyTrainingIfSaturated = cms.bool(False),
ebLowEtForestName = cms.string('electron_eb_ecalTrk_1To20_0p2To2_mean'),
ebHighEtForestName = cms.string('electron_eb_ecalTrk_1To20_0p2To2_mean'),
eeLowEtForestName = cms.string('electron_ee_ecalTrk_1To20_0p2To2_mean'),
eeHighEtForestName = cms.string('electron_ee_ecalTrk_1To20_0p2To2_mean'),
),
ecalTrkRegressionUncertConfig = cms.PSet(
rangeMinLowEt = cms.double(0.0002),
rangeMaxLowEt = cms.double(0.5),
rangeMinHighEt = cms.double(0.0002),
rangeMaxHighEt = cms.double(0.5),
lowEtHighEtBoundary = cms.double(999999.),
forceHighEnergyTrainingIfSaturated = cms.bool(False),
ebLowEtForestName = cms.string('electron_eb_ecalTrk_1To20_0p0002To0p5_sigma'),
ebHighEtForestName = cms.string('electron_eb_ecalTrk_1To20_0p0002To0p5_sigma'),
eeLowEtForestName = cms.string('electron_ee_ecalTrk_1To20_0p0002To0p5_sigma'),
eeHighEtForestName = cms.string('electron_ee_ecalTrk_1To20_0p0002To0p5_sigma'),
),
maxEcalEnergyForComb=cms.double(200.),
minEOverPForComb=cms.double(0.025),
maxEPDiffInSigmaForComb=cms.double(15.),
maxRelTrkMomErrForComb=cms.double(10.),
)
),
phoRegs = cms.PSet(
ecalOnlyMean = cms.PSet(
rangeMinLowEt = cms.double(0.2),
rangeMaxLowEt = cms.double(2.0),
rangeMinHighEt = cms.double(-1.),
rangeMaxHighEt = cms.double(3.0),
forceHighEnergyTrainingIfSaturated = cms.bool(True),
lowEtHighEtBoundary = cms.double(999999.),
ebLowEtForestName = cms.string("photon_eb_ecalOnly_1To20_0p2To2_mean"),
ebHighEtForestName = cms.string("photon_eb_ECALonly"),
eeLowEtForestName = cms.string("photon_ee_ecalOnly_1To20_0p2To2_mean"),
eeHighEtForestName = cms.string("photon_ee_ECALonly"),
),
ecalOnlySigma = cms.PSet(
rangeMinLowEt = cms.double(0.0002),
rangeMaxLowEt = cms.double(0.5),
rangeMinHighEt = cms.double(0.0002),
rangeMaxHighEt = cms.double(0.5),
forceHighEnergyTrainingIfSaturated = cms.bool(True),
lowEtHighEtBoundary = cms.double(999999.),
ebLowEtForestName = cms.string("photon_eb_ecalOnly_1To20_0p0002To0p5_sigma"),
ebHighEtForestName = cms.string("photon_eb_ECALonly_var"),
eeLowEtForestName = cms.string("photon_ee_ecalOnly_1To20_0p0002To0p5_sigma"),
eeHighEtForestName = cms.string("photon_ee_ECALonly_var"),
),
)
)
regressionModifier94X = \
cms.PSet( modifierName = cms.string('EGRegressionModifierV2'),
rhoCollection = cms.InputTag('fixedGridRhoFastjetAllTmp'),
electron_config = cms.PSet( # EB, EE
regressionKey = cms.vstring('electron_eb_ECALonly_lowpt', 'electron_eb_ECALonly', 'electron_ee_ECALonly_lowpt', 'electron_ee_ECALonly',
'electron_eb_ECALTRK_lowpt', 'electron_eb_ECALTRK', 'electron_ee_ECALTRK_lowpt', 'electron_ee_ECALTRK'),
uncertaintyKey = cms.vstring('electron_eb_ECALonly_lowpt_var', 'electron_eb_ECALonly_var', 'electron_ee_ECALonly_lowpt_var', 'electron_ee_ECALonly_var',
'electron_eb_ECALTRK_lowpt_var', 'electron_eb_ECALTRK_var', 'electron_ee_ECALTRK_lowpt_var', 'electron_ee_ECALTRK_var'),
),
photon_config = cms.PSet( # EB, EE
regressionKey = cms.vstring('photon_eb_ECALonly_lowpt', 'photon_eb_ECALonly', 'photon_ee_ECALonly_lowpt', 'photon_ee_ECALonly'),
uncertaintyKey = cms.vstring('photon_eb_ECALonly_lowpt_var', 'photon_eb_ECALonly_var', 'photon_ee_ECALonly_lowpt_var', 'photon_ee_ECALonly_var'),
),
lowEnergy_ECALonlyThr = cms.double(99999.),
lowEnergy_ECALTRKThr = cms.double(50.),
highEnergy_ECALTRKThr = cms.double(200.),
eOverP_ECALTRKThr = cms.double(0.025),
epDiffSig_ECALTRKThr = cms.double(15.),
epSig_ECALTRKThr = cms.double(10.),
forceHighEnergyEcalTrainingIfSaturated = cms.bool(True)
)
regressionModifier80X = \
cms.PSet( modifierName = cms.string('EGRegressionModifierV1'),
autoDetectBunchSpacing = cms.bool(True),
applyExtraHighEnergyProtection = cms.bool(True),
bunchSpacingTag = cms.InputTag("bunchSpacingProducer"),
manualBunchSpacing = cms.int32(50),
rhoCollection = cms.InputTag("fixedGridRhoFastjetAll"),
vertexCollection = cms.InputTag("offlinePrimaryVertices"),
electron_config = cms.PSet( # EB, EE
regressionKey_25ns = cms.vstring('gedelectron_EBCorrection_25ns', 'gedelectron_EECorrection_25ns'),
uncertaintyKey_25ns = cms.vstring('gedelectron_EBUncertainty_25ns', 'gedelectron_EEUncertainty_25ns'),
combinationKey_25ns = cms.string('gedelectron_p4combination_25ns'),
regressionKey_50ns = cms.vstring('gedelectron_EBCorrection_50ns', 'gedelectron_EECorrection_50ns'),
uncertaintyKey_50ns = cms.vstring('gedelectron_EBUncertainty_50ns', 'gedelectron_EEUncertainty_50ns'),
combinationKey_50ns = cms.string('gedelectron_p4combination_50ns'),
),
photon_config = cms.PSet( # EB, EE
regressionKey_25ns = cms.vstring('gedphoton_EBCorrection_25ns', 'gedphoton_EECorrection_25ns'),
uncertaintyKey_25ns = cms.vstring('gedphoton_EBUncertainty_25ns', 'gedphoton_EEUncertainty_25ns'),
regressionKey_50ns = cms.vstring('gedphoton_EBCorrection_50ns', 'gedphoton_EECorrection_50ns'),
uncertaintyKey_50ns = cms.vstring('gedphoton_EBUncertainty_50ns', 'gedphoton_EEUncertainty_50ns'),
)
)
# By default we use a regression that may be inappropriate to the main purpose of this
# release; life is simpler that way (the era modifiers below replace it where needed).
regressionModifier = regressionModifier94X.clone()
from Configuration.Eras.Modifier_run2_egamma_2016_cff import run2_egamma_2016
from Configuration.Eras.Modifier_run2_egamma_2017_cff import run2_egamma_2017
from Configuration.Eras.Modifier_run2_egamma_2018_cff import run2_egamma_2018
(run2_egamma_2016 | run2_egamma_2017 | run2_egamma_2018).toReplaceWith(regressionModifier,regressionModifier106XUL)
from Configuration.ProcessModifiers.egamma_lowPt_exclusive_cff import egamma_lowPt_exclusive
egamma_lowPt_exclusive.toReplaceWith(regressionModifier,regressionModifier103XLowPtPho)
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for SparseSoftmaxCrossEntropyWithLogits op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import time
import numpy as np
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session
from tensorflow.python.eager import backprop as backprop_lib
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops as ops_lib
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_nn_ops
from tensorflow.python.ops import gradient_checker_v2
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import sparse_ops
import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
from tensorflow.python.platform import app
from tensorflow.python.platform import test
class SparseXentTest(test.TestCase):
def _npXent(self, features, labels):
features = np.reshape(features, [-1, features.shape[-1]])
labels = np.reshape(labels, [-1])
batch_dim = 0
class_dim = 1
batch_size = features.shape[batch_dim]
e = np.exp(features - np.reshape(
np.amax(
features, axis=class_dim), [batch_size, 1]))
probs = e / np.reshape(np.sum(e, axis=class_dim), [batch_size, 1])
labels_mat = np.zeros_like(probs).astype(probs.dtype)
labels_mat[np.arange(batch_size), labels] = 1.0
bp = (probs - labels_mat)
l = -np.sum(labels_mat * np.log(probs + 1.0e-20), axis=1)
return l, bp
def _testXent(self, np_features, np_labels):
np_loss, np_backprop = self._npXent(np_features, np_labels)
with self.cached_session():
loss, backprop = gen_nn_ops.sparse_softmax_cross_entropy_with_logits(
np_features, np_labels)
tf_loss, tf_backprop = self.evaluate([loss, backprop])
self.assertAllCloseAccordingToType(np_loss, tf_loss)
self.assertAllCloseAccordingToType(np_backprop, tf_backprop)
def testSingleClass(self):
for label_dtype in np.int32, np.int64:
with self.cached_session():
loss, backprop = gen_nn_ops.sparse_softmax_cross_entropy_with_logits(
np.array([[1.], [-1.], [0.]]).astype(np.float32),
np.array([0, 0, 0]).astype(label_dtype))
tf_loss, tf_backprop = self.evaluate([loss, backprop])
self.assertAllClose([0.0, 0.0, 0.0], tf_loss)
self.assertAllClose([[0.0], [0.0], [0.0]], tf_backprop)
@test_util.run_gpu_only()
def testInvalidLabelGPU(self):
features = [[1., 1., 1., 1.], [1., 1., 1., 1.], [1., 2., 3., 4.],
[1., 2., 3., 4.]]
labels = [4, 3, 0, -1]
loss, backprop = self.evaluate(
gen_nn_ops.sparse_softmax_cross_entropy_with_logits(features, labels))
self.assertAllClose([[np.nan] * 4, [0.25, 0.25, 0.25, -0.75],
[-0.968, 0.087, 0.237, 0.6439], [np.nan] * 4],
backprop,
rtol=1e-3,
atol=1e-3)
self.assertAllClose([np.nan, 1.3862, 3.4420, np.nan],
loss,
rtol=1e-3,
atol=1e-3)
@test_util.run_in_graph_and_eager_modes(use_gpu=False)
@test_util.disable_xla("XLA cannot assert inside of a kernel.")
def testInvalidLabelCPU(self):
features = [[1., 1., 1., 1.], [1., 1., 1., 1.], [1., 2., 3., 4.],
[1., 2., 3., 4.]]
labels = [4, 3, 0, -1]
with self.assertRaisesRegex(
(errors_impl.InvalidArgumentError, errors_impl.UnknownError),
"Received a label value of"):
self.evaluate(
gen_nn_ops.sparse_softmax_cross_entropy_with_logits(features, labels))
def testNpXent(self):
# We create 2 batches of logits for testing.
# batch 0 is the boring uniform distribution: 1, 1, 1, 1, with target 3.
# batch 1 has a bit of difference: 1, 2, 3, 4, with target 0.
features = [[1., 1., 1., 1.], [1., 2., 3., 4.]]
labels = [3, 0]
# For batch 0, we expect the uniform distribution: 0.25, 0.25, 0.25, 0.25
# With a hard target 3, the backprop is [0.25, 0.25, 0.25, -0.75]
# The loss for this batch is -log(0.25) = 1.386
#
# For batch 1, we have:
# exp(0) = 1
# exp(1) = 2.718
# exp(2) = 7.389
# exp(3) = 20.085
# SUM = 31.192
# So we have as probabilities:
# exp(0) / SUM = 0.032
# exp(1) / SUM = 0.087
# exp(2) / SUM = 0.237
# exp(3) / SUM = 0.644
# With a hard 1, the backprop is [0.032 - 1.0 = -0.968, 0.087, 0.237, 0.644]
# The loss for this batch is [1.0 * -log(0.25), 1.0 * -log(0.032)]
# = [1.3862, 3.4420]
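# In general, for row i the reference computes
#   loss_i = -log(softmax(features_i)[labels_i])
# and the gradient w.r.t. the logits is
#   d(loss_i)/d(features_i) = softmax(features_i) - one_hot(labels_i),
# which is what _npXent returns above.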
np_loss, np_backprop = self._npXent(np.array(features), np.array(labels))
self.assertAllClose(
np.array([[0.25, 0.25, 0.25, -0.75], [-0.968, 0.087, 0.237, 0.6439]]),
np_backprop,
rtol=1.e-3,
atol=1.e-3)
self.assertAllClose(
np.array([1.3862, 3.4420]), np_loss, rtol=1.e-3, atol=1.e-3)
def testShapeMismatch(self):
with self.session():
with self.assertRaisesRegex(ValueError, ".*Rank mismatch:*"):
nn_ops.sparse_softmax_cross_entropy_with_logits(
labels=[[0, 2]], logits=[[0., 1.], [2., 3.], [2., 3.]])
def testScalar(self):
with self.session():
with self.assertRaisesRegex(ValueError, ".*Logits cannot be scalars*"):
nn_ops.sparse_softmax_cross_entropy_with_logits(
labels=constant_op.constant(0), logits=constant_op.constant(1.0))
def testLabelsPlaceholderScalar(self):
with ops_lib.Graph().as_default(), self.session():
labels = array_ops.placeholder(np.int32)
y = nn_ops.sparse_softmax_cross_entropy_with_logits(
labels=labels, logits=[[7.]])
with self.assertRaisesOpError("labels must be 1-D"):
y.eval(feed_dict={labels: 0})
def testVector(self):
with self.session():
loss = nn_ops.sparse_softmax_cross_entropy_with_logits(
labels=constant_op.constant(0), logits=constant_op.constant([1.0]))
self.assertAllClose(0.0, self.evaluate(loss))
def testFloat(self):
for label_dtype in np.int32, np.int64:
self._testXent(
np.array([[1., 1., 1., 1.], [1., 2., 3., 4.]]).astype(np.float32),
np.array([3, 0]).astype(label_dtype))
def testDouble(self):
for label_dtype in np.int32, np.int64:
self._testXent(
np.array([[1., 1., 1., 1.], [1., 2., 3., 4.]]).astype(np.float64),
np.array([0, 3]).astype(label_dtype))
def testHalf(self):
for label_dtype in np.int32, np.int64:
self._testXent(
np.array([[1., 1., 1., 1.], [1., 2., 3., 4.]]).astype(np.float16),
np.array([3, 0]).astype(label_dtype))
def testEmpty(self):
self._testXent(np.zeros((0, 3)), np.zeros((0,), dtype=np.int32))
@test_util.run_in_graph_and_eager_modes(use_gpu=True)
def testGradient(self):
with self.session() as sess:
l = constant_op.constant([3, 0, 1], name="l")
f = constant_op.constant(
[0.1, 0.2, 0.3, 0.4, 0.1, 0.4, 0.9, 1.6, 0.1, 0.8, 2.7, 6.4],
shape=[3, 4],
dtype=dtypes.float64,
name="f")
def xent(f):
# gradient_checker_v2.compute_gradient doesn't take int32/int64.
# labels must be of type int32/int64, so they are passed in separately here.
return nn_ops.sparse_softmax_cross_entropy_with_logits(
labels=l, logits=f, name="xent")
theoretical, numerical = gradient_checker_v2.compute_gradient(xent, [f])
if not context.executing_eagerly():
# Check that no extra computation performed. When only first derivative
# is requested, second derivative must not be computed. So when there is
# no second derivative, there is no `BatchMatMul` op in the graph.
op_names = [
op.op_def.name for op in sess.graph.get_operations() if op.op_def
]
self.assertNotIn("BatchMatMul", op_names)
self.assertNotIn("BatchMatMulV2", op_names)
tol = 5e-8
self.assertAllClose(theoretical, numerical, atol=tol, rtol=tol)
def testSecondGradient(self):
with self.session() as sess:
l = constant_op.constant([3, 0, 1], name="l")
f = constant_op.constant(
[0.3, 0.4, 0.1, 1.2, 0.1, 1.9, 0.1, 0.7, 0.8, 0.2, 1.3, 1.3],
shape=[3, 4],
dtype=dtypes.float64,
name="f")
def xent_grad(f):
if not context.executing_eagerly():
return gradients_impl.gradients(
nn_ops.sparse_softmax_cross_entropy_with_logits(
labels=l, logits=f, name="xent"), [f])[0]
with backprop_lib.GradientTape() as tape:
tape.watch(f)
return tape.gradient(
nn_ops.sparse_softmax_cross_entropy_with_logits(
labels=l, logits=f, name="xent"), [f])[0]
theoretical, numerical = gradient_checker_v2.compute_gradient(
xent_grad, [f])
if not context.executing_eagerly():
# Check that second derivative is calculated.
# (it is equivalent to being `BatchMatMul` op in the graph because of
# implementation of xentropy grad)
op_names = [
op.op_def.name for op in sess.graph.get_operations() if op.op_def
]
self.assertIn("BatchMatMulV2", op_names)
tol = 5e-8
self.assertAllClose(theoretical, numerical, atol=tol, rtol=tol)
@test_util.run_in_graph_and_eager_modes(use_gpu=True)
def _testHighDim(self, features, labels):
np_loss, np_backprop = self._npXent(np.array(features), np.array(labels))
# manually reshape loss
np_loss = np.reshape(np_loss, np.array(labels).shape)
tf_loss = nn_ops.sparse_softmax_cross_entropy_with_logits(
labels=labels, logits=features)
if not context.executing_eagerly():
tf_backprop = tf_loss.op.inputs[0].op.outputs[1]
else:
with backprop_lib.GradientTape() as tape:
features = constant_op.constant(features)
tape.watch(features)
tf_backprop = tape.gradient(
nn_ops.sparse_softmax_cross_entropy_with_logits(
labels=labels, logits=features), [features])[0]
tf_backprop = array_ops.reshape(tf_backprop, np_backprop.shape)
self.assertAllCloseAccordingToType(np_loss, tf_loss)
self.assertAllCloseAccordingToType(np_backprop, tf_backprop)
def testHighDim(self):
features = [[[1., 1., 1., 1.]], [[1., 2., 3., 4.]]]
labels = [[3], [0]]
self._testHighDim(features, labels)
def testHighDim2(self):
features = [[[1., 1., 1., 1.], [2., 2., 2., 2.]],
[[1., 2., 3., 4.], [5., 6., 7., 8.]]]
labels = [[3, 2], [0, 3]]
self._testHighDim(features, labels)
def testScalarHandling(self):
with ops_lib.Graph().as_default(), self.session(use_gpu=False) as sess:
with self.assertRaisesRegex(errors_impl.InvalidArgumentError,
".*labels must be 1-D.*"):
labels = array_ops.placeholder(dtypes.int32, shape=[None, 1])
logits = array_ops.placeholder(dtypes.float32, shape=[None, 3])
ce = nn_ops.sparse_softmax_cross_entropy_with_logits(
labels=array_ops.squeeze(labels), logits=logits)
labels_v2 = np.zeros((1, 1), dtype=np.int32)
logits_v2 = np.random.randn(1, 3)
sess.run([ce], feed_dict={labels: labels_v2, logits: logits_v2})
def _sparse_vs_dense_xent_benchmark_dense(labels, logits):
labels = array_ops.identity(labels)
logits = array_ops.identity(logits)
with ops_lib.device("/cpu:0"): # Sparse-to-dense must be on CPU
batch_size = array_ops.shape(logits)[0]
num_entries = array_ops.shape(logits)[1]
length = batch_size * num_entries
labels += num_entries * math_ops.range(batch_size)
target = sparse_ops.sparse_to_dense(labels,
array_ops.stack([length]), 1.0, 0.0)
target = array_ops.reshape(target, array_ops.stack([-1, num_entries]))
crossent = nn_ops.softmax_cross_entropy_with_logits(
labels=target, logits=logits, name="SequenceLoss/CrossEntropy")
crossent_sum = math_ops.reduce_sum(crossent)
grads = gradients_impl.gradients([crossent_sum], [logits])[0]
return (crossent_sum, grads)
def _sparse_vs_dense_xent_benchmark_sparse(labels, logits):
# Using sparse_softmax_cross_entropy_with_logits
labels = labels.astype(np.int64)
labels = array_ops.identity(labels)
logits = array_ops.identity(logits)
crossent = nn_ops.sparse_softmax_cross_entropy_with_logits(
logits, labels, name="SequenceLoss/CrossEntropy")
crossent_sum = math_ops.reduce_sum(crossent)
grads = gradients_impl.gradients([crossent_sum], [logits])[0]
return (crossent_sum, grads)
def sparse_vs_dense_xent_benchmark(batch_size, num_entries, use_gpu):
config = config_pb2.ConfigProto()
config.allow_soft_placement = True
config.gpu_options.per_process_gpu_memory_fraction = 0.3
labels = np.random.randint(num_entries, size=batch_size).astype(np.int32)
logits = np.random.randn(batch_size, num_entries).astype(np.float32)
def _timer(sess, ops):
# Warm up
for _ in range(20):
sess.run(ops)
# Timing run
start = time.time()
for _ in range(20):
sess.run(ops)
end = time.time()
return (end - start) / 20.0 # Average runtime per iteration
# Using sparse_to_dense and softmax_cross_entropy_with_logits
with session.Session(config=config) as sess:
if not use_gpu:
with ops_lib.device("/cpu:0"):
ops = _sparse_vs_dense_xent_benchmark_dense(labels, logits)
else:
ops = _sparse_vs_dense_xent_benchmark_dense(labels, logits)
delta_dense = _timer(sess, ops)
# Using sparse_softmax_cross_entropy_with_logits
with session.Session(config=config) as sess:
if not use_gpu:
with test_util.device("/cpu:0"):
ops = _sparse_vs_dense_xent_benchmark_sparse(labels, logits)
else:
ops = _sparse_vs_dense_xent_benchmark_sparse(labels, logits)
delta_sparse = _timer(sess, ops)
print("%d \t %d \t %s \t %f \t %f \t %f" % (batch_size, num_entries, use_gpu,
delta_dense, delta_sparse,
delta_sparse / delta_dense))
def main(_):
print("Sparse Xent vs. SparseToDense + Xent")
print("batch \t depth \t gpu \t dt(dense) \t dt(sparse) "
"\t dt(sparse)/dt(dense)")
for use_gpu in (False, True):
for batch_size in (32, 64, 128):
for num_entries in (100, 1000, 10000):
sparse_vs_dense_xent_benchmark(batch_size, num_entries, use_gpu)
sparse_vs_dense_xent_benchmark(32, 100000, use_gpu)
sparse_vs_dense_xent_benchmark(8, 1000000, use_gpu)
if __name__ == "__main__":
if "--benchmarks" in sys.argv:
sys.argv.remove("--benchmarks")
app.run()
else:
test.main()
|
import os
import time
import unittest
from mock import patch
from chirp.library import audio_file_test
from chirp.library import do_delete_audio_file_from_db
from chirp.library import database
TEST_DB_NAME_PATTERN = "/tmp/chirp-library-db_test.%d.sqlite"
class DeleteFingerprintTest(unittest.TestCase):
def setUp(self):
self.name = TEST_DB_NAME_PATTERN % int(time.time() * 1000000)
self.db = database.Database(self.name)
def tearDown(self):
os.unlink(self.name)
def _add_test_audiofiles(self):
test_volume = 17
test_import_timestamp = 1230959520
# populate some dummy audiofiles into the database
all_au_files = [audio_file_test.get_test_audio_file(i)
for i in xrange(10)]
add_txn = self.db.begin_add(test_volume, test_import_timestamp)
for au_file in all_au_files:
au_file.volume = test_volume
au_file.import_timestamp = test_import_timestamp
for au_file in all_au_files:
add_txn.add(au_file)
add_txn.commit()
def test_del_audiofiles__full_delete_single(self):
# SETUP
test_fingerprint = "0000000000000007"
# Create db tables
self.assertTrue(self.db.create_tables())
self._add_test_audiofiles()
# make sure 10 records exist
self.assertEqual(len(list(self.db.get_all())), 10)
# quick confirmation that the audiofile that we want to test exists.
af = self.db.get_by_fingerprint(test_fingerprint)
self.assertEquals(af.fingerprint, test_fingerprint)
afm = do_delete_audio_file_from_db.AudioFileManager(
library_db_file=self.name)
# TEST
afm.del_audiofiles([test_fingerprint])
# RESULTS
# verify audiofile doesn't exist
af = self.db.get_by_fingerprint(test_fingerprint)
self.assertEquals(af, None)
# make sure only 9 records exist now
self.assertEqual(len(list(self.db.get_all())), 9)
def test_del_audiofiles__full_delete_multiple(self):
# SETUP
test_fingerprint_1 = "0000000000000005"
test_fingerprint_2 = "0000000000000007"
# Create db tables
self.assertTrue(self.db.create_tables())
self._add_test_audiofiles()
# make sure 10 records exist
self.assertEqual(len(list(self.db.get_all())), 10)
# quick confirmation that the audiofiles that we want to test exists.
af = self.db.get_by_fingerprint(test_fingerprint_1)
self.assertEquals(af.fingerprint, test_fingerprint_1)
af = self.db.get_by_fingerprint(test_fingerprint_2)
self.assertEquals(af.fingerprint, test_fingerprint_2)
afm = do_delete_audio_file_from_db.AudioFileManager(
library_db_file=self.name)
# TEST
afm.del_audiofiles([test_fingerprint_1, test_fingerprint_2])
# RESULTS
# verify audiofiles don't exist
af = self.db.get_by_fingerprint(test_fingerprint_1)
self.assertEquals(af, None)
af = self.db.get_by_fingerprint(test_fingerprint_2)
self.assertEquals(af, None)
# make sure only 8 records exist now
self.assertEqual(len(list(self.db.get_all())), 8)
def test_del_audiofiles__full_delete_non_existing_fingerprint(self):
# SETUP
test_fingerprint_1 = "0000000000000020"
# Create db tables
self.assertTrue(self.db.create_tables())
self._add_test_audiofiles()
# make sure 10 records exist
self.assertEqual(len(list(self.db.get_all())), 10)
afm = do_delete_audio_file_from_db.AudioFileManager(
library_db_file=self.name)
# TEST
afm.del_audiofiles([test_fingerprint_1])
# RESULTS
# make sure nothing was deleted
self.assertEqual(len(list(self.db.get_all())), 10)
def test_del_audiofiles__raises_exception(self):
# SETUP
test_fingerprint_1 = "0000000000000007"
# Create db tables
self.assertTrue(self.db.create_tables())
self._add_test_audiofiles()
# make sure 10 records exist
self.assertEqual(len(list(self.db.get_all())), 10)
afm = do_delete_audio_file_from_db.AudioFileManager(
library_db_file=self.name)
# TEST
def _raise_exception(*args, **kwargs):
raise Exception('Test')
with patch.object(afm, 'conn', autospec=True) as mock_conn:
mock_conn.execute.side_effect = _raise_exception
with self.assertRaises(Exception):
afm.del_audiofiles([test_fingerprint_1])
mock_conn.rollback.assert_called_with()
def test_get_audio_files__existing_record(self):
# SETUP
test_fingerprint = "0000000000000007"
# Create db tables
self.assertTrue(self.db.create_tables())
self._add_test_audiofiles()
afm = do_delete_audio_file_from_db.AudioFileManager(
library_db_file=self.name)
# TEST
af = afm.get_audio_files(fingerprints=[test_fingerprint])
# RESULTS
self.assertSetEqual(
set(a['fingerprint'] for a in af),
set([test_fingerprint]))
def test_get_audio_files__non_existing_records(self):
# SETUP
test_fingerprint_1 = "0000000000000020"
# Create db tables
self.assertTrue(self.db.create_tables())
self._add_test_audiofiles()
afm = do_delete_audio_file_from_db.AudioFileManager(
library_db_file=self.name)
# TEST
af = afm.get_audio_files(
fingerprints=[test_fingerprint_1])
# RESULTS
self.assertEqual(len(list(af)), 0)
def test_get_tags__existing_record(self):
# SETUP
test_fingerprint_1 = "0000000000000005"
# Create db tables
self.assertTrue(self.db.create_tables())
self._add_test_audiofiles()
afm = do_delete_audio_file_from_db.AudioFileManager(
library_db_file=self.name)
# TEST
af = afm.get_tags(
fingerprints=[test_fingerprint_1])
# RESULTS
self.assertListEqual(
list(a['fingerprint'] for a in af),
5 * [test_fingerprint_1])
def test_get_tags__non_existing_records(self):
# SETUP
test_fingerprint_1 = "0000000000000020"
# Create db tables
self.assertTrue(self.db.create_tables())
self._add_test_audiofiles()
afm = do_delete_audio_file_from_db.AudioFileManager(
library_db_file=self.name)
# TEST
af = afm.get_tags(
fingerprints=[test_fingerprint_1])
# RESULTS
self.assertEqual(len(list(af)), 0)
def test_print_rows_can_handle_non_ascii(self):
afm = do_delete_audio_file_from_db.AudioFileManager(
library_db_file=self.name
)
afm.print_rows([
[u'non-ascii string with a \xf8 character'],
])
|
from setuptools import setup
setup(
name='pytest-testmon',
description='take TDD to a new level with py.test and testmon',
long_description=''.join(open('README.rst').readlines()),
version='0.9.15',
license='MIT',
platforms=['linux', 'osx', 'win32'],
packages=['testmon'],
url='https://github.com/tarpas/pytest-testmon/',
author_email='tibor.arpas@infinit.sk',
author='Tibor Arpas, Jozef Knaperek, Martin Riesz, Daniel Hahler',
entry_points={
'pytest11': [
'testmon = testmon.pytest_testmon',
],
'tox': [
'testmon = testmon.tox_testmon',
],
},
install_requires=['pytest>=2.8.0,<5', 'coverage>=4,<5'],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Operating System :: POSIX',
'Operating System :: Microsoft :: Windows',
'Operating System :: MacOS :: MacOS X',
'Topic :: Software Development :: Testing',
'Topic :: Software Development :: Libraries',
'Topic :: Utilities',
'Programming Language :: Python', ],
)
|
import unittest
import pyrulo.class_imports
class TestImports(unittest.TestCase):
def setUp(self) -> None:
pass
def tearDown(self) -> None:
pass
def test_whenImportClassesByDir_resultIsTheExpected(self):
# arrange
path = "test_classes"
# act
classes = pyrulo.class_imports.import_classes_in_dir(path, object, False)
names = [cls.__name__ for cls in classes]
counts = {}
for name in names:
counts.setdefault(name, 0)
counts[name] += 1
# assert
self.assertIn("A", names)
self.assertIn("B", names)
self.assertIn("C", names)
self.assertEqual(counts["A"], 1)
self.assertEqual(counts["B"], 1)
self.assertEqual(counts["C"], 1)
def test_whenImportClassesByExternalDir_resultIsTheExpected(self):
# arrange
path = "C:/_cosas/Desarrollo/Proyectos/Python/propsettings/propsettings"
# act
classes = pyrulo.class_imports.import_classes_in_dir(path, object, False)
names = [cls.__name__ for cls in classes]
# assert
self.assertIn("Setting", names)
def test_whenImportClassFromFile_resultsIsTheExpected(self):
# arrange
path = "test_classes/a.py"
# act
classes = pyrulo.class_imports.import_classes_in_file(path, object)
names = [cls.__name__ for cls in classes]
# assert
self.assertIn("A", names)
def test_whenImportClassFromFileByKey_resultsIsTheExpected(self):
# arrange
path = "test_classes/a.py"
# act
classes = pyrulo.class_imports.import_classes_in_file(path, object)
names = [cls.__name__ for cls in classes]
# assert
self.assertIn("A", names)
def test_whenImportClassesFromExternalFile_resultIsTheExpected(self):
# arrange
path = "C:/_cosas/Desarrollo/Proyectos/Python/propsettings/propsettings/setting.py"
# act
classes = pyrulo.class_imports.import_classes_in_file(path, object)
names = [cls.__name__ for cls in classes]
# assert
self.assertIn("Setting", names)
def test_whenImportClassesFromSiblingFile_resultIsTheExpected(self):
# arrange
path = "sibling_classes.py"
# act
classes = pyrulo.class_imports.import_classes_in_file(path, object)
names = [cls.__name__ for cls in classes]
# assert
self.assertIn("Sibling", names)
if __name__ == '__main__':
unittest.main()
|
import collections
import pytest # noqa: F401
from pudb.py3compat import builtins
from pudb.settings import load_breakpoints, save_breakpoints
def test_load_breakpoints(mocker):
fake_data = ["b /home/user/test.py:41"], ["b /home/user/test.py:50"]
mock_open = mocker.mock_open()
mock_open.return_value.readlines.side_effect = fake_data
mocker.patch.object(builtins, "open", mock_open)
mocker.patch("pudb.settings.lookup_module",
mocker.Mock(return_value="/home/user/test.py"))
mocker.patch("pudb.settings.get_breakpoint_invalid_reason",
mocker.Mock(return_value=None))
result = load_breakpoints()
expected = [("/home/user/test.py", 41, False, None, None),
("/home/user/test.py", 50, False, None, None)]
assert result == expected
def test_save_breakpoints(mocker):
MockBP = collections.namedtuple("MockBreakpoint", "file line cond")
mock_breakpoints = [MockBP("/home/user/test.py", 41, None),
MockBP("/home/user/test.py", 50, None)]
mocker.patch("pudb.settings.get_breakpoints_file_name",
mocker.Mock(return_value="saved-breakpoints"))
mock_open = mocker.mock_open()
mocker.patch.object(builtins, "open", mock_open)
save_breakpoints(mock_breakpoints)
mock_open.assert_called_with("saved-breakpoints", "w")
|
# -*- coding: utf-8 -*-
import hashlib
from unittest.mock import MagicMock
from asyncy.AppConfig import Expose
from asyncy.Containers import Containers
from asyncy.Exceptions import ActionNotFound, ContainerSpecNotRegisteredError,\
EnvironmentVariableNotFound, K8sError
from asyncy.Kubernetes import Kubernetes
from asyncy.constants.LineConstants import LineConstants
from asyncy.constants.ServiceConstants import ServiceConstants
from asyncy.entities.Volume import Volume
from asyncy.processing import Story
import pytest
from pytest import fixture, mark
@fixture
def line():
return MagicMock()
def test_is_service_reusable(story):
story.app.services = {
'alpine': {
'configuration': {
'actions': {
'echo': {
'run': 'foo'
}
}
}
}
}
line = {
LineConstants.service: 'alpine',
LineConstants.command: 'echo'
}
assert Containers.is_service_reusable(story.app, line) is False
story.app.services['alpine']['configuration']['actions']['echo'][
'run'] = None
assert Containers.is_service_reusable(story.app, line) is True
@mark.parametrize('reusable', [False, True])
@mark.parametrize('name', ['alpine', 'a!lpine', 'ALPINE', '__aLpInE'])
def test_get_container_name(patch, story, line, reusable, name):
patch.object(Containers, 'is_service_reusable', return_value=reusable)
story.app.app_id = 'my_app'
story.app.version = 'v2'
ret = Containers.get_container_name(story.app, story.name, line, name)
if reusable:
assert ret == f'alpine-{Containers.hash_service_name(story.app, name)}'
else:
h = Containers.hash_service_name_and_story_line(story.app, story.name,
line, name)
assert ret == f'alpine-{h}'
@mark.asyncio
async def test_exec():
with pytest.raises(K8sError):
await Containers.exec(None, None, None, None, None)
@mark.asyncio
async def test_container_get_hostname(patch, story, line):
story.app.app_id = 'my_app'
patch.object(Containers, 'get_container_name', return_value='foo')
ret = await Containers.get_hostname(story, line, 'foo')
assert ret == 'foo.my_app.svc.cluster.local'
@mark.asyncio
async def test_clean_app(patch, async_mock):
patch.object(Kubernetes, 'clean_namespace', new=async_mock())
app = MagicMock()
await Containers.clean_app(app)
Kubernetes.clean_namespace.mock.assert_called_with(app)
@mark.asyncio
async def test_remove_volume(patch, story, line, async_mock):
patch.object(Kubernetes, 'remove_volume', new=async_mock())
await Containers.remove_volume(story.app, 'foo')
Kubernetes.remove_volume.mock.assert_called_with(story.app, 'foo')
@mark.asyncio
async def test_prepare_for_deployment(patch, async_mock):
patch.object(Kubernetes, 'clean_namespace', new=async_mock())
story = MagicMock()
await Containers.prepare_for_deployment(story)
Kubernetes.clean_namespace.mock.assert_called_with(story.app)
def test_format_command(logger, app, echo_service, echo_line):
story = Story.story(app, logger, 'echo.story')
app.services = echo_service
cmd = Containers.format_command(story, echo_line, 'alpine', 'echo')
assert ['echo', '{"msg":"foo"}'] == cmd
@mark.parametrize('reusable', [True, False])
def test_hash_volume_name(patch, story, line, reusable):
line['ln'] = '1'
patch.object(Containers, 'is_service_reusable', return_value=reusable)
name = 'my_volume'
service = 'foo'
key = name + '-' + service
if not reusable:
key = f'{key}-{line["ln"]}'
expected = f'myvolume-' + hashlib.sha1(key.encode('utf-8')).hexdigest()
assert Containers.hash_volume_name(story.app, line, service, name) == \
expected
def test_hash_ingress_name():
e = Expose(service='service',
service_expose_name='expose_name',
http_path='expose_path')
ret = Containers.hash_ingress_name(e)
assert ret == 'exposename-0cf994f170f9d213bb814f74baca87ea149f7536'
@mark.asyncio
async def test_expose_service(app, patch, async_mock):
container_name = 'container_name'
patch.object(Containers, 'get_container_name',
return_value=container_name)
patch.object(Containers, 'create_and_start', new=async_mock())
patch.object(Kubernetes, 'create_ingress', new=async_mock())
e = Expose(service='service',
service_expose_name='expose_name',
http_path='expose_path')
ingress_name = Containers.hash_ingress_name(e)
hostname = f'{app.app_dns}--{Containers.get_simple_name(e.service)}'
await Containers.expose_service(app, e)
Containers.create_and_start.mock.assert_called_with(app, None, e.service,
container_name)
Kubernetes.create_ingress.mock.assert_called_with(ingress_name, app, e,
container_name,
hostname=hostname)
def test_service_name_and_story_line(patch, story):
patch.object(hashlib, 'sha1')
story.name = 'story_name'
story.app.version = 'v29'
ret = Containers.hash_service_name_and_story_line(
story.app, story.name, {'ln': '1'}, 'alpine')
hashlib.sha1.assert_called_with(f'alpine-v29-{story.name}-1'
.encode('utf-8'))
assert ret == hashlib.sha1().hexdigest()
def test_service_name(patch, story):
story.app.version = 'v2'
patch.object(hashlib, 'sha1')
ret = Containers.hash_service_name(story.app, 'alpine')
hashlib.sha1.assert_called_with(f'alpine-v2'.encode('utf-8'))
assert ret == hashlib.sha1().hexdigest()
@mark.asyncio
async def test_create_and_start_no_action(story):
story.app.services = {'alpine': {'configuration': {}}}
with pytest.raises(ActionNotFound):
await Containers.create_and_start(story.app, {'command': 'foo'},
'alpine', 'alpine')
@mark.parametrize('run_command', [None, ['/bin/bash', 'sleep', '10000']])
@mark.parametrize('with_volumes', [True, False])
@mark.parametrize('missing_required_var', [False, True])
@mark.asyncio
async def test_start(patch, story, async_mock,
missing_required_var,
run_command, with_volumes):
line = {
LineConstants.service: 'alpine',
LineConstants.command: 'echo',
'ln': '1'
}
patch.object(Kubernetes, 'create_pod', new=async_mock())
story.app.services = {
'alpine': {
ServiceConstants.config: {
'actions': {
'echo': {
}
},
'volumes': {
'db': {
'persist': True,
'target': '/db'
},
'tmp': {
'persist': False,
'target': '/tmp'
}
},
'environment': {
'param_1': {
'required': True
},
'alpine_only': {}
}
}
}
}
if not with_volumes:
del story.app.services['alpine'][ServiceConstants.config]['volumes']
if run_command is not None:
story.app.services['alpine'][ServiceConstants.config]['actions'][
'echo'] = {'run': {'command': run_command}}
story.app.environment = {
'alpine': {
'alpine_only': True,
'param_1': 'hello_world'
},
'global': 'yes'
}
if missing_required_var:
story.app.environment['alpine']['param_1'] = None
patch.object(Containers, 'get_container_name',
return_value='asyncy-alpine')
expected_volumes = []
if with_volumes:
hash_db = Containers.hash_volume_name(story.app, line, 'alpine', 'db')
hash_tmp = Containers.hash_volume_name(story.app, line, 'alpine',
'tmp')
expected_volumes = [
Volume(persist=True, name=hash_db, mount_path='/db'),
Volume(persist=False, name=hash_tmp, mount_path='/tmp'),
]
if missing_required_var:
with pytest.raises(EnvironmentVariableNotFound):
await Containers.start(story, line)
return
else:
await Containers.start(story, line)
Kubernetes.create_pod.mock.assert_called_with(
app=story.app, service='alpine',
image='alpine', container_name='asyncy-alpine',
start_command=run_command or ['tail', '-f', '/dev/null'],
shutdown_command=None,
env={'alpine_only': True, 'param_1': 'hello_world'},
volumes=expected_volumes)
@mark.asyncio
async def test_init(story, patch, async_mock):
patch.object(Kubernetes, 'create_namespace', new=async_mock())
await Containers.init(story.app)
Kubernetes.create_namespace.mock.assert_called_with(story.app)
def test_format_command_no_format(logger, app, echo_service, echo_line):
story = Story.story(app, logger, 'echo.story')
app.services = echo_service
config = app.services['alpine'][ServiceConstants.config]
config['actions']['echo']['format'] = None
cmd = Containers.format_command(story, echo_line, 'alpine', 'echo')
assert ['echo', '{"msg":"foo"}'] == cmd
def test_format_command_no_spec(logger, app, echo_line):
story = Story.story(app, logger, 'echo.story')
app.services = {}
with pytest.raises(ContainerSpecNotRegisteredError):
Containers.format_command(story, echo_line, 'alpine', 'echo')
def test_format_command_no_args(logger, app, echo_service, echo_line):
story = Story.story(app, logger, 'echo.story')
app.services = echo_service
echo_service['alpine'][ServiceConstants.config]['actions']['echo'][
'arguments'] = None
cmd = Containers.format_command(story, echo_line, 'alpine', 'echo')
assert ['echo'] == cmd
def test_format_command_with_format(patch, logger, app,
echo_service, echo_line):
story = Story.story(app, logger, 'echo.story')
patch.object(story, 'argument_by_name', return_value='asyncy')
app.services = echo_service
config = app.services['alpine'][ServiceConstants.config]
config['actions']['echo']['format'] = 'echo {msg}'
cmd = Containers.format_command(story, echo_line, 'alpine', 'echo')
assert ['echo', 'asyncy'] == cmd
|
# -*- coding:utf-8 -*-
from __future__ import unicode_literals
from django.utils.translation import ugettext_lazy as _
from yepes import forms
from yepes.fields.char import CharField
from yepes.validators import PostalCodeValidator
from yepes.utils.deconstruct import clean_keywords
class PostalCodeField(CharField):
default_validators = [PostalCodeValidator()]
description = _('Generic postal code')
def __init__(self, *args, **kwargs):
kwargs['force_lower'] = False
kwargs['force_upper'] = True
kwargs.setdefault('max_length', 15)
kwargs['normalize_spaces'] = True
kwargs['trim_spaces'] = False
super(PostalCodeField, self).__init__(*args, **kwargs)
def deconstruct(self):
name, path, args, kwargs = super(PostalCodeField, self).deconstruct()
path = path.replace('yepes.fields.postal_code', 'yepes.fields')
clean_keywords(self, kwargs, variables={
'max_length': 15,
}, constants=[
'force_lower',
'force_upper',
'normalize_spaces',
'trim_spaces',
])
return name, path, args, kwargs
def formfield(self, **kwargs):
kwargs.setdefault('form_class', forms.PostalCodeField)
return super(PostalCodeField, self).formfield(**kwargs)
|
"""
Interface to a test suite module (one or more runs) used by ProductModelProgram
"""
from operator import concat
from .model import Model
from functools import reduce
class TestSuite(Model):
def __init__(self, module, exclude, include):
Model.__init__(self, module, exclude, include)
def post_init(self):
"""
        Now that all modules have been imported and have executed their __init__,
        do a postprocessing pass
        to process metadata that might be affected by configuration modules
"""
# Do all of this work here rather than in __init__
# so it can include the effects of any pymodel config modules
# recognize PEP-8 style names (all lowercase) if present
if hasattr(self.module, 'testsuite'):
self.module.testSuite = self.module.testsuite
if hasattr(self.module, 'test_suite'):
self.module.testSuite = self.module.test_suite
if hasattr(self.module, 'actions'):
self.actions = list(self.module.actions) # copy, actions from cmd line
else:
self.actions = list(self.actions_in_suite()) # default, copy
Model.post_init(self) # uses self.actions
# Revise the test suite to account for excluded, included actions
self.test_suite = list()
for run in self.module.testSuite:
new_run = list() # list not tuple, must mutable
for action in run:
if action[0] in self.actions:
new_run.append(action)
else:
break # truncate the run before the excluded action
self.test_suite.append(new_run)
# prepare for first run
self.irun = 0 # index of current test run in test suite
self.pc = 0 # program counter
def actions_in_suite(self):
# there might be two or three items in action_tuple
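        # Illustrative shapes (hypothetical action names): ('press', (1,)) or
        # ('press', (1,), 'ok') -- only action_tuple[0], the action name, is collected here.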
return tuple(set(reduce(concat,[[action_tuple[0] for action_tuple in run]
for run in self.module.testSuite])))
def Accepting(self):
# In a test suite, the only accepting states are at ends of runs
# NB Here Accepting() is called *after* DoAction() that advances self.pc
length = len(self.test_suite[self.irun]) # number of tuples in run
return (self.pc == length)
def make_properties(self, accepting):
return { 'accepting': accepting, 'statefilter': True,
'stateinvariant': True }
def Properties(self):
return self.make_properties(self.Accepting())
def Reset(self): # needed by stepper
self.pc = 0
if self.irun < len(self.test_suite) - 1:
self.irun += 1
else:
raise StopIteration # no more runs in test suite
def ActionEnabled(self, a, args):
"""
action a with args is enabled in the current state
"""
step = self.test_suite[self.irun][self.pc]
action, arguments = step[0:2] # works whether or not step has result
return (a == action and args == arguments)
def EnabledTransitions(self, cleanup=False):
"""
        Return a list of tuples for the enabled actions. Here, there is just one:
        (action, args, result, next state, properties)
        Next state is a pair: the run number and the step within that run.
In a test suite, there is always just *one* next action, or *none*
Ignore cleanup, test suite should always end in accepting state.
"""
run = self.test_suite[self.irun]
length = len(run)
if self.pc < length:
step = run[self.pc]
action, args = step[0:2]
result = step[2] if len(step) > 2 else None # result is optional
next = self.pc + 1
accepting = (next == length)
return([(action, args, result, (self.irun,next),
self.make_properties(accepting))])
else:
return list() # test run finished, nothing enabled,
def DoAction(self, a, args):
step = self.test_suite[self.irun][self.pc]
result = step[2] if len(step) > 2 else None # result is optional
self.pc += 1
return result
def Current(self):
return (self.irun, self.pc)
def Restore(self, state):
"""
Restore state
"""
self.irun, self.pc = state
# GetNext not needed
|
from typing import ( # noqa: F401
Type,
)
from cytoolz import (
curry,
)
from eth_utils import (
encode_hex,
ValidationError,
)
from eth.constants import (
MAX_UNCLE_DEPTH,
)
from eth.rlp.blocks import BaseBlock # noqa: F401
from eth.rlp.receipts import Receipt
from eth.validation import (
validate_lte,
)
from eth.vm.forks.spurious_dragon import SpuriousDragonVM
from eth.vm.forks.frontier import make_frontier_receipt
from eth.vm.state import BaseState # noqa: F401
from .blocks import ByzantiumBlock
from .constants import (
EIP649_BLOCK_REWARD,
EIP658_TRANSACTION_STATUS_CODE_FAILURE,
EIP658_TRANSACTION_STATUS_CODE_SUCCESS,
)
from .headers import (
create_byzantium_header_from_parent,
configure_byzantium_header,
compute_byzantium_difficulty,
)
from .state import ByzantiumState
def make_byzantium_receipt(base_header, transaction, computation, state):
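    # Note: per EIP-658, post-Byzantium receipts carry a one-byte status code in
    # place of the intermediate state root; that is what the copy() below implements.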
frontier_receipt = make_frontier_receipt(base_header, transaction, computation, state)
if computation.is_error:
status_code = EIP658_TRANSACTION_STATUS_CODE_FAILURE
else:
status_code = EIP658_TRANSACTION_STATUS_CODE_SUCCESS
return frontier_receipt.copy(state_root=status_code)
@curry
def get_uncle_reward(block_reward, block_number, uncle):
block_number_delta = block_number - uncle.block_number
validate_lte(block_number_delta, MAX_UNCLE_DEPTH)
return (8 - block_number_delta) * block_reward // 8
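# Illustrative: an uncle included one block below the current height earns 7/8 of the
# block reward, and the fraction drops by 1/8 for each additional block of depth
# (bounded by the MAX_UNCLE_DEPTH check above).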
EIP658_STATUS_CODES = {
EIP658_TRANSACTION_STATUS_CODE_SUCCESS,
EIP658_TRANSACTION_STATUS_CODE_FAILURE,
}
class ByzantiumVM(SpuriousDragonVM):
# fork name
fork = 'byzantium'
# classes
block_class = ByzantiumBlock # type: Type[BaseBlock]
_state_class = ByzantiumState # type: Type[BaseState]
# Methods
create_header_from_parent = staticmethod(create_byzantium_header_from_parent)
compute_difficulty = staticmethod(compute_byzantium_difficulty)
configure_header = configure_byzantium_header
make_receipt = staticmethod(make_byzantium_receipt)
get_uncle_reward = staticmethod(get_uncle_reward(EIP649_BLOCK_REWARD))
@classmethod
def validate_receipt(cls, receipt: Receipt) -> None:
super().validate_receipt(receipt)
if receipt.state_root not in EIP658_STATUS_CODES:
raise ValidationError(
"The receipt's `state_root` must be one of [{0}, {1}]. Got: "
"{2}".format(
encode_hex(EIP658_TRANSACTION_STATUS_CODE_SUCCESS),
encode_hex(EIP658_TRANSACTION_STATUS_CODE_FAILURE),
encode_hex(receipt.state_root),
)
)
@staticmethod
def get_block_reward():
return EIP649_BLOCK_REWARD
|
from setuptools import find_packages, setup
__version__ = '1.0.1'
tests_require = [
"flake8==3.9.2",
"nose==1.3.7"
]
with open('README.md', 'r') as fh:
long_description = fh.read()
setup(
name='next-theme-kit',
author="29next",
author_email="dev@29next.com",
url='https://github.com/29next/theme-kit',
long_description=long_description,
long_description_content_type='text/markdown',
version=__version__,
install_requires=[
"PyYAML>=5.4",
"requests>=2.25",
"watchgod>=0.7",
"libsass>=0.21.0"
],
entry_points={
'console_scripts': [
'ntk = ntk.ntk:main',
],
},
packages=find_packages(),
python_requires='>=3.6'
)
|
from __future__ import annotations
import string
from dataclasses import dataclass
from typing import Any, Tuple
from expression import Error, Nothing, Ok, Option, Some, TaggedUnion, match, pipe, tag
from expression.collections import Block
from expression.extra.parser import (
Parser,
and_then,
any_of,
choice,
many,
opt,
pchar,
pfloat,
pint,
pstring,
)
def test_parse_pchar():
input = "ABC"
parseA: Parser[str] = pchar("A")
result = parseA(input)
assert result.is_ok()
with match(result) as case:
for a in case(Ok[str, str]):
assert a == "A"
if case._:
assert False
def test_parse_pchar_fluent():
input = "ABC"
parseA: Parser[str] = Parser.pchar("A")
result = parseA(input)
assert result.is_ok()
with match(result) as case:
for a in case(Ok[str, str]):
assert a == "A"
if case._:
assert False
def test_parse_a_then_b():
input = "ABC"
parse_a: Parser[str] = pchar("A")
parse_b: Parser[str] = pchar("B")
parseAB = pipe(
parse_a,
and_then(parse_b),
)
result = parseAB(input)
assert result.is_ok()
with match(result) as case:
for (a, b) in case(Ok[Tuple[str, str], str]):
assert (a, b) == ("A", "B")
if case._:
assert False
def test_parse_a_then_b_fluent():
input = "ABC"
parseAB = pchar("A").and_then(pchar("B"))
result = parseAB(input)
assert result.is_ok()
with match(result) as case:
for (a, b) in case(Ok[Tuple[str, str], str]):
assert (a, b) == ("A", "B")
if case._:
assert False
def test_pstring():
parse_abc = pstring("ABC")
ret = parse_abc("ABCDE") # Success ("ABC", "DE")
assert ret.is_ok()
with match(ret) as case:
for success in case(Ok[str, str]):
assert success == "ABC"
if case._:
assert False
ret = parse_abc("A|CDE") # Failure "Expecting 'B'. Got '|'"
assert ret.is_error()
with match(ret) as case:
for error in case(Error[str, str]):
assert error == "Expecting 'B'. Got '|'"
if case._:
assert False
ret = parse_abc("AB|DE") # Failure "Expecting 'C'. Got '|'"
assert ret.is_error()
with match(ret) as case:
for error in case(Error[str, str]):
assert error == "Expecting 'C'. Got '|'"
if case._:
assert False
def test_int():
ret = pint("123C")
with match(ret) as case:
for success in case(Ok[int, str]):
assert success == 123
if case._:
assert False
def test_int_negative():
ret = pint("-123C")
with match(ret) as case:
for success in case(Ok[int, str]):
assert success == -123
if case._:
assert False
def test_float():
ret = pfloat("123C")
with match(ret) as case:
for success in case(Ok[float, str]):
assert success == 123
if case._:
assert False
def test_float_with_decimal():
ret = pfloat("123.45C")
with match(ret) as case:
for success in case(Ok[float, str]):
assert success == 123.45
if case._:
assert False
def test_negative_float_with_decimal():
ret = pfloat("-123.45C")
with match(ret) as case:
for success in case(Ok[float, str]):
assert success == -123.45
if case._:
assert False
class ComparisonOperator(TaggedUnion):
EQ = tag()
NOT_EQ = tag()
LT = tag()
LT_E = tag()
GT = tag()
GT_E = tag()
IS = tag()
IS_NOT = tag()
IN = tag()
NOT_IN = tag()
@staticmethod
def eq() -> ComparisonOperator:
return ComparisonOperator(ComparisonOperator.EQ)
@staticmethod
def not_eq() -> ComparisonOperator:
return ComparisonOperator(ComparisonOperator.NOT_EQ)
@dataclass
class Compare:
left: Expression
comparators: Block[Expression]
ops: Block[ComparisonOperator]
class BoolOp(TaggedUnion):
AND = tag()
OR = tag()
@staticmethod
def and_() -> BoolOp:
return BoolOp(BoolOp.AND)
@staticmethod
def or_() -> BoolOp:
return BoolOp(BoolOp.OR)
class Expression(TaggedUnion):
CONSTANT = tag(Any)
NAME = tag(str)
BOOL_OP = tag(BoolOp)
COMPARE = tag(Compare)
@staticmethod
def name(name: str) -> Expression:
return Expression(Expression.NAME, name)
@staticmethod
def compare(compare: Compare) -> Expression:
return Expression(Expression.COMPARE, compare)
@staticmethod
def constant(value: Any) -> Expression:
return Expression(Expression.CONSTANT, value)
def pname() -> Parser[Expression]:
first = any_of(string.ascii_letters + "_")
rest = pipe(
any_of(string.ascii_letters + string.digits + "_"),
many,
opt,
)
def mapper(first: str, rest: Option[Block[str]]) -> str:
with match(rest) as case:
if case(Nothing):
return first
for letters in case(Some[Block[str]]):
return first + "".join(letters)
return case.default(first)
return first.and_then(rest).starmap(mapper).map(Expression.name)
def pexpr() -> Parser[Expression]:
parsers = [
pname(),
]
return pipe(
parsers,
Block[Parser[Expression]].of_seq,
choice,
)
def test_parse_name_expr():
name = pipe(
"test",
pexpr(),
)
assert name.is_ok()
with match(name) as case:
if case(Nothing):
assert False
for expr in case(Ok[Expression, str]):
with match(expr) as case:
for name in case(Expression.NAME):
assert name == "test"
break
else:
assert False
break
else:
assert False
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('petshop', '0002_dono'),
]
operations = [
migrations.AddField(
model_name='animal',
name='dono',
field=models.ForeignKey(to='petshop.Dono', default=1),
),
]
|
# -*- coding:utf-8 -*-
# Author:hankcs
# Date: 2018-06-21 19:46
# "Introduction to Natural Language Processing" (《自然语言处理入门》) 5.3: perceptron-based gender classification of Chinese personal names
# Companion book: http://nlp.hankcs.com/book.php
# Discussion and Q&A: https://bbs.hankcs.com/
import sys, os  # adjust the module search path priority (see sys.path.insert below)
sys.path.insert(0,os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))))
from pyhanlp import *
from tests.test_utility import ensure_data
PerceptronNameGenderClassifier = JClass('com.hankcs.hanlp.model.perceptron.PerceptronNameGenderClassifier')
cnname = ensure_data('cnname', 'http://file.hankcs.com/corpus/cnname.zip')
TRAINING_SET = os.path.join(cnname, 'train.csv')
TESTING_SET = os.path.join(cnname, 'test.csv')
MODEL = cnname + ".bin"
def run_classifier(averaged_perceptron):
    print('=====%s=====' % ('averaged perceptron algorithm' if averaged_perceptron else 'naive perceptron algorithm'))
classifier = PerceptronNameGenderClassifier()
    print('Training set accuracy:', classifier.train(TRAINING_SET, 10, averaged_perceptron))
model = classifier.getModel()
    print('Number of features:', len(model.parameter))
# model.save(MODEL, model.featureMap.entrySet(), 0, True)
# classifier = PerceptronNameGenderClassifier(MODEL)
for name in "赵建军", "沈雁冰", "陆雪琪", "李冰冰":
print('%s=%s' % (name, classifier.predict(name)))
    print('Test set accuracy:', classifier.evaluate(TESTING_SET))
if __name__ == '__main__':
run_classifier(False)
run_classifier(True)
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.10 on 2018-04-03 09:55
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('core', '0142_auto_20180301_2143'),
]
operations = [
migrations.AlterField(
model_name='declaration',
name='person',
field=models.ForeignKey(default=None, on_delete=django.db.models.deletion.CASCADE, related_name='declarations', to='core.Person'),
),
]
|
import os
import json
from datetime import datetime
from .challenge import Challenge
from hrcm.services.db import DBConnector
from hrcm.errors.bad_request import BadRequest
from hrcm.helpers import format_username, format_message
class Candidate:
"""
    @desc Prepare all the instance attributes alongside the db instance
    @params informations: a dict of candidate fields parsed from the CLI parameters
    @returns
"""
def __init__(self, informations):
self._id = informations.get("_id", None)
self.firstname = informations.get("firstname")
self.lastname = informations.get("lastname")
self.email = informations.get("email")
self.job = informations.get("job")
self.phone = informations.get("phone", str())
self.username = format_username(
informations.get("firstname"),
informations.get("lastname")
)
self.messages = [format_message(message) for message in informations.get("messages", list())]
self.archived = informations.get("archived", False)
self.challenge = None
self.db = DBConnector()
def __repr__(self):
return json.dumps(self.get_profile())
def get_messages(self):
self.messages = self.db.get_messages(self)
return self
"""
    @desc This method creates a new candidate and stores the id assigned by
          the DB on self. Before creating, we check that a candidate with the
          same email does not already exist and that the id is not yet set.
@params self: instance of Candidate
@returns instance of Candidate
"""
def create(self):
profile = self.db.get_profile_by_email(self.email)
if self._id is not None or profile is not None:
raise BadRequest("This email already exists.")
self.db.create_candidate(self)
print("User created successfuly")
return self
def update(self):
self.db.update_candidate(self)
def delete(self):
self.db.delete_candidate(self)
print("User deleted successfuly")
return self
def archive(self):
self.archived = True
self.db.save_profile(self)
print("Candidate archived")
return self
def create_send_challenge(self):
self.challenge = Challenge()
self.challenge.send_challenge(self)
self.messages = self.challenge.get_sent_messages()
return self
def preview_challenge(self):
self.challenge = Challenge()
return self.challenge.preview_challenge(self)
def evaluate_candidate(self, evaluated_criterias):
self.challenge = Challenge()
return self.challenge.evaluate_challenge(self, evaluated_criterias)
def get_challenge_criterias(self):
self.challenge = Challenge()
return self.challenge.get_evalution_criterias(self)
def get_profile(self, show_id=True):
profile = {
"firstname": self.firstname,
"lastname": self.lastname,
"email": self.email,
"job": self.job,
"phone": self.phone,
"messages": self.messages,
"username": self.username,
"archived": self.archived
}
if show_id is True:
profile["_id"] = str(self._id)
return profile
"""
    @desc Get the candidate profile and return the loaded Candidate if it
          already exists, else create a new one from the profile information
@params profile: dict
@returns instance of Candidate
"""
@classmethod
def load_or_new(cls, profile):
loaded_candidate = cls.load_candidate(profile.get("email"))
if loaded_candidate is not None:
return loaded_candidate
else:
return cls(profile)
"""
@desc Get the candidate profile and returns an instance of Candidate
@params email: str
@returns instance of Candidate, or None
"""
@classmethod
def load_candidate(cls, email):
db = DBConnector()
try:
profile = db.get_profile_by_email(email)
return cls(profile)
        except Exception:
return None
"""
    @desc Get all the candidates and return a list of Candidate instances
The archive option tells if the method returns the (non-)archived candidates
@params: archive: bool
@returns [instance of Candidate]
"""
@classmethod
def load_candidates(cls, archive=False):
db = DBConnector()
        return [cls(candidate) for candidate in db.get_profiles(archived=archive)]
|
import base64
import json
import re
import requests
import psutil
from pysys.basetest import BaseTest
from pysys.constants import FAILED
from cumulocity import Cumulocity
from environment_tedge import TedgeEnvironment
"""
Environment to manage automated connects and disconnects to c8y
"""
class EnvironmentC8y(TedgeEnvironment):
"""
Pysys Environment to manage automated connect and disconnect to c8y
Tests that derive from class EnvironmentC8y use automated connect and
disconnect to Cumulocity. Additional checks are made for the status of
service mosquitto and service tedge-mapper.
"""
cumulocity: Cumulocity
def setup(self):
self.log.debug("EnvironmentC8y Setup")
super().setup()
if self.project.c8yurl == "":
self.abort(
FAILED,
"Cumulocity tenant URL is not set. Set with the env variable C8YURL",
)
if self.project.tenant == "":
self.abort(
FAILED,
"Cumulocity tenant ID is not set. Set with the env variable C8YTENANT",
)
if self.project.c8yusername == "":
self.abort(
FAILED,
"Cumulocity tenant username is not set. Set with the env variable C8YUSERNAME",
)
if self.project.c8ypass == "":
self.abort(
FAILED,
"Cumulocity tenant password is not set. Set with the env variable C8YPASS",
)
if self.project.deviceid == "":
self.abort(
FAILED, "Device ID is not set. Set with the env variable C8YDEVICEID"
)
self.log.info("EnvironmentC8y Setup")
self.addCleanupFunction(self.myenvcleanup)
# Check if tedge-mapper is in disabled state
serv_mapper = self.startProcess(
command=self.systemctl,
arguments=["status", self.tedge_mapper_c8y],
stdouterr="serv_mapper1",
expectedExitStatus="==3", # 3: disabled
)
# Connect the bridge
self.tedge_connect_c8y()
# Test the bridge connection
self.tedge_connect_c8y_test()
# Check if mosquitto is running well
serv_mosq = self.startProcess(
command=self.systemctl,
arguments=["status", "mosquitto"],
stdouterr="serv_mosq2",
)
# Check if tedge-mapper is active again
serv_mapper = self.startProcess(
command=self.systemctl,
arguments=["status", self.tedge_mapper_c8y],
stdouterr="serv_mapper3",
)
self.cumulocity = Cumulocity(
self.project.c8yurl,
self.project.tenant,
self.project.c8yusername,
self.project.c8ypass,
self.log,
)
def execute(self):
self.log.debug("EnvironmentC8y Execute")
def validate(self):
self.log.debug("EnvironmentC8y Validate")
# Check if mosquitto is running well
serv_mosq = self.startProcess(
command=self.systemctl,
arguments=["status", "mosquitto"],
stdouterr="serv_mosq",
)
# Check if tedge-mapper is active
serv_mapper = self.startProcess(
command=self.systemctl,
arguments=["status", self.tedge_mapper_c8y],
stdouterr="serv_mapper4",
)
def myenvcleanup(self):
self.log.debug("EnvironmentC8y Cleanup")
# Disconnect Bridge
self.tedge_disconnect_c8y()
# Check if tedge-mapper is disabled
serv_mosq = self.startProcess(
command=self.systemctl,
arguments=["status", self.tedge_mapper_c8y],
stdouterr="serv_mapper5",
expectedExitStatus="==3",
)
|
from p4app import P4Program
import json
# Compile a P4_16 program:
prog16 = P4Program('wire.p4')
prog16.compile()
# Inspect the compiled JSON file
with open(prog16.json(), 'r') as f:
bmv2_json = json.load(f)
#print bmv2_json['actions']
# Compile a P4_14 program:
prog14 = P4Program('wire14.p4', version=14)
prog14.compile()
with open(prog14.json(), 'r') as f:
bmv2_json = json.load(f)
print("OK")
|
from __future__ import print_function
import sys
class MappingReader():
def __init__(self, mapping_file):
self.mapping_file = mapping_file
def pump(self, mapping_processor):
reader = open(self.mapping_file, 'r')
try:
class_name = None
# Read the subsequent class mappings and class member mappings.
while True:
line = reader.readline()
if not line:
break
line = line.strip()
                # A class mapping line ends with a colon; class member mapping
                # lines follow it until the next class mapping.
if line.endswith(':'):
# Process the class mapping and remember the class's
# old name.
class_name = self.process_class_mapping(line, mapping_processor)
elif class_name is not None:
# Process the class member mapping, in the context of the
# current old class name.
self.process_class_member_mapping(class_name, line, mapping_processor)
except Exception as ex:
print('Can\'t process mapping file (%s)' % ex)
sys.exit(1)
finally:
reader.close()
@staticmethod
def process_class_mapping(line, mapping_processor):
# See if we can parse "___ -> ___:", containing the original
# class name and the new class name.
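        # Illustrative class mapping line: "com.example.Foo -> a.b.c:"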
arrow_index = line.find('->')
if arrow_index < 0:
return None
colon_index = line.find(':', arrow_index + 2)
if colon_index < 0:
return None
# Extract the elements.
class_name = line[0: arrow_index].strip()
new_class_name = line[arrow_index + 2: colon_index].strip()
# Process this class name mapping.
interested = mapping_processor.process_class_mapping(class_name, new_class_name)
if interested:
return class_name
else:
return None
@staticmethod
def process_class_member_mapping(class_name, line, mapping_processor):
# See if we can parse "___:___:___ ___(___) -> ___",
# containing the optional line numbers, the return type, the original
# field/method name, optional arguments, and the new field/method name.
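        # Illustrative member lines: "int count -> a" (field) or
        # "13:15:int getCount() -> b" (method with line numbers).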
colon_index1 = line.find(':')
colon_index2 = -1 if colon_index1 < 0 else line.find(':', colon_index1 + 1)
space_index = line.find(' ', colon_index2 + 2)
argument_index1 = line.find('(', space_index + 1)
argument_index2 = -1 if argument_index1 < 0 else line.find(')', argument_index1 + 1)
arrow_index = line.find('->', max(space_index, argument_index2) + 1)
if space_index < 0 or arrow_index < 0:
return
# Extract the elements.
type = line[colon_index2 + 1: space_index].strip()
name = line[space_index + 1: argument_index1 if argument_index1 >= 0 else arrow_index].strip()
new_name = line[arrow_index + 2: len(line)].strip()
# Process this class member mapping.
if len(type) > 0 and \
len(name) > 0 and \
len(new_name) > 0:
# Is it a field or a method?
if argument_index2 < 0:
mapping_processor.process_field_mapping(class_name, type, name, new_name)
else:
first_line_number = 0
last_line_number = 0
if colon_index2 > 0:
first_line_number = int(line[0: colon_index1].strip())
last_line_number = int(line[colon_index1 + 1: colon_index2].strip())
arguments = line[argument_index1 + 1: argument_index2].strip()
mapping_processor.process_method_mapping(class_name,
first_line_number,
last_line_number,
type,
name,
arguments,
new_name)
|
import os
EXECUTABLE_PATH_WINDOWS = '/game/bin/win64/dota2.exe'
EXECUTABLE_PATH_LINUX = '/game/dota.sh'
EXECUTABLE_PATH_LINUX = '/game/bin/linuxsteamrt64/dota2'
BOT_PATH = '/game/dota/scripts/vscripts/bots/'
CONSOLE_LOG = '/game/dota/scripts/vscripts/bots/console.log'
SEND_MSG = '/game/dota/scripts/vscripts/bots/IPC_recv.lua'
CONFIG_MSG = '/game/dota/scripts/vscripts/bots/IPC_config.lua'
LINUX_APP_PATH = "~/Steam/steamapps/common/dota 2 beta"
OSX_APP_PATH = "~/Library/Application Support/Steam/SteamApps/common/dota 2 beta"
WINDOWS_APP_PATH = "C:/Program Files (x86)/Steam/steamapps/common/dota 2 beta"
# <steam path>/ubuntu12_32/steam-runtime/run.sh
class DotaPaths:
"""Class to hold system specific configuration"""
def __init__(self, path=None):
if path is None:
path = self.guess()
self.path = path
def guess(self):
from sys import platform
if platform == "linux" or platform == "linux2":
return os.path.expanduser(LINUX_APP_PATH)
elif platform == "darwin":
return os.path.expanduser(OSX_APP_PATH)
return WINDOWS_APP_PATH
@property
def executable_path(self):
from sys import platform
if platform == "linux" or platform == "linux2":
return self.path + '/' + EXECUTABLE_PATH_LINUX
return self.path + '/' + EXECUTABLE_PATH_WINDOWS
@property
def ipc_recv_handle(self):
return self.path + '/' + CONSOLE_LOG
@property
def console_log(self):
return self.ipc_recv_handle
@property
def ipc_send_handle(self):
return self.path + '/' + SEND_MSG
@property
def ipc_config_handle(self):
return self.path + '/' + CONFIG_MSG
def bot_file(self, filename):
"""Return a file path that is located in the bot folder"""
return self.path + '/' + BOT_PATH + filename
|
import sys
import pandas as pd
import requests
import nltk
nltk.download('stopwords')
from nltk.tokenize import RegexpTokenizer
from nltk.corpus import stopwords
from bs4 import BeautifulSoup
# --- open dataset --- #
data = pd.read_csv('./dataset/translated_twitter_posts.csv')
documents = data['translated_posts']
# --- create an instance of tokenizer --- #
premises = []
tokenizer = RegexpTokenizer(r'\w+')
progress = 0
total_posts = documents.shape[0]
for document in documents:
sentence = ''
tokens = tokenizer.tokenize(document)
for token in tokens:
        if token not in stopwords.words('english'):
            try:
                request = requests.get("http://www.urbandictionary.com/define.php?term={}".format(token))
                extract_meaning = BeautifulSoup(request.content, 'html.parser')
                meaning = extract_meaning.find("div", attrs={"class": "meaning"})
                if meaning is not None:
                    meaning = meaning.text
                    sentence = sentence + meaning + ' '
                else:
                    sentence = sentence + token + ' '
except Exception as e:
print('Exception at token ', token, '\n', e)
else:
sentence = sentence + token + ' '
premises.append(sentence)
progress = progress + 1
percentage = round((progress / total_posts) * 100, 2)
output_print = "{}% | {}/{}".format(percentage, progress, total_posts)
# Poor way to show a progress bar :|
sys.stdout.write("\r {:<70}".format(output_print))
sys.stdout.flush()
data['premises'] = premises
data.to_csv('./dataset/premises_twitter_posts.csv')
|
"""
This model supports user labeling of resources in various ways.
For a User u, this instantiates a subobject u.ulabels (like u.uaccess)
that contains all the labeling functions.
Functions include:
* u.ulabels.label_resource(r, label)
instantiates a label for a resource. Resources can have multiple labels.
* u.ulabels.unlabel_resource(r, label)
removes a label; there can be many labels.
* u.ulabels.clear_resource_labels(r)
removes all labels for a resource
* u.ulabels.favorite_resource(r)
favorites a resource
* u.ulabels.unfavorite_resource(r)
removes a favorite
and the reporting functions
* u.ulabels.labeled_resources
A queryset of resources that are labeled.
* u.ulabels.favorited_resources
A queryset of resources that have been favorited
* u.ulabels.get_resources_with_label(label)
Get a queryset of resources possessing a specific label.
For a BaseResource r, this also adds a subobject rlabels that reports on labels for resources
* r.rlabels.get_labels(u)
* r.rlabels.is_favorite(u)
* r.rlabels.is_mine(u)
"""
# TODO: combine label filtering with access control
import re
from django.contrib.auth.models import User
from django.db import models
from django.db import transaction
from django.db.models import Q
from hs_core.models import BaseResource
class FlagCodes(object):
"""
Flag codes describe the meanings of per-user flags for a resource.
* 1 or FlagCodes.FAVORITE:
marked as a favorite on "My Resources" page
    * 2 or FlagCodes.MINE:
       marked as being part of "My Resources" on "Discover" page.
    * 3 or FlagCodes.OPEN_WITH_APP:
       marked as a resource to open with a web app.
"""
FAVORITE = 1
MINE = 2
OPEN_WITH_APP = 3
FLAG_CHOICES = (
(FAVORITE, 'Favorite'), # marked as favorite in my resources page.
(MINE, 'Mine'), # marked as mine in discovery page.
        (OPEN_WITH_APP, 'Open With App'),  # marked as an open-with app
)
class UserResourceLabels(models.Model):
"""
Labels of a user for a resource
    This model stores labels of an individual user, like an access control list.
"""
start = models.DateTimeField(editable=False, auto_now=True)
user = models.ForeignKey(User, null=False, editable=False,
related_name='u2url', # unused but must be defined and unique
help_text='user assigning a label',
on_delete=models.CASCADE)
resource = models.ForeignKey(BaseResource, null=False, editable=False,
related_name='r2url', # unused but must be defined and unique
help_text='resource to which a label applies',
on_delete=models.CASCADE)
label = models.TextField(null=False)
class Meta:
unique_together = ('user', 'resource', 'label')
class UserResourceFlags(models.Model):
"""
Per-user flagging of resources.
This model stores labels of an individual user, like an access
control list; There are several kinds of labels documented in FlagCodes.
These are similar in implementation but differ in semantics.
"""
kind = models.IntegerField(choices=FlagCodes.FLAG_CHOICES,
editable=False,
default=FlagCodes.FAVORITE)
start = models.DateTimeField(editable=False, auto_now=True)
user = models.ForeignKey(User, null=False, editable=False,
related_name='u2urf', # unused but must be defined and unique
help_text='user assigning a flag',
on_delete=models.CASCADE)
resource = models.ForeignKey(BaseResource, null=False, editable=False,
related_name="r2urf", # unused but must be defined and unique
help_text='resource to which a flag applies',
on_delete=models.CASCADE)
class Meta:
unique_together = ('user', 'resource', 'kind')
class UserStoredLabels(models.Model):
"""
Storage class for persistent labels that are reusable across different kinds of objects
"""
user = models.ForeignKey(User, null=False,
help_text='user who stored the label',
related_name='ul2usl',
on_delete=models.CASCADE)
label = models.TextField(help_text='label to be stored by user')
class Meta:
unique_together = ('user', 'label')
class UserLabels(models.Model):
"""
Projection class puts methods and content inside basic User object
so that one can access things easily from that context.
    This model is injected into the User class as the related name "ulabels".
    Thus for a User u, u.ulabels is this model.
"""
user = models.OneToOneField(User,
editable=False,
null=True,
related_name='ulabels', # induced field in User class.
related_query_name='ulabels',
on_delete=models.CASCADE)
##########################################
# PUBLIC FUNCTIONS: resources
##########################################
@property
def labeled_resources(self):
"""
Get a QuerySet of resources labeled by a user.
This eliminates duplicates.
"""
return BaseResource.objects.filter(r2url__user=self.user).distinct()
def get_flagged_resources(self, this_flagcode):
"""
Get resources with a specific flag.
"""
if __debug__: # during testing only, check argument types and preconditions
assert this_flagcode == FlagCodes.FAVORITE or this_flagcode == FlagCodes.MINE or \
this_flagcode == FlagCodes.OPEN_WITH_APP
return BaseResource.objects.filter(r2urf__user=self.user,
r2urf__kind=this_flagcode)
@property
def favorited_resources(self):
"""
Get a QuerySet of resources favorited by a user.
This eliminates duplicates.
"""
return self.get_flagged_resources(FlagCodes.FAVORITE)
@property
def my_resources(self):
"""
Get a QuerySet of resources marked as mine (add to my resources) by a user.
This eliminates duplicates.
"""
return self.get_flagged_resources(FlagCodes.MINE)
@property
def resources_of_interest(self):
"""
Get a QuerySet of resources the user has tagged in any way.
"""
return BaseResource.objects.filter(Q(r2url__user=self.user) | Q(r2urf__user=self.user)).distinct()
def get_resources_with_label(self, this_label):
"""
Get a QuerySet of resources with a specific label.
"""
if __debug__: # during testing only, check argument types and preconditions
assert isinstance(this_label, str)
label_string = UserLabels.clean_label(this_label) # remove leading and trailing spaces
return BaseResource.objects.filter(r2url__user=self.user,
r2url__label__exact=label_string)\
.distinct()\
.order_by('r2url__label')
@property
def user_labels(self):
"""
Get a QuerySet of labels in use now.
"""
return UserResourceLabels.objects.values_list('label', flat=True)\
.filter(user=self.user)\
.distinct().order_by('label')
######################################
# Label a resource
######################################
@staticmethod
def clean_label(name):
label_string = re.sub('/', r'', name) # no /'s
label_string = label_string.strip() # no leading or trailing whitespace
label_string = re.sub(r'\s+', r' ', label_string) # collapse multiple whitespace, including tabs
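        # Illustrative: "  My / Label\t " becomes "My Label" -- slashes dropped,
        # edges trimmed, and internal whitespace runs collapsed to single spaces.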
return label_string
def label_resource(self, this_resource, this_label):
"""
Assign a label to a resource
Users are allowed to label any resource, including resources to which they do not have access.
This is not an access control problem because labeling information is private.
"""
if __debug__: # during testing only, check argument types and preconditions
assert isinstance(this_resource, BaseResource)
assert isinstance(this_label, str)
# remove leading and trailing spaces
label_string = UserLabels.clean_label(this_label)
with transaction.atomic(): # empirically, get_or_create is not atomic.
UserResourceLabels.objects.get_or_create(resource=this_resource,
label=label_string,
user=self.user)
def unlabel_resource(self, this_resource, this_label):
"""
Remove one label from a resource
Users are allowed to label any resource, including resources to which they do not have access.
This is not an access control problem because labeling information is private.
"""
if __debug__: # during testing only, check argument types and preconditions
assert isinstance(this_resource, BaseResource)
assert isinstance(this_label, str)
# remove leading and trailing spaces
label_string = UserLabels.clean_label(this_label)
UserResourceLabels.objects.filter(resource=this_resource,
label__exact=label_string,
user=self.user).delete()
def clear_resource_labels(self, this_resource):
"""
Clear all labels for a resource
"""
if __debug__: # during testing only, check argument types and preconditions
assert isinstance(this_resource, BaseResource)
UserResourceLabels.objects.filter(resource=this_resource,
user=self.user).delete()
def remove_resource_label(self, this_label):
"""
clear a label from the labeling system.
"""
if __debug__: # during testing only, check argument types and preconditions
assert isinstance(this_label, str)
UserResourceLabels.objects.filter(label=this_label, user=self.user)\
.delete()
##########################################
# general flagging of resources
##########################################
def flag_resource(self, this_resource, this_flagcode):
"""
flag a resource with a specific flag code from FlagCodes
Users are allowed to flag any resource, including resources to which they do not have access.
This is not an access control problem because flagging information is private.
"""
if __debug__: # during testing only, check argument types and preconditions
assert isinstance(this_resource, BaseResource)
assert this_flagcode == FlagCodes.FAVORITE or this_flagcode == FlagCodes.MINE or \
this_flagcode == FlagCodes.OPEN_WITH_APP
with transaction.atomic(): # empirically, get_or_create is not atomic.
UserResourceFlags.objects.get_or_create(resource=this_resource,
kind=this_flagcode,
user=self.user)
def unflag_resource(self, this_resource, this_flagcode):
"""
unflag a resource with a specific flag.
Users are allowed to flag any resource, including resources to which they do not have access.
This is not an access control problem because flagging information is private.
"""
if __debug__: # during testing only, check argument types and preconditions
assert isinstance(this_resource, BaseResource)
assert this_flagcode == FlagCodes.FAVORITE or this_flagcode == FlagCodes.MINE or \
this_flagcode == FlagCodes.OPEN_WITH_APP
UserResourceFlags.objects.filter(user=self.user,
resource=this_resource,
kind=this_flagcode).delete()
def clear_all_flags(self, this_flagcode):
"""
remove all flags of a specific kind for a user
"""
UserResourceFlags.objects.filter(user=self.user,
kind=this_flagcode)\
.delete()
##########################################
# favorite resources
##########################################
def favorite_resource(self, this_resource):
"""
Mark a resource as favorite.
Users are allowed to flag any resource, including resources to which they do not have access.
This is not an access control problem because labeling information is private.
"""
self.flag_resource(this_resource, FlagCodes.FAVORITE)
def unfavorite_resource(self, this_resource):
"""
Clear favorite label for a resource
Users are allowed to flag any resource, including resources to which they do not have access.
This is not an access control problem because labeling information is private.
"""
self.unflag_resource(this_resource, FlagCodes.FAVORITE)
##########################################
# my resources
##########################################
def claim_resource(self, this_resource):
"""
Label a resource as 'MINE' (adds to my resources).
Users are allowed to flag any resource, including resources to which they do not have access.
This is not an access control problem because labeling information is private.
"""
self.flag_resource(this_resource, FlagCodes.MINE)
def unclaim_resource(self, this_resource):
"""
Clear 'MINE' label for a resource (removes from my resources)
Users are allowed to flag any resource, including resources to which they do not have access.
This is not an access control problem because labeling information is private.
"""
self.unflag_resource(this_resource, FlagCodes.MINE)
##########################################
# open with app
##########################################
def add_open_with_app(self, this_resource):
"""
Mark a webapp resource as open-with-app
Users are allowed to flag any resource, including resources to which they do not have access.
This is not an access control problem because labeling information is private.
The calling function should make sure resource is a webapp resource
"""
self.flag_resource(this_resource, FlagCodes.OPEN_WITH_APP)
def remove_open_with_app(self, this_resource):
"""
Unmark a webapp resource as open-with-app
Users are allowed to flag any resource, including resources to which they do not have access.
This is not an access control problem because labeling information is private.
The calling function should make sure resource is a webapp resource
"""
self.unflag_resource(this_resource, FlagCodes.OPEN_WITH_APP)
##########################################
# routines that apply to all kinds of annotations
##########################################
def clear_resource_all(self, this_resource):
"""
Clear all annotations for a resource
"""
if __debug__: # during testing only, check argument types and preconditions
assert isinstance(this_resource, BaseResource)
UserResourceLabels.objects\
.filter(resource=this_resource,
user=self.user)\
.delete()
UserResourceFlags.objects\
.filter(resource=this_resource,
user=self.user)\
.delete()
##########################################
# save unused labels
##########################################
def save_label(self, this_label):
"""
Save a label for use later.
Users are allowed to label any resource, including resources to which they do not have access.
This is not an access control problem because labeling information is private.
"""
label_string = UserLabels.clean_label(this_label) # remove leading and trailing spaces
with transaction.atomic(): # empirically, get_or_create is not atomic.
UserStoredLabels.objects.get_or_create(label=label_string, user=self.user)
def unsave_label(self, this_label):
"""
Remove the specified saved label.
"""
# remove leading and trailing spaces
label_string = UserLabels.clean_label(this_label)
UserStoredLabels.objects.filter(label__exact=label_string, user=self.user).delete()
# remove all uses of that label from resources.
self.remove_resource_label(label_string)
def clear_saved_labels(self):
"""
Clear all saved labels for a user
"""
UserStoredLabels.objects.filter(user=self.user).delete()
@property
def saved_labels(self):
"""
Return a QuerySet of saved labels.
"""
return UserStoredLabels.objects.filter(user=self.user).values_list('label', flat=True).distinct()
class ResourceLabels(models.Model):
"""
For a BaseResource r, r.rlabels is this model. It contains functions relevant to resources.
"""
resource = models.OneToOneField(BaseResource,
editable=False,
null=True,
related_name='rlabels',
related_query_name='rlabels',
on_delete=models.CASCADE)
def get_users(self):
"""
Return a QuerySet of all users who have labeled this resource.
"""
return User.objects.filter(Q(u2url__resource=self.resource) | Q(u2urf__resource=self.resource))
def get_labels(self, this_user):
"""
Return a QuerySet of all user assigned labels for a resource
"""
if __debug__: # during testing only, check argument types and preconditions
assert isinstance(this_user, User)
labels = UserResourceLabels.objects\
.values_list('label', flat=True)\
.filter(user=this_user,
resource=self.resource)\
.order_by("label").all()
return labels
def is_flagged(self, this_user, this_flagcode):
"""
Return True if this resource has been flagged by a given user
"""
if __debug__: # during testing only, check argument types and preconditions
assert isinstance(this_user, User)
assert this_flagcode == FlagCodes.FAVORITE or this_flagcode == FlagCodes.MINE or \
this_flagcode == FlagCodes.OPEN_WITH_APP
return UserResourceFlags.objects.filter(user=this_user,
resource=self.resource,
kind=this_flagcode).exists()
def is_favorite(self, this_user):
"""
Return True if this resource has been favorited by a given user
"""
return self.is_flagged(this_user, FlagCodes.FAVORITE)
def is_mine(self, this_user):
"""
Return True if this resource has been labeled as mine by a given user
"""
return self.is_flagged(this_user, FlagCodes.MINE)
def is_open_with_app(self, this_user):
"""
Return True if this resource has been set as open-with-app by a given user
"""
return self.is_flagged(this_user, FlagCodes.OPEN_WITH_APP)
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Run BERT on SQuAD 1.1 and SQuAD 2.0 in TF 2.x."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import os
import time
# Import libraries
from absl import app
from absl import flags
from absl import logging
import gin
import tensorflow as tf
from official.common import distribute_utils
from official.nlp.bert import configs as bert_configs
from official.nlp.bert import run_squad_helper
from official.nlp.bert import tokenization
from official.nlp.data import squad_lib as squad_lib_wp
from official.utils.misc import keras_utils
flags.DEFINE_string('vocab_file', None,
'The vocabulary file that the BERT model was trained on.')
# More flags can be found in run_squad_helper.
run_squad_helper.define_common_squad_flags()
FLAGS = flags.FLAGS
def train_squad(strategy,
input_meta_data,
custom_callbacks=None,
run_eagerly=False,
init_checkpoint=None,
sub_model_export_name=None):
"""Run bert squad training."""
bert_config = bert_configs.BertConfig.from_json_file(FLAGS.bert_config_file)
init_checkpoint = init_checkpoint or FLAGS.init_checkpoint
run_squad_helper.train_squad(strategy, input_meta_data, bert_config,
custom_callbacks, run_eagerly, init_checkpoint,
sub_model_export_name=sub_model_export_name)
def predict_squad(strategy, input_meta_data):
"""Makes predictions for the squad dataset."""
bert_config = bert_configs.BertConfig.from_json_file(FLAGS.bert_config_file)
tokenizer = tokenization.FullTokenizer(
vocab_file=FLAGS.vocab_file, do_lower_case=FLAGS.do_lower_case)
run_squad_helper.predict_squad(
strategy, input_meta_data, tokenizer, bert_config, squad_lib_wp)
def eval_squad(strategy, input_meta_data):
"""Evaluate on the squad dataset."""
bert_config = bert_configs.BertConfig.from_json_file(FLAGS.bert_config_file)
tokenizer = tokenization.FullTokenizer(
vocab_file=FLAGS.vocab_file, do_lower_case=FLAGS.do_lower_case)
eval_metrics = run_squad_helper.eval_squad(
strategy, input_meta_data, tokenizer, bert_config, squad_lib_wp)
return eval_metrics
def export_squad(model_export_path, input_meta_data):
"""Exports a trained model as a `SavedModel` for inference.
Args:
model_export_path: a string specifying the path to the SavedModel directory.
input_meta_data: dictionary containing meta data about input and model.
Raises:
Export path is not specified, got an empty string or None.
"""
bert_config = bert_configs.BertConfig.from_json_file(FLAGS.bert_config_file)
run_squad_helper.export_squad(model_export_path, input_meta_data, bert_config)
def main(_):
gin.parse_config_files_and_bindings(FLAGS.gin_file, FLAGS.gin_param)
with tf.io.gfile.GFile(FLAGS.input_meta_data_path, 'rb') as reader:
input_meta_data = json.loads(reader.read().decode('utf-8'))
if FLAGS.mode == 'export_only':
export_squad(FLAGS.model_export_path, input_meta_data)
return
# Configures cluster spec for multi-worker distribution strategy.
if FLAGS.num_gpus > 0:
_ = distribute_utils.configure_cluster(FLAGS.worker_hosts, FLAGS.task_index)
strategy = distribute_utils.get_distribution_strategy(
distribution_strategy=FLAGS.distribution_strategy,
num_gpus=FLAGS.num_gpus,
all_reduce_alg=FLAGS.all_reduce_alg,
tpu_address=FLAGS.tpu)
if 'train' in FLAGS.mode:
if FLAGS.log_steps:
custom_callbacks = [keras_utils.TimeHistory(
batch_size=FLAGS.train_batch_size,
log_steps=FLAGS.log_steps,
logdir=FLAGS.model_dir,
)]
else:
custom_callbacks = None
train_squad(
strategy,
input_meta_data,
custom_callbacks=custom_callbacks,
run_eagerly=FLAGS.run_eagerly,
sub_model_export_name=FLAGS.sub_model_export_name,
)
if 'predict' in FLAGS.mode:
predict_squad(strategy, input_meta_data)
if 'eval' in FLAGS.mode:
eval_metrics = eval_squad(strategy, input_meta_data)
f1_score = eval_metrics['final_f1']
logging.info('SQuAD eval F1-score: %f', f1_score)
summary_dir = os.path.join(FLAGS.model_dir, 'summaries', 'eval')
summary_writer = tf.summary.create_file_writer(summary_dir)
with summary_writer.as_default():
# TODO(lehou): write to the correct step number.
tf.summary.scalar('F1-score', f1_score, step=0)
summary_writer.flush()
# Also write eval_metrics to json file.
squad_lib_wp.write_to_json_files(
eval_metrics, os.path.join(summary_dir, 'eval_metrics.json'))
time.sleep(60)
if __name__ == '__main__':
flags.mark_flag_as_required('bert_config_file')
flags.mark_flag_as_required('model_dir')
app.run(main)
|
#!/usr/bin/python
from mininet.net import Mininet
from mininet.node import Controller, RemoteController, OVSController
from mininet.node import CPULimitedHost, Host, Node
from mininet.node import OVSKernelSwitch, UserSwitch
from mininet.node import IVSSwitch
from mininet.cli import CLI
from mininet.log import setLogLevel, info
from mininet.link import TCLink, Intf
from subprocess import call
def myNetwork():
net = Mininet( topo=None,
build=False,
ipBase='10.0.0.0/8',controller=RemoteController,host=CPULimitedHost,link=TCLink,switch=UserSwitch)
info( '*** Adding controller\n' )
net.addController('c0',controller=RemoteController,ip='192.168.56.1',port=6653)
info( '*** Add routers\n')
r1 = net.addHost('r1', cls=Node, ip='0.0.0.0')
info( '*** Add switches\n')
s1 =net.addSwitch('s1')
s2 =net.addSwitch('s2')
## switch = net.switches[ 0 ]
info( '*** Add hosts\n')
h1 = net.addHost('h1', cls=Host, ip='192.168.11.1/24', defaultRoute=None)
h2 = net.addHost('h2', cls=Host, ip='192.168.12.1/24', defaultRoute=None)
info( '*** Add links\n')
net.addLink(r1, s1, cls=TCLink )
net.addLink(s1, r1, cls=TCLink )
net.addLink(s2, r1, cls=TCLink )
net.addLink(r1, s2, cls=TCLink )
net.addLink(h1, s1, cls=TCLink )
net.addLink(h2, s2, cls=TCLink )
## net.addLink(r1, h1, cls=TCLink )
## net.addLink(h2, r1, cls=TCLink )
info( '*** Starting network\n')
net.build()
info( '*** Starting controllers\n')
for controller in net.controllers:
controller.start()
info( '*** Starting switches\n')
info( '*** Post configure switches and hosts\n')
r1.cmd('ifconfig r1-eth0 192.168.11.2 netmask 255.255.255.0')
r1.cmd('ifconfig r1-eth1 192.168.12.2 netmask 255.255.255.0')
## r1.cmd('ifconfig r1-eth3 10.0.2.225 netmask 255.255.255.0')
h1.cmd('route add default gw 192.168.11.2')
h2.cmd('route add default gw 192.168.12.2')
## r1.cmd('route add default gw 192.168.56.1')
r1.cmd('sysctl net.ipv4.ip_forward=1')
CLI(net)
net.stop()
if __name__ == '__main__':
setLogLevel( 'info' )
myNetwork()
|
import pickle
import plaidrl.torch.pytorch_util as ptu
from plaidrl.core import logger
from plaidrl.core.meta_rl_algorithm import MetaRLAlgorithm
from plaidrl.core.simple_offline_rl_algorithm import OfflineMetaRLAlgorithm
from plaidrl.data_management.env_replay_buffer import EnvReplayBuffer
from plaidrl.demos.source.mdp_path_loader import MDPPathLoader
from plaidrl.envs.pearl_envs import ENVS, register_pearl_envs
from plaidrl.envs.wrappers import NormalizedBoxEnv
from plaidrl.torch.networks import ConcatMlp
from plaidrl.torch.smac.agent import SmacAgent
from plaidrl.torch.smac.diagnostics import get_env_info_sizes
from plaidrl.torch.smac.launcher_util import (
EvalPearl,
load_buffer_onto_algo,
load_macaw_buffer_onto_algo,
policy_class_from_str,
relabel_offline_data,
)
from plaidrl.torch.smac.networks import DummyMlpEncoder, MlpDecoder, MlpEncoder
from plaidrl.torch.smac.smac import SmacTrainer
from plaidrl.util.io import load_local_or_remote_file
def smac_experiment(
trainer_kwargs=None,
algo_kwargs=None,
qf_kwargs=None,
policy_kwargs=None,
context_encoder_kwargs=None,
context_decoder_kwargs=None,
env_name=None,
env_params=None,
path_loader_kwargs=None,
latent_dim=None,
policy_class="TanhGaussianPolicy",
# video/debug
debug=False,
use_dummy_encoder=False,
networks_ignore_context=False,
use_ground_truth_context=False,
save_video=False,
save_video_period=False,
# Pre-train params
pretrain_rl=False,
pretrain_offline_algo_kwargs=None,
pretrain_buffer_kwargs=None,
load_buffer_kwargs=None,
saved_tasks_path=None,
macaw_format_base_path=None, # overrides saved_tasks_path and load_buffer_kwargs
load_macaw_buffer_kwargs=None,
train_task_idxs=None,
eval_task_idxs=None,
relabel_offline_dataset=False,
skip_initial_data_collection_if_pretrained=False,
relabel_kwargs=None,
# PEARL
n_train_tasks=0,
n_eval_tasks=0,
use_next_obs_in_context=False,
tags=None,
online_trainer_kwargs=None,
):
if not skip_initial_data_collection_if_pretrained:
raise NotImplementedError("deprecated! make sure to skip it!")
if relabel_kwargs is None:
relabel_kwargs = {}
del tags
pretrain_buffer_kwargs = pretrain_buffer_kwargs or {}
context_decoder_kwargs = context_decoder_kwargs or {}
pretrain_offline_algo_kwargs = pretrain_offline_algo_kwargs or {}
online_trainer_kwargs = online_trainer_kwargs or {}
register_pearl_envs()
env_params = env_params or {}
context_encoder_kwargs = context_encoder_kwargs or {}
trainer_kwargs = trainer_kwargs or {}
path_loader_kwargs = path_loader_kwargs or {}
load_macaw_buffer_kwargs = load_macaw_buffer_kwargs or {}
base_env = ENVS[env_name](**env_params)
if saved_tasks_path:
task_data = load_local_or_remote_file(saved_tasks_path, file_type="joblib")
tasks = task_data["tasks"]
train_task_idxs = task_data["train_task_indices"]
eval_task_idxs = task_data["eval_task_indices"]
base_env.tasks = tasks
elif macaw_format_base_path is not None:
tasks = pickle.load(open("{}/tasks.pkl".format(macaw_format_base_path), "rb"))
base_env.tasks = tasks
else:
tasks = base_env.tasks
task_indices = base_env.get_all_task_idx()
train_task_idxs = list(task_indices[:n_train_tasks])
eval_task_idxs = list(task_indices[-n_eval_tasks:])
if hasattr(base_env, "task_to_vec"):
train_tasks = [base_env.task_to_vec(tasks[i]) for i in train_task_idxs]
eval_tasks = [base_env.task_to_vec(tasks[i]) for i in eval_task_idxs]
else:
train_tasks = [tasks[i] for i in train_task_idxs]
eval_tasks = [tasks[i] for i in eval_task_idxs]
if use_ground_truth_context:
latent_dim = len(train_tasks[0])
expl_env = NormalizedBoxEnv(base_env)
reward_dim = 1
if debug:
algo_kwargs["max_path_length"] = 50
algo_kwargs["batch_size"] = 5
algo_kwargs["num_epochs"] = 5
algo_kwargs["num_eval_steps_per_epoch"] = 100
algo_kwargs["num_expl_steps_per_train_loop"] = 100
algo_kwargs["num_trains_per_train_loop"] = 10
algo_kwargs["min_num_steps_before_training"] = 100
obs_dim = expl_env.observation_space.low.size
action_dim = expl_env.action_space.low.size
if use_next_obs_in_context:
context_encoder_input_dim = 2 * obs_dim + action_dim + reward_dim
else:
context_encoder_input_dim = obs_dim + action_dim + reward_dim
context_encoder_output_dim = latent_dim * 2
def create_qf():
return ConcatMlp(
input_size=obs_dim + action_dim + latent_dim, output_size=1, **qf_kwargs
)
qf1 = create_qf()
qf2 = create_qf()
target_qf1 = create_qf()
target_qf2 = create_qf()
if isinstance(policy_class, str):
policy_class = policy_class_from_str(policy_class)
policy = policy_class(
obs_dim=obs_dim + latent_dim,
action_dim=action_dim,
**policy_kwargs,
)
encoder_class = DummyMlpEncoder if use_dummy_encoder else MlpEncoder
context_encoder = encoder_class(
input_size=context_encoder_input_dim,
output_size=context_encoder_output_dim,
hidden_sizes=[200, 200, 200],
use_ground_truth_context=use_ground_truth_context,
**context_encoder_kwargs,
)
context_decoder = MlpDecoder(
input_size=obs_dim + action_dim + latent_dim,
output_size=1,
**context_decoder_kwargs,
)
reward_predictor = context_decoder
agent = SmacAgent(
latent_dim,
context_encoder,
policy,
reward_predictor,
use_next_obs_in_context=use_next_obs_in_context,
_debug_ignore_context=networks_ignore_context,
_debug_use_ground_truth_context=use_ground_truth_context,
)
trainer = SmacTrainer(
agent=agent,
env=expl_env,
latent_dim=latent_dim,
qf1=qf1,
qf2=qf2,
target_qf1=target_qf1,
target_qf2=target_qf2,
reward_predictor=reward_predictor,
context_encoder=context_encoder,
context_decoder=context_decoder,
_debug_ignore_context=networks_ignore_context,
_debug_use_ground_truth_context=use_ground_truth_context,
**trainer_kwargs,
)
algorithm = MetaRLAlgorithm(
agent=agent,
env=expl_env,
trainer=trainer,
train_task_indices=train_task_idxs,
eval_task_indices=eval_task_idxs,
train_tasks=train_tasks,
eval_tasks=eval_tasks,
use_next_obs_in_context=use_next_obs_in_context,
use_ground_truth_context=use_ground_truth_context,
env_info_sizes=get_env_info_sizes(expl_env),
**algo_kwargs,
)
if macaw_format_base_path:
load_macaw_buffer_onto_algo(
algo=algorithm,
base_directory=macaw_format_base_path,
train_task_idxs=train_task_idxs,
**load_macaw_buffer_kwargs,
)
elif load_buffer_kwargs:
load_buffer_onto_algo(algorithm, **load_buffer_kwargs)
if relabel_offline_dataset:
relabel_offline_data(
algorithm, tasks=tasks, env=expl_env.wrapped_env, **relabel_kwargs
)
if path_loader_kwargs:
replay_buffer = algorithm.replay_buffer.task_buffers[0]
enc_replay_buffer = algorithm.enc_replay_buffer.task_buffers[0]
demo_test_buffer = EnvReplayBuffer(env=expl_env, **pretrain_buffer_kwargs)
path_loader = MDPPathLoader(
trainer,
replay_buffer=replay_buffer,
demo_train_buffer=enc_replay_buffer,
demo_test_buffer=demo_test_buffer,
**path_loader_kwargs,
)
path_loader.load_demos()
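    # Optional offline pretraining phase: its metrics go to pretrain.csv, and the
    # logger is switched back to progress.csv before online training starts.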
if pretrain_rl:
eval_pearl_fn = EvalPearl(algorithm, train_task_idxs, eval_task_idxs)
pretrain_algo = OfflineMetaRLAlgorithm(
meta_replay_buffer=algorithm.meta_replay_buffer,
replay_buffer=algorithm.replay_buffer,
task_embedding_replay_buffer=algorithm.enc_replay_buffer,
trainer=trainer,
train_tasks=train_task_idxs,
extra_eval_fns=[eval_pearl_fn],
use_meta_learning_buffer=algorithm.use_meta_learning_buffer,
**pretrain_offline_algo_kwargs,
)
pretrain_algo.to(ptu.device)
logger.remove_tabular_output("progress.csv", relative_to_snapshot_dir=True)
logger.add_tabular_output("pretrain.csv", relative_to_snapshot_dir=True)
pretrain_algo.train()
logger.remove_tabular_output("pretrain.csv", relative_to_snapshot_dir=True)
logger.add_tabular_output(
"progress.csv",
relative_to_snapshot_dir=True,
)
if skip_initial_data_collection_if_pretrained:
algorithm.num_initial_steps = 0
algorithm.trainer.configure(**online_trainer_kwargs)
algorithm.to(ptu.device)
algorithm.train()
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2017-03-05 13:47
from __future__ import unicode_literals
from django.db import migrations, models
import jsonfield.fields
import uuid
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='ChatSession',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('state', models.CharField(choices=[('in_progress', 'In progress'), ('complete', 'Complete')], default='in_progress', max_length=100)),
('uuid', models.UUIDField(default=uuid.uuid4, unique=True)),
('user_id', models.CharField(db_index=True, max_length=100)),
('created', models.DateTimeField(auto_now_add=True)),
('updated', models.DateTimeField(auto_now=True, db_index=True)),
('meta', jsonfield.fields.JSONField(blank=True, default=dict)),
],
),
]
|
# Generated by Django 1.9.13 on 2017-08-31 05:44
from django.conf import settings
import django.contrib.postgres.fields.jsonb
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
import markupfield.fields
class Migration(migrations.Migration):
replaces = [('community', '0001_initial'), ('community', '0002_auto_20150416_1853'), ('community', '0003_auto_20170831_0358'), ('community', '0004_auto_20170831_0541')]
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Link',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(blank=True, db_index=True, default=django.utils.timezone.now)),
('updated', models.DateTimeField(default=django.utils.timezone.now, blank=True)),
('url', models.URLField(blank=True, max_length=1000, verbose_name='URL')),
('creator', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='community_link_creator', to=settings.AUTH_USER_MODEL)),
('last_modified_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='community_link_modified', to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name_plural': 'Links',
'ordering': ['-created'],
'verbose_name': 'Link',
'get_latest_by': 'created',
},
),
migrations.CreateModel(
name='Photo',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(blank=True, db_index=True, default=django.utils.timezone.now)),
('updated', models.DateTimeField(default=django.utils.timezone.now, blank=True)),
('image', models.ImageField(blank=True, upload_to='community/photos/')),
('image_url', models.URLField(blank=True, max_length=1000, verbose_name='Image URL')),
('caption', models.TextField(blank=True)),
('click_through_url', models.URLField(blank=True)),
('creator', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='community_photo_creator', to=settings.AUTH_USER_MODEL)),
('last_modified_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='community_photo_modified', to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name_plural': 'photos',
'ordering': ['-created'],
'verbose_name': 'photo',
'get_latest_by': 'created',
},
),
migrations.CreateModel(
name='Post',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(blank=True, db_index=True, default=django.utils.timezone.now)),
('updated', models.DateTimeField(default=django.utils.timezone.now, blank=True)),
('title', models.CharField(blank=True, max_length=200, null=True)),
('content', markupfield.fields.MarkupField(rendered_field=True)),
('abstract', models.TextField(blank=True, null=True)),
('content_markup_type', models.CharField(choices=[('', '--'), ('html', 'html'), ('plain', 'plain'), ('markdown', 'markdown'), ('restructuredtext', 'restructuredtext')], default='html', max_length=30)),
('_content_rendered', models.TextField(editable=False)),
('media_type', models.IntegerField(choices=[(1, 'text'), (2, 'photo'), (3, 'video'), (4, 'link')], default=1)),
('source_url', models.URLField(blank=True, max_length=1000)),
('meta', django.contrib.postgres.fields.jsonb.JSONField(blank=True, default={})),
('status', models.IntegerField(choices=[(1, 'private'), (2, 'public')], db_index=True, default=1)),
('creator', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='community_post_creator', to=settings.AUTH_USER_MODEL)),
('last_modified_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='community_post_modified', to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name_plural': 'posts',
'ordering': ['-created'],
'verbose_name': 'post',
'get_latest_by': 'created',
},
),
migrations.CreateModel(
name='Video',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(blank=True, db_index=True, default=django.utils.timezone.now)),
('updated', models.DateTimeField(default=django.utils.timezone.now, blank=True)),
('video_embed', models.TextField(blank=True)),
('video_data', models.FileField(blank=True, upload_to='community/videos/')),
('caption', models.TextField(blank=True)),
('click_through_url', models.URLField(blank=True, verbose_name='Click Through URL')),
('creator', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='community_video_creator', to=settings.AUTH_USER_MODEL)),
('last_modified_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='community_video_modified', to=settings.AUTH_USER_MODEL)),
('post', models.ForeignKey(editable=False, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='related_video', to='community.Post')),
],
options={
'verbose_name_plural': 'videos',
'ordering': ['-created'],
'verbose_name': 'video',
'get_latest_by': 'created',
},
),
migrations.AddField(
model_name='photo',
name='post',
field=models.ForeignKey(editable=False, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='related_photo', to='community.Post'),
),
migrations.AddField(
model_name='link',
name='post',
field=models.ForeignKey(editable=False, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='related_link', to='community.Post'),
),
migrations.AlterField(
model_name='post',
name='content_markup_type',
field=models.CharField(choices=[('', '--'), ('html', 'HTML'), ('plain', 'Plain'), ('markdown', 'Markdown'), ('restructuredtext', 'Restructured Text')], default='html', max_length=30),
),
migrations.AlterField(
model_name='post',
name='meta',
field=django.contrib.postgres.fields.jsonb.JSONField(blank=True, default={}),
),
migrations.AlterField(
model_name='post',
name='meta',
field=django.contrib.postgres.fields.jsonb.JSONField(blank=True, default=dict),
),
]
|
# Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
"""Unit test suite for ``aws_encryption_sdk.internal.crypto.encryption.Decryptor``."""
import pytest
from mock import MagicMock, sentinel
from pytest_mock import mocker # noqa pylint: disable=unused-import
import aws_encryption_sdk.internal.crypto.encryption
from aws_encryption_sdk.internal.crypto.encryption import Decryptor, decrypt
pytestmark = [pytest.mark.unit, pytest.mark.local]
@pytest.fixture
def patch_default_backend(mocker):
mocker.patch.object(aws_encryption_sdk.internal.crypto.encryption, "default_backend")
yield aws_encryption_sdk.internal.crypto.encryption.default_backend
@pytest.fixture
def patch_cipher(mocker):
mocker.patch.object(aws_encryption_sdk.internal.crypto.encryption, "Cipher")
yield aws_encryption_sdk.internal.crypto.encryption.Cipher
@pytest.fixture
def patch_decryptor(mocker):
mocker.patch.object(aws_encryption_sdk.internal.crypto.encryption, "Decryptor")
yield aws_encryption_sdk.internal.crypto.encryption.Decryptor
def test_decryptor_init(patch_default_backend, patch_cipher):
mock_algorithm = MagicMock()
tester = Decryptor(
algorithm=mock_algorithm, key=sentinel.key, associated_data=sentinel.aad, iv=sentinel.iv, tag=sentinel.tag
)
assert tester.source_key is sentinel.key
mock_algorithm.encryption_algorithm.assert_called_once_with(sentinel.key)
mock_algorithm.encryption_mode.assert_called_once_with(sentinel.iv, sentinel.tag)
patch_default_backend.assert_called_once_with()
patch_cipher.assert_called_once_with(
mock_algorithm.encryption_algorithm.return_value,
mock_algorithm.encryption_mode.return_value,
backend=patch_default_backend.return_value,
)
patch_cipher.return_value.decryptor.assert_called_once_with()
assert tester._decryptor is patch_cipher.return_value.decryptor.return_value
tester._decryptor.authenticate_additional_data.assert_called_once_with(sentinel.aad)
def test_decryptor_update(patch_default_backend, patch_cipher):
tester = Decryptor(
algorithm=MagicMock(), key=sentinel.key, associated_data=sentinel.aad, iv=sentinel.iv, tag=sentinel.tag
)
test = tester.update(sentinel.ciphertext)
tester._decryptor.update.assert_called_once_with(sentinel.ciphertext)
assert test is tester._decryptor.update.return_value
def test_decryptor_finalize(patch_default_backend, patch_cipher):
tester = Decryptor(
algorithm=MagicMock(), key=sentinel.key, associated_data=sentinel.aad, iv=sentinel.iv, tag=sentinel.tag
)
test = tester.finalize()
tester._decryptor.finalize.assert_called_once_with()
assert test is tester._decryptor.finalize.return_value
def test_decrypt(patch_decryptor):
patch_decryptor.return_value.update.return_value = b"some data-"
patch_decryptor.return_value.finalize.return_value = b"some more data"
test = decrypt(
algorithm=sentinel.algorithm,
key=sentinel.key,
encrypted_data=MagicMock(iv=sentinel.iv, tag=sentinel.tag, ciphertext=sentinel.ciphertext),
associated_data=sentinel.aad,
)
patch_decryptor.assert_called_once_with(sentinel.algorithm, sentinel.key, sentinel.aad, sentinel.iv, sentinel.tag)
patch_decryptor.return_value.update.assert_called_once_with(sentinel.ciphertext)
patch_decryptor.return_value.finalize.assert_called_once_with()
assert test == b"some data-some more data"
|
from pm4py.models.transition_system import transition_system, utils
|
# -*- coding: utf-8 -*-
#
# Copyright © Spyder Project Contributors
# Licensed under the terms of the MIT License
# (see spyder/__init__.py for details)
"""
Contains the text editor's debugger manager (breakpoint handling).
"""
import os.path as osp
from qtpy.QtWidgets import QInputDialog, QLineEdit
from spyder.config.main import CONF
from spyder.config.base import _
from spyder.py3compat import to_text_string
from spyder.api.manager import Manager
from spyder.plugins.editor.utils.editor import BlockUserData
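# Breakpoints are persisted in Spyder's config under ('run', 'breakpoints') as a
# mapping of file paths to lists of (line_number, condition) tuples; the helpers
# below read, prune (dropping files that no longer exist) and write that mapping.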
def _load_all_breakpoints():
bp_dict = CONF.get('run', 'breakpoints', {})
for filename in list(bp_dict.keys()):
if not osp.isfile(filename):
bp_dict.pop(filename)
return bp_dict
def load_breakpoints(filename):
breakpoints = _load_all_breakpoints().get(filename, [])
if breakpoints and isinstance(breakpoints[0], int):
# Old breakpoints format
breakpoints = [(lineno, None) for lineno in breakpoints]
return breakpoints
def save_breakpoints(filename, breakpoints):
if not osp.isfile(filename):
return
bp_dict = _load_all_breakpoints()
bp_dict[filename] = breakpoints
CONF.set('run', 'breakpoints', bp_dict)
def clear_all_breakpoints():
CONF.set('run', 'breakpoints', {})
def clear_breakpoint(filename, lineno):
breakpoints = load_breakpoints(filename)
if breakpoints:
for breakpoint in breakpoints[:]:
if breakpoint[0] == lineno:
breakpoints.remove(breakpoint)
save_breakpoints(filename, breakpoints)
class DebuggerManager(Manager):
"""
    Manages adding/removing breakpoints from the editor.
"""
def __init__(self, editor):
super(DebuggerManager, self).__init__(editor)
self.filename = None
self.breakpoints = self.get_breakpoints()
self.editor.sig_breakpoints_changed.connect(self.breakpoints_changed)
self.editor.sig_filename_changed.connect(self.set_filename)
def set_filename(self, filename):
if filename is None:
return
if self.filename != filename:
old_filename = self.filename
self.filename = filename
if self.breakpoints:
save_breakpoints(old_filename, []) # clear old breakpoints
self.save_breakpoints()
    def toggle_breakpoint(self, line_number=None, condition=None,
edit_condition=False):
"""Add/remove breakpoint."""
if not self.editor.is_python_like():
return
if line_number is None:
block = self.editor.textCursor().block()
else:
block = self.editor.document().findBlockByNumber(line_number-1)
data = block.userData()
if not data:
data = BlockUserData(self.editor)
data.breakpoint = True
elif not edit_condition:
data.breakpoint = not data.breakpoint
data.breakpoint_condition = None
if condition is not None:
data.breakpoint_condition = condition
if edit_condition:
condition = data.breakpoint_condition
condition, valid = QInputDialog.getText(self.editor,
_('Breakpoint'),
_("Condition:"),
QLineEdit.Normal,
condition)
if not valid:
return
data.breakpoint = True
data.breakpoint_condition = str(condition) if condition else None
if data.breakpoint:
text = to_text_string(block.text()).strip()
if len(text) == 0 or text.startswith(('#', '"', "'")):
data.breakpoint = False
block.setUserData(data)
self.editor.sig_flags_changed.emit()
self.editor.sig_breakpoints_changed.emit()
def get_breakpoints(self):
"""Get breakpoints"""
breakpoints = []
block = self.editor.document().firstBlock()
for line_number in range(1, self.editor.document().blockCount()+1):
data = block.userData()
if data and data.breakpoint:
breakpoints.append((line_number, data.breakpoint_condition))
block = block.next()
return breakpoints
def clear_breakpoints(self):
"""Clear breakpoints"""
self.breakpoints = []
for data in self.editor.blockuserdata_list():
data.breakpoint = False
# data.breakpoint_condition = None # not necessary, but logical
def set_breakpoints(self, breakpoints):
"""Set breakpoints"""
self.clear_breakpoints()
for line_number, condition in breakpoints:
            self.toggle_breakpoint(line_number, condition)
self.breakpoints = self.get_breakpoints()
def update_breakpoints(self):
"""Update breakpoints"""
self.editor.sig_breakpoints_changed.emit()
def breakpoints_changed(self):
"""Breakpoint list has changed"""
breakpoints = self.get_breakpoints()
if self.breakpoints != breakpoints:
self.breakpoints = breakpoints
self.save_breakpoints()
def save_breakpoints(self):
breakpoints = repr(self.breakpoints)
filename = to_text_string(self.filename)
breakpoints = to_text_string(breakpoints)
filename = osp.normpath(osp.abspath(filename))
if breakpoints:
breakpoints = eval(breakpoints)
else:
breakpoints = []
save_breakpoints(filename, breakpoints)
self.editor.sig_breakpoints_saved.emit()
def load_breakpoints(self):
self.set_breakpoints(load_breakpoints(self.filename))
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from openstack.tests.unit import base
from openstack.clustering.v1 import profile_type
FAKE = {
'name': 'FAKE_PROFILE_TYPE',
'schema': {
'foo': 'bar'
},
'support_status': {
'1.0': [{
'status': 'supported',
'since': '2016.10',
}]
}
}
class TestProfileType(base.TestCase):
def test_basic(self):
sot = profile_type.ProfileType()
self.assertEqual('profile_type', sot.resource_key)
self.assertEqual('profile_types', sot.resources_key)
self.assertEqual('/profile-types', sot.base_path)
self.assertTrue(sot.allow_fetch)
self.assertTrue(sot.allow_list)
def test_instantiate(self):
sot = profile_type.ProfileType(**FAKE)
self.assertEqual(FAKE['name'], sot._get_id(sot))
self.assertEqual(FAKE['name'], sot.name)
self.assertEqual(FAKE['schema'], sot.schema)
self.assertEqual(FAKE['support_status'], sot.support_status)
def test_ops(self):
sot = profile_type.ProfileType(**FAKE)
resp = mock.Mock()
resp.json = mock.Mock(return_value='')
sess = mock.Mock()
sess.get = mock.Mock(return_value=resp)
self.assertEqual('', sot.type_ops(sess))
url = 'profile-types/%s/ops' % sot.id
sess.get.assert_called_once_with(url)
|
#! /usr/bin/python3
#
# Copyright (c) 2017 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
# pylint: disable = missing-docstring
"""Testcase using two targets
--------------------------
Note that in this case the target group lists two targets and
each target object has different values.
.. literalinclude:: /examples/test_dump_kws_two_targets.py
:language: python
:pyobject: _test
Execute :download:`the testcase
<../examples/test_dump_kws_two_targets.py>` with::
  $ tcf run -vv /usr/share/tcf/examples/test_dump_kws_two_targets.py
INFO0/ato4B /usr/share/tcf/examples/test_dump_kws_two_targets.py#_test @2psg: Keywords for testcase:
{'cwd': '/home/inaky/z/s/local',
...}
INFO0/ato4B /usr/share/tcf/examples/test_dump_kws_two_targets.py#_test @2psg|localhost/qz33b-arm: Keywords for target 0:
{u'board': u'qemu_cortex_m3',
'bsp': u'arm',
u'bsp_models': {u'arm': [u'arm']},
u'bsps': {u'arm': {u'board': u'qemu_cortex_m3',
u'console': u'arm',
u'kernelname': u'zephyr.elf',
...
u'zephyr_board': u'qemu_cortex_m3',
u'zephyr_kernelname': u'zephyr.elf'}
INFO0/ato4B /usr/share/tcf/examples/test_dump_kws_two_targets.py#_test @2psg|localhost/qz31a-x86: Keywords for target 1:
{u'board': u'qemu_x86',
'bsp': u'x86',
u'bsp_models': {u'x86': [u'x86']},
u'bsps': {u'x86': {u'board': u'qemu_x86',
u'console': u'x86',
u'kernelname': u'zephyr.elf',
u'quark_se_stub': False,
...
u'zephyr_board': u'qemu_x86',
u'zephyr_kernelname': u'zephyr.elf'}
PASS0/ toplevel @local: 1 tests (1 passed, 0 error, 0 failed, 0 blocked, 0 skipped, in 0:00:00.417956) - passed
(depending on your installation method, location might be
*~/.local/share/tcf/examples*)
"""
import pprint
import tcfl.tc
@tcfl.tc.target()
@tcfl.tc.target()
@tcfl.tc.tags(build_only = True, ignore_example = True)
class _test(tcfl.tc.tc_c):
def build(self, target, target1):
self.report_info("Keywords for testcase:\n%s"
% pprint.pformat(self.kws),
level = 0)
target.report_info("Keywords for target 0:\n%s"
% pprint.pformat(target.kws),
level = 0)
target1.report_info("Keywords for target 1:\n%s"
% pprint.pformat(target1.kws),
level = 0)
|
# coding=utf-8
# *** WARNING: this file was generated by pulumi-gen-eks. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
from ._inputs import *
from .vpc_cni import VpcCni
import pulumi_aws
import pulumi_kubernetes
__all__ = ['ManagedNodeGroupArgs', 'ManagedNodeGroup']
@pulumi.input_type
class ManagedNodeGroupArgs:
def __init__(__self__, *,
cluster: pulumi.Input['CoreDataArgs'],
ami_type: Optional[pulumi.Input[str]] = None,
capacity_type: Optional[pulumi.Input[str]] = None,
cluster_name: Optional[pulumi.Input[str]] = None,
disk_size: Optional[pulumi.Input[int]] = None,
force_update_version: Optional[pulumi.Input[bool]] = None,
instance_types: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
launch_template: Optional[pulumi.Input['pulumi_aws.eks.NodeGroupLaunchTemplateArgs']] = None,
node_group_name: Optional[pulumi.Input[str]] = None,
node_group_name_prefix: Optional[pulumi.Input[str]] = None,
node_role: Optional[pulumi.Input['pulumi_aws.iam.Role']] = None,
node_role_arn: Optional[pulumi.Input[str]] = None,
release_version: Optional[pulumi.Input[str]] = None,
remote_access: Optional[pulumi.Input['pulumi_aws.eks.NodeGroupRemoteAccessArgs']] = None,
scaling_config: Optional[pulumi.Input['pulumi_aws.eks.NodeGroupScalingConfigArgs']] = None,
subnet_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
taints: Optional[pulumi.Input[Sequence[pulumi.Input['pulumi_aws.eks.NodeGroupTaintArgs']]]] = None,
version: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a ManagedNodeGroup resource.
:param pulumi.Input['CoreDataArgs'] cluster: The target EKS cluster.
:param pulumi.Input[str] ami_type: Type of Amazon Machine Image (AMI) associated with the EKS Node Group. Defaults to `AL2_x86_64`. Valid values: `AL2_x86_64`, `AL2_x86_64_GPU`, `AL2_ARM_64`. This provider will only perform drift detection if a configuration value is provided.
:param pulumi.Input[str] capacity_type: Type of capacity associated with the EKS Node Group. Valid values: `ON_DEMAND`, `SPOT`. This provider will only perform drift detection if a configuration value is provided.
:param pulumi.Input[str] cluster_name: Name of the EKS Cluster.
:param pulumi.Input[int] disk_size: Disk size in GiB for worker nodes. Defaults to `20`. This provider will only perform drift detection if a configuration value is provided.
:param pulumi.Input[bool] force_update_version: Force version update if existing pods are unable to be drained due to a pod disruption budget issue.
:param pulumi.Input[Sequence[pulumi.Input[str]]] instance_types: Set of instance types associated with the EKS Node Group. Defaults to `["t3.medium"]`. This provider will only perform drift detection if a configuration value is provided. Currently, the EKS API only accepts a single value in the set.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] labels: Key-value map of Kubernetes labels. Only labels that are applied with the EKS API are managed by this argument. Other Kubernetes labels applied to the EKS Node Group will not be managed.
:param pulumi.Input['pulumi_aws.eks.NodeGroupLaunchTemplateArgs'] launch_template: Launch Template settings.
:param pulumi.Input[str] node_group_name: Name of the EKS Node Group. If omitted, this provider will assign a random, unique name. Conflicts with `nodeGroupNamePrefix`.
:param pulumi.Input[str] node_group_name_prefix: Creates a unique name beginning with the specified prefix. Conflicts with `nodeGroupName`.
:param pulumi.Input['pulumi_aws.iam.Role'] node_role: The IAM Role that provides permissions for the EKS Node Group.
Note, `nodeRole` and `nodeRoleArn` are mutually exclusive, and a single option must be used.
:param pulumi.Input[str] node_role_arn: Amazon Resource Name (ARN) of the IAM Role that provides permissions for the EKS Node Group.
Note, `nodeRoleArn` and `nodeRole` are mutually exclusive, and a single option must be used.
:param pulumi.Input[str] release_version: AMI version of the EKS Node Group. Defaults to latest version for Kubernetes version.
:param pulumi.Input['pulumi_aws.eks.NodeGroupRemoteAccessArgs'] remote_access: Remote access settings.
:param pulumi.Input['pulumi_aws.eks.NodeGroupScalingConfigArgs'] scaling_config: Scaling settings.
Default scaling amounts of the node group autoscaling group are:
- desiredSize: 2
- minSize: 1
- maxSize: 2
:param pulumi.Input[Sequence[pulumi.Input[str]]] subnet_ids: Identifiers of EC2 Subnets to associate with the EKS Node Group. These subnets must have the following resource tag: `kubernetes.io/cluster/CLUSTER_NAME` (where `CLUSTER_NAME` is replaced with the name of the EKS Cluster).
Default subnetIds is chosen from the following list, in order, if subnetIds arg is not set:
- core.subnetIds
- core.privateIds
- core.publicSubnetIds
This default logic is based on the existing subnet IDs logic of this package: https://git.io/JeM11
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Key-value mapping of resource tags.
:param pulumi.Input[Sequence[pulumi.Input['pulumi_aws.eks.NodeGroupTaintArgs']]] taints: The Kubernetes taints to be applied to the nodes in the node group. Maximum of 50 taints per node group.
"""
pulumi.set(__self__, "cluster", cluster)
if ami_type is not None:
pulumi.set(__self__, "ami_type", ami_type)
if capacity_type is not None:
pulumi.set(__self__, "capacity_type", capacity_type)
if cluster_name is not None:
pulumi.set(__self__, "cluster_name", cluster_name)
if disk_size is not None:
pulumi.set(__self__, "disk_size", disk_size)
if force_update_version is not None:
pulumi.set(__self__, "force_update_version", force_update_version)
if instance_types is not None:
pulumi.set(__self__, "instance_types", instance_types)
if labels is not None:
pulumi.set(__self__, "labels", labels)
if launch_template is not None:
pulumi.set(__self__, "launch_template", launch_template)
if node_group_name is not None:
pulumi.set(__self__, "node_group_name", node_group_name)
if node_group_name_prefix is not None:
pulumi.set(__self__, "node_group_name_prefix", node_group_name_prefix)
if node_role is not None:
pulumi.set(__self__, "node_role", node_role)
if node_role_arn is not None:
pulumi.set(__self__, "node_role_arn", node_role_arn)
if release_version is not None:
pulumi.set(__self__, "release_version", release_version)
if remote_access is not None:
pulumi.set(__self__, "remote_access", remote_access)
if scaling_config is not None:
pulumi.set(__self__, "scaling_config", scaling_config)
if subnet_ids is not None:
pulumi.set(__self__, "subnet_ids", subnet_ids)
if tags is not None:
pulumi.set(__self__, "tags", tags)
if taints is not None:
pulumi.set(__self__, "taints", taints)
if version is not None:
pulumi.set(__self__, "version", version)
@property
@pulumi.getter
def cluster(self) -> pulumi.Input['CoreDataArgs']:
"""
The target EKS cluster.
"""
return pulumi.get(self, "cluster")
@cluster.setter
def cluster(self, value: pulumi.Input['CoreDataArgs']):
pulumi.set(self, "cluster", value)
@property
@pulumi.getter(name="amiType")
def ami_type(self) -> Optional[pulumi.Input[str]]:
"""
Type of Amazon Machine Image (AMI) associated with the EKS Node Group. Defaults to `AL2_x86_64`. Valid values: `AL2_x86_64`, `AL2_x86_64_GPU`, `AL2_ARM_64`. This provider will only perform drift detection if a configuration value is provided.
"""
return pulumi.get(self, "ami_type")
@ami_type.setter
def ami_type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "ami_type", value)
@property
@pulumi.getter(name="capacityType")
def capacity_type(self) -> Optional[pulumi.Input[str]]:
"""
Type of capacity associated with the EKS Node Group. Valid values: `ON_DEMAND`, `SPOT`. This provider will only perform drift detection if a configuration value is provided.
"""
return pulumi.get(self, "capacity_type")
@capacity_type.setter
def capacity_type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "capacity_type", value)
@property
@pulumi.getter(name="clusterName")
def cluster_name(self) -> Optional[pulumi.Input[str]]:
"""
Name of the EKS Cluster.
"""
return pulumi.get(self, "cluster_name")
@cluster_name.setter
def cluster_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "cluster_name", value)
@property
@pulumi.getter(name="diskSize")
def disk_size(self) -> Optional[pulumi.Input[int]]:
"""
Disk size in GiB for worker nodes. Defaults to `20`. This provider will only perform drift detection if a configuration value is provided.
"""
return pulumi.get(self, "disk_size")
@disk_size.setter
def disk_size(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "disk_size", value)
@property
@pulumi.getter(name="forceUpdateVersion")
def force_update_version(self) -> Optional[pulumi.Input[bool]]:
"""
Force version update if existing pods are unable to be drained due to a pod disruption budget issue.
"""
return pulumi.get(self, "force_update_version")
@force_update_version.setter
def force_update_version(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "force_update_version", value)
@property
@pulumi.getter(name="instanceTypes")
def instance_types(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
Set of instance types associated with the EKS Node Group. Defaults to `["t3.medium"]`. This provider will only perform drift detection if a configuration value is provided. Currently, the EKS API only accepts a single value in the set.
"""
return pulumi.get(self, "instance_types")
@instance_types.setter
def instance_types(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "instance_types", value)
@property
@pulumi.getter
def labels(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
Key-value map of Kubernetes labels. Only labels that are applied with the EKS API are managed by this argument. Other Kubernetes labels applied to the EKS Node Group will not be managed.
"""
return pulumi.get(self, "labels")
@labels.setter
def labels(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "labels", value)
@property
@pulumi.getter(name="launchTemplate")
def launch_template(self) -> Optional[pulumi.Input['pulumi_aws.eks.NodeGroupLaunchTemplateArgs']]:
"""
Launch Template settings.
"""
return pulumi.get(self, "launch_template")
@launch_template.setter
def launch_template(self, value: Optional[pulumi.Input['pulumi_aws.eks.NodeGroupLaunchTemplateArgs']]):
pulumi.set(self, "launch_template", value)
@property
@pulumi.getter(name="nodeGroupName")
def node_group_name(self) -> Optional[pulumi.Input[str]]:
"""
Name of the EKS Node Group. If omitted, this provider will assign a random, unique name. Conflicts with `nodeGroupNamePrefix`.
"""
return pulumi.get(self, "node_group_name")
@node_group_name.setter
def node_group_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "node_group_name", value)
@property
@pulumi.getter(name="nodeGroupNamePrefix")
def node_group_name_prefix(self) -> Optional[pulumi.Input[str]]:
"""
Creates a unique name beginning with the specified prefix. Conflicts with `nodeGroupName`.
"""
return pulumi.get(self, "node_group_name_prefix")
@node_group_name_prefix.setter
def node_group_name_prefix(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "node_group_name_prefix", value)
@property
@pulumi.getter(name="nodeRole")
def node_role(self) -> Optional[pulumi.Input['pulumi_aws.iam.Role']]:
"""
The IAM Role that provides permissions for the EKS Node Group.
Note, `nodeRole` and `nodeRoleArn` are mutually exclusive, and a single option must be used.
"""
return pulumi.get(self, "node_role")
@node_role.setter
def node_role(self, value: Optional[pulumi.Input['pulumi_aws.iam.Role']]):
pulumi.set(self, "node_role", value)
@property
@pulumi.getter(name="nodeRoleArn")
def node_role_arn(self) -> Optional[pulumi.Input[str]]:
"""
Amazon Resource Name (ARN) of the IAM Role that provides permissions for the EKS Node Group.
Note, `nodeRoleArn` and `nodeRole` are mutually exclusive, and a single option must be used.
"""
return pulumi.get(self, "node_role_arn")
@node_role_arn.setter
def node_role_arn(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "node_role_arn", value)
@property
@pulumi.getter(name="releaseVersion")
def release_version(self) -> Optional[pulumi.Input[str]]:
"""
AMI version of the EKS Node Group. Defaults to latest version for Kubernetes version.
"""
return pulumi.get(self, "release_version")
@release_version.setter
def release_version(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "release_version", value)
@property
@pulumi.getter(name="remoteAccess")
def remote_access(self) -> Optional[pulumi.Input['pulumi_aws.eks.NodeGroupRemoteAccessArgs']]:
"""
Remote access settings.
"""
return pulumi.get(self, "remote_access")
@remote_access.setter
def remote_access(self, value: Optional[pulumi.Input['pulumi_aws.eks.NodeGroupRemoteAccessArgs']]):
pulumi.set(self, "remote_access", value)
@property
@pulumi.getter(name="scalingConfig")
def scaling_config(self) -> Optional[pulumi.Input['pulumi_aws.eks.NodeGroupScalingConfigArgs']]:
"""
Scaling settings.
Default scaling amounts of the node group autoscaling group are:
- desiredSize: 2
- minSize: 1
- maxSize: 2
"""
return pulumi.get(self, "scaling_config")
@scaling_config.setter
def scaling_config(self, value: Optional[pulumi.Input['pulumi_aws.eks.NodeGroupScalingConfigArgs']]):
pulumi.set(self, "scaling_config", value)
@property
@pulumi.getter(name="subnetIds")
def subnet_ids(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
Identifiers of EC2 Subnets to associate with the EKS Node Group. These subnets must have the following resource tag: `kubernetes.io/cluster/CLUSTER_NAME` (where `CLUSTER_NAME` is replaced with the name of the EKS Cluster).
Default subnetIds is chosen from the following list, in order, if subnetIds arg is not set:
- core.subnetIds
- core.privateIds
- core.publicSubnetIds
This default logic is based on the existing subnet IDs logic of this package: https://git.io/JeM11
"""
return pulumi.get(self, "subnet_ids")
@subnet_ids.setter
def subnet_ids(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "subnet_ids", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
Key-value mapping of resource tags.
"""
return pulumi.get(self, "tags")
@tags.setter
def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "tags", value)
@property
@pulumi.getter
def taints(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['pulumi_aws.eks.NodeGroupTaintArgs']]]]:
"""
The Kubernetes taints to be applied to the nodes in the node group. Maximum of 50 taints per node group.
"""
return pulumi.get(self, "taints")
@taints.setter
def taints(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['pulumi_aws.eks.NodeGroupTaintArgs']]]]):
pulumi.set(self, "taints", value)
@property
@pulumi.getter
def version(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "version")
@version.setter
def version(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "version", value)
class ManagedNodeGroup(pulumi.ComponentResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
ami_type: Optional[pulumi.Input[str]] = None,
capacity_type: Optional[pulumi.Input[str]] = None,
cluster: Optional[pulumi.Input[pulumi.InputType['CoreDataArgs']]] = None,
cluster_name: Optional[pulumi.Input[str]] = None,
disk_size: Optional[pulumi.Input[int]] = None,
force_update_version: Optional[pulumi.Input[bool]] = None,
instance_types: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
launch_template: Optional[pulumi.Input[pulumi.InputType['pulumi_aws.eks.NodeGroupLaunchTemplateArgs']]] = None,
node_group_name: Optional[pulumi.Input[str]] = None,
node_group_name_prefix: Optional[pulumi.Input[str]] = None,
node_role: Optional[pulumi.Input['pulumi_aws.iam.Role']] = None,
node_role_arn: Optional[pulumi.Input[str]] = None,
release_version: Optional[pulumi.Input[str]] = None,
remote_access: Optional[pulumi.Input[pulumi.InputType['pulumi_aws.eks.NodeGroupRemoteAccessArgs']]] = None,
scaling_config: Optional[pulumi.Input[pulumi.InputType['pulumi_aws.eks.NodeGroupScalingConfigArgs']]] = None,
subnet_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
taints: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['pulumi_aws.eks.NodeGroupTaintArgs']]]]] = None,
version: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
ManagedNodeGroup is a component that wraps creating an AWS managed node group.
See for more details:
https://docs.aws.amazon.com/eks/latest/userguide/managed-node-groups.html
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] ami_type: Type of Amazon Machine Image (AMI) associated with the EKS Node Group. Defaults to `AL2_x86_64`. Valid values: `AL2_x86_64`, `AL2_x86_64_GPU`, `AL2_ARM_64`. This provider will only perform drift detection if a configuration value is provided.
:param pulumi.Input[str] capacity_type: Type of capacity associated with the EKS Node Group. Valid values: `ON_DEMAND`, `SPOT`. This provider will only perform drift detection if a configuration value is provided.
:param pulumi.Input[pulumi.InputType['CoreDataArgs']] cluster: The target EKS cluster.
:param pulumi.Input[str] cluster_name: Name of the EKS Cluster.
:param pulumi.Input[int] disk_size: Disk size in GiB for worker nodes. Defaults to `20`. This provider will only perform drift detection if a configuration value is provided.
:param pulumi.Input[bool] force_update_version: Force version update if existing pods are unable to be drained due to a pod disruption budget issue.
:param pulumi.Input[Sequence[pulumi.Input[str]]] instance_types: Set of instance types associated with the EKS Node Group. Defaults to `["t3.medium"]`. This provider will only perform drift detection if a configuration value is provided. Currently, the EKS API only accepts a single value in the set.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] labels: Key-value map of Kubernetes labels. Only labels that are applied with the EKS API are managed by this argument. Other Kubernetes labels applied to the EKS Node Group will not be managed.
:param pulumi.Input[pulumi.InputType['pulumi_aws.eks.NodeGroupLaunchTemplateArgs']] launch_template: Launch Template settings.
:param pulumi.Input[str] node_group_name: Name of the EKS Node Group. If omitted, this provider will assign a random, unique name. Conflicts with `nodeGroupNamePrefix`.
:param pulumi.Input[str] node_group_name_prefix: Creates a unique name beginning with the specified prefix. Conflicts with `nodeGroupName`.
:param pulumi.Input['pulumi_aws.iam.Role'] node_role: The IAM Role that provides permissions for the EKS Node Group.
Note, `nodeRole` and `nodeRoleArn` are mutually exclusive, and a single option must be used.
:param pulumi.Input[str] node_role_arn: Amazon Resource Name (ARN) of the IAM Role that provides permissions for the EKS Node Group.
Note, `nodeRoleArn` and `nodeRole` are mutually exclusive, and a single option must be used.
:param pulumi.Input[str] release_version: AMI version of the EKS Node Group. Defaults to latest version for Kubernetes version.
:param pulumi.Input[pulumi.InputType['pulumi_aws.eks.NodeGroupRemoteAccessArgs']] remote_access: Remote access settings.
:param pulumi.Input[pulumi.InputType['pulumi_aws.eks.NodeGroupScalingConfigArgs']] scaling_config: Scaling settings.
Default scaling amounts of the node group autoscaling group are:
- desiredSize: 2
- minSize: 1
- maxSize: 2
:param pulumi.Input[Sequence[pulumi.Input[str]]] subnet_ids: Identifiers of EC2 Subnets to associate with the EKS Node Group. These subnets must have the following resource tag: `kubernetes.io/cluster/CLUSTER_NAME` (where `CLUSTER_NAME` is replaced with the name of the EKS Cluster).
Default subnetIds is chosen from the following list, in order, if subnetIds arg is not set:
- core.subnetIds
- core.privateIds
- core.publicSubnetIds
This default logic is based on the existing subnet IDs logic of this package: https://git.io/JeM11
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Key-value mapping of resource tags.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['pulumi_aws.eks.NodeGroupTaintArgs']]]] taints: The Kubernetes taints to be applied to the nodes in the node group. Maximum of 50 taints per node group.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: ManagedNodeGroupArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
ManagedNodeGroup is a component that wraps creating an AWS managed node group.
See for more details:
https://docs.aws.amazon.com/eks/latest/userguide/managed-node-groups.html
:param str resource_name: The name of the resource.
:param ManagedNodeGroupArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(ManagedNodeGroupArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
ami_type: Optional[pulumi.Input[str]] = None,
capacity_type: Optional[pulumi.Input[str]] = None,
cluster: Optional[pulumi.Input[pulumi.InputType['CoreDataArgs']]] = None,
cluster_name: Optional[pulumi.Input[str]] = None,
disk_size: Optional[pulumi.Input[int]] = None,
force_update_version: Optional[pulumi.Input[bool]] = None,
instance_types: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
launch_template: Optional[pulumi.Input[pulumi.InputType['pulumi_aws.eks.NodeGroupLaunchTemplateArgs']]] = None,
node_group_name: Optional[pulumi.Input[str]] = None,
node_group_name_prefix: Optional[pulumi.Input[str]] = None,
node_role: Optional[pulumi.Input['pulumi_aws.iam.Role']] = None,
node_role_arn: Optional[pulumi.Input[str]] = None,
release_version: Optional[pulumi.Input[str]] = None,
remote_access: Optional[pulumi.Input[pulumi.InputType['pulumi_aws.eks.NodeGroupRemoteAccessArgs']]] = None,
scaling_config: Optional[pulumi.Input[pulumi.InputType['pulumi_aws.eks.NodeGroupScalingConfigArgs']]] = None,
subnet_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
taints: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['pulumi_aws.eks.NodeGroupTaintArgs']]]]] = None,
version: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is not None:
raise ValueError('ComponentResource classes do not support opts.id')
else:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = ManagedNodeGroupArgs.__new__(ManagedNodeGroupArgs)
__props__.__dict__["ami_type"] = ami_type
__props__.__dict__["capacity_type"] = capacity_type
if cluster is None and not opts.urn:
raise TypeError("Missing required property 'cluster'")
__props__.__dict__["cluster"] = cluster
__props__.__dict__["cluster_name"] = cluster_name
__props__.__dict__["disk_size"] = disk_size
__props__.__dict__["force_update_version"] = force_update_version
__props__.__dict__["instance_types"] = instance_types
__props__.__dict__["labels"] = labels
__props__.__dict__["launch_template"] = launch_template
__props__.__dict__["node_group_name"] = node_group_name
__props__.__dict__["node_group_name_prefix"] = node_group_name_prefix
__props__.__dict__["node_role"] = node_role
__props__.__dict__["node_role_arn"] = node_role_arn
__props__.__dict__["release_version"] = release_version
__props__.__dict__["remote_access"] = remote_access
__props__.__dict__["scaling_config"] = scaling_config
__props__.__dict__["subnet_ids"] = subnet_ids
__props__.__dict__["tags"] = tags
__props__.__dict__["taints"] = taints
__props__.__dict__["version"] = version
__props__.__dict__["node_group"] = None
super(ManagedNodeGroup, __self__).__init__(
'eks:index:ManagedNodeGroup',
resource_name,
__props__,
opts,
remote=True)
@property
@pulumi.getter(name="nodeGroup")
def node_group(self) -> pulumi.Output['pulumi_aws.eks.NodeGroup']:
"""
The AWS managed node group.
"""
return pulumi.get(self, "node_group")
|
__doc__="""An experimental SVG renderer for the ReportLab graphics framework.
This will create SVG code from the ReportLab Graphics API (RLG).
To read existing SVG code and convert it into ReportLab graphics
objects download the svglib module here:
http://python.net/~gherman/#svglib
"""
import math, types, sys, os, codecs, base64
from operator import getitem
from reportlab.pdfbase.pdfmetrics import stringWidth # for font info
from reportlab.lib.rl_accel import fp_str
from reportlab.lib.colors import black
from reportlab.lib.utils import asNative, getBytesIO
from reportlab.graphics.renderbase import StateTracker, getStateDelta, Renderer, renderScaledDrawing
from reportlab.graphics.shapes import STATE_DEFAULTS, Path, UserNode
from reportlab.graphics.shapes import * # (only for test0)
from reportlab import rl_config
from reportlab.lib.utils import getStringIO, RLString, isUnicode, isBytes
from reportlab.pdfgen.canvas import FILL_EVEN_ODD, FILL_NON_ZERO
from .renderPM import _getImage
from xml.dom import getDOMImplementation
### some constants ###
sin = math.sin
cos = math.cos
pi = math.pi
AREA_STYLES = 'stroke-width stroke-linecap stroke stroke-opacity fill fill-opacity stroke-dasharray stroke-dashoffset fill-rule id'.split()
LINE_STYLES = 'stroke-width stroke-linecap stroke stroke-opacity stroke-dasharray stroke-dashoffset id'.split()
TEXT_STYLES = 'font-family font-weight font-style font-variant font-size id'.split()
EXTRA_STROKE_STYLES = 'stroke-width stroke-linecap stroke stroke-opacity stroke-dasharray stroke-dashoffset'.split()
EXTRA_FILL_STYLES = 'fill fill-opacity'.split()
### top-level user function ###
def drawToString(d, showBoundary=rl_config.showBoundary,**kwds):
"Returns a SVG as a string in memory, without touching the disk"
s = getStringIO()
drawToFile(d, s, showBoundary=showBoundary,**kwds)
return s.getvalue()
def drawToFile(d, fn, showBoundary=rl_config.showBoundary,**kwds):
d = renderScaledDrawing(d)
c = SVGCanvas((d.width, d.height),**kwds)
draw(d, c, 0, 0, showBoundary=showBoundary)
c.save(fn)
def draw(drawing, canvas, x=0, y=0, showBoundary=rl_config.showBoundary):
"""As it says."""
r = _SVGRenderer()
r.draw(renderScaledDrawing(drawing), canvas, x, y, showBoundary=showBoundary)
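# Minimal usage sketch (illustrative only; _svg_demo is not part of the original
# module): build a trivial RLG drawing and render it to an SVG string.
def _svg_demo():
    d = Drawing(100, 100)                          # Drawing/Rect come from the shapes import above
    d.add(Rect(10, 10, 80, 80, fillColor=black))   # a filled square
    return drawToString(d)                         # -> a full SVG document as a string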
### helper functions ###
def _pointsFromList(L):
"""
given a list of coordinates [x0, y0, x1, y1....]
    produce a list of points [(x0,y0), (x1,y1), ....]
"""
P=[]
for i in range(0,len(L), 2):
P.append((L[i], L[i+1]))
return P
def transformNode(doc, newTag, node=None, **attrDict):
"""Transform a DOM node into new node and copy selected attributes.
Creates a new DOM node with tag name 'newTag' for document 'doc'
and copies selected attributes from an existing 'node' as provided
in 'attrDict'. The source 'node' can be None. Attribute values will
be converted to strings.
E.g.
n = transformNode(doc, "node1", x="0", y="1")
-> DOM node for <node1 x="0" y="1"/>
n = transformNode(doc, "node1", x=0, y=1+1)
-> DOM node for <node1 x="0" y="2"/>
n = transformNode(doc, "node1", node0, x="x0", y="x0", zoo=bar())
-> DOM node for <node1 x="[node0.x0]" y="[node0.y0]" zoo="[bar()]"/>
"""
newNode = doc.createElement(newTag)
for newAttr, attr in attrDict.items():
sattr = str(attr)
if not node:
newNode.setAttribute(newAttr, sattr)
else:
attrVal = node.getAttribute(sattr)
newNode.setAttribute(newAttr, attrVal or sattr)
return newNode
class EncodedWriter(list):
'''
    EncodedWriter(encoding) assumes .write will be called with either unicode
    strings or utf-8 encoded bytes; it accumulates the decoded text, which is
    returned by getvalue().
'''
BOMS = {
'utf-32':codecs.BOM_UTF32,
'utf-32-be':codecs.BOM_UTF32_BE,
'utf-32-le':codecs.BOM_UTF32_LE,
'utf-16':codecs.BOM_UTF16,
'utf-16-be':codecs.BOM_UTF16_BE,
'utf-16-le':codecs.BOM_UTF16_LE,
}
def __init__(self,encoding,bom=False):
list.__init__(self)
self.encoding = encoding = codecs.lookup(encoding).name
        if bom and ('16' in encoding or '32' in encoding):
self.write(self.BOMS[encoding])
def write(self,u):
if isBytes(u):
try:
u = u.decode('utf-8')
except:
et, ev, tb = sys.exc_info()
ev = str(ev)
del et, tb
raise ValueError("String %r not encoded as 'utf-8'\nerror=%s" % (u,ev))
elif not isUnicode(u):
raise ValueError("EncodedWriter.write(%s) argument should be 'utf-8' bytes or str" % ascii(u))
self.append(u)
def getvalue(self):
r = ''.join(self)
del self[:]
return r
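# Minimal usage sketch for EncodedWriter (illustrative only; _encoded_writer_demo is
# not part of the original module): .write accepts str or utf-8 bytes, and getvalue()
# returns the accumulated text.
def _encoded_writer_demo():
    w = EncodedWriter('utf-8')
    w.write(u'hello ')      # unicode text is stored as-is
    w.write(b'world')       # utf-8 bytes are decoded before being stored
    return w.getvalue()     # -> 'hello world' (and the internal buffer is cleared)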
_fillRuleMap = {
FILL_NON_ZERO: 'nonzero',
'non-zero': 'nonzero',
'nonzero': 'nonzero',
FILL_EVEN_ODD: 'evenodd',
'even-odd': 'evenodd',
'evenodd': 'evenodd',
}
def py_fp_str(*args):
return ' '.join((('%f' % a).rstrip('0').rstrip('.') for a in args))
### classes ###
class SVGCanvas:
def __init__(self, size=(300,300), encoding='utf-8', verbose=0, bom=False, **kwds):
'''
        verbose = 0        >0 means emit verbose/debug output
        useClip = False    True means don't use a clipPath definition; put the global
                           clip into the clip property to work around a Safari issue
extraXmlDecl = '' use to add extra xml declarations
scaleGroupId = '' id of an extra group to add around the drawing to allow easy scaling
svgAttrs = {} dictionary of attributes to be applied to the svg tag itself
'''
self.verbose = verbose
self.encoding = codecs.lookup(encoding).name
self.bom = bom
useClip = kwds.pop('useClip',False)
self.fontHacks = kwds.pop('fontHacks',{})
self.extraXmlDecl = kwds.pop('extraXmlDecl','')
scaleGroupId = kwds.pop('scaleGroupId','')
self._fillMode = FILL_EVEN_ODD
self.width, self.height = self.size = size
# self.height = size[1]
self.code = []
self.style = {}
self.path = ''
self._strokeColor = self._fillColor = self._lineWidth = \
self._font = self._fontSize = self._lineCap = \
self._lineJoin = None
if kwds.pop('use_fp_str',False):
self.fp_str = fp_str
else:
self.fp_str = py_fp_str
self.cfp_str = lambda *args: self.fp_str(*args).replace(' ',',')
implementation = getDOMImplementation('minidom')
#Based on official example here http://www.w3.org/TR/SVG10/linking.html want:
#<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 20010904//EN"
# "http://www.w3.org/TR/2001/REC-SVG-20010904/DTD/svg10.dtd">
#Thus,
#doctype = implementation.createDocumentType("svg",
# "-//W3C//DTD SVG 20010904//EN",
# "http://www.w3.org/TR/2001/REC-SVG-20010904/DTD/svg10.dtd")
#
#However, putting that example through http://validator.w3.org/ recommends:
#<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.0//EN"
# "http://www.w3.org/TR/2001/REC-SVG-20010904/DTD/svg10.dtd">
#So we'll use that for our SVG 1.0 output.
doctype = implementation.createDocumentType("svg",
"-//W3C//DTD SVG 1.0//EN",
"http://www.w3.org/TR/2001/REC-SVG-20010904/DTD/svg10.dtd")
self.doc = implementation.createDocument(None,"svg",doctype)
self.svg = self.doc.documentElement
svgAttrs = dict(
width = str(size[0]),
height=str(self.height),
preserveAspectRatio="xMinYMin meet",
viewBox="0 0 %d %d" % (self.width, self.height),
#baseProfile = "full", #disliked in V 1.0
#these suggested by Tim Roberts, as updated by peter@maubp.freeserve.co.uk
xmlns="http://www.w3.org/2000/svg",
version="1.0",
)
svgAttrs['fill-rule'] = _fillRuleMap[self._fillMode]
svgAttrs["xmlns:xlink"] = "http://www.w3.org/1999/xlink"
svgAttrs.update(kwds.pop('svgAttrs',{}))
for k,v in svgAttrs.items():
self.svg.setAttribute(k,v)
title = self.doc.createElement('title')
text = self.doc.createTextNode('...')
title.appendChild(text)
self.svg.appendChild(title)
desc = self.doc.createElement('desc')
text = self.doc.createTextNode('...')
desc.appendChild(text)
self.svg.appendChild(desc)
self.setFont(STATE_DEFAULTS['fontName'], STATE_DEFAULTS['fontSize'])
self.setStrokeColor(STATE_DEFAULTS['strokeColor'])
self.setLineCap(2)
self.setLineJoin(0)
self.setLineWidth(1)
if not useClip:
# Add a rectangular clipping path identical to view area.
clipPath = transformNode(self.doc, "clipPath", id="clip")
clipRect = transformNode(self.doc, "rect", x=0, y=0,
width=self.width, height=self.height)
clipPath.appendChild(clipRect)
self.svg.appendChild(clipPath)
gtkw = dict(style="clip-path: url(#clip)")
else:
gtkw = dict(clip="0 0 %d %d" % (self.width,self.height))
self.groupTree = transformNode(self.doc, "g",
id="group",
transform="scale(1,-1) translate(0,-%d)" % self.height,
**gtkw
)
if scaleGroupId:
self.scaleTree = transformNode(self.doc, "g", id=scaleGroupId, transform="scale(1,1)")
self.scaleTree.appendChild(self.groupTree)
self.svg.appendChild(self.scaleTree)
else:
self.svg.appendChild(self.groupTree)
self.currGroup = self.groupTree
def save(self, fn=None):
writer = EncodedWriter(self.encoding,bom=self.bom)
self.doc.writexml(writer,addindent="\t",newl="\n",encoding=self.encoding)
if hasattr(fn,'write'):
f = fn
else:
f = open(fn, 'w',encoding=self.encoding)
svg = writer.getvalue()
exd = self.extraXmlDecl
if exd:
svg = svg.replace('?>','?>'+exd)
f.write(svg)
if f is not fn:
f.close()
### helpers ###
def NOTUSED_stringWidth(self, s, font=None, fontSize=None):
"""Return the logical width of the string if it were drawn
in the current font (defaults to self.font).
"""
font = font or self._font
fontSize = fontSize or self._fontSize
return stringWidth(s, font, fontSize)
def _formatStyle(self, include=[], exclude='',**kwds):
style = self.style.copy()
style.update(kwds)
keys = list(style.keys())
if include:
keys = [k for k in keys if k in include]
if exclude:
exclude = exclude.split()
items = [k+': '+str(style[k]) for k in keys if k not in exclude]
else:
items = [k+': '+str(style[k]) for k in keys]
return '; '.join(items) + ';'
def _escape(self, s):
'''I don't think this was ever needed; seems to have been copied from renderPS'''
return s
def _genArcCode(self, x1, y1, x2, y2, startAng, extent):
"""Calculate the path for an arc inscribed in rectangle defined
by (x1,y1),(x2,y2)."""
return
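        # NOTE: everything below is unreachable legacy code (PostScript-style arc
        # generation apparently carried over from renderPS); the early return above disables it.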
#calculate semi-minor and semi-major axes of ellipse
xScale = abs((x2-x1)/2.0)
yScale = abs((y2-y1)/2.0)
#calculate centre of ellipse
x, y = (x1+x2)/2.0, (y1+y2)/2.0
codeline = 'matrix currentmatrix %s %s translate %s %s scale 0 0 1 %s %s %s setmatrix'
if extent >= 0:
arc='arc'
else:
arc='arcn'
data = (x,y, xScale, yScale, startAng, startAng+extent, arc)
return codeline % data
def _fillAndStroke(self, code, clip=0, link_info=None,styles=AREA_STYLES,fillMode=None):
xtra = {}
if fillMode:
xtra['fill-rule'] = _fillRuleMap[fillMode]
path = transformNode(self.doc, "path",
d=self.path, style=self._formatStyle(styles, **xtra),
)
if link_info :
path = self._add_link(path, link_info)
self.currGroup.appendChild(path)
self.path = ''
### styles ###
def setLineCap(self, v):
vals = {0:'butt', 1:'round', 2:'square'}
if self._lineCap != v:
self._lineCap = v
self.style['stroke-linecap'] = vals[v]
def setLineJoin(self, v):
vals = {0:'miter', 1:'round', 2:'bevel'}
if self._lineJoin != v:
self._lineJoin = v
self.style['stroke-linejoin'] = vals[v]
def setDash(self, array=[], phase=0):
"""Two notations. Pass two numbers, or an array and phase."""
if isinstance(array,(float,int)):
self.style['stroke-dasharray'] = ', '.join(map(str, ([array, phase])))
elif isinstance(array,(tuple,list)) and len(array) > 0:
assert phase >= 0, "phase is a length in user space"
self.style['stroke-dasharray'] = ', '.join(map(str, array))
if phase>0:
self.style['stroke-dashoffset'] = str(phase)
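# Example (illustrative sketch of the two notations accepted above):
#   canvas.setDash(6, 3)          -> style['stroke-dasharray'] = '6, 3'
#   canvas.setDash([5, 2, 1], 4)  -> style['stroke-dasharray'] = '5, 2, 1'
#                                    style['stroke-dashoffset'] = '4'
# `canvas` stands for any SVGCanvas instance; the values are hypothetical.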
def setStrokeColor(self, color):
self._strokeColor = color
if color == None:
self.style['stroke'] = 'none'
else:
r, g, b = color.red, color.green, color.blue
self.style['stroke'] = 'rgb(%d%%,%d%%,%d%%)' % (r*100, g*100, b*100)
alpha = color.normalizedAlpha
if alpha!=1:
self.style['stroke-opacity'] = '%s' % alpha
elif 'stroke-opacity' in self.style:
del self.style['stroke-opacity']
def setFillColor(self, color):
self._fillColor = color
if color == None:
self.style['fill'] = 'none'
else:
r, g, b = color.red, color.green, color.blue
self.style['fill'] = 'rgb(%d%%,%d%%,%d%%)' % (r*100, g*100, b*100)
alpha = color.normalizedAlpha
if alpha!=1:
self.style['fill-opacity'] = '%s' % alpha
elif 'fill-opacity' in self.style:
del self.style['fill-opacity']
def setFillMode(self, v):
self._fillMode = v
self.style['fill-rule'] = _fillRuleMap[v]
def setLineWidth(self, width):
if width != self._lineWidth:
self._lineWidth = width
self.style['stroke-width'] = width
def setFont(self, font, fontSize):
if self._font != font or self._fontSize != fontSize:
self._font = font
self._fontSize = fontSize
style = self.style
for k in TEXT_STYLES:
if k in style:
del style[k]
svgAttrs = self.fontHacks[font] if font in self.fontHacks else {}
if isinstance(font,RLString):
svgAttrs.update(iter(font.svgAttrs.items()))
if svgAttrs:
for k,v in svgAttrs.items():
a = 'font-'+k
if a in TEXT_STYLES:
style[a] = v
if 'font-family' not in style:
style['font-family'] = font
style['font-size'] = '%spx' % fontSize
def _add_link(self, dom_object, link_info) :
assert isinstance(link_info, dict)
link = transformNode(self.doc, "a", **link_info)
link.appendChild(dom_object)
return link
### shapes ###
def rect(self, x1,y1, x2,y2, rx=8, ry=8, link_info=None, **_svgAttrs):
"Draw a rectangle between x1,y1 and x2,y2."
if self.verbose: print("+++ SVGCanvas.rect")
x = min(x1,x2)
y = min(y1,y2)
kwds = {}
rect = transformNode(self.doc, "rect",
x=x, y=y, width=max(x1,x2)-x, height=max(y1,y2)-y,
style=self._formatStyle(AREA_STYLES),**_svgAttrs)
if link_info :
rect = self._add_link(rect, link_info)
self.currGroup.appendChild(rect)
def roundRect(self, x1,y1, x2,y2, rx=8, ry=8, link_info=None, **_svgAttrs):
"""Draw a rounded rectangle between x1,y1 and x2,y2.
Corners inset as ellipses with x-radius rx and y-radius ry.
These should have x1<x2, y1<y2, rx>0, and ry>0.
"""
rect = transformNode(self.doc, "rect",
x=x1, y=y1, width=x2-x1, height=y2-y1, rx=rx, ry=ry,
style=self._formatStyle(AREA_STYLES), **_svgAttrs)
if link_info:
rect = self._add_link(rect, link_info)
self.currGroup.appendChild(rect)
def drawString(self, s, x, y, angle=0, link_info=None, text_anchor='left', textRenderMode=0, **_svgAttrs):
if textRenderMode==3: return #invisible
s = asNative(s)
if self.verbose: print("+++ SVGCanvas.drawString")
needFill = textRenderMode==0 or textRenderMode==2 or textRenderMode==4 or textRenderMode==6
needStroke = textRenderMode==1 or textRenderMode==2 or textRenderMode==5 or textRenderMode==6
if (self._fillColor!=None and needFill) or (self._strokeColor!=None and needStroke):
if not text_anchor in ['start', 'inherited', 'left']:
textLen = stringWidth(s,self._font,self._fontSize)
if text_anchor=='end':
x -= textLen
elif text_anchor=='middle':
x -= textLen/2.
elif text_anchor=='numeric':
x -= numericXShift(text_anchor,s,textLen,self._font,self._fontSize)
else:
raise ValueError('bad value for text_anchor ' + str(text_anchor))
s = self._escape(s)
st = self._formatStyle(TEXT_STYLES)
if angle != 0:
st = st + " rotate(%s);" % self.fp_str(angle, x, y)
if needFill:
st += self._formatStyle(EXTRA_FILL_STYLES)
else:
st += " fill:none;"
if needStroke:
st += self._formatStyle(EXTRA_STROKE_STYLES)
else:
st += " stroke:none;"
#if textRenderMode>=4:
# _gstate_clipPathSetOrAddself, -1, 1, 0 /*we are adding*/
text = transformNode(self.doc, "text",
x=x, y=y, style=st,
transform="translate(0,%d) scale(1,-1)" % (2*y),
**_svgAttrs
)
content = self.doc.createTextNode(s)
text.appendChild(content)
if link_info:
text = self._add_link(text, link_info)
self.currGroup.appendChild(text)
def drawCentredString(self, s, x, y, angle=0, text_anchor='middle',
link_info=None, textRenderMode=0, **_svgAttrs):
if self.verbose: print("+++ SVGCanvas.drawCentredString")
self.drawString(s,x,y,angle=angle, link_info=link_info, text_anchor=text_anchor,
textRenderMode=textRenderMode, **_svgAttrs)
def drawRightString(self, text, x, y, angle=0,text_anchor='end',
link_info=None, textRenderMode=0, **_svgAttrs):
if self.verbose: print("+++ SVGCanvas.drawRightString")
self.drawString(text,x,y,angle=angle, link_info=link_info, text_anchor=text_anchor,
textRenderMode=textRenderMode, **_svgAttrs)
def comment(self, data):
"Add a comment."
comment = self.doc.createComment(data)
# self.currGroup.appendChild(comment)
def drawImage(self, image, x, y, width, height, embed=True):
buf = getBytesIO()
image.save(buf,'png')
buf = asNative(base64.b64encode(buf.getvalue()))
self.currGroup.appendChild(
transformNode(self.doc,'image',
x=x,y=y,width=width,height=height,
href="data:image/png;base64,"+buf,
transform="matrix(%s)" % self.cfp_str(1,0,0,-1,0,height+2*y),
)
)
def line(self, x1, y1, x2, y2):
if self._strokeColor != None:
if 0: # something is wrong with line in my SVG viewer...
line = transformNode(self.doc, "line",
x=x1, y=y1, x2=x2, y2=y2,
style=self._formatStyle(LINE_STYLES))
self.currGroup.appendChild(line)
path = transformNode(self.doc, "path",
d="M %s L %s Z" % (self.cfp_str(x1,y1),self.cfp_str(x2,y2)),
style=self._formatStyle(LINE_STYLES))
self.currGroup.appendChild(path)
def ellipse(self, x1, y1, x2, y2, link_info=None):
"""Draw an orthogonal ellipse inscribed within the rectangle x1,y1,x2,y2.
These should have x1<x2 and y1<y2.
"""
ellipse = transformNode(self.doc, "ellipse",
cx=(x1+x2)/2.0, cy=(y1+y2)/2.0, rx=(x2-x1)/2.0, ry=(y2-y1)/2.0,
style=self._formatStyle(AREA_STYLES))
if link_info:
ellipse = self._add_link(ellipse, link_info)
self.currGroup.appendChild(ellipse)
def circle(self, xc, yc, r, link_info=None):
circle = transformNode(self.doc, "circle",
cx=xc, cy=yc, r=r,
style=self._formatStyle(AREA_STYLES))
if link_info:
circle = self._add_link(circle, link_info)
self.currGroup.appendChild(circle)
def drawCurve(self, x1, y1, x2, y2, x3, y3, x4, y4, closed=0):
pass
return
codeline = '%s m %s curveto'
data = (fp_str(x1, y1), fp_str(x2, y2, x3, y3, x4, y4))
if self._fillColor != None:
self.code.append((codeline % data) + ' eofill')
if self._strokeColor != None:
self.code.append((codeline % data)
+ ((closed and ' closepath') or '')
+ ' stroke')
def drawArc(self, x1,y1, x2,y2, startAng=0, extent=360, fromcenter=0):
"""Draw a partial ellipse inscribed within the rectangle x1,y1,x2,y2.
Starting at startAng degrees and covering extent degrees. Angles
start with 0 to the right (+x) and increase counter-clockwise.
These should have x1<x2 and y1<y2.
"""
cx, cy = (x1+x2)/2.0, (y1+y2)/2.0
rx, ry = (x2-x1)/2.0, (y2-y1)/2.0
mx = rx * cos(startAng*pi/180) + cx
my = ry * sin(startAng*pi/180) + cy
ax = rx * cos((startAng+extent)*pi/180) + cx
ay = ry * sin((startAng+extent)*pi/180) + cy
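# (mx, my) is the point on the ellipse at startAng, (ax, ay) the point at
# startAng+extent; the SVG "A" command's large-arc flag below is set when
# extent >= 180.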
cfp_str = self.cfp_str
s = []
if fromcenter:
s.append("M %s L %s" % (cfp_str(cx, cy), cfp_str(ax, ay)))
if fromcenter:
s.append("A %s %d %d %d %s" % \
(cfp_str(rx, ry), 0, extent>=180, 0, cfp_str(mx, my)))
else:
s.append("M %s A %s %d %d %d %s Z" % \
(cfp_str(mx, my), cfp_str(rx, ry), 0, extent>=180, 0, cfp_str(ax, ay)))
if fromcenter:
s.append("L %s Z" % cfp_str(cx, cy))
path = transformNode(self.doc, "path",
d=' '.join(s), style=self._formatStyle())
self.currGroup.appendChild(path)
def polygon(self, points, closed=0, link_info=None):
assert len(points) >= 2, 'Polygon must have 2 or more points'
if self._strokeColor!=None or self._fillColor!=None:
pts = ', '.join([fp_str(*p) for p in points])
polyline = transformNode(self.doc, "polygon",
points=pts, style=self._formatStyle(AREA_STYLES))
if link_info:
polyline = self._add_link(polyline, link_info)
self.currGroup.appendChild(polyline)
# self._fillAndStroke(polyCode)
def lines(self, lineList, color=None, width=None):
# print "### lineList", lineList
return
if self._strokeColor != None:
codeline = '%s m %s l stroke'
for line in lineList:
self.code.append(codeline % (fp_str(line[0]), fp_str(line[1])))
def polyLine(self, points):
assert len(points) >= 1, 'Polyline must have 1 or more points'
if self._strokeColor != None:
pts = ', '.join([fp_str(*p) for p in points])
polyline = transformNode(self.doc, "polyline",
points=pts, style=self._formatStyle(AREA_STYLES,fill=None))
self.currGroup.appendChild(polyline)
### groups ###
def startGroup(self,attrDict=dict(transform="")):
if self.verbose: print("+++ begin SVGCanvas.startGroup")
currGroup = self.currGroup
group = transformNode(self.doc, "g", **attrDict)
currGroup.appendChild(group)
self.currGroup = group
if self.verbose: print("+++ end SVGCanvas.startGroup")
return currGroup
def endGroup(self,currGroup):
if self.verbose: print("+++ begin SVGCanvas.endGroup")
self.currGroup = currGroup
if self.verbose: print("+++ end SVGCanvas.endGroup")
def transform(self, a, b, c, d, e, f):
if self.verbose: print("!!! begin SVGCanvas.transform", a, b, c, d, e, f)
tr = self.currGroup.getAttribute("transform")
if (a, b, c, d, e, f) != (1, 0, 0, 1, 0, 0):
t = 'matrix(%s)' % self.cfp_str(a,b,c,d,e,f)
self.currGroup.setAttribute("transform", "%s %s" % (tr, t))
def translate(self, x, y):
if (x,y) != (0,0):
self.currGroup.setAttribute("transform", "%s %s"
% (self.currGroup.getAttribute("transform"),
'translate(%s)' % self.cfp_str(x,y)))
def scale(self, sx, sy):
if (sx,sy) != (1,1):
self.currGroup.setAttribute("transform", "%s %s"
% (self.groups[-1].getAttribute("transform"),
'scale(%s)' % self.cfp_str(sx, sy)))
### paths ###
def moveTo(self, x, y):
self.path = self.path + 'M %s ' % self.fp_str(x, y)
def lineTo(self, x, y):
self.path = self.path + 'L %s ' % self.fp_str(x, y)
def curveTo(self, x1, y1, x2, y2, x3, y3):
self.path = self.path + 'C %s ' % self.fp_str(x1, y1, x2, y2, x3, y3)
def closePath(self):
self.path = self.path + 'Z '
def saveState(self):
pass
def restoreState(self):
pass
class _SVGRenderer(Renderer):
"""This draws onto an SVG document.
"""
def __init__(self):
self.verbose = 0
def drawNode(self, node):
"""This is the recursive method called for each node in the tree.
"""
if self.verbose: print("### begin _SVGRenderer.drawNode(%r)" % node)
self._canvas.comment('begin node %r'%node)
style = self._canvas.style.copy()
if not (isinstance(node, Path) and node.isClipPath):
pass # self._canvas.saveState()
#apply state changes
deltas = getStateDelta(node)
self._tracker.push(deltas)
self.applyStateChanges(deltas, {})
#draw the object, or recurse
self.drawNodeDispatcher(node)
rDeltas = self._tracker.pop()
if not (isinstance(node, Path) and node.isClipPath):
pass #self._canvas.restoreState()
self._canvas.comment('end node %r'%node)
#restore things we might have lost (without actually doing anything).
for k, v in rDeltas.items():
if k in self._restores:
setattr(self._canvas,self._restores[k],v)
self._canvas.style = style
if self.verbose: print("### end _SVGRenderer.drawNode(%r)" % node)
_restores = {'strokeColor':'_strokeColor','strokeWidth': '_lineWidth','strokeLineCap':'_lineCap',
'strokeLineJoin':'_lineJoin','fillColor':'_fillColor','fontName':'_font',
'fontSize':'_fontSize'}
def _get_link_info_dict(self, obj):
#We do not want None or False as the link, even if it is the
#attribute's value - use the empty string instead.
url = getattr(obj, "hrefURL", "") or ""
title = getattr(obj, "hrefTitle", "") or ""
if url :
#Is it valid to have a link with no href? The XML requires
#the xlink:href to be present, but you might just want a
#tool tip shown (via the xlink:title attribute). Note that
#giving an href of "" is equivalent to "the current page"
#(a relative link saying go nowhere).
return {"xlink:href":url, "xlink:title":title, "target":"_top"}
#Currently of all the mainstream browsers I have tested, only Safari/webkit
#will show SVG images embedded in HTML using a simple <img src="..." /> tag.
#However, the links don't work (Safari 3.2.1 on the Mac).
#
#Therefore I use the following, which also works for Firefox, Opera, and
#IE 6.0 with Adobe SVG Viewer 6 beta:
#<object data="..." type="image/svg+xml" width="430" height="150" class="img">
#
#Once displayed, Firefox and Safari treat the SVG like a frame, and
#by default clicking on links acts "in frame" and replaces the image.
#Opera does what I expect, and replaces the whole page with the link.
#
#Therefore I use target="_top" to force the links to replace the whole page.
#This now works as expected on Safari 3.2.1, Firefox 3.0.6, Opera 9.20.
#Perhaps the target attribute should be an option, perhaps defaulting to
#"_top" as used here?
else :
return None
def drawGroup(self, group):
if self.verbose: print("### begin _SVGRenderer.drawGroup")
currGroup = self._canvas.startGroup()
a, b, c, d, e, f = self._tracker.getState()['transform']
for childNode in group.getContents():
if isinstance(childNode, UserNode):
node2 = childNode.provideNode()
else:
node2 = childNode
self.drawNode(node2)
self._canvas.transform(a, b, c, d, e, f)
self._canvas.endGroup(currGroup)
if self.verbose: print("### end _SVGRenderer.drawGroup")
def drawRect(self, rect):
link_info = self._get_link_info_dict(rect)
svgAttrs = getattr(rect,'_svgAttrs',{})
if rect.rx == rect.ry == 0:
#plain old rectangle
self._canvas.rect(
rect.x, rect.y,
rect.x+rect.width, rect.y+rect.height, link_info=link_info, **svgAttrs)
else:
#cheat and assume ry = rx; better to generalize
#pdfgen roundRect function. TODO
self._canvas.roundRect(
rect.x, rect.y,
rect.x+rect.width, rect.y+rect.height,
rect.rx, rect.ry,
link_info=link_info, **svgAttrs)
def drawString(self, stringObj):
S = self._tracker.getState()
text_anchor, x, y, text = S['textAnchor'], stringObj.x, stringObj.y, stringObj.text
self._canvas.drawString(text,x,y,link_info=self._get_link_info_dict(stringObj),
text_anchor=text_anchor, textRenderMode=getattr(stringObj,'textRenderMode',0),
**getattr(stringObj,'_svgAttrs',{}))
def drawLine(self, line):
if self._canvas._strokeColor:
self._canvas.line(line.x1, line.y1, line.x2, line.y2)
def drawCircle(self, circle):
self._canvas.circle( circle.cx, circle.cy, circle.r, link_info=self._get_link_info_dict(circle))
def drawWedge(self, wedge):
yradius, radius1, yradius1 = wedge._xtraRadii()
if (radius1==0 or radius1 is None) and (yradius1==0 or yradius1 is None) and not wedge.annular:
centerx, centery, radius, startangledegrees, endangledegrees = \
wedge.centerx, wedge.centery, wedge.radius, wedge.startangledegrees, wedge.endangledegrees
yradius = wedge.yradius or wedge.radius
(x1, y1) = (centerx-radius, centery-yradius)
(x2, y2) = (centerx+radius, centery+yradius)
extent = endangledegrees - startangledegrees
self._canvas.drawArc(x1, y1, x2, y2, startangledegrees, extent, fromcenter=1)
else:
P = wedge.asPolygon()
if isinstance(P,Path):
self.drawPath(P)
else:
self.drawPolygon(P)
def drawPolyLine(self, p):
if self._canvas._strokeColor:
self._canvas.polyLine(_pointsFromList(p.points))
def drawEllipse(self, ellipse):
#need to convert to pdfgen's bounding box representation
x1 = ellipse.cx - ellipse.rx
x2 = ellipse.cx + ellipse.rx
y1 = ellipse.cy - ellipse.ry
y2 = ellipse.cy + ellipse.ry
self._canvas.ellipse(x1,y1,x2,y2, link_info=self._get_link_info_dict(ellipse))
def drawPolygon(self, p):
self._canvas.polygon(_pointsFromList(p.points), closed=1, link_info=self._get_link_info_dict(p))
def drawPath(self, path, fillMode=FILL_EVEN_ODD):
# print "### drawPath", path.points
from reportlab.graphics.shapes import _renderPath
c = self._canvas
drawFuncs = (c.moveTo, c.lineTo, c.curveTo, c.closePath)
if fillMode is None:
fillMode = getattr(path,'fillMode',FILL_EVEN_ODD)
link_info = self._get_link_info_dict(path)
autoclose = getattr(path,'autoclose','')
def rP(**kwds):
return _renderPath(path, drawFuncs, **kwds)
if autoclose=='svg':
rP()
c._fillAndStroke([], clip=path.isClipPath, link_info=link_info, fillMode=fillMode)
elif autoclose=='pdf':
rP(forceClose=True)
c._fillAndStroke([], clip=path.isClipPath, link_info=link_info, fillMode=fillMode)
else:
isClosed = rP()
if not isClosed:
ofc = c._fillColor
c.setFillColor(None)
try:
link_info = None
c._fillAndStroke([], clip=path.isClipPath, link_info=link_info, fillMode=fillMode)
finally:
c.setFillColor(ofc)
else:
c._fillAndStroke([], clip=path.isClipPath, link_info=link_info, fillMode=fillMode)
def drawImage(self, image):
path = image.path
if isinstance(path,str):
if not (path and os.path.isfile(path)): return
im = _getImage().open(path)
elif hasattr(path,'convert'):
im = path
else:
return
srcW, srcH = im.size
dstW, dstH = image.width, image.height
if dstW is None: dstW = srcW
if dstH is None: dstH = srcH
self._canvas.drawImage(im, image.x, image.y, dstW, dstH, embed=True)
def applyStateChanges(self, delta, newState):
"""This takes a set of states, and outputs the operators
needed to set those properties"""
for key, value in delta.items():
if key == 'transform':
pass
#self._canvas.transform(value[0], value[1], value[2], value[3], value[4], value[5])
elif key == 'strokeColor':
self._canvas.setStrokeColor(value)
elif key == 'strokeWidth':
self._canvas.setLineWidth(value)
elif key == 'strokeLineCap': #0,1,2
self._canvas.setLineCap(value)
elif key == 'strokeLineJoin':
self._canvas.setLineJoin(value)
elif key == 'strokeDashArray':
if value:
if isinstance(value,(list,tuple)) and len(value)==2 and isinstance(value[1],(tuple,list)):
phase = value[0]
value = value[1]
else:
phase = 0
self._canvas.setDash(value,phase)
else:
self._canvas.setDash()
elif key == 'fillColor':
self._canvas.setFillColor(value)
elif key in ['fontSize', 'fontName']:
fontname = delta.get('fontName', self._canvas._font)
fontsize = delta.get('fontSize', self._canvas._fontSize)
self._canvas.setFont(fontname, fontsize)
elif key == 'fillMode':
self._canvas.setFillMode(value)
def test(outDir='out-svg'):
# print all drawings and their doc strings from the test
# file
if not os.path.isdir(outDir):
os.mkdir(outDir)
#grab all drawings from the test module
from reportlab.graphics import testshapes
drawings = []
for funcname in dir(testshapes):
if funcname[0:10] == 'getDrawing':
func = getattr(testshapes,funcname)
drawing = func()
docstring = getattr(func,'__doc__','')
drawings.append((drawing, docstring))
i = 0
for (d, docstring) in drawings:
filename = os.path.join(outDir,'renderSVG_%d.svg' % i)
drawToFile(d, filename)
i += 1
from reportlab.graphics.testshapes import getDrawing01
d = getDrawing01()
drawToFile(d, os.path.join(outDir,"test.svg"))
from reportlab.lib.corp import RL_CorpLogo
from reportlab.graphics.shapes import Drawing
rl = RL_CorpLogo()
d = Drawing(rl.width,rl.height)
d.add(rl)
drawToFile(d, os.path.join(outDir,"corplogo.svg"))
if __name__=='__main__':
test()
|
# -*- coding: utf-8 -*-
"""
configfile.py - Human-readable text configuration file library
Copyright 2010 Luke Campagnola
Distributed under MIT/X11 license. See license.txt for more information.
Used for reading and writing dictionary objects to a python-like configuration
file format. Data structures may be nested and contain any data type as long
as it can be converted to/from a string using repr and eval.
"""
import re, os, sys
from .pgcollections import OrderedDict
GLOBAL_PATH = None # so not thread safe.
from . import units
from .python2_3 import asUnicode
class ParseError(Exception):
def __init__(self, message, lineNum, line, fileName=None):
self.lineNum = lineNum
self.line = line
self.message = message  # needed by __str__ below
self.fileName = fileName
Exception.__init__(self, message)
def __str__(self):
if self.fileName is None:
msg = "Error parsing string at line %d:\n" % self.lineNum
else:
msg = "Error parsing config file '%s' at line %d:\n" % (self.fileName, self.lineNum)
msg += "%s\n%s" % (self.line, self.message)
return msg
#raise Exception()
def writeConfigFile(data, fname):
s = genString(data)
fd = open(fname, 'w')
fd.write(s)
fd.close()
def readConfigFile(fname):
#cwd = os.getcwd()
global GLOBAL_PATH
if GLOBAL_PATH is not None:
fname2 = os.path.join(GLOBAL_PATH, fname)
if os.path.exists(fname2):
fname = fname2
GLOBAL_PATH = os.path.dirname(os.path.abspath(fname))
try:
#os.chdir(newDir) ## bad.
fd = open(fname)
s = asUnicode(fd.read())
fd.close()
s = s.replace("\r\n", "\n")
s = s.replace("\r", "\n")
data = parseString(s)[1]
except ParseError:
sys.exc_info()[1].fileName = fname
raise
except:
print("Error while reading config file %s:"% fname)
raise
#finally:
#os.chdir(cwd)
return data
def appendConfigFile(data, fname):
s = genString(data)
fd = open(fname, 'a')
fd.write(s)
fd.close()
def genString(data, indent=''):
s = ''
for k in data:
sk = str(k)
if len(sk) == 0:
print(data)
raise Exception('blank dict keys not allowed (see data above)')
if sk[0] == ' ' or ':' in sk:
print(data)
raise Exception('dict keys must not contain ":" or start with spaces [offending key is "%s"]' % sk)
if isinstance(data[k], dict):
s += indent + sk + ':\n'
s += genString(data[k], indent + ' ')
else:
s += indent + sk + ': ' + repr(data[k]) + '\n'
return s
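# Example (illustrative): genString({'key': 'value', 'nested': {'x': [1, 2, 3]}})
# returns the text
#   key: 'value'
#   nested:
#       x: [1, 2, 3]
# i.e. nesting is expressed by four extra spaces of indentation and leaf values
# are written with repr() so that parseString() can eval() them back.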
def parseString(lines, start=0):
data = OrderedDict()
if isinstance(lines, basestring):
lines = lines.split('\n')
lines = [l for l in lines if re.search(r'\S', l) and not re.match(r'\s*#', l)] ## remove empty lines
indent = measureIndent(lines[start])
ln = start - 1
try:
while True:
ln += 1
#print ln
if ln >= len(lines):
break
l = lines[ln]
## Skip blank lines or lines starting with #
if re.match(r'\s*#', l) or not re.search(r'\S', l):
continue
## Measure line indentation, make sure it is correct for this level
lineInd = measureIndent(l)
if lineInd < indent:
ln -= 1
break
if lineInd > indent:
#print lineInd, indent
raise ParseError('Indentation is incorrect. Expected %d, got %d' % (indent, lineInd), ln+1, l)
if ':' not in l:
raise ParseError('Missing colon', ln+1, l)
(k, p, v) = l.partition(':')
k = k.strip()
v = v.strip()
## set up local variables to use for eval
local = units.allUnits.copy()
local['OrderedDict'] = OrderedDict
local['readConfigFile'] = readConfigFile
if len(k) < 1:
raise ParseError('Missing name preceding colon', ln+1, l)
if k[0] == '(' and k[-1] == ')': ## If the key looks like a tuple, try evaluating it.
try:
k1 = eval(k, local)
if type(k1) is tuple:
k = k1
except:
pass
if re.search(r'\S', v) and v[0] != '#': ## eval the value
try:
val = eval(v, local)
except:
ex = sys.exc_info()[1]
raise ParseError("Error evaluating expression '%s': [%s: %s]" % (v, ex.__class__.__name__, str(ex)), (ln+1), l)
else:
if ln+1 >= len(lines) or measureIndent(lines[ln+1]) <= indent:
#print "blank dict"
val = {}
else:
#print "Going deeper..", ln+1
(ln, val) = parseString(lines, start=ln+1)
data[k] = val
#print k, repr(val)
except ParseError:
raise
except:
ex = sys.exc_info()[1]
raise ParseError("%s: %s" % (ex.__class__.__name__, str(ex)), ln+1, l)
#print "Returning shallower..", ln+1
return (ln, data)
def measureIndent(s):
n = 0
while n < len(s) and s[n] == ' ':
n += 1
return n
if __name__ == '__main__':
import tempfile
fn = tempfile.mktemp()
tf = open(fn, 'w')
cf = """
key: 'value'
key2: ##comment
##comment
key21: 'value' ## comment
##comment
key22: [1,2,3]
key23: 234 #comment
"""
tf.write(cf)
tf.close()
print("=== Test:===")
num = 1
for line in cf.split('\n'):
print("%02d %s" % (num, line))
num += 1
print(cf)
print("============")
data = readConfigFile(fn)
print(data)
os.remove(fn)
|
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.api.compute import base
from tempest.common.utils import data_utils
from tempest.common import waiters
from tempest import config
from tempest import test
CONF = config.CONF
class VolumesTestJSON(base.BaseV2ComputeTest):
# NOTE: This test creates a number of 1G volumes. To run successfully,
# ensure that the backing file for the volume group that Nova uses
# has space for at least 3 1G volumes!
# If you are running a Devstack environment, ensure that the
# VOLUME_BACKING_FILE_SIZE is at least 4G in your localrc
@classmethod
def skip_checks(cls):
super(VolumesTestJSON, cls).skip_checks()
if not CONF.service_available.cinder:
skip_msg = ("%s skipped as Cinder is not available" % cls.__name__)
raise cls.skipException(skip_msg)
@classmethod
def setup_clients(cls):
super(VolumesTestJSON, cls).setup_clients()
cls.client = cls.volumes_extensions_client
@classmethod
def resource_setup(cls):
super(VolumesTestJSON, cls).resource_setup()
# Create 3 Volumes
cls.volume_list = []
cls.volume_id_list = []
for i in range(3):
v_name = data_utils.rand_name('volume')
metadata = {'Type': 'work'}
try:
volume = cls.client.create_volume(size=CONF.volume.volume_size,
display_name=v_name,
metadata=metadata)['volume']
waiters.wait_for_volume_status(cls.client,
volume['id'], 'available')
volume = cls.client.show_volume(volume['id'])['volume']
cls.volume_list.append(volume)
cls.volume_id_list.append(volume['id'])
except Exception:
if cls.volume_list:
# We could not create all the volumes, though we were able
# to create *some* of the volumes. This is typically
# because the backing file size of the volume group is
# too small. So, here, we clean up whatever we did manage
# to create and raise a SkipTest
for volume in cls.volume_list:
cls.delete_volume(volume['id'])
msg = ("Failed to create ALL necessary volumes to run "
"test. This typically means that the backing file "
"size of the nova-volumes group is too small to "
"create the 3 volumes needed by this test case")
raise cls.skipException(msg)
raise
@classmethod
def resource_cleanup(cls):
# Delete the created Volumes
for volume in cls.volume_list:
cls.delete_volume(volume['id'])
super(VolumesTestJSON, cls).resource_cleanup()
@test.idempotent_id('bc2dd1a0-15af-48e5-9990-f2e75a48325d')
def test_volume_list(self):
# Should return the list of Volumes
# Fetch all Volumes
fetched_list = self.client.list_volumes()['volumes']
# Now check if all the Volumes created in setup are in fetched list
missing_volumes = [
v for v in self.volume_list if v not in fetched_list
]
self.assertFalse(missing_volumes,
"Failed to find volume %s in fetched list" %
', '.join(m_vol['displayName']
for m_vol in missing_volumes))
@test.idempotent_id('bad0567a-5a4f-420b-851e-780b55bb867c')
def test_volume_list_with_details(self):
# Should return the list of Volumes with details
# Fetch all Volumes
fetched_list = self.client.list_volumes(detail=True)['volumes']
# Now check if all the Volumes created in setup are in fetched list
missing_volumes = [
v for v in self.volume_list if v not in fetched_list
]
self.assertFalse(missing_volumes,
"Failed to find volume %s in fetched list" %
', '.join(m_vol['displayName']
for m_vol in missing_volumes))
@test.idempotent_id('1048ed81-2baf-487a-b284-c0622b86e7b8')
def test_volume_list_param_limit(self):
# Return the list of volumes based on limit set
params = {'limit': 2}
fetched_vol_list = self.client.list_volumes(**params)['volumes']
self.assertEqual(len(fetched_vol_list), params['limit'],
"Failed to list volumes by limit set")
@test.idempotent_id('33985568-4965-49d5-9bcc-0aa007ca5b7a')
def test_volume_list_with_detail_param_limit(self):
# Return the list of volumes with details based on limit set.
params = {'limit': 2}
fetched_vol_list = self.client.list_volumes(detail=True,
**params)['volumes']
self.assertEqual(len(fetched_vol_list), params['limit'],
"Failed to list volume details by limit set")
@test.idempotent_id('51c22651-a074-4ea7-af0b-094f9331303e')
def test_volume_list_param_offset_and_limit(self):
# Return the list of volumes based on offset and limit set.
# get all volumes list
all_vol_list = self.client.list_volumes()['volumes']
params = {'offset': 1, 'limit': 1}
fetched_vol_list = self.client.list_volumes(**params)['volumes']
# Validating length of the fetched volumes
self.assertEqual(len(fetched_vol_list), params['limit'],
"Failed to list volumes by offset and limit")
# Validating offset of fetched volume
for index, volume in enumerate(fetched_vol_list):
self.assertEqual(volume['id'],
all_vol_list[index + params['offset']]['id'],
"Failed to list volumes by offset and limit")
@test.idempotent_id('06b6abc4-3f10-48e9-a7a1-3facc98f03e5')
def test_volume_list_with_detail_param_offset_and_limit(self):
# Return the list of volumes details based on offset and limit set.
# get all volumes list
all_vol_list = self.client.list_volumes(detail=True)['volumes']
params = {'offset': 1, 'limit': 1}
fetched_vol_list = self.client.list_volumes(detail=True,
**params)['volumes']
# Validating length of the fetched volumes
self.assertEqual(len(fetched_vol_list), params['limit'],
"Failed to list volume details by offset and limit")
# Validating offset of fetched volume
for index, volume in enumerate(fetched_vol_list):
self.assertEqual(volume['id'],
all_vol_list[index + params['offset']]['id'],
"Failed to list volume details by "
"offset and limit")
|
import os
from importlib import import_module
def get_providers():
for provider_file in os.listdir(os.path.dirname(os.path.abspath(__file__))):
if provider_file[0] != '$':
continue
provider = provider_file.replace('.py', '')
yield import_module(f'{__package__}.{provider}')
def get_provider(name, *args, **kwargs):
provider_module = import_module(f'{__name__}.${name}')
return provider_module.run(*args, **kwargs)
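# Usage sketch (hedged; names are hypothetical). With a package layout such as
#   providers/
#       __init__.py      <- this module
#       $email.py        <- a provider module exposing run(*args, **kwargs)
# get_providers() imports and yields every '$'-prefixed module in the package,
# and get_provider('email', ...) imports providers.$email and calls its run().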
|
# Copyright (C) 2019 The Raphielscape Company LLC.
#
# Licensed under the Raphielscape Public License, Version 1.c (the "License");
# you may not use this file except in compliance with the License.
#
"""
This module updates the userbot based on Upstream revision
"""
from os import remove, execle, path, makedirs, getenv
from shutil import rmtree
import asyncio
import sys
from git import Repo
from git.exc import GitCommandError, InvalidGitRepositoryError, NoSuchPathError
from userbot import CMD_HELP, bot, HEROKU_APIKEY, HEROKU_APPNAME, UPSTREAM_REPO_URL
from userbot.events import register
requirements_path = path.join(
path.dirname(path.dirname(path.dirname(__file__))), 'requirements.txt')
async def gen_chlog(repo, diff):
ch_log = ''
d_form = "%d/%m/%y"
for c in repo.iter_commits(diff):
ch_log += f'•[{c.committed_datetime.strftime(d_form)}]: {c.summary} <{c.author}>\n'
return ch_log
async def update_requirements():
reqs = str(requirements_path)
try:
process = await asyncio.create_subprocess_shell(
' '.join([sys.executable, "-m", "pip", "install", "-r", reqs]),
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.PIPE)
await process.communicate()
return process.returncode
except Exception as e:
return repr(e)
@register(outgoing=True, pattern="^.update(?: |$)(.*)")
async def upstream(ups):
"For .update command, check if the bot is up to date, update if specified"
await ups.edit("`Checking for updates, please wait....`")
conf = ups.pattern_match.group(1)
off_repo = UPSTREAM_REPO_URL
force_update = False
try:
txt = "`Oops.. Updater cannot continue due to "
txt += "some problems occured`\n\n**LOGTRACE:**\n"
repo = Repo()
except NoSuchPathError as error:
await ups.edit(f'{txt}\n`directory {error} is not found`')
repo.__del__()
return
except GitCommandError as error:
await ups.edit(f'{txt}\n`Early failure! {error}`')
repo.__del__()
return
except InvalidGitRepositoryError as error:
if conf != "now":
await ups.edit(
f"`Unfortunately, the directory {error} does not seem to be a git repository.\
\nBut we can fix that by force updating the userbot using .update now.`"
)
return
repo = Repo.init()
origin = repo.create_remote('upstream', off_repo)
origin.fetch()
force_update = True
repo.create_head('master', origin.refs.master)
repo.heads.master.set_tracking_branch(origin.refs.master)
repo.heads.master.checkout(True)
ac_br = repo.active_branch.name
if ac_br != 'master':
await ups.edit(
f'**[UPDATER]:**` Looks like you are using your own custom branch ({ac_br}). '
'In that case, the updater cannot identify '
'which branch should be merged. '
'Please check out an official branch.`')
repo.__del__()
return
try:
repo.create_remote('upstream', off_repo)
except BaseException:
pass
ups_rem = repo.remote('upstream')
ups_rem.fetch(ac_br)
changelog = await gen_chlog(repo, f'HEAD..upstream/{ac_br}')
if not changelog and not force_update:
await ups.edit(
f'\n`Your BOT is` **up-to-date** `with` **{ac_br}**\n')
repo.__del__()
return
if conf != "now" and not force_update:
changelog_str = f'**New UPDATE available for [{ac_br}]:\n\nCHANGELOG:**\n`{changelog}`'
if len(changelog_str) > 4096:
await ups.edit("`Changelog is too big, view the file to see it.`")
file = open("output.txt", "w+")
file.write(changelog_str)
file.close()
await ups.client.send_file(
ups.chat_id,
"output.txt",
reply_to=ups.id,
)
remove("output.txt")
else:
await ups.edit(changelog_str)
await ups.respond('`do \".update now\" to update`')
return
if force_update:
await ups.edit(
'`Force-Syncing to latest stable userbot code, please wait...`')
else:
await ups.edit('`Updating userbot, please wait....`')
# We're in a Heroku Dyno, handle its memez.
if HEROKU_APIKEY is not None:
import heroku3
heroku = heroku3.from_key(HEROKU_APIKEY)
heroku_app = None
heroku_applications = heroku.apps()
if not HEROKU_APPNAME:
await ups.edit(
'`[HEROKU MEMEZ] Please set up the HEROKU_APPNAME variable to be able to update userbot.`'
)
repo.__del__()
return
for app in heroku_applications:
if app.name == HEROKU_APPNAME:
heroku_app = app
break
if heroku_app is None:
await ups.edit(
f'{txt}\n`Invalid Heroku credentials for updating userbot dyno.`'
)
repo.__del__()
return
await ups.edit('`[HEROKU MEMEZ]\
\nUserbot dyno build in progress, please wait for it to complete.`'
)
ups_rem.fetch(ac_br)
repo.git.reset("--hard", "FETCH_HEAD")
heroku_git_url = heroku_app.git_url.replace(
"https://", "https://api:" + HEROKU_APIKEY + "@")
if "heroku" in repo.remotes:
remote = repo.remote("heroku")
remote.set_url(heroku_git_url)
else:
remote = repo.create_remote("heroku", heroku_git_url)
try:
remote.push(refspec="HEAD:refs/heads/master", force=True)
except GitCommandError as error:
await ups.edit(f'{txt}\n`Here is the error log:\n{error}`')
repo.__del__()
return
await ups.edit('`Successfully Updated!\n'
'Restarting, please wait...`')
else:
# Classic Updater, pretty straightforward.
try:
ups_rem.pull(ac_br)
except GitCommandError:
repo.git.reset("--hard", "FETCH_HEAD")
reqs_upgrade = await update_requirements()
await ups.edit('`Successfully Updated!\n'
'Bot is restarting... Wait for a second!`')
# Spin a new instance of bot
args = [sys.executable, "-m", "userbot"]
execle(sys.executable, *args, os.environ)
return
CMD_HELP.update({
'update':
".update\
\nUsage: Checks if the main userbot repository has any updates and shows a changelog if so.\
\n\n.update now\
\nUsage: Updates your userbot, if there are any updates in the main userbot repository."
})
|
#!/usr/bin/env python
#############################################################################
##
## Copyright (C) 2010 Riverbank Computing Limited.
## Copyright (C) 2010 Nokia Corporation and/or its subsidiary(-ies).
## All rights reserved.
##
## This file is part of the examples of PyQt.
##
## $QT_BEGIN_LICENSE:BSD$
## You may use this file under the terms of the BSD license as follows:
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are
## met:
## * Redistributions of source code must retain the above copyright
## notice, this list of conditions and the following disclaimer.
## * Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in
## the documentation and/or other materials provided with the
## distribution.
## * Neither the name of Nokia Corporation and its Subsidiary(-ies) nor
## the names of its contributors may be used to endorse or promote
## products derived from this software without specific prior written
## permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
## "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
## LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
## A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
## OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
## SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
## LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
## DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
## THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
## (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
## OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
## $QT_END_LICENSE$
##
#############################################################################
from PyQt4 import QtCore, QtGui
class ImageViewer(QtGui.QMainWindow):
def __init__(self):
super(ImageViewer, self).__init__()
self.printer = QtGui.QPrinter()
self.scaleFactor = 0.0
self.imageLabel = QtGui.QLabel()
self.imageLabel.setBackgroundRole(QtGui.QPalette.Base)
self.imageLabel.setSizePolicy(QtGui.QSizePolicy.Ignored,
QtGui.QSizePolicy.Ignored)
self.imageLabel.setScaledContents(True)
self.scrollArea = QtGui.QScrollArea()
self.scrollArea.setBackgroundRole(QtGui.QPalette.Dark)
self.scrollArea.setWidget(self.imageLabel)
self.setCentralWidget(self.scrollArea)
self.createActions()
self.createMenus()
self.setWindowTitle("Image Viewer")
self.resize(500, 400)
def open(self):
fileName = QtGui.QFileDialog.getOpenFileName(self, "Open File",
QtCore.QDir.currentPath())
if fileName:
image = QtGui.QImage(fileName)
if image.isNull():
QtGui.QMessageBox.information(self, "Image Viewer",
"Cannot load %s." % fileName)
return
self.imageLabel.setPixmap(QtGui.QPixmap.fromImage(image))
self.scaleFactor = 1.0
self.printAct.setEnabled(True)
self.fitToWindowAct.setEnabled(True)
self.updateActions()
if not self.fitToWindowAct.isChecked():
self.imageLabel.adjustSize()
def print_(self):
dialog = QtGui.QPrintDialog(self.printer, self)
if dialog.exec_():
painter = QtGui.QPainter(self.printer)
rect = painter.viewport()
size = self.imageLabel.pixmap().size()
size.scale(rect.size(), QtCore.Qt.KeepAspectRatio)
painter.setViewport(rect.x(), rect.y(), size.width(), size.height())
painter.setWindow(self.imageLabel.pixmap().rect())
painter.drawPixmap(0, 0, self.imageLabel.pixmap())
def zoomIn(self):
self.scaleImage(1.25)
def zoomOut(self):
self.scaleImage(0.8)
def normalSize(self):
self.imageLabel.adjustSize()
self.scaleFactor = 1.0
def fitToWindow(self):
fitToWindow = self.fitToWindowAct.isChecked()
self.scrollArea.setWidgetResizable(fitToWindow)
if not fitToWindow:
self.normalSize()
self.updateActions()
def about(self):
QtGui.QMessageBox.about(self, "About Image Viewer",
"<p>The <b>Image Viewer</b> example shows how to combine "
"QLabel and QScrollArea to display an image. QLabel is "
"typically used for displaying text, but it can also display "
"an image. QScrollArea provides a scrolling view around "
"another widget. If the child widget exceeds the size of the "
"frame, QScrollArea automatically provides scroll bars.</p>"
"<p>The example demonstrates how QLabel's ability to scale "
"its contents (QLabel.scaledContents), and QScrollArea's "
"ability to automatically resize its contents "
"(QScrollArea.widgetResizable), can be used to implement "
"zooming and scaling features.</p>"
"<p>In addition the example shows how to use QPainter to "
"print an image.</p>")
def createActions(self):
self.openAct = QtGui.QAction("&Open...", self, shortcut="Ctrl+O",
triggered=self.open)
self.printAct = QtGui.QAction("&Print...", self, shortcut="Ctrl+P",
enabled=False, triggered=self.print_)
self.exitAct = QtGui.QAction("E&xit", self, shortcut="Ctrl+Q",
triggered=self.close)
self.zoomInAct = QtGui.QAction("Zoom &In (25%)", self,
shortcut="Ctrl++", enabled=False, triggered=self.zoomIn)
self.zoomOutAct = QtGui.QAction("Zoom &Out (25%)", self,
shortcut="Ctrl+-", enabled=False, triggered=self.zoomOut)
self.normalSizeAct = QtGui.QAction("&Normal Size", self,
shortcut="Ctrl+S", enabled=False, triggered=self.normalSize)
self.fitToWindowAct = QtGui.QAction("&Fit to Window", self,
enabled=False, checkable=True, shortcut="Ctrl+F",
triggered=self.fitToWindow)
self.aboutAct = QtGui.QAction("&About", self, triggered=self.about)
self.aboutQtAct = QtGui.QAction("About &Qt", self,
triggered=QtGui.qApp.aboutQt)
def createMenus(self):
self.fileMenu = QtGui.QMenu("&File", self)
self.fileMenu.addAction(self.openAct)
self.fileMenu.addAction(self.printAct)
self.fileMenu.addSeparator()
self.fileMenu.addAction(self.exitAct)
self.viewMenu = QtGui.QMenu("&View", self)
self.viewMenu.addAction(self.zoomInAct)
self.viewMenu.addAction(self.zoomOutAct)
self.viewMenu.addAction(self.normalSizeAct)
self.viewMenu.addSeparator()
self.viewMenu.addAction(self.fitToWindowAct)
self.helpMenu = QtGui.QMenu("&Help", self)
self.helpMenu.addAction(self.aboutAct)
self.helpMenu.addAction(self.aboutQtAct)
self.menuBar().addMenu(self.fileMenu)
self.menuBar().addMenu(self.viewMenu)
self.menuBar().addMenu(self.helpMenu)
def updateActions(self):
self.zoomInAct.setEnabled(not self.fitToWindowAct.isChecked())
self.zoomOutAct.setEnabled(not self.fitToWindowAct.isChecked())
self.normalSizeAct.setEnabled(not self.fitToWindowAct.isChecked())
def scaleImage(self, factor):
self.scaleFactor *= factor
self.imageLabel.resize(self.scaleFactor * self.imageLabel.pixmap().size())
self.adjustScrollBar(self.scrollArea.horizontalScrollBar(), factor)
self.adjustScrollBar(self.scrollArea.verticalScrollBar(), factor)
self.zoomInAct.setEnabled(self.scaleFactor < 3.0)
self.zoomOutAct.setEnabled(self.scaleFactor > 0.333)
def adjustScrollBar(self, scrollBar, factor):
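# Keep the content point at the centre of the viewport fixed while zooming:
# the new value is the old value scaled by `factor`, plus a half-page-step
# correction for the change in visible size.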
scrollBar.setValue(int(factor * scrollBar.value()
+ ((factor - 1) * scrollBar.pageStep()/2)))
if __name__ == '__main__':
import sys
app = QtGui.QApplication(sys.argv)
imageViewer = ImageViewer()
imageViewer.show()
sys.exit(app.exec_())
|
import json
from re import split
import shutil
import os
import sys
import numpy as np
from PIL import Image, ImageDraw, ImageFont
from skimage import io
from shapely.geometry import Polygon
Image.MAX_IMAGE_PIXELS = None
def make_dir(path):
if not os.path.exists(path):
os.makedirs(path)
else:
shutil.rmtree(path)
os.makedirs(path)
def dice(a, b):
return 2 * a.intersection(b).area / (a.area + b.area)
def recall(a, b):
return a.intersection(b).area / b.area
def precision(a, b):
return a.intersection(b).area / a.area
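# Worked example (illustrative): two unit squares overlapping by half their area.
#   from shapely.geometry import box
#   a, b = box(0, 0, 1, 1), box(0.5, 0, 1.5, 1)   # intersection area = 0.5
#   dice(a, b)      -> 2 * 0.5 / (1 + 1) = 0.5
#   recall(a, b)    -> 0.5 / 1 = 0.5
#   precision(a, b) -> 0.5 / 1 = 0.5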
def find_diff(dice_thred=0.5, draw_preview=True, log_score=True):
# A - new json
with open(file_A_path) as data_file:
data = json.load(data_file)
average_area = sum(
[Polygon(item["geometry"]["coordinates"][0]).area for item in data]
) / len(data)
area_threshold = average_area / 50
print("average area size: ", average_area)
print("size threshold: ", area_threshold)
coor_list_a = []
for item in data:
coor = item["geometry"]["coordinates"]
poly = Polygon(coor[0])
if poly.area > area_threshold:
coor_list_a.extend(item["geometry"]["coordinates"])
else:
print("A ignore", poly.area)
A_x_list = [[xy[0] for xy in coor] for coor in coor_list_a]
A_y_list = [[xy[1] for xy in coor] for coor in coor_list_a]
A_id_list = [i for i in range(len(coor_list_a))]
# B - old json
with open(file_B_path) as data_file:
data = json.load(data_file)
coor_list_b = []
for item in data:
coor = item["geometry"]["coordinates"]
coor = [
[[xy[1], xy[0]] for xy in coor[0]]
] # for some json. Comment this line if needed
poly = Polygon(coor[0])
if poly.area > area_threshold:
coor_list_b.extend(coor)
else:
print("B ignore", poly.area)
B_x_list = [[xy[0] for xy in coor] for coor in coor_list_b]
B_y_list = [[xy[1] for xy in coor] for coor in coor_list_b]
# find difference
center_list_new = []
for i in range(len(A_x_list)):
mean_x = (sum(A_x_list[i]) - A_x_list[i][-1]) / (len(A_x_list[i]) - 1)
mean_y = (sum(A_y_list[i]) - A_y_list[i][-1]) / (len(A_y_list[i]) - 1)
center_list_new.append((mean_x, mean_y))
center_list_old = []
for i in range(len(B_x_list)):
mean_x = (sum(B_x_list[i]) - B_x_list[i][-1]) / (len(B_x_list[i]) - 1)
mean_y = (sum(B_y_list[i]) - B_y_list[i][-1]) / (len(B_y_list[i]) - 1)
center_list_old.append((mean_x, mean_y))
new_added_list = []
new_added_f1_list = []
new_same_list = []
new_revised_list = []
f1_list = []
positon_threshold = 500
dice_threshold = dice_thred
ignore_count = 0
for i in A_id_list:
x, y = center_list_new[i]
new_p = Polygon(coor_list_a[i])
min_f1 = 0
min_j = -1
_recall, _precision = -1, -1
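# Note: despite the "min_" prefix, min_f1/min_j track the BEST (maximum)
# dice score found among nearby ground-truth polygons.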
for j in range(len(center_list_old)):
_x, _y = center_list_old[j]
old_p = Polygon(coor_list_b[j])
if (x - _x) ** 2 + (y - _y) ** 2 <= positon_threshold ** 2:
f1 = dice(new_p, old_p)
if f1 > min_f1:
min_f1 = f1
min_j = j
_recall = recall(new_p, old_p)
_precision = precision(new_p, old_p)
if min_f1 >= 0.999:
_flag = f"Same\t{min_f1}"
new_same_list.append(i)
elif min_f1 >= dice_threshold:
_flag = f"Revised\t{min_f1}"
new_revised_list.append(i)
f1_list.append((min_f1, _recall, _precision))
else:
_flag = f"Added\t{min_f1}"
new_added_list.append(i)
new_added_f1_list.append(min_f1)
# print(min_f1)
if _flag.startswith("Same") or _flag.startswith("Revised"):
if min_j != -1:
coor_list_b.pop(min_j)
center_list_old.pop(min_j)
# print(i, _flag)
removed_count = len(center_list_old)
print(f"A\tB\tsame\tmatch\tadded\tdeleted")
print(
f"{len(A_x_list)}\t{len(B_x_list)}\t{len(new_same_list)}\t{len(new_revised_list)}"
f"\t{len(new_added_list)}\t{removed_count}"
)
print(f"[FP: {len(new_added_list)}/{len(A_x_list)}]")
print(f"[FN: {removed_count}/{len(B_x_list)}]")
# print(f"{len(new_same_list)} same")
# print(f"{len(new_revised_list)} revised")
# print(f"{len(new_added_list)} added")
# print(f"{removed_count} deleted")
# draw visualization
if draw_preview:
ref_image = io.imread(image_ref_path)
background = np.zeros(shape=ref_image.shape, dtype=np.uint8)
img = Image.fromarray(background, "L")
img = img.convert("RGB")
font_path = r"c:\windows\fonts\bahnschrift.ttf"
font = ImageFont.truetype(font_path, size=48)
title_font = ImageFont.truetype(font_path, size=72)
ImageDraw.Draw(img).text(
(100, 400),
text=f"DICE Threshold = {dice_thred}",
font=title_font,
fill="white",
)
ImageDraw.Draw(img).text(
(100, 480),
text=f"PREDICTION [FP: {len(new_added_list)}/{len(A_x_list)}]",
font=title_font,
fill="yellow",
)
ImageDraw.Draw(img).text(
(100, 560),
text=f"GROUND TRUTH [FN: {removed_count}/{len(B_x_list)}]",
font=title_font,
fill="red",
)
for i in new_added_list:
coor_tuple = [(xy[1], xy[0]) for xy in coor_list_a[i]]
# print(coor_tuple)
ImageDraw.Draw(img).line(coor_tuple, fill="yellow", width=6)
# text
f1 = new_added_f1_list[new_added_list.index(i)]
if f1 > 0:
text = "{:.3f}".format(f1) # + f",{Polygon(coor_list_a[i]).area}"
ImageDraw.Draw(img).text(
(center_list_new[i][1] - 40, center_list_new[i][0] + 60),
text,
font=font,
)
for coor_b in coor_list_b:
coor_tuple = [(xy[1], xy[0]) for xy in coor_b]
# print(coor_tuple)
ImageDraw.Draw(img).line(coor_tuple, fill="red", width=6)
# text = f",{Polygon(coor_b).area}"
# ImageDraw.Draw(img).text(
# (coor_tuple[0][0], coor_tuple[0][1]),
# text,
# font=font,
# )
img = np.array(img).astype("uint8")
output_path = image_ref_path.replace(
".png", f'_{str(dice_thred).replace(".","_")}.png'
)
io.imsave(output_path, img)
print(f"Image saved to {output_path}")
# write score
if log_score:
txt_path = file_A_path.replace("json", "txt")
with open(txt_path, "w") as f:
for item in f1_list:
f.write(f"{item[0]},{item[1]},{item[2]}\n")
if __name__ == "__main__":
file_A_path = (
r"C:\Users\yiju\Desktop\Copy\Scripts\masks\1-tom-new-kidney\pred_00a67c839.json"
)
file_B_path = r"C:\Users\yiju\Desktop\Copy\Data\hubmap-kidney-segmentation\test\00a67c839.json"
if len(sys.argv) >= 3:
file_A_path = sys.argv[1]
file_B_path = sys.argv[2]
image_ref_path = file_A_path.replace("json", "png")
A_name = file_A_path.split("\\")[-1].split(".")[0]
B_name = file_B_path.split("\\")[-1].split(".")[0]
print("A: ", A_name)
print("B: ", B_name)
for d in [0.5]: # [0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]:
find_diff(dice_thred=d, draw_preview=True, log_score=True)
|
# ---------------------------------------------------------------
# imp_head.py
# Set-up time: 2020/5/21 11:22 PM
# Copyright (c) 2020 ICT
# Licensed under The MIT License [see LICENSE for details]
# Written by Kenneth-Wong (Wenbin-Wang) @ VIPL.ICT
# Contact: wenbin.wang@vipl.ict.ac.cn [OR] nkwangwenbin@gmail.com
# ---------------------------------------------------------------
from ..registry import HEADS
import torch
from .relation_head import RelationHead
from .approaches import IMPContext
from mmdet.core import bbox2roi
@HEADS.register_module
class IMPHead(RelationHead):
def __init__(self, **kwargs):
super(IMPHead, self).__init__(**kwargs)
self.context_layer = IMPContext(self.head_config, self.obj_classes, self.rel_classes)
def forward(self,
img,
img_meta,
det_result,
gt_result=None,
is_testing=False,
ignore_classes=None):
"""
Obtain the relation prediction results based on detection results.
Args:
img (Tensor): of shape (N, C, H, W) encoding input images.
Typically these should be mean centered and std scaled.
img_meta (list[dict]): list of image info dict where each dict has:
'img_shape', 'scale_factor', 'flip', and may also contain
'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
For details on the values of these keys see
`mmdet/datasets/pipelines/formatting.py:Collect`.
det_result (Result): Result containing bbox, label, mask, point, rels,
etc. Depending on the mode, the relevant contents have already been
set correctly. Feel free to use it.
gt_result (Result): The ground truth information.
is_testing: whether the head is being run in testing (inference) mode.
Returns:
det_result with the following newly added keys:
refine_scores (list[Tensor]): logits of object
rel_scores (list[Tensor]): logits of relation
rel_pair_idxes (list[Tensor]): (num_rel, 2) index of subject and object
relmaps (list[Tensor]): (num_obj, num_obj):
target_rel_labels (list[Tensor]): the target relation label.
"""
roi_feats, union_feats, det_result = self.frontend_features(img, img_meta, det_result, gt_result)
if roi_feats.shape[0] == 0:
return det_result
refine_obj_scores, rel_scores = self.context_layer(roi_feats, union_feats, det_result)
num_rels = [r.shape[0] for r in det_result.rel_pair_idxes]
num_objs = [len(b) for b in det_result.bboxes]
assert len(num_rels) == len(num_objs)
if self.use_bias:
obj_preds = refine_obj_scores.max(-1)[1]
obj_preds = obj_preds.split(num_objs, dim=0)
pair_preds = []
for pair_idx, obj_pred in zip(det_result.rel_pair_idxes, obj_preds):
pair_preds.append(torch.stack((obj_pred[pair_idx[:, 0]], obj_pred[pair_idx[:, 1]]), dim=1))
pair_pred = torch.cat(pair_preds, dim=0)
rel_scores = rel_scores + self.freq_bias.index_with_labels(pair_pred.long())
# make some changes: list to tensor or tensor to tuple
if self.training:
det_result.target_labels = torch.cat(det_result.target_labels, dim=-1)
det_result.target_rel_labels = torch.cat(det_result.target_rel_labels, dim=-1)
else:
refine_obj_scores = refine_obj_scores.split(num_objs, dim=0)
rel_scores = rel_scores.split(num_rels, dim=0)
det_result.refine_scores = refine_obj_scores
det_result.rel_scores = rel_scores
return det_result
|
"""
ASGI config for PlantEmissionController project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'PlantEmissionController.settings')
application = get_asgi_application()
|
#!/usr/bin/env python
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Author: Piyush Agram
# Copyright 2013, by the California Institute of Technology. ALL RIGHTS RESERVED.
# United States Government Sponsorship acknowledged.
# Any commercial use must be negotiated with the Office of Technology Transfer at
# the California Institute of Technology.
# This software may be subject to U.S. export control laws.
# By accepting this software, the user agrees to comply with all applicable U.S.
# export laws and regulations. User has the responsibility to obtain export licenses,
# or other export authority as may be required before exporting such information to
# foreign countries or providing access to foreign persons.
#
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
import argparse
import symtable
import math
import numpy as np
from numpy.lib.stride_tricks import as_strided
import logging
import os
import sys
helpStr = """
ISCE Band image with imageMath.py
Examples:
*********
1) imageMath.py -e='a*exp(-1.0*J*arg(b))' -o test.int -t cfloat --a=resampOnlyImage.int --b=topophase.mph
This uses phase from topophase.mph to correct topophase from the interferograms
2) imageMath.py -e='a_0;a_1' --a=resampOnlyImage.amp -o test.amp -s BIL
This converts a BIP image to a BIL image
3) imageMath.py -e="abs(a);sqrt(b_0**2 + b_1**2)" --a=topophase.flat --b="topophase.mph;3419;float;2;BIP" -o test.mag -s BIL
This should produce a BIL (RMG) image where both channels are equal. Input the correct width before testing this.
Rules:
******
0) Input math expressions should be valid python expressions.
1) A math expression for every band of output image is needed. For a multi-band output image, these expressions are separated by a ;.
Example: See Example 2 above.
2) All variable names in the math expressions need to be lower case, single character. Capital characters and multi-char names are reserved for constants and functions respectively.
3) The band of multi-band input images are represented by adding _i to the variable name, where "i" is the band number. All indices are zero-based (C and python).
Example : a_0 represents the first band of the image represented by variable "a".
4) For a single band image, the _0 band notation is optional.
Example: a_0 and a are equivalent for a single band image.
5) For every lower case variable in the equations, another input "--varname" is needed. Example shown above where --a and --b are defined.
6) Variables can be defined in two ways:
a) File name (assuming an ISCE .xml file also exists).
Example --a=resamp.int
b) Image grammar: "Filename;width;datatype;bands;scheme"
Example --a="resamp.int;3200;cfloat;1;BSQ"
- Default value for datatype=float
- Default value for bands = 1
- Default value for scheme = BSQ
c) In the image grammar: Single character codes for datatypes are case sensitive (Numpy convention) whereas multi-character codes are case-insensitive. Internally, everything is translated to numpy convention by the code before processing.
"""
#######Current list of supported unitary functions - f(x)
fnDict = { 'cos': np.cos,
'sin': np.sin,
'exp': np.exp,
'log': np.log,
'log2': np.log2,
'log10': np.log10,
'tan' : np.tan,
'asin': np.arcsin,
'acos': np.arccos,
'atan': np.arctan,
'arg': np.angle,
'conj': np.conj,
'abs' : np.abs,
'round' : np.round,
'ceil' : np.ceil,
'floor' : np.floor,
'real' : np.real,
'imag' : np.imag,
'rad': np.radians,
'deg': np.degrees,
'sqrt': np.sqrt
}
#######Current list of constants
constDict = { "PI" : np.pi,
"J" : np.complex(0.0, 1.0),
"I" : np.complex(0.0, 1.0),
"E" : np.exp(1.0),
"NAN" : np.nan
}
#####Dictionary of global parameters
iMath = {
'outFile' : None, ####Output file name
'outBands' : [], ####List of out band mmaps
'outScheme' : 'BSQ', ####Output scheme
'equations' : [], #####List of math equations
'outType' : 'f', ####Output datatype
'width' : None, ####Width of images
'length' : None, ####Length of images
'inBands' : {}, ####Dictionary of input band mmaps
'inFiles' : {} ####Dictionary input file mmaps
}
######To deal with data types
'''
Translation between user inputs and numpy types.
Single char codes are case sensitive (Numpy convention).
Multiple char codes are case insensitive.
'''
####Signed byte
byte_tuple = ('b', 'byte', 'b8', 'b1')
####Unsigned byte
ubyte_tuple = ('B', 'ubyte', 'ub8', 'ub1')
####Short int
short_tuple = ('h', 'i2', 'short', 'int2', 'int16')
####Unsigned short int
ushort_tuple = ('H', 'ui2', 'ushort', 'uint2', 'uint16')
####Integer
int_tuple = ('i', 'i4', 'i32', 'int', 'int32','intc')
####Unsigned int
uint_tuple = ('I', 'ui4', 'ui32', 'uint', 'uint32', 'uintc')
####Long int
long_tuple = ('l', 'l8', 'l64', 'long', 'long64', 'longc',
'intpy', 'pyint', 'int64')
####Unsigned long int
ulong_tuple = ('L', 'ul8', 'ul64', 'ulong', 'ulong64', 'ulongc',
'uintpy', 'pyuint', 'uint64')
######Float
float_tuple =('f', 'float', 'single', 'float32', 'real4', 'r4')
######Complex float
cfloat_tuple = ('F', 'c8','complex','complex64','cfloat')
#####Double
double_tuple = ('d', 'double', 'real8', 'r8', 'float64',
'floatpy', 'pyfloat')
######Complex Double
cdouble_tuple=('D', 'c16', 'complex128', 'cdouble')
####Mapping to numpy data type
typeDict = {}
for dtuple in (byte_tuple, ubyte_tuple,
short_tuple, ushort_tuple,
int_tuple, uint_tuple,
long_tuple, ulong_tuple,
float_tuple, cfloat_tuple,
double_tuple, cdouble_tuple):
for dtype in dtuple:
typeDict[dtype] = dtuple[0]
def NUMPY_type(instr):
'''
Translates a given string into a numpy data type string.
'''
tstr = instr.strip()
if len(tstr) == 1:
key = tstr
else:
key = tstr.lower()
try:
npType = typeDict[key]
except:
raise ValueError('Unknown data type provided : %s '%(instr))
return npType
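####Illustrative usage of the datatype translation above (a minimal sketch,
####shown as doctest-style comments and not executed here): multi-character
####codes are case-insensitive, while single-character codes follow numpy's
####case-sensitive convention.
####    >>> NUMPY_type('cfloat')
####    'F'
####    >>> NUMPY_type('FLOAT')
####    'f'
####    >>> NUMPY_type('D')
####    'D'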
isceTypeDict = {
"f" : "FLOAT",
"F" : "CFLOAT",
"d" : "DOUBLE",
"h" : "SHORT",
"i" : "INT",
"l" : "LONG",
}
def printNUMPYMap():
import json
return json.dumps(typeDict, indent=4, sort_keys=True)
#########Classes and utils to deal with strings ###############
def isNumeric(s):
'''
Determine if a string is a number.
'''
try:
float(s)
return True
except (ValueError, TypeError):
return False
class NumericStringParser(object):
'''
Parse the input expression using Python's inbuilt parser.
'''
def __init__(self, num_string):
'''
Create a parser object with input string.
'''
self.string = num_string
self._restricted = fnDict.keys() + constDict.keys()
def parse(self):
'''
Parse the input expression to get list of identifiers.
'''
try:
symTable = symtable.symtable(self.string, 'string', 'eval')
except:
raise IOError('Not a valid python math expression \n' +
self.string)
idents = symTable.get_identifiers()
known = []
unknown = []
for ident in idents:
if ident not in self._restricted:
unknown.append(ident)
else:
known.append(ident)
for val in unknown:
band = val.split('_')[0]
if len(band)!=1:
raise IOError('Multi character variables in input expressions represent functions or constants. Unknown function or constant : %s'%(val))
elif (band.lower() != band):
raise IOError('Single character upper case letters are reserved for constants. No available constant named %s'%(val))
return unknown, known
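####Illustrative parse (a minimal sketch; symtable does not guarantee the
####ordering of the returned identifiers, hence the sorting):
####    >>> nsp = NumericStringParser('a_0 + abs(b)')
####    >>> unknown, known = nsp.parse()
####    >>> sorted(unknown), sorted(known)
####    (['a_0', 'b'], ['abs'])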
def uniqueList(seq):
'''
Returns a list with unique elements in a list.
'''
seen = set()
seen_add = seen.add
return [ x for x in seq if x not in seen and not seen_add(x)]
def bandsToFiles(bandList, logger):
'''
Take a list of band names and convert it to file names.
'''
flist = []
for band in bandList:
names = band.split('_')
if len(names) > 2:
logger.error('Invalid band name: %s'%band)
if names[0] not in flist:
flist.append(names[0])
logger.debug('Number of input files : %d'%len(flist))
logger.debug('Input files: ' + str(flist))
return flist
#######Create the logger for the application
def createLogger(debug):
'''
Creates an appropriate logger.
'''
# logging.basicConfig()
logger = logging.getLogger('imageMath')
consoleHandler = logging.StreamHandler()
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s\n%(message)s')
consoleHandler.setFormatter(formatter)
if debug:
logger.setLevel(logging.DEBUG)
consoleHandler.setLevel(logging.DEBUG)
else:
logger.setLevel(logging.INFO)
consoleHandler.setLevel(logging.INFO)
logger.addHandler(consoleHandler)
return logger
##########Classes and utils for memory maps
class memmap(object):
'''Create the memap object.'''
def __init__(self,fname, mode='readonly', nchannels=1, nxx=None, nyy=None, scheme='BSQ', dataType='f'):
'''Init function.'''
fsize = np.zeros(1, dtype=dataType).itemsize
if nxx is None:
raise ValueError('Undefined file width for : %s'%(fname))
if mode=='write':
if nyy is None:
raise ValueError('Undefined file length for opening file: %s in write mode.'%(fname))
else:
try:
nbytes = os.path.getsize(fname)
except:
raise ValueError('Non-existent file : %s'%(fname))
if nyy is None:
nyy = nbytes/(fsize*nchannels*nxx)
if (nxx*nyy*fsize*nchannels) != nbytes:
raise ValueError('File size mismatch for %s. Fractional number of lines'%(fname))
elif (nxx*nyy*fsize*nchannels) > nbytes:
raise ValueError('File size mismatch for %s. Expected at least %d bytes, found %d'%(fname, nxx*nyy*fsize*nchannels, nbytes))
self.name = fname
self.width = nxx
self.length = nyy
####List of memmap objects
acc = []
####Create the memmap for the full file
nshape = nchannels*nyy*nxx
omap = np.memmap(fname, dtype=dataType, mode=mode,
shape = (nshape,))
if scheme.upper() == 'BIL':
nstrides = (nchannels*nxx*fsize, fsize)
for band in xrange(nchannels):
###Starting offset
noffset = band*nxx
###Temporary view
tmap = omap[noffset:]
####Trick it into creating a 2D array
fmap = as_strided(tmap, shape=(nyy,nxx), strides=nstrides)
###Add to list of objects
acc.append(fmap)
elif scheme.upper() == 'BSQ':
nstrides = (nxx*fsize, fsize)
for band in xrange(nchannels):
###Starting offset
noffset = band*nxx*nyy
###Temporary view
tmap = omap[noffset:noffset+nxx*nyy]
####Reshape into 2D array
fmap = as_strided(tmap, shape=(nyy,nxx), strides=nstrides)
###Add to list of objects
acc.append(fmap)
elif scheme.upper() == 'BIP':
nstrides = (nchannels*nxx*fsize,nchannels*fsize)
for band in xrange(nchannels):
####Starting offset
noffset = band
####Temporary view
tmap = omap[noffset:]
####Trick it into interpreting it as a 2D array
fmap = as_strided(tmap, shape=(nyy,nxx), strides=nstrides)
####Add to the list of objects
acc.append(fmap)
else:
raise ValueError('Unknown file scheme: %s for file %s'%(scheme,fname))
######Assigning list of objects to self.bands
self.bands = acc
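####Illustrative use of the memmap wrapper (hypothetical file name): open a
####two-band float32 BIL image of width 3200 and read the first line of each
####band. Each entry of .bands behaves like a 2D (length x width) array and
####nyy is derived from the file size when not given.
####    mobj = memmap('resamp_2band.bil', nchannels=2, nxx=3200,
####                  scheme='BIL', dataType='f')
####    first_line_b0 = mobj.bands[0][0, :]
####    first_line_b1 = mobj.bands[1][0, :]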
def mmapFromISCE(fname, logger):
'''
Create a file mmap object using information in an ISCE XML.
'''
try:
import isce
import iscesys
from iscesys.Parsers.FileParserFactory import createFileParser
except:
raise ImportError('ISCE has not been installed or is not importable')
if not fname.endswith('.xml'):
dataName = fname
metaName = fname + '.xml'
else:
metaName = fname
dataName = os.path.splitext(fname)[0]
parser = createFileParser('xml')
prop, fac, misc = parser.parse(metaName)
logger.debug('Creating readonly ISCE mmap with \n' +
'file = %s \n'%(dataName) +
'bands = %d \n'%(prop['number_bands']) +
'width = %d \n'%(prop['width']) +
'length = %d \n'%(prop['length'])+
'scheme = %s \n'%(prop['scheme']) +
'dtype = %s \n'%(prop['data_type']))
mObj = memmap(dataName, nchannels=prop['number_bands'],
nxx=prop['width'], nyy=prop['length'], scheme=prop['scheme'],
dataType=NUMPY_type(prop['data_type']))
return mObj
def mmapFromStr(fstr, logger):
'''
Create a file mmap object using information provided on command line.
Grammar = 'filename;width;datatype;bands;scheme'
'''
def grammarError():
raise SyntaxError("Undefined image : %s \n" +
"Grammar='filename;width;datatype;bands;scheme'"%(fstr))
parms = fstr.split(';')
logger.debug('Input string: ' + str(parms))
if len(parms) < 2:
grammarError()
try:
fname = parms[0]
width = int(parms[1])
if len(parms)>2:
datatype = NUMPY_type(parms[2])
else:
datatype='f'
if len(parms)>3:
bands = int(parms[3])
else:
bands = 1
if len(parms)>4:
scheme = parms[4].upper()
else:
scheme = 'BSQ'
if scheme not in ['BIL', 'BIP', 'BSQ']:
raise IOError('Invalid file interleaving scheme: %s'%scheme)
except:
grammarError()
logger.debug('Creating readonly mmap from string with \n' +
'file = %s \n'%(fname) +
'bands = %d \n'%(bands) +
'width = %d \n'%(width) +
'scheme = %s \n'%(scheme) +
'dtype = %s \n'%(datatype))
mObj = memmap(fname, nchannels=bands, nxx=width,
scheme=scheme, dataType=datatype)
return mObj
#######ISCE XML rendering
def renderISCEXML(fname, bands, nyy, nxx, datatype, scheme, descr):
'''
Renders an ISCE XML with the right information.
'''
try:
import isce
import isceobj
except:
raise ImportError('ISCE has not been installed or is not importable.')
img = isceobj.createImage()
img.filename = fname
img.scheme = scheme
img.width=nxx
img.length = nyy
try:
img.dataType = isceTypeDict[datatype]
except:
try:
img.dataType = isceTypeDict[NUMPY_type(datatype)]
except:
raise Exception('Processing complete but ISCE XML not written as the data type is currently not supported by ISCE Image Api')
img.addDescription(descr)
img.bands = bands
img.setAccessMode('read')
img.createImage()
img.renderHdr()
img.finalizeImage()
#######Command line parsing
def detailedHelp():
'''
Return the detailed help message.
'''
msg = helpStr + '\n\n'+ \
'Available Functions \n' + \
'********************\n' + \
str(fnDict.keys()) + '\n\n' + \
'Available Constants \n' + \
'********************\n' + \
str(constDict.keys()) + '\n\n' + \
'Available DataTypes -> numpy code mapping \n' + \
'***************************************** \n'+ \
printNUMPYMap() + '\n'
return msg
class customArgparseFormatter(argparse.ArgumentDefaultsHelpFormatter, argparse.RawDescriptionHelpFormatter):
pass
class customArgparseAction(argparse.Action):
def __call__(self, parser, args, values, option_string=None):
'''
The action to be performed.
'''
print detailedHelp()
parser.print_help()
parser.exit()
def firstPassCommandLine():
'''
Take a first parse at command line parsing.
Read only the basic required fields
'''
#####Create the generic parser to get equation and output format first
parser = argparse.ArgumentParser(description='ISCE Band math calculator.',
formatter_class=customArgparseFormatter)
# help_parser = subparser.add_
parser.add_argument('-H','--hh', nargs=0, action=customArgparseAction,
help='Display detailed help information.')
parser.add_argument('-e','--eval', type=str, required=True, action='store',
help='Expression to evaluate.', dest='equation')
parser.add_argument('-o','--out', type=str, default=None, action='store',
help='Name of the output file', dest='out')
parser.add_argument('-s','--scheme',type=str, default='BSQ', action='store',
help='Output file format.', dest='scheme')
parser.add_argument('-t','--type', type=str, default='float', action='store',
help='Output data type.', dest='dtype')
parser.add_argument('-d','--debug', action='store_true', default=False,
help='Print debugging statements', dest='debug')
parser.add_argument('-n','--noxml', action='store_true', default=False,
help='Do not create an ISCE XML file for the output.', dest='noxml')
#######Parse equation and output format first
args, files = parser.parse_known_args()
#####Check the output scheme for errors
if args.scheme.upper() not in ['BSQ', 'BIL', 'BIP']:
raise IOError('Unknown output scheme: %s'%(args.scheme))
iMath['outScheme'] = args.scheme.upper()
npType = NUMPY_type(args.dtype)
iMath['outType'] = npType
return args, files
class customArgumentParser(argparse.ArgumentParser):
def error(self, message):
raise Exception(message)
def parseInputFile(varname, args):
'''
Get the input string corresponding to given variable name.
'''
inarg = varname.strip()
####Keyname corresponds to specific
key = '--' + inarg
if len(varname.strip()) > 1:
raise IOError('Input variable names should be single characters.\n' +
'Invalid variable name: %s'%varname)
if (inarg != inarg.lower()):
raise IOError('Input variable names should be lower case. \n' +
'Invalid variable name: %s'%varname)
#####Create a simple parser
parser = customArgumentParser(description='Parser for band math.',
add_help=False)
parser.add_argument(key, type=str, required=True, action='store',
help='Input string for a particular variable.', dest='instr')
try:
infile, rest = parser.parse_known_args(args)
except:
raise SyntaxError('Input file : "%s" not defined on command line'%varname)
return infile.instr, rest
#######The main driver that puts everything together
if __name__ == '__main__':
args, files = firstPassCommandLine()
#######Set up logger appropriately
logger = createLogger(args.debug)
logger.debug('Known: '+ str(args))
logger.debug('Optional: '+ str(files))
#######Determine number of input and output bands
bandList = []
for ii,expr in enumerate(args.equation.split(';')):
#####Now parse the equation to get the file names used
nsp = NumericStringParser(expr)
logger.debug('Input Expression: %d : %s'%(ii, expr))
bands, known = nsp.parse()
logger.debug('Unknown variables: ' + str(bands))
logger.debug('Known variables: ' + str(known))
iMath['equations'].append(expr)
bandList = bandList + bands
bandList = uniqueList(bandList)
numOutBands = len(iMath['equations'])
logger.debug('Number of output bands = %d'%(numOutBands))
logger.debug('Number of input bands used = %d'%(len(bandList)))
logger.debug('Input bands used = ' + str(bandList))
#####Determine unique images from the bandList
fileList = bandsToFiles(bandList, logger)
######Create input memmaps
for ii,infile in enumerate(fileList):
fstr, files = parseInputFile(infile, files)
logger.debug('Input string for File %d: %s: %s'%(ii, infile, fstr))
if len(fstr.split(';')) > 1:
fmap = mmapFromStr(fstr, logger)
else:
fmap = mmapFromISCE(fstr, logger)
iMath['inFiles'][infile] = fmap
if len(fmap.bands) == 1:
iMath['inBands'][infile] = fmap.bands[0]
for ii in xrange(len(fmap.bands)):
iMath['inBands']['%s_%d'%(infile, ii)] = fmap.bands[ii]
if len(files):
raise IOError('Unused input variables set:\n'+ ' '.join(files))
#######Some debugging
logger.debug('List of available bands: ' + str(iMath['inBands'].keys()))
####If used in calculator mode.
if len(bandList) == 0:
dataDict=dict(fnDict.items() + constDict.items())
logger.info('Calculator mode. No output files created')
for ii, equation in enumerate(iMath['equations']):
res = eval(equation, dataDict)
logger.info('Output Band %d : %f '%(ii, res))
sys.exit(0)
else:
if args.out is None:
raise IOError('Output file has not been defined.')
#####Check if all bands in bandList have been accounted for
for band in bandList:
if band not in iMath['inBands'].keys():
raise ValueError('Undefined band : %s '%(band))
######Check if all the widths match
widths = [img.width for var,img in iMath['inFiles'].iteritems() ]
if len(widths) != widths.count(widths[0]):
logger.debug('Widths of images: ' +
str([(var, img.name, img.width) for var,img in iMath['inFiles'].iteritems()]))
raise IOError('Input images are not of same width')
iMath['width'] = widths[0]
logger.debug('Output Width = %d'%(iMath['width']))
#######Check if all the lengths match
lengths=[img.length for var,img in iMath['inFiles'].iteritems()]
if len(lengths) != lengths.count(lengths[0]):
logger.debug('Lengths of images: ' +
str([(var, img.name, img.length) for var,img in iMath['inFiles'].iteritems()]))
raise IOError('Input images are not of the same length')
iMath['length'] = lengths[0]
logger.debug('Output Length = %d'%(iMath['length']))
#####Now create the output file
outmap = memmap(args.out, mode='write', nchannels=numOutBands,
nxx=iMath['width'], nyy=iMath['length'], scheme=iMath['outScheme'],
dataType=iMath['outType'])
logger.debug('Creating output ISCE mmap with \n' +
'file = %s \n'%(args.out) +
'bands = %d \n'%(numOutBands) +
'width = %d \n'%(iMath['width']) +
'length = %d \n'%(iMath['length'])+
'scheme = %s \n'%(iMath['outScheme']) +
'dtype = %s \n'%(iMath['outType']))
iMath['outBands'] = outmap.bands
#####Start evaluating the expressions
####Set up the name space to use
dataDict=dict(fnDict.items() + constDict.items())
bands = iMath['inBands']
outBands = iMath['outBands']
#####Replace ^ by **
for lineno in xrange(iMath['length']):
####Load one line from each of the bands
for band in bandList: #iMath['inBands'].iteritems():
dataDict[band] = bands[band][lineno,:]
####For each output band
for kk,expr in enumerate(iMath['equations']):
res = eval(expr, dataDict)
outBands[kk][lineno,:] = res
######Render ISCE XML if needed
if not args.noxml:
renderISCEXML(args.out, numOutBands, iMath['length'], iMath['width'],
iMath['outType'], iMath['outScheme'], ' '.join(sys.argv))
|
# -*- coding: utf-8 -*-
# @Time : 6/10/21 5:04 PM
# @Author : Yuan Gong
# @Affiliation : Massachusetts Institute of Technology
# @Email : yuangong@mit.edu
# @File : ast_models.py
import os
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = "0,1,2,3"
import torch
import torch.nn as nn
from torch.cuda.amp import autocast
import wget
os.environ['TORCH_HOME'] = '../../pretrained_models'
import timm
from timm.models.layers import to_2tuple, trunc_normal_
# override the timm package to relax the input shape constraint.
class PatchEmbed(nn.Module):
def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768):
super().__init__()
img_size = to_2tuple(img_size)
patch_size = to_2tuple(patch_size)
num_patches = (img_size[1] // patch_size[1]) * (img_size[0] // patch_size[0])
self.img_size = img_size
self.patch_size = patch_size
self.num_patches = num_patches
self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size)
def forward(self, x):
x = self.proj(x).flatten(2).transpose(1, 2)
return x
class ASTModel(nn.Module):
"""
The AST model.
:param label_dim: the label dimension, i.e., the number of total classes, it is 527 for AudioSet, 50 for ESC-50, and 35 for speechcommands v2-35
:param fstride: the stride of patch splitting on the frequency dimension, for 16*16 patches, fstride=16 means no overlap, fstride=10 means overlap of 6
:param tstride: the stride of patch splitting on the time dimension, for 16*16 patches, tstride=16 means no overlap, tstride=10 means overlap of 6
:param input_fdim: the number of frequency bins of the input spectrogram
:param input_tdim: the number of time frames of the input spectrogram
:param imagenet_pretrain: if use ImageNet pretrained model
:param audioset_pretrain: if use full AudioSet and ImageNet pretrained model
:param model_size: the model size of AST, should be in [tiny224, small224, base224, base384]; base224 and base384 are the same model, but are trained differently during ImageNet pretraining.
"""
def __init__(self, label_dim=3, fstride=10, tstride=10, input_fdim=128, input_tdim=1024, imagenet_pretrain=True,
audioset_pretrain=True, model_size='base384', verbose=True):
super(ASTModel, self).__init__()
assert timm.__version__ == '0.4.5', 'Please use timm == 0.4.5, the code might not be compatible with newer versions.'
if verbose == True:
print('---------------AST Model Summary---------------')
print('ImageNet pretraining: {:s}, AudioSet pretraining: {:s}'.format(str(imagenet_pretrain),
str(audioset_pretrain)))
# override timm input shape restriction
timm.models.vision_transformer.PatchEmbed = PatchEmbed
# if AudioSet pretraining is not used (but ImageNet pretraining may still apply)
if audioset_pretrain == False:
if model_size == 'tiny224':
self.v = timm.create_model('vit_deit_tiny_distilled_patch16_224', pretrained=imagenet_pretrain)
elif model_size == 'small224':
self.v = timm.create_model('vit_deit_small_distilled_patch16_224', pretrained=imagenet_pretrain)
elif model_size == 'base224':
self.v = timm.create_model('vit_deit_base_distilled_patch16_224', pretrained=imagenet_pretrain)
elif model_size == 'base384':
self.v = timm.create_model('vit_deit_base_distilled_patch16_384', pretrained=imagenet_pretrain)
else:
raise Exception('Model size must be one of tiny224, small224, base224, base384.')
self.original_num_patches = self.v.patch_embed.num_patches
self.oringal_hw = int(self.original_num_patches ** 0.5)
self.original_embedding_dim = self.v.pos_embed.shape[2]
self.mlp_head = nn.Sequential(nn.LayerNorm(self.original_embedding_dim),
nn.Linear(self.original_embedding_dim, label_dim))
# automatically get the intermediate shape
f_dim, t_dim = self.get_shape(fstride, tstride, input_fdim, input_tdim)
num_patches = f_dim * t_dim
self.v.patch_embed.num_patches = num_patches
if verbose == True:
print('frequency stride={:d}, time stride={:d}'.format(fstride, tstride))
print('number of patches={:d}'.format(num_patches))
# the linear projection layer
new_proj = torch.nn.Conv2d(1, self.original_embedding_dim, kernel_size=(16, 16), stride=(fstride, tstride))
if imagenet_pretrain == True:
new_proj.weight = torch.nn.Parameter(torch.sum(self.v.patch_embed.proj.weight, dim=1).unsqueeze(1))
new_proj.bias = self.v.patch_embed.proj.bias
self.v.patch_embed.proj = new_proj
# the positional embedding
if imagenet_pretrain == True:
# get the positional embedding from deit model, skip the first two tokens (cls token and distillation token), reshape it to original 2D shape (24*24).
new_pos_embed = self.v.pos_embed[:, 2:, :].detach().reshape(1, self.original_num_patches,
self.original_embedding_dim).transpose(1,
2).reshape(
1, self.original_embedding_dim, self.oringal_hw, self.oringal_hw)
# cut (from middle) or interpolate the second dimension of the positional embedding
if t_dim <= self.oringal_hw:
new_pos_embed = new_pos_embed[:, :, :,
int(self.oringal_hw / 2) - int(t_dim / 2): int(self.oringal_hw / 2) - int(
t_dim / 2) + t_dim]
else:
new_pos_embed = torch.nn.functional.interpolate(new_pos_embed, size=(self.oringal_hw, t_dim),
mode='bilinear')
# cut (from middle) or interpolate the first dimension of the positional embedding
if f_dim <= self.oringal_hw:
new_pos_embed = new_pos_embed[:, :,
int(self.oringal_hw / 2) - int(f_dim / 2): int(self.oringal_hw / 2) - int(
f_dim / 2) + f_dim, :]
else:
new_pos_embed = torch.nn.functional.interpolate(new_pos_embed, size=(f_dim, t_dim), mode='bilinear')
# flatten the positional embedding
new_pos_embed = new_pos_embed.reshape(1, self.original_embedding_dim, num_patches).transpose(1, 2)
# concatenate the above positional embedding with the cls token and distillation token of the deit model.
self.v.pos_embed = nn.Parameter(torch.cat([self.v.pos_embed[:, :2, :].detach(), new_pos_embed], dim=1))
else:
# if not use imagenet pretrained model, just randomly initialize a learnable positional embedding
# TODO can use sinusoidal positional embedding instead
new_pos_embed = nn.Parameter(
torch.zeros(1, self.v.patch_embed.num_patches + 2, self.original_embedding_dim))
self.v.pos_embed = new_pos_embed
trunc_normal_(self.v.pos_embed, std=.02)
# now load a model that is pretrained on both ImageNet and AudioSet
elif audioset_pretrain == True:
if audioset_pretrain == True and imagenet_pretrain == False:
raise ValueError(
'currently model pretrained on only audioset is not supported, please set imagenet_pretrain = True to use audioset pretrained model.')
if model_size != 'base384':
raise ValueError('currently only has base384 AudioSet pretrained model.')
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
if os.path.exists('../../pretrained_models/audioset_10_10_0.4593.pth') == False:
# this model performs 0.4593 mAP on the audioset eval set
audioset_mdl_url = 'https://www.dropbox.com/s/cv4knew8mvbrnvq/audioset_0.4593.pth?dl=1'
wget.download(audioset_mdl_url, out='../../pretrained_models/audioset_10_10_0.4593.pth')
sd = torch.load('../../pretrained_models/audioset_10_10_0.4593.pth', map_location=device)
# sd = torch.load('../../pretrained_models/ast_audioset.pth', map_location=device)
audio_model = ASTModel(label_dim=527, fstride=10, tstride=10, input_fdim=128, input_tdim=1024,
imagenet_pretrain=False, audioset_pretrain=False, model_size='base384',
verbose=False)
audio_model = torch.nn.DataParallel(audio_model)
print("***************USING=>", torch.cuda.current_device())
audio_model.load_state_dict(sd, strict=False)
self.v = audio_model.module.v
self.original_embedding_dim = self.v.pos_embed.shape[2]
self.mlp_head = nn.Sequential(nn.LayerNorm(self.original_embedding_dim),
nn.Linear(self.original_embedding_dim, label_dim))
f_dim, t_dim = self.get_shape(fstride, tstride, input_fdim, input_tdim)
num_patches = f_dim * t_dim
self.v.patch_embed.num_patches = num_patches
if verbose == True:
print('frequency stride={:d}, time stride={:d}'.format(fstride, tstride))
print('number of patches={:d}'.format(num_patches))
new_pos_embed = self.v.pos_embed[:, 2:, :].detach().reshape(1, 1212, 768).transpose(1, 2).reshape(1, 768,
12, 101)
# if the input sequence length is larger than the original audioset (10s), then cut the positional embedding
if t_dim < 101:
new_pos_embed = new_pos_embed[:, :, :, 50 - int(t_dim / 2): 50 - int(t_dim / 2) + t_dim]
# otherwise interpolate
else:
new_pos_embed = torch.nn.functional.interpolate(new_pos_embed, size=(12, t_dim), mode='bilinear')
print("NEW POST EMBED:", new_pos_embed.shape)
new_pos_embed = new_pos_embed.reshape(1, 768, num_patches).transpose(1, 2)
print("NEW POST EMBED:", new_pos_embed.shape)
self.v.pos_embed = nn.Parameter(torch.cat([self.v.pos_embed[:, :2, :].detach(), new_pos_embed], dim=1))
def get_shape(self, fstride, tstride, input_fdim=128, input_tdim=1024):
test_input = torch.randn(1, 1, input_fdim, input_tdim)
test_proj = nn.Conv2d(1, self.original_embedding_dim, kernel_size=(16, 16), stride=(fstride, tstride))
test_out = test_proj(test_input)
f_dim = test_out.shape[2]
t_dim = test_out.shape[3]
return f_dim, t_dim
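# Worked example of the conv-based shape probe above: with the defaults
# input_fdim=128, input_tdim=1024, 16x16 patches and fstride=tstride=10,
# f_dim = (128 - 16) // 10 + 1 = 12 and t_dim = (1024 - 16) // 10 + 1 = 101,
# so num_patches = 12 * 101 = 1212, which is the shape hard-coded in the
# AudioSet positional-embedding reshape above.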
@autocast()
def forward(self, x):
"""
:param x: the input spectrogram, expected shape: (batch_size, time_frame_num, frequency_bins), e.g., (12, 1024, 128)
:return: prediction
"""
# expect input x = (batch_size, time_frame_num, frequency_bins), e.g., (12, 1024, 128)
x = x.unsqueeze(1)
x = x.transpose(2, 3)
B = x.shape[0]
x = self.v.patch_embed(x)
cls_tokens = self.v.cls_token.expand(B, -1, -1)
dist_token = self.v.dist_token.expand(B, -1, -1)
x = torch.cat((cls_tokens, dist_token, x), dim=1)
x = x + self.v.pos_embed
x = self.v.pos_drop(x)
for blk in self.v.blocks:
x = blk(x)
x = self.v.norm(x)
x = (x[:, 0] + x[:, 1]) / 2
# x = self.mlp_head(x)
return x
# if __name__ == '__main__':
# input_tdim = 100
# ast_mdl = ASTModel(input_tdim=input_tdim)
# # input a batch of 10 spectrogram, each with 100 time frames and 128 frequency bins
# test_input = torch.rand([10, input_tdim, 128])
# test_output = ast_mdl(test_input)
# # output should be in shape [10, 527], i.e., 10 samples, each with prediction of 527 classes.
# print(test_output.shape)
#
# input_tdim = 512
# ast_mdl = ASTModel(input_tdim=input_tdim, label_dim=50, audioset_pretrain=True)
# # input a batch of 10 spectrogram, each with 512 time frames and 128 frequency bins
# test_input = torch.rand([10, input_tdim, 128])
# test_output = ast_mdl(test_input)
# # output should be in shape [10, 527], i.e., 10 samples, each with prediction of 527 classes.
# print(test_output.shape)
|
"""
Read in the output from the trace-inputlocator script and create a GraphViz file.
Pass as input the path to the yaml output of the trace-inputlocator script via config file.
The output is written to the trace-inputlocator location.
WHY? because the trace-inputlocator only has the GraphViz output of the last call to the script. This
version re-creates the trace-data from the (merged) yaml file (the yaml output is merged if pre-existing in the output
file).
"""
from __future__ import print_function
import yaml
import cea.config
from cea.tests.trace_inputlocator import create_graphviz_output
def main(config):
with open(config.trace_inputlocator.yaml_output_file, 'r') as f:
yaml_data = yaml.safe_load(f)
trace_data = []
for script in yaml_data.keys():
for direction in ('input', 'output'):
for locator, file in yaml_data[script][direction]:
trace_data.append((direction, script, locator, file))
create_graphviz_output(trace_data, config.trace_inputlocator.graphviz_output_file)
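# Illustrative shape of the merged yaml file this script consumes (script and
# locator names below are hypothetical). Each script maps to 'input'/'output'
# lists of (locator method, file path) pairs:
#
#   demand:
#     input:
#       - [get_zone_geometry, inputs/building-geometry/zone.shp]
#     output:
#       - [get_total_demand, outputs/data/demand/Total_demand.csv]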
if __name__ == '__main__':
main(cea.config.Configuration())
|
# Copyright 2017 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------------------
import argparse
import asyncio
import logging
import os
from signal import signal, SIGINT
import sys
# import rethinkdb as r
from sanic import Sanic
from sawtooth_signing import create_context
from sawtooth_signing import ParseError
# from sawtooth_signing.secp256k1 import Secp256k1PrivateKey
from sawtooth_signing import CryptoFactory
# from sawtooth_signing.secp256k1 import Secp256k1PrivateKey
from zmq.asyncio import ZMQEventLoop
from rest_api.workflow.general import get_keyfile, get_signer_from_file
from rest_api.workflow.doctors import DOCTORS_BP
from rest_api.workflow.patients import PATIENTS_BP
from rest_api.workflow.clients import CLIENTS_BP
from rest_api.workflow.evaluation import EVALUATION_BP
from rest_api.workflow.Consent import CONSENT_BP
from sawtooth_rest_api.messaging import Connection
# from api.authorization import AUTH_BP
# from api.errors import ERRORS_BP
# from api.holdings import HOLDINGS_BP
# from api.offers import OFFERS_BP
logging.basicConfig(level=logging.DEBUG)
LOGGER = logging.getLogger(__name__)
DEFAULT_CONFIG = {
'HOST': 'localhost',
'PORT': 8000,
'TIMEOUT': 500,
'VALIDATOR_URL': 'tcp://localhost:4004',
# 'DB_HOST': 'localhost',
# 'DB_PORT': 28015,
# 'DB_NAME': 'marketplace',
'DEBUG': True,
'KEEP_ALIVE': False,
'SECRET_KEY': 'ABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890',
'AES_KEY': 'ffffffffffffffffffffffffffffffff',
'BATCHER_PRIVATE_KEY': '1111111111111111111111111111111111111111111111111111111111111111',
'BATCHER_PRIVATE_KEY_FILE_NAME_CLINIC': None,
'BATCHER_PRIVATE_KEY_FILE_NAME_PATIENT': None,
'BATCHER_PRIVATE_KEY_FILE_NAME_DOCTOR': None,
'BATCHER_PRIVATE_KEY_FILE_NAME_LAB': None,
'BATCHER_PRIVATE_KEY_FILE_NAME_INSURANCE': None
}
async def open_connections(appl):
# LOGGER.warning('opening database connection')
# r.set_loop_type('asyncio')
# app.config.DB_CONN = await r.connect(
# host=app.config.DB_HOST,
# port=app.config.DB_PORT,
# db=app.config.DB_NAME)
appl.config.VAL_CONN = Connection(appl.config.VALIDATOR_URL)
LOGGER.warning('opening validator connection: ' + str(appl.config.VALIDATOR_URL))
appl.config.VAL_CONN.open()
def close_connections(appl):
# LOGGER.warning('closing database connection')
# app.config.DB_CONN.close()
LOGGER.warning('closing validator connection')
appl.config.VAL_CONN.close()
def parse_args(args):
parser = argparse.ArgumentParser()
parser.add_argument('--host',
help='The host for the api to run on.')
parser.add_argument('--port',
help='The port for the api to run on.')
parser.add_argument('--timeout',
help='Seconds to wait for a validator response')
parser.add_argument('--validator',
help='The url to connect to a running validator')
# parser.add_argument('--db-host',
# help='The host for the state database')
# parser.add_argument('--db-port',
# help='The port for the state database')
# parser.add_argument('--db-name',
# help='The name of the database')
parser.add_argument('--debug',
help='Option to run Sanic in debug mode')
parser.add_argument('--secret_key',
help='The API secret key')
parser.add_argument('--aes-key',
help='The AES key used for private key encryption')
parser.add_argument('--batcher-private-key',
help='The sawtooth key used for transaction signing')
parser.add_argument('--batcher-private-key-file-name-clinic',
help='The sawtooth key used for batch signing having clinic role')
parser.add_argument('--batcher-private-key-file-name-doctor',
help='The sawtooth key used for batch signing having doctor role')
parser.add_argument('--batcher-private-key-file-name-patient',
help='The sawtooth key used for batch signing having patient role')
parser.add_argument('--batcher-private-key-file-name-lab',
help='The sawtooth key used for batch signing having lab role')
parser.add_argument('--batcher-private-key-file-name-insurance',
help='The sawtooth key used for batch signing having insurance role')
return parser.parse_args(args)
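# Illustrative invocation (module path and key file names are placeholders;
# every flag used here is defined in parse_args above):
#
#   python rest_api/main.py \
#       --host 0.0.0.0 --port 8000 --validator tcp://localhost:4004 \
#       --batcher-private-key-file-name-clinic clinic \
#       --batcher-private-key-file-name-doctor doctor \
#       --batcher-private-key-file-name-patient patient \
#       --batcher-private-key-file-name-lab lab \
#       --batcher-private-key-file-name-insurance insurance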
def load_config(appl): # pylint: disable=too-many-branches
appl.config.update(DEFAULT_CONFIG)
config_file_path = os.path.join(
os.path.dirname(os.path.dirname(os.path.realpath(__file__))),
'config.py')
try:
appl.config.from_pyfile(config_file_path)
except FileNotFoundError:
LOGGER.warning("No config file provided")
# CLI Options will override config file options
opts = parse_args(sys.argv[1:])
if opts.host is not None:
appl.config.HOST = opts.host
if opts.port is not None:
appl.config.PORT = opts.port
if opts.timeout is not None:
appl.config.TIMEOUT = opts.timeout
if opts.validator is not None:
appl.config.VALIDATOR_URL = opts.validator
# if opts.db_host is not None:
# app.config.DB_HOST = opts.db_host
# if opts.db_port is not None:
# app.config.DB_PORT = opts.db_port
# if opts.db_name is not None:
# app.config.DB_NAME = opts.db_name
if opts.debug is not None:
appl.config.DEBUG = opts.debug
if opts.secret_key is not None:
appl.config.SECRET_KEY = opts.secret_key
if appl.config.SECRET_KEY is None:
LOGGER.exception("API secret key was not provided")
sys.exit(1)
if opts.aes_key is not None:
appl.config.AES_KEY = opts.aes_key
if appl.config.AES_KEY is None:
LOGGER.exception("AES key was not provided")
sys.exit(1)
if opts.batcher_private_key is not None:
appl.config.BATCHER_PRIVATE_KEY = opts.batcher_private_key
if appl.config.BATCHER_PRIVATE_KEY is None:
LOGGER.exception("Batcher private key was not provided")
sys.exit(1)
if opts.batcher_private_key_file_name_clinic is not None:
appl.config.BATCHER_PRIVATE_KEY_FILE_NAME_CLINIC = opts.batcher_private_key_file_name_clinic
if opts.batcher_private_key_file_name_doctor is not None:
appl.config.BATCHER_PRIVATE_KEY_FILE_NAME_DOCTOR = opts.batcher_private_key_file_name_doctor
if opts.batcher_private_key_file_name_patient is not None:
appl.config.BATCHER_PRIVATE_KEY_FILE_NAME_PATIENT = opts.batcher_private_key_file_name_patient
if opts.batcher_private_key_file_name_lab is not None:
appl.config.BATCHER_PRIVATE_KEY_FILE_NAME_LAB = opts.batcher_private_key_file_name_lab
if opts.batcher_private_key_file_name_insurance is not None:
appl.config.BATCHER_PRIVATE_KEY_FILE_NAME_INSURANCE = opts.batcher_private_key_file_name_insurance
if appl.config.BATCHER_PRIVATE_KEY_FILE_NAME_CLINIC is None:
LOGGER.exception("Batcher private key file name for Clinic entity was not provided")
sys.exit(1)
if appl.config.BATCHER_PRIVATE_KEY_FILE_NAME_DOCTOR is None:
LOGGER.exception("Batcher private key file name for Doctor entity was not provided")
sys.exit(1)
if appl.config.BATCHER_PRIVATE_KEY_FILE_NAME_PATIENT is None:
LOGGER.exception("Batcher private key file name for Patient entity was not provided")
sys.exit(1)
if appl.config.BATCHER_PRIVATE_KEY_FILE_NAME_LAB is None:
LOGGER.exception("Batcher private key file name for Lab entity was not provided")
sys.exit(1)
if appl.config.BATCHER_PRIVATE_KEY_FILE_NAME_INSURANCE is None:
LOGGER.exception("Batcher private key file name for Insurance entity was not provided")
sys.exit(1)
try:
private_key_file_name_clinic = get_keyfile(appl.config.BATCHER_PRIVATE_KEY_FILE_NAME_CLINIC)
clinic_private_key = get_signer_from_file(private_key_file_name_clinic)
private_key_file_name_doctor = get_keyfile(appl.config.BATCHER_PRIVATE_KEY_FILE_NAME_DOCTOR)
doctor_private_key = get_signer_from_file(private_key_file_name_doctor)
private_key_file_name_patient = get_keyfile(appl.config.BATCHER_PRIVATE_KEY_FILE_NAME_PATIENT)
patient_private_key = get_signer_from_file(private_key_file_name_patient)
private_key_file_name_lab = get_keyfile(appl.config.BATCHER_PRIVATE_KEY_FILE_NAME_LAB)
lab_private_key = get_signer_from_file(private_key_file_name_lab)
private_key_file_name_insurance = get_keyfile(appl.config.BATCHER_PRIVATE_KEY_FILE_NAME_INSURANCE)
insurance_private_key = get_signer_from_file(private_key_file_name_insurance)
# private_key = Secp256k1PrivateKey.from_hex(
# app.config.BATCHER_PRIVATE_KEY)
except ParseError as err:
LOGGER.exception('Unable to load private key: %s', str(err))
sys.exit(1)
appl.config.CONTEXT = create_context('secp256k1')
appl.config.SIGNER_CLINIC = CryptoFactory(
appl.config.CONTEXT).new_signer(clinic_private_key)
appl.config.SIGNER_DOCTOR = CryptoFactory(
appl.config.CONTEXT).new_signer(doctor_private_key)
appl.config.SIGNER_PATIENT = CryptoFactory(
appl.config.CONTEXT).new_signer(patient_private_key)
appl.config.SIGNER_LAB = CryptoFactory(
appl.config.CONTEXT).new_signer(lab_private_key)
appl.config.SIGNER_INSURANCE = CryptoFactory(
appl.config.CONTEXT).new_signer(insurance_private_key)
app = Sanic(__name__)
app.config['CORS_AUTOMATIC_OPTIONS'] = True
def main():
LOGGER.info('Starting Clinic Rest API server...')
# CORS(app)
app.blueprint(DOCTORS_BP)
app.blueprint(PATIENTS_BP)
app.blueprint(CLIENTS_BP)
app.blueprint(CONSENT_BP)
app.blueprint(EVALUATION_BP)
load_config(app)
zmq = ZMQEventLoop()
asyncio.set_event_loop(zmq)
server = app.create_server(
host=app.config.HOST, port=app.config.PORT, debug=app.config.DEBUG, return_asyncio_server=True)
loop = asyncio.get_event_loop()
asyncio.ensure_future(open_connections(app))
asyncio.ensure_future(server)
signal(SIGINT, lambda s, f: loop.close())
try:
LOGGER.info('Clinic Rest API server starting')
loop.run_forever()
except KeyboardInterrupt:
LOGGER.info('Clinic Rest API server interrupted')
close_connections(app)
loop.stop()
if __name__ == "__main__":
main()
|
import time
import pytest
import jwt
from cryptography.hazmat.primitives.asymmetric import rsa
from cryptography.hazmat.primitives import serialization
from authlib.jose import jwk
from util.security.jwtutil import (
decode,
exp_max_s_option,
jwk_dict_to_public_key,
InvalidTokenError,
InvalidAlgorithmError,
)
@pytest.fixture(scope="session")
def private_key():
return rsa.generate_private_key(
public_exponent=65537,
key_size=2048,
)
@pytest.fixture(scope="session")
def private_key_pem(private_key):
return private_key.private_bytes(
encoding=serialization.Encoding.PEM,
format=serialization.PrivateFormat.TraditionalOpenSSL,
encryption_algorithm=serialization.NoEncryption(),
)
@pytest.fixture(scope="session")
def public_key(private_key):
return private_key.public_key().public_bytes(
encoding=serialization.Encoding.PEM,
format=serialization.PublicFormat.SubjectPublicKeyInfo,
)
def _token_data(audience, subject, iss, iat=None, exp=None, nbf=None):
return {
"iss": iss,
"aud": audience,
"nbf": nbf() if nbf is not None else int(time.time()),
"iat": iat() if iat is not None else int(time.time()),
"exp": exp() if exp is not None else int(time.time() + 3600),
"sub": subject,
}
@pytest.mark.parametrize(
"aud, iss, nbf, iat, exp, expected_exception",
[
pytest.param(
"invalidaudience",
"someissuer",
None,
None,
None,
"Invalid audience",
id="invalid audience",
),
pytest.param(
"someaudience", "invalidissuer", None, None, None, "Invalid issuer", id="invalid issuer"
),
pytest.param(
"someaudience",
"someissuer",
lambda: time.time() + 120,
None,
None,
"The token is not yet valid",
id="invalid not before",
),
pytest.param(
"someaudience",
"someissuer",
None,
lambda: time.time() + 120,
None,
"Issued At claim",
id="issued at in future",
),
pytest.param(
"someaudience",
"someissuer",
None,
None,
lambda: time.time() - 100,
"Signature has expired",
id="already expired",
),
pytest.param(
"someaudience",
"someissuer",
None,
None,
lambda: time.time() + 10000,
"Token was signed for more than",
id="expiration too far in future",
),
pytest.param(
"someaudience",
"someissuer",
lambda: time.time() + 10,
None,
None,
None,
id="not before in future by within leeway",
),
pytest.param(
"someaudience",
"someissuer",
None,
lambda: time.time() + 10,
None,
None,
id="issued at in future but within leeway",
),
pytest.param(
"someaudience",
"someissuer",
None,
None,
lambda: time.time() - 10,
None,
id="expiration in past but within leeway",
),
],
)
def test_decode_jwt_validation(
aud, iss, nbf, iat, exp, expected_exception, private_key_pem, public_key
):
token = jwt.encode(_token_data(aud, "subject", iss, iat, exp, nbf), private_key_pem, "RS256")
if expected_exception is not None:
with pytest.raises(InvalidTokenError) as ite:
max_exp = exp_max_s_option(3600)
decode(
token,
public_key,
algorithms=["RS256"],
audience="someaudience",
issuer="someissuer",
options=max_exp,
leeway=60,
)
assert ite.match(expected_exception)
else:
max_exp = exp_max_s_option(3600)
decode(
token,
public_key,
algorithms=["RS256"],
audience="someaudience",
issuer="someissuer",
options=max_exp,
leeway=60,
)
def test_decode_jwt_invalid_key(private_key_pem):
# Encode with the test private key.
token = jwt.encode(_token_data("aud", "subject", "someissuer"), private_key_pem, "RS256")
# Try to decode with a different public key.
another_public_key = (
rsa.generate_private_key(
public_exponent=65537,
key_size=2048,
)
.public_key()
.public_bytes(
encoding=serialization.Encoding.PEM,
format=serialization.PublicFormat.SubjectPublicKeyInfo,
)
)
with pytest.raises(InvalidTokenError) as ite:
max_exp = exp_max_s_option(3600)
decode(
token,
another_public_key,
algorithms=["RS256"],
audience="aud",
issuer="someissuer",
options=max_exp,
leeway=60,
)
assert ite.match("Signature verification failed")
def test_decode_jwt_invalid_algorithm(private_key_pem, public_key):
# Encode with the test private key.
token = jwt.encode(_token_data("aud", "subject", "someissuer"), private_key_pem, "RS256")
# Attempt to decode but only with a different algorithm than that used.
with pytest.raises(InvalidAlgorithmError) as ite:
max_exp = exp_max_s_option(3600)
decode(
token,
public_key,
algorithms=["ES256"],
audience="aud",
issuer="someissuer",
options=max_exp,
leeway=60,
)
assert ite.match("are not whitelisted")
def test_jwk_dict_to_public_key(private_key, private_key_pem):
public_key = private_key.public_key()
key_dict = jwk.dumps(
public_key.public_bytes(
encoding=serialization.Encoding.PEM,
format=serialization.PublicFormat.SubjectPublicKeyInfo,
)
)
converted = jwk_dict_to_public_key(key_dict)
# Encode with the test private key.
token = jwt.encode(_token_data("aud", "subject", "someissuer"), private_key_pem, "RS256")
# Decode with the converted key.
max_exp = exp_max_s_option(3600)
decode(
token,
converted,
algorithms=["RS256"],
audience="aud",
issuer="someissuer",
options=max_exp,
leeway=60,
)
|
from typing import Any, Dict, Optional
from django.contrib.postgres.fields import JSONField
from django.core.validators import RegexValidator
from django.db import models
from django.utils.timezone import datetime
from django.utils.translation import gettext_lazy as _
from core.fields import IntegerChoicesField
# Validators
def CountryCodeValidator() -> RegexValidator:
return RegexValidator(r'^[a-z]{2}$')
# Enums
CHART_TYPE_API = ['top-free', 'top-paid']
class ChartType(models.IntegerChoices):
FREE = 0, _("Top Free Apps")
PAID = 1, _("Top Paid Apps")
@classmethod
def from_api(cls, value: str) -> 'ChartType':
if value not in CHART_TYPE_API:
raise ValueError(f"Unknown chart type: {value}")
return cls(CHART_TYPE_API.index(value))
def to_api(self) -> str:
return CHART_TYPE_API[int(self)]
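# Illustrative round trip between the iTunes chart identifiers and the enum,
# following CHART_TYPE_API above (doctest-style comments, not executed here):
#
#     >>> ChartType.from_api('top-free') is ChartType.FREE
#     True
#     >>> ChartType.PAID.to_api()
#     'top-paid'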
# Models
class Application(models.Model):
itunes_id = models.PositiveIntegerField(primary_key=True)
@property
def latest_metadata(self) -> Optional['Metadata']:
metadata = Metadata.objects.filter(
application=self,
data__isnull=False,
).order_by('-timestamp')
if not metadata.exists():
return None
return metadata.first()
@property
def is_known(self) -> bool:
metadata = self.latest_metadata
if metadata is None:
return False
return 'attributes' in metadata.data
@property
def is_bundle(self) -> bool:
metadata = self.latest_metadata
if metadata is None:
return False
return metadata.data.get('type', None) == 'app-bundles'
@property
def timestamp(self) -> Optional[datetime]:
metadata = self.latest_metadata
if metadata is None:
return None
return metadata.timestamp
@property
def attributes(self) -> Dict[str, Any]:
metadata = self.latest_metadata
if metadata is None:
return {}
return metadata.data.get('attributes', {})
def platform_attributes(self, platform: str = 'osx') -> Dict[str, Any]:
return self.attributes.get('platformAttributes', {}).get(platform, {})
@property
def name(self) -> Optional[str]:
return self.attributes.get('name', None)
@property
def bundle_identifier(self) -> Optional[str]:
return self.platform_attributes().get('bundleId', None)
def __str__(self) -> str:
if self.name is None:
return str(self.itunes_id)
return self.name
class Genre(models.Model):
itunes_id = models.PositiveSmallIntegerField(primary_key=True)
name = models.CharField(max_length=255, blank=True, null=True, default=None)
parent = models.ForeignKey(
'Genre',
on_delete=models.CASCADE,
related_name='children',
blank=True,
null=True,
default=None,
)
def __str__(self) -> str:
if self.name is None:
return str(self.itunes_id)
return self.name
class AppStore(models.Model):
country = models.CharField(
max_length=2,
primary_key=True,
validators=[CountryCodeValidator()],
)
applications = models.ManyToManyField(
Application,
related_name='stores',
through='Metadata',
through_fields=('store', 'application'),
)
def __str__(self) -> str:
return f"{self.country}"
class Metadata(models.Model):
application = models.ForeignKey(Application, on_delete=models.CASCADE)
store = models.ForeignKey(AppStore, on_delete=models.CASCADE)
source = models.URLField(max_length=4096)
timestamp = models.DateTimeField()
data = JSONField()
class Meta:
unique_together = (('application', 'store', 'source', 'timestamp'),)
class Chart(models.Model):
genre = models.ForeignKey(
Genre,
on_delete=models.CASCADE,
related_name='charts',
)
store = models.ForeignKey(
AppStore,
on_delete=models.CASCADE,
related_name='charts',
)
chart_type = IntegerChoicesField(ChartType)
timestamp = models.DateTimeField()
class Meta:
unique_together = (('genre', 'store', 'chart_type', 'timestamp'),)
class ChartEntry(models.Model):
chart = models.ForeignKey(
Chart,
on_delete=models.CASCADE,
related_name='entries',
)
application = models.ForeignKey(Application, on_delete=models.CASCADE)
position = models.PositiveSmallIntegerField()
class Meta:
unique_together = (
('chart', 'application'),
('chart', 'position'),
('chart', 'application', 'position'),
)
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2016-12-27 15:15
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('switches', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='farfaraway',
name='last',
field=models.IntegerField(default=11),
),
]
|
# -*- coding: utf-8 -*-
# Define here the models for your spider middleware
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/spider-middleware.html
from typing import Iterable
from scrapy import signals
from .items import TransportInfo, SeatInfo
from .settings import MOCKED_DATA_PATH
class TicketerSpiderMiddleware(object):
# Not all methods need to be defined. If a method is not defined,
# scrapy acts as if the spider middleware does not modify the
# passed objects.
@classmethod
def from_crawler(cls, crawler):
# This method is used by Scrapy to create your spiders.
s = cls()
crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
return s
def process_spider_input(self, response, spider):
# Called for each response that goes through the spider
# middleware and into the spider.
# Should return None or raise an exception.
return None
def process_spider_output(self, response, result, spider):
# Called with the results returned from the Spider, after
# it has processed the response.
# Must return an iterable of Request, dict or Item objects.
for i in result:
yield i
def process_spider_exception(self, response, exception, spider):
# Called when a spider or process_spider_input() method
# (from other spider middleware) raises an exception.
# Should return either None or an iterable of Request, dict
# or Item objects.
pass
def process_start_requests(self, start_requests, spider):
# Called with the start requests of the spider, and works
# similarly to the process_spider_output() method, except
# that it doesn’t have a response associated.
# Must return only requests (not items).
for r in start_requests:
yield r
def spider_opened(self, spider):
spider.logger.info('Spider opened: %s' % spider.name)
class TicketerDownloaderMiddleware(object):
# Not all methods need to be defined. If a method is not defined,
# scrapy acts as if the downloader middleware does not modify the
# passed objects.
@classmethod
def from_crawler(cls, crawler):
# This method is used by Scrapy to create your spiders.
s = cls()
crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
return s
def process_request(self, request, spider):
# Called for each request that goes through the downloader
# middleware.
# Must either:
# - return None: continue processing this request
# - or return a Response object
# - or return a Request object
# - or raise IgnoreRequest: process_exception() methods of
# installed downloader middleware will be called
return None
def process_response(self, request, response, spider):
# Called with the response returned from the downloader.
# Must either;
# - return a Response object
# - return a Request object
# - or raise IgnoreRequest
return response
def process_exception(self, request, exception, spider):
# Called when a download handler or a process_request()
# (from other downloader middleware) raises an exception.
# Must either:
# - return None: continue processing this exception
# - return a Response object: stops process_exception() chain
# - return a Request object: stops process_exception() chain
pass
def spider_opened(self, spider):
spider.logger.info('Spider opened: %s' % spider.name)
class MockedSpiderMiddleware(object):
def process_spider_output(self, response, result: Iterable[TransportInfo], spider):
with open(MOCKED_DATA_PATH, 'wb') as fout:
fout.write(response.body)
for i in result:
yield i
class TransportScheduleSpiderMiddleware(object):
def process_spider_output(self, response, result: Iterable[TransportInfo], spider):
required_min_seats = int(spider.settings['MIN_SEATS'])
required_transport_num = spider.settings['NUM']
required_seat_type = spider.settings['SEAT_TYPE']
def eligible_transport(transport: TransportInfo) -> bool:
return required_transport_num is None or required_transport_num == transport['id']
def eligible_seat(seat: SeatInfo) -> bool:
return required_seat_type is None or seat['type'] == required_seat_type
def eligible_seats(seats: Iterable[SeatInfo]) -> Iterable[SeatInfo]:
return filter(eligible_seat, seats)
def available_seat(seat: SeatInfo) -> bool:
remaining = seat['remaining']
if remaining is None:
return False
return int(remaining) >= required_min_seats
found_any = False
for transport in result:
found = False
if eligible_transport(transport):
seats = eligible_seats(transport['seats'])
for seat in seats:
if available_seat(seat):
found = True
found_any = found_any or found
if found:
yield transport
if not found_any:
yield response.request
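# Behaviour sketch (hypothetical spider settings MIN_SEATS=2, NUM=None,
# SEAT_TYPE='coupe'): the transport below would be yielded because one
# eligible seat reports two or more remaining places; if no transport in the
# parsed page qualified, the original request is yielded again so the
# schedule is re-crawled.
#
#   TransportInfo(id='101A', seats=[
#       SeatInfo(type='coupe', remaining='3'),   # eligible and available
#       SeatInfo(type='berth', remaining='0'),   # filtered out by seat type
#   ])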
class MockedDownloaderMiddleware(object):
def process_request(self, request, spider):
from scrapy.http import HtmlResponse
with open(MOCKED_DATA_PATH, 'rb') as fin:
body = fin.read()
response = HtmlResponse(url=request.url,
body=body)
return response
|
from marshmallow import INCLUDE, Schema, fields, post_load, pre_load
class Dates:
def __init__(self, on_sale=None, foc=None, unlimited=None, **kwargs):
self.on_sale = on_sale
self.foc = foc
self.unlimited = unlimited
self.unknown = kwargs
class DatesSchema(Schema):
onsaleDate = fields.DateTime(attribute="on_sale")
focDate = fields.DateTime(attribute="foc")
unlimitedDate = fields.DateTime(attribute="unlimited")
class Meta:
unknown = INCLUDE
@pre_load
def process_input(self, data, **kwargs):
new_data = {}
for d in data:
# Marvel comic 4373, and maybe others, returns a focDate of
# "-0001-11-30T00:00:00-0500". The best way to handle this is
# probably just to ignore it, since I don't know how to fix it.
if d["date"][0] != "-":
new_data[d["type"]] = d["date"]
return new_data
@post_load
def make(self, data, **kwargs):
return Dates(**data)
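# Illustrative usage (a minimal sketch; dates shortened): the Marvel API
# returns a list of {"type": ..., "date": ...} entries, which process_input()
# flattens into a mapping before the fields are loaded:
#
#   payload = [
#       {"type": "onsaleDate", "date": "2019-01-02T00:00:00-05:00"},
#       {"type": "focDate", "date": "2018-12-03T00:00:00-05:00"},
#   ]
#   dates = DatesSchema().load(payload)   # -> Dates(on_sale=..., foc=...)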
|
# Placeholder
|
# external tool: text-processing.com sentiment API
import requests
import json
def sentiment_result(text):
URL = 'http://text-processing.com/api/sentiment/'
raw_text = text
r = requests.post(URL, data = {'text':raw_text})
sentiment = json.loads(r.text).get('label')
return sentiment
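# Illustrative usage (requires network access to the text-processing.com API;
# the returned label is one of 'pos', 'neg' or 'neutral'):
#
#   label = sentiment_result("I really enjoyed this movie!")
#   print(label)   # e.g. 'pos'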
|
# Copyright 2017 Insurance Australia Group Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
|
"""Stuff to parse AIFF-C and AIFF files.
Unless explicitly stated otherwise, the description below is true
both for AIFF-C files and AIFF files.
An AIFF-C file has the following structure.
+-----------------+
| FORM |
+-----------------+
| <size> |
+----+------------+
| | AIFC |
| +------------+
| | <chunks> |
| | . |
| | . |
| | . |
+----+------------+
An AIFF file has the string "AIFF" instead of "AIFC".
A chunk consists of an identifier (4 bytes) followed by a size (4 bytes,
big endian order), followed by the data. The size field does not include
the size of the 8 byte header.
The following chunk types are recognized.
FVER
<version number of AIFF-C defining document> (AIFF-C only).
MARK
<# of markers> (2 bytes)
list of markers:
<marker ID> (2 bytes, must be > 0)
<position> (4 bytes)
<marker name> ("pstring")
COMM
<# of channels> (2 bytes)
<# of sound frames> (4 bytes)
<size of the samples> (2 bytes)
<sampling frequency> (10 bytes, IEEE 80-bit extended
floating point)
in AIFF-C files only:
<compression type> (4 bytes)
<human-readable version of compression type> ("pstring")
SSND
<offset> (4 bytes, not used by this program)
<blocksize> (4 bytes, not used by this program)
<sound data>
A pstring consists of 1 byte length, a string of characters, and 0 or 1
byte pad to make the total length even.
Usage.
Reading AIFF files:
f = aifc.open(file, 'r')
where file is either the name of a file or an open file pointer.
The open file pointer must have methods read(), seek(), and close().
In some types of audio files, if the setpos() method is not used,
the seek() method is not necessary.
This returns an instance of a class with the following public methods:
getnchannels() -- returns number of audio channels (1 for
mono, 2 for stereo)
getsampwidth() -- returns sample width in bytes
getframerate() -- returns sampling frequency
getnframes() -- returns number of audio frames
getcomptype() -- returns compression type ('NONE' for AIFF files)
getcompname() -- returns human-readable version of
compression type ('not compressed' for AIFF files)
getparams() -- returns a tuple consisting of all of the
above in the above order
getmarkers() -- get the list of marks in the audio file or None
if there are no marks
getmark(id) -- get mark with the specified id (raises an error
if the mark does not exist)
readframes(n) -- returns at most n frames of audio
rewind() -- rewind to the beginning of the audio stream
setpos(pos) -- seek to the specified position
tell() -- return the current position
close() -- close the instance (make it unusable)
The position returned by tell(), the position given to setpos() and
the position of marks are all compatible and have nothing to do with
the actual position in the file.
The close() method is called automatically when the class instance
is destroyed.
Writing AIFF files:
f = aifc.open(file, 'w')
where file is either the name of a file or an open file pointer.
The open file pointer must have methods write(), tell(), seek(), and
close().
This returns an instance of a class with the following public methods:
aiff() -- create an AIFF file (AIFF-C default)
aifc() -- create an AIFF-C file
setnchannels(n) -- set the number of channels
setsampwidth(n) -- set the sample width
setframerate(n) -- set the frame rate
setnframes(n) -- set the number of frames
setcomptype(type, name)
-- set the compression type and the
human-readable compression type
setparams(tuple)
-- set all parameters at once
setmark(id, pos, name)
-- add specified mark to the list of marks
tell() -- return current position in output file (useful
in combination with setmark())
writeframesraw(data)
-- write audio frames without patching up the
file header
writeframes(data)
-- write audio frames and patch up the file header
close() -- patch up the file header and close the
output file
You should set the parameters before the first writeframesraw or
writeframes. The total number of frames does not need to be set,
but when it is set to the correct value, the header does not have to
be patched up.
It is best to first set all parameters, except possibly the
compression type, and then write audio frames using writeframesraw.
When all frames have been written, either call writeframes('') or
close() to patch up the sizes in the header.
Marks can be added anytime. If there are any marks, you must call
close() after all frames have been written.
The close() method is called automatically when the class instance
is destroyed.
When a file is opened with the extension '.aiff', an AIFF file is
written, otherwise an AIFF-C file is written. This default can be
changed by calling aiff() or aifc() before the first writeframes or
writeframesraw.
"""
import struct
import __builtin__
class Error(Exception):
pass
_AIFC_version = 0xA2805140 # Version 1 of AIFF-C
_skiplist = 'COMT', 'INST', 'MIDI', 'AESD', \
'APPL', 'NAME', 'AUTH', '(c) ', 'ANNO'
def _read_long(file):
try:
return struct.unpack('>l', file.read(4))[0]
except struct.error:
raise EOFError
def _read_ulong(file):
try:
return struct.unpack('>L', file.read(4))[0]
except struct.error:
raise EOFError
def _read_short(file):
try:
return struct.unpack('>h', file.read(2))[0]
except struct.error:
raise EOFError
def _read_string(file):
length = ord(file.read(1))
if length == 0:
data = ''
else:
data = file.read(length)
if length & 1 == 0:
dummy = file.read(1)
return data
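# Example: the pstring b'\x04abcd\x00' decodes to 'abcd' (1 length byte, 4 data
# bytes and 1 pad byte so that the total length, 6, is even); an odd-length
# string such as b'\x03abc' needs no pad byte.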
_HUGE_VAL = 1.79769313486231e+308 # See <limits.h>
def _read_float(f): # 10 bytes
import math
expon = _read_short(f) # 2 bytes
sign = 1
if expon < 0:
sign = -1
expon = expon + 0x8000
himant = _read_ulong(f) # 4 bytes
lomant = _read_ulong(f) # 4 bytes
if expon == himant == lomant == 0:
f = 0.0
elif expon == 0x7FFF:
f = _HUGE_VAL
else:
expon = expon - 16383
f = (himant * 0x100000000L + lomant) * pow(2.0, expon - 63)
return sign * f
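# The 10 bytes read above form an IEEE 754 80-bit extended float: 1 sign bit,
# a 15-bit exponent with bias 16383, and a 64-bit mantissa with an explicit
# integer bit, read here as two 32-bit halves (himant and lomant).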
def _write_short(f, x):
f.write(struct.pack('>h', x))
def _write_long(f, x):
f.write(struct.pack('>L', x))
def _write_string(f, s):
f.write(chr(len(s)))
f.write(s)
if len(s) & 1 == 0:
f.write(chr(0))
def _write_float(f, x):
import math
if x < 0:
sign = 0x8000
x = x * -1
else:
sign = 0
if x == 0:
expon = 0
himant = 0
lomant = 0
else:
fmant, expon = math.frexp(x)
if expon > 16384 or fmant >= 1: # Infinity or NaN
expon = sign|0x7FFF
himant = 0
lomant = 0
else: # Finite
expon = expon + 16382
if expon < 0: # denormalized
fmant = math.ldexp(fmant, expon)
expon = 0
expon = expon | sign
fmant = math.ldexp(fmant, 32)
fsmant = math.floor(fmant)
himant = long(fsmant)
fmant = math.ldexp(fmant - fsmant, 32)
fsmant = math.floor(fmant)
lomant = long(fsmant)
_write_short(f, expon)
_write_long(f, himant)
_write_long(f, lomant)
from chunk import Chunk
class Aifc_read:
# Variables used in this class:
#
# These variables are available to the user through appropriate
# methods of this class:
# _file -- the open file with methods read(), close(), and seek()
# set through the __init__() method
# _nchannels -- the number of audio channels
# available through the getnchannels() method
# _nframes -- the number of audio frames
# available through the getnframes() method
# _sampwidth -- the number of bytes per audio sample
# available through the getsampwidth() method
# _framerate -- the sampling frequency
# available through the getframerate() method
# _comptype -- the AIFF-C compression type ('NONE' if AIFF)
# available through the getcomptype() method
# _compname -- the human-readable AIFF-C compression type
# available through the getcomptype() method
# _markers -- the marks in the audio file
# available through the getmarkers() and getmark()
# methods
# _soundpos -- the position in the audio stream
# available through the tell() method, set through the
# setpos() method
#
# These variables are used internally only:
# _version -- the AIFF-C version number
# _decomp -- the decompressor from builtin module cl
# _comm_chunk_read -- 1 iff the COMM chunk has been read
# _aifc -- 1 iff reading an AIFF-C file
# _ssnd_seek_needed -- 1 iff positioned correctly in audio
# file for readframes()
# _ssnd_chunk -- instantiation of a chunk class for the SSND chunk
# _framesize -- size of one frame in the file
def initfp(self, file):
self._version = 0
self._decomp = None
self._convert = None
self._markers = []
self._soundpos = 0
self._file = Chunk(file)
if self._file.getname() != 'FORM':
raise Error, 'file does not start with FORM id'
formdata = self._file.read(4)
if formdata == 'AIFF':
self._aifc = 0
elif formdata == 'AIFC':
self._aifc = 1
else:
raise Error, 'not an AIFF or AIFF-C file'
self._comm_chunk_read = 0
while 1:
self._ssnd_seek_needed = 1
try:
chunk = Chunk(self._file)
except EOFError:
break
chunkname = chunk.getname()
if chunkname == 'COMM':
self._read_comm_chunk(chunk)
self._comm_chunk_read = 1
elif chunkname == 'SSND':
self._ssnd_chunk = chunk
dummy = chunk.read(8)
self._ssnd_seek_needed = 0
elif chunkname == 'FVER':
self._version = _read_long(chunk)
elif chunkname == 'MARK':
self._readmark(chunk)
elif chunkname in _skiplist:
pass
else:
raise Error, 'unrecognized chunk type '+chunk.chunkname
chunk.skip()
if not self._comm_chunk_read or not self._ssnd_chunk:
raise Error, 'COMM chunk and/or SSND chunk missing'
if self._aifc and self._decomp:
import cl
params = [cl.ORIGINAL_FORMAT, 0,
cl.BITS_PER_COMPONENT, self._sampwidth * 8,
cl.FRAME_RATE, self._framerate]
if self._nchannels == 1:
params[1] = cl.MONO
elif self._nchannels == 2:
params[1] = cl.STEREO_INTERLEAVED
else:
raise Error, 'cannot compress more than 2 channels'
self._decomp.SetParams(params)
def __init__(self, f):
if type(f) == type(''):
f = __builtin__.open(f, 'rb')
# else, assume it is an open file object already
self.initfp(f)
#
# User visible methods.
#
def getfp(self):
return self._file
def rewind(self):
self._ssnd_seek_needed = 1
self._soundpos = 0
def close(self):
if self._decomp:
self._decomp.CloseDecompressor()
self._decomp = None
self._file = None
def tell(self):
return self._soundpos
def getnchannels(self):
return self._nchannels
def getnframes(self):
return self._nframes
def getsampwidth(self):
return self._sampwidth
def getframerate(self):
return self._framerate
def getcomptype(self):
return self._comptype
def getcompname(self):
return self._compname
## def getversion(self):
## return self._version
def getparams(self):
return self.getnchannels(), self.getsampwidth(), \
self.getframerate(), self.getnframes(), \
self.getcomptype(), self.getcompname()
def getmarkers(self):
if len(self._markers) == 0:
return None
return self._markers
def getmark(self, id):
for marker in self._markers:
if id == marker[0]:
return marker
raise Error, 'marker ' + `id` + ' does not exist'
def setpos(self, pos):
if pos < 0 or pos > self._nframes:
raise Error, 'position not in range'
self._soundpos = pos
self._ssnd_seek_needed = 1
def readframes(self, nframes):
if self._ssnd_seek_needed:
self._ssnd_chunk.seek(0)
dummy = self._ssnd_chunk.read(8)
pos = self._soundpos * self._framesize
if pos:
self._ssnd_chunk.seek(pos + 8)
self._ssnd_seek_needed = 0
if nframes == 0:
return ''
data = self._ssnd_chunk.read(nframes * self._framesize)
if self._convert and data:
data = self._convert(data)
self._soundpos = self._soundpos + len(data) / (self._nchannels * self._sampwidth)
return data
#
# Internal methods.
#
def _decomp_data(self, data):
import cl
dummy = self._decomp.SetParam(cl.FRAME_BUFFER_SIZE,
len(data) * 2)
return self._decomp.Decompress(len(data) / self._nchannels,
data)
def _ulaw2lin(self, data):
import audioop
return audioop.ulaw2lin(data, 2)
def _adpcm2lin(self, data):
import audioop
if not hasattr(self, '_adpcmstate'):
# first time
self._adpcmstate = None
data, self._adpcmstate = audioop.adpcm2lin(data, 2,
self._adpcmstate)
return data
def _read_comm_chunk(self, chunk):
self._nchannels = _read_short(chunk)
self._nframes = _read_long(chunk)
self._sampwidth = (_read_short(chunk) + 7) / 8
self._framerate = int(_read_float(chunk))
self._framesize = self._nchannels * self._sampwidth
if self._aifc:
#DEBUG: SGI's soundeditor produces a bad size :-(
kludge = 0
if chunk.chunksize == 18:
kludge = 1
print 'Warning: bad COMM chunk size'
chunk.chunksize = 23
#DEBUG end
self._comptype = chunk.read(4)
#DEBUG start
if kludge:
length = ord(chunk.file.read(1))
if length & 1 == 0:
length = length + 1
chunk.chunksize = chunk.chunksize + length
chunk.file.seek(-1, 1)
#DEBUG end
self._compname = _read_string(chunk)
if self._comptype != 'NONE':
if self._comptype == 'G722':
try:
import audioop
except ImportError:
pass
else:
self._convert = self._adpcm2lin
self._framesize = self._framesize / 4
return
# for ULAW and ALAW try Compression Library
try:
import cl
except ImportError:
if self._comptype == 'ULAW':
try:
import audioop
self._convert = self._ulaw2lin
self._framesize = self._framesize / 2
return
except ImportError:
pass
raise Error, 'cannot read compressed AIFF-C files'
if self._comptype == 'ULAW':
scheme = cl.G711_ULAW
self._framesize = self._framesize / 2
elif self._comptype == 'ALAW':
scheme = cl.G711_ALAW
self._framesize = self._framesize / 2
else:
raise Error, 'unsupported compression type'
self._decomp = cl.OpenDecompressor(scheme)
self._convert = self._decomp_data
else:
self._comptype = 'NONE'
self._compname = 'not compressed'
def _readmark(self, chunk):
nmarkers = _read_short(chunk)
# Some files appear to contain invalid counts.
# Cope with this by testing for EOF.
try:
for i in range(nmarkers):
id = _read_short(chunk)
pos = _read_long(chunk)
name = _read_string(chunk)
if pos or name:
# some files appear to have
# dummy markers consisting of
# a position 0 and name ''
self._markers.append((id, pos, name))
except EOFError:
print 'Warning: MARK chunk contains only',
print len(self._markers),
if len(self._markers) == 1: print 'marker',
else: print 'markers',
print 'instead of', nmarkers
class Aifc_write:
# Variables used in this class:
#
# These variables are user settable through appropriate methods
# of this class:
# _file -- the open file with methods write(), close(), tell(), seek()
# set through the __init__() method
# _comptype -- the AIFF-C compression type ('NONE' in AIFF)
# set through the setcomptype() or setparams() method
# _compname -- the human-readable AIFF-C compression type
# set through the setcomptype() or setparams() method
# _nchannels -- the number of audio channels
# set through the setnchannels() or setparams() method
# _sampwidth -- the number of bytes per audio sample
# set through the setsampwidth() or setparams() method
# _framerate -- the sampling frequency
# set through the setframerate() or setparams() method
# _nframes -- the number of audio frames written to the header
# set through the setnframes() or setparams() method
# _aifc -- whether we're writing an AIFF-C file or an AIFF file
# set through the aifc() method, reset through the
# aiff() method
#
# These variables are used internally only:
# _version -- the AIFF-C version number
# _comp -- the compressor from builtin module cl
# _nframeswritten -- the number of audio frames actually written
# _datalength -- the size of the audio samples written to the header
# _datawritten -- the size of the audio samples actually written
def __init__(self, f):
if type(f) == type(''):
filename = f
f = __builtin__.open(f, 'wb')
else:
# else, assume it is an open file object already
filename = '???'
self.initfp(f)
if filename[-5:] == '.aiff':
self._aifc = 0
else:
self._aifc = 1
def initfp(self, file):
self._file = file
self._version = _AIFC_version
self._comptype = 'NONE'
self._compname = 'not compressed'
self._comp = None
self._convert = None
self._nchannels = 0
self._sampwidth = 0
self._framerate = 0
self._nframes = 0
self._nframeswritten = 0
self._datawritten = 0
self._datalength = 0
self._markers = []
self._marklength = 0
self._aifc = 1 # AIFF-C is default
def __del__(self):
if self._file:
self.close()
#
# User visible methods.
#
def aiff(self):
if self._nframeswritten:
raise Error, 'cannot change parameters after starting to write'
self._aifc = 0
def aifc(self):
if self._nframeswritten:
raise Error, 'cannot change parameters after starting to write'
self._aifc = 1
def setnchannels(self, nchannels):
if self._nframeswritten:
raise Error, 'cannot change parameters after starting to write'
if nchannels < 1:
raise Error, 'bad # of channels'
self._nchannels = nchannels
def getnchannels(self):
if not self._nchannels:
raise Error, 'number of channels not set'
return self._nchannels
def setsampwidth(self, sampwidth):
if self._nframeswritten:
raise Error, 'cannot change parameters after starting to write'
if sampwidth < 1 or sampwidth > 4:
raise Error, 'bad sample width'
self._sampwidth = sampwidth
def getsampwidth(self):
if not self._sampwidth:
raise Error, 'sample width not set'
return self._sampwidth
def setframerate(self, framerate):
if self._nframeswritten:
raise Error, 'cannot change parameters after starting to write'
if framerate <= 0:
raise Error, 'bad frame rate'
self._framerate = framerate
def getframerate(self):
if not self._framerate:
raise Error, 'frame rate not set'
return self._framerate
def setnframes(self, nframes):
if self._nframeswritten:
raise Error, 'cannot change parameters after starting to write'
self._nframes = nframes
def getnframes(self):
return self._nframeswritten
def setcomptype(self, comptype, compname):
if self._nframeswritten:
raise Error, 'cannot change parameters after starting to write'
if comptype not in ('NONE', 'ULAW', 'ALAW', 'G722'):
raise Error, 'unsupported compression type'
self._comptype = comptype
self._compname = compname
def getcomptype(self):
return self._comptype
def getcompname(self):
return self._compname
## def setversion(self, version):
## if self._nframeswritten:
## raise Error, 'cannot change parameters after starting to write'
## self._version = version
def setparams(self, (nchannels, sampwidth, framerate, nframes, comptype, compname)):
if self._nframeswritten:
raise Error, 'cannot change parameters after starting to write'
if comptype not in ('NONE', 'ULAW', 'ALAW', 'G722'):
raise Error, 'unsupported compression type'
self.setnchannels(nchannels)
self.setsampwidth(sampwidth)
self.setframerate(framerate)
self.setnframes(nframes)
self.setcomptype(comptype, compname)
def getparams(self):
if not self._nchannels or not self._sampwidth or not self._framerate:
raise Error, 'not all parameters set'
return self._nchannels, self._sampwidth, self._framerate, \
self._nframes, self._comptype, self._compname
def setmark(self, id, pos, name):
if id <= 0:
raise Error, 'marker ID must be > 0'
if pos < 0:
raise Error, 'marker position must be >= 0'
if type(name) != type(''):
raise Error, 'marker name must be a string'
for i in range(len(self._markers)):
if id == self._markers[i][0]:
self._markers[i] = id, pos, name
return
self._markers.append((id, pos, name))
def getmark(self, id):
for marker in self._markers:
if id == marker[0]:
return marker
raise Error, 'marker ' + `id` + ' does not exist'
def getmarkers(self):
if len(self._markers) == 0:
return None
return self._markers
def tell(self):
return self._nframeswritten
def writeframesraw(self, data):
self._ensure_header_written(len(data))
nframes = len(data) / (self._sampwidth * self._nchannels)
if self._convert:
data = self._convert(data)
self._file.write(data)
self._nframeswritten = self._nframeswritten + nframes
self._datawritten = self._datawritten + len(data)
def writeframes(self, data):
self.writeframesraw(data)
if self._nframeswritten != self._nframes or \
self._datalength != self._datawritten:
self._patchheader()
def close(self):
self._ensure_header_written(0)
if self._datawritten & 1:
# quick pad to even size
self._file.write(chr(0))
self._datawritten = self._datawritten + 1
self._writemarkers()
if self._nframeswritten != self._nframes or \
self._datalength != self._datawritten or \
self._marklength:
self._patchheader()
if self._comp:
self._comp.CloseCompressor()
self._comp = None
self._file.flush()
self._file = None
#
# Internal methods.
#
def _comp_data(self, data):
import cl
dum = self._comp.SetParam(cl.FRAME_BUFFER_SIZE, len(data))
dum = self._comp.SetParam(cl.COMPRESSED_BUFFER_SIZE, len(data))
return self._comp.Compress(self._nframes, data)
def _lin2ulaw(self, data):
import audioop
return audioop.lin2ulaw(data, 2)
def _lin2adpcm(self, data):
import audioop
if not hasattr(self, '_adpcmstate'):
self._adpcmstate = None
data, self._adpcmstate = audioop.lin2adpcm(data, 2,
self._adpcmstate)
return data
def _ensure_header_written(self, datasize):
if not self._nframeswritten:
if self._comptype in ('ULAW', 'ALAW'):
if not self._sampwidth:
self._sampwidth = 2
if self._sampwidth != 2:
raise Error, 'sample width must be 2 when compressing with ULAW or ALAW'
if self._comptype == 'G722':
if not self._sampwidth:
self._sampwidth = 2
if self._sampwidth != 2:
raise Error, 'sample width must be 2 when compressing with G7.22 (ADPCM)'
if not self._nchannels:
raise Error, '# channels not specified'
if not self._sampwidth:
raise Error, 'sample width not specified'
if not self._framerate:
raise Error, 'sampling rate not specified'
self._write_header(datasize)
def _init_compression(self):
if self._comptype == 'G722':
import audioop
self._convert = self._lin2adpcm
return
try:
import cl
except ImportError:
if self._comptype == 'ULAW':
try:
import audioop
self._convert = self._lin2ulaw
return
except ImportError:
pass
raise Error, 'cannot write compressed AIFF-C files'
if self._comptype == 'ULAW':
scheme = cl.G711_ULAW
elif self._comptype == 'ALAW':
scheme = cl.G711_ALAW
else:
raise Error, 'unsupported compression type'
self._comp = cl.OpenCompressor(scheme)
params = [cl.ORIGINAL_FORMAT, 0,
cl.BITS_PER_COMPONENT, self._sampwidth * 8,
cl.FRAME_RATE, self._framerate,
cl.FRAME_BUFFER_SIZE, 100,
cl.COMPRESSED_BUFFER_SIZE, 100]
if self._nchannels == 1:
params[1] = cl.MONO
elif self._nchannels == 2:
params[1] = cl.STEREO_INTERLEAVED
else:
raise Error, 'cannot compress more than 2 channels'
self._comp.SetParams(params)
# the compressor produces a header which we ignore
dummy = self._comp.Compress(0, '')
self._convert = self._comp_data
def _write_header(self, initlength):
if self._aifc and self._comptype != 'NONE':
self._init_compression()
self._file.write('FORM')
if not self._nframes:
self._nframes = initlength / (self._nchannels * self._sampwidth)
self._datalength = self._nframes * self._nchannels * self._sampwidth
if self._datalength & 1:
self._datalength = self._datalength + 1
if self._aifc:
if self._comptype in ('ULAW', 'ALAW'):
self._datalength = self._datalength / 2
if self._datalength & 1:
self._datalength = self._datalength + 1
elif self._comptype == 'G722':
self._datalength = (self._datalength + 3) / 4
if self._datalength & 1:
self._datalength = self._datalength + 1
self._form_length_pos = self._file.tell()
commlength = self._write_form_length(self._datalength)
if self._aifc:
self._file.write('AIFC')
self._file.write('FVER')
_write_long(self._file, 4)
_write_long(self._file, self._version)
else:
self._file.write('AIFF')
self._file.write('COMM')
_write_long(self._file, commlength)
_write_short(self._file, self._nchannels)
self._nframes_pos = self._file.tell()
_write_long(self._file, self._nframes)
_write_short(self._file, self._sampwidth * 8)
_write_float(self._file, self._framerate)
if self._aifc:
self._file.write(self._comptype)
_write_string(self._file, self._compname)
self._file.write('SSND')
self._ssnd_length_pos = self._file.tell()
_write_long(self._file, self._datalength + 8)
_write_long(self._file, 0)
_write_long(self._file, 0)
def _write_form_length(self, datalength):
if self._aifc:
commlength = 18 + 5 + len(self._compname)
if commlength & 1:
commlength = commlength + 1
verslength = 12
else:
commlength = 18
verslength = 0
_write_long(self._file, 4 + verslength + self._marklength + \
8 + commlength + 16 + datalength)
return commlength
def _patchheader(self):
curpos = self._file.tell()
if self._datawritten & 1:
datalength = self._datawritten + 1
self._file.write(chr(0))
else:
datalength = self._datawritten
if datalength == self._datalength and \
self._nframes == self._nframeswritten and \
self._marklength == 0:
self._file.seek(curpos, 0)
return
self._file.seek(self._form_length_pos, 0)
dummy = self._write_form_length(datalength)
self._file.seek(self._nframes_pos, 0)
_write_long(self._file, self._nframeswritten)
self._file.seek(self._ssnd_length_pos, 0)
_write_long(self._file, datalength + 8)
self._file.seek(curpos, 0)
self._nframes = self._nframeswritten
self._datalength = datalength
def _writemarkers(self):
if len(self._markers) == 0:
return
self._file.write('MARK')
length = 2
for marker in self._markers:
id, pos, name = marker
length = length + len(name) + 1 + 6
if len(name) & 1 == 0:
length = length + 1
_write_long(self._file, length)
self._marklength = length + 8
_write_short(self._file, len(self._markers))
for marker in self._markers:
id, pos, name = marker
_write_short(self._file, id)
_write_long(self._file, pos)
_write_string(self._file, name)
def open(f, mode=None):
if mode is None:
if hasattr(f, 'mode'):
mode = f.mode
else:
mode = 'rb'
if mode in ('r', 'rb'):
return Aifc_read(f)
elif mode in ('w', 'wb'):
return Aifc_write(f)
else:
raise Error, "mode must be 'r', 'rb', 'w', or 'wb'"
openfp = open # B/W compatibility
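# A minimal illustrative sketch (not part of the original module): read the
# header parameters of an AIFF/AIFF-C file into a dict. The helper name is
# made up for the example; the keys mirror the getparams() tuple order.
def _example_read_params(path):
    f = open(path, 'r')
    try:
        nchannels, sampwidth, framerate, nframes, comptype, compname = f.getparams()
        return {'nchannels': nchannels, 'sampwidth': sampwidth,
                'framerate': framerate, 'nframes': nframes,
                'comptype': comptype, 'compname': compname}
    finally:
        f.close()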
if __name__ == '__main__':
import sys
if not sys.argv[1:]:
sys.argv.append('/usr/demos/data/audio/bach.aiff')
fn = sys.argv[1]
f = open(fn, 'r')
print "Reading", fn
print "nchannels =", f.getnchannels()
print "nframes =", f.getnframes()
print "sampwidth =", f.getsampwidth()
print "framerate =", f.getframerate()
print "comptype =", f.getcomptype()
print "compname =", f.getcompname()
if sys.argv[2:]:
gn = sys.argv[2]
print "Writing", gn
g = open(gn, 'w')
g.setparams(f.getparams())
while 1:
data = f.readframes(1024)
if not data:
break
g.writeframes(data)
g.close()
f.close()
print "Done."
|
# Generated by Django 2.1.5 on 2019-02-01 08:51
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('categories', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='category',
name='category_image',
field=models.ImageField(blank=True, null=True, upload_to='categories'),
),
]
|
"""singosgu URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.urls import path, re_path
from django.conf import settings
from django.conf.urls.static import static
from . import views
urlpatterns = [
path(r'freight/', views.TransportationFeeListViewSet.as_view({"get": "list", "post": "create"}), name="transportationfee"),
re_path(r'^freight/(?P<pk>\d+)/$', views.TransportationFeeListViewSet.as_view({
'get': 'retrieve',
'put': 'update',
'patch': 'partial_update',
'delete': 'destroy'
}), name="transportationfee_1")
]
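# Routes produced by the patterns above (for reference):
#   GET  /freight/       -> TransportationFeeListViewSet.list
#   POST /freight/       -> TransportationFeeListViewSet.create
#   GET/PUT/PATCH/DELETE /freight/<pk>/ -> retrieve/update/partial_update/destroy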
|
# Save the geometry (triangles, vertices) of a FESOM grid to a gdal dataset
# Author: R. Rietbroek
# Date 17 May 2019
# Currently this saves the surface nodes only
# Improvements are possible
# * Optionally store the 3D surfaces
# * add info on e.g. bathymetry to the nodes
# *
import osgeo.ogr as ogr
import osgeo.osr as osr
def fesom2gdal(mesh,outputname,gdaldriver='GPKG'):
"""Export a FESOM surface mesh to a GIS shapefile
input: mesh a FESOM mesh loaded with fesom_mesh(meshpath, get3d=True)
outputname: the name of the output dataset
gdaldriver: the driver to use to write the output (defaults to geopackage, but could be anything the gdal library supports including POSTGIS)
returns: nothing"""
driver = ogr.GetDriverByName(gdaldriver)
data_source = driver.CreateDataSource(outputname)
# create the spatial reference, WGS84
srs = osr.SpatialReference()
srs.ImportFromEPSG(4326)
# create the layer containing the vertices
vertlayer = data_source.CreateLayer("vert", srs, ogr.wkbPoint)
field_onbound = ogr.FieldDefn("onboundary", ogr.OFTInteger)
vertlayer.CreateField(field_onbound)
field_topo = ogr.FieldDefn("topo", ogr.OFTReal)
vertlayer.CreateField(field_topo)
    # also store the indices of the 3D elements for the corresponding z-levels (to look up 3d indices)
    # NOTE: ESRI Shapefiles do not support list attributes, so this field will not be
    # registered when exporting to a Shapefile
    field_zindx = ogr.FieldDefn("nodeid", ogr.OFTIntegerList)
    vertlayer.CreateField(field_zindx)
# add vertices
for id,(lon,lat,onbnd) in enumerate(zip(mesh.x2,mesh.y2,mesh.ind2d)):
feature = ogr.Feature(vertlayer.GetLayerDefn())
        # Note: we need a conversion to int so the value gets accepted by the gdal library
feature.SetField("onboundary", int(onbnd))
feature.SetField('topo',mesh.topo[id])
# note: we subtract 1 here to be consistent with the zero-indexing used in nodeid
idxfield=feature.GetFieldIndex("nodeid")
feature.SetFieldIntegerList(idxfield,[int(x-1) for x in mesh.n32[id,:] if x >=0])
#create a point geometry
point = ogr.Geometry(ogr.wkbPoint)
point.AddPoint(lon,lat)
feature.SetGeometry(point)
vertlayer.CreateFeature(feature)
# Dereference the feature in order to appropriately call the destructor
feature = None
# create the layer containing the triangles
    # NOTE: the dedicated triangle geometry type (ogr.wkbTriangle) is not visible in QGIS,
    # so plain polygons are used instead
    # surftype=ogr.wkbTriangle
    surftype = ogr.wkbPolygon
    tinlayer = data_source.CreateLayer("tin", srs, surftype)
    # Add the fields we're interested in (node ids of the triangle vertices, area and topography)
field_nodeid = ogr.FieldDefn("nodeid", ogr.OFTIntegerList)
tinlayer.CreateField(field_nodeid)
field_area = ogr.FieldDefn("area", ogr.OFTReal)
tinlayer.CreateField(field_area)
field_topo = ogr.FieldDefn("topo", ogr.OFTReal)
tinlayer.CreateField(field_topo)
# exclude cyclic elements
elem2=mesh.elem[mesh.no_cyclic_elem,:]
#loop over triangular elements
for i1,i2,i3 in elem2:
feature = ogr.Feature(tinlayer.GetLayerDefn())
ring=ogr.Geometry(ogr.wkbLinearRing)
tri=ogr.Geometry(surftype)
ring.AddPoint(mesh.x2[i1],mesh.y2[i1])
ring.AddPoint(mesh.x2[i2],mesh.y2[i2])
ring.AddPoint(mesh.x2[i3],mesh.y2[i3])
tri.AddGeometry(ring)
idxfield=feature.GetFieldIndex("nodeid")
feature.SetFieldIntegerList(idxfield,[int(i1),int(i2),int(i3)])
        # TODO convert to square km (which projection is used for FESOM??)
feature.SetField("area", tri.Area())
#currently just set topo to the mean of the topo of the vertices
feature.SetField("topo", (mesh.topo[i1]+mesh.topo[i2]+mesh.topo[i3])/3.0)
feature.SetGeometry(tri)
tinlayer.CreateFeature(feature)
feature=None
# Save and close the data source
data_source = None
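# Example usage (illustrative sketch; assumes the pyfesom mesh loader named in
# the docstring above):
#   mesh = fesom_mesh('/path/to/meshdir', get3d=True)
#   fesom2gdal(mesh, 'fesom_surface.gpkg')                      # GeoPackage
#   fesom2gdal(mesh, 'fesom_surface', gdaldriver='ESRI Shapefile')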
|
# This file is part of the Edison Project.
# Please refer to the LICENSE document that was supplied with this software for information on how it can be used.
# Django settings for Edison project.
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', 'your_email@domain.com'),
)
# Django Debug Toolbar settings
INTERNAL_IPS = ('127.0.0.1','192.168.1.56','192.168.3.57')
DEBUG_TOOLBAR_PANELS = (
'debug_toolbar.panels.version.VersionDebugPanel',
'debug_toolbar.panels.timer.TimerDebugPanel',
'debug_toolbar.panels.settings_vars.SettingsVarsDebugPanel',
'debug_toolbar.panels.headers.HeaderDebugPanel',
'debug_toolbar.panels.request_vars.RequestVarsDebugPanel',
'debug_toolbar.panels.template.TemplateDebugPanel',
'debug_toolbar.panels.sql.SQLDebugPanel',
'debug_toolbar.panels.signals.SignalDebugPanel',
'debug_toolbar.panels.logger.LoggingPanel',
)
DEBUG_TOOLBAR_CONFIG = {
'INTERCEPT_REDIRECTS':False,
'HIDE_DJANGO_SQL': False,
}
MANAGERS = ADMINS
DATABASE_ENGINE = 'mysql' # 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
DATABASE_NAME = 'edison' # Or path to database file if using sqlite3.
DATABASE_USER = 'edison' # Not used with sqlite3.
DATABASE_PASSWORD = 'edison' # Not used with sqlite3.
DATABASE_HOST = 'localhost' # Set to empty string for localhost. Not used with sqlite3.
DATABASE_PORT = '3306' # Set to empty string for default. Not used with sqlite3.
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'Europe/London'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-gb'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# Absolute path to the directory that holds media.
# Example: "/home/media/media.lawrence.com/"
MEDIA_ROOT = "/var/djangosites/edison/media"
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash if there is a path component (optional in other cases).
# Examples: "http://media.lawrence.com", "http://example.com/media/"
MEDIA_URL = 'http://edison/media/'
# URL prefix for admin media -- CSS, JavaScript and images. Make sure to use a
# trailing slash.
# Examples: "http://foo.com/media/", "/media/".
ADMIN_MEDIA_PREFIX = 'http://edison/admin_m/'
# Make this unique, and don't share it with anybody.
SECRET_KEY = '&(nwanlz8mdftiy06qrjkqh_i428x90u&ajb%lipbc(wk79gb*'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.load_template_source',
'django.template.loaders.app_directories.load_template_source',
# 'django.template.loaders.eggs.load_template_source',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.RemoteUserMiddleware',
# 'django.middleware.csrf.CsrfMiddleware',
#'django.middleware.csrf.CsrfResponseMiddleware',
)
ROOT_URLCONF = 'urls'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
"/var/djangosites/edison/templates",
)
# set oauth callback address
OAUTH_CALLBACK_VIEW="api.views.request_token_ready"
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.admin',
'django.contrib.admindocs',
'cmdb',
'piston',
'changemanagement',
'orchestra',
'auditorium',
)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# flake8: noqa
"""
Unit tests for Morse Code: Bits
Students should not modify this file.
"""
__author__ = 'madarp'
import sys
import unittest
import importlib
import subprocess
# suppress __pycache__ and .pyc files
sys.dont_write_bytecode = True
# Kenzie devs: change this to 'soln.morse' to test solution
PKG_NAME = 'morse'
# some handy morse strings
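# Convention used throughout these tests: a single space separates letters
# within a word and three spaces separate words (at the bit level: 1, 3 and 7
# zero time units respectively).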
# HEY JUDE
morse_hey_jude = '.... . -.--   .--- ..- -.. .'
# THE QUICK BROWN FOX JUMPS OVER THE LAZY DOG.
morse_quick_fox = '- .... .   --.- ..- .. -.-. -.-   -... .-. --- .-- -.   ..-. --- -..-   .--- ..- -- .--. ...   --- ...- . .-.   - .... .   .-.. .- --.. -.--   -.. --- --. .-.-.-'
class TestDecodeMorse(unittest.TestCase):
"""Only tests the decode_morse() function"""
@classmethod
def setUpClass(cls):
"""Performs module import and suite setup at test-runtime"""
cls.assertGreaterEqual(cls, sys.version_info[0], 3)
cls.module = importlib.import_module(PKG_NAME)
def test_hey_jude(self):
"""Check basic HEY JUDE"""
actual = self.module.decode_morse(morse_hey_jude)
self.assertEqual(actual, 'HEY JUDE')
def test_basic_letters(self):
"""Check Basic Morse decoding"""
self.assertEqual(self.module.decode_morse('.-'), 'A')
self.assertEqual(self.module.decode_morse('.'), 'E')
self.assertEqual(self.module.decode_morse('..'), 'I')
self.assertEqual(self.module.decode_morse('. .'), 'EE')
        self.assertEqual(self.module.decode_morse('.   .'), 'E E')
self.assertEqual(self.module.decode_morse('...---...'), 'SOS')
self.assertEqual(self.module.decode_morse('... --- ...'), 'SOS')
        self.assertEqual(self.module.decode_morse('...   ---   ...'), 'S O S')
def test_extra_spaces(self):
"""Check handling of spaces"""
self.assertEqual(self.module.decode_morse(' . '), 'E')
        self.assertEqual(self.module.decode_morse('   .   . '), 'E E')
def test_complex(self):
"""Check long message decoding"""
        morse = ' ...---... -.-.--   - .... .   --.- ..- .. -.-. -.-   -... .-. --- .-- -.   ..-. --- -..-   .--- ..- -- .--. ...   --- ...- . .-.   - .... .   .-.. .- --.. -.--   -.. --- --. .-.-.- '
actual = self.module.decode_morse(morse)
self.assertEqual(actual, 'SOS! THE QUICK BROWN FOX JUMPS OVER THE LAZY DOG.')
def test_flake8(self):
"""Checking for PEP8/flake8 compliance"""
result = subprocess.run(['flake8', self.module.__file__])
self.assertEqual(result.returncode, 0)
def test_author_string(self):
"""Checking for __author__ string"""
self.assertNotEqual(self.module.__author__, '???')
class TestDecodeBits(unittest.TestCase):
@classmethod
def setUpClass(cls):
"""Performs module import and suite setup at test-runtime"""
cls.assertGreaterEqual(cls, sys.version_info[0], 3)
cls.module = importlib.import_module(PKG_NAME)
def test_et_phone_home(self):
"""Check if ET PHONE HOME can be transcoded to Morse"""
bits = '11000000111111000000000000001100111111001111110011000000110011001100110000001111110011111100111111000000111111001100000011000000000000001100110011001100000011111100111111001111110000001111110011111100000011'
morse = self.module.decode_bits(bits)
        self.assertEqual(morse, '. -   .--. .... --- -. .   .... --- -- .')
def test_hey_jude_2x(self):
"""Check if HEY JUDE can be transcoded to Morse"""
bits = '1100110011001100000011000000111111001100111111001111110000000000000011001111110011111100111111000000110011001111110000001111110011001100000011'
morse = self.module.decode_bits(bits)
self.assertEqual(morse, morse_hey_jude)
def test_hey_jude_6x(self):
bits = '111111000000111111000000111111000000111111000000000000000000111111000000000000000000111111111111111111000000111111000000111111111111111111000000111111111111111111000000000000000000000000000000000000000000111111000000111111111111111111000000111111111111111111000000111111111111111111000000000000000000111111000000111111000000111111111111111111000000000000000000111111111111111111000000111111000000111111000000000000000000111111'
morse = self.module.decode_bits(bits)
self.assertEqual(morse, morse_hey_jude)
def test_basic_bits(self):
"""Check short letters transcoding to Morse"""
self.assertEqual(self.module.decode_bits('1'), '.') # E
self.assertEqual(self.module.decode_bits('101'), '..') # I
self.assertEqual(self.module.decode_bits('10001'), '. .') # E E
self.assertEqual(self.module.decode_bits('10111'), '.-') # A
self.assertEqual(self.module.decode_bits('1110111'), '--') # M
def test_multiple_bits_per_dot(self):
"""Multiple bits per dot handling"""
self.assertEqual(self.module.decode_bits('111'), '.') # E
self.assertEqual(self.module.decode_bits('1111111'), '.') # E
self.assertEqual(self.module.decode_bits('110011'), '..') # I
self.assertEqual(self.module.decode_bits('111000111'), '..') # I
self.assertEqual(self.module.decode_bits('111110000011111'), '..') # I
self.assertEqual(self.module.decode_bits('111000000000111'), '. .') # EE
self.assertEqual(self.module.decode_bits('11111100111111'), '--') # M
self.assertEqual(self.module.decode_bits('111000111000111'), '...') # S
def test_extra_zeroes(self):
"""Check handling of leading and trailing zeros"""
self.assertEqual(self.module.decode_bits('01110'), '.')
self.assertEqual(self.module.decode_bits('000000011100000'), '.')
def test_long_message_1x(self):
"""Check long message at 1x time unit"""
bits = (
'0001110001010101000100000001110111010111000101011100010100011101'
'0111010001110101110000000111010101000101110100011101110111000101'
'1101110001110100000001010111010001110111011100011101010111000000'
'01011101110111000101011100011101110001011101110100010101000000011'
'10111011100010101011100010001011101000000011100010101010001000000'
'01011101010001011100011101110101000111010111011100000001110101000'
'11101110111000111011101000101110101110101110'
)
actual = self.module.decode_bits(bits)
self.assertEqual(actual, morse_quick_fox)
def test_long_message_5x(self):
bits = (
'1111111111111110000000000000001111100000111110000011111000001111'
'1000000000000000111110000000000000000000000000000000000011111111'
'1111111000001111111111111110000011111000001111111111111110000000'
'0000000011111000001111100000111111111111111000000000000000111110'
'0000111110000000000000001111111111111110000011111000001111111111'
'1111100000111110000000000000001111111111111110000011111000001111'
'1111111111100000000000000000000000000000000000111111111111111000'
'00111110000011111000001111100000000000000011111000001111111111111'
'11000001111100000000000000011111111111111100000111111111111111000'
'00111111111111111000000000000000111110000011111111111111100000111'
'11111111111100000000000000011111111111111100000111110000000000000'
'00000000000000000000001111100000111110000011111111111111100000111'
'11000000000000000111111111111111000001111111111111110000011111111'
'111111100000000000000011111111111111100000111110000011111000001111'
'11111111111000000000000000000000000000000000001111100000111111111'
'11111100000111111111111111000001111111111111110000000000000001111'
'10000011111000001111111111111110000000000000001111111111111110000'
'011111111111111100000000000000011111000001111111111111110000011111'
'111111111100000111110000000000000001111100000111110000011111000000'
'000000000000000000000000000001111111111111110000011111111111111100'
'000111111111111111000000000000000111110000011111000001111100000111'
'111111111111000000000000000111110000000000000001111100000111111111'
'111111000001111100000000000000000000000000000000000111111111111111'
'0000000000000001111100000111110000011111000001111100000000000000011111000000000000000000000000000000000001111100000111111111111111000001111100000111110000000000000001111100000111111111111111000000000000000111111111111111000001111111111111110000011111000001111100000000000000011111111111111100000111110000011111111111111100000111111111111111000000000000000000000000000000000001111111111111110000011111000001111100000000000000011111111111111100000111111111111111000001111111111111110000000000000001111111111111110000011111111111111100000111110000000000000001111100000111111111111111000001111100000111111111111111000001111100000111111111111111'
)
actual = self.module.decode_bits(bits)
self.assertEqual(actual, morse_quick_fox)
if __name__ == '__main__':
unittest.main(verbosity=2)
|
# Copyright 2019 Objectif Libre
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import collections
import datetime
import decimal
import functools
import voluptuous
from werkzeug import datastructures
from cloudkitty.utils import json
from cloudkitty.utils import tz as tzutils
from cloudkitty.utils import validation as vutils
# NOTE(peschk_l): qty and price are converted to strings to avoid
# floating-point conversion issues:
# Decimal(0.121) == Decimal('0.12099999999999999644728632119')
# Decimal(str(0.121)) == Decimal('0.121')
DATAPOINT_SCHEMA = voluptuous.Schema({
voluptuous.Required('vol'): {
voluptuous.Required('unit'): vutils.get_string_type(),
voluptuous.Required('qty'): voluptuous.Coerce(str),
},
voluptuous.Required('rating', default={}): {
voluptuous.Required('price', default=0):
voluptuous.Coerce(str),
},
voluptuous.Required('groupby'): vutils.DictTypeValidator(str, str),
voluptuous.Required('metadata'): vutils.DictTypeValidator(str, str),
})
_DataPointBase = collections.namedtuple(
"DataPoint",
field_names=("unit", "qty", "price", "groupby", "metadata"))
class DataPoint(_DataPointBase):
def __new__(cls, unit, qty, price, groupby, metadata):
return _DataPointBase.__new__(
cls,
unit or "undefined",
# NOTE(peschk_l): avoids floating-point issues.
decimal.Decimal(str(qty) if isinstance(qty, float) else qty),
decimal.Decimal(str(price) if isinstance(price, float) else price),
datastructures.ImmutableDict(groupby),
datastructures.ImmutableDict(metadata),
)
def set_price(self, price):
"""Sets the price of the DataPoint and returns a new object."""
return self._replace(price=price)
def as_dict(self, legacy=False, mutable=False):
"""Returns a dict representation of the object.
The returned dict is immutable by default and has the
following format::
{
"vol": {
"unit": "GiB",
"qty": 1.2,
},
"rating": {
"price": 0.04,
},
"groupby": {
"group_one": "one",
"group_two": "two",
},
"metadata": {
"attr_one": "one",
"attr_two": "two",
},
}
The dict can also be returned in the legacy (v1 storage) format. In
that case, `groupby` and `metadata` will be removed and merged together
into the `desc` key.
:param legacy: Defaults to False. If True, returned dict is in legacy
format.
:type legacy: bool
:param mutable: Defaults to False. If True, returns a normal dict
instead of an ImmutableDict.
:type mutable: bool
"""
output = {
"vol": {
"unit": self.unit,
"qty": self.qty,
},
"rating": {
"price": self.price,
},
"groupby": dict(self.groupby) if mutable else self.groupby,
"metadata": dict(self.metadata) if mutable else self.metadata,
}
if legacy:
desc = output.pop("metadata")
desc.update(output.pop("groupby"))
output['desc'] = desc
return output if mutable else datastructures.ImmutableDict(output)
def json(self, legacy=False):
"""Returns a json representation of the dict returned by `as_dict`.
:param legacy: Defaults to False. If True, returned dict is in legacy
format.
:type legacy: bool
:rtype: str
"""
return json.dumps(self.as_dict(legacy=legacy, mutable=True))
@classmethod
def from_dict(cls, dict_, legacy=False):
"""Returns a new DataPoint instance build from a dict.
:param dict_: Dict to build the DataPoint from
:type dict_: dict
        :param legacy: Set to True to convert the dict from the legacy format
            to the new format before validating it.
:rtype: DataPoint
"""
try:
if legacy:
dict_['groupby'] = dict_.pop('desc')
dict_['metadata'] = {}
valid = DATAPOINT_SCHEMA(dict_)
return cls(
unit=valid["vol"]["unit"],
qty=valid["vol"]["qty"],
price=valid["rating"]["price"],
groupby=valid["groupby"],
metadata=valid["metadata"],
)
except (voluptuous.Invalid, KeyError) as e:
raise ValueError("{} isn't a valid DataPoint: {}".format(dict_, e))
@property
def desc(self):
output = dict(self.metadata)
output.update(self.groupby)
return datastructures.ImmutableDict(output)
DATAFRAME_SCHEMA = voluptuous.Schema({
voluptuous.Required('period'): {
voluptuous.Required('begin'): voluptuous.Any(
datetime.datetime, voluptuous.Coerce(tzutils.dt_from_iso)),
voluptuous.Required('end'): voluptuous.Any(
datetime.datetime, voluptuous.Coerce(tzutils.dt_from_iso)),
},
voluptuous.Required('usage'): vutils.IterableValuesDict(
str, DataPoint.from_dict),
})
class DataFrame(object):
__slots__ = ("start", "end", "_usage")
def __init__(self, start, end, usage=None):
if not isinstance(start, datetime.datetime):
raise TypeError(
'"start" must be of type datetime.datetime, not {}'.format(
type(start)))
if not isinstance(end, datetime.datetime):
raise TypeError(
'"end" must be of type datetime.datetime, not {}'.format(
type(end)))
if usage is not None and not isinstance(usage, dict):
raise TypeError(
'"usage" must be a dict, not {}'.format(type(usage)))
self.start = start
self.end = end
self._usage = collections.OrderedDict()
if usage:
for key in sorted(usage.keys()):
self.add_points(usage[key], key)
def as_dict(self, legacy=False, mutable=False):
output = {
"period": {"begin": self.start, "end": self.end},
"usage": {
key: [v.as_dict(legacy=legacy, mutable=mutable) for v in val]
for key, val in self._usage.items()
},
}
return output if mutable else datastructures.ImmutableDict(output)
def json(self, legacy=False):
return json.dumps(self.as_dict(legacy=legacy, mutable=True))
@classmethod
def from_dict(cls, dict_, legacy=False):
try:
schema = DATAFRAME_SCHEMA
if legacy:
validator = functools.partial(DataPoint.from_dict, legacy=True)
# NOTE(peschk_l): __name__ is required for voluptuous exception
# message formatting
validator.__name__ = 'DataPoint.from_dict'
# NOTE(peschk_l): In case the legacy format is required, we
# create a new schema where DataPoint.from_dict is called with
# legacy=True. The "extend" method does create a new objects,
# and replaces existing keys with new ones.
schema = DATAFRAME_SCHEMA.extend({
voluptuous.Required('usage'): vutils.IterableValuesDict(
str, validator
),
})
valid = schema(dict_)
return cls(
valid["period"]["begin"],
valid["period"]["end"],
usage=valid["usage"])
except (voluptuous.error.Invalid, KeyError) as e:
raise ValueError("{} isn't a valid DataFrame: {}".format(dict_, e))
def add_points(self, points, type_):
"""Adds multiple points to the DataFrame
:param points: DataPoints to add.
        :type points: list of DataPoint
"""
if type_ in self._usage:
self._usage[type_] += points
else:
self._usage[type_] = points
def add_point(self, point, type_):
"""Adds a single point to the DataFrame
:param point: DataPoint to add.
:type point: DataPoint
"""
if type_ in self._usage:
self._usage[type_].append(point)
else:
self._usage[type_] = [point]
def iterpoints(self):
"""Iterates over all datapoints of the dataframe.
Yields (type, point) tuples.
:rtype: (str, DataPoint)
"""
for type_, points in self._usage.items():
for point in points:
yield type_, point
def itertypes(self):
"""Iterates over all types of the dataframe.
Yields (type, (point, )) tuples.
:rtype: (str, (DataPoint, ))
"""
for type_, points in self._usage.items():
yield type_, points
def __repr__(self):
return 'DataFrame(metrics=[{}])'.format(','.join(self._usage.keys()))
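# A minimal illustrative sketch (not part of the module API): build a DataFrame
# from a hand-written DataPoint and serialize it back to a plain dict. The
# metric name, group and metadata keys below are made up for the example.
def _example_dataframe():
    start = datetime.datetime(2019, 1, 1)
    end = start + datetime.timedelta(hours=1)
    point = DataPoint(
        unit='GiB',
        qty=1.2,                      # converted to Decimal('1.2') internally
        price=0.04,                   # converted to Decimal('0.04') internally
        groupby={'project_id': 'abc'},
        metadata={'flavor': 'small'},
    )
    frame = DataFrame(start, end, usage={'volume.size': [point]})
    return frame.as_dict(mutable=True)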
|
# -*- coding: utf-8 -*-
from plone.app.registry.browser import controlpanel
from plone.protect.interfaces import IDisableCSRFProtection
from collective.solr.interfaces import ISolrSchema, _
from plone.restapi.controlpanels import RegistryConfigletPanel
from Products.CMFPlone.utils import safe_unicode
from Products.Five.browser.pagetemplatefile import ViewPageTemplateFile
from Products.PythonScripts.PythonScript import PythonScript
from zope.component import adapter
from zope.interface import alsoProvides
from zope.interface import Interface
@adapter(Interface, Interface)
class SolrControlpanelAdapter(RegistryConfigletPanel):
schema = ISolrSchema
configlet_id = "SolrSettings"
configlet_category_id = "Products"
schema_prefix = "collective.solr"
class SolrControlPanelForm(controlpanel.RegistryEditForm):
id = "SolrControlPanel"
label = _("label_solr_settings", default="Solr settings")
schema = ISolrSchema
schema_prefix = "collective.solr"
boost_script_id = "solr_boost_index_values"
def getContent(self):
content = super(SolrControlPanelForm, self).getContent()
portal = self.context
if self.boost_script_id in portal:
boost_script = safe_unicode(portal[self.boost_script_id].read())
# strip script metadata for display
content.boost_script = "\n".join(
[
line
for line in boost_script.splitlines()
if not line.startswith("##")
]
)
alsoProvides(self.request, IDisableCSRFProtection)
return content
def applyChanges(self, data):
changes = super(SolrControlPanelForm, self).applyChanges(data)
boost_script = data.get("boost_script", "")
if "##parameters=data\n" not in boost_script:
boost_script = "##parameters=data\n" + boost_script
portal = self.context
if self.boost_script_id not in self.context:
# "special" documents get boosted during indexing...
portal[self.boost_script_id] = PythonScript(self.boost_script_id)
# since we create a PythonScript in ZODB we need to
# disable CSRF protection
alsoProvides(self.request, IDisableCSRFProtection)
portal[self.boost_script_id].write(boost_script)
return changes
class SolrControlPanel(controlpanel.ControlPanelFormWrapper):
form = SolrControlPanelForm
index = ViewPageTemplateFile("controlpanel.pt")
|
class Linux32: pass
|
"""
A binaries hook for pygame seems to be required for pygame 2.0 on Windows;
otherwise some essential DLLs will not be transferred to the exe.
It also declares datas (resources that pygame uses) so that they work
correctly with PyInstaller.
"""
import os
import platform
from pygame import __file__ as pygame_main_file
# Get pygame's folder
pygame_folder = os.path.dirname(os.path.abspath(pygame_main_file))
# datas is the variable that pyinstaller looks for while processing hooks
datas = []
# exclude some unneeded binaries
exclude_bin = ('libFLAC-8', 'libfreetype-6', 'libjpeg-9', 'libmodplug-1', 'libmpg123-0', 'libogg-0', 'libopus-0',
'libopusfile-0', 'libpng16-16', 'libtiff-5', 'libvorbis-0', 'libvorbisfile-3', 'libwebp-7', 'portmidi',
'SDL2_image', 'SDL2_mixer', 'SDL2_ttf')
# A helper to append the relative path of a resource to hook variable - datas
def _append_to_datas(file_path):
global datas
res_path = os.path.join(pygame_folder, file_path)
if os.path.exists(res_path):
datas.append((res_path, "pygame"))
# First append the font file, then based on the OS, append pygame icon file
_append_to_datas("freesansbold.ttf")
if platform.system() == "Darwin":
_append_to_datas("pygame_icon.tiff")
else:
_append_to_datas("pygame_icon.bmp")
if platform.system() == "Windows":
from PyInstaller.utils.hooks import collect_dynamic_libs
pre_binaries = collect_dynamic_libs('pygame')
binaries = []
for b in pre_binaries:
binary, location = b
filename = os.path.split(binary)[-1]
if filename.removesuffix('.dll') in exclude_bin:
print('Custom pygame hook excluding binary:', filename)
continue
# settles all the DLLs into the top level folder, which prevents duplication
# with the DLLs already being put there.
binaries.append((binary, "."))
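# Note: PyInstaller consumes the module-level `datas` and `binaries` variables
# declared by this hook; each entry is a (source_path, destination_folder) tuple.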
|
import scrapy
import json
import datetime
POSTED_DATE_FORMAT = "%Y-%m-%d"
# BOOKMARK is a cursor that tracks how far back we should scrape on each run
BOOKMARK = datetime.datetime(
year=2020, month=1, day=1
) # TODO factor bookmark into its own logic
class ChemRXIVSpider(scrapy.Spider):
name = "chemrxiv"
start_urls = [
"https://chemrxiv.org/api/institutions/259/items?types=&licenses=&orderBy=published_date&orderType=desc&limit=40&search=&categories=&itemTypes=articles"
]
id_prefix = "chemrxiv"
def parse(self, response):
        # ChemRxiv serves an infinite-scrolling site that makes a JSON request
# for 40 new items upon each scrolling event. The first request is without a
# cursor query. The first response returns 40 items + a cursor. Subsequent
# requests need this cursor.
json_data = json.loads(response.body_as_unicode())
cursor = self._extract_cursor(json_data)
article_stubs = self._extract_stubs(json_data)
dates = []
for stub in article_stubs:
data = self._process_stub(stub)
dates.append(self._get_publication_date(stub))
yield data
if dates:
oldest_date = min(dates)
else:
oldest_date = None
next_page = self._next_json_page(cursor)
if oldest_date is not None and self._is_page_new(oldest_date):
self.logger.info(f"Follow to next page: {next_page}")
yield response.follow(next_page, callback=self.parse)
else:
self.logger.info(
f"Do not follow to next page, bookmark reached: {BOOKMARK}"
)
def _extract_cursor(self, json_data):
return json_data["cursor"]
def _extract_stubs(self, json_data):
return json_data["items"]
def _process_stub(self, stub_data):
data = {
"title": self._get_article_title(stub_data),
"url": self._get_article_url(stub_data),
"posted": self._get_article_posted_date(stub_data),
"is_revision": self._get_revision_status(stub_data),
"id": self._get_article_id(stub_data),
}
return data
def _get_article_title(self, stub_data):
return stub_data["data"]["title"]
def _get_article_url(self, stub_data):
return stub_data["data"]["publicUrl"]
def _get_article_posted_date(self, stub_data):
date_string = stub_data["data"]["timeline"]["posted"]
date_string = date_string.strip("Z")
date_time = datetime.datetime.fromisoformat(date_string)
date = date_time.strftime(POSTED_DATE_FORMAT)
return date
def _get_revision_status(self, stub_data):
version = stub_data["data"]["version"]
return version > 1
def _get_article_id(self, stub_data):
return self.id_prefix + "_" + str(stub_data["data"]["id"])
def _get_publication_date(self, stub_data):
date_string = stub_data["data"]["publishedDate"]
date_string = date_string.strip("Z")
return datetime.datetime.fromisoformat(date_string)
def _is_page_new(self, date):
return date > BOOKMARK
def _next_json_page(self, cursor):
base = self.start_urls[0]
return base + f"&cursor={cursor}"
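# Example (illustrative; the filenames are arbitrary): run the spider standalone
# and dump the scraped stubs to a JSON lines file:
#   scrapy runspider chemrxiv_spider.py -o chemrxiv_stubs.jl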
|
"""
ASGI config for managair_server project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "managair_server.settings")
application = get_asgi_application()
|
#!/usr/bin/env python3
import datetime
import os
import time
from pathlib import Path
from typing import Dict, Optional, Tuple
from collections import namedtuple, OrderedDict
import psutil
from smbus2 import SMBus
import cereal.messaging as messaging
from cereal import log
from common.filter_simple import FirstOrderFilter
from common.numpy_fast import interp
from common.params import Params, ParamKeyType
from common.realtime import DT_TRML, sec_since_boot
from common.dict_helpers import strip_deprecated_keys
from selfdrive.controls.lib.alertmanager import set_offroad_alert
from selfdrive.controls.lib.pid import PIDController
from selfdrive.hardware import EON, TICI, PC, HARDWARE
from selfdrive.loggerd.config import get_available_percent
from selfdrive.swaglog import cloudlog
from selfdrive.thermald.power_monitoring import PowerMonitoring
from selfdrive.version import terms_version, training_version
ThermalStatus = log.DeviceState.ThermalStatus
NetworkType = log.DeviceState.NetworkType
NetworkStrength = log.DeviceState.NetworkStrength
CURRENT_TAU = 15. # 15s time constant
TEMP_TAU = 5. # 5s time constant
DISCONNECT_TIMEOUT = 5. # wait 5 seconds before going offroad after disconnect so you get an alert
ThermalBand = namedtuple("ThermalBand", ['min_temp', 'max_temp'])
# List of thermal bands. We stay in the current band as long as the temperature is within its bounds.
# When the temperature leaves the bounds, we jump to the lower or higher band. Bands are ordered in the dict.
THERMAL_BANDS = OrderedDict({
ThermalStatus.green: ThermalBand(None, 80.0),
ThermalStatus.yellow: ThermalBand(75.0, 96.0),
ThermalStatus.red: ThermalBand(80.0, 107.),
ThermalStatus.danger: ThermalBand(94.0, None),
})
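# Example: while in the yellow band the status stays yellow for temps within
# [75.0, 96.0]; dropping below 75.0 moves the status down to green, while
# exceeding 96.0 moves it up to red.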
# Override to highest thermal band when offroad and above this temp
OFFROAD_DANGER_TEMP = 79.5 if TICI else 70.0
prev_offroad_states: Dict[str, Tuple[bool, Optional[str]]] = {}
prebuiltfile = '/data/openpilot/prebuilt'
def read_tz(x):
if x is None:
return 0
try:
with open(f"/sys/devices/virtual/thermal/thermal_zone{x}/temp") as f:
return int(f.read())
except FileNotFoundError:
return 0
def read_thermal(thermal_config):
dat = messaging.new_message('deviceState')
dat.deviceState.cpuTempC = [read_tz(z) / thermal_config.cpu[1] for z in thermal_config.cpu[0]]
dat.deviceState.gpuTempC = [read_tz(z) / thermal_config.gpu[1] for z in thermal_config.gpu[0]]
dat.deviceState.memoryTempC = read_tz(thermal_config.mem[0]) / thermal_config.mem[1]
dat.deviceState.ambientTempC = read_tz(thermal_config.ambient[0]) / thermal_config.ambient[1]
dat.deviceState.pmicTempC = [read_tz(z) / thermal_config.pmic[1] for z in thermal_config.pmic[0]]
return dat
def setup_eon_fan():
os.system("echo 2 > /sys/module/dwc3_msm/parameters/otg_switch")
last_eon_fan_val = None
def set_eon_fan(val):
global last_eon_fan_val
if last_eon_fan_val is None or last_eon_fan_val != val:
bus = SMBus(7, force=True)
try:
i = [0x1, 0x3 | 0, 0x3 | 0x08, 0x3 | 0x10][val]
bus.write_i2c_block_data(0x3d, 0, [i])
except IOError:
# tusb320
if val == 0:
bus.write_i2c_block_data(0x67, 0xa, [0])
else:
bus.write_i2c_block_data(0x67, 0xa, [0x20])
bus.write_i2c_block_data(0x67, 0x8, [(val - 1) << 6])
bus.close()
last_eon_fan_val = val
# temp thresholds to control fan speed - high hysteresis
_TEMP_THRS_H = [50., 65., 80., 10000]
# temp thresholds to control fan speed - low hysteresis
_TEMP_THRS_L = [42.5, 57.5, 72.5, 10000]
# fan speed options
_FAN_SPEEDS = [0, 16384, 32768, 65535]
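# Worked example (illustrative): at max_cpu_temp = 60.0 the high-hysteresis
# thresholds pick 65. -> speed 16384, while the low-hysteresis thresholds pick
# 72.5 -> speed 32768. handle_fan_eon() below only raises the speed when the
# high list asks for more and only lowers it when the low list asks for less,
# so a fan already at 32768 stays there while a slower fan steps up to 16384;
# the gap between the two lists is what provides the hysteresis.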
def handle_fan_eon(controller, max_cpu_temp, fan_speed, ignition):
new_speed_h = next(speed for speed, temp_h in zip(_FAN_SPEEDS, _TEMP_THRS_H) if temp_h > max_cpu_temp)
new_speed_l = next(speed for speed, temp_l in zip(_FAN_SPEEDS, _TEMP_THRS_L) if temp_l > max_cpu_temp)
if new_speed_h > fan_speed:
# update speed if using the high thresholds results in fan speed increment
fan_speed = new_speed_h
elif new_speed_l < fan_speed:
# update speed if using the low thresholds results in fan speed decrement
fan_speed = new_speed_l
set_eon_fan(fan_speed // 16384)
return fan_speed
def handle_fan_uno(controller, max_cpu_temp, fan_speed, ignition):
new_speed = int(interp(max_cpu_temp, [40.0, 80.0], [0, 80]))
if not ignition:
new_speed = min(30, new_speed)
return new_speed
last_ignition = False
def handle_fan_tici(controller, max_cpu_temp, fan_speed, ignition):
global last_ignition
controller.neg_limit = -(80 if ignition else 30)
controller.pos_limit = -(30 if ignition else 0)
if ignition != last_ignition:
controller.reset()
target = 75
fan_pwr_out = -int(controller.update(setpoint=target,
measurement=max_cpu_temp,
feedforward=interp(target,[60, 100],[-80, 0])
))
fan_pwr_out = max(fan_pwr_out, 30) if ignition else min(fan_pwr_out, 30)
last_ignition = ignition
return fan_pwr_out
def set_offroad_alert_if_changed(offroad_alert: str, show_alert: bool, extra_text: Optional[str]=None):
if prev_offroad_states.get(offroad_alert, None) == (show_alert, extra_text):
return
prev_offroad_states[offroad_alert] = (show_alert, extra_text)
set_offroad_alert(offroad_alert, show_alert, extra_text)
def thermald_thread():
pm = messaging.PubMaster(['deviceState'])
pandaState_timeout = int(1000 * 2.5 * DT_TRML) # 2.5x the expected pandaState frequency
pandaState_sock = messaging.sub_sock('pandaStates', timeout=pandaState_timeout)
sm = messaging.SubMaster(["peripheralState", "gpsLocationExternal", "managerState"])
fan_speed = 0
count = 0
onroad_conditions = {
"ignition": False,
}
startup_conditions = {}
startup_conditions_prev = {}
off_ts = None
started_ts = None
started_seen = False
thermal_status = ThermalStatus.green
usb_power = True
network_type = NetworkType.none
network_strength = NetworkStrength.unknown
network_info = None
modem_version = None
registered_count = 0
nvme_temps = None
modem_temps = None
current_filter = FirstOrderFilter(0., CURRENT_TAU, DT_TRML)
temp_filter = FirstOrderFilter(0., TEMP_TAU, DT_TRML)
pandaState_prev = None
should_start_prev = False
in_car = False
handle_fan = None
is_uno = False
ui_running_prev = False
params = Params()
power_monitor = PowerMonitoring()
no_panda_cnt = 0
HARDWARE.initialize_hardware()
thermal_config = HARDWARE.get_thermal_config()
# TODO: use PI controller for UNO
controller = PIDController(k_p=2, k_i=2e-3, k_f=1, neg_limit=-80, pos_limit=0, rate=(1 / DT_TRML))
# Leave flag for loggerd to indicate device was left onroad
if params.get_bool("IsOnroad"):
params.put_bool("BootedOnroad", True)
is_openpilot_dir = True
while True:
pandaStates = messaging.recv_sock(pandaState_sock, wait=True)
sm.update(0)
peripheralState = sm['peripheralState']
msg = read_thermal(thermal_config)
if pandaStates is not None and len(pandaStates.pandaStates) > 0:
pandaState = pandaStates.pandaStates[0]
# If we lose connection to the panda, wait 5 seconds before going offroad
if pandaState.pandaType == log.PandaState.PandaType.unknown:
no_panda_cnt += 1
if no_panda_cnt > DISCONNECT_TIMEOUT / DT_TRML:
if onroad_conditions["ignition"]:
cloudlog.error("Lost panda connection while onroad")
onroad_conditions["ignition"] = False
else:
no_panda_cnt = 0
onroad_conditions["ignition"] = pandaState.ignitionLine or pandaState.ignitionCan
in_car = pandaState.harnessStatus != log.PandaState.HarnessStatus.notConnected
usb_power = peripheralState.usbPowerMode != log.PeripheralState.UsbPowerMode.client
# Setup fan handler on first connect to panda
if handle_fan is None and peripheralState.pandaType != log.PandaState.PandaType.unknown:
is_uno = peripheralState.pandaType == log.PandaState.PandaType.uno
if TICI:
cloudlog.info("Setting up TICI fan handler")
handle_fan = handle_fan_tici
elif is_uno or PC:
cloudlog.info("Setting up UNO fan handler")
handle_fan = handle_fan_uno
else:
cloudlog.info("Setting up EON fan handler")
setup_eon_fan()
handle_fan = handle_fan_eon
# Handle disconnect
if pandaState_prev is not None:
if pandaState.pandaType == log.PandaState.PandaType.unknown and \
pandaState_prev.pandaType != log.PandaState.PandaType.unknown:
params.clear_all(ParamKeyType.CLEAR_ON_PANDA_DISCONNECT)
pandaState_prev = pandaState
# these are expensive calls. update every 10s
if (count % int(10. / DT_TRML)) == 0:
try:
network_type = HARDWARE.get_network_type()
network_strength = HARDWARE.get_network_strength(network_type)
network_info = HARDWARE.get_network_info() # pylint: disable=assignment-from-none
nvme_temps = HARDWARE.get_nvme_temperatures()
modem_temps = HARDWARE.get_modem_temperatures()
# Log modem version once
if modem_version is None:
modem_version = HARDWARE.get_modem_version() # pylint: disable=assignment-from-none
if modem_version is not None:
cloudlog.warning(f"Modem version: {modem_version}")
if TICI and (network_info.get('state', None) == "REGISTERED"):
registered_count += 1
else:
registered_count = 0
if registered_count > 10:
cloudlog.warning(f"Modem stuck in registered state {network_info}. nmcli conn up lte")
os.system("nmcli conn up lte")
registered_count = 0
except Exception:
cloudlog.exception("Error getting network status")
msg.deviceState.freeSpacePercent = get_available_percent(default=100.0)
msg.deviceState.memoryUsagePercent = int(round(psutil.virtual_memory().percent))
msg.deviceState.cpuUsagePercent = [int(round(n)) for n in psutil.cpu_percent(percpu=True)]
msg.deviceState.gpuUsagePercent = int(round(HARDWARE.get_gpu_usage_percent()))
msg.deviceState.networkType = network_type
msg.deviceState.networkStrength = network_strength
if network_info is not None:
msg.deviceState.networkInfo = network_info
if nvme_temps is not None:
msg.deviceState.nvmeTempC = nvme_temps
if modem_temps is not None:
msg.deviceState.modemTempC = modem_temps
msg.deviceState.screenBrightnessPercent = HARDWARE.get_screen_brightness()
msg.deviceState.batteryPercent = HARDWARE.get_battery_capacity()
msg.deviceState.batteryCurrent = HARDWARE.get_battery_current()
msg.deviceState.usbOnline = HARDWARE.get_usb_present()
current_filter.update(msg.deviceState.batteryCurrent / 1e6)
max_comp_temp = temp_filter.update(
max(max(msg.deviceState.cpuTempC), msg.deviceState.memoryTempC, max(msg.deviceState.gpuTempC))
)
if handle_fan is not None:
fan_speed = handle_fan(controller, max_comp_temp, fan_speed, onroad_conditions["ignition"])
msg.deviceState.fanSpeedPercentDesired = fan_speed
is_offroad_for_5_min = (started_ts is None) and ((not started_seen) or (off_ts is None) or (sec_since_boot() - off_ts > 60 * 5))
if is_offroad_for_5_min and max_comp_temp > OFFROAD_DANGER_TEMP:
# If device is offroad we want to cool down before going onroad
# since going onroad increases load and can make temps go over 107
thermal_status = ThermalStatus.danger
else:
current_band = THERMAL_BANDS[thermal_status]
band_idx = list(THERMAL_BANDS.keys()).index(thermal_status)
if current_band.min_temp is not None and max_comp_temp < current_band.min_temp:
thermal_status = list(THERMAL_BANDS.keys())[band_idx - 1]
elif current_band.max_temp is not None and max_comp_temp > current_band.max_temp:
thermal_status = list(THERMAL_BANDS.keys())[band_idx + 1]
# **** starting logic ****
# Ensure date/time are valid
now = datetime.datetime.utcnow()
startup_conditions["time_valid"] = (now.year > 2020) or (now.year == 2020 and now.month >= 10)
set_offroad_alert_if_changed("Offroad_InvalidTime", (not startup_conditions["time_valid"]))
startup_conditions["up_to_date"] = params.get("Offroad_ConnectivityNeeded") is None or params.get_bool("DisableUpdates") or params.get_bool("SnoozeUpdate")
startup_conditions["not_uninstalling"] = not params.get_bool("DoUninstall")
startup_conditions["accepted_terms"] = params.get("HasAcceptedTerms") == terms_version
# with 2% left, we killall, otherwise the phone will take a long time to boot
startup_conditions["free_space"] = msg.deviceState.freeSpacePercent > 2
startup_conditions["completed_training"] = params.get("CompletedTrainingVersion") == training_version or \
params.get_bool("Passive")
startup_conditions["not_driver_view"] = not params.get_bool("IsDriverViewEnabled")
startup_conditions["not_taking_snapshot"] = not params.get_bool("IsTakingSnapshot")
# if any CPU gets above 107 or the battery gets above 63, kill all processes
# controls will warn with CPU above 95 or battery above 60
onroad_conditions["device_temp_good"] = thermal_status < ThermalStatus.danger
set_offroad_alert_if_changed("Offroad_TemperatureTooHigh", (not onroad_conditions["device_temp_good"]))
if TICI:
set_offroad_alert_if_changed("Offroad_StorageMissing", (not Path("/data/media").is_mount()))
# Handle offroad/onroad transition
should_start = all(onroad_conditions.values())
if started_ts is None:
should_start = should_start and all(startup_conditions.values())
if should_start != should_start_prev or (count == 0):
params.put_bool("IsOnroad", should_start)
params.put_bool("IsOffroad", not should_start)
HARDWARE.set_power_save(not should_start)
if should_start:
off_ts = None
if started_ts is None:
started_ts = sec_since_boot()
started_seen = True
else:
if onroad_conditions["ignition"] and (startup_conditions != startup_conditions_prev):
cloudlog.event("Startup blocked", startup_conditions=startup_conditions, onroad_conditions=onroad_conditions)
started_ts = None
if off_ts is None:
off_ts = sec_since_boot()
prebuilt_on = params.get_bool("PrebuiltOn")
if not os.path.isdir("/data/openpilot"):
if is_openpilot_dir:
os.system("cd /data/params/d; rm -f DongleId") # Delete DongleID if the Openpilot directory disappears, Seems you want to switch fork/branch.
is_openpilot_dir = False
elif not os.path.isfile(prebuiltfile) and prebuilt_on and is_openpilot_dir:
os.system("cd /data/openpilot; touch prebuilt")
elif os.path.isfile(prebuiltfile) and not prebuilt_on:
os.system("cd /data/openpilot; rm -f prebuilt")
# Offroad power monitoring
power_monitor.calculate(peripheralState, onroad_conditions["ignition"])
msg.deviceState.offroadPowerUsageUwh = power_monitor.get_power_used()
msg.deviceState.carBatteryCapacityUwh = max(0, power_monitor.get_car_battery_capacity())
current_power_draw = HARDWARE.get_current_power_draw() # pylint: disable=assignment-from-none
msg.deviceState.powerDrawW = current_power_draw if current_power_draw is not None else 0
# Check if we need to disable charging (handled by boardd)
msg.deviceState.chargingDisabled = power_monitor.should_disable_charging(onroad_conditions["ignition"], in_car, off_ts)
# Check if we need to shut down
if power_monitor.should_shutdown(peripheralState, onroad_conditions["ignition"], in_car, off_ts, started_seen):
cloudlog.info(f"shutting device down, offroad since {off_ts}")
# TODO: add function for blocking cloudlog instead of sleep
time.sleep(10)
HARDWARE.shutdown()
# If UI has crashed, set the brightness to reasonable non-zero value
ui_running = "ui" in (p.name for p in sm["managerState"].processes if p.running)
if ui_running_prev and not ui_running:
HARDWARE.set_screen_brightness(20)
ui_running_prev = ui_running
msg.deviceState.chargingError = current_filter.x > 0. and msg.deviceState.batteryPercent < 90 # if current is positive, then battery is being discharged
msg.deviceState.started = started_ts is not None
msg.deviceState.startedMonoTime = int(1e9*(started_ts or 0))
last_ping = params.get("LastAthenaPingTime")
if last_ping is not None:
msg.deviceState.lastAthenaPingTime = int(last_ping)
msg.deviceState.thermalStatus = thermal_status
pm.send("deviceState", msg)
if EON and not is_uno:
set_offroad_alert_if_changed("Offroad_ChargeDisabled", (not usb_power))
should_start_prev = should_start
startup_conditions_prev = startup_conditions.copy()
# report to server once every 10 minutes
if (count % int(600. / DT_TRML)) == 0:
if EON and started_ts is None and msg.deviceState.memoryUsagePercent > 40:
cloudlog.event("High offroad memory usage", mem=msg.deviceState.memoryUsagePercent)
cloudlog.event("STATUS_PACKET",
count=count,
pandaStates=(strip_deprecated_keys(pandaStates.to_dict()) if pandaStates else None),
peripheralState=strip_deprecated_keys(peripheralState.to_dict()),
location=(strip_deprecated_keys(sm["gpsLocationExternal"].to_dict()) if sm.alive["gpsLocationExternal"] else None),
deviceState=strip_deprecated_keys(msg.to_dict()))
count += 1
def main():
thermald_thread()
if __name__ == "__main__":
main()
|
from tqdm import tqdm
import numpy as np
import torch
import torch.nn as nn
def build_mlp(input_dim, output_dim, hidden_units=(64, 64),
hidden_activation=nn.Tanh(), output_activation=None):
layers = []
units = input_dim
for next_units in hidden_units:
layers.append(nn.Linear(units, next_units))
layers.append(hidden_activation)
units = next_units
layers.append(nn.Linear(units, output_dim))
if output_activation is not None:
layers.append(output_activation)
return nn.Sequential(*layers)
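# Illustrative usage sketch (the shapes are assumptions, not part of the
# original module): a 4 -> 64 -> 64 -> 2 network with tanh hidden activations
# and a linear head, since output_activation defaults to None.
if __name__ == "__main__":
    net = build_mlp(input_dim=4, output_dim=2)
    out = net(torch.zeros(8, 4))
    assert out.shape == (8, 2)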
def dict_concat(x):
return torch.cat([value for key, value in x.items()], dim=0)
def dict_config_concat(x):
return torch.cat([torch.cat((value, key.repeat(value.size(0),1)), dim=1) for key, value in x.items()], dim=0)
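# Illustrative sketch (tensor shapes are assumptions): dict_config_concat maps
# a {config_tensor: batch_tensor} dict to one batch where each row has its
# config appended, e.g. a (2,)-dim key with a (3, 5) value yields shape (3, 7):
#   x = {torch.tensor([1., 0.]): torch.zeros(3, 5)}
#   dict_config_concat(x).shape  ->  torch.Size([3, 7])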
|
from .scf_base import SiriusBaseCalculation, make_sirius_json
from aiida.plugins import DataFactory
from aiida.common import datastructures
import tempfile
import json
import yaml
import six
SiriusMDParameters = DataFactory('sirius.md')
SinglefileData = DataFactory('singlefile')
ArrayData = DataFactory('array')
List = DataFactory('list')
class SiriusMDCalculation(SiriusBaseCalculation):
@classmethod
def define(cls, spec):
super(SiriusMDCalculation, cls).define(spec)
spec.input('sirius_md_params', valid_type=SiriusMDParameters, help='MD Parameters')
spec.input('metadata.options.parser_name', valid_type=six.string_types, default='sirius.md')
spec.input('metadata.options.output_filename', valid_type=six.string_types, default='sirius.md.out')
spec.output('md', valid_type=SinglefileData)
spec.output('md_results', valid_type=List)
def prepare_for_submission(self, folder):
"""
Create the input files sirius.json and input.yml.
:param folder: an `aiida.common.folders.Folder` where the plugin should temporarily place all files needed by
the calculation.
:return: `aiida.common.datastructures.CalcInfo` instance
"""
codeinfo = datastructures.CodeInfo()
output_filename = self.metadata.options.output_filename
codeinfo.cmdline_params = ['--input=input.yml']
codeinfo.code_uuid = self.inputs.code.uuid
codeinfo.stdout_name = self.metadata.options.output_filename
codeinfo.withmpi = self.inputs.metadata.options.withmpi
# with config from input
structure = self.inputs.structure
kpoints = self.inputs.kpoints
magnetization = self.inputs.magnetization
# sirius_json = make_sirius_json(self.inputs.sirius_config.get_dict()['parameters'],
sirius_json = self.inputs.sirius_config.get_dict()
with tempfile.NamedTemporaryFile(mode='w', suffix='.json', delete=False) as sirius_tmpfile:
# insert Pseudopotentials directly into json
sirius_json = self._read_pseudos(sirius_json)
# dump to file
json.dump(sirius_json, sirius_tmpfile)
sirius_config = SinglefileData(file=sirius_tmpfile.name)
sirius_config.store()
# prepare YAML input for the MD run
with tempfile.NamedTemporaryFile(mode='w', suffix='.yml', delete=False) as sirius_md_yaml:
out = yaml.dump({'parameters': self.inputs.sirius_md_params.get_dict()})
md_tmpfile_name = sirius_md_yaml.name
sirius_md_yaml.write(out)
sirius_md_config = SinglefileData(file=md_tmpfile_name)
sirius_md_config.store()
# Prepare a `CalcInfo` to be returned to the engine
calcinfo = datastructures.CalcInfo()
calcinfo.codes_info = [codeinfo]
calcinfo.local_copy_list = [
(sirius_config.uuid, sirius_config.filename, 'sirius.json'),
(sirius_md_config.uuid, sirius_md_config.filename, 'input.yml')
]
calcinfo.retrieve_list = [self.metadata.options.output_filename, 'md_results.json']
return calcinfo
|
"""
Support for the NetAtmo Weather Service.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/sensor.netatmo/
"""
import logging
from time import time
import threading
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (
TEMP_CELSIUS, DEVICE_CLASS_HUMIDITY, DEVICE_CLASS_TEMPERATURE,
STATE_UNKNOWN)
from homeassistant.helpers.entity import Entity
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)
CONF_MODULES = 'modules'
CONF_STATION = 'station'
DEPENDENCIES = ['netatmo']
# This is the NetAtmo data upload interval in seconds
NETATMO_UPDATE_INTERVAL = 600
SENSOR_TYPES = {
'temperature': ['Temperature', TEMP_CELSIUS, None,
DEVICE_CLASS_TEMPERATURE],
'co2': ['CO2', 'ppm', 'mdi:cloud', None],
'pressure': ['Pressure', 'mbar', 'mdi:gauge', None],
'noise': ['Noise', 'dB', 'mdi:volume-high', None],
'humidity': ['Humidity', '%', None, DEVICE_CLASS_HUMIDITY],
'rain': ['Rain', 'mm', 'mdi:weather-rainy', None],
'sum_rain_1': ['sum_rain_1', 'mm', 'mdi:weather-rainy', None],
'sum_rain_24': ['sum_rain_24', 'mm', 'mdi:weather-rainy', None],
'battery_vp': ['Battery', '', 'mdi:battery', None],
'battery_lvl': ['Battery_lvl', '', 'mdi:battery', None],
'min_temp': ['Min Temp.', TEMP_CELSIUS, 'mdi:thermometer', None],
'max_temp': ['Max Temp.', TEMP_CELSIUS, 'mdi:thermometer', None],
'windangle': ['Angle', '', 'mdi:compass', None],
'windangle_value': ['Angle Value', 'º', 'mdi:compass', None],
'windstrength': ['Strength', 'km/h', 'mdi:weather-windy', None],
'gustangle': ['Gust Angle', '', 'mdi:compass', None],
'gustangle_value': ['Gust Angle Value', 'º', 'mdi:compass', None],
'guststrength': ['Gust Strength', 'km/h', 'mdi:weather-windy', None],
'rf_status': ['Radio', '', 'mdi:signal', None],
'rf_status_lvl': ['Radio_lvl', '', 'mdi:signal', None],
'wifi_status': ['Wifi', '', 'mdi:wifi', None],
'wifi_status_lvl': ['Wifi_lvl', 'dBm', 'mdi:wifi', None],
}
MODULE_SCHEMA = vol.Schema({
vol.Required(cv.string):
vol.All(cv.ensure_list, [vol.In(SENSOR_TYPES)]),
})
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Optional(CONF_STATION): cv.string,
vol.Optional(CONF_MODULES): MODULE_SCHEMA,
})
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the available Netatmo weather sensors."""
netatmo = hass.components.netatmo
data = NetAtmoData(netatmo.NETATMO_AUTH, config.get(CONF_STATION, None))
dev = []
import pyatmo
try:
if CONF_MODULES in config:
# Iterate each module
for module_name, monitored_conditions in\
config[CONF_MODULES].items():
# Test if module exists
if module_name not in data.get_module_names():
_LOGGER.error('Module name: "%s" not found', module_name)
continue
# Only create sensors for monitored properties
for variable in monitored_conditions:
dev.append(NetAtmoSensor(data, module_name, variable))
else:
for module_name in data.get_module_names():
for variable in\
data.station_data.monitoredConditions(module_name):
if variable in SENSOR_TYPES.keys():
dev.append(NetAtmoSensor(data, module_name, variable))
else:
_LOGGER.warning("Ignoring unknown var %s for mod %s",
variable, module_name)
except pyatmo.NoDevice:
return None
add_entities(dev, True)
class NetAtmoSensor(Entity):
"""Implementation of a Netatmo sensor."""
def __init__(self, netatmo_data, module_name, sensor_type):
"""Initialize the sensor."""
self._name = 'Netatmo {} {}'.format(module_name,
SENSOR_TYPES[sensor_type][0])
self.netatmo_data = netatmo_data
self.module_name = module_name
self.type = sensor_type
self._state = None
self._device_class = SENSOR_TYPES[self.type][3]
self._icon = SENSOR_TYPES[self.type][2]
self._unit_of_measurement = SENSOR_TYPES[self.type][1]
module_id = self.netatmo_data.\
station_data.moduleByName(module=module_name)['_id']
self.module_id = module_id[1]
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def icon(self):
"""Icon to use in the frontend, if any."""
return self._icon
@property
def device_class(self):
"""Return the device class of the sensor."""
return self._device_class
@property
def state(self):
"""Return the state of the device."""
return self._state
@property
def unit_of_measurement(self):
"""Return the unit of measurement of this entity, if any."""
return self._unit_of_measurement
def update(self):
"""Get the latest data from NetAtmo API and updates the states."""
self.netatmo_data.update()
data = self.netatmo_data.data.get(self.module_name)
if data is None:
_LOGGER.warning("No data found for %s", self.module_name)
self._state = STATE_UNKNOWN
return
if self.type == 'temperature':
self._state = round(data['Temperature'], 1)
elif self.type == 'humidity':
self._state = data['Humidity']
elif self.type == 'rain':
self._state = data['Rain']
elif self.type == 'sum_rain_1':
self._state = data['sum_rain_1']
elif self.type == 'sum_rain_24':
self._state = data['sum_rain_24']
elif self.type == 'noise':
self._state = data['Noise']
elif self.type == 'co2':
self._state = data['CO2']
elif self.type == 'pressure':
self._state = round(data['Pressure'], 1)
elif self.type == 'battery_lvl':
self._state = data['battery_vp']
elif self.type == 'battery_vp' and self.module_id == '6':
if data['battery_vp'] >= 5590:
self._state = "Full"
elif data['battery_vp'] >= 5180:
self._state = "High"
elif data['battery_vp'] >= 4770:
self._state = "Medium"
elif data['battery_vp'] >= 4360:
self._state = "Low"
elif data['battery_vp'] < 4360:
self._state = "Very Low"
elif self.type == 'battery_vp' and self.module_id == '5':
if data['battery_vp'] >= 5500:
self._state = "Full"
elif data['battery_vp'] >= 5000:
self._state = "High"
elif data['battery_vp'] >= 4500:
self._state = "Medium"
elif data['battery_vp'] >= 4000:
self._state = "Low"
elif data['battery_vp'] < 4000:
self._state = "Very Low"
elif self.type == 'battery_vp' and self.module_id == '3':
if data['battery_vp'] >= 5640:
self._state = "Full"
elif data['battery_vp'] >= 5280:
self._state = "High"
elif data['battery_vp'] >= 4920:
self._state = "Medium"
elif data['battery_vp'] >= 4560:
self._state = "Low"
elif data['battery_vp'] < 4560:
self._state = "Very Low"
elif self.type == 'battery_vp' and self.module_id == '2':
if data['battery_vp'] >= 5500:
self._state = "Full"
elif data['battery_vp'] >= 5000:
self._state = "High"
elif data['battery_vp'] >= 4500:
self._state = "Medium"
elif data['battery_vp'] >= 4000:
self._state = "Low"
elif data['battery_vp'] < 4000:
self._state = "Very Low"
elif self.type == 'min_temp':
self._state = data['min_temp']
elif self.type == 'max_temp':
self._state = data['max_temp']
elif self.type == 'windangle_value':
self._state = data['WindAngle']
elif self.type == 'windangle':
if data['WindAngle'] >= 330:
self._state = "N (%d\xb0)" % data['WindAngle']
elif data['WindAngle'] >= 300:
self._state = "NW (%d\xb0)" % data['WindAngle']
elif data['WindAngle'] >= 240:
self._state = "W (%d\xb0)" % data['WindAngle']
elif data['WindAngle'] >= 210:
self._state = "SW (%d\xb0)" % data['WindAngle']
elif data['WindAngle'] >= 150:
self._state = "S (%d\xb0)" % data['WindAngle']
elif data['WindAngle'] >= 120:
self._state = "SE (%d\xb0)" % data['WindAngle']
elif data['WindAngle'] >= 60:
self._state = "E (%d\xb0)" % data['WindAngle']
elif data['WindAngle'] >= 30:
self._state = "NE (%d\xb0)" % data['WindAngle']
elif data['WindAngle'] >= 0:
self._state = "N (%d\xb0)" % data['WindAngle']
elif self.type == 'windstrength':
self._state = data['WindStrength']
elif self.type == 'gustangle_value':
self._state = data['GustAngle']
elif self.type == 'gustangle':
if data['GustAngle'] >= 330:
self._state = "N (%d\xb0)" % data['GustAngle']
elif data['GustAngle'] >= 300:
self._state = "NW (%d\xb0)" % data['GustAngle']
elif data['GustAngle'] >= 240:
self._state = "W (%d\xb0)" % data['GustAngle']
elif data['GustAngle'] >= 210:
self._state = "SW (%d\xb0)" % data['GustAngle']
elif data['GustAngle'] >= 150:
self._state = "S (%d\xb0)" % data['GustAngle']
elif data['GustAngle'] >= 120:
self._state = "SE (%d\xb0)" % data['GustAngle']
elif data['GustAngle'] >= 60:
self._state = "E (%d\xb0)" % data['GustAngle']
elif data['GustAngle'] >= 30:
self._state = "NE (%d\xb0)" % data['GustAngle']
elif data['GustAngle'] >= 0:
self._state = "N (%d\xb0)" % data['GustAngle']
elif self.type == 'guststrength':
self._state = data['GustStrength']
elif self.type == 'rf_status_lvl':
self._state = data['rf_status']
elif self.type == 'rf_status':
if data['rf_status'] >= 90:
self._state = "Low"
elif data['rf_status'] >= 76:
self._state = "Medium"
elif data['rf_status'] >= 60:
self._state = "High"
elif data['rf_status'] <= 59:
self._state = "Full"
elif self.type == 'wifi_status_lvl':
self._state = data['wifi_status']
elif self.type == 'wifi_status':
if data['wifi_status'] >= 86:
self._state = "Low"
elif data['wifi_status'] >= 71:
self._state = "Medium"
elif data['wifi_status'] >= 56:
self._state = "High"
elif data['wifi_status'] <= 55:
self._state = "Full"
class NetAtmoData:
"""Get the latest data from NetAtmo."""
def __init__(self, auth, station):
"""Initialize the data object."""
self.auth = auth
self.data = None
self.station_data = None
self.station = station
self._next_update = time()
self._update_in_progress = threading.Lock()
def get_module_names(self):
"""Return all module available on the API as a list."""
self.update()
return self.data.keys()
def update(self):
"""Call the Netatmo API to update the data.
This method is not throttled by the built-in Throttle decorator
but by custom logic that takes into account the time
of the last update from the cloud.
"""
if time() < self._next_update or \
not self._update_in_progress.acquire(False):
return
try:
import pyatmo
try:
self.station_data = pyatmo.WeatherStationData(self.auth)
except TypeError:
_LOGGER.error("Failed to connect to NetAtmo")
return # finally statement will be executed
if self.station is not None:
self.data = self.station_data.lastData(
station=self.station, exclude=3600)
else:
self.data = self.station_data.lastData(exclude=3600)
newinterval = 0
for module in self.data:
if 'When' in self.data[module]:
newinterval = self.data[module]['When']
break
if newinterval:
# Try and estimate when fresh data will be available
newinterval += NETATMO_UPDATE_INTERVAL - time()
if newinterval > NETATMO_UPDATE_INTERVAL - 30:
newinterval = NETATMO_UPDATE_INTERVAL
else:
if newinterval < NETATMO_UPDATE_INTERVAL / 2:
# Never hammer the NetAtmo API more than
# twice per update interval
newinterval = NETATMO_UPDATE_INTERVAL / 2
_LOGGER.info(
"NetAtmo refresh interval reset to %d seconds",
newinterval)
else:
# Last update time not found, fall back to default value
newinterval = NETATMO_UPDATE_INTERVAL
self._next_update = time() + newinterval
finally:
self._update_in_progress.release()
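# Illustrative sketch (hypothetical objects, not part of this platform): the
# custom throttle makes back-to-back calls cheap.
#   data = NetAtmoData(netatmo.NETATMO_AUTH, station=None)
#   data.update()   # hits the API and computes self._next_update
#   data.update()   # returns immediately because time() < self._next_update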
|
#!/usr/bin/env python3
from __future__ import division
import sys
import math
import random
import time
from collections import deque
from pyglet import image
from pyglet.gl import *
from pyglet.graphics import TextureGroup
from pyglet.window import key, mouse
TICKS_PER_SEC = 60
# Size of sectors used to ease block loading.
SECTOR_SIZE = 16
WALKING_SPEED = 5
FLYING_SPEED = 15
GRAVITY = 20.0
MAX_JUMP_HEIGHT = 1.0 # About the height of a block.
# To derive the formula for calculating jump speed, first solve
# v_t = v_0 + a * t
# for the time at which you achieve maximum height, where a is the acceleration
# due to gravity and v_t = 0. This gives:
# t = - v_0 / a
# Use t and the desired MAX_JUMP_HEIGHT to solve for v_0 (jump speed) in
# s = s_0 + v_0 * t + (a * t^2) / 2
JUMP_SPEED = math.sqrt(2 * GRAVITY * MAX_JUMP_HEIGHT)
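# Sanity check (illustrative, not part of the original game): inverting the
# derivation above, a body launched upward at JUMP_SPEED against GRAVITY rises
# JUMP_SPEED**2 / (2 * GRAVITY), which should equal MAX_JUMP_HEIGHT.
assert abs(JUMP_SPEED ** 2 / (2 * GRAVITY) - MAX_JUMP_HEIGHT) < 1e-9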
TERMINAL_VELOCITY = 50
PLAYER_HEIGHT = 2
if sys.version_info[0] >= 3:
xrange = range
def cube_vertices(x, y, z, n):
""" Return the vertices of the cube at position x, y, z with size 2*n.
"""
return [
x-n,y+n,z-n, x-n,y+n,z+n, x+n,y+n,z+n, x+n,y+n,z-n, # top
x-n,y-n,z-n, x+n,y-n,z-n, x+n,y-n,z+n, x-n,y-n,z+n, # bottom
x-n,y-n,z-n, x-n,y-n,z+n, x-n,y+n,z+n, x-n,y+n,z-n, # left
x+n,y-n,z+n, x+n,y-n,z-n, x+n,y+n,z-n, x+n,y+n,z+n, # right
x-n,y-n,z+n, x+n,y-n,z+n, x+n,y+n,z+n, x-n,y+n,z+n, # front
x+n,y-n,z-n, x-n,y-n,z-n, x-n,y+n,z-n, x+n,y+n,z-n, # back
]
def tex_coord(x, y, n=4):
""" Return the bounding vertices of the texture square.
"""
m = 1.0 / n
dx = x * m
dy = y * m
return dx, dy, dx + m, dy, dx + m, dy + m, dx, dy + m
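# Worked example (illustrative): with the default 4x4 atlas, the tile at grid
# position (1, 0) spans [0.25, 0.5] x [0.0, 0.25] in texture space.
assert tex_coord(1, 0) == (0.25, 0.0, 0.5, 0.0, 0.5, 0.25, 0.25, 0.25)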
def tex_coords(top, bottom, side):
""" Return a list of the texture squares for the top, bottom and side.
"""
top = tex_coord(*top)
bottom = tex_coord(*bottom)
side = tex_coord(*side)
result = []
result.extend(top)
result.extend(bottom)
result.extend(side * 4)
return result
TEXTURE_PATH = 'texture.png'
GRASS = tex_coords((1, 0), (0, 1), (0, 0))
SAND = tex_coords((1, 1), (1, 1), (1, 1))
BRICK = tex_coords((2, 0), (2, 0), (2, 0))
STONE = tex_coords((2, 1), (2, 1), (2, 1))
FACES = [
( 0, 1, 0),
( 0,-1, 0),
(-1, 0, 0),
( 1, 0, 0),
( 0, 0, 1),
( 0, 0,-1),
]
def normalize(position):
""" Accepts `position` of arbitrary precision and returns the block
containing that position.
Parameters
----------
position : tuple of len 3
Returns
-------
block_position : tuple of ints of len 3
"""
x, y, z = position
x, y, z = (int(round(x)), int(round(y)), int(round(z)))
return (x, y, z)
def sectorize(position):
""" Returns a tuple representing the sector for the given `position`.
Parameters
----------
position : tuple of len 3
Returns
-------
sector : tuple of len 3
"""
x, y, z = normalize(position)
x, y, z = x // SECTOR_SIZE, y // SECTOR_SIZE, z // SECTOR_SIZE
return (x, 0, z)
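# Worked example (illustrative): with SECTOR_SIZE = 16, a position of
# (17.3, 40.0, -3.2) normalizes to block (17, 40, -3), which falls in sector
# (1, 0, -1); the y component is deliberately collapsed to 0.
assert sectorize((17.3, 40.0, -3.2)) == (1, 0, -1)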
class Model(object):
def __init__(self):
# A Batch is a collection of vertex lists for batched rendering.
self.batch = pyglet.graphics.Batch()
# A TextureGroup manages an OpenGL texture.
self.group = TextureGroup(image.load(TEXTURE_PATH).get_texture())
# A mapping from position to the texture of the block at that position.
# This defines all the blocks that are currently in the world.
self.world = {}
# Same mapping as `world` but only contains blocks that are shown.
self.shown = {}
# Mapping from position to a pyglet `VertexList` for all shown blocks.
self._shown = {}
# Mapping from sector to a list of positions inside that sector.
self.sectors = {}
# Simple function queue implementation. The queue is populated with
# _show_block() and _hide_block() calls
self.queue = deque()
self._initialize()
def _initialize(self):
""" Initialize the world by placing all the blocks.
"""
n = 80 # 1/2 width and height of world
s = 1 # step size
y = 0 # initial y height
for x in xrange(-n, n + 1, s):
for z in xrange(-n, n + 1, s):
# create a layer of stone and grass everywhere.
self.add_block((x, y - 2, z), GRASS, immediate=False)
self.add_block((x, y - 3, z), STONE, immediate=False)
if x in (-n, n) or z in (-n, n):
# create outer walls.
for dy in xrange(-2, 3):
self.add_block((x, y + dy, z), STONE, immediate=False)
# generate the hills randomly
o = n - 10
for _ in xrange(120):
a = random.randint(-o, o) # x position of the hill
b = random.randint(-o, o) # z position of the hill
c = -1 # base of the hill
h = random.randint(1, 6) # height of the hill
s = random.randint(4, 8) # 2 * s is the side length of the hill
d = 1 # how quickly to taper off the hills
t = random.choice([GRASS, SAND, BRICK])
for y in xrange(c, c + h):
for x in xrange(a - s, a + s + 1):
for z in xrange(b - s, b + s + 1):
if (x - a) ** 2 + (z - b) ** 2 > (s + 1) ** 2:
continue
if (x - 0) ** 2 + (z - 0) ** 2 < 5 ** 2:
continue
self.add_block((x, y, z), t, immediate=False)
s -= d # decrement side length so hills taper off
def hit_test(self, position, vector, max_distance=8):
""" Line of sight search from current position. If a block is
intersected it is returned, along with the block previously in the line
of sight. If no block is found, return None, None.
Parameters
----------
position : tuple of len 3
The (x, y, z) position to check visibility from.
vector : tuple of len 3
The line of sight vector.
max_distance : int
How many blocks away to search for a hit.
"""
m = 8
x, y, z = position
dx, dy, dz = vector
previous = None
for _ in xrange(max_distance * m):
key = normalize((x, y, z))
if key != previous and key in self.world:
return key, previous
previous = key
x, y, z = x + dx / m, y + dy / m, z + dz / m
return None, None
def exposed(self, position):
""" Returns False is given `position` is surrounded on all 6 sides by
blocks, True otherwise.
"""
x, y, z = position
for dx, dy, dz in FACES:
if (x + dx, y + dy, z + dz) not in self.world:
return True
return False
def add_block(self, position, texture, immediate=True):
""" Add a block with the given `texture` and `position` to the world.
Parameters
----------
position : tuple of len 3
The (x, y, z) position of the block to add.
texture : list of len 3
The coordinates of the texture squares. Use `tex_coords()` to
generate.
immediate : bool
Whether or not to draw the block immediately.
"""
if position in self.world:
self.remove_block(position, immediate)
self.world[position] = texture
self.sectors.setdefault(sectorize(position), []).append(position)
if immediate:
if self.exposed(position):
self.show_block(position)
self.check_neighbors(position)
def remove_block(self, position, immediate=True):
""" Remove the block at the given `position`.
Parameters
----------
position : tuple of len 3
The (x, y, z) position of the block to remove.
immediate : bool
Whether or not to immediately remove block from canvas.
"""
del self.world[position]
self.sectors[sectorize(position)].remove(position)
if immediate:
if position in self.shown:
self.hide_block(position)
self.check_neighbors(position)
def check_neighbors(self, position):
""" Check all blocks surrounding `position` and ensure their visual
state is current. This means hiding blocks that are not exposed and
ensuring that all exposed blocks are shown. Usually used after a block
is added or removed.
"""
x, y, z = position
for dx, dy, dz in FACES:
key = (x + dx, y + dy, z + dz)
if key not in self.world:
continue
if self.exposed(key):
if key not in self.shown:
self.show_block(key)
else:
if key in self.shown:
self.hide_block(key)
def show_block(self, position, immediate=True):
""" Show the block at the given `position`. This method assumes the
block has already been added with add_block()
Parameters
----------
position : tuple of len 3
The (x, y, z) position of the block to show.
immediate : bool
Whether or not to show the block immediately.
"""
texture = self.world[position]
self.shown[position] = texture
if immediate:
self._show_block(position, texture)
else:
self._enqueue(self._show_block, position, texture)
def _show_block(self, position, texture):
""" Private implementation of the `show_block()` method.
Parameters
----------
position : tuple of len 3
The (x, y, z) position of the block to show.
texture : list of len 3
The coordinates of the texture squares. Use `tex_coords()` to
generate.
"""
x, y, z = position
vertex_data = cube_vertices(x, y, z, 0.5)
texture_data = list(texture)
# create vertex list
# FIXME Maybe `add_indexed()` should be used instead
self._shown[position] = self.batch.add(24, GL_QUADS, self.group,
('v3f/static', vertex_data),
('t2f/static', texture_data))
def hide_block(self, position, immediate=True):
""" Hide the block at the given `position`. Hiding does not remove the
block from the world.
Parameters
----------
position : tuple of len 3
The (x, y, z) position of the block to hide.
immediate : bool
Whether or not to immediately remove the block from the canvas.
"""
self.shown.pop(position)
if immediate:
self._hide_block(position)
else:
self._enqueue(self._hide_block, position)
def _hide_block(self, position):
""" Private implementation of the 'hide_block()` method.
"""
self._shown.pop(position).delete()
def show_sector(self, sector):
""" Ensure all blocks in the given sector that should be shown are
drawn to the canvas.
"""
for position in self.sectors.get(sector, []):
if position not in self.shown and self.exposed(position):
self.show_block(position, False)
def hide_sector(self, sector):
""" Ensure all blocks in the given sector that should be hidden are
removed from the canvas.
"""
for position in self.sectors.get(sector, []):
if position in self.shown:
self.hide_block(position, False)
def change_sectors(self, before, after):
""" Move from sector `before` to sector `after`. A sector is a
contiguous x, y sub-region of world. Sectors are used to speed up
world rendering.
"""
before_set = set()
after_set = set()
pad = 4
for dx in xrange(-pad, pad + 1):
for dy in [0]: # xrange(-pad, pad + 1):
for dz in xrange(-pad, pad + 1):
if dx ** 2 + dy ** 2 + dz ** 2 > (pad + 1) ** 2:
continue
if before:
x, y, z = before
before_set.add((x + dx, y + dy, z + dz))
if after:
x, y, z = after
after_set.add((x + dx, y + dy, z + dz))
show = after_set - before_set
hide = before_set - after_set
for sector in show:
self.show_sector(sector)
for sector in hide:
self.hide_sector(sector)
def _enqueue(self, func, *args):
""" Add `func` to the internal queue.
"""
self.queue.append((func, args))
def _dequeue(self):
""" Pop the top function from the internal queue and call it.
"""
func, args = self.queue.popleft()
func(*args)
def process_queue(self):
""" Process the entire queue while taking periodic breaks. This allows
the game loop to run smoothly. The queue contains calls to
_show_block() and _hide_block() so this method should be called if
add_block() or remove_block() was called with immediate=False
"""
start = time.process_time()
while self.queue and time.process_time() - start < 1.0 / TICKS_PER_SEC:
self._dequeue()
def process_entire_queue(self):
""" Process the entire queue with no breaks.
"""
while self.queue:
self._dequeue()
class Window(pyglet.window.Window):
def __init__(self, *args, **kwargs):
super(Window, self).__init__(*args, **kwargs)
# Whether or not the window exclusively captures the mouse.
self.exclusive = False
# When flying, gravity has no effect and speed is increased.
self.flying = False
# Strafing is moving lateral to the direction you are facing,
# e.g. moving to the left or right while continuing to face forward.
#
# First element is -1 when moving forward, 1 when moving back, and 0
# otherwise. The second element is -1 when moving left, 1 when moving
# right, and 0 otherwise.
self.strafe = [0, 0]
# Current (x, y, z) position in the world, specified with floats. Note
# that, perhaps unlike in math class, the y-axis is the vertical axis.
self.position = (0, 0, 0)
# First element is rotation of the player in the x-z plane (ground
# plane) measured from the z-axis down. The second is the rotation
# angle from the ground plane up. Rotation is in degrees.
#
# The vertical plane rotation ranges from -90 (looking straight down) to
# 90 (looking straight up). The horizontal rotation range is unbounded.
self.rotation = (0, 0)
# Which sector the player is currently in.
self.sector = None
# The crosshairs at the center of the screen.
self.reticle = None
# Velocity in the y (upward) direction.
self.dy = 0
# A list of blocks the player can place. Hit num keys to cycle.
self.inventory = [BRICK, GRASS, SAND]
# The current block the user can place. Hit num keys to cycle.
self.block = self.inventory[0]
# Convenience list of num keys.
self.num_keys = [
key._1, key._2, key._3, key._4, key._5,
key._6, key._7, key._8, key._9, key._0]
# Instance of the model that handles the world.
self.model = Model()
# The label that is displayed in the top left of the canvas.
self.label = pyglet.text.Label('', font_name='Arial', font_size=18,
x=10, y=self.height - 10, anchor_x='left', anchor_y='top',
color=(0, 0, 0, 255))
# This call schedules the `update()` method to be called
# TICKS_PER_SEC times per second. This is the main game event loop.
pyglet.clock.schedule_interval(self.update, 1.0 / TICKS_PER_SEC)
def set_exclusive_mouse(self, exclusive):
""" If `exclusive` is True, the game will capture the mouse, if False
the game will ignore the mouse.
"""
super(Window, self).set_exclusive_mouse(exclusive)
self.exclusive = exclusive
def get_sight_vector(self):
""" Returns the current line of sight vector indicating the direction
the player is looking.
"""
x, y = self.rotation
# y ranges from -90 to 90, or -pi/2 to pi/2, so m ranges from 0 to 1 and
# is 1 when looking ahead parallel to the ground and 0 when looking
# straight up or down.
m = math.cos(math.radians(y))
# dy ranges from -1 to 1 and is -1 when looking straight down and 1 when
# looking straight up.
dy = math.sin(math.radians(y))
dx = math.cos(math.radians(x - 90)) * m
dz = math.sin(math.radians(x - 90)) * m
return (dx, dy, dz)
def get_motion_vector(self):
""" Returns the current motion vector indicating the velocity of the
player.
Returns
-------
vector : tuple of len 3
Tuple containing the velocity in x, y, and z respectively.
"""
if any(self.strafe):
x, y = self.rotation
strafe = math.degrees(math.atan2(*self.strafe))
y_angle = math.radians(y)
x_angle = math.radians(x + strafe)
if self.flying:
m = math.cos(y_angle)
dy = math.sin(y_angle)
if self.strafe[1]:
# Moving left or right.
dy = 0.0
m = 1
if self.strafe[0] > 0:
# Moving backwards.
dy *= -1
# When you are flying up or down, you have less left and right
# motion.
dx = math.cos(x_angle) * m
dz = math.sin(x_angle) * m
else:
dy = 0.0
dx = math.cos(x_angle)
dz = math.sin(x_angle)
else:
dy = 0.0
dx = 0.0
dz = 0.0
return (dx, dy, dz)
def update(self, dt):
""" This method is scheduled to be called repeatedly by the pyglet
clock.
Parameters
----------
dt : float
The change in time since the last call.
"""
self.model.process_queue()
sector = sectorize(self.position)
if sector != self.sector:
self.model.change_sectors(self.sector, sector)
if self.sector is None:
self.model.process_entire_queue()
self.sector = sector
m = 8
dt = min(dt, 0.2)
for _ in xrange(m):
self._update(dt / m)
def _update(self, dt):
""" Private implementation of the `update()` method. This is where most
of the motion logic lives, along with gravity and collision detection.
Parameters
----------
dt : float
The change in time since the last call.
"""
# walking
speed = FLYING_SPEED if self.flying else WALKING_SPEED
d = dt * speed # distance covered this tick.
dx, dy, dz = self.get_motion_vector()
# New position in space, before accounting for gravity.
dx, dy, dz = dx * d, dy * d, dz * d
# gravity
if not self.flying:
# Update your vertical speed: if you are falling, speed up until you
# hit terminal velocity; if you are jumping, slow down until you
# start falling.
self.dy -= dt * GRAVITY
self.dy = max(self.dy, -TERMINAL_VELOCITY)
dy += self.dy * dt
# collisions
x, y, z = self.position
x, y, z = self.collide((x + dx, y + dy, z + dz), PLAYER_HEIGHT)
self.position = (x, y, z)
def collide(self, position, height):
""" Checks to see if the player at the given `position` and `height`
is colliding with any blocks in the world.
Parameters
----------
position : tuple of len 3
The (x, y, z) position to check for collisions at.
height : int or float
The height of the player.
Returns
-------
position : tuple of len 3
The new position of the player taking into account collisions.
"""
# How much overlap with a dimension of a surrounding block you need to
# have to count as a collision. If 0, touching terrain at all counts as
# a collision. If .49, you sink into the ground, as if walking through
# tall grass. If >= .5, you'll fall through the ground.
pad = 0.25
p = list(position)
np = normalize(position)
for face in FACES: # check all surrounding blocks
for i in xrange(3): # check each dimension independently
if not face[i]:
continue
# How much overlap you have with this dimension.
d = (p[i] - np[i]) * face[i]
if d < pad:
continue
for dy in xrange(height): # check each height
op = list(np)
op[1] -= dy
op[i] += face[i]
if tuple(op) not in self.model.world:
continue
p[i] -= (d - pad) * face[i]
if face == (0, -1, 0) or face == (0, 1, 0):
# You are colliding with the ground or ceiling, so stop
# falling / rising.
self.dy = 0
break
return tuple(p)
def on_mouse_press(self, x, y, button, modifiers):
""" Called when a mouse button is pressed. See pyglet docs for button
and modifier mappings.
Parameters
----------
x, y : int
The coordinates of the mouse click. Always center of the screen if
the mouse is captured.
button : int
Number representing mouse button that was clicked. 1 = left button,
4 = right button.
modifiers : int
Number representing any modifying keys that were pressed when the
mouse button was clicked.
"""
if self.exclusive:
vector = self.get_sight_vector()
block, previous = self.model.hit_test(self.position, vector)
if (button == mouse.RIGHT) or \
((button == mouse.LEFT) and (modifiers & key.MOD_CTRL)):
# ON OSX, control + left click = right click.
if previous:
self.model.add_block(previous, self.block)
elif button == pyglet.window.mouse.LEFT and block:
texture = self.model.world[block]
if texture != STONE:
self.model.remove_block(block)
else:
self.set_exclusive_mouse(True)
def on_mouse_motion(self, x, y, dx, dy):
""" Called when the player moves the mouse.
Parameters
----------
x, y : int
The coordinates of the mouse click. Always center of the screen if
the mouse is captured.
dx, dy : float
The movement of the mouse.
"""
if self.exclusive:
m = 0.15
x, y = self.rotation
x, y = x + dx * m, y + dy * m
y = max(-90, min(90, y))
self.rotation = (x, y)
def on_key_press(self, symbol, modifiers):
""" Called when the player presses a key. See pyglet docs for key
mappings.
Parameters
----------
symbol : int
Number representing the key that was pressed.
modifiers : int
Number representing any modifying keys that were pressed.
"""
if symbol == key.W:
self.strafe[0] -= 1
elif symbol == key.S:
self.strafe[0] += 1
elif symbol == key.A:
self.strafe[1] -= 1
elif symbol == key.D:
self.strafe[1] += 1
elif symbol == key.SPACE:
if self.dy == 0:
self.dy = JUMP_SPEED
elif symbol == key.ESCAPE:
self.set_exclusive_mouse(False)
elif symbol == key.TAB:
self.flying = not self.flying
elif symbol in self.num_keys:
index = (symbol - self.num_keys[0]) % len(self.inventory)
self.block = self.inventory[index]
def on_key_release(self, symbol, modifiers):
""" Called when the player releases a key. See pyglet docs for key
mappings.
Parameters
----------
symbol : int
Number representing the key that was pressed.
modifiers : int
Number representing any modifying keys that were pressed.
"""
if symbol == key.W:
self.strafe[0] += 1
elif symbol == key.S:
self.strafe[0] -= 1
elif symbol == key.A:
self.strafe[1] += 1
elif symbol == key.D:
self.strafe[1] -= 1
def on_resize(self, width, height):
""" Called when the window is resized to a new `width` and `height`.
"""
# label
self.label.y = height - 10
# reticle
if self.reticle:
self.reticle.delete()
x, y = self.width // 2, self.height // 2
n = 10
self.reticle = pyglet.graphics.vertex_list(4,
('v2i', (x - n, y, x + n, y, x, y - n, x, y + n))
)
def set_2d(self):
""" Configure OpenGL to draw in 2d.
"""
width, height = self.get_size()
glDisable(GL_DEPTH_TEST)
viewport = self.get_viewport_size()
glViewport(0, 0, max(1, viewport[0]), max(1, viewport[1]))
glMatrixMode(GL_PROJECTION)
glLoadIdentity()
glOrtho(0, max(1, width), 0, max(1, height), -1, 1)
glMatrixMode(GL_MODELVIEW)
glLoadIdentity()
def set_3d(self):
""" Configure OpenGL to draw in 3d.
"""
width, height = self.get_size()
glEnable(GL_DEPTH_TEST)
viewport = self.get_viewport_size()
glViewport(0, 0, max(1, viewport[0]), max(1, viewport[1]))
glMatrixMode(GL_PROJECTION)
glLoadIdentity()
gluPerspective(65.0, width / float(height), 0.1, 60.0)
glMatrixMode(GL_MODELVIEW)
glLoadIdentity()
x, y = self.rotation
glRotatef(x, 0, 1, 0)
glRotatef(-y, math.cos(math.radians(x)), 0, math.sin(math.radians(x)))
x, y, z = self.position
glTranslatef(-x, -y, -z)
def on_draw(self):
""" Called by pyglet to draw the canvas.
"""
self.clear()
self.set_3d()
glColor3d(1, 1, 1)
self.model.batch.draw()
self.draw_focused_block()
self.set_2d()
self.draw_label()
self.draw_reticle()
def draw_focused_block(self):
""" Draw black edges around the block that is currently under the
crosshairs.
"""
vector = self.get_sight_vector()
block = self.model.hit_test(self.position, vector)[0]
if block:
x, y, z = block
vertex_data = cube_vertices(x, y, z, 0.51)
glColor3d(0, 0, 0)
glPolygonMode(GL_FRONT_AND_BACK, GL_LINE)
pyglet.graphics.draw(24, GL_QUADS, ('v3f/static', vertex_data))
glPolygonMode(GL_FRONT_AND_BACK, GL_FILL)
def draw_label(self):
""" Draw the label in the top left of the screen.
"""
x, y, z = self.position
self.label.text = '%02d (%.2f, %.2f, %.2f) %d / %d' % (
pyglet.clock.get_fps(), x, y, z,
len(self.model._shown), len(self.model.world))
self.label.draw()
def draw_reticle(self):
""" Draw the crosshairs in the center of the screen.
"""
glColor3d(0, 0, 0)
self.reticle.draw(GL_LINES)
def setup_fog():
""" Configure the OpenGL fog properties.
"""
# Enable fog. Fog "blends a fog color with each rasterized pixel fragment's
# post-texturing color."
glEnable(GL_FOG)
# Set the fog color.
glFogfv(GL_FOG_COLOR, (GLfloat * 4)(0.5, 0.69, 1.0, 1))
# Say we have no preference between rendering speed and quality.
glHint(GL_FOG_HINT, GL_DONT_CARE)
# Specify the equation used to compute the blending factor.
glFogi(GL_FOG_MODE, GL_LINEAR)
# How close and far away fog starts and ends. The closer the start and end,
# the denser the fog in the fog range.
glFogf(GL_FOG_START, 20.0)
glFogf(GL_FOG_END, 60.0)
def setup():
""" Basic OpenGL configuration.
"""
# Set the color of "clear", i.e. the sky, in rgba.
glClearColor(0.5, 0.69, 1.0, 1)
# Enable culling (not rendering) of back-facing facets -- facets that aren't
# visible to you.
glEnable(GL_CULL_FACE)
# Set the texture minification/magnification function to GL_NEAREST (nearest
# in Manhattan distance) to the specified texture coordinates. GL_NEAREST
# "is generally faster than GL_LINEAR, but it can produce textured images
# with sharper edges because the transition between texture elements is not
# as smooth."
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST)
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST)
setup_fog()
def main():
window = Window(width=800, height=600, caption='Pyglet', resizable=True)
# Hide the mouse cursor and prevent the mouse from leaving the window.
window.set_exclusive_mouse(True)
setup()
pyglet.app.run()
if __name__ == '__main__':
main()
|
#!/usr/bin/env python3
import sys
def get_next_one(wint):
"""
Return the next generation as a list of rows, where each row is a string of 0|1 characters.
:param wint: array (list of rows) of 0|1 integers describing the current generation
:returns: next_one, the list of rows for the next generation
"""
next_one = []
rows = len(wint)
cols = len(wint[0])
def fixed_i(i):
# return i % rows (i.e. divmod(i, rows)[1]), but let's optimize this a little
if 0 <= i < rows:
return i
return i % rows
def fixed_j(j):
if 0 <= j < cols:
return j
return j % cols
def neighbors_include_me(center_i, center_j):
neighbors_and_me = 0
for i in range(center_i - 1, center_i + 2):
for j in range(center_j - 1, center_j + 2):
neighbors_and_me += wint[fixed_i(i)][fixed_j(j)]
return neighbors_and_me
for i, row in enumerate(wint):
next_row = ''
for j, elem in enumerate(row):
neighbors = neighbors_include_me(i, j) - elem
if elem and 2 <= neighbors <= 3 or not elem and neighbors == 3:
next_row += '1'
else:
next_row += '0'
next_one.append(next_row)
return next_one
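# Illustrative example (hypothetical input): a classic "blinker" flips between
# a vertical and a horizontal bar; keeping it away from the edges avoids the
# toroidal wrap-around handled by fixed_i/fixed_j.
#   world = ["00000", "00100", "00100", "00100", "00000"]
#   get_next_one(word_to_int(world))
#   -> ['00000', '00000', '01110', '00000', '00000']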
def word_to_int(world):
"""
Convert a list of strings (where each character is a cell) into an array of 0|1 integers,
i.e. a list of rows where each row is a tuple of 0|1 elements.
"""
wint = []
for row in world:
wint.append(tuple(map(int, tuple(row))))
return wint
def validated_world(world):
if type(world) not in (tuple, list) or len(world) == 0:
raise TypeError('need a non empty list')
cols = None
for row in world:
if type(row) != str:
raise TypeError('list elements must be strings')
if cols is None:
cols = len(row)
if not cols:
raise TypeError('strings inside the list must be non empty')
elif len(row) != cols:
raise TypeError('strings inside the list must have the same length')
if row.replace('0', '').replace('1', ''):
raise TypeError('allowed characters are: 01')
return world
def from_stdin():
console = sys.stdin.isatty()
world = []
row_no = 1
while True:
try:
row = input(('row %s (empty to start) : ' % row_no) if console else '')
except EOFError:
break
if not row:
break
world.append(row)
row_no += 1
main(world)
def main(world):
for row in get_next_one(word_to_int(validated_world(world))):
print(row)
if __name__ == '__main__':
from_stdin()
|
from django.core.management import setup_environ
from geonition import settings
setup_environ(settings)
jsonfile = open("../data/geojson_rest.json")
print(jsonfile)
import json
json_list = json.loads(jsonfile.read())
jsonfile.close()
data_dict = {}
for obj in json_list:
if obj['model'] == 'geojson_rest.feature':
data_dict[obj['pk']] = {}
data_dict[obj['pk']]['feature'] = obj['fields']
for obj in json_list:
if obj['model'] == 'geojson_rest.property':
data_dict[obj['pk']]['property'] = obj['fields']
from geojson_rest.models import Feature
from geojson_rest.models import Property
from django.db import models
from django.conf import settings
from django.contrib.gis.db import models as gismodels
from django.contrib.gis.gdal import OGRGeometry
from django.contrib.auth.models import User
from geonition_utils.models import JSON
from geonition_utils.models import TimeD
for key, value in data_dict.items():
print(key)
print(value)
#create feature
feature = value['feature']
geometry = OGRGeometry(feature['geometry']).geos
private = feature.get('private', True)
user = User.objects.get(id = feature['user'])
group = 'PP-perhela-perhelan-kortteli' #they are all put into the same group Jarvenpaa
time = TimeD(create_time = feature['create_time'],
expire_time = feature['expire_time'])
time.save()
new_feature = Feature(geometry = geometry,
user = user,
group = group,
private = private,
time = time)
new_feature.save()
#create property
timed = TimeD()
timed.save()
time = timed
prop = Property()
#self.properties.add(prop)
|
import sys
from cx_Freeze import setup, Executable
# Dependencies are automatically detected, but it might need fine tuning.
build_exe_options = {"optimize": 2,
"packages": ["dbm"],
"include_files": ["image(s)", "font(s)", "db", "README.txt"]
}
setup( name = "Tetris Code",
version = "3.6",
options = {"build_exe": build_exe_options},
description = "My take on Tetris.",
executables = [Executable("tetris_code.py", base="Win32GUI", icon="image(s)\\favicon.ico")])
|
# Copyright (C) 2014-2017 New York University
# This file is part of ReproZip which is released under the Revised BSD License
# See file LICENSE for full license details.
"""Utility functions dealing with package managers.
"""
from __future__ import division, print_function, unicode_literals
import logging
import platform
import subprocess
from reprounzip.unpackers.common.misc import UsageError
from reprounzip.utils import itervalues
logger = logging.getLogger('reprounzip')
THIS_DISTRIBUTION = platform.linux_distribution()[0].lower()
PKG_NOT_INSTALLED = "(not installed)"
class CantFindInstaller(UsageError):
def __init__(self, msg="Can't select a package installer"):
UsageError.__init__(self, msg)
class AptInstaller(object):
"""Installer for deb-based systems (Debian, Ubuntu).
"""
def __init__(self, binary):
self.bin = binary
def install(self, packages, assume_yes=False):
# Installs
options = []
if assume_yes:
options.append('-y')
required_pkgs = set(pkg.name for pkg in packages)
r = subprocess.call([self.bin, 'install'] +
options + list(required_pkgs))
# Checks on packages
pkgs_status = self.get_packages_info(packages)
for pkg, status in itervalues(pkgs_status):
# only count a package as satisfied if dpkg reported a version for it
if status != PKG_NOT_INSTALLED:
required_pkgs.discard(pkg.name)
if required_pkgs:
logger.error("Error: some packages could not be installed:%s",
''.join("\n %s" % pkg for pkg in required_pkgs))
return r, pkgs_status
@staticmethod
def get_packages_info(packages):
if not packages:
return {}
p = subprocess.Popen(['dpkg-query',
'--showformat=${Package;-50}\t${Version}\n',
'-W'] +
[pkg.name for pkg in packages],
stdout=subprocess.PIPE)
# name -> (pkg, installed_version)
pkgs_dict = dict((pkg.name, (pkg, PKG_NOT_INSTALLED))
for pkg in packages)
try:
for l in p.stdout:
fields = l.split()
if len(fields) == 2:
name = fields[0].decode('ascii')
status = fields[1].decode('ascii')
pkg, _ = pkgs_dict[name]
pkgs_dict[name] = pkg, status
finally:
p.wait()
return pkgs_dict
def update_script(self):
return '%s update' % self.bin
def install_script(self, packages):
return '%s install -y %s' % (self.bin,
' '.join(pkg.name for pkg in packages))
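# --- Illustrative sketch (not part of the original module) ---
# The installer classes only need package objects exposing a ``name`` attribute;
# update_script()/install_script() build shell commands without running anything.
def _example_apt_scripts(names=('wget', 'ca-certificates')):
    from collections import namedtuple
    Pkg = namedtuple('Pkg', 'name')
    installer = AptInstaller('apt-get')
    # -> ('apt-get update', 'apt-get install -y wget ca-certificates')
    return installer.update_script(), installer.install_script([Pkg(n) for n in names])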
class YumInstaller(object):
"""Installer for systems using RPM and Yum (Fedora, CentOS, Red-Hat).
"""
@classmethod
def install(cls, packages, assume_yes=False):
options = []
if assume_yes:
options.append('-y')
required_pkgs = set(pkg.name for pkg in packages)
r = subprocess.call(['yum', 'install'] + options + list(required_pkgs))
# Checks on packages
pkgs_status = cls.get_packages_info(packages)
for pkg, status in itervalues(pkgs_status):
if status is not None:
required_pkgs.discard(pkg.name)
if required_pkgs:
logger.error("Error: some packages could not be installed:%s",
''.join("\n %s" % pkg for pkg in required_pkgs))
return r, pkgs_status
@staticmethod
def get_packages_info(packages):
if not packages:
return {}
p = subprocess.Popen(['rpm', '-q'] +
[pkg.name for pkg in packages] +
['--qf', '+%{NAME} %{VERSION}-%{RELEASE}\\n'],
stdout=subprocess.PIPE)
        # name -> (pkg, installed_version)
pkgs_dict = dict((pkg.name, (pkg, PKG_NOT_INSTALLED))
for pkg in packages)
try:
for l in p.stdout:
                if l[:1] == b'+':  # slice so the check also works on Python 3 bytes
fields = l[1:].split()
if len(fields) == 2:
name = fields[0].decode('ascii')
status = fields[1].decode('ascii')
pkg, _ = pkgs_dict[name]
pkgs_dict[name] = pkg, status
finally:
p.wait()
return pkgs_dict
@staticmethod
def update_script():
return ''
@staticmethod
def install_script(packages):
return 'yum install -y %s' % ' '.join(pkg.name for pkg in packages)
def select_installer(pack, runs, target_distribution=THIS_DISTRIBUTION,
check_distrib_compat=True):
"""Selects the right package installer for a Linux distribution.
"""
orig_distribution = runs[0]['distribution'][0].lower()
# Checks that the distributions match
if not check_distrib_compat:
pass
elif (set([orig_distribution, target_distribution]) ==
set(['ubuntu', 'debian'])):
# Packages are more or less the same on Debian and Ubuntu
logger.warning("Installing on %s but pack was generated on %s",
target_distribution.capitalize(),
orig_distribution.capitalize())
elif target_distribution is None:
raise CantFindInstaller("Target distribution is unknown; try using "
"--distribution")
elif orig_distribution != target_distribution:
raise CantFindInstaller(
"Installing on %s but pack was generated on %s" % (
target_distribution.capitalize(),
orig_distribution.capitalize()))
# Selects installation method
if target_distribution == 'ubuntu':
installer = AptInstaller('apt-get')
elif target_distribution == 'debian':
# aptitude is not installed by default, so use apt-get here too
installer = AptInstaller('apt-get')
elif (target_distribution in ('centos', 'centos linux',
'fedora', 'scientific linux') or
target_distribution.startswith('red hat')):
installer = YumInstaller()
else:
raise CantFindInstaller("This distribution, \"%s\", is not supported" %
target_distribution.capitalize())
return installer
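# --- Illustrative sketch (not part of the original module) ---
# select_installer() only reads runs[0]['distribution'][0]; the pack argument
# is accepted but unused here, so None is enough for a quick check.
def _example_select_installer():
    runs = [{'distribution': ('debian', '9')}]
    # Debian packs can be unpacked on Ubuntu (a warning is logged) and vice versa
    return select_installer(None, runs, target_distribution='ubuntu')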
|
#!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2011 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# Note: The deserialization code originally comes from ABE.
import bitcoin
from bitcoin import *
from util import print_error, profiler
import time
import sys
import struct
#
# Workalike python implementation of Bitcoin's CDataStream class.
#
import struct
import StringIO
import random
NO_SIGNATURE = 'ff'
class SerializationError(Exception):
""" Thrown when there's a problem deserializing or serializing """
class BCDataStream(object):
def __init__(self):
self.input = None
self.read_cursor = 0
def clear(self):
self.input = None
self.read_cursor = 0
def write(self, bytes): # Initialize with string of bytes
if self.input is None:
self.input = bytes
else:
self.input += bytes
def read_string(self):
# Strings are encoded depending on length:
# 0 to 252 : 1-byte-length followed by bytes (if any)
# 253 to 65,535 : byte'253' 2-byte-length followed by bytes
# 65,536 to 4,294,967,295 : byte '254' 4-byte-length followed by bytes
# ... and the Bitcoin client is coded to understand:
# greater than 4,294,967,295 : byte '255' 8-byte-length followed by bytes of string
# ... but I don't think it actually handles any strings that big.
if self.input is None:
raise SerializationError("call write(bytes) before trying to deserialize")
try:
length = self.read_compact_size()
except IndexError:
raise SerializationError("attempt to read past end of buffer")
return self.read_bytes(length)
def write_string(self, string):
# Length-encoded as with read-string
self.write_compact_size(len(string))
self.write(string)
def read_bytes(self, length):
try:
result = self.input[self.read_cursor:self.read_cursor+length]
self.read_cursor += length
return result
except IndexError:
raise SerializationError("attempt to read past end of buffer")
return ''
def read_boolean(self): return self.read_bytes(1)[0] != chr(0)
def read_int16(self): return self._read_num('<h')
def read_uint16(self): return self._read_num('<H')
def read_int32(self): return self._read_num('<i')
def read_uint32(self): return self._read_num('<I')
def read_int64(self): return self._read_num('<q')
def read_uint64(self): return self._read_num('<Q')
def write_boolean(self, val): return self.write(chr(1) if val else chr(0))
def write_int16(self, val): return self._write_num('<h', val)
def write_uint16(self, val): return self._write_num('<H', val)
def write_int32(self, val): return self._write_num('<i', val)
def write_uint32(self, val): return self._write_num('<I', val)
def write_int64(self, val): return self._write_num('<q', val)
def write_uint64(self, val): return self._write_num('<Q', val)
def read_compact_size(self):
size = ord(self.input[self.read_cursor])
self.read_cursor += 1
if size == 253:
size = self._read_num('<H')
elif size == 254:
size = self._read_num('<I')
elif size == 255:
size = self._read_num('<Q')
return size
def write_compact_size(self, size):
if size < 0:
raise SerializationError("attempt to write size < 0")
elif size < 253:
self.write(chr(size))
elif size < 2**16:
self.write('\xfd')
self._write_num('<H', size)
elif size < 2**32:
self.write('\xfe')
self._write_num('<I', size)
elif size < 2**64:
self.write('\xff')
self._write_num('<Q', size)
def _read_num(self, format):
(i,) = struct.unpack_from(format, self.input, self.read_cursor)
self.read_cursor += struct.calcsize(format)
return i
def _write_num(self, format, num):
s = struct.pack(format, num)
self.write(s)
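# --- Illustrative sketch (not part of the original Electrum code) ---
# BCDataStream uses Bitcoin's variable-length integer encoding: one byte for
# values below 253, otherwise a 0xfd/0xfe/0xff marker followed by a 2-, 4- or
# 8-byte little-endian integer.
def _example_compact_size_roundtrip():
    """Round-trip a few sizes through write/read_compact_size (illustrative)."""
    for size in (5, 300, 70000, 5000000000):
        s = BCDataStream()
        s.write_compact_size(size)
        s.read_cursor = 0
        assert s.read_compact_size() == size
    return True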
#
# enum-like type
# From the Python Cookbook, downloaded from http://code.activestate.com/recipes/67107/
#
import types, string, exceptions
class EnumException(exceptions.Exception):
pass
class Enumeration:
def __init__(self, name, enumList):
self.__doc__ = name
lookup = { }
reverseLookup = { }
i = 0
uniqueNames = [ ]
uniqueValues = [ ]
for x in enumList:
if type(x) == types.TupleType:
x, i = x
if type(x) != types.StringType:
raise EnumException, "enum name is not a string: " + x
if type(i) != types.IntType:
raise EnumException, "enum value is not an integer: " + i
if x in uniqueNames:
raise EnumException, "enum name is not unique: " + x
if i in uniqueValues:
raise EnumException, "enum value is not unique for " + x
uniqueNames.append(x)
uniqueValues.append(i)
lookup[x] = i
reverseLookup[i] = x
i = i + 1
self.lookup = lookup
self.reverseLookup = reverseLookup
def __getattr__(self, attr):
if not self.lookup.has_key(attr):
raise AttributeError
return self.lookup[attr]
def whatis(self, value):
return self.reverseLookup[value]
# This function comes from bitcointools, bct-LICENSE.txt.
def long_hex(bytes):
return bytes.encode('hex_codec')
# This function comes from bitcointools, bct-LICENSE.txt.
def short_hex(bytes):
t = bytes.encode('hex_codec')
if len(t) < 11:
return t
return t[0:4]+"..."+t[-4:]
opcodes = Enumeration("Opcodes", [
("OP_0", 0), ("OP_PUSHDATA1",76), "OP_PUSHDATA2", "OP_PUSHDATA4", "OP_1NEGATE", "OP_RESERVED",
"OP_1", "OP_2", "OP_3", "OP_4", "OP_5", "OP_6", "OP_7",
"OP_8", "OP_9", "OP_10", "OP_11", "OP_12", "OP_13", "OP_14", "OP_15", "OP_16",
"OP_NOP", "OP_VER", "OP_IF", "OP_NOTIF", "OP_VERIF", "OP_VERNOTIF", "OP_ELSE", "OP_ENDIF", "OP_VERIFY",
"OP_RETURN", "OP_TOALTSTACK", "OP_FROMALTSTACK", "OP_2DROP", "OP_2DUP", "OP_3DUP", "OP_2OVER", "OP_2ROT", "OP_2SWAP",
"OP_IFDUP", "OP_DEPTH", "OP_DROP", "OP_DUP", "OP_NIP", "OP_OVER", "OP_PICK", "OP_ROLL", "OP_ROT",
"OP_SWAP", "OP_TUCK", "OP_CAT", "OP_SUBSTR", "OP_LEFT", "OP_RIGHT", "OP_SIZE", "OP_INVERT", "OP_AND",
"OP_OR", "OP_XOR", "OP_EQUAL", "OP_EQUALVERIFY", "OP_RESERVED1", "OP_RESERVED2", "OP_1ADD", "OP_1SUB", "OP_2MUL",
"OP_2DIV", "OP_NEGATE", "OP_ABS", "OP_NOT", "OP_0NOTEQUAL", "OP_ADD", "OP_SUB", "OP_MUL", "OP_DIV",
"OP_MOD", "OP_LSHIFT", "OP_RSHIFT", "OP_BOOLAND", "OP_BOOLOR",
"OP_NUMEQUAL", "OP_NUMEQUALVERIFY", "OP_NUMNOTEQUAL", "OP_LESSTHAN",
"OP_GREATERTHAN", "OP_LESSTHANOREQUAL", "OP_GREATERTHANOREQUAL", "OP_MIN", "OP_MAX",
"OP_WITHIN", "OP_RIPEMD160", "OP_SHA1", "OP_SHA256", "OP_HASH160",
"OP_HASH256", "OP_CODESEPARATOR", "OP_CHECKSIG", "OP_CHECKSIGVERIFY", "OP_CHECKMULTISIG",
"OP_CHECKMULTISIGVERIFY",
("OP_SINGLEBYTE_END", 0xF0),
("OP_DOUBLEBYTE_BEGIN", 0xF000),
"OP_PUBKEY", "OP_PUBKEYHASH",
("OP_INVALIDOPCODE", 0xFFFF),
])
def script_GetOp(bytes):
i = 0
while i < len(bytes):
vch = None
opcode = ord(bytes[i])
i += 1
if opcode >= opcodes.OP_SINGLEBYTE_END:
opcode <<= 8
opcode |= ord(bytes[i])
i += 1
if opcode <= opcodes.OP_PUSHDATA4:
nSize = opcode
if opcode == opcodes.OP_PUSHDATA1:
nSize = ord(bytes[i])
i += 1
elif opcode == opcodes.OP_PUSHDATA2:
(nSize,) = struct.unpack_from('<H', bytes, i)
i += 2
elif opcode == opcodes.OP_PUSHDATA4:
(nSize,) = struct.unpack_from('<I', bytes, i)
i += 4
vch = bytes[i:i+nSize]
i += nSize
yield (opcode, vch, i)
def script_GetOpName(opcode):
return (opcodes.whatis(opcode)).replace("OP_", "")
def decode_script(bytes):
result = ''
for (opcode, vch, i) in script_GetOp(bytes):
if len(result) > 0: result += " "
if opcode <= opcodes.OP_PUSHDATA4:
result += "%d:"%(opcode,)
result += short_hex(vch)
else:
result += script_GetOpName(opcode)
return result
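# --- Illustrative sketch (not part of the original Electrum code) ---
# decode_script() renders a script as opcode names and abbreviated push data;
# a standard pay-to-pubkey-hash scriptPubKey with a dummy 20-byte hash decodes
# to 'DUP HASH160 20:0000...0000 EQUALVERIFY CHECKSIG'.
def _example_decode_p2pkh():
    return decode_script(('76a914' + '00' * 20 + '88ac').decode('hex'))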
def match_decoded(decoded, to_match):
if len(decoded) != len(to_match):
        return False
for i in range(len(decoded)):
if to_match[i] == opcodes.OP_PUSHDATA4 and decoded[i][0] <= opcodes.OP_PUSHDATA4 and decoded[i][0]>0:
continue # Opcodes below OP_PUSHDATA4 all just push data onto stack, and are equivalent.
if to_match[i] != decoded[i][0]:
return False
return True
def parse_sig(x_sig):
s = []
for sig in x_sig:
if sig[-2:] == '01':
s.append(sig[:-2])
else:
assert sig == NO_SIGNATURE
s.append(None)
return s
def is_extended_pubkey(x_pubkey):
return x_pubkey[0:2] in ['fe', 'ff']
def x_to_xpub(x_pubkey):
if x_pubkey[0:2] == 'ff':
from account import BIP32_Account
xpub, s = BIP32_Account.parse_xpubkey(x_pubkey)
return xpub
def parse_xpub(x_pubkey):
if x_pubkey[0:2] in ['02','03','04']:
pubkey = x_pubkey
elif x_pubkey[0:2] == 'ff':
from account import BIP32_Account
xpub, s = BIP32_Account.parse_xpubkey(x_pubkey)
pubkey = BIP32_Account.derive_pubkey_from_xpub(xpub, s[0], s[1])
elif x_pubkey[0:2] == 'fe':
from account import OldAccount
mpk, s = OldAccount.parse_xpubkey(x_pubkey)
pubkey = OldAccount.get_pubkey_from_mpk(mpk.decode('hex'), s[0], s[1])
elif x_pubkey[0:2] == 'fd':
addrtype = ord(x_pubkey[2:4].decode('hex'))
hash160 = x_pubkey[4:].decode('hex')
pubkey = None
address = hash_160_to_bc_address(hash160, addrtype)
else:
        raise BaseException("Cannot parse pubkey")
if pubkey:
address = public_key_to_bc_address(pubkey.decode('hex'))
return pubkey, address
def parse_scriptSig(d, bytes):
try:
decoded = [ x for x in script_GetOp(bytes) ]
except Exception:
# coinbase transactions raise an exception
print_error("cannot find address in input script", bytes.encode('hex'))
return
# payto_pubkey
match = [ opcodes.OP_PUSHDATA4 ]
if match_decoded(decoded, match):
sig = decoded[0][1].encode('hex')
d['address'] = "(pubkey)"
d['signatures'] = [sig]
d['num_sig'] = 1
d['x_pubkeys'] = ["(pubkey)"]
d['pubkeys'] = ["(pubkey)"]
return
# non-generated TxIn transactions push a signature
# (seventy-something bytes) and then their public key
# (65 bytes) onto the stack:
match = [ opcodes.OP_PUSHDATA4, opcodes.OP_PUSHDATA4 ]
if match_decoded(decoded, match):
sig = decoded[0][1].encode('hex')
x_pubkey = decoded[1][1].encode('hex')
try:
signatures = parse_sig([sig])
pubkey, address = parse_xpub(x_pubkey)
except:
import traceback
traceback.print_exc(file=sys.stdout)
print_error("cannot find address in input script", bytes.encode('hex'))
return
d['signatures'] = signatures
d['x_pubkeys'] = [x_pubkey]
d['num_sig'] = 1
d['pubkeys'] = [pubkey]
d['address'] = address
return
# p2sh transaction, m of n
match = [ opcodes.OP_0 ] + [ opcodes.OP_PUSHDATA4 ] * (len(decoded) - 1)
if not match_decoded(decoded, match):
print_error("cannot find address in input script", bytes.encode('hex'))
return
x_sig = [x[1].encode('hex') for x in decoded[1:-1]]
dec2 = [ x for x in script_GetOp(decoded[-1][1]) ]
m = dec2[0][0] - opcodes.OP_1 + 1
n = dec2[-2][0] - opcodes.OP_1 + 1
op_m = opcodes.OP_1 + m - 1
op_n = opcodes.OP_1 + n - 1
match_multisig = [ op_m ] + [opcodes.OP_PUSHDATA4]*n + [ op_n, opcodes.OP_CHECKMULTISIG ]
if not match_decoded(dec2, match_multisig):
print_error("cannot find address in input script", bytes.encode('hex'))
return
x_pubkeys = map(lambda x: x[1].encode('hex'), dec2[1:-2])
pubkeys = [parse_xpub(x)[0] for x in x_pubkeys] # xpub, addr = parse_xpub()
redeemScript = Transaction.multisig_script(pubkeys, m)
# write result in d
d['num_sig'] = m
d['signatures'] = parse_sig(x_sig)
d['x_pubkeys'] = x_pubkeys
d['pubkeys'] = pubkeys
d['redeemScript'] = redeemScript
d['address'] = hash_160_to_bc_address(hash_160(redeemScript.decode('hex')), 5)
def get_address_from_output_script(bytes):
decoded = [ x for x in script_GetOp(bytes) ]
# The Genesis Block, self-payments, and pay-by-IP-address payments look like:
# 65 BYTES:... CHECKSIG
match = [ opcodes.OP_PUSHDATA4, opcodes.OP_CHECKSIG ]
if match_decoded(decoded, match):
return TYPE_PUBKEY, decoded[0][1].encode('hex')
# Pay-by-Bitcoin-address TxOuts look like:
# DUP HASH160 20 BYTES:... EQUALVERIFY CHECKSIG
match = [ opcodes.OP_DUP, opcodes.OP_HASH160, opcodes.OP_PUSHDATA4, opcodes.OP_EQUALVERIFY, opcodes.OP_CHECKSIG ]
if match_decoded(decoded, match):
return TYPE_ADDRESS, hash_160_to_bc_address(decoded[2][1])
# p2sh
match = [ opcodes.OP_HASH160, opcodes.OP_PUSHDATA4, opcodes.OP_EQUAL ]
if match_decoded(decoded, match):
return TYPE_ADDRESS, hash_160_to_bc_address(decoded[1][1],5)
return TYPE_SCRIPT, bytes
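# --- Illustrative sketch (not part of the original Electrum code) ---
# get_address_from_output_script() returns a (type, value) pair; dummy 20-byte
# hashes are enough to exercise the P2PKH and P2SH branches.
def _example_classify_output_scripts():
    p2pkh = ('76a914' + '00' * 20 + '88ac').decode('hex')
    p2sh = ('a914' + '00' * 20 + '87').decode('hex')
    return (get_address_from_output_script(p2pkh)[0] == TYPE_ADDRESS,
            get_address_from_output_script(p2sh)[0] == TYPE_ADDRESS)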
def parse_input(vds):
d = {}
prevout_hash = hash_encode(vds.read_bytes(32))
prevout_n = vds.read_uint32()
scriptSig = vds.read_bytes(vds.read_compact_size())
d['scriptSig'] = scriptSig.encode('hex')
sequence = vds.read_uint32()
if prevout_hash == '00'*32:
d['is_coinbase'] = True
else:
d['is_coinbase'] = False
d['prevout_hash'] = prevout_hash
d['prevout_n'] = prevout_n
d['sequence'] = sequence
d['pubkeys'] = []
d['signatures'] = {}
d['address'] = None
if scriptSig:
parse_scriptSig(d, scriptSig)
return d
def parse_output(vds, i):
d = {}
d['value'] = vds.read_int64()
scriptPubKey = vds.read_bytes(vds.read_compact_size())
d['type'], d['address'] = get_address_from_output_script(scriptPubKey)
d['scriptPubKey'] = scriptPubKey.encode('hex')
d['prevout_n'] = i
return d
def deserialize(raw):
vds = BCDataStream()
vds.write(raw.decode('hex'))
d = {}
start = vds.read_cursor
d['version'] = vds.read_int32()
n_vin = vds.read_compact_size()
d['inputs'] = list(parse_input(vds) for i in xrange(n_vin))
n_vout = vds.read_compact_size()
d['outputs'] = list(parse_output(vds,i) for i in xrange(n_vout))
d['lockTime'] = vds.read_uint32()
d['refheight'] = vds.read_int32()
return d
def push_script(x):
return op_push(len(x)/2) + x
class Transaction:
def __str__(self):
if self.raw is None:
self.raw = self.serialize()
return self.raw
def __init__(self, raw):
if raw is None:
self.raw = None
elif type(raw) in [str, unicode]:
self.raw = raw.strip() if raw else None
elif type(raw) is dict:
self.raw = raw['hex']
else:
raise BaseException("cannot initialize transaction", raw)
self._inputs = None
self._outputs = None
def update(self, raw):
self.raw = raw
self._inputs = None
self.deserialize()
def inputs(self):
if self._inputs is None:
self.deserialize()
return self._inputs
def outputs(self):
if self._outputs is None:
self.deserialize()
return self._outputs
def update_signatures(self, raw):
"""Add new signatures to a transaction"""
d = deserialize(raw)
for i, txin in enumerate(self.inputs()):
sigs1 = txin.get('signatures')
sigs2 = d['inputs'][i].get('signatures')
for sig in sigs2:
if sig in sigs1:
continue
for_sig = Hash(self.tx_for_sig(i).decode('hex'))
# der to string
order = ecdsa.ecdsa.generator_secp256k1.order()
r, s = ecdsa.util.sigdecode_der(sig.decode('hex'), order)
sig_string = ecdsa.util.sigencode_string(r, s, order)
pubkeys = txin.get('pubkeys')
compressed = True
for recid in range(4):
public_key = MyVerifyingKey.from_signature(sig_string, recid, for_sig, curve = SECP256k1)
pubkey = point_to_ser(public_key.pubkey.point, compressed).encode('hex')
if pubkey in pubkeys:
public_key.verify_digest(sig_string, for_sig, sigdecode = ecdsa.util.sigdecode_string)
j = pubkeys.index(pubkey)
print_error("adding sig", i, j, pubkey, sig)
self._inputs[i]['signatures'][j] = sig
self._inputs[i]['x_pubkeys'][j] = pubkey
break
# redo raw
self.raw = self.serialize()
def deserialize(self):
if self.raw is None:
self.raw = self.serialize()
if self._inputs is not None:
return
d = deserialize(self.raw)
self._inputs = d['inputs']
self._outputs = [(x['type'], x['address'], x['value']) for x in d['outputs']]
self.locktime = d['lockTime']
self.refheight = d['refheight']
return d
@classmethod
def from_io(klass, inputs, outputs, locktime=0, refheight=0):
self = klass(None)
self._inputs = inputs
self._outputs = outputs
self.locktime = locktime
self.refheight = refheight
return self
@classmethod
def sweep(klass, privkeys, network, to_address, fee):
inputs = []
keypairs = {}
for privkey in privkeys:
pubkey = public_key_from_private_key(privkey)
address = address_from_private_key(privkey)
u = network.synchronous_get(('blockchain.address.listunspent',[address]))
pay_script = klass.pay_script(TYPE_ADDRESS, address)
for item in u:
item['scriptPubKey'] = pay_script
item['redeemPubkey'] = pubkey
item['address'] = address
item['prevout_hash'] = item['tx_hash']
item['prevout_n'] = item['tx_pos']
item['pubkeys'] = [pubkey]
item['x_pubkeys'] = [pubkey]
item['signatures'] = [None]
item['num_sig'] = 1
inputs += u
keypairs[pubkey] = privkey
if not inputs:
return
total = sum(i.get('value') for i in inputs) - fee
outputs = [(TYPE_ADDRESS, to_address, total)]
self = klass.from_io(inputs, outputs)
self.sign(keypairs)
return self
@classmethod
def multisig_script(klass, public_keys, m):
n = len(public_keys)
assert n <= 15
assert m <= n
op_m = format(opcodes.OP_1 + m - 1, 'x')
op_n = format(opcodes.OP_1 + n - 1, 'x')
keylist = [op_push(len(k)/2) + k for k in public_keys]
return op_m + ''.join(keylist) + op_n + 'ae'
@classmethod
def pay_script(self, output_type, addr):
if output_type == TYPE_SCRIPT:
return addr.encode('hex')
elif output_type == TYPE_ADDRESS:
addrtype, hash_160 = bc_address_to_hash_160(addr)
if addrtype == 0:
script = '76a9' # op_dup, op_hash_160
script += push_script(hash_160.encode('hex'))
script += '88ac' # op_equalverify, op_checksig
elif addrtype == 5:
script = 'a9' # op_hash_160
script += push_script(hash_160.encode('hex'))
script += '87' # op_equal
else:
raise
else:
raise
return script
@classmethod
def input_script(self, txin, i, for_sig):
# for_sig:
# -1 : do not sign, estimate length
# i>=0 : serialized tx for signing input i
# None : add all known signatures
p2sh = txin.get('redeemScript') is not None
num_sig = txin['num_sig'] if p2sh else 1
address = txin['address']
x_signatures = txin['signatures']
signatures = filter(None, x_signatures)
is_complete = len(signatures) == num_sig
if for_sig in [-1, None]:
# if we have enough signatures, we use the actual pubkeys
# use extended pubkeys (with bip32 derivation)
if for_sig == -1:
# we assume that signature will be 0x48 bytes long
pubkeys = txin['pubkeys']
sig_list = [ "00" * 0x48 ] * num_sig
elif is_complete:
pubkeys = txin['pubkeys']
sig_list = ((sig + '01') for sig in signatures)
else:
pubkeys = txin['x_pubkeys']
sig_list = ((sig + '01') if sig else NO_SIGNATURE for sig in x_signatures)
script = ''.join(push_script(x) for x in sig_list)
if not p2sh:
x_pubkey = pubkeys[0]
if x_pubkey is None:
addrtype, h160 = bc_address_to_hash_160(txin['address'])
x_pubkey = 'fd' + (chr(addrtype) + h160).encode('hex')
script += push_script(x_pubkey)
else:
script = '00' + script # put op_0 in front of script
redeem_script = self.multisig_script(pubkeys, num_sig)
script += push_script(redeem_script)
elif for_sig==i:
script = txin['redeemScript'] if p2sh else self.pay_script(TYPE_ADDRESS, address)
else:
script = ''
return script
@classmethod
def serialize_input(self, txin, i, for_sig):
# Prev hash and index
s = txin['prevout_hash'].decode('hex')[::-1].encode('hex')
s += int_to_hex(txin['prevout_n'], 4)
# Script length, script, sequence
script = self.input_script(txin, i, for_sig)
s += var_int(len(script) / 2)
s += script
s += "ffffffff"
return s
def BIP_LI01_sort(self):
# See https://github.com/kristovatlas/rfc/blob/master/bips/bip-li01.mediawiki
self._inputs.sort(key = lambda i: (i['prevout_hash'], i['prevout_n']))
self._outputs.sort(key = lambda o: (o[2], self.pay_script(o[0], o[1])))
def serialize(self, for_sig=None):
inputs = self.inputs()
        outputs = self.outputs()
        refheight = self.refheight
s = int_to_hex(2,4) # version
s += var_int( len(inputs) ) # number of inputs
for i, txin in enumerate(inputs):
s += self.serialize_input(txin, i, for_sig)
s += var_int( len(outputs) ) # number of outputs
for output in outputs:
output_type, addr, amount = output
s += int_to_hex( amount, 8) # amount
script = self.pay_script(output_type, addr)
s += var_int( len(script)/2 ) # script length
s += script # script
s += int_to_hex(0,4) # lock time
s += int_to_hex(refheight,4) # refheight
if for_sig is not None and for_sig != -1:
s += int_to_hex(1, 4) # hash type
return s
def tx_for_sig(self,i):
return self.serialize(for_sig = i)
def hash(self):
return Hash(self.raw.decode('hex') )[::-1].encode('hex')
def add_inputs(self, inputs):
self._inputs.extend(inputs)
self.raw = None
def add_outputs(self, outputs):
self._outputs.extend(outputs)
self.raw = None
def input_value(self):
return sum(x['value'] for x in self.inputs())
def output_value(self):
return sum( val for tp,addr,val in self.outputs())
def get_fee(self):
return self.input_value() - self.output_value()
def is_final(self):
return not any([x.get('sequence') < 0xffffffff - 1 for x in self.inputs()])
@profiler
def estimated_size(self):
'''Return an estimated tx size in bytes.'''
return len(self.serialize(-1)) / 2 # ASCII hex string
@classmethod
def estimated_input_size(self, txin):
'''Return an estimated of serialized input size in bytes.'''
return len(self.serialize_input(txin, -1, -1)) / 2
def signature_count(self):
r = 0
s = 0
for txin in self.inputs():
if txin.get('is_coinbase'):
continue
signatures = filter(None, txin.get('signatures',[]))
s += len(signatures)
r += txin.get('num_sig',-1)
return s, r
def is_complete(self):
s, r = self.signature_count()
return r == s
def inputs_without_script(self):
out = set()
for i, txin in enumerate(self.inputs()):
if txin.get('scriptSig') == '':
out.add(i)
return out
def inputs_to_sign(self):
out = set()
for txin in self.inputs():
num_sig = txin.get('num_sig')
if num_sig is None:
continue
x_signatures = txin['signatures']
signatures = filter(None, x_signatures)
if len(signatures) == num_sig:
# input is complete
continue
for k, x_pubkey in enumerate(txin['x_pubkeys']):
if x_signatures[k] is not None:
# this pubkey already signed
continue
out.add(x_pubkey)
return out
def sign(self, keypairs):
for i, txin in enumerate(self.inputs()):
num = txin['num_sig']
for x_pubkey in txin['x_pubkeys']:
signatures = filter(None, txin['signatures'])
if len(signatures) == num:
# txin is complete
break
if x_pubkey in keypairs.keys():
print_error("adding signature for", x_pubkey)
# add pubkey to txin
txin = self._inputs[i]
x_pubkeys = txin['x_pubkeys']
ii = x_pubkeys.index(x_pubkey)
sec = keypairs[x_pubkey]
pubkey = public_key_from_private_key(sec)
txin['x_pubkeys'][ii] = pubkey
txin['pubkeys'][ii] = pubkey
self._inputs[i] = txin
# add signature
for_sig = Hash(self.tx_for_sig(i).decode('hex'))
pkey = regenerate_key(sec)
secexp = pkey.secret
private_key = bitcoin.MySigningKey.from_secret_exponent( secexp, curve = SECP256k1 )
public_key = private_key.get_verifying_key()
sig = private_key.sign_digest_deterministic( for_sig, hashfunc=hashlib.sha256, sigencode = ecdsa.util.sigencode_der )
assert public_key.verify_digest( sig, for_sig, sigdecode = ecdsa.util.sigdecode_der)
txin['signatures'][ii] = sig.encode('hex')
self._inputs[i] = txin
print_error("is_complete", self.is_complete())
self.raw = self.serialize()
def get_outputs(self):
"""convert pubkeys to addresses"""
o = []
for type, x, v in self.outputs():
if type == TYPE_ADDRESS:
addr = x
elif type == TYPE_PUBKEY:
addr = public_key_to_bc_address(x.decode('hex'))
else:
addr = 'SCRIPT ' + x.encode('hex')
o.append((addr,v)) # consider using yield (addr, v)
return o
def get_output_addresses(self):
return [addr for addr, val in self.get_outputs()]
def has_address(self, addr):
return (addr in self.get_output_addresses()) or (addr in (tx.get("address") for tx in self.inputs()))
def as_dict(self):
if self.raw is None:
self.raw = self.serialize()
self.deserialize()
out = {
'hex': self.raw,
'complete': self.is_complete()
}
return out
def requires_fee(self, wallet):
# see https://en.bitcoin.it/wiki/Transaction_fees
#
# size must be smaller than 1 kbyte for free tx
size = len(self.serialize(-1))/2
if size >= 10000:
return True
# all outputs must be 0.01 BTC or larger for free tx
for addr, value in self.get_outputs():
if value < 1000000:
return True
# priority must be large enough for free tx
threshold = 57600000
weight = 0
for txin in self.inputs():
age = wallet.get_confirmations(txin["prevout_hash"])[0]
weight += txin["value"] * age
priority = weight / size
print_error(priority, threshold)
return priority < threshold
def tx_from_str(txt):
"json or raw hexadecimal"
import json
txt = txt.strip()
try:
txt.decode('hex')
is_hex = True
except:
is_hex = False
if is_hex:
return txt
tx_dict = json.loads(str(txt))
assert "hex" in tx_dict.keys()
return tx_dict["hex"]
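# --- Illustrative sketch (not part of the original Electrum code) ---
# tx_from_str() passes raw hex through unchanged and extracts the 'hex' field
# from a JSON-wrapped transaction.
def _example_tx_from_str():
    return tx_from_str('0200'), tx_from_str('{"hex": "0200"}')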
|
# coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
OpenAPI spec version: v1.14.7
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
class ExtensionsV1beta1PodSecurityPolicySpec(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'allow_privilege_escalation': 'bool',
'allowed_csi_drivers': 'list[ExtensionsV1beta1AllowedCSIDriver]',
'allowed_capabilities': 'list[str]',
'allowed_flex_volumes': 'list[ExtensionsV1beta1AllowedFlexVolume]',
'allowed_host_paths': 'list[ExtensionsV1beta1AllowedHostPath]',
'allowed_proc_mount_types': 'list[str]',
'allowed_unsafe_sysctls': 'list[str]',
'default_add_capabilities': 'list[str]',
'default_allow_privilege_escalation': 'bool',
'forbidden_sysctls': 'list[str]',
'fs_group': 'ExtensionsV1beta1FSGroupStrategyOptions',
'host_ipc': 'bool',
'host_network': 'bool',
'host_pid': 'bool',
'host_ports': 'list[ExtensionsV1beta1HostPortRange]',
'privileged': 'bool',
'read_only_root_filesystem': 'bool',
'required_drop_capabilities': 'list[str]',
'run_as_group': 'ExtensionsV1beta1RunAsGroupStrategyOptions',
'run_as_user': 'ExtensionsV1beta1RunAsUserStrategyOptions',
'se_linux': 'ExtensionsV1beta1SELinuxStrategyOptions',
'supplemental_groups': 'ExtensionsV1beta1SupplementalGroupsStrategyOptions',
'volumes': 'list[str]'
}
attribute_map = {
'allow_privilege_escalation': 'allowPrivilegeEscalation',
'allowed_csi_drivers': 'allowedCSIDrivers',
'allowed_capabilities': 'allowedCapabilities',
'allowed_flex_volumes': 'allowedFlexVolumes',
'allowed_host_paths': 'allowedHostPaths',
'allowed_proc_mount_types': 'allowedProcMountTypes',
'allowed_unsafe_sysctls': 'allowedUnsafeSysctls',
'default_add_capabilities': 'defaultAddCapabilities',
'default_allow_privilege_escalation': 'defaultAllowPrivilegeEscalation',
'forbidden_sysctls': 'forbiddenSysctls',
'fs_group': 'fsGroup',
'host_ipc': 'hostIPC',
'host_network': 'hostNetwork',
'host_pid': 'hostPID',
'host_ports': 'hostPorts',
'privileged': 'privileged',
'read_only_root_filesystem': 'readOnlyRootFilesystem',
'required_drop_capabilities': 'requiredDropCapabilities',
'run_as_group': 'runAsGroup',
'run_as_user': 'runAsUser',
'se_linux': 'seLinux',
'supplemental_groups': 'supplementalGroups',
'volumes': 'volumes'
}
def __init__(self, allow_privilege_escalation=None, allowed_csi_drivers=None, allowed_capabilities=None, allowed_flex_volumes=None, allowed_host_paths=None, allowed_proc_mount_types=None, allowed_unsafe_sysctls=None, default_add_capabilities=None, default_allow_privilege_escalation=None, forbidden_sysctls=None, fs_group=None, host_ipc=None, host_network=None, host_pid=None, host_ports=None, privileged=None, read_only_root_filesystem=None, required_drop_capabilities=None, run_as_group=None, run_as_user=None, se_linux=None, supplemental_groups=None, volumes=None): # noqa: E501
"""ExtensionsV1beta1PodSecurityPolicySpec - a model defined in OpenAPI""" # noqa: E501
self._allow_privilege_escalation = None
self._allowed_csi_drivers = None
self._allowed_capabilities = None
self._allowed_flex_volumes = None
self._allowed_host_paths = None
self._allowed_proc_mount_types = None
self._allowed_unsafe_sysctls = None
self._default_add_capabilities = None
self._default_allow_privilege_escalation = None
self._forbidden_sysctls = None
self._fs_group = None
self._host_ipc = None
self._host_network = None
self._host_pid = None
self._host_ports = None
self._privileged = None
self._read_only_root_filesystem = None
self._required_drop_capabilities = None
self._run_as_group = None
self._run_as_user = None
self._se_linux = None
self._supplemental_groups = None
self._volumes = None
self.discriminator = None
if allow_privilege_escalation is not None:
self.allow_privilege_escalation = allow_privilege_escalation
if allowed_csi_drivers is not None:
self.allowed_csi_drivers = allowed_csi_drivers
if allowed_capabilities is not None:
self.allowed_capabilities = allowed_capabilities
if allowed_flex_volumes is not None:
self.allowed_flex_volumes = allowed_flex_volumes
if allowed_host_paths is not None:
self.allowed_host_paths = allowed_host_paths
if allowed_proc_mount_types is not None:
self.allowed_proc_mount_types = allowed_proc_mount_types
if allowed_unsafe_sysctls is not None:
self.allowed_unsafe_sysctls = allowed_unsafe_sysctls
if default_add_capabilities is not None:
self.default_add_capabilities = default_add_capabilities
if default_allow_privilege_escalation is not None:
self.default_allow_privilege_escalation = default_allow_privilege_escalation
if forbidden_sysctls is not None:
self.forbidden_sysctls = forbidden_sysctls
self.fs_group = fs_group
if host_ipc is not None:
self.host_ipc = host_ipc
if host_network is not None:
self.host_network = host_network
if host_pid is not None:
self.host_pid = host_pid
if host_ports is not None:
self.host_ports = host_ports
if privileged is not None:
self.privileged = privileged
if read_only_root_filesystem is not None:
self.read_only_root_filesystem = read_only_root_filesystem
if required_drop_capabilities is not None:
self.required_drop_capabilities = required_drop_capabilities
if run_as_group is not None:
self.run_as_group = run_as_group
self.run_as_user = run_as_user
self.se_linux = se_linux
self.supplemental_groups = supplemental_groups
if volumes is not None:
self.volumes = volumes
@property
def allow_privilege_escalation(self):
"""Gets the allow_privilege_escalation of this ExtensionsV1beta1PodSecurityPolicySpec. # noqa: E501
allowPrivilegeEscalation determines if a pod can request to allow privilege escalation. If unspecified, defaults to true. # noqa: E501
:return: The allow_privilege_escalation of this ExtensionsV1beta1PodSecurityPolicySpec. # noqa: E501
:rtype: bool
"""
return self._allow_privilege_escalation
@allow_privilege_escalation.setter
def allow_privilege_escalation(self, allow_privilege_escalation):
"""Sets the allow_privilege_escalation of this ExtensionsV1beta1PodSecurityPolicySpec.
allowPrivilegeEscalation determines if a pod can request to allow privilege escalation. If unspecified, defaults to true. # noqa: E501
:param allow_privilege_escalation: The allow_privilege_escalation of this ExtensionsV1beta1PodSecurityPolicySpec. # noqa: E501
:type: bool
"""
self._allow_privilege_escalation = allow_privilege_escalation
@property
def allowed_csi_drivers(self):
"""Gets the allowed_csi_drivers of this ExtensionsV1beta1PodSecurityPolicySpec. # noqa: E501
AllowedCSIDrivers is a whitelist of inline CSI drivers that must be explicitly set to be embedded within a pod spec. An empty value means no CSI drivers can run inline within a pod spec. # noqa: E501
:return: The allowed_csi_drivers of this ExtensionsV1beta1PodSecurityPolicySpec. # noqa: E501
:rtype: list[ExtensionsV1beta1AllowedCSIDriver]
"""
return self._allowed_csi_drivers
@allowed_csi_drivers.setter
def allowed_csi_drivers(self, allowed_csi_drivers):
"""Sets the allowed_csi_drivers of this ExtensionsV1beta1PodSecurityPolicySpec.
AllowedCSIDrivers is a whitelist of inline CSI drivers that must be explicitly set to be embedded within a pod spec. An empty value means no CSI drivers can run inline within a pod spec. # noqa: E501
:param allowed_csi_drivers: The allowed_csi_drivers of this ExtensionsV1beta1PodSecurityPolicySpec. # noqa: E501
:type: list[ExtensionsV1beta1AllowedCSIDriver]
"""
self._allowed_csi_drivers = allowed_csi_drivers
@property
def allowed_capabilities(self):
"""Gets the allowed_capabilities of this ExtensionsV1beta1PodSecurityPolicySpec. # noqa: E501
allowedCapabilities is a list of capabilities that can be requested to add to the container. Capabilities in this field may be added at the pod author's discretion. You must not list a capability in both allowedCapabilities and requiredDropCapabilities. # noqa: E501
:return: The allowed_capabilities of this ExtensionsV1beta1PodSecurityPolicySpec. # noqa: E501
:rtype: list[str]
"""
return self._allowed_capabilities
@allowed_capabilities.setter
def allowed_capabilities(self, allowed_capabilities):
"""Sets the allowed_capabilities of this ExtensionsV1beta1PodSecurityPolicySpec.
allowedCapabilities is a list of capabilities that can be requested to add to the container. Capabilities in this field may be added at the pod author's discretion. You must not list a capability in both allowedCapabilities and requiredDropCapabilities. # noqa: E501
:param allowed_capabilities: The allowed_capabilities of this ExtensionsV1beta1PodSecurityPolicySpec. # noqa: E501
:type: list[str]
"""
self._allowed_capabilities = allowed_capabilities
@property
def allowed_flex_volumes(self):
"""Gets the allowed_flex_volumes of this ExtensionsV1beta1PodSecurityPolicySpec. # noqa: E501
allowedFlexVolumes is a whitelist of allowed Flexvolumes. Empty or nil indicates that all Flexvolumes may be used. This parameter is effective only when the usage of the Flexvolumes is allowed in the \"volumes\" field. # noqa: E501
:return: The allowed_flex_volumes of this ExtensionsV1beta1PodSecurityPolicySpec. # noqa: E501
:rtype: list[ExtensionsV1beta1AllowedFlexVolume]
"""
return self._allowed_flex_volumes
@allowed_flex_volumes.setter
def allowed_flex_volumes(self, allowed_flex_volumes):
"""Sets the allowed_flex_volumes of this ExtensionsV1beta1PodSecurityPolicySpec.
allowedFlexVolumes is a whitelist of allowed Flexvolumes. Empty or nil indicates that all Flexvolumes may be used. This parameter is effective only when the usage of the Flexvolumes is allowed in the \"volumes\" field. # noqa: E501
:param allowed_flex_volumes: The allowed_flex_volumes of this ExtensionsV1beta1PodSecurityPolicySpec. # noqa: E501
:type: list[ExtensionsV1beta1AllowedFlexVolume]
"""
self._allowed_flex_volumes = allowed_flex_volumes
@property
def allowed_host_paths(self):
"""Gets the allowed_host_paths of this ExtensionsV1beta1PodSecurityPolicySpec. # noqa: E501
allowedHostPaths is a white list of allowed host paths. Empty indicates that all host paths may be used. # noqa: E501
:return: The allowed_host_paths of this ExtensionsV1beta1PodSecurityPolicySpec. # noqa: E501
:rtype: list[ExtensionsV1beta1AllowedHostPath]
"""
return self._allowed_host_paths
@allowed_host_paths.setter
def allowed_host_paths(self, allowed_host_paths):
"""Sets the allowed_host_paths of this ExtensionsV1beta1PodSecurityPolicySpec.
allowedHostPaths is a white list of allowed host paths. Empty indicates that all host paths may be used. # noqa: E501
:param allowed_host_paths: The allowed_host_paths of this ExtensionsV1beta1PodSecurityPolicySpec. # noqa: E501
:type: list[ExtensionsV1beta1AllowedHostPath]
"""
self._allowed_host_paths = allowed_host_paths
@property
def allowed_proc_mount_types(self):
"""Gets the allowed_proc_mount_types of this ExtensionsV1beta1PodSecurityPolicySpec. # noqa: E501
AllowedProcMountTypes is a whitelist of allowed ProcMountTypes. Empty or nil indicates that only the DefaultProcMountType may be used. This requires the ProcMountType feature flag to be enabled. # noqa: E501
:return: The allowed_proc_mount_types of this ExtensionsV1beta1PodSecurityPolicySpec. # noqa: E501
:rtype: list[str]
"""
return self._allowed_proc_mount_types
@allowed_proc_mount_types.setter
def allowed_proc_mount_types(self, allowed_proc_mount_types):
"""Sets the allowed_proc_mount_types of this ExtensionsV1beta1PodSecurityPolicySpec.
AllowedProcMountTypes is a whitelist of allowed ProcMountTypes. Empty or nil indicates that only the DefaultProcMountType may be used. This requires the ProcMountType feature flag to be enabled. # noqa: E501
:param allowed_proc_mount_types: The allowed_proc_mount_types of this ExtensionsV1beta1PodSecurityPolicySpec. # noqa: E501
:type: list[str]
"""
self._allowed_proc_mount_types = allowed_proc_mount_types
@property
def allowed_unsafe_sysctls(self):
"""Gets the allowed_unsafe_sysctls of this ExtensionsV1beta1PodSecurityPolicySpec. # noqa: E501
allowedUnsafeSysctls is a list of explicitly allowed unsafe sysctls, defaults to none. Each entry is either a plain sysctl name or ends in \"*\" in which case it is considered as a prefix of allowed sysctls. Single * means all unsafe sysctls are allowed. Kubelet has to whitelist all allowed unsafe sysctls explicitly to avoid rejection. Examples: e.g. \"foo/*\" allows \"foo/bar\", \"foo/baz\", etc. e.g. \"foo.*\" allows \"foo.bar\", \"foo.baz\", etc. # noqa: E501
:return: The allowed_unsafe_sysctls of this ExtensionsV1beta1PodSecurityPolicySpec. # noqa: E501
:rtype: list[str]
"""
return self._allowed_unsafe_sysctls
@allowed_unsafe_sysctls.setter
def allowed_unsafe_sysctls(self, allowed_unsafe_sysctls):
"""Sets the allowed_unsafe_sysctls of this ExtensionsV1beta1PodSecurityPolicySpec.
allowedUnsafeSysctls is a list of explicitly allowed unsafe sysctls, defaults to none. Each entry is either a plain sysctl name or ends in \"*\" in which case it is considered as a prefix of allowed sysctls. Single * means all unsafe sysctls are allowed. Kubelet has to whitelist all allowed unsafe sysctls explicitly to avoid rejection. Examples: e.g. \"foo/*\" allows \"foo/bar\", \"foo/baz\", etc. e.g. \"foo.*\" allows \"foo.bar\", \"foo.baz\", etc. # noqa: E501
:param allowed_unsafe_sysctls: The allowed_unsafe_sysctls of this ExtensionsV1beta1PodSecurityPolicySpec. # noqa: E501
:type: list[str]
"""
self._allowed_unsafe_sysctls = allowed_unsafe_sysctls
@property
def default_add_capabilities(self):
"""Gets the default_add_capabilities of this ExtensionsV1beta1PodSecurityPolicySpec. # noqa: E501
defaultAddCapabilities is the default set of capabilities that will be added to the container unless the pod spec specifically drops the capability. You may not list a capability in both defaultAddCapabilities and requiredDropCapabilities. Capabilities added here are implicitly allowed, and need not be included in the allowedCapabilities list. # noqa: E501
:return: The default_add_capabilities of this ExtensionsV1beta1PodSecurityPolicySpec. # noqa: E501
:rtype: list[str]
"""
return self._default_add_capabilities
@default_add_capabilities.setter
def default_add_capabilities(self, default_add_capabilities):
"""Sets the default_add_capabilities of this ExtensionsV1beta1PodSecurityPolicySpec.
defaultAddCapabilities is the default set of capabilities that will be added to the container unless the pod spec specifically drops the capability. You may not list a capability in both defaultAddCapabilities and requiredDropCapabilities. Capabilities added here are implicitly allowed, and need not be included in the allowedCapabilities list. # noqa: E501
:param default_add_capabilities: The default_add_capabilities of this ExtensionsV1beta1PodSecurityPolicySpec. # noqa: E501
:type: list[str]
"""
self._default_add_capabilities = default_add_capabilities
@property
def default_allow_privilege_escalation(self):
"""Gets the default_allow_privilege_escalation of this ExtensionsV1beta1PodSecurityPolicySpec. # noqa: E501
defaultAllowPrivilegeEscalation controls the default setting for whether a process can gain more privileges than its parent process. # noqa: E501
:return: The default_allow_privilege_escalation of this ExtensionsV1beta1PodSecurityPolicySpec. # noqa: E501
:rtype: bool
"""
return self._default_allow_privilege_escalation
@default_allow_privilege_escalation.setter
def default_allow_privilege_escalation(self, default_allow_privilege_escalation):
"""Sets the default_allow_privilege_escalation of this ExtensionsV1beta1PodSecurityPolicySpec.
defaultAllowPrivilegeEscalation controls the default setting for whether a process can gain more privileges than its parent process. # noqa: E501
:param default_allow_privilege_escalation: The default_allow_privilege_escalation of this ExtensionsV1beta1PodSecurityPolicySpec. # noqa: E501
:type: bool
"""
self._default_allow_privilege_escalation = default_allow_privilege_escalation
@property
def forbidden_sysctls(self):
"""Gets the forbidden_sysctls of this ExtensionsV1beta1PodSecurityPolicySpec. # noqa: E501
forbiddenSysctls is a list of explicitly forbidden sysctls, defaults to none. Each entry is either a plain sysctl name or ends in \"*\" in which case it is considered as a prefix of forbidden sysctls. Single * means all sysctls are forbidden. Examples: e.g. \"foo/*\" forbids \"foo/bar\", \"foo/baz\", etc. e.g. \"foo.*\" forbids \"foo.bar\", \"foo.baz\", etc. # noqa: E501
:return: The forbidden_sysctls of this ExtensionsV1beta1PodSecurityPolicySpec. # noqa: E501
:rtype: list[str]
"""
return self._forbidden_sysctls
@forbidden_sysctls.setter
def forbidden_sysctls(self, forbidden_sysctls):
"""Sets the forbidden_sysctls of this ExtensionsV1beta1PodSecurityPolicySpec.
forbiddenSysctls is a list of explicitly forbidden sysctls, defaults to none. Each entry is either a plain sysctl name or ends in \"*\" in which case it is considered as a prefix of forbidden sysctls. Single * means all sysctls are forbidden. Examples: e.g. \"foo/*\" forbids \"foo/bar\", \"foo/baz\", etc. e.g. \"foo.*\" forbids \"foo.bar\", \"foo.baz\", etc. # noqa: E501
:param forbidden_sysctls: The forbidden_sysctls of this ExtensionsV1beta1PodSecurityPolicySpec. # noqa: E501
:type: list[str]
"""
self._forbidden_sysctls = forbidden_sysctls
@property
def fs_group(self):
"""Gets the fs_group of this ExtensionsV1beta1PodSecurityPolicySpec. # noqa: E501
:return: The fs_group of this ExtensionsV1beta1PodSecurityPolicySpec. # noqa: E501
:rtype: ExtensionsV1beta1FSGroupStrategyOptions
"""
return self._fs_group
@fs_group.setter
def fs_group(self, fs_group):
"""Sets the fs_group of this ExtensionsV1beta1PodSecurityPolicySpec.
:param fs_group: The fs_group of this ExtensionsV1beta1PodSecurityPolicySpec. # noqa: E501
:type: ExtensionsV1beta1FSGroupStrategyOptions
"""
if fs_group is None:
raise ValueError("Invalid value for `fs_group`, must not be `None`") # noqa: E501
self._fs_group = fs_group
@property
def host_ipc(self):
"""Gets the host_ipc of this ExtensionsV1beta1PodSecurityPolicySpec. # noqa: E501
hostIPC determines if the policy allows the use of HostIPC in the pod spec. # noqa: E501
:return: The host_ipc of this ExtensionsV1beta1PodSecurityPolicySpec. # noqa: E501
:rtype: bool
"""
return self._host_ipc
@host_ipc.setter
def host_ipc(self, host_ipc):
"""Sets the host_ipc of this ExtensionsV1beta1PodSecurityPolicySpec.
hostIPC determines if the policy allows the use of HostIPC in the pod spec. # noqa: E501
:param host_ipc: The host_ipc of this ExtensionsV1beta1PodSecurityPolicySpec. # noqa: E501
:type: bool
"""
self._host_ipc = host_ipc
@property
def host_network(self):
"""Gets the host_network of this ExtensionsV1beta1PodSecurityPolicySpec. # noqa: E501
hostNetwork determines if the policy allows the use of HostNetwork in the pod spec. # noqa: E501
:return: The host_network of this ExtensionsV1beta1PodSecurityPolicySpec. # noqa: E501
:rtype: bool
"""
return self._host_network
@host_network.setter
def host_network(self, host_network):
"""Sets the host_network of this ExtensionsV1beta1PodSecurityPolicySpec.
hostNetwork determines if the policy allows the use of HostNetwork in the pod spec. # noqa: E501
:param host_network: The host_network of this ExtensionsV1beta1PodSecurityPolicySpec. # noqa: E501
:type: bool
"""
self._host_network = host_network
@property
def host_pid(self):
"""Gets the host_pid of this ExtensionsV1beta1PodSecurityPolicySpec. # noqa: E501
hostPID determines if the policy allows the use of HostPID in the pod spec. # noqa: E501
:return: The host_pid of this ExtensionsV1beta1PodSecurityPolicySpec. # noqa: E501
:rtype: bool
"""
return self._host_pid
@host_pid.setter
def host_pid(self, host_pid):
"""Sets the host_pid of this ExtensionsV1beta1PodSecurityPolicySpec.
hostPID determines if the policy allows the use of HostPID in the pod spec. # noqa: E501
:param host_pid: The host_pid of this ExtensionsV1beta1PodSecurityPolicySpec. # noqa: E501
:type: bool
"""
self._host_pid = host_pid
@property
def host_ports(self):
"""Gets the host_ports of this ExtensionsV1beta1PodSecurityPolicySpec. # noqa: E501
hostPorts determines which host port ranges are allowed to be exposed. # noqa: E501
:return: The host_ports of this ExtensionsV1beta1PodSecurityPolicySpec. # noqa: E501
:rtype: list[ExtensionsV1beta1HostPortRange]
"""
return self._host_ports
@host_ports.setter
def host_ports(self, host_ports):
"""Sets the host_ports of this ExtensionsV1beta1PodSecurityPolicySpec.
hostPorts determines which host port ranges are allowed to be exposed. # noqa: E501
:param host_ports: The host_ports of this ExtensionsV1beta1PodSecurityPolicySpec. # noqa: E501
:type: list[ExtensionsV1beta1HostPortRange]
"""
self._host_ports = host_ports
@property
def privileged(self):
"""Gets the privileged of this ExtensionsV1beta1PodSecurityPolicySpec. # noqa: E501
privileged determines if a pod can request to be run as privileged. # noqa: E501
:return: The privileged of this ExtensionsV1beta1PodSecurityPolicySpec. # noqa: E501
:rtype: bool
"""
return self._privileged
@privileged.setter
def privileged(self, privileged):
"""Sets the privileged of this ExtensionsV1beta1PodSecurityPolicySpec.
privileged determines if a pod can request to be run as privileged. # noqa: E501
:param privileged: The privileged of this ExtensionsV1beta1PodSecurityPolicySpec. # noqa: E501
:type: bool
"""
self._privileged = privileged
@property
def read_only_root_filesystem(self):
"""Gets the read_only_root_filesystem of this ExtensionsV1beta1PodSecurityPolicySpec. # noqa: E501
readOnlyRootFilesystem when set to true will force containers to run with a read only root file system. If the container specifically requests to run with a non-read only root file system the PSP should deny the pod. If set to false the container may run with a read only root file system if it wishes but it will not be forced to. # noqa: E501
:return: The read_only_root_filesystem of this ExtensionsV1beta1PodSecurityPolicySpec. # noqa: E501
:rtype: bool
"""
return self._read_only_root_filesystem
@read_only_root_filesystem.setter
def read_only_root_filesystem(self, read_only_root_filesystem):
"""Sets the read_only_root_filesystem of this ExtensionsV1beta1PodSecurityPolicySpec.
readOnlyRootFilesystem when set to true will force containers to run with a read only root file system. If the container specifically requests to run with a non-read only root file system the PSP should deny the pod. If set to false the container may run with a read only root file system if it wishes but it will not be forced to. # noqa: E501
:param read_only_root_filesystem: The read_only_root_filesystem of this ExtensionsV1beta1PodSecurityPolicySpec. # noqa: E501
:type: bool
"""
self._read_only_root_filesystem = read_only_root_filesystem
@property
def required_drop_capabilities(self):
"""Gets the required_drop_capabilities of this ExtensionsV1beta1PodSecurityPolicySpec. # noqa: E501
requiredDropCapabilities are the capabilities that will be dropped from the container. These are required to be dropped and cannot be added. # noqa: E501
:return: The required_drop_capabilities of this ExtensionsV1beta1PodSecurityPolicySpec. # noqa: E501
:rtype: list[str]
"""
return self._required_drop_capabilities
@required_drop_capabilities.setter
def required_drop_capabilities(self, required_drop_capabilities):
"""Sets the required_drop_capabilities of this ExtensionsV1beta1PodSecurityPolicySpec.
requiredDropCapabilities are the capabilities that will be dropped from the container. These are required to be dropped and cannot be added. # noqa: E501
:param required_drop_capabilities: The required_drop_capabilities of this ExtensionsV1beta1PodSecurityPolicySpec. # noqa: E501
:type: list[str]
"""
self._required_drop_capabilities = required_drop_capabilities
@property
def run_as_group(self):
"""Gets the run_as_group of this ExtensionsV1beta1PodSecurityPolicySpec. # noqa: E501
:return: The run_as_group of this ExtensionsV1beta1PodSecurityPolicySpec. # noqa: E501
:rtype: ExtensionsV1beta1RunAsGroupStrategyOptions
"""
return self._run_as_group
@run_as_group.setter
def run_as_group(self, run_as_group):
"""Sets the run_as_group of this ExtensionsV1beta1PodSecurityPolicySpec.
:param run_as_group: The run_as_group of this ExtensionsV1beta1PodSecurityPolicySpec. # noqa: E501
:type: ExtensionsV1beta1RunAsGroupStrategyOptions
"""
self._run_as_group = run_as_group
@property
def run_as_user(self):
"""Gets the run_as_user of this ExtensionsV1beta1PodSecurityPolicySpec. # noqa: E501
:return: The run_as_user of this ExtensionsV1beta1PodSecurityPolicySpec. # noqa: E501
:rtype: ExtensionsV1beta1RunAsUserStrategyOptions
"""
return self._run_as_user
@run_as_user.setter
def run_as_user(self, run_as_user):
"""Sets the run_as_user of this ExtensionsV1beta1PodSecurityPolicySpec.
:param run_as_user: The run_as_user of this ExtensionsV1beta1PodSecurityPolicySpec. # noqa: E501
:type: ExtensionsV1beta1RunAsUserStrategyOptions
"""
if run_as_user is None:
raise ValueError("Invalid value for `run_as_user`, must not be `None`") # noqa: E501
self._run_as_user = run_as_user
@property
def se_linux(self):
"""Gets the se_linux of this ExtensionsV1beta1PodSecurityPolicySpec. # noqa: E501
:return: The se_linux of this ExtensionsV1beta1PodSecurityPolicySpec. # noqa: E501
:rtype: ExtensionsV1beta1SELinuxStrategyOptions
"""
return self._se_linux
@se_linux.setter
def se_linux(self, se_linux):
"""Sets the se_linux of this ExtensionsV1beta1PodSecurityPolicySpec.
:param se_linux: The se_linux of this ExtensionsV1beta1PodSecurityPolicySpec. # noqa: E501
:type: ExtensionsV1beta1SELinuxStrategyOptions
"""
if se_linux is None:
raise ValueError("Invalid value for `se_linux`, must not be `None`") # noqa: E501
self._se_linux = se_linux
@property
def supplemental_groups(self):
"""Gets the supplemental_groups of this ExtensionsV1beta1PodSecurityPolicySpec. # noqa: E501
:return: The supplemental_groups of this ExtensionsV1beta1PodSecurityPolicySpec. # noqa: E501
:rtype: ExtensionsV1beta1SupplementalGroupsStrategyOptions
"""
return self._supplemental_groups
@supplemental_groups.setter
def supplemental_groups(self, supplemental_groups):
"""Sets the supplemental_groups of this ExtensionsV1beta1PodSecurityPolicySpec.
:param supplemental_groups: The supplemental_groups of this ExtensionsV1beta1PodSecurityPolicySpec. # noqa: E501
:type: ExtensionsV1beta1SupplementalGroupsStrategyOptions
"""
if supplemental_groups is None:
raise ValueError("Invalid value for `supplemental_groups`, must not be `None`") # noqa: E501
self._supplemental_groups = supplemental_groups
@property
def volumes(self):
"""Gets the volumes of this ExtensionsV1beta1PodSecurityPolicySpec. # noqa: E501
volumes is a white list of allowed volume plugins. Empty indicates that no volumes may be used. To allow all volumes you may use '*'. # noqa: E501
:return: The volumes of this ExtensionsV1beta1PodSecurityPolicySpec. # noqa: E501
:rtype: list[str]
"""
return self._volumes
@volumes.setter
def volumes(self, volumes):
"""Sets the volumes of this ExtensionsV1beta1PodSecurityPolicySpec.
volumes is a white list of allowed volume plugins. Empty indicates that no volumes may be used. To allow all volumes you may use '*'. # noqa: E501
:param volumes: The volumes of this ExtensionsV1beta1PodSecurityPolicySpec. # noqa: E501
:type: list[str]
"""
self._volumes = volumes
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ExtensionsV1beta1PodSecurityPolicySpec):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
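# --- Illustrative sketch (not generated code) ---
# Shows how attribute_map can translate the snake_case keys returned by
# to_dict() back into the camelCase keys used in the Kubernetes API JSON.
# The strategy-options arguments below are plain dicts standing in for the
# corresponding generated model objects.
def _example_to_api_dict():
    spec = ExtensionsV1beta1PodSecurityPolicySpec(
        privileged=False,
        fs_group={'rule': 'RunAsAny'},
        run_as_user={'rule': 'RunAsAny'},
        se_linux={'rule': 'RunAsAny'},
        supplemental_groups={'rule': 'RunAsAny'},
    )
    data = spec.to_dict()
    return {ExtensionsV1beta1PodSecurityPolicySpec.attribute_map[key]: value
            for key, value in data.items() if value is not None}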
|
# Copyright (c) 2020-2021, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from cudf import Series
from cuml.common.exceptions import NotFittedError
from cuml.feature_extraction._stop_words import ENGLISH_STOP_WORDS
from cuml.common.sparsefuncs import csr_row_normalize_l1, csr_row_normalize_l2
from cuml.common.sparsefuncs import create_csr_matrix_from_count_df
from functools import partial
import cupy as cp
import numbers
import cudf
from cuml.common.type_utils import CUPY_SPARSE_DTYPES
from cudf.utils.dtypes import min_signed_type
import cuml.common.logger as logger
def _preprocess(doc, lower=False, remove_non_alphanumeric=False, delimiter=" ",
keep_underscore_char=True, remove_single_token_len=True):
"""
Chain together an optional series of text preprocessing steps to
apply to a document.
Parameters
----------
doc: cudf.Series[str]
The string to preprocess
lower: bool
Whether to use str.lower to lowercase all of the text
remove_non_alphanumeric: bool
Whether or not to remove non-alphanumeric characters.
keep_underscore_char: bool
Whether or not to keep the underscore character
Returns
-------
doc: cudf.Series[str]
preprocessed string
"""
if lower:
doc = doc.str.lower()
if remove_non_alphanumeric:
if keep_underscore_char:
            # why: sklearn by default keeps the `_` char along with alphanumerics.
            # We currently don't have an easy way of removing all chars except
            # `_` from a cudf.Series[str]; the temporary replacement below works around it.
temp_string = 'cumlSt'
doc = doc.str.replace('_', temp_string, regex=False)
doc = doc.str.filter_alphanum(' ', keep=True)
doc = doc.str.replace(temp_string, '_', regex=False)
else:
doc = doc.str.filter_alphanum(' ', keep=True)
        # sklearn by default removes tokens of length 1 when it strips
        # non-alphanumeric characters, so mirror that behaviour here
if remove_single_token_len:
doc = doc.str.filter_tokens(2)
return doc
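# A minimal usage sketch for `_preprocess` (assumption: a GPU with cuDF available;
# the input strings and the helper name are illustrative only).
def _preprocess_usage_sketch():
    docs = cudf.Series(["Hello_World, 42!", "a B c"])
    out = _preprocess(docs, lower=True, remove_non_alphanumeric=True)
    # Punctuation is replaced by spaces, `_` is kept (mirroring sklearn), the text
    # is lowercased and single-character tokens are dropped.
    return out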
class _VectorizerMixin:
"""
Provides common code for text vectorizers (tokenization logic).
"""
def _remove_stop_words(self, doc):
"""
Remove stop words only if needed.
"""
if self.analyzer == 'word' and self.stop_words is not None:
stop_words = Series(self._get_stop_words())
doc = doc.str.replace_tokens(stop_words,
replacements=self.delimiter,
delimiter=self.delimiter)
return doc
def build_preprocessor(self):
"""
Return a function to preprocess the text before tokenization.
If analyzer == 'word' and stop_words is not None, stop words are
removed from the input documents after preprocessing.
Returns
-------
preprocessor: callable
A function to preprocess the text before tokenization.
"""
if self.preprocessor is not None:
preprocess = self.preprocessor
else:
remove_non_alpha = self.analyzer == 'word'
preprocess = partial(_preprocess, lower=self.lowercase,
remove_non_alphanumeric=remove_non_alpha,
delimiter=self.delimiter)
return lambda doc: self._remove_stop_words(preprocess(doc))
def _get_stop_words(self):
"""
Build or fetch the effective stop words list.
Returns
-------
stop_words: list or None
A list of stop words.
"""
if self.stop_words == "english":
return list(ENGLISH_STOP_WORDS)
elif isinstance(self.stop_words, str):
raise ValueError("not a built-in stop list: %s" % self.stop_words)
elif self.stop_words is None:
return None
else: # assume it's a collection
return list(self.stop_words)
def get_char_ngrams(self, ngram_size, str_series, doc_id_sr):
"""
        Handles ngram generation for character analyzers.
When analyzer is 'char_wb', we generate ngrams within word boundaries,
meaning we need to first tokenize and pad each token with a delimiter.
"""
if self.analyzer == 'char_wb' and ngram_size != 1:
token_count = str_series.str.token_count(self.delimiter)
tokens = str_series.str.tokenize(self.delimiter)
del str_series
padding = Series(self.delimiter).repeat(len(tokens))
tokens = tokens.str.cat(padding)
padding = padding.reset_index(drop=True)
tokens = padding.str.cat(tokens)
tokens = tokens.reset_index(drop=True)
ngram_sr = tokens.str.character_ngrams(n=ngram_size)
doc_id_df = cudf.DataFrame({
'doc_id': doc_id_sr.repeat(token_count).reset_index(drop=True),
# formula to count ngrams given number of letters per token:
'ngram_count': tokens.str.len() - (ngram_size - 1)
})
del tokens
ngram_count = doc_id_df.groupby('doc_id',
sort=True).sum()['ngram_count']
return ngram_sr, ngram_count, token_count
if ngram_size == 1:
token_count = str_series.str.len()
ngram_sr = str_series.str.character_tokenize()
del str_series
elif self.analyzer == 'char':
token_count = str_series.str.len()
ngram_sr = str_series.str.character_ngrams(n=ngram_size)
del str_series
ngram_count = token_count - (ngram_size - 1)
return ngram_sr, ngram_count, token_count
def get_ngrams(self, str_series, ngram_size, doc_id_sr):
"""
This returns the ngrams for the string series
Parameters
----------
str_series : (cudf.Series)
String series to tokenize
ngram_size : int
Gram level to get (1 for unigram, 2 for bigram etc)
doc_id_sr : cudf.Series
Int series containing documents ids
"""
if self.analyzer == 'word':
token_count_sr = str_series.str.token_count(self.delimiter)
ngram_sr = str_series.str.ngrams_tokenize(n=ngram_size,
separator=" ",
delimiter=self.delimiter)
# formula to count ngrams given number of tokens x per doc: x-(n-1)
ngram_count = token_count_sr - (ngram_size - 1)
else:
ngram_sr, ngram_count, token_count_sr = self.get_char_ngrams(
ngram_size, str_series, doc_id_sr
)
not_empty_docs = token_count_sr > 0
doc_id_sr = doc_id_sr[not_empty_docs]
ngram_count = ngram_count[not_empty_docs]
doc_id_sr = doc_id_sr.repeat(ngram_count).reset_index(drop=True)
tokenized_df = cudf.DataFrame()
tokenized_df["doc_id"] = doc_id_sr
tokenized_df["token"] = ngram_sr
return tokenized_df
def _create_tokenized_df(self, docs):
"""
Creates a tokenized DataFrame from a string Series.
Each row describes the token string and the corresponding document id.
"""
min_n, max_n = self.ngram_range
doc_id = cp.arange(start=0, stop=len(docs), dtype=cp.int32)
doc_id = Series(doc_id)
tokenized_df_ls = [
self.get_ngrams(docs, n, doc_id)
for n in range(min_n, max_n + 1)
]
del docs
tokenized_df = cudf.concat(tokenized_df_ls)
tokenized_df = tokenized_df.reset_index(drop=True)
return tokenized_df
def _compute_empty_doc_ids(self, count_df, n_doc):
"""
        Compute the ids of empty documents from the remaining doc ids, given
        the total number of documents.
"""
remaining_docs = count_df['doc_id'].unique()
dtype = min_signed_type(n_doc)
doc_ids = cudf.DataFrame(data={'all_ids': cp.arange(0, n_doc,
dtype=dtype)},
dtype=dtype)
empty_docs = doc_ids - doc_ids.iloc[remaining_docs]
empty_ids = empty_docs[empty_docs['all_ids'].isnull()].index.values
return empty_ids
def _validate_params(self):
"""
Check validity of ngram_range parameter
"""
min_n, max_m = self.ngram_range
msg = ""
if min_n < 1:
msg += "lower boundary must be >= 1. "
if min_n > max_m:
msg += "lower boundary larger than the upper boundary. "
if msg != "":
msg = f"Invalid value for ngram_range={self.ngram_range} {msg}"
raise ValueError(msg)
if hasattr(self, "n_features"):
if not isinstance(self.n_features, numbers.Integral):
                raise TypeError(
                    f"n_features must be integral, got {self.n_features} "
                    f"({type(self.n_features)})."
                )
def _warn_for_unused_params(self):
if self.analyzer != "word" and self.stop_words is not None:
logger.warn(
"The parameter 'stop_words' will not be used"
" since 'analyzer' != 'word'"
)
def _check_sklearn_params(self, analyzer, sklearn_params):
if callable(analyzer):
raise ValueError(
"cuML does not support callable analyzer,"
" please refer to the cuML documentation for"
" more information."
)
for key, vals in sklearn_params.items():
if vals is not None:
                raise TypeError(
                    f"The Scikit-learn parameter `{key}` is not supported"
                    " in cuML, please read the cuML documentation for"
                    " more information."
                )
def _document_frequency(X):
"""
Count the number of non-zero values for each feature in X.
"""
doc_freq = (
X[["token", "doc_id"]]
.groupby(["token"], sort=True)
.count()
)
return doc_freq["doc_id"].values
def _term_frequency(X):
"""
Count the number of occurrences of each term in X.
"""
term_freq = (
X[["token", "count"]]
.groupby(["token"], sort=True)
.sum()
)
return term_freq["count"].values
class CountVectorizer(_VectorizerMixin):
"""
Convert a collection of text documents to a matrix of token counts
If you do not provide an a-priori dictionary then the number of features
will be equal to the vocabulary size found by analyzing the data.
Parameters
----------
lowercase : boolean, True by default
Convert all characters to lowercase before tokenizing.
preprocessor : callable or None (default)
Override the preprocessing (string transformation) stage while
preserving the tokenizing and n-grams generation steps.
stop_words : string {'english'}, list, or None (default)
If 'english', a built-in stop word list for English is used.
If a list, that list is assumed to contain stop words, all of which
will be removed from the input documents.
If None, no stop words will be used. max_df can be set to a value
to automatically detect and filter stop words based on intra corpus
document frequency of terms.
ngram_range : tuple (min_n, max_n), default=(1, 1)
The lower and upper boundary of the range of n-values for different
        word n-grams or char n-grams to be extracted. All values of n such
        that min_n <= n <= max_n will be used. For example an
``ngram_range`` of ``(1, 1)`` means only unigrams, ``(1, 2)`` means
unigrams and bigrams, and ``(2, 2)`` means only bigrams.
analyzer : string, {'word', 'char', 'char_wb'}
Whether the feature should be made of word n-gram or character
n-grams.
Option 'char_wb' creates character n-grams only from text inside
word boundaries; n-grams at the edges of words are padded with space.
max_df : float in range [0.0, 1.0] or int, default=1.0
When building the vocabulary ignore terms that have a document
frequency strictly higher than the given threshold (corpus-specific
stop words).
If float, the parameter represents a proportion of documents, integer
absolute counts.
This parameter is ignored if vocabulary is not None.
min_df : float in range [0.0, 1.0] or int, default=1
When building the vocabulary ignore terms that have a document
frequency strictly lower than the given threshold. This value is also
called cut-off in the literature.
If float, the parameter represents a proportion of documents, integer
absolute counts.
This parameter is ignored if vocabulary is not None.
max_features : int or None, default=None
If not None, build a vocabulary that only consider the top
max_features ordered by term frequency across the corpus.
This parameter is ignored if vocabulary is not None.
vocabulary : cudf.Series, optional
If not given, a vocabulary is determined from the input documents.
binary : boolean, default=False
If True, all non zero counts are set to 1. This is useful for discrete
probabilistic models that model binary events rather than integer
counts.
dtype : type, optional
Type of the matrix returned by fit_transform() or transform().
delimiter : str, whitespace by default
String used as a replacement for stop words if stop_words is not None.
Typically the delimiting character between words is a good choice.
Attributes
----------
vocabulary_ : cudf.Series[str]
Array mapping from feature integer indices to feature name.
stop_words_ : cudf.Series[str]
Terms that were ignored because they either:
- occurred in too many documents (`max_df`)
- occurred in too few documents (`min_df`)
- were cut off by feature selection (`max_features`).
This is only available if no vocabulary was given.
"""
def __init__(self, input=None, encoding=None, decode_error=None,
strip_accents=None, lowercase=True, preprocessor=None,
tokenizer=None, stop_words=None, token_pattern=None,
ngram_range=(1, 1), analyzer='word', max_df=1.0, min_df=1,
max_features=None, vocabulary=None, binary=False,
dtype=cp.float32, delimiter=' '):
self.preprocessor = preprocessor
self.analyzer = analyzer
self.lowercase = lowercase
self.stop_words = stop_words
self.max_df = max_df
self.min_df = min_df
if max_df < 0 or min_df < 0:
raise ValueError("negative value for max_df or min_df")
self.max_features = max_features
if max_features is not None:
if not isinstance(max_features, int) or max_features <= 0:
raise ValueError(
"max_features=%r, neither a positive integer nor None"
% max_features)
self.ngram_range = ngram_range
self.vocabulary = vocabulary
self.binary = binary
self.dtype = dtype
self.delimiter = delimiter
if dtype not in CUPY_SPARSE_DTYPES:
msg = f"Expected dtype in {CUPY_SPARSE_DTYPES}, got {dtype}"
raise ValueError(msg)
sklearn_params = {"input": input,
"encoding": encoding,
"decode_error": decode_error,
"strip_accents": strip_accents,
"tokenizer": tokenizer,
"token_pattern": token_pattern}
self._check_sklearn_params(analyzer, sklearn_params)
def _count_vocab(self, tokenized_df):
"""
Count occurrences of tokens in each document.
"""
# Transform string tokens into token indexes from 0 to len(vocab)
# The indexes are based on lexicographical ordering.
tokenized_df['token'] = tokenized_df['token'].astype('category')
tokenized_df['token'] = tokenized_df['token'].cat.set_categories(
self.vocabulary_
)._column.codes
# Count of each token in each document
count_df = (
tokenized_df[["doc_id", "token"]]
.groupby(["doc_id", "token"], sort=True)
.size()
.reset_index()
.rename({0: "count"}, axis=1)
)
return count_df
def _filter_and_renumber(self, df, keep_values, column):
"""
Filter dataframe to keep only values from column matching
keep_values.
"""
df[column] = (
df[column].astype('category')
.cat.set_categories(keep_values)
._column.codes
)
df = df.dropna(subset=column)
return df
def _limit_features(self, count_df, vocab, high, low, limit):
"""
Remove too rare or too common features.
        Prune features that are non zero in more documents than ``high`` or in
        fewer documents than ``low``, modifying the vocabulary and restricting
        it to at most the ``limit`` most frequent features.
Sets `self.vocabulary_` and `self.stop_words_` with the new values.
"""
if high is None and low is None and limit is None:
self.stop_words_ = None
return count_df
document_frequency = _document_frequency(count_df)
mask = cp.ones(len(document_frequency), dtype=bool)
if high is not None:
mask &= document_frequency <= high
if low is not None:
mask &= document_frequency >= low
if limit is not None and mask.sum() > limit:
term_frequency = _term_frequency(count_df)
mask_inds = (-term_frequency[mask]).argsort()[:limit]
new_mask = cp.zeros(len(document_frequency), dtype=bool)
new_mask[cp.where(mask)[0][mask_inds]] = True
mask = new_mask
keep_idx = cp.where(mask)[0].astype(cp.int32)
keep_num = keep_idx.shape[0]
if keep_num == 0:
raise ValueError("After pruning, no terms remain. Try a lower"
" min_df or a higher max_df.")
if len(vocab) - keep_num != 0:
count_df = self._filter_and_renumber(count_df, keep_idx, 'token')
self.stop_words_ = vocab[~mask].reset_index(drop=True)
self.vocabulary_ = vocab[mask].reset_index(drop=True)
return count_df
def _preprocess(self, raw_documents):
preprocess = self.build_preprocessor()
return preprocess(raw_documents)
def fit(self, raw_documents):
"""
Build a vocabulary of all tokens in the raw documents.
Parameters
----------
raw_documents : cudf.Series
A Series of string documents
Returns
-------
self
"""
self.fit_transform(raw_documents)
return self
def fit_transform(self, raw_documents):
"""
Build the vocabulary and return document-term matrix.
Equivalent to ``self.fit(X).transform(X)`` but preprocess `X` only
once.
Parameters
----------
raw_documents : cudf.Series
A Series of string documents
Returns
-------
X : cupy csr array of shape (n_samples, n_features)
Document-term matrix.
"""
self._warn_for_unused_params()
self._validate_params()
self._fixed_vocabulary = self.vocabulary is not None
docs = self._preprocess(raw_documents)
n_doc = len(docs)
tokenized_df = self._create_tokenized_df(docs)
if self._fixed_vocabulary:
self.vocabulary_ = self.vocabulary
else:
self.vocabulary_ = tokenized_df["token"].unique()
count_df = self._count_vocab(tokenized_df)
if not self._fixed_vocabulary:
max_doc_count = (self.max_df
if isinstance(self.max_df, numbers.Integral)
else self.max_df * n_doc)
min_doc_count = (self.min_df
if isinstance(self.min_df, numbers.Integral)
else self.min_df * n_doc)
if max_doc_count < min_doc_count:
raise ValueError(
"max_df corresponds to < documents than min_df")
count_df = self._limit_features(count_df, self.vocabulary_,
max_doc_count,
min_doc_count,
self.max_features)
empty_doc_ids = self._compute_empty_doc_ids(count_df, n_doc)
X = create_csr_matrix_from_count_df(count_df, empty_doc_ids,
n_doc, len(self.vocabulary_),
dtype=self.dtype)
if self.binary:
X.data.fill(1)
return X
def transform(self, raw_documents):
"""
Transform documents to document-term matrix.
Extract token counts out of raw text documents using the vocabulary
fitted with fit or the one provided to the constructor.
Parameters
----------
raw_documents : cudf.Series
A Series of string documents
Returns
-------
X : cupy csr array of shape (n_samples, n_features)
Document-term matrix.
"""
if not hasattr(self, "vocabulary_"):
if self.vocabulary is not None:
self.vocabulary_ = self.vocabulary
else:
raise NotFittedError()
docs = self._preprocess(raw_documents)
n_doc = len(docs)
tokenized_df = self._create_tokenized_df(docs)
count_df = self._count_vocab(tokenized_df)
empty_doc_ids = self._compute_empty_doc_ids(count_df, n_doc)
X = create_csr_matrix_from_count_df(
count_df, empty_doc_ids, n_doc, len(self.vocabulary_),
dtype=self.dtype
)
if self.binary:
X.data.fill(1)
return X
def inverse_transform(self, X):
"""
Return terms per document with nonzero entries in X.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Document-term matrix.
Returns
-------
X_inv : list of cudf.Series of shape (n_samples,)
List of Series of terms.
"""
vocab = Series(self.vocabulary_)
return [vocab[X[i, :].indices] for i in range(X.shape[0])]
def get_feature_names(self):
"""
Array mapping from feature integer indices to feature name.
Returns
-------
feature_names : Series
A list of feature names.
"""
return self.vocabulary_
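# A minimal end-to-end usage sketch for CountVectorizer (assumption: a GPU with
# cuDF/CuPy available; the corpus and the helper name are illustrative only).
def _count_vectorizer_usage_sketch():
    corpus = Series([
        'This is the first document.',
        'This document is the second document.',
    ])
    vec = CountVectorizer(ngram_range=(1, 1))
    X = vec.fit_transform(corpus)       # cupyx CSR matrix, one row per document
    return X, vec.get_feature_names()   # feature names are returned as a cudf Series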
class HashingVectorizer(_VectorizerMixin):
"""
Convert a collection of text documents to a matrix of token occurrences
It turns a collection of text documents into a cupyx.scipy.sparse matrix
holding token occurrence counts (or binary occurrence information),
possibly normalized as token frequencies if norm='l1' or projected on the
euclidean unit sphere if norm='l2'.
This text vectorizer implementation uses the hashing trick to find the
token string name to feature integer index mapping.
This strategy has several advantages:
    - it is very low memory scalable to large datasets as there is no need to
      store a vocabulary dictionary in memory, which matters even more on GPUs,
      which are often memory constrained
- it is fast to pickle and un-pickle as it holds no state besides the
constructor parameters
- it can be used in a streaming (partial fit) or parallel pipeline as
there is no state computed during fit.
There are also a couple of cons (vs using a CountVectorizer with an
in-memory vocabulary):
- there is no way to compute the inverse transform (from feature indices
to string feature names) which can be a problem when trying to
introspect which features are most important to a model.
- there can be collisions: distinct tokens can be mapped to the same
feature index. However in practice this is rarely an issue if n_features
is large enough (e.g. 2 ** 18 for text classification problems).
- no IDF weighting as this would render the transformer stateful.
The hash function employed is the signed 32-bit version of Murmurhash3.
Parameters
----------
lowercase : bool, default=True
Convert all characters to lowercase before tokenizing.
preprocessor : callable or None (default)
Override the preprocessing (string transformation) stage while
preserving the tokenizing and n-grams generation steps.
stop_words : string {'english'}, list, default=None
If 'english', a built-in stop word list for English is used.
There are several known issues with 'english' and you should
consider an alternative.
If a list, that list is assumed to contain stop words, all of which
will be removed from the resulting tokens.
Only applies if ``analyzer == 'word'``.
ngram_range : tuple (min_n, max_n), default=(1, 1)
The lower and upper boundary of the range of n-values for different
        word n-grams or char n-grams to be extracted. All values of n such
        that min_n <= n <= max_n will be used. For example an
``ngram_range`` of ``(1, 1)`` means only unigrams, ``(1, 2)`` means
unigrams and bigrams, and ``(2, 2)`` means only bigrams.
analyzer : string, {'word', 'char', 'char_wb'}
Whether the feature should be made of word n-gram or character
n-grams.
Option 'char_wb' creates character n-grams only from text inside
word boundaries; n-grams at the edges of words are padded with space.
n_features : int, default=(2 ** 20)
The number of features (columns) in the output matrices. Small numbers
of features are likely to cause hash collisions, but large numbers
will cause larger coefficient dimensions in linear learners.
binary : bool, default=False.
If True, all non zero counts are set to 1. This is useful for discrete
probabilistic models that model binary events rather than integer
counts.
norm : {'l1', 'l2'}, default='l2'
Norm used to normalize term vectors. None for no normalization.
alternate_sign : bool, default=True
        When True, an alternating sign is added to the features so as to
        approximately conserve the inner product in the hashed space even for
        small n_features. This approach is similar to sparse random projection.
dtype : type, optional
Type of the matrix returned by fit_transform() or transform().
delimiter : str, whitespace by default
String used as a replacement for stop words if `stop_words` is not
None. Typically the delimiting character between words is a good
choice.
Examples
--------
.. code-block:: python
from cuml.feature_extraction.text import HashingVectorizer
corpus = [
'This is the first document.',
'This document is the second document.',
'And this is the third one.',
'Is this the first document?',
]
vectorizer = HashingVectorizer(n_features=2**4)
X = vectorizer.fit_transform(corpus)
print(X.shape)
Output:
.. code-block:: python
(4, 16)
See Also
--------
CountVectorizer, TfidfVectorizer
"""
def __init__(
self,
input=None,
encoding=None,
decode_error=None,
strip_accents=None,
lowercase=True,
preprocessor=None,
tokenizer=None,
stop_words=None,
token_pattern=None,
ngram_range=(1, 1),
analyzer="word",
n_features=(2 ** 20),
binary=False,
norm="l2",
alternate_sign=True,
dtype=cp.float32,
delimiter=" ",
):
self.preprocessor = preprocessor
self.analyzer = analyzer
self.lowercase = lowercase
self.stop_words = stop_words
self.n_features = n_features
self.ngram_range = ngram_range
self.binary = binary
self.norm = norm
self.alternate_sign = alternate_sign
self.dtype = dtype
self.delimiter = delimiter
if dtype not in CUPY_SPARSE_DTYPES:
msg = f"Expected dtype in {CUPY_SPARSE_DTYPES}, got {dtype}"
raise ValueError(msg)
if self.norm not in ("l1", "l2", None):
raise ValueError(f"{self.norm} is not a supported norm")
sklearn_params = {
"input": input,
"encoding": encoding,
"decode_error": decode_error,
"strip_accents": strip_accents,
"tokenizer": tokenizer,
"token_pattern": token_pattern,
}
self._check_sklearn_params(analyzer, sklearn_params)
def partial_fit(self, X, y=None):
"""
        Does nothing: this transformer is stateless.
This method is just there to mark the fact that this transformer
can work in a streaming setup.
Parameters
----------
        X : cudf.Series
            A Series of string documents.
"""
return self
def fit(self, X, y=None):
"""
This method only checks the input type and the model parameter.
It does not do anything meaningful as this transformer is stateless
Parameters
----------
X : cudf.Series
A Series of string documents
"""
if not (
isinstance(X, cudf.Series)
and isinstance(X._column, cudf.core.column.StringColumn)
):
raise ValueError(f"cudf.Series([str]) expected ,got {type(X)}")
self._warn_for_unused_params()
self._validate_params()
return self
def _preprocess(self, raw_documents):
preprocess = self.build_preprocessor()
return preprocess(raw_documents)
def _count_hash(self, tokenized_df):
"""
Count occurrences of tokens in each document.
"""
# Transform string tokens into token indexes from 0 to n_features
tokenized_df["token"] = tokenized_df["token"].hash_values()
if self.alternate_sign:
# below logic is equivalent to: value *= ((h >= 0) * 2) - 1
tokenized_df["value"] = ((tokenized_df["token"] >= 0) * 2) - 1
tokenized_df["token"] = tokenized_df["token"].abs() %\
self.n_features
count_ser = tokenized_df.groupby(["doc_id", "token"],
sort=True).value.sum()
count_ser.name = "count"
else:
tokenized_df["token"] = tokenized_df["token"].abs() %\
self.n_features
count_ser = tokenized_df.groupby(["doc_id", "token"],
sort=True).size()
count_ser.name = "count"
count_df = count_ser.reset_index(drop=False)
del count_ser, tokenized_df
return count_df
def fit_transform(self, X, y=None):
"""
Transform a sequence of documents to a document-term matrix.
Parameters
----------
X : iterable over raw text documents, length = n_samples
Samples. Each sample must be a text document (either bytes or
unicode strings, file name or file object depending on the
constructor argument) which will be tokenized and hashed.
y : any
Ignored. This parameter exists only for compatibility with
sklearn.pipeline.Pipeline.
Returns
-------
X : sparse CuPy CSR matrix of shape (n_samples, n_features)
Document-term matrix.
"""
return self.fit(X, y).transform(X)
def transform(self, raw_documents):
"""
Transform documents to document-term matrix.
Extract token counts out of raw text documents using the vocabulary
fitted with fit or the one provided to the constructor.
Parameters
----------
raw_documents : cudf.Series
A Series of string documents
Returns
-------
X : sparse CuPy CSR matrix of shape (n_samples, n_features)
Document-term matrix.
"""
docs = self._preprocess(raw_documents)
del raw_documents
n_doc = len(docs)
tokenized_df = self._create_tokenized_df(docs)
del docs
count_df = self._count_hash(tokenized_df)
del tokenized_df
empty_doc_ids = self._compute_empty_doc_ids(count_df, n_doc)
X = create_csr_matrix_from_count_df(
count_df, empty_doc_ids, n_doc, self.n_features,
dtype=self.dtype
)
if self.binary:
X.data.fill(1)
if self.norm:
if self.norm == "l1":
csr_row_normalize_l1(X, inplace=True)
elif self.norm == "l2":
csr_row_normalize_l2(X, inplace=True)
return X
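# A minimal usage sketch for HashingVectorizer (assumption: a GPU with cuDF/CuPy;
# the corpus and helper name are illustrative only). It contrasts the default
# l2-normalized counts with raw binary occurrences.
def _hashing_vectorizer_usage_sketch():
    corpus = Series(['one two two', 'three three three'])
    normalized = HashingVectorizer(n_features=2 ** 8).fit_transform(corpus)
    binary = HashingVectorizer(n_features=2 ** 8, binary=True, norm=None).transform(corpus)
    return normalized, binary  # both are sparse CSR matrices of shape (2, 256)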
|
from multiprocessing import Value
from random import choice
from chillow.service.ai.pathfinding_ai import PathfindingAI
from chillow.service.ai.search_tree_ai import SearchTreeAI
from chillow.model.action import Action
from chillow.model.game import Game
from chillow.model.player import Player
from chillow.service.game_service import GameService
class SearchTreePathfindingAI(PathfindingAI, SearchTreeAI):
"""This AI combines the SearchTreeAI and the PathfindingAI by favoring the former.
    To do so, it first uses the SearchTreeAI to find all actions that let the player survive the next rounds and
    afterwards lets the PathfindingAI determine which of these is the best action to perform.
Attributes:
player: The player associated with this AI.
"""
def __init__(self, player: Player, max_speed: int, count_paths_to_check: int, depth: int,
distance_to_check: int = 0):
"""Creates a new object of the SearchTreePathfindingAI.
Args:
player: The player assigned to the AI.
max_speed: The maximum speed the AI can reach.
count_paths_to_check: The number of paths used to avoid dead ends.
            depth: The number of actions that are pre-calculated (the search depth).
            distance_to_check:
                The maximum distance an enemy player may be away while still being taken into
                account in the calculations.
"""
PathfindingAI.__init__(self, player, max_speed, count_paths_to_check)
SearchTreeAI.__init__(self, player, depth, max_speed, distance_to_check=distance_to_check)
def get_information(self) -> str:
"""See base class."""
return "max_speed=" + str(self._max_speed) \
+ ", count_paths_to_check=" + str(self._get_count_paths_to_check()) \
+ ", depth=" + str(self._get_depth()) \
+ ", distance_to_check=" + str(self._get_distance_to_check())
def create_next_action(self, game: Game, return_value: Value):
"""See base class."""
self._turn_ctr += 1
surviving_actions = self.create_all_next_surviving_actions(game)
        if surviving_actions is not None and len(surviving_actions) > 0:
            # Write a random surviving action into the shared value right away as a fallback,
            # then replace it with the best action according to the pathfinding evaluation.
            return_value.value = choice(surviving_actions).get_index()
            return_value.value = self.find_actions_by_best_path_connection(surviving_actions, game)[0][0].get_index()
else:
surviving_pathfinding_actions = self.find_actions_by_best_path_connection(
self.find_surviving_actions(GameService(game), 1), game)
return_value.value = surviving_pathfinding_actions[0][0].get_index() \
if surviving_pathfinding_actions is not None and len(surviving_pathfinding_actions) > 0 \
else Action.get_default().get_index()
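# A minimal usage sketch (assumption: `player` and `game` come from a running match
# elsewhere; the parameter values and the helper name are illustrative only).
def _search_tree_pathfinding_sketch(player: Player, game: Game) -> int:
    ai = SearchTreePathfindingAI(player, max_speed=3, count_paths_to_check=10, depth=2, distance_to_check=30)
    chosen = Value('i')                 # shared integer the AI writes the action index into
    ai.create_next_action(game, chosen)
    return chosen.value                 # index of the Action the player should perform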
|
'''
This is an extended unittest module for Kivy, to make unittests based on
graphics with an OpenGL context.
The idea is to render a Widget tree, and after 1, 2 or more frames, a
screenshot will be taken and compared to the original one.
If no screenshot exists for the current test, the very first one will be used.
The screenshots live in the 'kivy/tests/results' folder and are in PNG format,
320x240 pixels.
'''
__all__ = ('GraphicUnitTest', 'UnitTestTouch', 'UTMotionEvent', 'async_run')
import unittest
import logging
import pytest
import sys
import os
import threading
from kivy.graphics.cgl import cgl_get_backend_name
from kivy.input.motionevent import MotionEvent
log = logging.getLogger('unittest')
_base = object
if 'mock' != cgl_get_backend_name():
    # check what the gl backend might be; we can't know for sure
    # what it'll be until it is actually initialized by the window.
_base = unittest.TestCase
make_screenshots = os.environ.get('KIVY_UNITTEST_SCREENSHOTS')
http_server = None
http_server_ready = threading.Event()
kivy_eventloop = os.environ.get('KIVY_EVENTLOOP', 'asyncio')
def ensure_web_server():
if http_server is not None:
return True
def _start_web_server():
global http_server
try:
from SimpleHTTPServer import SimpleHTTPRequestHandler
from SocketServer import TCPServer
except ImportError:
from http.server import SimpleHTTPRequestHandler
from socketserver import TCPServer
try:
handler = SimpleHTTPRequestHandler
handler.directory = os.path.join(
os.path.dirname(__file__), "..", "..")
http_server = TCPServer(
("", 8000), handler, bind_and_activate=False)
http_server.daemon_threads = True
http_server.allow_reuse_address = True
http_server.server_bind()
http_server.server_activate()
http_server_ready.set()
http_server.serve_forever()
except:
import traceback
traceback.print_exc()
finally:
http_server = None
http_server_ready.set()
th = threading.Thread(target=_start_web_server)
th.daemon = True
th.start()
http_server_ready.wait()
if http_server is None:
raise Exception("Unable to start webserver")
class GraphicUnitTest(_base):
framecount = 0
def _force_refresh(self, *largs):
        # this prevents getting stuck in some cases when the screen doesn't
        # refresh and we would wait for a self.framecount that never goes down
from kivy.base import EventLoop
win = EventLoop.window
if win and win.canvas:
win.canvas.ask_update()
def render(self, root, framecount=1):
'''Call rendering process using the `root` widget.
The screenshot will be done in `framecount` frames.
'''
from kivy.base import runTouchApp
from kivy.clock import Clock
self.framecount = framecount
try:
Clock.schedule_interval(self._force_refresh, 1)
runTouchApp(root)
finally:
Clock.unschedule(self._force_refresh)
# reset for the next test, but nobody will know if it will be used :/
if self.test_counter != 0:
self.tearDown(fake=True)
self.setUp()
def run(self, *args, **kwargs):
        '''Extend the run of unittest to check whether the results directory
        exists; when screenshots are enabled and it is missing, it is created.
'''
from os.path import join, dirname, exists
results_dir = join(dirname(__file__), 'results')
if make_screenshots and not exists(results_dir):
            log.warning('No result directory found, creating it.')
os.mkdir(results_dir)
self.test_counter = 0
self.results_dir = results_dir
self.test_failed = False
return super(GraphicUnitTest, self).run(*args, **kwargs)
def setUp(self):
'''Prepare the graphic test, with:
- Window size fixed to 320x240
- Default kivy configuration
- Without any kivy input
'''
# use default kivy configuration (don't load user file.)
from os import environ
environ['KIVY_USE_DEFAULTCONFIG'] = '1'
# force window size + remove all inputs
from kivy.config import Config
Config.set('graphics', 'width', '320')
Config.set('graphics', 'height', '240')
for items in Config.items('input'):
Config.remove_option('input', items[0])
# bind ourself for the later screenshot
from kivy.core.window import Window
self.Window = Window
Window.bind(on_flip=self.on_window_flip)
# ensure our window is correctly created
Window.create_window()
Window.register()
Window.initialized = True
Window.canvas.clear()
Window.close = lambda *s: True
def on_window_flip(self, window):
        '''Internal method called when the window has just displayed an
        image.
        Each time an image is shown, we decrement our framecount. When
        framecount reaches 0, we take the screenshot.
        The screenshot is written to a temporary location and compared to the
        original one -> test ok/ko.
If no screenshot is available in the results directory, a new one will
be created.
'''
from kivy.base import EventLoop
from tempfile import mkstemp
from os.path import join, exists
from os import unlink, close
from shutil import move, copy
# don't save screenshot until we have enough frames.
# log.debug('framecount %d' % self.framecount)
# ! check if there is 'framecount', otherwise just
# ! assume zero e.g. if handling runTouchApp manually
self.framecount = getattr(self, 'framecount', 0) - 1
if self.framecount > 0:
return
# don't create screenshots if not requested manually
if not make_screenshots:
EventLoop.stop()
return
reffn = None
match = False
try:
# just get a temporary name
fd, tmpfn = mkstemp(suffix='.png', prefix='kivyunit-')
close(fd)
unlink(tmpfn)
# get a filename for the current unit test
self.test_counter += 1
test_uid = '%s-%d.png' % (
'_'.join(self.id().split('.')[-2:]),
self.test_counter)
# capture the screen
log.info('Capturing screenshot for %s' % test_uid)
            tmpfn = self.Window.screenshot(tmpfn)
log.info('Capture saved at %s' % tmpfn)
# search the file to compare to
reffn = join(self.results_dir, test_uid)
log.info('Compare with %s' % reffn)
# get sourcecode
import inspect
frame = inspect.getouterframes(inspect.currentframe())[6]
sourcecodetab, line = inspect.getsourcelines(frame[0])
line = frame[2] - line
currentline = sourcecodetab[line]
sourcecodetab[line] = '<span style="color: red;">%s</span>' % (
currentline)
sourcecode = ''.join(sourcecodetab)
sourcecodetab[line] = '>>>>>>>>\n%s<<<<<<<<\n' % currentline
sourcecodeask = ''.join(sourcecodetab)
if not exists(reffn):
log.info('No image reference, move %s as ref ?' % test_uid)
if self.interactive_ask_ref(sourcecodeask, tmpfn, self.id()):
move(tmpfn, reffn)
tmpfn = reffn
log.info('Image used as reference')
match = True
else:
log.info('Image discarded')
else:
from kivy.core.image import Image as CoreImage
s1 = CoreImage(tmpfn, keep_data=True)
sd1 = s1.image._data[0].data
s2 = CoreImage(reffn, keep_data=True)
sd2 = s2.image._data[0].data
if sd1 != sd2:
log.critical(
'%s at render() #%d, images are different.' % (
self.id(), self.test_counter))
if self.interactive_ask_diff(sourcecodeask,
tmpfn, reffn, self.id()):
log.critical('user ask to use it as ref.')
move(tmpfn, reffn)
tmpfn = reffn
match = True
else:
self.test_failed = True
else:
match = True
# generate html
from os.path import join, dirname, exists, basename
from os import mkdir
build_dir = join(dirname(__file__), 'build')
if not exists(build_dir):
mkdir(build_dir)
copy(reffn, join(build_dir, 'ref_%s' % basename(reffn)))
if tmpfn != reffn:
copy(tmpfn, join(build_dir, 'test_%s' % basename(reffn)))
with open(join(build_dir, 'index.html'), 'at') as fd:
color = '#ffdddd' if not match else '#ffffff'
fd.write('<div style="background-color: %s">' % color)
fd.write('<h2>%s #%d</h2>' % (self.id(), self.test_counter))
fd.write('<table><tr><th>Reference</th>'
'<th>Test</th>'
'<th>Comment</th>')
fd.write('<tr><td><img src="ref_%s"/></td>' %
basename(reffn))
if tmpfn != reffn:
fd.write('<td><img src="test_%s"/></td>' %
basename(reffn))
else:
fd.write('<td>First time, no comparison.</td>')
fd.write('<td><pre>%s</pre></td>' % sourcecode)
fd.write('</table></div>')
finally:
try:
if reffn != tmpfn:
unlink(tmpfn)
except:
pass
EventLoop.stop()
def tearDown(self, fake=False):
'''When the test is finished, stop the application, and unbind our
current flip callback.
'''
from kivy.base import stopTouchApp
from kivy.core.window import Window
Window.unbind(on_flip=self.on_window_flip)
stopTouchApp()
if not fake and self.test_failed:
self.assertTrue(False)
super(GraphicUnitTest, self).tearDown()
def interactive_ask_ref(self, code, imagefn, testid):
from os import environ
if 'UNITTEST_INTERACTIVE' not in environ:
return True
from tkinter import Tk, Label, LEFT, RIGHT, BOTTOM, Button
from PIL import Image, ImageTk
self.retval = False
root = Tk()
def do_close():
root.destroy()
def do_yes():
self.retval = True
do_close()
image = Image.open(imagefn)
photo = ImageTk.PhotoImage(image)
        Label(root, text='The test %s\nhas no reference.' % testid).pack()
        Label(root, text='Use this image as a reference?').pack()
Label(root, text=code, justify=LEFT).pack(side=RIGHT)
Label(root, image=photo).pack(side=LEFT)
Button(root, text='Use as reference', command=do_yes).pack(side=BOTTOM)
Button(root, text='Discard', command=do_close).pack(side=BOTTOM)
root.mainloop()
return self.retval
def interactive_ask_diff(self, code, tmpfn, reffn, testid):
from os import environ
if 'UNITTEST_INTERACTIVE' not in environ:
return False
from tkinter import Tk, Label, LEFT, RIGHT, BOTTOM, Button
from PIL import Image, ImageTk
self.retval = False
root = Tk()
def do_close():
root.destroy()
def do_yes():
self.retval = True
do_close()
phototmp = ImageTk.PhotoImage(Image.open(tmpfn))
photoref = ImageTk.PhotoImage(Image.open(reffn))
        Label(root, text='The test %s\nhas generated a different '
              'image than the reference one.' % testid).pack()
        Label(root, text='Which one is correct?').pack()
Label(root, text=code, justify=LEFT).pack(side=RIGHT)
Label(root, image=phototmp).pack(side=RIGHT)
Label(root, image=photoref).pack(side=LEFT)
Button(root, text='Use the new image -->',
command=do_yes).pack(side=BOTTOM)
Button(root, text='<-- Use the reference',
command=do_close).pack(side=BOTTOM)
root.mainloop()
return self.retval
def advance_frames(self, count):
'''Render the new frames and:
* tick the Clock
* dispatch input from all registered providers
* flush all the canvas operations
* redraw Window canvas if necessary
'''
from kivy.base import EventLoop
for i in range(count):
EventLoop.idle()
class UnitTestTouch(MotionEvent):
'''Custom MotionEvent representing a single touch. Similar to `on_touch_*`
methods from the Widget class, this one introduces:
* touch_down
* touch_move
* touch_up
Create a new touch with::
touch = UnitTestTouch(x, y)
then you press it on the default position with::
touch.touch_down()
or move it or even release with these simple calls::
touch.touch_move(new_x, new_y)
touch.touch_up()
'''
def __init__(self, x, y):
'''Create a MotionEvent instance with X and Y of the first
position a touch is at.
'''
from kivy.base import EventLoop
self.eventloop = EventLoop
win = EventLoop.window
super(UnitTestTouch, self).__init__(
# device, (tuio) id, args
self.__class__.__name__, 99, {
"x": x / float(win.width),
"y": y / float(win.height),
}
)
def touch_down(self, *args):
self.eventloop.post_dispatch_input("begin", self)
def touch_move(self, x, y):
win = self.eventloop.window
self.move({
"x": x / float(win.width),
"y": y / float(win.height)
})
self.eventloop.post_dispatch_input("update", self)
def touch_up(self, *args):
self.eventloop.post_dispatch_input("end", self)
def depack(self, args):
# set MotionEvent to touch
self.is_touch = True
# set sx/sy properties to ratio (e.g. X / win.width)
self.sx = args['x']
self.sy = args['y']
# set profile to accept x, y and pos properties
self.profile = ['pos']
# run depack after we set the values
super(UnitTestTouch, self).depack(args)
class UTMotionEvent(MotionEvent):
def depack(self, args):
self.is_touch = True
self.sx = args['x']
self.sy = args['y']
self.profile = ['pos']
super(UTMotionEvent, self).depack(args)
def async_run(func=None, app_cls_func=None):
def inner_func(func):
if 'mock' == cgl_get_backend_name():
return pytest.mark.skip(
reason='Skipping because gl backend is set to mock')(func)
if sys.version_info[0] < 3 or sys.version_info[1] <= 5:
return pytest.mark.skip(
reason='Skipping because graphics tests are not supported on '
'py3.5, only on py3.6+')(func)
if app_cls_func is not None:
func = pytest.mark.parametrize(
"kivy_app", [[app_cls_func], ], indirect=True)(func)
if kivy_eventloop == 'asyncio':
try:
import pytest_asyncio
return pytest.mark.asyncio(func)
except ImportError:
return pytest.mark.skip(
reason='KIVY_EVENTLOOP == "asyncio" but '
'"pytest-asyncio" is not installed')(func)
elif kivy_eventloop == 'trio':
try:
import trio
from pytest_trio import trio_fixture
func._force_trio_fixture = True
return func
except ImportError:
return pytest.mark.skip(
reason='KIVY_EVENTLOOP == "trio" but '
'"pytest-trio" is not installed')(func)
else:
return pytest.mark.skip(
reason='KIVY_EVENTLOOP must be set to either of "asyncio" or '
'"trio" to run async tests')(func)
if func is None:
return inner_func
return inner_func(func)
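# A minimal usage sketch (assumption: the widget, coordinates and class names are
# illustrative only; real tests live in kivy/tests and ship reference screenshots).
class _ExampleGraphicTestSketch(GraphicUnitTest):
    def test_button_press_sketch(self):
        from kivy.uix.button import Button
        button = Button(text='hello')
        self.render(button)            # render and (optionally) screenshot after 1 frame
        touch = UnitTestTouch(160, 120)
        touch.touch_down()             # press in the middle of the 320x240 window
        touch.touch_up()
        self.advance_frames(1)         # let the event loop process the synthetic input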
|
"""A Minecraft remapper for already deobfuscated forge mod source code."""
__version__ = "1.1.0"
from minecraft_remapper.remapper import Remapper as Remapper
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.6 on 2016-10-13 15:51
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('payments', '0002_auto_20160718_2345'),
]
operations = [
migrations.CreateModel(
name='VitepayPayment',
fields=[
('payment_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='payments.Payment')),
('language_code', models.CharField(default=b'en', max_length=10)),
('currency_code', models.CharField(default=b'XOF', max_length=10)),
('country_code', models.CharField(default=b'ML', max_length=10)),
('order_id', models.CharField(max_length=10, null=True)),
('description', models.CharField(max_length=500, null=True)),
('amount_100', models.IntegerField(null=True)),
('buyer_ip_adress', models.CharField(max_length=200, null=True)),
('return_url', models.CharField(max_length=500, null=True)),
('decline_url', models.CharField(max_length=500, null=True)),
('cancel_url', models.CharField(max_length=500, null=True)),
('callback_url', models.CharField(max_length=500, null=True)),
('email', models.CharField(max_length=500, null=True)),
('p_type', models.CharField(default=b'orange_money', max_length=500)),
('payment_url', models.CharField(max_length=500, null=True)),
],
options={
'ordering': ('-created', '-updated'),
'verbose_name': 'Vitepay Payment',
'verbose_name_plural': 'Vitepay Payments',
},
bases=('payments.payment',),
),
]
|