from engine import *
# Pure one-suit "all chows" hand (ching yat sik ping wu): four chows and a pair, all in the Man suit.
chingyatsikpingwu = [
    SimpleTile(2, 'Man'), SimpleTile(3, 'Man'), SimpleTile(4, 'Man'),
    SimpleTile(1, 'Man'), SimpleTile(2, 'Man'), SimpleTile(3, 'Man'),
    SimpleTile(4, 'Man'), SimpleTile(5, 'Man'), SimpleTile(6, 'Man'),
    SimpleTile(2, 'Man'), SimpleTile(3, 'Man'), SimpleTile(4, 'Man'),
    SimpleTile(9, 'Man'), SimpleTile(9, 'Man'),
]
# "Thirteen orphans" hand: the 1 and 9 of each suit plus every honor tile (the north wind duplicated to form the pair).
supcharmyiu = [
    SimpleTile(1, 'Man'), SimpleTile(1, 'Bamboo'), SimpleTile(1, 'Circle'),
    SimpleTile(9, 'Man'), SimpleTile(9, 'Bamboo'), SimpleTile(9, 'Circle'),
    HonorTile.from_str('g'), HonorTile.from_str('r'), HonorTile.from_str('wh'),
    HonorTile.from_str('e'), HonorTile.from_str('s'), HonorTile.from_str('we'),
    HonorTile.from_str('n'), HonorTile.from_str('n'),
]
#print(supcharmyiu)
asetoftiles = FanCalculator()
asetoftiles.tiles = chingyatsikpingwu
print(asetoftiles.tiles)
print()
print(asetoftiles.all_com())
eatable, legit_hands = asetoftiles.legitimate_hands()
if eatable:
print('It is a legitimate hand with {} possible hand(s)!\n'.format(len(legit_hands)))
for legit_hand in legit_hands:
fan, reasons = asetoftiles.handtype_fan_calculator(legit_hand)
print('Total {} Fan\n'.format(fan))
print("The reasons are:")
for reason in reasons:
print(reason)
else:
print('There is no legitimate hand!')
|
# Generated by Django 3.1.3 on 2020-12-09 01:27
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('soulcalibur_vi', '0019_auto_20201205_0415'),
]
operations = [
migrations.AlterField(
model_name='move',
name='attack_name',
field=models.CharField(blank=True, max_length=100, null=True),
),
migrations.AlterField(
model_name='move',
name='command',
field=models.CharField(max_length=100),
),
]
|
"""
Django settings for beekeeper project on Heroku. For more info, see:
https://github.com/heroku/heroku-django-template
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
import dj_database_url
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
PROJECT_ROOT = os.path.dirname(os.path.abspath(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = os.environ['DJ_SECRET_KEY']
# SECURITY WARNING: don't run with debug turned on in production!
# os.getenv returns a string, so parse it explicitly; any non-empty string would otherwise be truthy.
DEBUG = os.getenv('DJ_DEBUG', 'False').lower() in ('true', '1', 'yes')
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
# Disable Django's own staticfiles handling in favour of WhiteNoise, for
# greater consistency between gunicorn and `./manage.py runserver`. See:
# http://whitenoise.evans.io/en/stable/django.html#using-whitenoise-in-development
'whitenoise.runserver_nostatic',
'django.contrib.staticfiles',
'beekeeper'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'whitenoise.middleware.WhiteNoiseMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'beekeeper.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
'debug': DEBUG,
},
},
]
WSGI_APPLICATION = 'beekeeper.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'US/Eastern'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Change 'default' database configuration with $DATABASE_URL.
DATABASES['default'].update(dj_database_url.config(conn_max_age=500))
# Honor the 'X-Forwarded-Proto' header for request.is_secure()
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
# Allow all host headers
ALLOWED_HOSTS = ['*']
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_ROOT = os.path.join(PROJECT_ROOT, 'staticfiles')
STATIC_URL = '/static/'
# Extra places for collectstatic to find static files.
STATICFILES_DIRS = [
os.path.join(PROJECT_ROOT, 'static'),
]
# Simplified static file serving.
# https://warehouse.python.org/project/whitenoise/
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
|
from maza.core.exploit import *
from maza.core.telnet.telnet_client import TelnetClient
from maza.resources import wordlists
class Exploit(TelnetClient):
__info__ = {
"name": "Telnet Default Creds",
"description": "Module performs dictionary attack with default credentials against Telnet service. "
"If valid credentials are found, they are displayed to the user.",
"authors": (
"Marcin Bury <marcin[at]threat9.com>", # routersploit module
),
"devices": (
"Multiple devices",
)
}
target = OptIP("", "Target IPv4, IPv6 address or file with ip:port (file://)")
port = OptPort(23, "Target Telnet port")
threads = OptInteger(8, "Number of threads")
defaults = OptWordlist(wordlists.defaults, "User:Pass or file with default credentials (file://)")
stop_on_success = OptBool(True, "Stop on first valid authentication attempt")
verbosity = OptBool(True, "Display authentication attempts")
def run(self):
self.credentials = []
self.attack()
@multi
def attack(self):
if not self.check():
return
print_status("Starting default credentials attack against Telnet service")
data = LockedIterator(self.defaults)
self.run_threads(self.threads, self.target_function, data)
if self.credentials:
print_success("Credentials found!")
headers = ("Target", "Port", "Service", "Username", "Password")
print_table(headers, *self.credentials)
else:
print_error("Credentials not found")
def target_function(self, running, data):
while running.is_set():
try:
username, password = data.next().split(":")
telnet_client = self.telnet_create()
if telnet_client.login(username, password, retries=3):
if self.stop_on_success:
running.clear()
self.credentials.append((self.target, self.port, self.target_protocol, username, password))
telnet_client.close()
except StopIteration:
break
def check(self):
telnet_client = self.telnet_create()
if telnet_client.test_connect():
print_status("Target exposes Telnet service", verbose=self.verbosity)
return True
print_status("Target does not expose Telnet service", verbose=self.verbosity)
return False
@mute
def check_default(self):
if self.check():
self.credentials = []
data = LockedIterator(self.defaults)
self.run_threads(self.threads, self.target_function, data)
if self.credentials:
return self.credentials
return None
|
# Copyright (c) 2016 Universidade Federal Fluminense (UFF)
# Copyright (c) 2016 Polytechnic Institute of New York University.
# This file is part of noWorkflow.
# Please, consult the license terms in the LICENSE file.
"""Trial Dot Object"""
from __future__ import (absolute_import, print_function,
division, unicode_literals)
import weakref
from future.utils import viewitems
from .base import Model
from . import FileAccess
from .graphs.dependency_graph import DependencyFilter, variable_id
from .graphs.dependency_graph import DotVisitor
from IPython import get_ipython  # used by the _repr_svg_/_repr_png_ hooks below (assumes IPython is available)
CALL_SCHEMA = "#3A85B9", "box", "white", "filled"
VAR_SCHEMA = "#85CBD0", "box", "black", "rounded,filled"
FILE_SCHEMA = "white", "box", "black", "rounded,filled"
BLACKBOX_SCHEMA = "black", "box", "grey", "filled"
GRAYBOX_SCHEMA = "grey", "box", "black", "filled"
IMPORT_SCHEMA = "#1B2881", "box", "#7AC5F9", "filled"
TYPES = {
"call": CALL_SCHEMA,
"normal": VAR_SCHEMA,
"virtual": VAR_SCHEMA,
"param": VAR_SCHEMA,
"import": IMPORT_SCHEMA,
"--blackbox--": BLACKBOX_SCHEMA,
"--graybox--": GRAYBOX_SCHEMA,
"access": FILE_SCHEMA,
}
class TrialDot(Model): # pylint: disable=too-many-instance-attributes
"""Handle Dot export"""
__modelname__ = "TrialDot"
def __init__(self, trial):
super(TrialDot, self).__init__()
self.trial = weakref.proxy(trial)
self.format = "svg"
self.value_length = 0
self.name_length = 55
self.fallback = None
self.run = True
def simulation(self):
"""Configure simulation graph"""
self.fallback = None
def prospective(self):
"""Configure prospective graph"""
self.fallback = None
def dependency(self):
"""Configure dependency graph"""
self.fallback = VAR_SCHEMA
def export_text(self):
"""Export facts from trial as text"""
dep_filter = self.trial.dependency_filter
if self.run:
dep_filter.run()
getattr(self, self.trial.dependency_config.mode)()
visitor = DotVisitor(self.fallback, self.name_length,
self.value_length, TYPES, dep_filter)
visitor.visit(dep_filter.main_cluster)
return "\n".join(visitor.result)
def _repr_svg_(self):
if self.format == "svg":
ipython = get_ipython()
return ipython.run_cell_magic(
"dot", "--format {}".format(self.format), self.export_text()
)
def _repr_png_(self):
if self.format == "png":
ipython = get_ipython()
return ipython.run_cell_magic(
"dot", "--format {}".format(self.format), self.export_text()
)
|
import pytest
import datetime
#from datetime import datetime
from freezerstate.statusupdate import StatusUpdate
@pytest.fixture
def statusobj():
obj = StatusUpdate(True, '8:00,9:30,21:00,26:99')
return obj
def test_update_initialization(statusobj):
assert len(statusobj.notification_times) == 3
def test_should_notify_should_be_false_if_time_not_in_list(statusobj):
test_time = datetime.time(10,30)
result = statusobj.should_notify(test_time)
assert result is False
def test_should_notify_should_be_true_if_time_is_in_list(statusobj):
test_time = datetime.time(9, 30, 0)
result = statusobj.should_notify(test_time)
assert result is True
def test_should_notify_should_be_false_if_now_is_not_in_list(statusobj):
test_time = datetime.datetime.now()
result = statusobj.should_notify(test_time)
assert result is False
|
import argparse
import asyncio
import html
import json
import logging
import os
import textwrap
import time
import xmltodict
from aiohttp import ClientSession, ClientConnectorError, ServerDisconnectedError, ContentTypeError
from articlemeta.client import RestfulClient
from datetime import datetime
from json import JSONDecodeError
from pyexpat import ExpatError
from pymongo import errors, MongoClient, uri_parser
from utils.string_processor import preprocess_author_name, preprocess_doi, preprocess_journal_title
from xylose.scielodocument import Article, Citation
DIR_DATA = os.environ.get('DIR_DATA', '/opt/data')
MONGO_STDCITS_COLLECTION = os.environ.get('MONGO_STDCITS_COLLECTION', 'standardized')
CROSSREF_URL_WORKS = os.environ.get('CROSSREF_URL_WORKS', 'https://api.crossref.org/works/{}')
CROSSREF_URL_OPENURL = os.environ.get('CROSSREF_URL_OPENURL', 'https://doi.crossref.org/openurl?')
CROSSREF_SEMAPHORE_LIMIT = int(os.environ.get('CROSSREF_SEMAPHORE_LIMIT', '20'))
class CrossrefAsyncCollector(object):
logging.basicConfig(level=logging.INFO)
def __init__(self, email: str, mongo_uri_std_cits=None):
self.email = email
if mongo_uri_std_cits:
try:
self.persist_mode = 'mongo'
mongo_col = uri_parser.parse_uri(mongo_uri_std_cits).get('collection')
if not mongo_col:
mongo_col = MONGO_STDCITS_COLLECTION
self.standardizer = MongoClient(mongo_uri_std_cits).get_database().get_collection(mongo_col)
total_docs = self.standardizer.count_documents({})
logging.info('There are {0} documents in the collection {1}'.format(total_docs, mongo_col))
except ConnectionError as e:
logging.error('ConnectionError %s' % mongo_uri_std_cits)
logging.error(e)
else:
self.persist_mode = 'json'
file_name_results = 'crossref-results-' + str(time.time()) + '.json'
self.path_results = os.path.join(DIR_DATA, file_name_results)
def extract_attrs(self, article: Article):
"""
Extrai os atributos de todas as referências citadas de um documento.
:param article: documento do qual serão extraídos os atributos das referências citadas
:return: dicionário de ids de citações e respectivos atributos
"""
cit_id_to_attrs = {}
if article.citations:
for cit in article.citations:
if cit.publication_type == 'article':
cit_id = self.mount_id(cit, article.collection_acronym)
cit_attrs = {}
if self.persist_mode == 'json':
cit_attrs = self._extract_cit_attrs(cit)
elif self.persist_mode == 'mongo':
cit_data = self.standardizer.find_one({'_id': cit_id})
if not cit_data or not cit_data.get('crossref'):
cit_attrs = self._extract_cit_attrs(cit)
if cit_attrs:
cit_id_to_attrs[cit_id] = cit_attrs
return cit_id_to_attrs
def _extract_cit_attrs(self, cit: Citation):
"""
Extrai os atributos de uma referência citada necessários para requisitar metadados CrossRef.
:param cit: referência citada
:return: dicionário de atributos para consulta no serviço CrossRef
"""
if cit.doi:
valid_doi = preprocess_doi(cit.doi)
if valid_doi:
return {'doi': valid_doi}
attrs = {}
if cit.first_author:
first_author_surname = cit.first_author.get('surname', '')
cleaned_author_surname = preprocess_author_name(first_author_surname)
if cleaned_author_surname:
attrs.update({'aulast': cleaned_author_surname})
journal_title = cit.source
if journal_title:
cleaned_journal_title = preprocess_journal_title(journal_title)
if cleaned_journal_title:
attrs.update({'title': cleaned_journal_title})
publication_date = html.unescape(cit.publication_date) if cit.publication_date else None
if publication_date and len(publication_date) >= 4:
publication_year = publication_date[:4]
if publication_year.isdigit():
attrs.update({'data': publication_year})
volume = html.unescape(cit.volume) if cit.volume else None
if volume:
attrs.update({'volume': volume})
issue = html.unescape(cit.issue) if cit.issue else None
if issue:
attrs.update({'issue': issue})
first_page = html.unescape(cit.first_page) if cit.first_page else None
if first_page:
attrs.update({'spage': first_page})
if attrs:
return attrs
def parse_crossref_openurl_result(self, text):
"""
Converte response.text para JSON com metadados obtidos do endpoint OPENURL.
:param response: resposta de requisição em formato de texto
:return: JSON com metadados obtidos do serviço CrossRef
"""
try:
raw = xmltodict.parse(text)
for v in raw.get('doi_records', {}).values():
metadata = v.get('crossref')
if metadata and 'error' not in metadata.keys():
owner = v.get('@owner')
if owner:
metadata.update({'owner': owner})
timestamp = v.get('@timestamp')
if timestamp:
metadata.update({'timestamp': timestamp})
journal_article = metadata.get('journal', {}).get('journal_article', {})
if 'citation_list' in journal_article:
del journal_article['citation_list']
return metadata
except ExpatError as e:
logging.warning("ExpatError {0}".format(text))
logging.warning(e)
def parse_crossref_works_result(self, raw_metadata):
"""
Limpa dicionário de metadados obtidos do endpoint WORKS.
Remove campo de referências
:param raw_metadata: resposta de requisição em formato de dicionário
:return: JSON com metadados obtidos do serviço Crossref
"""
raw_status = raw_metadata.get('status', '')
if raw_status == 'ok':
metadata = raw_metadata.get('message')
if metadata:
if 'reference' in metadata:
del metadata['reference']
return metadata
def mount_id(self, cit: Citation, collection: str):
"""
Monta o identificador de uma referência citada.
:param cit: referência citada
:param collection: coleção em que a referência foi citada
:return: código identificador da citação
"""
cit_id = cit.data['v880'][0]['_']
return '{0}-{1}'.format(cit_id, collection)
def save_crossref_metadata(self, id_to_metadata: dict):
"""
Persiste os metadados da referência citada.
:param id_to_metadata: dicionário com id da referência citada e seus respectivos metadados Crossref
"""
if self.persist_mode == 'json':
with open(self.path_results, 'a') as f:
json.dump(id_to_metadata, f)
f.write('\n')
elif self.persist_mode == 'mongo':
self.standardizer.update_one(filter={'_id': id_to_metadata['_id']},
update={'$set': {
'crossref': id_to_metadata['crossref'],
'update-date': datetime.now().strftime('%Y-%m-%d')
}},
upsert=True)
async def run(self, citations_attrs: dict):
sem = asyncio.Semaphore(CROSSREF_SEMAPHORE_LIMIT)
tasks = []
async with ClientSession(headers={'mailto': self.email}) as session:
for cit_id, attrs in citations_attrs.items():
if 'doi' in attrs:
url = CROSSREF_URL_WORKS.format(attrs['doi'])
mode = 'doi'
else:
url = CROSSREF_URL_OPENURL
for k, v in attrs.items():
if k != 'doi':
url += '&' + k + '=' + v
url += '&pid=' + self.email
url += '&format=unixref'
url += '&multihit=false'
mode = 'attrs'
task = asyncio.ensure_future(self.bound_fetch(cit_id, url, sem, session, mode))
tasks.append(task)
responses = asyncio.gather(*tasks)
await responses
async def bound_fetch(self, cit_id, url, semaphore, session, mode):
async with semaphore:
await self.fetch(cit_id, url, session, mode)
async def fetch(self, cit_id, url, session, mode):
try:
async with session.get(url) as response:
try:
logging.info('Collecting metadata for %s' % cit_id)
metadata = None  # stays None when the response body is empty
if mode == 'doi':
raw_metadata = await response.json(content_type=None)
if raw_metadata:
metadata = self.parse_crossref_works_result(raw_metadata)
else:
raw_metadata = await response.text()
if raw_metadata:
metadata = self.parse_crossref_openurl_result(raw_metadata)
if metadata:
id_to_metadata = {'_id': cit_id, 'crossref': metadata}
self.save_crossref_metadata(id_to_metadata)
except JSONDecodeError as e:
logging.warning('JSONDecodeError: %s' % cit_id)
logging.warning(e)
except TimeoutError as e:
logging.warning('TimeoutError [INNER]: %s' % cit_id)
logging.warning(e)
except ContentTypeError as e:
logging.warning('ContentTypeError: %s' % cit_id)
logging.warning(e)
except ServerDisconnectedError as e:
logging.warning('ServerDisconnectedError: %s' % cit_id)
logging.warning(e)
except TimeoutError as e:
logging.warning('TimeoutError [OUTER]: %s' % cit_id)
logging.warning(e)
except ClientConnectorError as e:
logging.warning('ClientConnectorError: %s' % cit_id)
logging.warning(e)
def format_date(date: datetime):
if not date:
return None
return date.strftime('%Y-%m-%d')
def main():
usage = "collect metadata from the Crossref Service"
parser = argparse.ArgumentParser(description=textwrap.dedent(usage))
parser.add_argument(
'-c', '--col',
default=None,
dest='col',
help='normalize cited references in an entire collection'
)
parser.add_argument(
'-f', '--from_date',
type=lambda x: datetime.strptime(x, '%Y-%m-%d'),
nargs='?',
help='collect metadata for cited references in documents published from a date (YYYY-MM-DD)'
)
parser.add_argument(
'-u', '--until_date',
type=lambda x: datetime.strptime(x, '%Y-%m-%d'),
nargs='?',
default=datetime.now(),
help='collect metadata for cited references in documents published until a date (YYYY-MM-DD)'
)
parser.add_argument(
'-i', '--document_id',
default=None,
dest='pid',
help='collect metadata for the cited references in a PID (document)'
)
parser.add_argument(
'--mongo_uri',
default=None,
dest='mongo_uri_std_cits',
help='mongo uri string in the format mongodb://[username:password@]host1[:port1][,...hostN[:portN]][/[defaultauthdb][?options]]'
)
parser.add_argument(
'-e', '--email',
required=True,
default=None,
dest='email',
help='an e-mail registered in the Crossref service'
)
args = parser.parse_args()
try:
art_meta = RestfulClient()
cac = CrossrefAsyncCollector(email=args.email, mongo_uri_std_cits=args.mongo_uri_std_cits)
cit_ids_to_attrs = {}
start_time = time.time()
if args.pid:
logging.info('Running in one PID mode')
document = art_meta.document(collection=args.col, code=args.pid)
if document:
logging.info('Extracting info from cited references in %s ' % document.publisher_id)
cit_ids_to_attrs = cac.extract_attrs(document)
else:
logging.info('Running in many PIDs mode')
for document in art_meta.documents(collection=args.col,
from_date=format_date(args.from_date),
until_date=format_date(args.until_date)):
logging.info('Extracting info from cited references in %s ' % document.publisher_id)
cit_ids_to_attrs.update(cac.extract_attrs(document))
loop = asyncio.get_event_loop()
future = asyncio.ensure_future(cac.run(cit_ids_to_attrs))
loop.run_until_complete(future)
end_time = time.time()
logging.info('Duration {0} seconds.'.format(end_time - start_time))
except KeyboardInterrupt:
print("Interrupt by user")
|
# Written by Nikhil D'Souza
# Data from http://lib.stat.cmu.edu/datasets/boston
# This neural network predicts the values of houses in Boston based on:
# 1. per capita crime rate by town
# 2. proportion of residential land zoned for lots over 25,000 sq.ft.
# 3. proportion of non-retail business acres per town
# 4. Charles River dummy variable (= 1 if tract bounds river; 0 otherwise)
# 5. nitric oxides concentration (parts per 10 million)
# 6. average number of rooms per dwelling
# 7. proportion of owner-occupied units built prior to 1940
# 8. weighted distances to five Boston employment centres
# 9. index of accessibility to radial highways
# 10. full-value property-tax rate per $10,000
# 11. pupil-teacher ratio by town
# 12. 1000(Bk - 0.63)^2 where Bk is the proportion of blacks by town
# 13. % lower status of the population
from __future__ import print_function
from keras.datasets import boston_housing
from keras.models import Sequential
from keras.layers import Dense
import coremltools
import keras
(x_train, y_train), (x_val, y_val) = boston_housing.load_data()
# Model architecture
model = Sequential()
model.add(Dense(20, input_dim=13, kernel_initializer='normal', activation='relu'))
model.add(Dense(1, kernel_initializer='normal'))
# Callback list (saves models at checkpoints)
callbacks_list = [
keras.callbacks.ModelCheckpoint(
filepath='best_model.{epoch:02d}.h5',
monitor='val_loss', save_best_only=True),
]
# Compile model
model.compile(loss='mean_squared_error', optimizer='adam')
# Train model
batch_size = 5
epochs = 100
model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, callbacks=callbacks_list, validation_data=(x_val, y_val), verbose=1)
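# Optional sanity check (not part of the original script): report the final validation error.
# model.evaluate returns the mean squared error configured in compile() above.
val_mse = model.evaluate(x_val, y_val, verbose=0)
print('Validation MSE: {:.2f}'.format(val_mse))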
# Create CoreML model (note: this assumes the epoch-48 checkpoint was written by the callback above)
coreml_mnist = coremltools.converters.keras.convert(
'best_model.48.h5',
input_names=['input'],
output_names=['output']
)
coreml_mnist.author = 'Nikhil DSouza'
coreml_mnist.license = 'Nikhil'
coreml_mnist.short_description = 'Boston housing price regression'
coreml_mnist.save('BostonClassifier.mlmodel')
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
__path__ = __import__("pkgutil").extend_path(__path__, __name__) # type: ignore
|
# -*- coding: utf-8 -*-
import numpy as np
import tensorflow as tf
# Default hyperparameters
hparams = tf.contrib.training.HParams(
# Comma-separated list of cleaners to run on text prior to training and eval. For non-English
# text, you may want to use "basic_cleaners" or "transliteration_cleaners".
cleaners='transliteration_cleaners',
# If you only have 1 GPU or want to use only one GPU, please set num_gpus=0 and specify the GPU idx on run. example:
# example: 1 GPU of index 2 (train on "/gpu2" only): CUDA_VISIBLE_DEVICES=2 python train.py --model='Tacotron' --hparams='tacotron_gpu_start_idx=2'
# If you want to train on multiple GPUs, simply specify the number of GPUs available, and the idx of the first GPU to use. example:
# example 4 GPUs starting from index 0 (train on "/gpu0"->"/gpu3"): python train.py --model='Tacotron' --hparams='tacotron_num_gpus=4, tacotron_gpu_start_idx=0'
# The hparams arguments can be directly modified on this hparams.py file instead of being specified on run if preferred!
# If one wants to train both Tacotron and WaveNet in parallel (provided WaveNet will be trained on True mel spectrograms), one needs to specify different GPU idxes.
# example Tacotron+WaveNet on a machine with 4 or more GPUs. Two GPUs for each model:
# CUDA_VISIBLE_DEVICES=0,1 python train.py --model='Tacotron' --hparams='tacotron_num_gpus=2'
# CUDA_VISIBLE_DEVICES=2,3 python train.py --model='WaveNet' --hparams='wavenet_num_gpus=2'
# IMPORTANT NOTES: The Multi-GPU performance highly depends on your hardware and optimal parameters change between rigs. Default are optimized for servers.
# If using N GPUs, please multiply the tacotron_batch_size by N below in the hparams! (tacotron_batch_size = 32 * N)
# Never use lower batch size than 32 on a single GPU!
# Same applies for Wavenet: wavenet_batch_size = 8 * N (wavenet_batch_size can be smaller than 8 if GPU is having OOM, minimum 2)
# Please also apply the synthesis batch size modification likewise. (if N GPUs are used for synthesis, minimal batch size must be N, minimum of 1 sample per GPU)
# We did not add an automatic multi-GPU batch size computation to avoid confusion in the user's mind and to provide more control to the user for
# resources related decisions.
# Acknowledgement:
# Many thanks to @MlWoo for his awesome work on multi-GPU Tacotron which showed to work a little faster than the original
# pipeline for a single GPU as well. Great work!
# Hardware setup: Default supposes user has only one GPU: "/gpu:0" (Both Tacotron and WaveNet can be trained on multi-GPU: data parallelization)
# Synthesis also uses the following hardware parameters for multi-GPU parallel synthesis.
tacotron_num_gpus=1, # Determines the number of gpus in use for Tacotron training.
wavenet_num_gpus=1, # Determines the number of gpus in use for WaveNet training.
split_on_cpu=False,
# Determines whether to split data on CPU or on first GPU. This is automatically True when more than 1 GPU is used.
# (Recommend: False on slow CPUs/Disks, True otherwise for small speed boost)
###########################################################################################################################################
# Audio
# Audio parameters are the most important parameters to tune when using this work on your personal data. Below are the beginner steps to adapt
# this work to your personal data:
# 1- Determine your data sample rate: First you need to determine your audio sample_rate (how many samples are in a second of audio). This can be done using sox: "sox --i <filename>"
# (For this small tuto, I will consider 24kHz (24000 Hz), and defaults are 22050Hz, so there are plenty of examples to refer to)
# 2- set sample_rate parameter to your data correct sample rate
# 3- Fix win_size and hop_size accordingly: (Supposing you will follow our advice: 50ms window_size, and 12.5ms frame_shift(hop_size))
# a- win_size = 0.05 * sample_rate. In the tuto example, 0.05 * 24000 = 1200
# b- hop_size = 0.25 * win_size. Also equal to 0.0125 * sample_rate. In the tuto example, 0.25 * 1200 = 0.0125 * 24000 = 300 (Can set frame_shift_ms=12.5 instead)
# 4- Fix n_fft, num_freq and upsample_scales parameters accordingly.
# a- n_fft can be either equal to win_size or the first power of 2 that comes after win_size. I usually recommend using the latter
# to be more consistent with signal processing friends. No big difference to be seen however. For the tuto example: n_fft = 2048 = 2**11
# b- num_freq = (n_fft / 2) + 1. For the tuto example: num_freq = 2048 / 2 + 1 = 1024 + 1 = 1025.
# c- For WaveNet, upsample_scales products must be equal to hop_size. For the tuto example: upsample_scales=[15, 20] where 15 * 20 = 300
# it is also possible to use upsample_scales=[3, 4, 5, 5] instead. One must only keep in mind that upsample_kernel_size[0] = 2*upsample_scales[0]
# so the training segments should be long enough (2.8~3x upsample_scales[0] * hop_size or longer) so that the first kernel size can see the middle
# of the samples efficiently. The length of WaveNet training segments is under the parameter "max_time_steps".
# 5- Finally comes the silence trimming. This is very much data dependent, so I suggest trying preprocessing (or part of it, ctrl-C to stop), then use the
# .ipynb provided in the repo to listen to some inverted mel/linear spectrograms. That will first give you some idea about your above parameters, and
# it will also give you an idea about trimming. If silences persist, try reducing trim_top_db slowly. If samples are trimmed mid words, try increasing it.
# 6- If audio quality is too metallic or fragmented (or if linear spectrogram plots are showing black silent regions on top), then restart from step 2.
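# Worked check against the defaults below (illustrative only, derived from the recipe above):
# with sample_rate=22050, win_size ~= 0.05 * 22050 ~= 1100 and hop_size = 0.25 * win_size = 275 (~12.5 ms);
# n_fft = 2048 is the first power of 2 above win_size, so num_freq = 2048 / 2 + 1 = 1025;
# and upsample_scales=[11, 25] satisfies the WaveNet constraint since 11 * 25 = 275 = hop_size.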
num_mels=80, # Number of mel-spectrogram channels and local conditioning dimensionality
num_freq=1025, # (= n_fft / 2 + 1) only used when adding linear spectrograms post processing network
rescale=True, # Whether to rescale audio prior to preprocessing
rescaling_max=0.999, # Rescaling value
# train samples of lengths between 3sec and 14sec are more than enough to make a model capable of generating consistent speech.
clip_mels_length=True,
# For cases of OOM (Not really recommended, only use if facing unsolvable OOM errors, also consider clipping your samples to smaller chunks)
max_mel_frames=800,
# Only relevant when clip_mels_length = True, please only use after trying output_per_steps=3 and still getting OOM errors.
output_per_steps=3,
# Use LWS (https://github.com/Jonathan-LeRoux/lws) for STFT and phase reconstruction
# It's preferred to set True to use with https://github.com/r9y9/wavenet_vocoder
# Does not work if n_fft is not a multiple of hop_size!!
use_lws=False,
# Only set to True if using WaveNet; no difference in performance is observed in either case.
silence_threshold=2, # silence threshold used for sound trimming for wavenet preprocessing
# Mel spectrogram
n_fft=2048, # Extra window size is filled with 0 paddings to match this parameter
hop_size=275, # For 22050Hz, 275 ~= 12.5 ms (0.0125 * sample_rate)
win_size=1100, # For 22050Hz, 1100 ~= 50 ms (If None, win_size = n_fft) (0.05 * sample_rate)
sample_rate=22050, # 22050 Hz (corresponding to ljspeech dataset) (sox --i <filename>)
frame_shift_ms=None, # Can replace hop_size parameter. (Recommended: 12.5)
magnitude_power=2., # The power of the spectrogram magnitude (1. for energy, 2. for power)
# M-AILABS (and other datasets) trim params (these parameters are usually correct for any data, but definitely must be tuned for specific speakers)
trim_silence=True, # Whether to clip silence in Audio (at beginning and end of audio only, not the middle)
trim_fft_size=2048, # Trimming window size
trim_hop_size=512, # Trimming hop length
trim_top_db=40, # Trimming db difference from reference db (smaller==harder trim.)
# Mel and Linear spectrograms normalization/scaling and clipping
signal_normalization=True,
# Whether to normalize mel spectrograms to some predefined range (following below parameters)
allow_clipping_in_normalization=True, # Only relevant if mel_normalization = True
symmetric_mels=True,
# Whether to scale the data to be symmetric around 0. (Also multiplies the output range by 2, faster and cleaner convergence)
max_abs_value=4.,
# max absolute value of data. If symmetric, data will be [-max, max] else [0, max] (Must not be too big to avoid gradient explosion,
# not too small for fast convergence)
normalize_for_wavenet=True, # whether to rescale to [0, 1] for wavenet. (better audio quality)
clip_for_wavenet=True,
# whether to clip [-max, max] before training/synthesizing with wavenet (better audio quality)
wavenet_pad_sides=1, # Can be 1 or 2. 1 for pad right only, 2 for both sides padding.
# Contribution by @begeekmyfriend
# Spectrogram Pre-Emphasis (Lfilter: Reduce spectrogram noise and helps model certitude levels. Also allows for better G&L phase reconstruction)
preemphasize=True, # whether to apply filter
preemphasis=0.97, # filter coefficient.
# Limits
min_level_db=-100,
ref_level_db=20,
fmin=55,
# Set this to 55 if your speaker is male! if female, 95 should help taking off noise. (To test depending on dataset. Pitch info: male~[65, 260], female~[100, 525])
fmax=7600, # To be increased/reduced depending on data.
# Griffin Lim
power=1.5, # Only used in G&L inversion, usually values between 1.2 and 1.5 are a good choice.
griffin_lim_iters=60, # Number of G&L iterations, typically 30 is enough but we use 60 to ensure convergence.
GL_on_GPU=True,
# Whether to use G&L GPU version as part of tensorflow graph. (Usually much faster than CPU but slightly worse quality too).
###########################################################################################################################################
# Tacotron
# Model general type
outputs_per_step=3,
# number of frames to generate at each decoding step (increase to speed up computation and allows for higher batch size, decreases G&L audio quality)
stop_at_any=True,
# Determines whether the decoder should stop when predicting <stop> to any frame or to all of them (True works pretty well)
batch_norm_position='after',
# Can be in ('before', 'after'). Determines whether we use batch norm before or after the activation function (relu). Matter for debate.
clip_outputs=True,
# Whether to clip spectrograms to T2_output_range (even in loss computation). ie: Don't penalize model for exceeding output range and bring back to borders.
lower_bound_decay=0.1,
# Small regularizer for noise synthesis by adding small range of penalty for silence regions. Set to 0 to clip in Tacotron range.
# Input parameters
embedding_dim=512, # dimension of embedding space
# Encoder parameters
enc_conv_num_layers=3, # number of encoder convolutional layers
enc_conv_kernel_size=(5,), # size of encoder convolution filters for each layer
enc_conv_channels=512, # number of encoder convolutions filters for each layer
encoder_lstm_units=256, # number of lstm units for each direction (forward and backward)
# Attention mechanism
smoothing=False, # Whether to smooth the attention normalization function
attention_dim=128, # dimension of attention space
attention_filters=32, # number of attention convolution filters
attention_kernel=(31,), # kernel size of attention convolution
cumulative_weights=True,
# Whether to cumulate (sum) all previous attention weights or simply feed previous weights (Recommended: True)
# Attention synthesis constraints
# "Monotonic" constraint forces the model to only look at the forwards attention_win_size steps.
# "Window" allows the model to look at attention_win_size neighbors, both forward and backward steps.
synthesis_constraint=False,
# Whether to use attention windows constraints in synthesis only (Useful for long utterances synthesis)
synthesis_constraint_type='window', # can be in ('window', 'monotonic').
attention_win_size=7,
# Side of the window. Current step does not count. If mode is window and attention_win_size is not even, the 1 extra is provided to the backward part of the window.
# Decoder
prenet_layers=[256, 256], # number of layers and number of units of prenet
decoder_layers=2, # number of decoder lstm layers
decoder_lstm_units=1024, # number of decoder lstm units on each layer
max_iters=10000, # Max decoder steps during inference (Just for safety from infinite loop cases)
# Residual postnet
postnet_num_layers=5, # number of postnet convolutional layers
postnet_kernel_size=(5,), # size of postnet convolution filters for each layer
postnet_channels=512, # number of postnet convolution filters for each layer
# CBHG mel->linear postnet
cbhg_kernels=8,
# All kernel sizes from 1 to cbhg_kernels will be used in the convolution bank of CBHG to act as "K-grams"
cbhg_conv_channels=128, # Channels of the convolution bank
cbhg_pool_size=2, # pooling size of the CBHG
cbhg_projection=256, # projection channels of the CBHG (1st projection, 2nd is automatically set to num_mels)
cbhg_projection_kernel_size=3, # kernel_size of the CBHG projections
cbhg_highwaynet_layers=4, # Number of HighwayNet layers
cbhg_highway_units=128, # Number of units used in HighwayNet fully connected layers
cbhg_rnn_units=128,
# Number of GRU units used in bidirectional RNN of CBHG block. CBHG output is 2x rnn_units in shape
# Loss params
mask_encoder=True,
# whether to mask encoder padding while computing attention. Set to True for better prosody but slower convergence.
mask_decoder=False,
# Whether to use loss mask for padded sequences (if False, <stop_token> loss function will not be weighted, else recommended pos_weight = 20)
cross_entropy_pos_weight=1,
# Use class weights to reduce the stop token classes imbalance (by adding more penalty on False Negatives (FN)) (1 = disabled)
predict_linear=True,
# Whether to add a post-processing network to the Tacotron to predict linear spectrograms (True mode Not tested!!)
###########################################################################################################################################
# Wavenet
# Input type:
# 1. raw [-1, 1]
# 2. mulaw [-1, 1]
# 3. mulaw-quantize [0, mu]
# If input_type is raw or mulaw, network assumes scalar input and
# discretized mixture of logistic distributions output, otherwise one-hot
# input and softmax output are assumed.
# Model general type
input_type="raw",
# Raw has better quality but harder to train. mulaw-quantize is easier to train but has lower quality.
quantize_channels=2 ** 16,
# 65536 (16-bit) (raw) or 256 (8-bit) (mulaw or mulaw-quantize) // number of classes = 256 <=> mu = 255
use_bias=True, # Whether to use bias in convolutional layers of the Wavenet
legacy=True,
# Whether to use legacy mode: Multiply all skip outputs but the first one with sqrt(0.5) (True for more early training stability, especially for large models)
residual_legacy=True,
# Whether to scale residual blocks outputs by a factor of sqrt(0.5) (True for input variance preservation early in training and better overall stability)
# Model Losses parmeters
# Minimal scales ranges for MoL and Gaussian modeling
log_scale_min=float(np.log(1e-14)), # Mixture of logistic distributions minimal log scale
log_scale_min_gauss=float(np.log(1e-7)), # Gaussian distribution minimal allowed log scale
# Loss type
cdf_loss=False,
# Whether to use CDF loss in Gaussian modeling. Advantages: non-negative loss term and more training stability. (Automatically True for MoL)
# model parameters
# To use Gaussian distribution as output distribution instead of mixture of logistics, set "out_channels = 2" instead of "out_channels = 10 * 3". (UNDER TEST)
out_channels=2,
# This should be equal to quantize channels when input type is 'mulaw-quantize' else: num_distributions * 3 (prob, mean, log_scale).
layers=20, # Number of dilated convolutions (Default: Simplified Wavenet of Tacotron-2 paper)
stacks=2, # Number of dilated convolution stacks (Default: Simplified Wavenet of Tacotron-2 paper)
residual_channels=128, # Number of residual block input/output channels.
gate_channels=256, # split in 2 in gated convolutions
skip_out_channels=128, # Number of residual block skip convolution channels.
kernel_size=3, # The number of inputs to consider in dilated convolutions.
# Upsampling parameters (local conditioning)
cin_channels=80, # Set this to -1 to disable local conditioning, else it must be equal to num_mels!!
# Upsample types: ('1D', '2D', 'Resize', 'SubPixel', 'NearestNeighbor')
# All upsampling initialization/kernel_size are chosen to omit checkerboard artifacts as much as possible. (Resize is designed to omit that by nature).
# To be specific, all initial upsample weights/biases (when NN_init=True) ensure that the upsampling layers act as a "Nearest neighbor upsample" of size "hop_size" (checkerboard free).
# 1D spans all frequency bands for each frame (channel-wise) while 2D spans "freq_axis_kernel_size" bands at a time. Both are vanilla transpose convolutions.
# Resize is a 2D convolution that follows a Nearest Neighbor (NN) resize. For reference, this is: "NN resize->convolution".
# SubPixel (2D) is the ICNR version (initialized to be equivalent to "convolution->NN resize") of Sub-Pixel convolutions. also called "checkered artifact free sub-pixel conv".
# Finally, NearestNeighbor is a non-trainable upsampling layer that just expands each frame (or "pixel") to the equivalent hop size. Ignores all upsampling parameters.
upsample_type='SubPixel',
# Type of the upsampling deconvolution. Can be ('1D' or '2D', 'Resize', 'SubPixel' or simple 'NearestNeighbor').
upsample_activation='Relu', # Activation function used during upsampling. Can be ('LeakyRelu', 'Relu' or None)
upsample_scales=[11, 25], # prod(upsample_scales) should be equal to hop_size
freq_axis_kernel_size=3,
# Only used for 2D upsampling types. This is the number of frequency bands that are spanned at a time for each frame.
leaky_alpha=0.4, # slope of the negative portion of LeakyRelu (LeakyRelu: y=x if x>0 else y=alpha * x)
NN_init=True,
# Determines whether we want to initialize upsampling kernels/biases in a way to ensure upsample is initialize to Nearest neighbor upsampling. (Mostly for debug)
NN_scaler=0.3,
# Determines the initial Nearest Neighbor upsample values scale. i.e: upscaled_input_values = input_values * NN_scaler (1. to disable)
# global conditioning
gin_channels=-1,
# Set this to -1 to disable global conditioning, Only used for multi speaker dataset. It defines the depth of the embeddings (Recommended: 16)
use_speaker_embedding=True, # whether to make a speaker embedding
n_speakers=5, # number of speakers (rows of the embedding)
speakers_path=None,
# Defines path to speakers metadata. Can be either in "speaker\tglobal_id" (with header) tsv format, or a single column tsv with speaker names. If None, use "speakers".
speakers=['speaker0', 'speaker1',
# List of speakers used for embeddings visualization. (Consult "wavenet_vocoder/train.py" if you want to modify the speaker names source).
'speaker2', 'speaker3', 'speaker4'],
# Must be consistent with speaker ids specified for global conditioning for correct visualization.
###########################################################################################################################################
# Tacotron Training
# Reproduction seeds
tacotron_random_seed=5339,
# Determines initial graph and operations (i.e: model) random state for reproducibility
tacotron_data_random_state=1234, # random state for train test split repeatability
# performance parameters
tacotron_swap_with_cpu=False,
# Whether to use cpu as support to gpu for decoder computation (Not recommended: may cause major slowdowns! Only use when critical!)
# train/test split ratios, mini-batches sizes
tacotron_batch_size=32, # number of training samples on each training steps
# Tacotron Batch synthesis supports ~16x the training batch size (no gradients during testing).
# Training Tacotron with unmasked paddings makes it aware of them, which makes synthesis times different from training. We thus recommend masking the encoder.
tacotron_synthesis_batch_size=1,
# DO NOT MAKE THIS BIGGER THAN 1 IF YOU DIDN'T TRAIN TACOTRON WITH "mask_encoder=True"!!
tacotron_test_size=0.05,
# % of data to keep as test data, if None, tacotron_test_batches must be not None. (5% is enough to have a good idea about overfit)
tacotron_test_batches=None, # number of test batches.
# Learning rate schedule
tacotron_decay_learning_rate=True, # boolean, determines if the learning rate will follow an exponential decay
tacotron_start_decay=40000, # Step at which learning decay starts
tacotron_decay_steps=18000, # Determines the learning rate decay slope (UNDER TEST)
tacotron_decay_rate=0.5, # learning rate decay rate (UNDER TEST)
tacotron_initial_learning_rate=1e-3, # starting learning rate
tacotron_final_learning_rate=1e-4, # minimal learning rate
# Optimization parameters
tacotron_adam_beta1=0.9, # AdamOptimizer beta1 parameter
tacotron_adam_beta2=0.999, # AdamOptimizer beta2 parameter
tacotron_adam_epsilon=1e-6, # AdamOptimizer Epsilon parameter
# Regularization parameters
tacotron_reg_weight=1e-6, # regularization weight (for L2 regularization)
tacotron_scale_regularization=False,
# Whether to rescale regularization weight to adapt for outputs range (used when reg_weight is high and biasing the model)
tacotron_zoneout_rate=0.1, # zoneout rate for all LSTM cells in the network
tacotron_dropout_rate=0.5, # dropout rate for all convolutional layers + prenet
tacotron_clip_gradients=True, # whether to clip gradients
# Evaluation parameters
tacotron_natural_eval=False,
# Whether to use 100% natural eval (to evaluate Curriculum Learning performance) or with same teacher-forcing ratio as in training (just for overfit)
# Decoder RNN learning can be done in one of two ways:
# Teacher Forcing: vanilla teacher forcing (usually with ratio = 1). mode='constant'
# Scheduled Sampling Scheme: From Teacher-Forcing to sampling from previous outputs is function of global step. (teacher forcing ratio decay) mode='scheduled'
# The second approach is inspired by:
# Bengio et al. 2015: Scheduled Sampling for Sequence Prediction with Recurrent Neural Networks.
# Can be found under: https://arxiv.org/pdf/1506.03099.pdf
tacotron_teacher_forcing_mode='constant',
# Can be ('constant' or 'scheduled'). 'scheduled' mode applies a cosine teacher forcing ratio decay. (Preference: scheduled)
tacotron_teacher_forcing_ratio=1.,
# Value from [0., 1.], 0.=0%, 1.=100%, determines the % of times we force next decoder inputs, Only relevant if mode='constant'
tacotron_teacher_forcing_init_ratio=1., # initial teacher forcing ratio. Relevant if mode='scheduled'
tacotron_teacher_forcing_final_ratio=0.,
# final teacher forcing ratio. (Set None to use alpha instead) Relevant if mode='scheduled'
tacotron_teacher_forcing_start_decay=10000,
# starting point of teacher forcing ratio decay. Relevant if mode='scheduled'
tacotron_teacher_forcing_decay_steps=40000,
# Determines the teacher forcing ratio decay slope. Relevant if mode='scheduled'
tacotron_teacher_forcing_decay_alpha=None,
# teacher forcing ratio decay rate. Defines the final tfr as a ratio of initial tfr. Relevant if mode='scheduled'
# Speaker adaptation parameters
tacotron_fine_tuning=False,
# Set to True to freeze encoder and only keep training pretrained decoder. Used for speaker adaptation with small data.
###########################################################################################################################################
# Wavenet Training
wavenet_random_seed=5339, # S=5, E=3, D=9 :)
wavenet_data_random_state=1234, # random state for train test split repeatability
# performance parameters
wavenet_swap_with_cpu=False,
# Whether to use cpu as support to gpu for synthesis computation (while loop).(Not recommended: may cause major slowdowns! Only use when critical!)
# train/test split ratios, mini-batches sizes
wavenet_batch_size=8, # batch size used to train wavenet.
# During synthesis, there is no max_time_steps limitation so the model can sample much longer audio than 8k(or 13k) steps. (Audio can go up to 500k steps, equivalent to ~21sec on 24kHz)
# Usually your GPU can handle ~2x wavenet_batch_size during synthesis for the same memory amount during training (because no gradients to keep and ops to register for backprop)
wavenet_synthesis_batch_size=10 * 2,
# This ensures that wavenet synthesis goes up to 4x~8x faster when synthesizing multiple sentences. Watch out for OOM with long audios.
wavenet_test_size=None, # % of data to keep as test data, if None, wavenet_test_batches must be not None
wavenet_test_batches=1, # number of test batches.
# Learning rate schedule
wavenet_lr_schedule='exponential', # learning rate schedule. Can be ('exponential', 'noam')
wavenet_learning_rate=1e-3, # wavenet initial learning rate
wavenet_warmup=float(4000), # Only used with 'noam' scheme. Defines the number of ascending learning rate steps.
wavenet_decay_rate=0.5, # Only used with 'exponential' scheme. Defines the decay rate.
wavenet_decay_steps=200000, # Only used with 'exponential' scheme. Defines the decay steps.
# Optimization parameters
wavenet_adam_beta1=0.9, # Adam beta1
wavenet_adam_beta2=0.999, # Adam beta2
wavenet_adam_epsilon=1e-6, # Adam Epsilon
# Regularization parameters
wavenet_clip_gradients=True, # Whether to clip the gradients during wavenet training.
wavenet_ema_decay=0.9999, # decay rate of exponential moving average
wavenet_weight_normalization=False,
# Whether to Apply Saliman & Kingma Weight Normalization (reparametrization) technique. (Used in DeepVoice3, not critical here)
wavenet_init_scale=1.,
# Only relevant if weight_normalization=True. Defines the initial scale in data dependent initialization of parameters.
wavenet_dropout=0.05, # drop rate of wavenet layers
wavenet_gradient_max_norm=100.0, # Norm used to clip wavenet gradients
wavenet_gradient_max_value=5.0, # Value used to clip wavenet gradients
# training samples length
max_time_sec=None, # Max time of audio for training. If None, we use max_time_steps.
max_time_steps=11000,
# Max time steps in audio used to train wavenet (decrease to save memory) (Recommend: 8000 on modest GPUs, 13000 on stronger ones)
# Evaluation parameters
wavenet_natural_eval=False,
# Whether to use 100% natural eval (to evaluate autoregressivity performance) or with teacher forcing to evaluate overfit and model consistency.
# Tacotron-2 integration parameters
train_with_GTA=True, # Whether to use GTA mels to train WaveNet instead of ground truth mels.
###########################################################################################################################################
# Eval/Debug parameters
# Eval sentences (if no eval text file was specified during synthesis, these sentences are used for eval)
sentences=[
'Handballer setzen mit Training aus',
'Weil die Einsicht bei den Verantwortlichen spät kam',
'Die geforderte Kehrtwende war am Ende unausweichlich',
'Wir haben uns am Nachmittag dazu entschieden',
'Er plädierte zugleich für eine Absage der Ende März geplanten',
'Für die prestigeträchtigen Spiele hat Japan aus der Staatskasse Milliarden investiert und laut Aussagen',
'Mit einem eindringlichen Appell an Kunden und Politik',
'Knapp eine halbe Million Nutzerinnen und Nutzer sahen binnen'
],
# Wavenet Debug
wavenet_synth_debug=False, # Set True to use target as debug in WaveNet synthesis.
wavenet_debug_wavs=['training_data/audio/audio-alter_afrikaner_01_f000008.npy'],
# Path to debug audios. Must be multiple of wavenet_num_gpus.
wavenet_debug_mels=['training_data/mels/mel-alter_afrikaner_01_f000008.npy'],
# Path to corresponding mels. Must be of same length and order as wavenet_debug_wavs.
)
def hparams_debug_string():
values = hparams.values()
hp = [' %s: %s' % (name, values[name]) for name in sorted(values) if name != 'sentences']
return 'Hyperparameters:\n' + '\n'.join(hp)
|
API_GET_TOKEN = 'http://sign.vsdouyin.com/api/token/gen/'
API_EP_DOUYIN = "https://sign.vsdouyin.com/api"
API_DEVICE_REGISTER = "https://log.snssdk.com/service/2/device_register/"
# Generate a token dedicated to the signing server
ROUTE_GEN_TOKEN = "token/gen"
ROUTE_INFO_TOKEN = "token/info"
# Douyin-related entry points
ROUTE_SIGN_DOUYIN = "653d33c/sign"
ROUTE_CRYPT_DOUYIN = "653d33c/crypt"
# Operation-related routes
FEED_OPT = "v1/feed"
USER_INFO_OPT = "v1/user"
USER_FANS_OPT = "v1/user/follower/list"
USER_POSTS_OPT = "v1/aweme/post"
USER_LIKE_OPT = "v1/aweme/favorite"
VIDEO_COMMENTS_OPT = "v1/comment/list"
HEADERS = {
'Content-Type': 'application/octet-stream;tt-data=a',
'sdk-version': '1',
'user-agent': 'okhttp/3.10.0.1',
}
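# Illustrative composition only (assumption, not part of the original constants):
# request URLs are presumably built by joining the API root with a route, e.g.
#   feed_url = '{}/{}'.format(API_EP_DOUYIN, FEED_OPT)  # -> 'https://sign.vsdouyin.com/api/v1/feed'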
|
#!/usr/bin/env python
import json
import os
import re
import sys
import tarfile
from urllib.request import urlretrieve
def main():
# Read in given out_file and create target directory for file download
with open(sys.argv[1]) as fh:
params = json.load(fh)
target_directory = params['output_data'][0]['extra_files_path']
os.mkdir(target_directory)
# Process parameters for metadata and file download
url = params['param_dict']['url'].rstrip("/") + "/" + params['param_dict']['file_name'].lstrip("/")
m = re.search(r"(.*?)(merged|refseq)?_vep_(\d+?)_", params['param_dict']['file_name'])
version = str(m.group(3))
cache_type = m.group(2) if m.group(2) else "default"
species = m.group(1).rstrip("_")
display_name = f"{species.capitalize().replace('_', ' ')} {params['param_dict']['dbkey']} (V{version}{'' if cache_type == 'default' else ', ' + cache_type.capitalize()})"
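# Illustrative decomposition (hypothetical file name, not taken from the original script):
# 'homo_sapiens_vep_104_GRCh38.tar.gz' with dbkey 'GRCh38' would yield
# species='homo_sapiens', cache_type='default', version='104',
# and display_name='Homo sapiens GRCh38 (V104)'.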
# Download and extract given cache archive, remove archive afterwards
final_file, headers = urlretrieve(url, os.path.join(target_directory, params['param_dict']['file_name']))
tar = tarfile.open(final_file, "r:gz")
tar.extractall(target_directory)
tar.close()
os.remove(final_file)
# Construct metadata for the new data table entry
# Note: str.strip removes a set of characters, not a suffix, so cut the ".tar.gz" extension explicitly
cache_dirname = params['param_dict']['file_name'][:-len(".tar.gz")] if params['param_dict']['file_name'].endswith(".tar.gz") else params['param_dict']['file_name']
data_manager_dict = {
'data_tables': {
'vep_versioned_annotation_cache': [
{
'value': cache_dirname,
'dbkey': params['param_dict']['dbkey'],
'version': version,
'cachetype': cache_type,
'name': display_name,
'species': species,
'path': './%s' % cache_dirname
}
]
}
}
# Save metadata to out_file
with open(sys.argv[1], 'w') as fh:
json.dump(data_manager_dict, fh, sort_keys=True)
if __name__ == "__main__":
main()
|
#
# This file is part of LiteX-Boards.
#
# Copyright (c) 2018-2019 Rohit Singh <rohit@rohitksingh.in>
# Copyright (c) 2019 Florent Kermarrec <florent@enjoy-digital.fr>
# SPDX-License-Identifier: BSD-2-Clause
from litex.build.generic_platform import *
from litex.build.xilinx import XilinxPlatform
from litex.build.openocd import OpenOCD
# IOs ----------------------------------------------------------------------------------------------
_io = [
# clk / rst
("clk100", 0, Pins("W19"), IOStandard("LVCMOS33")),
# leds (only a single rgb led, aliased here also)
("user_led", 0, Pins("AB21"), IOStandard("LVCMOS33")),
("user_led", 1, Pins("AB22"), IOStandard("LVCMOS33")),
("user_led", 2, Pins("U20"), IOStandard("LVCMOS33")),
# rgb led, active-low
("rgb_led", 0,
Subsignal("r", Pins("AB21")),
Subsignal("g", Pins("AB22")),
Subsignal("b", Pins("U20")),
IOStandard("LVCMOS33"),
),
# flash
("flash", 0,
Subsignal("cs_n", Pins("T19")),
Subsignal("mosi", Pins("P22")),
Subsignal("miso", Pins("R22")),
Subsignal("hold", Pins("R21")),
Subsignal("rst_n", Pins("R19")),
IOStandard("LVCMOS33")
),
("flash4x", 0, # clock needs to be accessed through STARTUPE2
Subsignal("cs_n", Pins("T19")),
Subsignal("dq", Pins("P22", "R22", "P21", "R21")),
IOStandard("LVCMOS33")
),
# tpm
("tpm", 0,
Subsignal("clk", Pins("W20")),
Subsignal("rst_n", Pins("V19")),
Subsignal("cs_n", Pins("Y18")),
Subsignal("mosi", Pins("Y19")),
Subsignal("miso", Pins("V18")),
IOStandard("LVCMOS33"),
),
# pcie
("pcie_x1", 0,
Subsignal("rst_n", Pins("AB20"), IOStandard("LVCMOS33"), Misc("PULLUP=TRUE")),
Subsignal("clk_p", Pins("F6")),
Subsignal("clk_n", Pins("E6")),
Subsignal("rx_p", Pins("B8")),
Subsignal("rx_n", Pins("A8")),
Subsignal("tx_p", Pins("B4")),
Subsignal("tx_n", Pins("A4"))
),
("pcie_x4", 0,
Subsignal("rst_n", Pins("AB20"), IOStandard("LVCMOS33"), Misc("PULLUP=TRUE")),
Subsignal("clk_p", Pins("F6")),
Subsignal("clk_n", Pins("E6")),
Subsignal("rx_p", Pins("B8 D11 B10 D9")),
Subsignal("rx_n", Pins("A8 C11 A10 C9")),
Subsignal("tx_p", Pins("B4 D5 B6 D7")),
Subsignal("tx_n", Pins("A4 C5 A6 C7"))
),
# dram
("ddram", 0,
Subsignal("a", Pins(
"U6 T5 Y6 T6 V2 T4 Y2 R2",
"Y1 R4 W5 W1 AA6 U2"),
IOStandard("SSTL15")),
Subsignal("ba", Pins("W6 U5 R6"), IOStandard("SSTL15")),
Subsignal("ras_n", Pins("V5"), IOStandard("SSTL15")),
Subsignal("cas_n", Pins("T1"), IOStandard("SSTL15")),
Subsignal("we_n", Pins("R3"), IOStandard("SSTL15")),
Subsignal("dm", Pins("Y7 AA1"), IOStandard("SSTL15")),
Subsignal("dq", Pins(
"Y8 AB6 W9 AA8 AB7 V7 AB8 W7",
"V4 AB2 AA5 AB3 AB5 W4 AB1 AA4"),
IOStandard("SSTL15"),
Misc("IN_TERM=UNTUNED_SPLIT_50")),
Subsignal("dqs_p", Pins("V9 Y3"), IOStandard("DIFF_SSTL15")),
Subsignal("dqs_n", Pins("V8 AA3"), IOStandard("DIFF_SSTL15")),
Subsignal("clk_p", Pins("U3"), IOStandard("DIFF_SSTL15")),
Subsignal("clk_n", Pins("V3"), IOStandard("DIFF_SSTL15")),
Subsignal("cke", Pins("U1"), IOStandard("SSTL15")),
Subsignal("odt", Pins("W2"), IOStandard("SSTL15")),
Subsignal("reset_n", Pins("U7"), IOStandard("LVCMOS15")),
Subsignal("cs_n", Pins("T3"), IOStandard("SSTL15")),
Misc("SLEW=FAST"),
),
]
# Platform -----------------------------------------------------------------------------------------
class Platform(XilinxPlatform):
default_clk_name = "clk100"
default_clk_period = 1e9/100e6
def __init__(self):
XilinxPlatform.__init__(self, "xc7a200t-fbg484-2", _io, toolchain="vivado")
self.add_platform_command("set_property INTERNAL_VREF 0.750 [get_iobanks 34]")
self.toolchain.bitstream_commands = [
"set_property BITSTREAM.CONFIG.SPI_BUSWIDTH 4 [current_design]",
"set_property BITSTREAM.CONFIG.CONFIGRATE 16 [current_design]",
"set_property BITSTREAM.GENERAL.COMPRESS TRUE [current_design]"
]
self.toolchain.additional_commands = \
["write_cfgmem -force -format bin -interface spix4 -size 16 "
"-loadbit \"up 0x0 {build_name}.bit\" -file {build_name}.bin"]
def create_programmer(self):
return OpenOCD("openocd_xc7_ft232.cfg", "bscan_spi_xc7a200t.bit")
def do_finalize(self, fragment):
XilinxPlatform.do_finalize(self, fragment)
self.add_period_constraint(self.lookup_request("clk100", loose=True), 1e9/100e6)
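# Usage sketch (assumption: this platform is consumed by a LiteX target/SoC script; names illustrative):
#   platform = Platform()
#   clk100 = platform.request("clk100")          # 100 MHz oscillator on pin W19
#   platform.build(soc)                          # runs Vivado with the constraints above
#   platform.create_programmer().load_bitstream("build/top.bit")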
|
#!/usr/bin/env python
"""Cloudflare API code - example"""
from __future__ import print_function
import os
import sys
sys.path.insert(0, os.path.abspath('..'))
import CloudFlare
def main():
"""Cloudflare API code - example"""
cf = CloudFlare.CloudFlare()
zones = cf.zones.get(params={'per_page':50})
for zone in zones:
zone_name = zone['name']
zone_id = zone['id']
settings_ipv6 = cf.zones.settings.ipv6.get(zone_id)
ipv6_on = settings_ipv6['value']
print(zone_id, ipv6_on, zone_name)
exit(0)
if __name__ == '__main__':
main()
|
from django.contrib.auth.models import User
from django.core.paginator import Paginator
from django.shortcuts import render
from blog.models.clasDict import classes
from blog.models.post import Post
from django.template.defaulttags import register
from django.contrib.auth.mixins import LoginRequiredMixin, UserPassesTestMixin
from django.urls import reverse_lazy
from django.views import generic
from django.views.generic import CreateView, UpdateView, DeleteView
from blog.models.comment import Comment
@register.filter
def get_item(dictionary, key):
return dictionary.get(key)
class PostClassView(generic.DetailView):
model = Post
template_name = 'blog/post_class.html'
def get_context_data(self, **kwargs):
# Call the base implementation first to get a context
context = super().get_context_data(**kwargs)
context['posts'] = Post.objects.filter(clss=self.kwargs['pk'])
context['classes'] = classes
return context
|
#!/usr/bin/env python3
import sys
from PySide2 import QtCore, QtWidgets, QtGui
from os.path import exists, join, abspath
from os import remove
import tweepy
from auth.auth import AuthData
from tweet.tweet import getTweetsKeyword, tweetRandom, OutputerInterface
import types
class AuthDataInput(QtWidgets.QWidget):
def __init__(self, wrapper: 'AuthDataWidget', authdata_path: str):
super().__init__()
self.authdata_path = authdata_path
self.wrapper = wrapper
self.layout_consumer_key = QtWidgets.QHBoxLayout()
self.label_consumer_key = QtWidgets.QLabel("Consumer Key", alignment=QtCore.Qt.AlignCenter)
self.edit_consumer_key = QtWidgets.QLineEdit()
self.layout_consumer_key.addWidget(self.label_consumer_key)
self.layout_consumer_key.addWidget(self.edit_consumer_key)
self.layout_consumer_secret = QtWidgets.QHBoxLayout()
self.label_consumer_secret = QtWidgets.QLabel("Consumer Secret", alignment=QtCore.Qt.AlignCenter)
self.edit_consumer_secret = QtWidgets.QLineEdit()
self.layout_consumer_secret.addWidget(self.label_consumer_secret)
self.layout_consumer_secret.addWidget(self.edit_consumer_secret)
self.layout_access_token = QtWidgets.QHBoxLayout()
self.label_access_token = QtWidgets.QLabel("Access Token", alignment=QtCore.Qt.AlignCenter)
self.edit_access_token = QtWidgets.QLineEdit()
self.layout_access_token.addWidget(self.label_access_token)
self.layout_access_token.addWidget(self.edit_access_token)
self.layout_access_token_secret = QtWidgets.QHBoxLayout()
self.label_access_token_secret = QtWidgets.QLabel("Access Token Secret", alignment=QtCore.Qt.AlignCenter)
self.edit_access_token_secret = QtWidgets.QLineEdit()
self.layout_access_token_secret.addWidget(self.label_access_token_secret)
self.layout_access_token_secret.addWidget(self.edit_access_token_secret)
self.button_save = QtWidgets.QPushButton("Save")
self.layout = QtWidgets.QVBoxLayout(self)
self.layout.addLayout(self.layout_consumer_key)
self.layout.addLayout(self.layout_consumer_secret)
self.layout.addLayout(self.layout_access_token)
self.layout.addLayout(self.layout_access_token_secret)
self.layout.addWidget(self.button_save)
self.button_save.clicked.connect(self.save)
@QtCore.Slot()
def save(self):
# Save in self.authdata_path
consumer_key = self.edit_consumer_key.text()
consumer_secret = self.edit_consumer_secret.text()
access_token = self.edit_access_token.text()
access_token_secret = self.edit_access_token_secret.text()
ad = AuthData(consumer_key, consumer_secret, access_token, access_token_secret)
ad.SaveToJson(self.authdata_path)
# Notify parent wrapper
self.wrapper.update_storage_status()
class AuthDataStored(QtWidgets.QWidget):
def __init__(self, wrapper: 'AuthDataWidget', authdata_path: str):
super().__init__()
self.wrapper = wrapper
self.authdata_path = authdata_path
# Read auth data
self.ad = AuthData.CreateFromJson(authdata_path)
self.layout_consumer_key = QtWidgets.QHBoxLayout()
self.label_consumer_key = QtWidgets.QLabel("Consumer Key", alignment=QtCore.Qt.AlignLeft)
self.label_literal_consumer_key = QtWidgets.QLabel(self.ad.consumer_key, alignment=QtCore.Qt.AlignRight)
self.layout_consumer_key.addWidget(self.label_consumer_key)
self.layout_consumer_key.addWidget(self.label_literal_consumer_key)
self.layout_consumer_secret = QtWidgets.QHBoxLayout()
self.label_consumer_secret = QtWidgets.QLabel("Consumer Secret", alignment=QtCore.Qt.AlignLeft)
self.label_literal_consumer_secret = QtWidgets.QLabel(self.ad.consumer_secret, alignment=QtCore.Qt.AlignRight)
self.layout_consumer_secret.addWidget(self.label_consumer_secret)
self.layout_consumer_secret.addWidget(self.label_literal_consumer_secret)
self.layout_access_token = QtWidgets.QHBoxLayout()
self.label_access_token = QtWidgets.QLabel("Access Token", alignment=QtCore.Qt.AlignLeft)
self.label_literal_access_token = QtWidgets.QLabel(self.ad.access_token, alignment=QtCore.Qt.AlignRight)
self.layout_access_token.addWidget(self.label_access_token)
self.layout_access_token.addWidget(self.label_literal_access_token)
self.layout_access_token_secret = QtWidgets.QHBoxLayout()
self.label_access_token_secret = QtWidgets.QLabel("Access Token Secret", alignment=QtCore.Qt.AlignLeft)
self.label_literal_access_token_secret = QtWidgets.QLabel(self.ad.access_token_secret, alignment=QtCore.Qt.AlignRight)
self.layout_access_token_secret.addWidget(self.label_access_token_secret)
self.layout_access_token_secret.addWidget(self.label_literal_access_token_secret)
self.text = QtWidgets.QLabel("", alignment=QtCore.Qt.AlignCenter)
self.layout_buttons = QtWidgets.QHBoxLayout()
self.button_connect = QtWidgets.QPushButton("Connect")
self.button_edit = QtWidgets.QPushButton("Edit")
self.button_delete = QtWidgets.QPushButton("Delete")
self.layout_buttons.addWidget(self.button_connect)
self.layout_buttons.addWidget(self.button_edit)
self.layout_buttons.addWidget(self.button_delete)
self.layout = QtWidgets.QVBoxLayout(self)
self.layout.addWidget(self.text)
self.layout.addLayout(self.layout_consumer_key)
self.layout.addLayout(self.layout_consumer_secret)
self.layout.addLayout(self.layout_access_token)
self.layout.addLayout(self.layout_access_token_secret)
self.layout.addLayout(self.layout_buttons)
self.button_connect.clicked.connect(self.authenticate)
self.button_delete.clicked.connect(self.delete_auth)
@QtCore.Slot()
def authenticate(self):
# Authenticate to Twitter
auth = tweepy.OAuthHandler(self.ad.consumer_key, self.ad.consumer_secret)
auth.set_access_token(self.ad.access_token, self.ad.access_token_secret)
api = tweepy.API(auth)
auth_success = False
try:
api.verify_credentials()
result_text = "Authentication OK"
auth_success = True
        except Exception:
            result_text = "Error during authentication"
self.text.setText(result_text)
if (auth_success):
self.wrapper.update_api(api)
@QtCore.Slot()
def delete_auth(self):
# Remove auth file
if exists(self.authdata_path):
remove(self.authdata_path)
# Notify parent wrapper
self.wrapper.update_storage_status()
class AuthDataWidget(QtWidgets.QWidget):
def __init__(self, authdata_path, archytas: 'ArchytasWidget'):
super().__init__()
self.authdata_path = authdata_path
self.archytas = archytas
self.calculate_authdata()
self.title = QtWidgets.QLabel("Authentication", alignment=QtCore.Qt.AlignCenter)
self.title.setFont(QtGui.QFont("Default", 16))
self.layout = QtWidgets.QVBoxLayout(self)
self.layout.addWidget(self.title)
self.layout.addWidget(self.authdata_inner)
def calculate_authdata(self):
self.isAuthDataStored = exists(self.authdata_path)
ad = AuthData.CreateFromJson(self.authdata_path)
if (self.isAuthDataStored and ad is not None):
self.authdata_inner = AuthDataStored(self, self.authdata_path)
else:
self.authdata_inner = AuthDataInput(self, self.authdata_path)
def update_storage_status(self):
aw = self.layout.takeAt(1)
aw.widget().deleteLater()
self.calculate_authdata()
self.layout.addWidget(self.authdata_inner)
def update_api(self, api: tweepy.API):
self.api = api
if (self.api is not None):
self.archytas.update_api(api)
class RetweetWidget(QtWidgets.QWidget):
def __init__(self, archytas: 'ArchytasWidget'):
super().__init__()
self.archytas = archytas
self.title = QtWidgets.QLabel("Auto retweeter", alignment=QtCore.Qt.AlignCenter)
self.title.setFont(QtGui.QFont("Default", 16))
self.label_err_message = QtWidgets.QLabel("", alignment=QtCore.Qt.AlignCenter)
self.layout_number_retweets = QtWidgets.QHBoxLayout()
self.label_number_retweets = QtWidgets.QLabel("Number of retweets", alignment=QtCore.Qt.AlignLeft)
self.edit_number_retweets = QtWidgets.QLineEdit()
self.layout_number_retweets.addWidget(self.label_number_retweets)
self.layout_number_retweets.addWidget(self.edit_number_retweets)
self.layout_keyword = QtWidgets.QHBoxLayout()
self.label_keyword = QtWidgets.QLabel("Keyword", alignment=QtCore.Qt.AlignLeft)
self.edit_keyword = QtWidgets.QLineEdit()
self.layout_keyword.addWidget(self.label_keyword)
self.layout_keyword.addWidget(self.edit_keyword)
self.button_retweet = QtWidgets.QPushButton("Retweet")
self.layout = QtWidgets.QVBoxLayout(self)
self.layout.addWidget(self.title)
self.layout.addWidget(self.label_err_message)
self.layout.addLayout(self.layout_number_retweets)
self.layout.addLayout(self.layout_keyword)
self.layout.addWidget(self.button_retweet)
self.button_retweet.clicked.connect(self.retweet)
self.set_connected(False)
def QtRetweetList(self, api: tweepy.API, tweets: list, index: int, secs: int, finishedAction: types.FunctionType):
tweet = tweets[index]
try:
print('\nRetweet Bot found tweet by @' + tweet.user.screen_name + '. ' + 'Attempting to retweet.')
tweet.retweet()
print('Retweet published successfully.')
index = index + 1
if (index < len(tweets)):
QtCore.QTimer.singleShot(secs * 1000, lambda: self.QtRetweetList(api, tweets, index, secs, finishedAction))
else:
finishedAction()
        # Basic error handling: print why the retweet failed to the terminal.
        # tweepy.HTTPException is caught first because it subclasses TweepyException.
        except tweepy.HTTPException as error:
            print('\nError HTTPException. Retweet not successful. Reason: ')
            print(error)
        except tweepy.TweepyException as error:
            print('\nError TweepyException. Retweet not successful. Reason: ')
            print(error)
    def QtRetweetKeyword(self, api, keyword, retweetRange, secs, finishedAction: types.FunctionType):
        tweets = getTweetsKeyword(api, keyword, retweetRange)
        self.QtRetweetList(api, tweets, 0, secs, finishedAction)
@QtCore.Slot()
def retweet(self):
self.label_err_message.setText("")
if (not self.archytas.connected):
self.label_err_message.setText("Error: The app is not connected to Twitter")
return
api = self.archytas.api
# Retweet some tweets with the hashtag
try:
retweetRange = int(self.edit_number_retweets.text())
        except ValueError:
self.label_err_message.setText("Error: Number of retweets is not a number")
return
keyword = self.edit_keyword.text()
self.button_retweet.setEnabled(False)
self.label_err_message.setText("Retweeting...")
finishedAction = self.finishedRetweetingActions
seconds_between_retweets = 2
self.QtRetweetKeyword(api, keyword, retweetRange, seconds_between_retweets, finishedAction)
def finishedRetweetingActions(self):
self.button_retweet.setEnabled(True)
self.label_err_message.setText("Successfully retweeted")
def set_connected(self, connected: bool):
self.button_retweet.setEnabled(connected)
class OutputerTweetWidget(OutputerInterface):
def __init__(self, TweetWidget):
super().__init__()
self.tweetw = TweetWidget
def print(self, message: str) -> None:
self.tweetw.update_message(message)
class TweetWidget(QtWidgets.QWidget):
def __init__(self, archytas: 'ArchytasWidget'):
super().__init__()
self.archytas = archytas
self.csv_path = "<No file loaded>"
self.loaded_csv = False
self.connected = False
self.title = QtWidgets.QLabel("Random tweet", alignment=QtCore.Qt.AlignCenter)
self.title.setFont(QtGui.QFont("Default", 16))
self.label_err_message = QtWidgets.QLabel("", alignment=QtCore.Qt.AlignCenter)
self.layout_input_csv = QtWidgets.QHBoxLayout()
self.label_input_csv = QtWidgets.QLabel("Random tweet source:", alignment=QtCore.Qt.AlignLeft)
self.label_location_csv = QtWidgets.QLabel(self.csv_path, alignment=QtCore.Qt.AlignLeft)
self.button_load_csv = QtWidgets.QPushButton("Browse...")
self.layout_input_csv.addWidget(self.label_input_csv)
self.layout_input_csv.addWidget(self.label_location_csv)
self.layout_input_csv.addWidget(self.button_load_csv)
self.button_tweet = QtWidgets.QPushButton("Tweet")
self.layout = QtWidgets.QVBoxLayout(self)
self.layout.addWidget(self.title)
self.layout.addWidget(self.label_err_message)
self.layout.addLayout(self.layout_input_csv)
self.layout.addWidget(self.button_tweet)
self.button_tweet.clicked.connect(self.tweet)
self.button_load_csv.clicked.connect(self.browse_csv)
self.try_enabling_tweet_button()
@QtCore.Slot()
def tweet(self):
self.label_err_message.setText("")
if (not self.archytas.connected):
self.label_err_message.setText("Error: The app is not connected to Twitter")
return
if (not self.loaded_csv):
self.label_err_message.setText("Error: Invalid tweet source file path")
return
api = self.archytas.api
# Tweet randomly selected tweets
number_of_tweets = 1
outputer = OutputerTweetWidget(self)
tweetRandom(api, self.csv_path, number_of_tweets, outputer)
@QtCore.Slot()
def browse_csv(self):
file_input_csv = QtWidgets.QFileDialog()
path_tuple: tuple = file_input_csv.getOpenFileName()
path = path_tuple[0]
self.update_csv_path(path)
def update_csv_path(self, path):
self.loaded_csv = True
self.csv_path = path
self.label_location_csv.setText(self.csv_path)
self.try_enabling_tweet_button()
def try_enabling_tweet_button(self):
if (self.connected and self.loaded_csv):
self.button_tweet.setEnabled(True)
else:
self.button_tweet.setEnabled(False)
def set_connected(self, connected: bool):
self.connected = connected
self.try_enabling_tweet_button()
def update_message(self, message: str) -> None:
self.label_err_message.setText(message)
class ArchytasWidget(QtWidgets.QWidget):
def __init__(self, authdata_path):
super().__init__()
self.connected = False
self.authdataw = AuthDataWidget(authdata_path, self)
self.retweetw = RetweetWidget(self)
self.tweetw = TweetWidget(self)
self.line1 = QtWidgets.QFrame()
self.line1.setFrameShape(QtWidgets.QFrame.HLine)
self.line1.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line2 = QtWidgets.QFrame()
self.line2.setFrameShape(QtWidgets.QFrame.HLine)
self.line2.setFrameShadow(QtWidgets.QFrame.Sunken)
self.layout = QtWidgets.QVBoxLayout(self)
self.layout.addWidget(self.authdataw)
self.layout.addWidget(self.line1)
self.layout.addWidget(self.retweetw)
self.layout.addWidget(self.line2)
self.layout.addWidget(self.tweetw)
def update_api(self, api: tweepy.API):
self.api = api
if (self.api is not None):
self.connected = True
else:
self.connected = False
self.retweetw.set_connected(self.connected)
self.tweetw.set_connected(self.connected)
def resource_path(relative_path):
if hasattr(sys, '_MEIPASS'):
return join(sys._MEIPASS, relative_path)
return join(abspath('.'), relative_path)
def main():
app = QtWidgets.QApplication([])
widget = ArchytasWidget("auth_data.json")
widget.resize(800, 600)
widget.show()
widget.setWindowTitle("Archytas")
widget.setWindowIcon( QtGui.QIcon(resource_path("./assets/icon.png")) )
sys.exit(app.exec_())
if __name__=="__main__":
main()
|
import pandas as p
import numpy as np
from file_setup_helper import FileSetupHelper as fsh
from preprocess_data import PreprocessData as pd
from model_export import ModelExport as me
import sys
from sklearn.linear_model import Ridge
def main():
#call for file download with given date
file_name = fsh(sys.argv[1], 1460, 0).download_csv()
#aquire features and target data filenames
features_file_name, target_file_name = pd(file_name).preprocess(True, None)
#init model, and open the features and target file
#(drop column added by pandas and bypass 29.02 potential error)
model = Ridge(alpha=1, fit_intercept=True)
X_train = p.read_csv(features_file_name).drop(['Unnamed: 0', 'Date_29.02'], axis=1)
y_train = p.read_csv(target_file_name).drop(['Unnamed: 0'], axis=1)
#export model with the given datasets and model
me(model, X_train, y_train).fit_and_export(sys.argv[2])
if __name__ == '__main__':
main()
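# Invocation sketch (script name and arguments are examples; meanings inferred from the calls above):
#   python train.py 2020-01-01 ridge_model.pkl
# sys.argv[1] is the date handed to FileSetupHelper for the CSV download,
# sys.argv[2] is the path the fitted Ridge model is exported to.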
|
from .models import Stock, Tag, Product
from rest_framework import viewsets
from rest_framework.permissions import AllowAny
from .serializers import (
StockSerializer,
TagSerializer,
ProductSerializer
)
class StockViewSet(viewsets.ModelViewSet):
"""
API endpoint that allows stock to be viewed or edited.
"""
queryset = Stock.objects.all().order_by('-date_added')
serializer_class = StockSerializer
permission_classes = [AllowAny]
class TagViewSet(viewsets.ModelViewSet):
"""
API endpoint that allows tag to be viewed or edited.
"""
queryset = Tag.objects.all().order_by('-id')
serializer_class = TagSerializer
permission_classes = [AllowAny]
class ProductViewSet(viewsets.ModelViewSet):
"""
API endpoint that allows product to be viewed or edited.
"""
queryset = Product.objects.all().order_by('-date_created')
serializer_class = ProductSerializer
permission_classes = [AllowAny]
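# Wiring sketch (assumption: these viewsets are registered in a urls.py with DRF's DefaultRouter):
#   from rest_framework import routers
#   router = routers.DefaultRouter()
#   router.register(r'stock', StockViewSet)
#   router.register(r'tags', TagViewSet)
#   router.register(r'products', ProductViewSet)
#   urlpatterns = [path('api/', include(router.urls))]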
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implementation of the flags interface."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse as _argparse
from tensorflow.python.util.all_util import remove_undocumented
_global_parser = _argparse.ArgumentParser()
# pylint: disable=invalid-name
class _FlagValues(object):
"""Global container and accessor for flags and their values."""
def __init__(self):
self.__dict__['__flags'] = {}
self.__dict__['__parsed'] = False
def _parse_flags(self, args=None):
result, unparsed = _global_parser.parse_known_args(args=args)
for flag_name, val in vars(result).items():
self.__dict__['__flags'][flag_name] = val
self.__dict__['__parsed'] = True
return unparsed
def __getattr__(self, name):
"""Retrieves the 'value' attribute of the flag --name."""
if not self.__dict__['__parsed']:
self._parse_flags()
if name not in self.__dict__['__flags']:
raise AttributeError(name)
return self.__dict__['__flags'][name]
def __setattr__(self, name, value):
"""Sets the 'value' attribute of the flag --name."""
if not self.__dict__['__parsed']:
self._parse_flags()
self.__dict__['__flags'][name] = value
def _define_helper(flag_name, default_value, docstring, flagtype):
"""Registers 'flag_name' with 'default_value' and 'docstring'."""
_global_parser.add_argument('--' + flag_name,
default=default_value,
help=docstring,
type=flagtype)
# Provides the global object that can be used to access flags.
FLAGS = _FlagValues()
def DEFINE_string(flag_name, default_value, docstring):
"""Defines a flag of type 'string'.
Args:
flag_name: The name of the flag as a string.
default_value: The default value the flag should take as a string.
docstring: A helpful message explaining the use of the flag.
"""
_define_helper(flag_name, default_value, docstring, str)
def DEFINE_integer(flag_name, default_value, docstring):
"""Defines a flag of type 'int'.
Args:
flag_name: The name of the flag as a string.
default_value: The default value the flag should take as an int.
docstring: A helpful message explaining the use of the flag.
"""
_define_helper(flag_name, default_value, docstring, int)
def DEFINE_boolean(flag_name, default_value, docstring):
"""Defines a flag of type 'boolean'.
Args:
flag_name: The name of the flag as a string.
default_value: The default value the flag should take as a boolean.
docstring: A helpful message explaining the use of the flag.
"""
# Register a custom function for 'bool' so --flag=True works.
def str2bool(v):
return v.lower() in ('true', 't', '1')
_global_parser.add_argument('--' + flag_name,
nargs='?',
const=True,
help=docstring,
default=default_value,
type=str2bool)
# Add negated version, stay consistent with argparse with regard to
# dashes in flag names.
_global_parser.add_argument('--no' + flag_name,
action='store_false',
dest=flag_name.replace('-', '_'))
# The internal google library defines the following alias, so we match
# the API for consistency.
DEFINE_bool = DEFINE_boolean # pylint: disable=invalid-name
def DEFINE_float(flag_name, default_value, docstring):
"""Defines a flag of type 'float'.
Args:
flag_name: The name of the flag as a string.
default_value: The default value the flag should take as a float.
docstring: A helpful message explaining the use of the flag.
"""
_define_helper(flag_name, default_value, docstring, float)
_allowed_symbols = [
# We rely on gflags documentation.
'DEFINE_bool',
'DEFINE_boolean',
'DEFINE_float',
'DEFINE_integer',
'DEFINE_string',
'FLAGS',
]
remove_undocumented(__name__, _allowed_symbols)
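# Usage sketch (flag names are examples only):
#   DEFINE_integer('batch_size', 32, 'Number of samples per training batch.')
#   DEFINE_bool('use_gpu', True, 'Whether to place ops on the GPU.')
#   print(FLAGS.batch_size, FLAGS.use_gpu)   # first attribute access triggers _parse_flags()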
|
# Create your views here.
from rest_framework.views import APIView
from .serializers import UserSerializer, LoginSerializer
from ..utils.helpers.json_helpers import generate_response, raise_error, add_token_to_response, validate_url, validate_confirmation_request
from ..utils import success_messages
from ..utils.error_messages import serialization_errors, tokenization_errors
from rest_framework.status import HTTP_404_NOT_FOUND, HTTP_201_CREATED, HTTP_401_UNAUTHORIZED, HTTP_202_ACCEPTED
from ..utils.validators.token_validator import TokenValidator, VerifiedUserTokenValidator
from ..utils.constants import CONFIRM_EMAIL_TYPE, ADMIN_REQUEST_EMAIL_TYPE, ADMIN_REQUEST_SUBJECT, CONFRIM_EMAIL_SUBJECT
from django.http import HttpResponseRedirect
from rest_framework.decorators import api_view
from rest_framework.parsers import MultiPartParser, JSONParser
from ..utils.helpers.email_helpers import send_email_with_token
from airtech_api.services.cloudinary import upload_profile_picture
from airtech_api.users.models import User
from datetime import datetime, timedelta
from django.core.files.storage import default_storage
from django.core.files.base import ContentFile
from django.core.files.uploadedfile import UploadedFile
import os
class SignupView(APIView):
@staticmethod
def post(request):
"""
Saves a new user to the app
Returns:
A Response object containing the JSON response
"""
request_data = dict(**request.data)
callback_url = request_data.get('callbackURL', '')
err_dict = {}
if not validate_url(callback_url):
err_dict = {
'callbackURL': [serialization_errors['invalid_url_field']]
}
serializer = UserSerializer(data=request_data)
if serializer.is_valid() and not err_dict:
_ = serializer.save()
serialization_data = serializer.data
user_email = serialization_data['email']
server_host = os.getenv('HOSTNAME')
send_email_with_token(
user_email,
'confirm-email.html',
subject=CONFRIM_EMAIL_SUBJECT,
redirect_url=callback_url,
confirm_link=f'{server_host}/api/v1/auth/confirm-email',
mail_type=CONFIRM_EMAIL_TYPE,
)
return generate_response(
success_messages['confirm_mail'].format(user_email),
status_code=HTTP_201_CREATED)
err_dict.update(serializer.errors)
raise_error(serialization_errors['many_invalid_fields'],
err_dict=err_dict)
class ConfirmView(APIView):
@staticmethod
def get(request, **kwargs):
token = kwargs.get('token', '')
user, redirect_url = validate_confirmation_request(
token, CONFIRM_EMAIL_TYPE, success_key='verified')
if user:
user.verified = True
user.save()
return HttpResponseRedirect(redirect_to=redirect_url)
class RequestAdminAccessView(APIView):
permission_classes = [VerifiedUserTokenValidator]
protected_methods = ['POST']
@staticmethod
def post(request):
user = request.decoded_user
callback_url = request.data.get('callbackURL', '')
if not validate_url(callback_url):
raise_error(serialization_errors['many_invalid_fields'],
err_dict={
'callbackURL':
[serialization_errors['invalid_url_field']]
})
if user.admin:
raise_error(serialization_errors['regular_user_only'],
status_code=403)
if not user.image_url:
raise_error(serialization_errors['profile_not_updated'],
status_code=403)
server_host = os.getenv('HOSTNAME')
send_email_with_token(
os.getenv('OWNER_EMAIL'),
'admin-request-email.html',
subject=ADMIN_REQUEST_SUBJECT,
redirect_url=callback_url,
confirm_link=f'{server_host}/api/v1/auth/request-admin-access',
mail_type=ADMIN_REQUEST_EMAIL_TYPE,
first_name=user.first_name,
last_name=user.last_name,
user_email=user.email,
profile_picture=user.image_url,
)
return generate_response(success_messages['admin_request_sent'])
class LoginView(APIView):
@staticmethod
def post(request):
"""
        Logs an existing user into the app
Returns:
A Response object containing the JSON response
"""
request_data = dict(**request.data)
serializer = LoginSerializer(data=request_data)
if serializer.is_valid(raise_exception=False):
user = serializer.validated_data
serialized_user = UserSerializer(user).data
serialization_data = add_token_to_response(serialized_user,
exp=datetime.now() +
timedelta(days=4))
return generate_response(
success_messages['auth_successful'].format('Login'),
serialization_data)
if 'non_field_errors' in serializer.errors.keys():
raise_error(serialization_errors['user_not_found'],
status_code=HTTP_404_NOT_FOUND)
raise_error(serialization_errors['many_invalid_fields'],
err_dict=serializer.errors)
@api_view(['GET'])
def accept_admin_request(request, **kwargs):
token = kwargs.get('token', '')
user, redirect_url = validate_confirmation_request(
token, ADMIN_REQUEST_EMAIL_TYPE, success_key='admin_approval')
if user:
user.admin = True
user.save()
return HttpResponseRedirect(redirect_to=redirect_url)
class ResendEmailEndpoint(APIView):
permission_classes = [TokenValidator]
protected_methods = ['POST']
@staticmethod
def post(request):
callback_url = request.data.get('callbackURL', '')
if not validate_url(callback_url):
raise_error(serialization_errors['many_invalid_fields'],
err_dict={
'callbackURL':
serialization_errors['invalid_url_field']
})
        user = request.decoded_user
if user.verified:
raise_error(serialization_errors['user_already_verified'])
server_host = os.getenv('HOSTNAME')
send_email_with_token(
user.email,
'confirm-email.html',
subject=CONFRIM_EMAIL_SUBJECT,
redirect_url=callback_url,
confirm_link=f'{server_host}/api/v1/auth/confirm-email',
mail_type=CONFIRM_EMAIL_TYPE,
)
return generate_response(
            success_messages['confirm_mail'].format(user.email))
class UserProfilePicture(APIView):
parser_classes = (
MultiPartParser,
JSONParser,
)
permission_classes = [TokenValidator]
@staticmethod
def patch(request):
file = request.data.get('picture')
if not file:
raise_error(serialization_errors['many_invalid_fields'],
err_dict={
'picture': serialization_errors['missing_field'],
})
if not isinstance(file, UploadedFile):
raise_error(serialization_errors['many_invalid_fields'],
err_dict={
'picture':
serialization_errors['value_not_a_file'],
})
user = request.decoded_user
octet_stream_is_valid = file.content_type == 'application/octet-stream' and os.getenv(
'ENVIRONMENT') == 'test'
file_is_image = file.content_type.split('/')[0] == 'image'
if not octet_stream_is_valid and not file_is_image:
raise_error(serialization_errors['not_an_image'])
file_size = file.size
if file_size > 2_000_000:
raise_error(serialization_errors['image_too_large'])
file_name = str(user.id) + datetime.now().strftime('%c') + '.jpg'
default_storage.save(file_name, ContentFile(file.read()))
upload_profile_picture.delay(user.id, user.image_public_id, file_name)
return generate_response('Your request is being processed.',
status_code=HTTP_202_ACCEPTED)
|
from metricsML.NormalizationType import NormalizationType
import numpy as np
import math
def normalization(normalization, train_data, test_data, validation_data=None):
if not isinstance(normalization, NormalizationType):
print("Unknown normalization specified, use " + str(NormalizationType.PERCENTAGE) + " for normalizing data")
normalization = NormalizationType.PERCENTAGE;
if (normalization is NormalizationType.NO_NORMALIZATION):
print("No normalization selected")
elif (normalization is NormalizationType.PERCENTAGE):
__percentageNormalization(train_data, test_data, validation_data)
elif (normalization is NormalizationType.LOGARITHM):
__logarithmNormalization(train_data, test_data, validation_data)
else:
raise TypeError("Unhandled normalization selected")
def __percentageNormalization(train_data, test_data, validation_data=None):
nColumns = train_data.shape[1] if len(train_data.shape) == 2 else 0;
train_max = np.amax(train_data, axis=0)
test_max = np.amax(test_data, axis=0)
if (validation_data is not None):
validation_max = np.amax(validation_data, axis=0)
else:
validation_max = np.zeros(nColumns)
max_vector = np.amax([train_max, test_max, validation_max], axis=0)
    # Divide in place so the caller's arrays are actually updated
    # (plain reassignment would only rebind the local names; float dtype assumed).
    train_data /= max_vector
    test_data /= max_vector
    if (validation_data is not None):
        validation_data /= max_vector
def __logarithmNormalization(train_data, test_data, validation_data=None):
nColumns = train_data.shape[1] if len(train_data.shape) == 2 else 0;
train_max = np.amax(train_data, axis=0)
test_max = np.amax(test_data, axis=0)
if (validation_data is not None):
validation_max = np.amax(validation_data, axis=0)
else:
validation_max = np.zeros(nColumns)
max_vector = np.amax([train_max, test_max, validation_max], axis=0)
for column in range(0, nColumns):
max_value = max_vector[column]
if (max_value > 1):
train_data[:, column] = [__positiveLogarithm(x) for x in train_data[:, column]]
test_data[:, column] = [__positiveLogarithm(x) for x in test_data[:, column]]
if (validation_data is not None):
validation_data[:, column] = [__positiveLogarithm(x) for x in validation_data[:, column]]
def __positiveLogarithm(number, base=10):
    # The callers above pass only the number, so the base needs a default (base 10 assumed here).
if (number > 1):
return math.log(number, base)
else:
return 0
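# Usage sketch (assumes float numpy arrays so the in-place division above is visible to the caller):
#   train = np.array([[1.0, 200.0], [2.0, 400.0]])
#   test = np.array([[1.5, 300.0]])
#   normalization(NormalizationType.PERCENTAGE, train, test)
#   # train and test are now scaled column-wise by the shared per-column maximum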
|
from setuptools import setup
setup_kwargs = dict(
name='netsuite-bbc-python',
version='1.0.1',
description='Wrapper around Netsuite SuiteTalk Web Services',
packages=['netsuite_bbc'],
include_package_data=True,
author='Jacob Magnusson',
author_email='m@jacobian.se',
url='https://github.com/bluebottlecoffee/netsuite-bbc-python',
license='BSD',
platforms='any',
install_requires=[
'lxml',
'requests-ntlm',
'zeep',
],
extras_require={
'cli': [
'argh',
'ipython',
],
        'test': [
            'coverage>=4.2',
            'flake8>=3.0.4',
            'mypy>=0.560',
            'pytest>=3.0.3',
            'responses>=0.5.1',
        ],
},
entry_points={
'console_scripts': [
'netsuite = netsuite.__main__:main',
],
},
classifiers=[
'Development Status :: 3 - Alpha',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
],
)
if __name__ == '__main__':
setup(**setup_kwargs)
|
#!/usr/bin/env python3
#
# This file is part of LiteX-Boards.
#
# Copyright (c) 2019 Antony Pavlov <antonynpavlov@gmail.com>
# SPDX-License-Identifier: BSD-2-Clause
import os
import argparse
from migen import *
from migen.genlib.resetsync import AsyncResetSynchronizer
from litex.build.io import DDROutput
from litex_boards.platforms import de1soc
from litex.soc.cores.clock import CycloneVPLL
from litex.soc.integration.soc_core import *
from litex.soc.integration.builder import *
from litex.soc.cores.led import LedChaser
from litedram.modules import IS42S16320
from litedram.phy import GENSDRPHY
# CRG ----------------------------------------------------------------------------------------------
class _CRG(Module):
def __init__(self, platform, sys_clk_freq):
self.rst = Signal()
self.clock_domains.cd_sys = ClockDomain()
self.clock_domains.cd_sys_ps = ClockDomain(reset_less=True)
# # #
# Clk / Rst
clk50 = platform.request("clk50")
# PLL
self.submodules.pll = pll = CycloneVPLL(speedgrade="-C6")
self.comb += pll.reset.eq(self.rst)
pll.register_clkin(clk50, 50e6)
pll.create_clkout(self.cd_sys, sys_clk_freq)
pll.create_clkout(self.cd_sys_ps, sys_clk_freq, phase=90)
# SDRAM clock
self.specials += DDROutput(1, 0, platform.request("sdram_clock"), ClockSignal("sys_ps"))
# BaseSoC ------------------------------------------------------------------------------------------
class BaseSoC(SoCCore):
def __init__(self, sys_clk_freq=int(50e6), with_led_chaser=True, **kwargs):
platform = de1soc.Platform()
# SoCCore ----------------------------------------------------------------------------------
SoCCore.__init__(self, platform, sys_clk_freq,
ident = "LiteX SoC on DE1-SoC",
ident_version = True,
**kwargs)
# CRG --------------------------------------------------------------------------------------
self.submodules.crg = _CRG(platform, sys_clk_freq)
# SDR SDRAM --------------------------------------------------------------------------------
if not self.integrated_main_ram_size:
self.submodules.sdrphy = GENSDRPHY(platform.request("sdram"), sys_clk_freq)
self.add_sdram("sdram",
phy = self.sdrphy,
module = IS42S16320(sys_clk_freq, "1:1"),
l2_cache_size = kwargs.get("l2_size", 8192)
)
# Leds -------------------------------------------------------------------------------------
if with_led_chaser:
self.submodules.leds = LedChaser(
pads = platform.request_all("user_led"),
sys_clk_freq = sys_clk_freq)
# Build --------------------------------------------------------------------------------------------
def main():
parser = argparse.ArgumentParser(description="LiteX SoC on DE1-SoC")
parser.add_argument("--build", action="store_true", help="Build bitstream")
parser.add_argument("--load", action="store_true", help="Load bitstream")
parser.add_argument("--sys-clk-freq", default=50e6, help="System clock frequency (default: 50MHz)")
builder_args(parser)
soc_core_args(parser)
args = parser.parse_args()
soc = BaseSoC(
sys_clk_freq = int(float(args.sys_clk_freq)),
**soc_core_argdict(args)
)
builder = Builder(soc, **builder_argdict(args))
builder.build(run=args.build)
if args.load:
prog = soc.platform.create_programmer()
prog.load_bitstream(os.path.join(builder.gateware_dir, soc.build_name + ".sof"))
if __name__ == "__main__":
main()
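# Invocation sketch (standard LiteX target usage; the script name is an example):
#   ./de1soc.py --build                       # generate and build the bitstream with the Intel toolchain
#   ./de1soc.py --load                        # load the generated .sof onto the board
#   ./de1soc.py --sys-clk-freq 100e6 --build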
|
from django_filters import rest_framework as filters
from rest_framework.filters import SearchFilter, OrderingFilter
from . import models
class SearchFilter(SearchFilter):
    """
    Search filter backend (project-level subclass of DRF's SearchFilter).
    """
class DjangoFilterBackend(filters.DjangoFilterBackend):
    """
    django-filter backend (project-level subclass of django_filters' DjangoFilterBackend).
    """
class OrderingFilter(OrderingFilter):
    """
    Ordering filter backend (project-level subclass of DRF's OrderingFilter).
    """
# class ListFilter(filters.Filter):
# def __init__(self, integer=False, **kwargs):
# super(ListFilter, self).__init__(**kwargs)
# if integer:
# self.filter_value_fn = lambda x: int(x)
# else:
# self.filter_value_fn = lambda x: x
#
# def sanitize(self, value_list):
# return [v for v in value_list if v != ""]
#
# def filter(self, qs, value):
# values = value.split(",")
# values = self.sanitize(values)
# f = Q()
# for v in values:
# kwargs = {self.field_name: v}
# f = f | Q(**kwargs)
# return qs.filter(f)
class NumberInFilter(filters.BaseInFilter, filters.NumberFilter):
pass
class CharInFilter(filters.BaseInFilter, filters.CharFilter):
pass
class GroceryGroupFilter(filters.FilterSet):
id = NumberInFilter(field_name='id')
name = CharInFilter(field_name='name')
class Meta:
model = models.GroceryGroup
fields = ['name', 'id']
class RecipeFilter(filters.FilterSet):
id = NumberInFilter(field_name='id')
user = NumberInFilter(field_name='user')
tags = NumberInFilter(field_name='tags')
class Meta:
model = models.Recipe
fields = ['id', 'user', 'tags']
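# Usage sketch (hypothetical viewset; RecipeSerializer is assumed to exist elsewhere):
#   class RecipeViewSet(viewsets.ModelViewSet):
#       queryset = models.Recipe.objects.all()
#       serializer_class = RecipeSerializer
#       filter_backends = [DjangoFilterBackend, SearchFilter, OrderingFilter]
#       filterset_class = RecipeFilter
#       search_fields = ['name']
# The *InFilter fields accept comma-separated values, e.g. ?id=1,2,3&tags=4,5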
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors and Contributors
# See license.txt
from __future__ import unicode_literals
import frappe
import unittest
# test_records = frappe.get_test_records('Appraisal')
class TestAppraisal(unittest.TestCase):
pass
|
#! /usr/bin/env python
# Copyright 2019
#
# This file is part of WarpX.
#
# License: BSD-3-Clause-LBNL
import sys
sys.path.insert(1, '../../../../warpx/Regression/Checksum/')
import numpy as np
import yt
yt.funcs.mylog.setLevel(50)
import re
import checksumAPI
from scipy.constants import c
# Name of the last plotfile
fn = sys.argv[1]
# Load yt data
ds_old = yt.load('divb_cleaning_3d_plt00398')
ds_mid = yt.load('divb_cleaning_3d_plt00399')
ds_new = yt.load(fn) # this is the last plotfile
ad_old = ds_old.covering_grid(level = 0, left_edge = ds_old.domain_left_edge, dims = ds_old.domain_dimensions)
ad_mid = ds_mid.covering_grid(level = 0, left_edge = ds_mid.domain_left_edge, dims = ds_mid.domain_dimensions)
ad_new = ds_new.covering_grid(level = 0, left_edge = ds_new.domain_left_edge, dims = ds_new.domain_dimensions)
G_old = ad_old['boxlib', 'G'].v.squeeze()
G_new = ad_new['boxlib', 'G'].v.squeeze()
divB = ad_mid['boxlib', 'divB'].v.squeeze()
# Check max norm of error on c2 * div(B) = dG/dt
# (the time interval between old and new is 2*dt)
dt = 1.504557189e-15
x = G_new - G_old
y = divB * 2 * dt * c**2
rel_error = np.amax(abs(x - y)) / np.amax(abs(y))
tolerance = 1e-1
assert(rel_error < tolerance)
test_name = fn[:-9] # Could also be os.path.split(os.getcwd())[1]
if re.search('single_precision', fn):
checksumAPI.evaluate_checksum(test_name, fn, rtol=1.e-3)
else:
checksumAPI.evaluate_checksum(test_name, fn)
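# Invocation sketch (plotfile name is an example):
#   python analysis.py divb_cleaning_3d_plt00400
# test_name above then becomes "divb_cleaning_3d" (the trailing "_plt00400" is stripped by fn[:-9]).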
|
from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField, SubmitField, SelectField
from wtforms.validators import DataRequired, Email, Length, EqualTo
from app.models.Model import Category
def get_categories():
categories_query = Category.query.all()
categories = []
for category in categories_query:
categories.append((category.id,category.title))
return categories
# Register form
class RegisterForm(FlaskForm):
email = StringField(label="email", validators=[DataRequired(), Email()])
username = StringField(label="username", validators=[DataRequired()])
password = PasswordField(label="password", validators=[DataRequired(), Length(min=6)])
confirm = PasswordField(label="confirm", validators=[DataRequired(),EqualTo(fieldname='password')])
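    # NOTE: choices are evaluated once at import time (and need an app/db context);
    # to refresh them per request, set self.category.choices in __init__ instead.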
    category = SelectField('Select a category', validators=[DataRequired()], choices=get_categories())
    submit = SubmitField(label="register")
# login form
class LoginForm(FlaskForm):
email = StringField('email', validators=[DataRequired(),Email()])
password = PasswordField('password', validators=[DataRequired()])
    submit = SubmitField('log in')
|
# RT Lib - Setting
from typing import (
TYPE_CHECKING, TypedDict, Optional, Union, Literal, Dict, Tuple, List,
overload, get_origin, get_args
)
from discord.ext import commands
import discord
from collections import defaultdict
from aiohttp import ClientSession
from functools import partial
from datetime import datetime
from ujson import dumps
from time import time
from pytz import utc
from . import websocket
from .slash import Option
if TYPE_CHECKING:
from .typed import RT
class CommandRunData(TypedDict):
command: str
kwargs: Dict[str, Union[str, int, float, bool]]
guild_id: Union[int, Literal[0]]
category: str
user_id: int
ip: str
class Setting:
@overload
def __init__(
_, mode: str, name: Optional[str] = None,
help_command: Tuple[str, str] = None, **kwargs
):
...
def __new__(cls, mode, name=None, help_command=None, **kwargs):
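        # NOTE: this early return appears to intentionally disable the Setting decorator
        # (it becomes a no-op); everything below in __new__ is currently unreachable.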
return lambda func: func
self = super().__new__(cls)
self.mode, self.name, self.kwargs = mode, name, kwargs
self.help_command = help_command
def _decorator(func):
func._setting = self
return func
return _decorator
class Context:
"ダッシュボードから呼ばれたコマンドで実行されるContextです。"
def __init__(
self, cog: "SettingManager", data: CommandRunData,
command: commands.Command, **kwargs
):
        # Convert IDs from strings to integers.
for key, value in list(data.items()):
if key.endswith("id"):
data[key] = int(value)
        # Build up the instance attributes.
self.data = data
self.setting_manager = cog
self.bot: "RT" = self.setting_manager.bot
self.guild: Optional[discord.Guild] = self.bot.get_guild(data["guild_id"])
self.created_at: datetime = datetime.now(utc)
self.edited_at = None
self.__setting_context__ = True
self.channel: Optional[
Union[discord.abc.GuildChannel, discord.DMChannel]
] = (
self.guild.get_channel(data["kwargs"].pop(
"channel_id", data["kwargs"].pop(
"channel", data["kwargs"].pop("Channel", 0)
)
))
if data["category"].endswith("guild")
else self.bot.get_user(data["user_id"])
)
self.author: Union[discord.User, discord.Member] = (
self.guild.get_member(data["user_id"]) if self.guild
else self.bot.get_user(data["user_id"])
)
        for key, value in kwargs.items():
            setattr(self, key, value)
self.command = command
self.cog = command.cog
self.voice_client: Optional[discord.VoiceClient] = \
getattr(self.guild, "voice_client", None)
self.prefix = "r2!" if self.bot.test else "rt!"
self.me: Union[discord.Member, discord.ClientUser] = \
getattr(self.guild, "me", self.bot.user)
self.message = self
self.reply = self.send
async def trigger_typing(self):
...
async def send(
self, content: str = None, embed: discord.Embed = None, *args, **kwargs
):
"返信をします。"
content = self.bot.cogs["Language"].get_text(
embed if embed else content, self.author.id
)
if isinstance(content, discord.Embed):
content = content.to_dict()
async with self.setting_manager.session.post(
f"{self.bot.get_url()}/api/settings/reply/{self.data['ip']}",
json={"data": content}
) as r:
self.bot.print(
"[SettingManager]", "[Reply]",
f"Response: {await r.text()}, Content: {content}"
)
@overload
async def reply(
self, content: str = None, embed: discord.Embed = None, *args, **kwargs
):
...
async def delete(self) -> None:
...
class SettingManager(commands.Cog):
SUPPORTED_DISCORD_ANNOTATIONS = (
"Member", "User", "TextChannel", "VoiceChannel", "StageChannel",
"Thread", "Role"
)
SUPPORTED_ANNOTATIONS = (str, int, float, bool)
def __init__(self, bot: "RT"):
self.bot = bot
self.data: Dict[
str, Tuple[commands.Command, Setting]
] = {}
self.before = {}
@property
def session(self) -> ClientSession:
if not hasattr(self, "_session"):
self._session = ClientSession(
loop=self.bot.loop, json_serialize=partial(
dumps, ensure_ascii=False
)
)
return self._session
def get_parsed_args(self, annotation: object) -> Union[str, List[str]]:
"渡されたオブジェクトから設定項目の型の名前を判定し返します。"
if isinstance(annotation, Option):
annotation = annotation.annotation
if annotation in self.SUPPORTED_ANNOTATIONS:
return annotation.__name__
elif getattr(annotation, "__name__", "") in self.SUPPORTED_DISCORD_ANNOTATIONS:
return annotation.__name__.replace("Text", "").replace("Voice", "") \
.replace("Stage", "").replace("Thread", "Channel").replace("User", "Member")
elif (origin := get_origin(annotation)) == Union:
return ["Union"] + [self.get_parsed_args(arg) for arg in get_args(annotation)]
elif origin == Literal:
return ["Literal"] + list(get_args(annotation))
else:
return "str"
def reset(self):
self.data = {}
def add_command(self, command: commands.Command) -> None:
self.data[command.qualified_name] = (command, command.callback._setting)
@commands.Cog.listener()
async def on_command_add(self, command: commands.Command):
if hasattr(command.callback, "_setting"):
self.add_command(command)
@commands.Cog.listener("on_update_api")
async def update(self):
"APIにBotにあるコマンドの設定のJSONデータを送る。"
# バックエンド用のデータを作る。
data = defaultdict(dict)
for command, setting in self.data.values():
kwargs = {
parameter.name: (
ant := self.get_parsed_args(parameter.annotation),
"" if parameter.default == parameter.empty
else parameter.default,
parameter.kind == parameter.KEYWORD_ONLY \
and ant == "str"
) for parameter in command.clean_params.values()
}
kwargs.update({
key: (self.get_parsed_args(value), "", False)
for key, value in setting.kwargs.items()
})
data[setting.mode][command.qualified_name] = {
"help": (
self.bot.cogs["BotGeneral"].get_help_url(*setting.help_command)
if setting.help_command
else self.bot.cogs["BotGeneral"].get_command_url(command)
), "kwargs": kwargs, "sub_category": getattr(
command.parent, "name", None
), "headding": (
command.extras.get("headding")
or command.__original_kwargs__.get("headding")
), "display_name": setting.name or command.name
}
        # Send the data.
async with self.bot.session.post(
f"{self.bot.get_url()}/api/settings/commands/update",
json=data
) as r:
self.bot.print("[SettingManager]", "[Updater]", time(), await r.text())
self.before = data
@websocket.websocket("/api/settings/websocket", auto_connect=True, reconnect=True)
async def setting_websocket(self, ws: websocket.WebSocket, _):
        # Lets the bot react immediately when a user updates a setting from the dashboard.
await ws.send("on_ready")
@setting_websocket.event("on_post")
async def post(self, ws: websocket.WebSocket, data: CommandRunData):
if isinstance(data, dict):
self.bot.loop.create_task(
self.run_command(self.data[data["command"]][0], data),
name=f"UpdateSetting[{data.get('command')}]: {data.get('user_id')}"
)
await ws.send("on_posted")
@setting_websocket.event("on_posted")
async def posted(self, ws: websocket.WebSocket, _):
await self.setting_websocket(ws, None)
async def run_command(self, command: commands.Command, data: CommandRunData):
"コマンドを走らせます。"
ctx = None
try:
            # Assemble the command message.
content = f"{self.bot.command_prefix[0]}{command.qualified_name}"
for parameter in command.clean_params.values():
tentative = f' "{data["kwargs"].get(parameter.name, "")}"'
if parameter.kind == parameter.KEYWORD_ONLY:
tentative = f" {tentative[2:-1]}"
content += tentative
            # Check that the command can be executed, then run it with our own Context.
ctx = Context(self, data, command)
ctx.content = content
ctx._state = self.bot.http
parsed_ctx = await self.bot.get_context(ctx)
ctx.view = parsed_ctx.view
ctx.args, ctx.kwargs = parsed_ctx.args, parsed_ctx.kwargs
for name in dir(parsed_ctx):
if not name.startswith(
(
"__", "send", "reply", "trigger", "typing", "created",
"channel", "message", "guild"
)
):
setattr(ctx, name, getattr(parsed_ctx, name))
return await self.bot.invoke(ctx.message)
except Exception as e:
if ctx:
self.bot.dispatch("command_error", ctx, e)
def cog_unload(self):
if hasattr(self, "_session"):
self.bot.loop.create_task(self._session.close())
def setup(bot):
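    # NOTE: the early return below appears to intentionally skip loading this cog,
    # so bot.add_cog(SettingManager(bot)) is never reached.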
return
bot.add_cog(SettingManager(bot))
|
import time
from threading import Event, RLock
from mltk.utils import hexdump
from .device_interface import DeviceInterface, MAX_BUFFER_SIZE
WAIT_FOREVER = 4294967.0
class JLinkDataStream(object):
"""JLink data stream"""
def __init__(
self,
name:str,
mode:str,
ifc: DeviceInterface,
stream_context: dict
):
self._name = name
self._mode = mode
self._ifc = ifc
self._context = stream_context
self._is_opened = Event()
self._buffer = bytearray()
self._buffer_lock = RLock()
self._buffer_event = Event()
self._notify_event = None
self._max_read_size = -1
self._timeout = -1
self._end_time = -1
self._requires_processing = False
self._id_mask = (1 << stream_context['id'])
self._is_opened.set()
@property
def name(self) -> str:
"""The name of the opened stream"""
return self._name
@property
def mode(self) -> str:
"""The mode the for which the stream was opened, r or w"""
return self._mode
@property
def is_opened(self) -> bool:
"""If the stream is opened to the device"""
return self._is_opened.is_set()
@property
def max_read_size(self) -> int:
"""The maximum amount of data to read
Set to -1 to disable limit
After each read, this value will decrement by the amount of data read.
        Once this value reaches zero, it must be reset, otherwise subsequent reads
will always return zero.
"""
return self._max_read_size
@max_read_size.setter
def max_read_size(self, val:int):
if val is None:
val = -1
self._max_read_size = val
@property
def timeout(self) -> float:
"""The maximum about of time in seconds to read or write data.
This is only used if the 'timeout' argument to the read() or write() APIs is None
Set to -1 to never timeout
"""
return self._timeout
@timeout.setter
def timeout(self, val: float):
if val is None:
val = -1
self._timeout = val
@property
def end_time(self) -> float:
"""The absolute time in seconds to timeout reading or writing
Set to None to disable.
        Once time.time() exceeds end_time, the read() and write() APIs return.
"""
return self._end_time
@end_time.setter
def end_time(self, val:float):
if val is None:
val = -1
self._end_time = val
@property
def buffer_used(self) -> int:
"""The amount of the device data buffer used
If the stream was opened for reading then this
is the amount of data that was previous received from
the device and is waiting to be read by the python script.
If the stream was opened for writing, then this is
the amount of data that was previously written and is
pending to be sent to the device.
"""
with self._buffer_lock:
retval = len(self._buffer)
return retval
@property
def buffer_unused(self) -> int:
"""The amount of the device data buffer that is available"""
with self._buffer_lock:
retval = MAX_BUFFER_SIZE - len(self._buffer)
return retval
@property
def read_data_available(self) -> int:
"""The amount of data that is ready to be read by the python script"""
return self.buffer_used
@property
def write_data_available(self) -> int:
"""The amount of data that can immediately be written"""
return self.buffer_unused
    @property
    def buffer_hexdump(self) -> str:
        """Return a hexdump string of (up to) the first 64 buffered bytes"""
        length = min(64, self.buffer_used)
        return hexdump.hexdump(self._buffer[:length], result='return')
def close(self):
"""Close the data stream with the device"""
if self._is_opened.is_set():
self._is_opened.clear()
self._buffer_event.set()
self._ifc.close(self._name)
def read(self, max_size:int = None, timeout:float=None) -> bytes:
"""Read data from data stream opened for reading
        NOTE: This only returns the data that is immediately available.
The amount of data returned may be less than max_size.
"""
if self.mode != 'r':
raise Exception(f'Stream: {self.name} not opened for reading')
timeout = self._get_timeout(timeout)
max_size = self._get_max_size(max_size)
start_time = time.time()
while True:
self._buffer_event.clear()
if not self.is_opened:
raise Exception(f'Stream: {self.name} closed')
if max_size == 0:
return None
bufsize = min(self.read_data_available, max_size)
if bufsize > 0:
retval = self._consume_buffer(bufsize)
self._notify_event.set()
return bytes(retval)
elapsed = (time.time() - start_time)
if elapsed >= timeout:
return None
if self._end_time > 0:
time_remaining = self._end_time - time.time()
if time_remaining <= 0:
return None
else:
time_remaining = WAIT_FOREVER
self._buffer_event.wait(min(min(timeout - elapsed, time_remaining), 0.100))
def read_all(self, amount:int, timeout:float=None, initial_timeout:float=None, throw_exception=True) -> bytes:
"""The the specified amount of data"""
if initial_timeout is None:
initial_timeout = timeout
retval = bytearray()
remaining = amount
while remaining > 0:
chunk_timeout = initial_timeout if len(retval) == 0 else timeout
chunk = self.read(max_size=remaining, timeout=chunk_timeout)
if chunk is None:
break
remaining -= len(chunk)
retval.extend(chunk)
if len(retval) != amount and throw_exception:
raise Exception('Failed to read all data')
return bytes(retval)
def write(self, data:bytes, timeout:float=None, flush=False) -> int:
"""Write data to a data stream opened for writing"""
if self.mode != 'w':
raise Exception(f'Stream: {self.name} not opened for writing')
timeout = self._get_timeout(timeout)
total_write_len = 0
start_time = time.time()
while len(data) > 0:
self._buffer_event.clear()
if not self.is_opened:
raise Exception(f'Stream: {self.name} closed')
bufsize = min(self.write_data_available, len(data))
if bufsize > 0:
self._populate_buffer(data[:bufsize])
data = data[bufsize:]
total_write_len += bufsize
self._requires_processing = True
self._notify_event.set()
if len(data) == 0:
break
elapsed = (time.time() - start_time)
if elapsed >= timeout:
break
if self._end_time > 0:
time_remaining = self._end_time - time.time()
if time_remaining <= 0:
break
else:
time_remaining = WAIT_FOREVER
self._buffer_event.wait(min(min(timeout - elapsed, time_remaining), 0.100))
if flush:
self.flush(timeout=timeout)
return total_write_len
def flush(self, timeout:float=None):
"""Wait while any pending data is transferred to/from the device"""
timeout = self._get_timeout(timeout)
start_time = time.time()
while self.buffer_used > 0:
self._buffer_event.clear()
if not self.is_opened:
raise Exception(f'Stream: {self.name} closed')
elapsed = (time.time() - start_time)
if elapsed >= timeout:
raise Exception('Time-out waiting for buffer to flush')
if self._end_time > 0:
time_remaining = self._end_time - time.time()
if time_remaining <= 0:
break
else:
time_remaining = WAIT_FOREVER
self._buffer_event.wait(min(min(timeout - elapsed, time_remaining), 0.100))
def _set_notify_event(self, event):
self._notify_event = event
def _process(self, buffer_status_mask):
if not self._requires_processing and (buffer_status_mask & self._id_mask) == 0:
return
self._requires_processing = False
if self.mode == 'r':
max_read_len = self.buffer_unused
if max_read_len > 0:
data = self._ifc.read(self._context, max_read_len)
if data:
self._populate_buffer(data)
else:
self._requires_processing = True
elif self.mode == 'w':
write_len = self._ifc.write(self._context, self._buffer)
if write_len:
self._consume_buffer(write_len)
if self.buffer_used > 0:
self._requires_processing = True
def _consume_buffer(self, size) -> bytes:
with self._buffer_lock:
retval = self._buffer[:size]
self._buffer = self._buffer[size:]
if self._max_read_size != -1:
if size <= self._max_read_size:
self._max_read_size -= size
else:
self._max_read_size = 0
if self.mode == 'w':
self._buffer_event.set()
return retval
def _populate_buffer(self, data):
with self._buffer_lock:
if isinstance(data, str):
data = data.encode()
self._buffer.extend(data)
if self.mode == 'r':
self._buffer_event.set()
def _get_timeout(self, timeout:float) -> float:
if timeout is None:
timeout = self._timeout
if timeout == -1:
timeout = WAIT_FOREVER
return timeout
def _get_max_size(self, max_size:int) -> int:
if max_size is None:
max_size = self._max_read_size
if max_size == -1:
max_size = MAX_BUFFER_SIZE
return max_size
def __iter__(self):
return self
def __next__(self):
retval = self.read()
if retval is None:
raise StopIteration # Done iterating.
return retval
def __enter__(self):
return self
def __exit__(self, dtype, value, traceback):
self.close()
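# Usage sketch (hypothetical: `stream` is assumed to be an already-opened JLinkDataStream):
#   with stream:                                   # closes the stream on exit
#       header = stream.read_all(16, timeout=5.0)  # block until 16 bytes arrive or time out
#       for chunk in stream:                       # iterate until read() returns None
#           handle(chunk)                          # handle() is a placeholder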
|
import rstools
from tkinter import *
from functools import partial
class mainWindow:
def __init__(self,master) -> None:
self.master = master
self.constraint = IntVar()
self.constring = []
Label(self.master , text="Revised Simplex Method", font=("Arial",25)).pack()
Label(self.master,text="select number of constraint").pack()
Scale(self.master,variable=self.constraint,from_=2,to=4,orient=HORIZONTAL).pack()
Button(self.master,text="Submit",command=self.next).pack()
def next(self):
level1 = Toplevel()
level1.geometry("400x300")
a = self.constraint.get()
yy1 = 5
for i in range(a+1):
if i==0:
l1 = Label(level1,text="Z")
l1.place(x=120,y=yy1)
else:
l2 = Label(level1,text="Constraint"+str(i))
l2.place(x=70,y=yy1)
yy1+=20
yy = 5
for i in range(a+1):
va = StringVar()
e = Entry(level1,textvariable=va)
e.place(x=135,y=yy)
self.constring.append(va)
yy+=20
finalanswer = partial(self.finalanswer,level1)
Button(level1,text="calculate",command=finalanswer).place(x=225,y=20*(a+2))
level1.mainloop()
def finalanswer(self,level1):
Decodedstring = []
for i in self.constring:
Decodedstring.append(i.get())
a = len(Decodedstring)
cj = rstools.optimizationFunction(Decodedstring[0])
A = []
b = []
for i in range(1,a):
A.append(rstools.constraintsFunction(Decodedstring[i])[:-1])
b.append([rstools.constraintsFunction(Decodedstring[i])[-1]])
cb = [[0]*(a-1)]
cb = rstools.transpose(cb)
B = rstools.B(a-1)
print(A,B,b,cb,cj)
fans = rstools.answer(A,B,b,cb,cj)
fans0 = fans[0]
fans1 = fans[1]
yy = 150
a = rstools.variables(Decodedstring[0])
for i in range(len(fans0)):
Label(level1,text=a[fans1[i]]+" ="+str(fans0[i][0])).place(x=200,y=yy)
yy+=20
if __name__ == "__main__":
app = Tk()
app.title("Linear Programming Problem")
app.geometry("500x400")
win = mainWindow(app)
app.mainloop()
|
import struct
from hsdecomp import ptrutil
def read_arg_pattern(settings, address):
num_args = read_num_args(settings, address)
func_type = read_function_type(settings, address)
assert num_args >= len(func_type)
return func_type + 'v' * (num_args - len(func_type))
def read_num_args(settings, address):
return ptrutil.read_half_word(settings, settings.text_offset + address - settings.rt.halfword.size*5)
def read_function_type(settings, address):
type_table = {
3: '',
4: 'n',
5: 'p',
12: 'nn',
13: 'np',
14: 'pn',
15: 'pp',
16: 'nnn',
17: 'nnp',
18: 'npn',
19: 'npp',
20: 'pnn',
21: 'pnp',
22: 'ppn',
23: 'ppp',
24: 'pppp',
25: 'ppppp',
26: 'pppppp',
27: 'ppppppp',
28: 'pppppppp'
}
type = ptrutil.read_half_word(settings, settings.text_offset + address - settings.rt.halfword.size*6)
if type >= 12 and settings.version < (7, 8, 0):
# Introduction of vector arguments
type += 3
if type in type_table:
return type_table[type]
elif type == 0:
bitmap = ptrutil.read_word(settings, settings.text_offset + address - settings.rt.word.size*5)
size = bitmap & (settings.word.size - 1)
bits = bitmap >> settings.word.lg_size
ret = ''
for i in range(size):
if bits % 2 == 0:
ret += 'p'
else:
ret += 'n'
bits //= 2
return ret
else:
# TODO: Read large bitmaps
assert False, "unknown function type"
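# Worked example of the small-bitmap decoding above (an illustrative sketch,
# not part of hsdecomp itself). Assuming an 8-byte word (lg_size == 3), the
# bitmap 0b10011 encodes size == 3 and bits == 0b10, which decodes to the
# argument pattern 'pnp'.
def _example_decode_small_bitmap(bitmap=0b10011, word_size=8, word_lg_size=3):
    size = bitmap & (word_size - 1)
    bits = bitmap >> word_lg_size
    pattern = ''
    for _ in range(size):
        pattern += 'p' if bits % 2 == 0 else 'n'
        bits //= 2
    return pattern  # 'pnp' for the defaults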
def read_closure_type(settings, address):
type_table = {
1: 'constructor',
2: 'constructor (1 ptr, 0 nonptr)',
3: 'constructor (0 ptr, 1 nonptr)',
4: 'constructor (2 ptr, 0 nonptr)',
5: 'constructor (1 ptr, 1 nonptr)',
6: 'constructor (0 ptr, 2 nonptr)',
7: 'constructor (static)',
8: 'constructor (no CAF, static)',
9: 'function',
10: 'function (1 ptr, 0 nonptr)',
11: 'function (0 ptr, 1 nonptr)',
12: 'function (2 ptr, 0 nonptr)',
13: 'function (1 ptr, 1 nonptr)',
14: 'function (0 ptr, 2 nonptr)',
15: 'function (static)',
16: 'thunk',
17: 'thunk (1 ptr, 0 nonptr)',
18: 'thunk (0 ptr, 1 nonptr)',
19: 'thunk (2 ptr, 0 nonptr)',
20: 'thunk (1 ptr, 1 nonptr)',
21: 'thunk (0 ptr, 2 nonptr)',
22: 'thunk (static)',
23: 'selector',
28: 'indirection',
29: 'indirection (permanent)',
30: 'indirection (static)'
}
type = ptrutil.read_half_word(settings, settings.text_offset + address - settings.rt.halfword.size*2)
if type in type_table:
return type_table[type]
else:
return 'unknown: ' + str(type)
|
"""Utilities functions for setting up test fixtures."""
import tempfile
from pathlib import Path
import shutil
import json
from uuid import uuid4
import pandas as pd
import numpy as np
from iblutil.io.parquet import uuid2np, np2str
import one.params
def set_up_env(use_temp_cache=True) -> tempfile.TemporaryDirectory:
"""
Create a temporary directory and copy cache fixtures over.
Parameters
----------
use_temp_cache : bool
If True, copies REST cache fixtures to the temporary directory, otherwise they are copied
to the directory returned by one.params.get_params_dir
Returns
-------
tempfile.TemporaryDirectory
The temporary directory containing the test ONE caches
"""
fixture = Path(__file__).parent.joinpath('fixtures')
tempdir = tempfile.TemporaryDirectory()
# Copy cache files to temporary directory
for cache_file in ('sessions', 'datasets'):
filename = shutil.copy(fixture / f'{cache_file}.pqt', tempdir.name)
assert Path(filename).exists()
# Copy cached rest responses
rest_cache_location = Path(tempdir.name) / '.one' if use_temp_cache else None
setup_rest_cache(rest_cache_location)
return tempdir
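# Usage note (sketch): keep the returned TemporaryDirectory alive for the
# duration of the test and clean it up afterwards, e.g.
#
#     tempdir = set_up_env()
#     ...  # point the ONE cache_dir at tempdir.name and run the tests
#     tempdir.cleanup()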
def setup_rest_cache(param_dir=None):
"""Copy REST cache fixtures to the .one parameter directory.
Parameters
----------
param_dir : str, pathlib.Path
The location of the ONE params directory (e.g. ~/.one)
"""
fixture = Path(__file__).parent.joinpath('fixtures')
path_parts = ('.rest', 'test.alyx.internationalbrainlab.org', 'https')
rest_cache_dir = Path(param_dir or one.params.get_params_dir()).joinpath(*path_parts)
# Ensure empty
shutil.rmtree(rest_cache_dir, ignore_errors=True)
rest_cache_dir.mkdir(parents=True, exist_ok=True)
# Copy over fixtures
for file in fixture.joinpath('rest_responses').glob('*'):
filename = shutil.copy(file, rest_cache_dir)
assert Path(filename).exists()
def create_file_tree(one):
"""Touch all the files in the datasets table.
Parameters
----------
one : one.api.One
An instance of One containing cache tables to use.
"""
# Create dset files from cache
for session_path, rel_path in one._cache.datasets[['session_path', 'rel_path']].values:
filepath = Path(one.cache_dir).joinpath(session_path, rel_path)
filepath.parent.mkdir(exist_ok=True, parents=True)
filepath.touch()
def setup_test_params(token=False, cache_dir=None):
"""
Copies cache parameter fixture to .one directory.
Parameters
----------
token : bool
If true, save a token file so that client doesn't hit auth endpoint
cache_dir : str, pathlib.Path
The cache_dir to save
"""
params_dir = Path(one.params.get_params_dir())
fixture = Path(__file__).parent.joinpath('fixtures')
test_pars = '.test.alyx.internationalbrainlab.org'
if not list(params_dir.glob(test_pars)):
filename = shutil.copy(fixture / 'params' / test_pars, params_dir)
assert Path(filename).exists()
# Add to cache map
map_file = params_dir / '.caches'
if not map_file.exists():
        shutil.copy(fixture / 'params' / '.caches', map_file)
        assert map_file.exists()
with open(map_file, 'r+') as f:
data = json.load(f)
data['CLIENT_MAP'][test_pars[1:]] = str(cache_dir or '')
f.seek(0)
json.dump(data, f)
f.truncate()
# Add token to file so db not hit
if token:
pars = one.params.get(client=test_pars[1:])
if not getattr(pars, 'TOKEN', False):
one.params.save(pars.set('TOKEN', {'token': 'T0k3N'}), test_pars[1:])
def revisions_datasets_table(collections=('', 'alf/probe00', 'alf/probe01'),
revisions=('', '2020-01-08', '2021-07-06'),
object='spikes',
                             attributes=('times', 'waveforms')):
"""Returns a datasets cache DataFrame containing datasets with revision folders.
As there are no revised datasets on the test databases, this function acts as a fixture for
testing the filtering of datasets by a revision.
Parameters
----------
collections : tuple
A list of collections
revisions : tuple
A list of revisions
object : str
An ALF object
attributes : tuple
A list of ALF attributes
Returns
-------
pd.DataFrame
A datasets cache table containing datasets made from the input names
"""
rel_path = []
for attr in attributes:
for collec in collections:
for rev in (f'#{x}#' if x else '' for x in revisions):
rel_path.append('/'.join(x for x in (collec, rev, f'{object}.{attr}.npy') if x))
ids = uuid2np([uuid4() for _ in range(len(rel_path))])
eid_0, eid_1 = uuid2np([uuid4()])[0]
return pd.DataFrame(data={
'rel_path': rel_path,
'session_path': 'subject/1900-01-01/001',
'file_size': None,
'hash': None,
'eid_0': eid_0,
'eid_1': eid_1
}, index=[ids[:, 0], ids[:, 1]])
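# For example, with the default arguments the generated rel_path values include
# 'spikes.times.npy', 'alf/probe00/#2020-01-08#/spikes.times.npy' and
# 'alf/probe01/#2021-07-06#/spikes.waveforms.npy'.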
def create_schema_cache(param_dir=None):
"""Save REST cache file for docs/ endpoint.
Ensures the database isn't hit when the rest_schemas property is accessed.
Parameters
----------
param_dir : str, pathlib.Path
The location of the parameter directory. If None, the default one is used.
"""
actions = dict.fromkeys(['list', 'read', 'create', 'update', 'partial_update', 'delete'])
endpoints = ['cache', 'dataset-types', 'datasets', 'downloads', 'insertions', 'sessions']
path_parts = ('.rest', 'test.alyx.internationalbrainlab.org', 'https')
rest_cache_dir = Path(param_dir or one.params.get_params_dir()).joinpath(*path_parts)
    with open(rest_cache_dir / '1baff95c2d0e31059720a3716ad5b5a34b61a207', 'w') as f:
json.dump({k: actions for k in endpoints}, f)
def get_file(root: str, str_id: str) -> str:
"""
A stub function for iblutil.io.params.getfile. Allows the injection of a different param dir.
Parameters
----------
root : str, pathlib.Path
The root directory of the new parameters
str_id : str
The parameter string identifier
Returns
-------
str
The parameter file path
"""
parts = ['.' + p if not p.startswith('.') else p for p in Path(str_id).parts]
pfile = Path(root, *parts).as_posix()
return pfile
def caches_int2str(caches):
"""Convert int ids to str ids for cache tables.
Parameters
----------
caches : Bunch
A bunch of cache tables (from One._cache)
"""
for table in ('sessions', 'datasets'):
# Set integer uuids to NaN
cache = caches[table].reset_index()
int_cols = cache.filter(regex=r'_\d{1}$').columns
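        # Integer ids are stored as pairs of int64 columns (e.g. 'id_0'/'id_1');
        # rebuild the string UUID for each pair, then blank the int columns.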
for i in range(0, len(int_cols), 2):
name = int_cols.values[i].rsplit('_', 1)[0]
cache[name] = np2str(cache[int_cols[i:i + 2]])
cache[int_cols] = np.nan
caches[table] = cache.set_index('id')
|
#!/usr/bin/env python
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from google.api_core.client_options import ClientOptions
# [START datalabeling_export_data_beta]
def export_data(dataset_resource_name, annotated_dataset_resource_name,
export_gcs_uri):
"""Exports a dataset from the given Google Cloud project."""
from google.cloud import datalabeling_v1beta1 as datalabeling
client = datalabeling.DataLabelingServiceClient()
# [END datalabeling_export_data_beta]
    # If provided, use a test endpoint - this will prevent tests on this
    # snippet from triggering any action by a real human
if 'DATALABELING_ENDPOINT' in os.environ:
opts = ClientOptions(api_endpoint=os.getenv('DATALABELING_ENDPOINT'))
client = datalabeling.DataLabelingServiceClient(client_options=opts)
# [START datalabeling_export_data_beta]
gcs_destination = datalabeling.types.GcsDestination(
output_uri=export_gcs_uri, mime_type='text/csv')
output_config = datalabeling.types.OutputConfig(
gcs_destination=gcs_destination)
response = client.export_data(
dataset_resource_name,
annotated_dataset_resource_name,
output_config
)
print('Dataset ID: {}\n'.format(response.result().dataset))
print('Output config:')
print('\tGcs destination:')
print('\t\tOutput URI: {}\n'.format(
response.result().output_config.gcs_destination.output_uri))
# [END datalabeling_export_data_beta]
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter
)
parser.add_argument(
'--dataset-resource-name',
help='Dataset resource name. Required.',
required=True
)
parser.add_argument(
'--annotated-dataset-resource-name',
help='Annotated Dataset resource name. Required.',
required=True
)
parser.add_argument(
'--export-gcs-uri',
help='The export GCS URI. Required.',
required=True
)
args = parser.parse_args()
export_data(
args.dataset_resource_name,
args.annotated_dataset_resource_name,
args.export_gcs_uri
)
|
from django.conf.urls import url
from django.contrib import admin
from mixpanel_django_graphos.views import ReportActivityView
admin.site.index_template = 'admin/index.html'
admin.autodiscover()
def get_admin_urls(urls):
"""
Extend admin to include additional urls
"""
def get_urls():
my_urls = [url(r'^activity-report/$', admin.site.admin_view(
ReportActivityView.as_view()), name='activity-report')]
return my_urls + urls
return get_urls
admin_urls = get_admin_urls(admin.site.get_urls())
admin.site.get_urls = admin_urls
urlpatterns = [
url(r'^admin/', admin.site.urls),
]
|
import pytest
import numpy as np
from numpy.testing import assert_allclose
import keras
from keras.utils.test_utils import layer_test
from keras.utils.test_utils import keras_test
from keras.layers import recurrent
from keras.layers import embeddings
from keras.models import Sequential
from keras.models import Model
from keras.engine.topology import Input
from keras.layers.core import Masking
from keras import regularizers
from keras import backend as K
num_samples, timesteps, embedding_dim, units = 2, 5, 4, 3
embedding_num = 12
@keras_test
def rnn_test(f):
"""
All the recurrent layers share the same interface,
so we can run through them with a single function.
"""
f = keras_test(f)
return pytest.mark.parametrize('layer_class', [
recurrent.SimpleRNN,
recurrent.GRU,
recurrent.LSTM
])(f)
@rnn_test
def test_return_sequences(layer_class):
layer_test(layer_class,
kwargs={'units': units,
'return_sequences': True},
input_shape=(num_samples, timesteps, embedding_dim))
@rnn_test
def test_dynamic_behavior(layer_class):
layer = layer_class(units, input_shape=(None, embedding_dim))
model = Sequential()
model.add(layer)
model.compile('sgd', 'mse')
x = np.random.random((num_samples, timesteps, embedding_dim))
y = np.random.random((num_samples, units))
model.train_on_batch(x, y)
@rnn_test
def test_stateful_invalid_use(layer_class):
layer = layer_class(units,
stateful=True,
batch_input_shape=(num_samples,
timesteps,
embedding_dim))
model = Sequential()
model.add(layer)
model.compile('sgd', 'mse')
x = np.random.random((num_samples * 2, timesteps, embedding_dim))
y = np.random.random((num_samples * 2, units))
with pytest.raises(ValueError):
model.fit(x, y)
with pytest.raises(ValueError):
model.predict(x, batch_size=num_samples + 1)
@rnn_test
@pytest.mark.skipif((K.backend() == 'cntk'),
reason='Not yet supported.')
def test_dropout(layer_class):
for unroll in [True, False]:
layer_test(layer_class,
kwargs={'units': units,
'dropout': 0.1,
'recurrent_dropout': 0.1,
'unroll': unroll},
input_shape=(num_samples, timesteps, embedding_dim))
# Test that dropout is applied during training
x = K.ones((num_samples, timesteps, embedding_dim))
layer = layer_class(units, dropout=0.5, recurrent_dropout=0.5,
input_shape=(timesteps, embedding_dim))
y = layer(x)
assert y._uses_learning_phase
y = layer(x, training=True)
assert not getattr(y, '_uses_learning_phase')
# Test that dropout is not applied during testing
x = np.random.random((num_samples, timesteps, embedding_dim))
layer = layer_class(units, dropout=0.5, recurrent_dropout=0.5,
unroll=unroll,
input_shape=(timesteps, embedding_dim))
model = Sequential([layer])
assert model.uses_learning_phase
y1 = model.predict(x)
y2 = model.predict(x)
assert_allclose(y1, y2)
@rnn_test
def test_statefulness(layer_class):
model = Sequential()
model.add(embeddings.Embedding(embedding_num, embedding_dim,
mask_zero=True,
input_length=timesteps,
batch_input_shape=(num_samples, timesteps)))
layer = layer_class(units, return_sequences=False,
stateful=True,
weights=None)
model.add(layer)
model.compile(optimizer='sgd', loss='mse')
out1 = model.predict(np.ones((num_samples, timesteps)))
assert(out1.shape == (num_samples, units))
# train once so that the states change
model.train_on_batch(np.ones((num_samples, timesteps)),
np.ones((num_samples, units)))
out2 = model.predict(np.ones((num_samples, timesteps)))
# if the state is not reset, output should be different
assert(out1.max() != out2.max())
# check that output changes after states are reset
# (even though the model itself didn't change)
layer.reset_states()
out3 = model.predict(np.ones((num_samples, timesteps)))
assert(out2.max() != out3.max())
# check that container-level reset_states() works
model.reset_states()
out4 = model.predict(np.ones((num_samples, timesteps)))
assert_allclose(out3, out4, atol=1e-5)
# check that the call to `predict` updated the states
out5 = model.predict(np.ones((num_samples, timesteps)))
assert(out4.max() != out5.max())
@rnn_test
def test_masking_correctness(layer_class):
# Check masking: output with left padding and right padding
# should be the same.
model = Sequential()
model.add(embeddings.Embedding(embedding_num, embedding_dim,
mask_zero=True,
input_length=timesteps,
batch_input_shape=(num_samples, timesteps)))
layer = layer_class(units, return_sequences=False)
model.add(layer)
model.compile(optimizer='sgd', loss='mse')
left_padded_input = np.ones((num_samples, timesteps))
left_padded_input[0, :1] = 0
left_padded_input[1, :2] = 0
out6 = model.predict(left_padded_input)
right_padded_input = np.ones((num_samples, timesteps))
right_padded_input[0, -1:] = 0
right_padded_input[1, -2:] = 0
out7 = model.predict(right_padded_input)
assert_allclose(out7, out6, atol=1e-5)
@rnn_test
def test_implementation_mode(layer_class):
for mode in [1, 2]:
# Without dropout
layer_test(layer_class,
kwargs={'units': units,
'implementation': mode},
input_shape=(num_samples, timesteps, embedding_dim))
# With dropout
layer_test(layer_class,
kwargs={'units': units,
'implementation': mode,
'dropout': 0.1,
'recurrent_dropout': 0.1},
input_shape=(num_samples, timesteps, embedding_dim))
# Without bias
layer_test(layer_class,
kwargs={'units': units,
'implementation': mode,
'use_bias': False},
input_shape=(num_samples, timesteps, embedding_dim))
@rnn_test
def test_regularizer(layer_class):
layer = layer_class(units, return_sequences=False, weights=None,
input_shape=(timesteps, embedding_dim),
kernel_regularizer=regularizers.l1(0.01),
recurrent_regularizer=regularizers.l1(0.01),
bias_regularizer='l2')
layer.build((None, None, embedding_dim))
assert len(layer.losses) == 3
assert len(layer.cell.losses) == 3
layer = layer_class(units, return_sequences=False, weights=None,
input_shape=(timesteps, embedding_dim),
activity_regularizer='l2')
assert layer.activity_regularizer
x = K.variable(np.ones((num_samples, timesteps, embedding_dim)))
layer(x)
assert len(layer.cell.get_losses_for(x)) == 0
assert len(layer.get_losses_for(x)) == 1
@rnn_test
def test_trainability(layer_class):
layer = layer_class(units)
layer.build((None, None, embedding_dim))
assert len(layer.weights) == 3
assert len(layer.trainable_weights) == 3
assert len(layer.non_trainable_weights) == 0
layer.trainable = False
assert len(layer.weights) == 3
assert len(layer.trainable_weights) == 0
assert len(layer.non_trainable_weights) == 3
layer.trainable = True
assert len(layer.weights) == 3
assert len(layer.trainable_weights) == 3
assert len(layer.non_trainable_weights) == 0
@keras_test
def test_masking_layer():
    ''' This test is based on a previously failing issue here:
https://github.com/fchollet/keras/issues/1567
'''
inputs = np.random.random((6, 3, 4))
targets = np.abs(np.random.random((6, 3, 5)))
targets /= targets.sum(axis=-1, keepdims=True)
model = Sequential()
model.add(Masking(input_shape=(3, 4)))
model.add(recurrent.SimpleRNN(units=5, return_sequences=True, unroll=False))
model.compile(loss='categorical_crossentropy', optimizer='adam')
model.fit(inputs, targets, epochs=1, batch_size=100, verbose=1)
model = Sequential()
model.add(Masking(input_shape=(3, 4)))
model.add(recurrent.SimpleRNN(units=5, return_sequences=True, unroll=True))
model.compile(loss='categorical_crossentropy', optimizer='adam')
model.fit(inputs, targets, epochs=1, batch_size=100, verbose=1)
@rnn_test
def test_from_config(layer_class):
stateful_flags = (False, True)
for stateful in stateful_flags:
l1 = layer_class(units=1, stateful=stateful)
l2 = layer_class.from_config(l1.get_config())
assert l1.get_config() == l2.get_config()
@rnn_test
def test_specify_initial_state_keras_tensor(layer_class):
num_states = 2 if layer_class is recurrent.LSTM else 1
# Test with Keras tensor
inputs = Input((timesteps, embedding_dim))
initial_state = [Input((units,)) for _ in range(num_states)]
layer = layer_class(units)
if len(initial_state) == 1:
output = layer(inputs, initial_state=initial_state[0])
else:
output = layer(inputs, initial_state=initial_state)
assert initial_state[0] in layer.inbound_nodes[0].input_tensors
model = Model([inputs] + initial_state, output)
model.compile(loss='categorical_crossentropy', optimizer='adam')
inputs = np.random.random((num_samples, timesteps, embedding_dim))
initial_state = [np.random.random((num_samples, units))
for _ in range(num_states)]
targets = np.random.random((num_samples, units))
model.fit([inputs] + initial_state, targets)
@rnn_test
def test_specify_initial_state_non_keras_tensor(layer_class):
num_states = 2 if layer_class is recurrent.LSTM else 1
# Test with non-Keras tensor
inputs = Input((timesteps, embedding_dim))
initial_state = [K.random_normal_variable((num_samples, units), 0, 1)
for _ in range(num_states)]
layer = layer_class(units)
output = layer(inputs, initial_state=initial_state)
model = Model(inputs, output)
model.compile(loss='categorical_crossentropy', optimizer='adam')
inputs = np.random.random((num_samples, timesteps, embedding_dim))
targets = np.random.random((num_samples, units))
model.fit(inputs, targets)
@rnn_test
def test_reset_states_with_values(layer_class):
num_states = 2 if layer_class is recurrent.LSTM else 1
layer = layer_class(units, stateful=True)
layer.build((num_samples, timesteps, embedding_dim))
layer.reset_states()
assert len(layer.states) == num_states
assert layer.states[0] is not None
np.testing.assert_allclose(K.eval(layer.states[0]),
np.zeros(K.int_shape(layer.states[0])),
atol=1e-4)
state_shapes = [K.int_shape(state) for state in layer.states]
values = [np.ones(shape) for shape in state_shapes]
if len(values) == 1:
values = values[0]
layer.reset_states(values)
np.testing.assert_allclose(K.eval(layer.states[0]),
np.ones(K.int_shape(layer.states[0])),
atol=1e-4)
# Test fit with invalid data
with pytest.raises(ValueError):
layer.reset_states([1] * (len(layer.states) + 1))
@rnn_test
def test_initial_states_as_other_inputs(layer_class):
num_states = 2 if layer_class is recurrent.LSTM else 1
# Test with Keras tensor
main_inputs = Input((timesteps, embedding_dim))
initial_state = [Input((units,)) for _ in range(num_states)]
inputs = [main_inputs] + initial_state
layer = layer_class(units)
output = layer(inputs)
assert initial_state[0] in layer.inbound_nodes[0].input_tensors
model = Model(inputs, output)
model.compile(loss='categorical_crossentropy', optimizer='adam')
main_inputs = np.random.random((num_samples, timesteps, embedding_dim))
initial_state = [np.random.random((num_samples, units))
for _ in range(num_states)]
targets = np.random.random((num_samples, units))
model.train_on_batch([main_inputs] + initial_state, targets)
@rnn_test
def test_specify_state_with_masking(layer_class):
    ''' This test is based on a previously failing issue here:
https://github.com/fchollet/keras/issues/1567
'''
num_states = 2 if layer_class is recurrent.LSTM else 1
inputs = Input((timesteps, embedding_dim))
_ = Masking()(inputs)
initial_state = [Input((units,)) for _ in range(num_states)]
output = layer_class(units)(inputs, initial_state=initial_state)
model = Model([inputs] + initial_state, output)
model.compile(loss='categorical_crossentropy', optimizer='adam')
inputs = np.random.random((num_samples, timesteps, embedding_dim))
initial_state = [np.random.random((num_samples, units))
for _ in range(num_states)]
targets = np.random.random((num_samples, units))
model.fit([inputs] + initial_state, targets)
@rnn_test
def test_return_state(layer_class):
num_states = 2 if layer_class is recurrent.LSTM else 1
inputs = Input(batch_shape=(num_samples, timesteps, embedding_dim))
layer = layer_class(units, return_state=True, stateful=True)
outputs = layer(inputs)
output, state = outputs[0], outputs[1:]
assert len(state) == num_states
model = Model(inputs, state[0])
inputs = np.random.random((num_samples, timesteps, embedding_dim))
state = model.predict(inputs)
np.testing.assert_allclose(K.eval(layer.states[0]), state, atol=1e-4)
@rnn_test
def test_state_reuse(layer_class):
inputs = Input(batch_shape=(num_samples, timesteps, embedding_dim))
layer = layer_class(units, return_state=True, return_sequences=True)
outputs = layer(inputs)
output, state = outputs[0], outputs[1:]
output = layer_class(units)(output, initial_state=state)
model = Model(inputs, output)
inputs = np.random.random((num_samples, timesteps, embedding_dim))
outputs = model.predict(inputs)
@keras_test
def test_minimal_rnn_cell_non_layer():
class MinimalRNNCell(object):
def __init__(self, units, input_dim):
self.units = units
self.state_size = units
self.kernel = keras.backend.variable(
np.random.random((input_dim, units)))
def call(self, inputs, states):
prev_output = states[0]
output = keras.backend.dot(inputs, self.kernel) + prev_output
return output, [output]
# Basic test case.
cell = MinimalRNNCell(32, 5)
x = keras.Input((None, 5))
layer = recurrent.RNN(cell)
y = layer(x)
model = keras.models.Model(x, y)
model.compile(optimizer='rmsprop', loss='mse')
model.train_on_batch(np.zeros((6, 5, 5)), np.zeros((6, 32)))
# Test stacking.
cells = [MinimalRNNCell(8, 5),
MinimalRNNCell(32, 8),
MinimalRNNCell(32, 32)]
layer = recurrent.RNN(cells)
y = layer(x)
model = keras.models.Model(x, y)
model.compile(optimizer='rmsprop', loss='mse')
model.train_on_batch(np.zeros((6, 5, 5)), np.zeros((6, 32)))
@keras_test
def test_minimal_rnn_cell_non_layer_multiple_states():
class MinimalRNNCell(object):
def __init__(self, units, input_dim):
self.units = units
self.state_size = (units, units)
self.kernel = keras.backend.variable(
np.random.random((input_dim, units)))
def call(self, inputs, states):
prev_output_1 = states[0]
prev_output_2 = states[1]
output = keras.backend.dot(inputs, self.kernel)
output += prev_output_1
output -= prev_output_2
return output, [output * 2, output * 3]
# Basic test case.
cell = MinimalRNNCell(32, 5)
x = keras.Input((None, 5))
layer = recurrent.RNN(cell)
y = layer(x)
model = keras.models.Model(x, y)
model.compile(optimizer='rmsprop', loss='mse')
model.train_on_batch(np.zeros((6, 5, 5)), np.zeros((6, 32)))
# Test stacking.
cells = [MinimalRNNCell(8, 5),
MinimalRNNCell(16, 8),
MinimalRNNCell(32, 16)]
layer = recurrent.RNN(cells)
assert layer.cell.state_size == (32, 32, 16, 16, 8, 8)
y = layer(x)
model = keras.models.Model(x, y)
model.compile(optimizer='rmsprop', loss='mse')
model.train_on_batch(np.zeros((6, 5, 5)), np.zeros((6, 32)))
@keras_test
def test_minimal_rnn_cell_layer():
class MinimalRNNCell(keras.layers.Layer):
def __init__(self, units, **kwargs):
self.units = units
self.state_size = units
super(MinimalRNNCell, self).__init__(**kwargs)
def build(self, input_shape):
self.kernel = self.add_weight(shape=(input_shape[-1], self.units),
initializer='uniform',
name='kernel')
self.recurrent_kernel = self.add_weight(
shape=(self.units, self.units),
initializer='uniform',
name='recurrent_kernel')
self.built = True
def call(self, inputs, states):
prev_output = states[0]
h = keras.backend.dot(inputs, self.kernel)
output = h + keras.backend.dot(prev_output, self.recurrent_kernel)
return output, [output]
def get_config(self):
config = {'units': self.units}
base_config = super(MinimalRNNCell, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
# Test basic case.
x = keras.Input((None, 5))
cell = MinimalRNNCell(32)
layer = recurrent.RNN(cell)
y = layer(x)
model = keras.models.Model(x, y)
model.compile(optimizer='rmsprop', loss='mse')
model.train_on_batch(np.zeros((6, 5, 5)), np.zeros((6, 32)))
# Test basic case serialization.
x_np = np.random.random((6, 5, 5))
y_np = model.predict(x_np)
weights = model.get_weights()
config = layer.get_config()
with keras.utils.CustomObjectScope({'MinimalRNNCell': MinimalRNNCell}):
layer = recurrent.RNN.from_config(config)
y = layer(x)
model = keras.models.Model(x, y)
model.set_weights(weights)
y_np_2 = model.predict(x_np)
assert_allclose(y_np, y_np_2, atol=1e-4)
# Test stacking.
cells = [MinimalRNNCell(8),
MinimalRNNCell(12),
MinimalRNNCell(32)]
layer = recurrent.RNN(cells)
y = layer(x)
model = keras.models.Model(x, y)
model.compile(optimizer='rmsprop', loss='mse')
model.train_on_batch(np.zeros((6, 5, 5)), np.zeros((6, 32)))
# Test stacked RNN serialization.
x_np = np.random.random((6, 5, 5))
y_np = model.predict(x_np)
weights = model.get_weights()
config = layer.get_config()
with keras.utils.CustomObjectScope({'MinimalRNNCell': MinimalRNNCell}):
layer = recurrent.RNN.from_config(config)
y = layer(x)
model = keras.models.Model(x, y)
model.set_weights(weights)
y_np_2 = model.predict(x_np)
assert_allclose(y_np, y_np_2, atol=1e-4)
@keras_test
def test_stacked_rnn_attributes():
cells = [recurrent.LSTMCell(3),
recurrent.LSTMCell(3, kernel_regularizer='l2')]
layer = recurrent.RNN(cells)
layer.build((None, None, 5))
# Test regularization losses
assert len(layer.losses) == 1
# Test weights
assert len(layer.trainable_weights) == 6
cells[0].trainable = False
assert len(layer.trainable_weights) == 3
assert len(layer.non_trainable_weights) == 3
# Test `get_losses_for`
x = keras.Input((None, 5))
y = K.sum(x)
cells[0].add_loss(y, inputs=x)
assert layer.get_losses_for(x) == [y]
@rnn_test
def test_batch_size_equal_one(layer_class):
inputs = Input(batch_shape=(1, timesteps, embedding_dim))
layer = layer_class(units)
outputs = layer(inputs)
model = Model(inputs, outputs)
model.compile('sgd', 'mse')
x = np.random.random((1, timesteps, embedding_dim))
y = np.random.random((1, units))
model.train_on_batch(x, y)
def test_rnn_cell_with_constants_layer():
class RNNCellWithConstants(keras.layers.Layer):
def __init__(self, units, **kwargs):
self.units = units
self.state_size = units
super(RNNCellWithConstants, self).__init__(**kwargs)
def build(self, input_shape):
if not isinstance(input_shape, list):
raise TypeError('expects constants shape')
[input_shape, constant_shape] = input_shape
# will (and should) raise if more than one constant passed
self.input_kernel = self.add_weight(
shape=(input_shape[-1], self.units),
initializer='uniform',
name='kernel')
self.recurrent_kernel = self.add_weight(
shape=(self.units, self.units),
initializer='uniform',
name='recurrent_kernel')
self.constant_kernel = self.add_weight(
shape=(constant_shape[-1], self.units),
initializer='uniform',
name='constant_kernel')
self.built = True
def call(self, inputs, states, constants):
[prev_output] = states
[constant] = constants
h_input = keras.backend.dot(inputs, self.input_kernel)
h_state = keras.backend.dot(prev_output, self.recurrent_kernel)
h_const = keras.backend.dot(constant, self.constant_kernel)
output = h_input + h_state + h_const
return output, [output]
def get_config(self):
config = {'units': self.units}
base_config = super(RNNCellWithConstants, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
# Test basic case.
x = keras.Input((None, 5))
c = keras.Input((3,))
cell = RNNCellWithConstants(32)
layer = recurrent.RNN(cell)
y = layer(x, constants=c)
model = keras.models.Model([x, c], y)
model.compile(optimizer='rmsprop', loss='mse')
model.train_on_batch(
[np.zeros((6, 5, 5)), np.zeros((6, 3))],
np.zeros((6, 32))
)
# Test basic case serialization.
x_np = np.random.random((6, 5, 5))
c_np = np.random.random((6, 3))
y_np = model.predict([x_np, c_np])
weights = model.get_weights()
config = layer.get_config()
custom_objects = {'RNNCellWithConstants': RNNCellWithConstants}
with keras.utils.CustomObjectScope(custom_objects):
layer = recurrent.RNN.from_config(config.copy())
y = layer(x, constants=c)
model = keras.models.Model([x, c], y)
model.set_weights(weights)
y_np_2 = model.predict([x_np, c_np])
assert_allclose(y_np, y_np_2, atol=1e-4)
# test flat list inputs
with keras.utils.CustomObjectScope(custom_objects):
layer = recurrent.RNN.from_config(config.copy())
y = layer([x, c])
model = keras.models.Model([x, c], y)
model.set_weights(weights)
y_np_3 = model.predict([x_np, c_np])
assert_allclose(y_np, y_np_3, atol=1e-4)
def test_rnn_cell_with_constants_layer_passing_initial_state():
class RNNCellWithConstants(keras.layers.Layer):
def __init__(self, units, **kwargs):
self.units = units
self.state_size = units
super(RNNCellWithConstants, self).__init__(**kwargs)
def build(self, input_shape):
if not isinstance(input_shape, list):
raise TypeError('expects constants shape')
[input_shape, constant_shape] = input_shape
# will (and should) raise if more than one constant passed
self.input_kernel = self.add_weight(
shape=(input_shape[-1], self.units),
initializer='uniform',
name='kernel')
self.recurrent_kernel = self.add_weight(
shape=(self.units, self.units),
initializer='uniform',
name='recurrent_kernel')
self.constant_kernel = self.add_weight(
shape=(constant_shape[-1], self.units),
initializer='uniform',
name='constant_kernel')
self.built = True
def call(self, inputs, states, constants):
[prev_output] = states
[constant] = constants
h_input = keras.backend.dot(inputs, self.input_kernel)
h_state = keras.backend.dot(prev_output, self.recurrent_kernel)
h_const = keras.backend.dot(constant, self.constant_kernel)
output = h_input + h_state + h_const
return output, [output]
def get_config(self):
config = {'units': self.units}
base_config = super(RNNCellWithConstants, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
# Test basic case.
x = keras.Input((None, 5))
c = keras.Input((3,))
s = keras.Input((32,))
cell = RNNCellWithConstants(32)
layer = recurrent.RNN(cell)
y = layer(x, initial_state=s, constants=c)
model = keras.models.Model([x, s, c], y)
model.compile(optimizer='rmsprop', loss='mse')
model.train_on_batch(
[np.zeros((6, 5, 5)), np.zeros((6, 32)), np.zeros((6, 3))],
np.zeros((6, 32))
)
# Test basic case serialization.
x_np = np.random.random((6, 5, 5))
s_np = np.random.random((6, 32))
c_np = np.random.random((6, 3))
y_np = model.predict([x_np, s_np, c_np])
weights = model.get_weights()
config = layer.get_config()
custom_objects = {'RNNCellWithConstants': RNNCellWithConstants}
with keras.utils.CustomObjectScope(custom_objects):
layer = recurrent.RNN.from_config(config.copy())
y = layer(x, initial_state=s, constants=c)
model = keras.models.Model([x, s, c], y)
model.set_weights(weights)
y_np_2 = model.predict([x_np, s_np, c_np])
assert_allclose(y_np, y_np_2, atol=1e-4)
# verify that state is used
y_np_2_different_s = model.predict([x_np, s_np + 10., c_np])
with pytest.raises(AssertionError):
assert_allclose(y_np, y_np_2_different_s, atol=1e-4)
# test flat list inputs
with keras.utils.CustomObjectScope(custom_objects):
layer = recurrent.RNN.from_config(config.copy())
y = layer([x, s, c])
model = keras.models.Model([x, s, c], y)
model.set_weights(weights)
y_np_3 = model.predict([x_np, s_np, c_np])
assert_allclose(y_np, y_np_3, atol=1e-4)
if __name__ == '__main__':
pytest.main([__file__])
|
from django import forms
from django.core.validators import MinValueValidator
from django.utils.translation import gettext_lazy as _
from .models import DayType, Day
class DayTypeForm(forms.ModelForm):
class Meta:
model = DayType
fields = '__all__'
class DayForm(forms.ModelForm):
class Meta:
model = Day
fields = '__all__'
class CalendarUploadForm(forms.Form):
year = forms.IntegerField(
required=True,
validators=[
MinValueValidator(1999),
],
label=_('Year'),
help_text=_('Pick a year to import'),
)
file = forms.FileField(
required=True,
label=_('File'),
help_text=_('Pick a CSV file containing work calendar'),
)
def clean_file(self):
if any(self.errors):
return None
file = self.cleaned_data['file']
if not file.name.endswith('.csv'):
raise forms.ValidationError(_('CSV file required'))
return file
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from flask_wtf import FlaskForm # type: ignore
from wtforms import ( # type: ignore
StringField,
TextAreaField,
SubmitField,
FieldList,
FormField,
IntegerField,
HiddenField,
BooleanField,
)
from wtforms import validators
from data.models import VulnerabilityGitCommits, VulnerabilityResources
from data.models.base import db
class BaseForm(FlaskForm):
@property
def non_hidden_fields(self):
for field in self:
if isinstance(field, HiddenField):
continue
yield field
class ModelFieldList(FieldList):
def __init__(self, *args, **kwargs):
self.model = kwargs.pop("model", None)
super().__init__(*args, **kwargs)
if not self.model:
raise ValueError("ModelFieldList requires model to be set")
def populate_obj(self, obj, name):
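        # Keep the related collection the same length as the submitted entries:
        # create and add missing model rows, delete surplus ones, then let
        # FieldList.populate_obj copy the form data onto each row.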
if not hasattr(obj, name):
setattr(obj, name, [])
while len(getattr(obj, name)) < len(self.entries):
new_model = self.model()
db.session.add(new_model)
getattr(obj, name).append(new_model)
while len(getattr(obj, name)) > len(self.entries):
db.session.delete(getattr(obj, name).pop())
super().populate_obj(obj, name)
class CommitLinksForm(FlaskForm):
repo_url = StringField(
"Git Repo URL", validators=[validators.Optional(), validators.URL()]
)
commit_hash = StringField("Commit Hash", validators=[])
# Commit data is optional -> otherwise use: validators.DataRequired(),
commit_link = StringField(
"Main commit link", validators=[validators.Optional(), validators.URL()]
)
repo_name = StringField("Repository Name", validators=[])
class Meta:
csrf = False
class VulnerabilityResourcesForm(FlaskForm):
link = StringField("Link", validators=[validators.DataRequired(), validators.URL()])
class Meta:
csrf = False
class VulnerabilityDetailsForm(FlaskForm):
commits = ModelFieldList(
FormField(CommitLinksForm),
model=VulnerabilityGitCommits,
min_entries=1,
default=[VulnerabilityGitCommits],
)
# Changing the CVE ID is disabled for now.
# The filters argument is used to have Null fields instead of empty strings.
# This is important since the cve_id is supposed to be unique OR Null.
# cve_id = StringField(
# "CVE-ID",
# filters=[lambda x: x and str(x).upper().strip(), lambda x: x or None],
# validators=[
# validators.Optional(),
# validators.Regexp(r"^CVE-\d{4}-\d+$")
# ],
# )
comment = TextAreaField(
"High-Level Bug Overview", validators=[validators.DataRequired()]
)
resources = ModelFieldList(
FormField(VulnerabilityResourcesForm), model=VulnerabilityResources
)
submit = SubmitField("Propose change")
class VulnerabilityProposalReject(FlaskForm):
review_feedback = TextAreaField(
"Feedback what should be changed", validators=[validators.DataRequired()]
)
submit_reject = SubmitField("Ask for improvements")
class VulnerabilityProposalApprove(FlaskForm):
submit_approve = SubmitField("Approve proposal")
class VulnerabilityProposalAssign(FlaskForm):
submit_assign = SubmitField("Take review")
class VulnerabilityProposalUnassign(FlaskForm):
submit_unassign = SubmitField("Unassign from this review")
class VulnerabilityProposalPublish(FlaskForm):
submit_publish = SubmitField("Publish entry")
class VulnerabilityDeleteForm(FlaskForm):
delete_entry = IntegerField("Delete entry", [validators.required()])
submit = SubmitField()
class UserProfileForm(BaseForm):
full_name = StringField(
"Name",
description=(
'<small class="form-text text-muted">'
"What should be shown next to your contributions.</small>"
),
)
hide_name = BooleanField("Hide Name")
profile_picture = StringField(
"Profile Picture URL", validators=[validators.Optional(), validators.URL()]
)
hide_picture = BooleanField("Hide Profile Picture")
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
"""
globals attached to frappe module
+ some utility functions that should probably be moved
"""
from __future__ import unicode_literals, print_function
from six import iteritems, binary_type, text_type, string_types
from werkzeug.local import Local, release_local
import os, sys, importlib, inspect, json
from past.builtins import cmp
from faker import Faker
# public
from .exceptions import *
from .utils.jinja import (get_jenv, get_template, render_template, get_email_from_template, get_jloader)
# Harmless for Python 3
# For Python 2 set default encoding to utf-8
if sys.version[0] == '2':
reload(sys)
sys.setdefaultencoding("utf-8")
__version__ = '11.1.65'
__title__ = "Frappe Framework"
local = Local()
class _dict(dict):
"""dict like object that exposes keys as attributes"""
def __getattr__(self, key):
ret = self.get(key)
if not ret and key.startswith("__"):
raise AttributeError()
return ret
def __setattr__(self, key, value):
self[key] = value
def __getstate__(self):
return self
def __setstate__(self, d):
self.update(d)
def update(self, d):
"""update and return self -- the missing dict feature in python"""
super(_dict, self).update(d)
return self
def copy(self):
return _dict(dict(self).copy())
def _(msg, lang=None):
"""Returns translated string in current lang, if exists."""
from frappe.translate import get_full_dict
from frappe.utils import strip_html_tags, is_html
if not hasattr(local, 'lang'):
local.lang = lang or 'en'
if not lang:
lang = local.lang
non_translated_msg = msg
if is_html(msg):
msg = strip_html_tags(msg)
# msg should always be unicode
msg = as_unicode(msg).strip()
	# look up the translation in the full dict for the given lang, falling back to the original message
return get_full_dict(lang).get(msg) or non_translated_msg
def as_unicode(text, encoding='utf-8'):
'''Convert to unicode if required'''
if isinstance(text, text_type):
return text
	elif text is None:
return ''
elif isinstance(text, binary_type):
return text_type(text, encoding)
else:
return text_type(text)
def get_lang_dict(fortype, name=None):
"""Returns the translated language dict for the given type and name.
:param fortype: must be one of `doctype`, `page`, `report`, `include`, `jsfile`, `boot`
:param name: name of the document for which assets are to be returned."""
from frappe.translate import get_dict
return get_dict(fortype, name)
def set_user_lang(user, user_language=None):
"""Guess and set user language for the session. `frappe.local.lang`"""
from frappe.translate import get_user_lang
local.lang = get_user_lang(user)
# local-globals
db = local("db")
conf = local("conf")
form = form_dict = local("form_dict")
request = local("request")
response = local("response")
session = local("session")
user = local("user")
flags = local("flags")
error_log = local("error_log")
debug_log = local("debug_log")
message_log = local("message_log")
lang = local("lang")
def init(site, sites_path=None, new_site=False):
"""Initialize frappe for the current site. Reset thread locals `frappe.local`"""
if getattr(local, "initialised", None):
return
if not sites_path:
sites_path = '.'
local.error_log = []
local.message_log = []
local.debug_log = []
local.realtime_log = []
local.flags = _dict({
"ran_schedulers": [],
"currently_saving": [],
"redirect_location": "",
"in_install_db": False,
"in_install_app": False,
"in_import": False,
"in_test": False,
"mute_messages": False,
"ignore_links": False,
"mute_emails": False,
"has_dataurl": False,
"new_site": new_site
})
local.rollback_observers = []
local.test_objects = {}
local.site = site
local.sites_path = sites_path
local.site_path = os.path.join(sites_path, site)
local.request_ip = None
local.response = _dict({"docs":[]})
local.task_id = None
local.conf = _dict(get_site_config())
local.lang = local.conf.lang or "en"
local.lang_full_dict = None
local.module_app = None
local.app_modules = None
local.system_settings = _dict()
local.user = None
local.user_perms = None
local.session = None
local.role_permissions = {}
local.valid_columns = {}
local.new_doc_templates = {}
local.link_count = {}
local.jenv = None
	local.jloader = None
local.cache = {}
local.document_cache = {}
local.meta_cache = {}
local.form_dict = _dict()
local.session = _dict()
setup_module_map()
local.initialised = True
def connect(site=None, db_name=None):
"""Connect to site database instance.
:param site: If site is given, calls `frappe.init`.
:param db_name: Optional. Will use from `site_config.json`."""
from frappe.database import Database
if site:
init(site)
local.db = Database(user=db_name or local.conf.db_name)
set_user("Administrator")
def connect_replica():
from frappe.database import Database
user = local.conf.db_name
password = local.conf.db_password
if local.conf.different_credentials_for_replica:
user = local.conf.replica_db_name
password = local.conf.replica_db_password
local.replica_db = Database(host=local.conf.replica_host, user=user, password=password)
# swap db connections
local.primary_db = local.db
local.db = local.replica_db
def get_site_config(sites_path=None, site_path=None):
"""Returns `site_config.json` combined with `sites/common_site_config.json`.
`site_config` is a set of site wide settings like database name, password, email etc."""
config = {}
sites_path = sites_path or getattr(local, "sites_path", None)
site_path = site_path or getattr(local, "site_path", None)
if sites_path:
common_site_config = os.path.join(sites_path, "common_site_config.json")
if os.path.exists(common_site_config):
config.update(get_file_json(common_site_config))
if site_path:
site_config = os.path.join(site_path, "site_config.json")
if os.path.exists(site_config):
config.update(get_file_json(site_config))
elif local.site and not local.flags.new_site:
print("{0} does not exist".format(local.site))
sys.exit(1)
#raise IncorrectSitePath, "{0} does not exist".format(site_config)
return _dict(config)
def get_conf(site=None):
if hasattr(local, 'conf'):
return local.conf
else:
# if no site, get from common_site_config.json
with init_site(site):
return local.conf
class init_site:
def __init__(self, site=None):
'''If site==None, initialize it for empty site ('') to load common_site_config.json'''
self.site = site or ''
def __enter__(self):
init(self.site)
return local
def __exit__(self, type, value, traceback):
destroy()
def destroy():
"""Closes connection and releases werkzeug local."""
if db:
db.close()
release_local(local)
# redis cache
redis_server = None
def cache():
	"""Returns redis cache connection."""
global redis_server
if not redis_server:
from frappe.utils.redis_wrapper import RedisWrapper
redis_server = RedisWrapper.from_url(conf.get('redis_cache')
or "redis://localhost:11311")
return redis_server
def get_traceback():
"""Returns error traceback."""
from frappe.utils import get_traceback
return get_traceback()
def errprint(msg):
"""Log error. This is sent back as `exc` in response.
:param msg: Message."""
msg = as_unicode(msg)
if not request or (not "cmd" in local.form_dict) or conf.developer_mode:
print(msg.encode('utf-8'))
error_log.append({"exc": msg})
def log(msg):
"""Add to `debug_log`.
:param msg: Message."""
if not request:
if conf.get("logging") or False:
print(repr(msg))
debug_log.append(as_unicode(msg))
def msgprint(msg, title=None, raise_exception=0, as_table=False, indicator=None, alert=False):
"""Print a message to the user (via HTTP response).
Messages are sent in the `__server_messages` property in the
response JSON and shown in a pop-up / modal.
:param msg: Message.
:param title: [optional] Message title.
:param raise_exception: [optional] Raise given exception and show message.
:param as_table: [optional] If `msg` is a list of lists, render as HTML table.
"""
from frappe.utils import encode
msg = safe_decode(msg)
out = _dict(message=msg)
def _raise_exception():
if raise_exception:
if flags.rollback_on_exception:
db.rollback()
import inspect
if inspect.isclass(raise_exception) and issubclass(raise_exception, Exception):
raise raise_exception(msg)
else:
raise ValidationError(msg)
if flags.mute_messages:
_raise_exception()
return
if as_table and type(msg) in (list, tuple):
out.msg = '<table border="1px" style="border-collapse: collapse" cellpadding="2px">' + ''.join(['<tr>'+''.join(['<td>%s</td>' % c for c in r])+'</tr>' for r in msg]) + '</table>'
if flags.print_messages and out.msg:
print("Message: " + repr(out.msg).encode("utf-8"))
if title:
out.title = title
if not indicator and raise_exception:
indicator = 'red'
if indicator:
out.indicator = indicator
if alert:
out.alert = 1
message_log.append(json.dumps(out))
if raise_exception and hasattr(raise_exception, '__name__'):
local.response['exc_type'] = raise_exception.__name__
_raise_exception()
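# Example usage (sketch):
#     msgprint(_("Settings saved"), indicator='green', alert=True)
#     msgprint(_("Missing value for {0}").format(fieldname), raise_exception=ValidationError)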
def clear_messages():
local.message_log = []
def clear_last_message():
if len(local.message_log) > 0:
local.message_log = local.message_log[:-1]
def throw(msg, exc=ValidationError, title=None):
"""Throw execption and show message (`msgprint`).
:param msg: Message.
:param exc: Exception class. Default `frappe.ValidationError`"""
msgprint(msg, raise_exception=exc, title=title, indicator='red')
def emit_js(js, user=False, **kwargs):
from frappe.realtime import publish_realtime
if user == False:
user = session.user
publish_realtime('eval_js', js, user=user, **kwargs)
def create_folder(path, with_init=False):
"""Create a folder in the given path and add an `__init__.py` file (optional).
:param path: Folder path.
:param with_init: Create `__init__.py` in the new folder."""
from frappe.utils import touch_file
if not os.path.exists(path):
os.makedirs(path)
if with_init:
touch_file(os.path.join(path, "__init__.py"))
def set_user(username):
"""Set current user.
:param username: **User** name to set as current user."""
local.session.user = username
local.session.sid = username
local.cache = {}
local.form_dict = _dict()
local.jenv = None
local.session.data = _dict()
local.role_permissions = {}
local.new_doc_templates = {}
local.user_perms = None
def get_user():
from frappe.utils.user import UserPermissions
if not local.user_perms:
local.user_perms = UserPermissions(local.session.user)
return local.user_perms
def get_roles(username=None):
"""Returns roles of current user."""
if not local.session:
return ["Guest"]
if username:
import frappe.permissions
return frappe.permissions.get_roles(username)
else:
return get_user().get_roles()
def get_request_header(key, default=None):
"""Return HTTP request header.
:param key: HTTP header key.
:param default: Default value."""
return request.headers.get(key, default)
def sendmail(recipients=[], sender="", subject="No Subject", message="No Message",
as_markdown=False, delayed=True, reference_doctype=None, reference_name=None,
unsubscribe_method=None, unsubscribe_params=None, unsubscribe_message=None,
attachments=None, content=None, doctype=None, name=None, reply_to=None,
cc=[], bcc=[], message_id=None, in_reply_to=None, send_after=None, expose_recipients=None,
send_priority=1, communication=None, retry=1, now=None, read_receipt=None, is_notification=False,
inline_images=None, template=None, args=None, header=None, print_letterhead=False):
"""Send email using user's default **Email Account** or global default **Email Account**.
:param recipients: List of recipients.
:param sender: Email sender. Default is current user.
:param subject: Email Subject.
:param message: (or `content`) Email Content.
:param as_markdown: Convert content markdown to HTML.
:param delayed: Send via scheduled email sender **Email Queue**. Don't send immediately. Default is true
:param send_priority: Priority for Email Queue, default 1.
:param reference_doctype: (or `doctype`) Append as communication to this DocType.
:param reference_name: (or `name`) Append as communication to this document name.
:param unsubscribe_method: Unsubscribe url with options email, doctype, name. e.g. `/api/method/unsubscribe`
	:param unsubscribe_params: Unsubscribe parameters to be loaded on the unsubscribe_method [optional] (dict).
:param attachments: List of attachments.
:param reply_to: Reply-To Email Address.
:param message_id: Used for threading. If a reply is received to this email, Message-Id is sent back as In-Reply-To in received email.
:param in_reply_to: Used to send the Message-Id of a received email back as In-Reply-To.
:param send_after: Send after the given datetime.
:param expose_recipients: Display all recipients in the footer message - "This email was sent to"
:param communication: Communication link to be set in Email Queue record
:param inline_images: List of inline images as {"filename", "filecontent"}. All src properties will be replaced with random Content-Id
:param template: Name of html template from templates/emails folder
:param args: Arguments for rendering the template
:param header: Append header in email
"""
text_content = None
if template:
message, text_content = get_email_from_template(template, args)
message = content or message
if as_markdown:
message = frappe.utils.md_to_html(message)
if not delayed:
now = True
from frappe.email import queue
queue.send(recipients=recipients, sender=sender,
subject=subject, message=message, text_content=text_content,
reference_doctype = doctype or reference_doctype, reference_name = name or reference_name,
unsubscribe_method=unsubscribe_method, unsubscribe_params=unsubscribe_params, unsubscribe_message=unsubscribe_message,
attachments=attachments, reply_to=reply_to, cc=cc, bcc=bcc, message_id=message_id, in_reply_to=in_reply_to,
send_after=send_after, expose_recipients=expose_recipients, send_priority=send_priority,
communication=communication, now=now, read_receipt=read_receipt, is_notification=is_notification,
inline_images=inline_images, header=header, print_letterhead=print_letterhead)
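# Example usage (sketch; assumes an Email Account is configured for the site):
#     sendmail(recipients=['user@example.com'], subject='Welcome',
#         message='<p>Hello</p>', delayed=False)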
whitelisted = []
guest_methods = []
xss_safe_methods = []
def whitelist(allow_guest=False, xss_safe=False):
"""
Decorator for whitelisting a function and making it accessible via HTTP.
Standard request will be `/api/method/[path.to.method]`
:param allow_guest: Allow non logged-in user to access this method.
Use as:
@frappe.whitelist()
def myfunc(param1, param2):
pass
"""
def innerfn(fn):
global whitelisted, guest_methods, xss_safe_methods
whitelisted.append(fn)
if allow_guest:
guest_methods.append(fn)
if xss_safe:
xss_safe_methods.append(fn)
return fn
return innerfn
def read_only():
def innfn(fn):
def wrapper_fn(*args, **kwargs):
if conf.read_from_replica:
connect_replica()
try:
retval = fn(*args, **get_newargs(fn, kwargs))
except:
raise
finally:
if local and hasattr(local, 'primary_db'):
local.db.close()
local.db = local.primary_db
return retval
return wrapper_fn
return innfn
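# Example usage (sketch): route reads through the replica connection when
# `read_from_replica` is set in the site config.
#     @frappe.read_only()
#     def get_report_data(filters):
#         ...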
def only_for(roles):
"""Raise `frappe.PermissionError` if the user does not have any of the given **Roles**.
:param roles: List of roles to check."""
if local.flags.in_test:
return
if not isinstance(roles, (tuple, list)):
roles = (roles,)
roles = set(roles)
myroles = set(get_roles())
if not roles.intersection(myroles):
raise PermissionError
def get_domain_data(module):
try:
domain_data = get_hooks('domains')
if module in domain_data:
return _dict(get_attr(get_hooks('domains')[module][0] + '.data'))
else:
return _dict()
except ImportError:
if local.flags.in_test:
return _dict()
else:
raise
def clear_cache(user=None, doctype=None):
"""Clear **User**, **DocType** or global cache.
:param user: If user is given, only user cache is cleared.
:param doctype: If doctype is given, only DocType cache is cleared."""
import frappe.cache_manager
if doctype:
frappe.cache_manager.clear_doctype_cache(doctype)
reset_metadata_version()
elif user:
frappe.cache_manager.clear_user_cache(user)
else: # everything
from frappe import translate
frappe.cache_manager.clear_user_cache()
translate.clear_cache()
reset_metadata_version()
local.cache = {}
local.new_doc_templates = {}
for fn in get_hooks("clear_cache"):
get_attr(fn)()
local.role_permissions = {}
def has_permission(doctype=None, ptype="read", doc=None, user=None, verbose=False, throw=False):
"""Raises `frappe.PermissionError` if not permitted.
	:param doctype: DocType for which permission is to be checked.
:param ptype: Permission type (`read`, `write`, `create`, `submit`, `cancel`, `amend`). Default: `read`.
:param doc: [optional] Checks User permissions for given doc.
:param user: [optional] Check for given user. Default: current user."""
if not doctype and doc:
doctype = doc.doctype
import frappe.permissions
out = frappe.permissions.has_permission(doctype, ptype, doc=doc, verbose=verbose, user=user)
if throw and not out:
if doc:
frappe.throw(_("No permission for {0}").format(doc.doctype + " " + doc.name))
else:
frappe.throw(_("No permission for {0}").format(doctype))
return out
def has_website_permission(doc=None, ptype='read', user=None, verbose=False, doctype=None):
"""Raises `frappe.PermissionError` if not permitted.
:param doctype: DocType for which permission is to be checked.
:param ptype: Permission type (`read`, `write`, `create`, `submit`, `cancel`, `amend`). Default: `read`.
:param doc: Checks User permissions for given doc.
:param user: [optional] Check for given user. Default: current user."""
if not user:
user = session.user
if doc:
if isinstance(doc, string_types):
doc = get_doc(doctype, doc)
doctype = doc.doctype
if doc.flags.ignore_permissions:
return True
# check permission in controller
if hasattr(doc, 'has_website_permission'):
return doc.has_website_permission(ptype, user, verbose=verbose)
hooks = (get_hooks("has_website_permission") or {}).get(doctype, [])
if hooks:
for method in hooks:
result = call(method, doc=doc, ptype=ptype, user=user, verbose=verbose)
# if even a single permission check is Falsy
if not result:
return False
# else it is Truthy
return True
else:
return False
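# Illustrative sketch (not part of the original module): a `has_website_permission`
# hook method registered in an app's hooks.py is invoked via `call(...)` above with
# the keyword arguments doc, ptype, user and verbose; because `call` filters kwargs
# through `get_newargs`, the hook may accept any subset of them. The ownership rule
# below is a hypothetical example, not something this module defines.
def _example_website_permission_hook(doc, user, **kwargs):
    # grant website access only to the owner of the document
    return doc.owner == user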
def is_table(doctype):
"""Returns True if `istable` property (indicating child Table) is set for given DocType."""
def get_tables():
return db.sql_list("select name from tabDocType where istable=1")
tables = cache().get_value("is_table", get_tables)
return doctype in tables
def get_precision(doctype, fieldname, currency=None, doc=None):
"""Get precision for a given field"""
from frappe.model.meta import get_field_precision
return get_field_precision(get_meta(doctype).get_field(fieldname), doc, currency)
def generate_hash(txt=None, length=None):
"""Generates random hash for given text + current timestamp + random string."""
import hashlib, time
from .utils import random_string
digest = hashlib.sha224(((txt or "") + repr(time.time()) + repr(random_string(8))).encode()).hexdigest()
if length:
digest = digest[:length]
return digest
def reset_metadata_version():
"""Reset `metadata_version` (Client (Javascript) build ID) hash."""
v = generate_hash()
cache().set_value("metadata_version", v)
return v
def new_doc(doctype, parent_doc=None, parentfield=None, as_dict=False):
"""Returns a new document of the given DocType with defaults set.
:param doctype: DocType of the new document.
:param parent_doc: [optional] add to parent document.
:param parentfield: [optional] add against this `parentfield`."""
from frappe.model.create_new import get_new_doc
return get_new_doc(doctype, parent_doc, parentfield, as_dict=as_dict)
def set_value(doctype, docname, fieldname, value=None):
"""Set document value. Calls `frappe.client.set_value`"""
import frappe.client
return frappe.client.set_value(doctype, docname, fieldname, value)
def get_cached_doc(*args, **kwargs):
if args and len(args) > 1 and isinstance(args[1], text_type):
key = get_document_cache_key(args[0], args[1])
# local cache
doc = local.document_cache.get(key)
if doc:
return doc
# redis cache
doc = cache().hget('document_cache', key)
if doc:
doc = get_doc(doc)
local.document_cache[key] = doc
return doc
# database
doc = get_doc(*args, **kwargs)
return doc
def get_document_cache_key(doctype, name):
return '{0}::{1}'.format(doctype, name)
def clear_document_cache(doctype, name):
cache().hdel("last_modified", doctype)
key = get_document_cache_key(doctype, name)
if key in local.document_cache:
del local.document_cache[key]
cache().hdel('document_cache', key)
def get_cached_value(doctype, name, fieldname, as_dict=False):
doc = get_cached_doc(doctype, name)
if isinstance(fieldname, string_types):
if as_dict:
throw('Cannot make dict for single fieldname')
return doc.get(fieldname)
values = [doc.get(f) for f in fieldname]
if as_dict:
return _dict(zip(fieldname, values))
return values
def get_doc(*args, **kwargs):
"""Return a `frappe.model.document.Document` object of the given type and name.
:param arg1: DocType name as string **or** document JSON.
:param arg2: [optional] Document name as string.
Examples:
# insert a new document
todo = frappe.get_doc({"doctype":"ToDo", "description": "test"})
todo.insert()
# open an existing document
todo = frappe.get_doc("ToDo", "TD0001")
"""
import frappe.model.document
doc = frappe.model.document.get_doc(*args, **kwargs)
# set in cache
if args and len(args) > 1:
key = get_document_cache_key(args[0], args[1])
local.document_cache[key] = doc
cache().hset('document_cache', key, doc.as_dict())
return doc
def get_last_doc(doctype):
"""Get last created document of this type."""
d = get_all(doctype, ["name"], order_by="creation desc", limit_page_length=1)
if d:
return get_doc(doctype, d[0].name)
else:
raise DoesNotExistError
def get_single(doctype):
"""Return a `frappe.model.document.Document` object of the given Single doctype."""
return get_doc(doctype, doctype)
def get_meta(doctype, cached=True):
"""Get `frappe.model.meta.Meta` instance of given doctype name."""
import frappe.model.meta
return frappe.model.meta.get_meta(doctype, cached=cached)
def get_meta_module(doctype):
import frappe.modules
return frappe.modules.load_doctype_module(doctype)
def delete_doc(doctype=None, name=None, force=0, ignore_doctypes=None, for_reload=False,
ignore_permissions=False, flags=None, ignore_on_trash=False, ignore_missing=True):
"""Delete a document. Calls `frappe.model.delete_doc.delete_doc`.
:param doctype: DocType of document to be deleted.
:param name: Name of document to be deleted.
:param force: Allow even if document is linked. Warning: This may lead to data integrity errors.
:param ignore_doctypes: Ignore if child table is one of these.
:param for_reload: Call `before_reload` trigger before deleting.
:param ignore_permissions: Ignore user permissions."""
import frappe.model.delete_doc
frappe.model.delete_doc.delete_doc(doctype, name, force, ignore_doctypes, for_reload,
ignore_permissions, flags, ignore_on_trash, ignore_missing)
def delete_doc_if_exists(doctype, name, force=0):
"""Delete document if exists."""
if db.exists(doctype, name):
delete_doc(doctype, name, force=force)
def reload_doctype(doctype, force=False, reset_permissions=False):
"""Reload DocType from model (`[module]/[doctype]/[name]/[name].json`) files."""
reload_doc(scrub(db.get_value("DocType", doctype, "module")), "doctype", scrub(doctype),
force=force, reset_permissions=reset_permissions)
def reload_doc(module, dt=None, dn=None, force=False, reset_permissions=False):
"""Reload Document from model (`[module]/[doctype]/[name]/[name].json`) files.
:param module: Module name.
:param dt: DocType name.
:param dn: Document name.
:param force: Reload even if `modified` timestamp matches.
"""
import frappe.modules
return frappe.modules.reload_doc(module, dt, dn, force=force, reset_permissions=reset_permissions)
def rename_doc(*args, **kwargs):
"""Rename a document. Calls `frappe.model.rename_doc.rename_doc`"""
from frappe.model.rename_doc import rename_doc
return rename_doc(*args, **kwargs)
def get_module(modulename):
"""Returns a module object for given Python module name using `importlib.import_module`."""
return importlib.import_module(modulename)
def scrub(txt):
"""Returns sluggified string. e.g. `Sales Order` becomes `sales_order`."""
return txt.replace(' ','_').replace('-', '_').lower()
def unscrub(txt):
"""Returns titlified string. e.g. `sales_order` becomes `Sales Order`."""
return txt.replace('_',' ').replace('-', ' ').title()
def get_module_path(module, *joins):
"""Get the path of the given module name.
:param module: Module name.
:param *joins: Join additional path elements using `os.path.join`."""
module = scrub(module)
return get_pymodule_path(local.module_app[module] + "." + module, *joins)
def get_app_path(app_name, *joins):
"""Return path of given app.
:param app: App name.
:param *joins: Join additional path elements using `os.path.join`."""
return get_pymodule_path(app_name, *joins)
def get_site_path(*joins):
"""Return path of current site.
:param *joins: Join additional path elements using `os.path.join`."""
return os.path.join(local.site_path, *joins)
def get_pymodule_path(modulename, *joins):
"""Return path of given Python module name.
:param modulename: Python module name.
:param *joins: Join additional path elements using `os.path.join`."""
if not "public" in joins:
joins = [scrub(part) for part in joins]
return os.path.join(os.path.dirname(get_module(scrub(modulename)).__file__), *joins)
def get_module_list(app_name):
"""Get list of modules for given all via `app/modules.txt`."""
return get_file_items(os.path.join(os.path.dirname(get_module(app_name).__file__), "modules.txt"))
def get_all_apps(with_internal_apps=True, sites_path=None):
"""Get list of all apps via `sites/apps.txt`."""
if not sites_path:
sites_path = local.sites_path
apps = get_file_items(os.path.join(sites_path, "apps.txt"), raise_not_found=True)
if with_internal_apps:
for app in get_file_items(os.path.join(local.site_path, "apps.txt")):
if app not in apps:
apps.append(app)
if "frappe" in apps:
apps.remove("frappe")
apps.insert(0, 'frappe')
return apps
def get_installed_apps(sort=False, frappe_last=False):
"""Get list of installed apps in current site."""
if getattr(flags, "in_install_db", True):
return []
if not db:
connect()
installed = json.loads(db.get_global("installed_apps") or "[]")
if sort:
installed = [app for app in get_all_apps(True) if app in installed]
if frappe_last:
if 'frappe' in installed:
installed.remove('frappe')
installed.append('frappe')
return installed
def get_doc_hooks():
'''Returns hooked methods for doc events. Tuple keys in the `doc_events` hook are expanded so each DocType gets its own entry.'''
if not hasattr(local, 'doc_events_hooks'):
hooks = get_hooks('doc_events', {})
out = {}
for key, value in iteritems(hooks):
if isinstance(key, tuple):
for doctype in key:
append_hook(out, doctype, value)
else:
append_hook(out, key, value)
local.doc_events_hooks = out
return local.doc_events_hooks
def get_hooks(hook=None, default=None, app_name=None):
"""Get hooks via `app/hooks.py`
:param hook: Name of the hook. Will gather all hooks for this name and return as a list.
:param default: Default if no hook found.
:param app_name: Filter by app."""
def load_app_hooks(app_name=None):
hooks = {}
for app in [app_name] if app_name else get_installed_apps(sort=True):
app = "frappe" if app=="webnotes" else app
try:
app_hooks = get_module(app + ".hooks")
except ImportError:
if local.flags.in_install_app:
# if app is not installed while restoring
# ignore it
pass
print('Could not find app "{0}"'.format(app))
if not request:
sys.exit(1)
raise
for key in dir(app_hooks):
if not key.startswith("_"):
append_hook(hooks, key, getattr(app_hooks, key))
return hooks
no_cache = conf.developer_mode or False
if app_name:
hooks = _dict(load_app_hooks(app_name))
else:
if no_cache:
hooks = _dict(load_app_hooks())
else:
hooks = _dict(cache().get_value("app_hooks", load_app_hooks))
if hook:
return hooks.get(hook) or (default if default is not None else [])
else:
return hooks
def append_hook(target, key, value):
'''Append a hook to the target dict.
If the hook value is a dict (like `doc_events`), it is merged key by key
and the leaf values are collected into lists; otherwise the value is
appended to a list against the key.
'''
if isinstance(value, dict):
# dict? make a list of values against each key
target.setdefault(key, {})
for inkey in value:
append_hook(target[key], inkey, value[inkey])
else:
# make a list
target.setdefault(key, [])
if not isinstance(value, list):
value = [value]
target[key].extend(value)
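# Illustrative sketch (not part of the original module): how append_hook collects
# hook values. Plain values are accumulated into lists; dict values (as used by
# `doc_events`) are nested per key and their leaf values listified. The module
# paths used here are hypothetical.
def _example_append_hook():
    hooks = {}
    append_hook(hooks, "scheduler_events", "app1.tasks.daily")
    append_hook(hooks, "scheduler_events", ["app2.tasks.daily"])
    # hooks == {"scheduler_events": ["app1.tasks.daily", "app2.tasks.daily"]}
    append_hook(hooks, "doc_events", {"ToDo": {"on_update": "app1.todo.on_update"}})
    # hooks["doc_events"] == {"ToDo": {"on_update": ["app1.todo.on_update"]}}
    return hooks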
def setup_module_map():
"""Rebuild map of all modules (internal)."""
_cache = cache()
if conf.db_name:
local.app_modules = _cache.get_value("app_modules")
local.module_app = _cache.get_value("module_app")
if not (local.app_modules and local.module_app):
local.module_app, local.app_modules = {}, {}
for app in get_all_apps(True):
if app=="webnotes": app="frappe"
local.app_modules.setdefault(app, [])
for module in get_module_list(app):
module = scrub(module)
local.module_app[module] = app
local.app_modules[app].append(module)
if conf.db_name:
_cache.set_value("app_modules", local.app_modules)
_cache.set_value("module_app", local.module_app)
def get_file_items(path, raise_not_found=False, ignore_empty_lines=True):
"""Returns items from text file as a list. Ignores empty lines."""
import frappe.utils
content = read_file(path, raise_not_found=raise_not_found)
if content:
content = frappe.utils.strip(content)
return [p.strip() for p in content.splitlines() if (not ignore_empty_lines) or (p.strip() and not p.startswith("#"))]
else:
return []
def get_file_json(path):
"""Read a file and return parsed JSON object."""
with open(path, 'r') as f:
return json.load(f)
def read_file(path, raise_not_found=False):
"""Open a file and return its content as Unicode."""
if isinstance(path, text_type):
path = path.encode("utf-8")
if os.path.exists(path):
with open(path, "r") as f:
return as_unicode(f.read())
elif raise_not_found:
raise IOError("{} Not Found".format(path))
else:
return None
def get_attr(method_string):
"""Get python method object from its name."""
app_name = method_string.split(".")[0]
if not local.flags.in_install and app_name not in get_installed_apps():
throw(_("App {0} is not installed").format(app_name), AppNotInstalledError)
modulename = '.'.join(method_string.split('.')[:-1])
methodname = method_string.split('.')[-1]
return getattr(get_module(modulename), methodname)
def call(fn, *args, **kwargs):
"""Call a function and match arguments."""
if isinstance(fn, string_types):
fn = get_attr(fn)
newargs = get_newargs(fn, kwargs)
return fn(*args, **newargs)
def get_newargs(fn, kwargs):
if hasattr(fn, 'fnargs'):
fnargs = fn.fnargs
else:
fnargs, varargs, varkw, defaults = inspect.getargspec(fn)
newargs = {}
for a in kwargs:
if (a in fnargs) or varkw:
newargs[a] = kwargs.get(a)
if "flags" in newargs:
del newargs["flags"]
return newargs
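# Illustrative sketch (not part of the original module): get_newargs keeps only the
# keyword arguments that the target function actually declares (and always drops
# "flags"), which is what lets `call` pass a superset of kwargs safely.
def _example_get_newargs():
    def greet(name, greeting="Hello"):
        return "{0}, {1}".format(greeting, name)
    kwargs = {"name": "World", "greeting": "Hi", "ignored": 1, "flags": {"x": 1}}
    newargs = get_newargs(greet, kwargs)
    # newargs == {"name": "World", "greeting": "Hi"}
    return greet(**newargs)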
def make_property_setter(args, ignore_validate=False, validate_fields_for_doctype=True):
"""Create a new **Property Setter** (for overriding DocType and DocField properties).
If doctype is not specified, it will create a property setter for all fields with the
given fieldname"""
args = _dict(args)
if not args.doctype_or_field:
args.doctype_or_field = 'DocField'
if not args.property_type:
args.property_type = db.get_value('DocField',
{'parent': 'DocField', 'fieldname': args.property}, 'fieldtype') or 'Data'
if not args.doctype:
doctype_list = db.sql_list('select distinct parent from tabDocField where fieldname=%s', args.fieldname)
else:
doctype_list = [args.doctype]
for doctype in doctype_list:
if not args.property_type:
args.property_type = db.get_value('DocField',
{'parent': doctype, 'fieldname': args.fieldname}, 'fieldtype') or 'Data'
ps = get_doc({
'doctype': "Property Setter",
'doctype_or_field': args.doctype_or_field,
'doc_type': doctype,
'field_name': args.fieldname,
'property': args.property,
'value': args.value,
'property_type': args.property_type or "Data",
'__islocal': 1
})
ps.flags.ignore_validate = ignore_validate
ps.flags.validate_fields_for_doctype = validate_fields_for_doctype
ps.validate_fieldtype_change()
ps.insert()
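# Illustrative sketch (not part of the original module): the shape of `args` expected
# by make_property_setter. The DocType, fieldname and value below are hypothetical,
# and running this needs an initialized site with a database connection.
def _example_make_property_setter():
    make_property_setter({
        "doctype": "ToDo",          # omit to target every DocType that has the field
        "fieldname": "description",
        "property": "reqd",
        "value": "1",
        "property_type": "Check",
    })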
def import_doc(path, ignore_links=False, ignore_insert=False, insert=False):
"""Import a file using Data Import."""
from frappe.core.doctype.data_import import data_import
data_import.import_doc(path, ignore_links=ignore_links, ignore_insert=ignore_insert, insert=insert)
def copy_doc(doc, ignore_no_copy=True):
""" No_copy fields also get copied."""
import copy
def remove_no_copy_fields(d):
for df in d.meta.get("fields", {"no_copy": 1}):
if hasattr(d, df.fieldname):
d.set(df.fieldname, None)
fields_to_clear = ['name', 'owner', 'creation', 'modified', 'modified_by']
if not local.flags.in_test:
fields_to_clear.append("docstatus")
if not isinstance(doc, dict):
d = doc.as_dict()
else:
d = doc
newdoc = get_doc(copy.deepcopy(d))
newdoc.set("__islocal", 1)
for fieldname in (fields_to_clear + ['amended_from', 'amendment_date']):
newdoc.set(fieldname, None)
if not ignore_no_copy:
remove_no_copy_fields(newdoc)
for i, d in enumerate(newdoc.get_all_children()):
d.set("__islocal", 1)
for fieldname in fields_to_clear:
d.set(fieldname, None)
if not ignore_no_copy:
remove_no_copy_fields(d)
return newdoc
def compare(val1, condition, val2):
"""Compare two values using `frappe.utils.compare`
`condition` could be:
- "^"
- "in"
- "not in"
- "="
- "!="
- ">"
- "<"
- ">="
- "<="
- "not None"
- "None"
"""
import frappe.utils
return frappe.utils.compare(val1, condition, val2)
def respond_as_web_page(title, html, success=None, http_status_code=None,
context=None, indicator_color=None, primary_action='/', primary_label = None, fullpage=False,
width=None, template='message'):
"""Send response as a web page with a message rather than JSON. Used to show permission errors etc.
:param title: Page title and heading.
:param message: Message to be shown.
:param success: Alert message.
:param http_status_code: HTTP status code
:param context: web template context
:param indicator_color: color of indicator in title
:param primary_action: route on primary button (default is `/`)
:param primary_label: label on primary button (default is "Home")
:param fullpage: hide header / footer
:param width: Width of message in pixels
:param template: Optionally pass view template
"""
local.message_title = title
local.message = html
local.response['type'] = 'page'
local.response['route'] = template
local.no_cache = 1
if http_status_code:
local.response['http_status_code'] = http_status_code
if not context:
context = {}
if not indicator_color:
if success:
indicator_color = 'green'
elif http_status_code and http_status_code > 300:
indicator_color = 'red'
else:
indicator_color = 'blue'
context['indicator_color'] = indicator_color
context['primary_label'] = primary_label
context['primary_action'] = primary_action
context['error_code'] = http_status_code
context['fullpage'] = fullpage
if width:
context['card_width'] = width
local.response['context'] = context
def redirect_to_message(title, html, http_status_code=None, context=None, indicator_color=None):
"""Redirects to /message?id=random
Similar to respond_as_web_page, but used to 'redirect' and show message pages like success, failure, etc. with a detailed message
:param title: Page title and heading.
:param message: Message to be shown.
:param http_status_code: HTTP status code.
Example Usage:
frappe.redirect_to_message(_('Thank you'), "<div><p>You will receive an email at test@example.com</p></div>")
"""
message_id = generate_hash(length=8)
message = {
'context': context or {},
'http_status_code': http_status_code or 200
}
message['context'].update({
'header': title,
'title': title,
'message': html
})
if indicator_color:
message['context'].update({
"indicator_color": indicator_color
})
cache().set_value("message_id:{0}".format(message_id), message, expires_in_sec=60)
location = '/message?id={0}'.format(message_id)
if not getattr(local, 'is_ajax', False):
local.response["type"] = "redirect"
local.response["location"] = location
else:
return location
def build_match_conditions(doctype, as_condition=True):
"""Return match (User permissions) for given doctype as list or SQL."""
import frappe.desk.reportview
return frappe.desk.reportview.build_match_conditions(doctype, as_condition=as_condition)
def get_list(doctype, *args, **kwargs):
"""List database query via `frappe.model.db_query`. Will also check for permissions.
:param doctype: DocType on which query is to be made.
:param fields: List of fields or `*`.
:param filters: List of filters (see example).
:param order_by: Order By e.g. `modified desc`.
:param limit_page_start: Start results at record #. Default 0.
:param limit_page_length: No of records in the page. Default 20.
Example usage:
# simple dict filter
frappe.get_list("ToDo", fields=["name", "description"], filters = {"owner":"test@example.com"})
# filter as a list of lists
frappe.get_list("ToDo", fields="*", filters = [["modified", ">", "2014-01-01"]])
# filter as a list of dicts
frappe.get_list("ToDo", fields="*", filters = {"description": ("like", "test%")})
"""
import frappe.model.db_query
return frappe.model.db_query.DatabaseQuery(doctype).execute(None, *args, **kwargs)
def get_all(doctype, *args, **kwargs):
"""List database query via `frappe.model.db_query`. Will **not** check for permissions.
Parameters are same as `frappe.get_list`
:param doctype: DocType on which query is to be made.
:param fields: List of fields or `*`. Default is: `["name"]`.
:param filters: List of filters (see example).
:param order_by: Order By e.g. `modified desc`.
:param limit_page_start: Start results at record #. Default 0.
:param limit_page_length: No of records in the page. Default 20.
Example usage:
# simple dict filter
frappe.get_all("ToDo", fields=["name", "description"], filters = {"owner":"test@example.com"})
# filter as a list of lists
frappe.get_all("ToDo", fields=["*"], filters = [["modified", ">", "2014-01-01"]])
# filter as a list of dicts
frappe.get_all("ToDo", fields=["*"], filters = {"description": ("like", "test%")})
"""
kwargs["ignore_permissions"] = True
if not "limit_page_length" in kwargs:
kwargs["limit_page_length"] = 0
return get_list(doctype, *args, **kwargs)
def get_value(*args, **kwargs):
"""Returns a document property or list of properties.
Alias for `frappe.db.get_value`
:param doctype: DocType name.
:param filters: Filters like `{"x":"y"}` or name of the document. `None` if Single DocType.
:param fieldname: Column name.
:param ignore: Don't raise exception if table, column is missing.
:param as_dict: Return values as dict.
:param debug: Print query in error log.
"""
return db.get_value(*args, **kwargs)
def as_json(obj, indent=1):
from frappe.utils.response import json_handler
return json.dumps(obj, indent=indent, sort_keys=True, default=json_handler)
def are_emails_muted():
from frappe.utils import cint
return flags.mute_emails or cint(conf.get("mute_emails") or 0) or False
def get_test_records(doctype):
"""Returns list of objects from `test_records.json` in the given doctype's folder."""
from frappe.modules import get_doctype_module, get_module_path
path = os.path.join(get_module_path(get_doctype_module(doctype)), "doctype", scrub(doctype), "test_records.json")
if os.path.exists(path):
with open(path, "r") as f:
return json.loads(f.read())
else:
return []
def format_value(*args, **kwargs):
"""Format value with given field properties.
:param value: Value to be formatted.
:param df: (Optional) DocField object with properties `fieldtype`, `options` etc."""
import frappe.utils.formatters
return frappe.utils.formatters.format_value(*args, **kwargs)
def format(*args, **kwargs):
"""Format value with given field properties.
:param value: Value to be formatted.
:param df: (Optional) DocField object with properties `fieldtype`, `options` etc."""
import frappe.utils.formatters
return frappe.utils.formatters.format_value(*args, **kwargs)
def get_print(doctype=None, name=None, print_format=None, style=None, html=None, as_pdf=False, doc=None, output = None, no_letterhead = 0):
"""Get Print Format for given document.
:param doctype: DocType of document.
:param name: Name of document.
:param print_format: Print Format name. Default 'Standard'.
:param style: Print Format style.
:param as_pdf: Return as PDF. Default False."""
from frappe.website.render import build_page
from frappe.utils.pdf import get_pdf
local.form_dict.doctype = doctype
local.form_dict.name = name
local.form_dict.format = print_format
local.form_dict.style = style
local.form_dict.doc = doc
local.form_dict.no_letterhead = no_letterhead
if not html:
html = build_page("printview")
if as_pdf:
return get_pdf(html, output = output)
else:
return html
def attach_print(doctype, name, file_name=None, print_format=None, style=None, html=None, doc=None, lang=None, print_letterhead=True):
from frappe.utils import scrub_urls
if not file_name: file_name = name
file_name = file_name.replace(' ','').replace('/','-')
print_settings = db.get_singles_dict("Print Settings")
_lang = local.lang
#set lang as specified in print format attachment
if lang: local.lang = lang
local.flags.ignore_print_permissions = True
no_letterhead = not print_letterhead
if int(print_settings.send_print_as_pdf or 0):
out = {
"fname": file_name + ".pdf",
"fcontent": get_print(doctype, name, print_format=print_format, style=style, html=html, as_pdf=True, doc=doc, no_letterhead=no_letterhead)
}
else:
out = {
"fname": file_name + ".html",
"fcontent": scrub_urls(get_print(doctype, name, print_format=print_format, style=style, html=html, doc=doc, no_letterhead=no_letterhead)).encode("utf-8")
}
local.flags.ignore_print_permissions = False
#reset lang to original local lang
local.lang = _lang
return out
def publish_progress(*args, **kwargs):
"""Show the user progress for a long request
:param percent: Percent progress
:param title: Title
:param doctype: Optional, for document type
:param docname: Optional, for document name
:param description: Optional description
"""
import frappe.realtime
return frappe.realtime.publish_progress(*args, **kwargs)
def publish_realtime(*args, **kwargs):
"""Publish real-time updates
:param event: Event name, like `task_progress` etc.
:param message: JSON message object. For async must contain `task_id`
:param room: Room in which to publish update (default entire site)
:param user: Transmit to user
:param doctype: Transmit to doctype, docname
:param docname: Transmit to doctype, docname
:param after_commit: (default False) will emit after current transaction is committed
"""
import frappe.realtime
return frappe.realtime.publish_realtime(*args, **kwargs)
def local_cache(namespace, key, generator, regenerate_if_none=False):
"""A key value store for caching within a request
:param namespace: frappe.local.cache[namespace]
:param key: frappe.local.cache[namespace][key] used to retrieve value
:param generator: method to generate a value if not found in store
"""
if namespace not in local.cache:
local.cache[namespace] = {}
if key not in local.cache[namespace]:
local.cache[namespace][key] = generator()
elif local.cache[namespace][key] is None and regenerate_if_none:
# if key exists but the previous result was None
local.cache[namespace][key] = generator()
return local.cache[namespace][key]
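# Illustrative sketch (not part of the original module): memoizing an expensive lookup
# for the duration of the current request with local_cache. Assumes an initialized
# request/site context so that `local.cache` and `db` exist; the namespace and the
# generator below are hypothetical.
def _example_local_cache(doctype):
    return local_cache("doctype_modules", doctype,
                       generator=lambda: db.get_value("DocType", doctype, "module"))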
def enqueue(*args, **kwargs):
'''
Enqueue method to be executed using a background worker
:param method: method string or method object
:param queue: (optional) should be either long, default or short
:param timeout: (optional) timeout in seconds, set according to how long the method is expected to run
:param event: this is passed to enable clearing of jobs from queues
:param is_async: (optional) if is_async=False, the method is executed immediately, else via a worker
:param job_name: (optional) can be used to name an enqueue call, which can be used to prevent duplicate calls
:param kwargs: keyword arguments to be passed to the method
'''
import frappe.utils.background_jobs
return frappe.utils.background_jobs.enqueue(*args, **kwargs)
def enqueue_doc(*args, **kwargs):
'''
Enqueue method to be executed using a background worker
:param doctype: DocType of the document on which you want to run the event
:param name: Name of the document on which you want to run the event
:param method: method string or method object
:param queue: (optional) should be either long, default or short
:param timeout: (optional) timeout in seconds, set according to how long the method is expected to run
:param kwargs: keyword arguments to be passed to the method
'''
import frappe.utils.background_jobs
return frappe.utils.background_jobs.enqueue_doc(*args, **kwargs)
def get_doctype_app(doctype):
def _get_doctype_app():
doctype_module = local.db.get_value("DocType", doctype, "module")
return local.module_app[scrub(doctype_module)]
return local_cache("doctype_app", doctype, generator=_get_doctype_app)
loggers = {}
log_level = None
def logger(module=None, with_more_info=True):
'''Returns a python logger that uses StreamHandler'''
from frappe.utils.logger import get_logger
return get_logger(module or 'default', with_more_info=with_more_info)
def log_error(message=None, title=None):
'''Log error to Error Log'''
return get_doc(dict(doctype='Error Log', error=as_unicode(message or get_traceback()),
method=title)).insert(ignore_permissions=True)
def get_desk_link(doctype, name):
return '<a href="#Form/{0}/{1}" style="font-weight: bold;">{2} {1}</a>'.format(doctype, name, _(doctype))
def bold(text):
return '<b>{0}</b>'.format(text)
def safe_eval(code, eval_globals=None, eval_locals=None):
'''A safer `eval`'''
whitelisted_globals = {
"int": int,
"float": float,
"long": int,
"round": round
}
if '__' in code:
throw('Illegal rule {0}. Cannot use "__"'.format(bold(code)))
if not eval_globals:
eval_globals = {}
eval_globals['__builtins__'] = {}
eval_globals.update(whitelisted_globals)
return eval(code, eval_globals, eval_locals)
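# Illustrative sketch (not part of the original module): safe_eval exposes only the
# whitelisted builtins above plus whatever is passed in eval_locals, and rejects any
# expression containing "__".
def _example_safe_eval():
    total = safe_eval("int(qty) * rate", None, {"qty": "3", "rate": 2.5})
    assert total == 7.5
    # safe_eval("().__class__", ...) would raise via `throw` because of the "__" check
    return total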
def get_system_settings(key):
if key not in local.system_settings:
local.system_settings.update({key: db.get_single_value('System Settings', key)})
return local.system_settings.get(key)
def get_active_domains():
from frappe.core.doctype.domain_settings.domain_settings import get_active_domains
return get_active_domains()
def get_version(doctype, name, limit = None, head = False, raise_err = True):
'''
Returns a list of version information of a given DocType (Applicable only if DocType has changes tracked).
Example
>>> frappe.get_version('User', 'foobar@gmail.com')
>>>
[
{
"version": [version.data], # Refer Version DocType get_diff method and data attribute
"user": "admin@gmail.com" # User that created this version
"creation": <datetime.datetime> # Creation timestamp of that object.
}
]
'''
meta = get_meta(doctype)
if meta.track_changes:
names = db.sql("""
SELECT name from tabVersion
WHERE ref_doctype = '{doctype}' AND docname = '{name}'
{order_by}
{limit}
""".format(
doctype = doctype,
name = name,
order_by = 'ORDER BY creation' if head else '',
limit = 'LIMIT {limit}'.format(limit = limit) if limit else ''
))
from frappe.chat.util import squashify, dictify, safe_json_loads
versions = [ ]
for name in names:
name = squashify(name)
doc = get_doc('Version', name)
data = doc.data
data = safe_json_loads(data)
data = dictify(dict(
version = data,
user = doc.owner,
creation = doc.creation
))
versions.append(data)
return versions
else:
if raise_err:
raise ValueError('{doctype} has no versions tracked.'.format(
doctype = doctype
))
@whitelist(allow_guest = True)
def ping():
return "pong"
def safe_encode(param, encoding = 'utf-8'):
try:
param = param.encode(encoding)
except Exception:
pass
return param
def safe_decode(param, encoding = 'utf-8'):
try:
param = param.decode(encoding)
except Exception:
pass
return param
def parse_json(val):
from frappe.utils import parse_json
return parse_json(val)
def mock(type, size = 1, locale = 'en'):
# Faker is not imported elsewhere in this excerpt, so import it locally
from faker import Faker
results = [ ]
faker = Faker(locale)
if type not in dir(faker):
raise ValueError('Not a valid mock type.')
else:
for i in range(size):
data = getattr(faker, type)()
results.append(data)
from frappe.chat.util import squashify
results = squashify(results)
return results
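# Illustrative sketch (not part of the original module): `type` must be the name of a
# Faker provider method such as "name" or "email". With the default size=1, squashify
# unwraps the single-element list so a bare value is returned; larger sizes return a list.
def _example_mock():
    single_name = mock("name")            # e.g. "Jane Doe" (random)
    three_emails = mock("email", size=3)  # list of three fake addresses
    return single_name, three_emails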
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""A sphnix-doc plugin to build mxnet docs"""
from __future__ import print_function
import subprocess
import re
import os
import json
import sys
from recommonmark import transform
import pypandoc
import contextlib
# Use six for Python 2 / 3 compatibility
from six import StringIO
from six.moves import configparser
_BUILD_VER = os.getenv('BUILD_VER', 'default')
print("Building version {}".format(_BUILD_VER))
_DOC_SET = 'document_sets_' + _BUILD_VER
parser = configparser.SafeConfigParser()
parser.read('settings.ini')
if _DOC_SET not in parser.sections():
_DOC_SET = 'document_sets_default'
for section in [ _DOC_SET ]:
print("Document sets to generate:")
for candidate in [ 'scala_docs', 'java_docs', 'clojure_docs', 'doxygen_docs', 'julia_docs', 'r_docs' ]:
print('%-12s : %s' % (candidate, parser.get(section, candidate)))
_MXNET_DOCS_BUILD_MXNET = parser.getboolean('mxnet', 'build_mxnet')
_SCALA_DOCS = parser.getboolean(_DOC_SET, 'scala_docs')
_JAVA_DOCS = parser.getboolean(_DOC_SET, 'java_docs')
_CLOJURE_DOCS = parser.getboolean(_DOC_SET, 'clojure_docs')
_DOXYGEN_DOCS = parser.getboolean(_DOC_SET, 'doxygen_docs')
_R_DOCS = parser.getboolean(_DOC_SET, 'r_docs')
_JULIA_DOCS = parser.getboolean(_DOC_SET, 'julia_docs')
_ARTIFACTS = parser.getboolean(_DOC_SET, 'artifacts')
# whitelist of documents whose code block output is evaluated, such as ['tutorials/gluon']
_EVAL_WHITELIST = []
# start or end of a code block
_CODE_MARK = re.compile(r'^([ ]*)```([\w]*)')
# language names and the according file extensions and comment symbol
_LANGS = {'python' : ('py', '#'),
'r' : ('R','#'),
'scala' : ('scala', '//'),
'java' : ('java', '//'),
'julia' : ('jl', '#'),
'perl' : ('pl', '#'),
'cpp' : ('cc', '//'),
'bash' : ('sh', '#')}
_LANG_SELECTION_MARK = 'INSERT SELECTION BUTTONS'
_SRC_DOWNLOAD_MARK = 'INSERT SOURCE DOWNLOAD BUTTONS'
def _run_cmd(cmds):
"""Run commands, raise exception if failed"""
if not isinstance(cmds, str):
cmds = "".join(cmds)
print("Execute \"%s\"" % cmds)
try:
subprocess.check_call(cmds, shell=True)
except subprocess.CalledProcessError as err:
print(err)
raise err
def generate_doxygen(app):
"""Run the doxygen make commands"""
_run_cmd("cd %s/.. && make doxygen" % app.builder.srcdir)
_run_cmd("cp -rf doxygen/html %s/doxygen" % app.builder.outdir)
def build_mxnet(app):
"""Build mxnet .so lib"""
if not os.path.exists(os.path.join(app.builder.srcdir, '..', 'config.mk')):
_run_cmd("cd %s/.. && cp make/config.mk config.mk && make -j$(nproc) USE_MKLDNN=0 USE_CPP_PACKAGE=1 " %
app.builder.srcdir)
else:
_run_cmd("cd %s/.. && make -j$(nproc) USE_MKLDNN=0 USE_CPP_PACKAGE=1 " %
app.builder.srcdir)
def build_julia_docs(app):
"""build Julia docs"""
dest_path = app.builder.outdir + '/api/julia/site'
_run_cmd('cd {}/.. && make -C julia/docs'.format(app.builder.srcdir))
_run_cmd('mkdir -p {}'.format(dest_path))
_run_cmd('cd {}/.. && cp -a julia/docs/site/* {}'.format(app.builder.srcdir, dest_path))
def build_r_docs(app):
"""build r pdf"""
r_root = app.builder.srcdir + '/../R-package'
pdf_path = app.builder.srcdir + '/api/r/mxnet-r-reference-manual.pdf'
_run_cmd('cd ' + r_root +
'; R -e "roxygen2::roxygenize()"; R CMD Rd2pdf . --no-preview -o ' + pdf_path)
dest_path = app.builder.outdir + '/api/r/'
_run_cmd('mkdir -p ' + dest_path + '; mv ' + pdf_path + ' ' + dest_path)
def build_scala(app):
"""build scala for scala docs, java docs, and clojure docs to use"""
if any(v in _BUILD_VER for v in ['1.2.', '1.3.', '1.4.']):
_run_cmd("cd %s/.. && make scalapkg" % app.builder.srcdir)
_run_cmd("cd %s/.. && make scalainstall" % app.builder.srcdir)
else:
_run_cmd("cd %s/../scala-package && mvn -B install -DskipTests" % app.builder.srcdir)
def build_scala_docs(app):
"""build scala doc and then move the outdir"""
scala_path = app.builder.srcdir + '/../scala-package'
scala_doc_sources = 'find . -type f -name "*.scala" | egrep \"\.\/core|\.\/infer\" | egrep -v \"\/javaapi\" | egrep -v \"Suite\"'
scala_doc_classpath = ':'.join([
'`find native -name "*.jar" | grep "target/lib/" | tr "\\n" ":" `',
'`find macros -name "*.jar" | tr "\\n" ":" `',
'`find core -name "*.jar" | tr "\\n" ":" `',
'`find infer -name "*.jar" | tr "\\n" ":" `'
])
# There are unresolvable errors on mxnet 1.2.x. We are ignoring those errors while aborting the ci on newer versions
scala_ignore_errors = '; exit 0' if any(v in _BUILD_VER for v in ['1.2.', '1.3.']) else ''
_run_cmd('cd {}; scaladoc `{}` -classpath {} -feature -deprecation {}'
.format(scala_path, scala_doc_sources, scala_doc_classpath, scala_ignore_errors))
dest_path = app.builder.outdir + '/api/scala/docs'
_run_cmd('rm -rf ' + dest_path)
_run_cmd('mkdir -p ' + dest_path)
# 'index' and 'package.html' do not exist in later versions of scala; delete these after upgrading scala>2.12.x
scaladocs = ['index', 'index.html', 'org', 'lib', 'index.js', 'package.html']
for doc_file in scaladocs:
_run_cmd('cd ' + scala_path + ' && mv -f ' + doc_file + ' ' + dest_path + '; exit 0')
def build_java_docs(app):
"""build java docs and then move the outdir"""
java_path = app.builder.srcdir + '/../scala-package'
java_doc_sources = 'find . -type f -name "*.scala" | egrep \"\.\/core|\.\/infer\" | egrep \"\/javaapi\" | egrep -v \"Suite\"'
java_doc_classpath = ':'.join([
'`find native -name "*.jar" | grep "target/lib/" | tr "\\n" ":" `',
'`find macros -name "*.jar" | tr "\\n" ":" `',
'`find core -name "*.jar" | tr "\\n" ":" `',
'`find infer -name "*.jar" | tr "\\n" ":" `'
])
_run_cmd('cd {}; scaladoc `{}` -classpath {} -feature -deprecation'
.format(java_path, java_doc_sources, java_doc_classpath))
dest_path = app.builder.outdir + '/api/java/docs'
_run_cmd('rm -rf ' + dest_path)
_run_cmd('mkdir -p ' + dest_path)
javadocs = ['index', 'index.html', 'org', 'lib', 'index.js', 'package.html']
for doc_file in javadocs:
_run_cmd('cd ' + java_path + ' && mv -f ' + doc_file + ' ' + dest_path + '; exit 0')
def build_clojure_docs(app):
"""build clojure doc and then move the outdir"""
clojure_path = app.builder.srcdir + '/../contrib/clojure-package'
_run_cmd('cd ' + clojure_path + '; lein codox')
dest_path = app.builder.outdir + '/api/clojure/docs'
_run_cmd('rm -rf ' + dest_path)
_run_cmd('mkdir -p ' + dest_path)
clojure_doc_path = app.builder.srcdir + '/../contrib/clojure-package/target/doc'
_run_cmd('cd ' + clojure_doc_path + ' && cp -r * ' + dest_path + '; exit 0')
def _convert_md_table_to_rst(table):
"""Convert a markdown table to rst format"""
if len(table) < 3:
return ''
out = '```eval_rst\n.. list-table::\n :header-rows: 1\n\n'
for i,l in enumerate(table):
cols = l.split('|')[1:-1]
if i == 0:
ncol = len(cols)
else:
if len(cols) != ncol:
return ''
if i == 1:
for c in cols:
if len(c) != 0 and '---' not in c:
return ''
else:
for j,c in enumerate(cols):
out += ' * - ' if j == 0 else ' - '
out += pypandoc.convert_text(
c, 'rst', format='md').replace('\n', ' ').replace('\r', '') + '\n'
out += '```\n'
return out
def convert_table(app, docname, source):
"""Find tables in a markdown and then convert them into the rst format"""
num_tables = 0
for i,j in enumerate(source):
table = []
output = ''
in_table = False
for l in j.split('\n'):
r = l.strip()
if r.startswith('|'):
table.append(r)
in_table = True
else:
if in_table is True:
converted = _convert_md_table_to_rst(table)
if converted == '':
print("Failed to convert the markdown table")
print(table)
else:
num_tables += 1
output += converted
in_table = False
table = []
output += l + '\n'
source[i] = output
if num_tables > 0:
print('Converted %d tables in %s' % (num_tables, docname))
def _parse_code_lines(lines):
"""A iterator that returns if a line is within a code block
Returns
-------
iterator of (str, bool, str, int)
- line: the line
- in_code: if this line is in a code block
- lang: the code block language
- indent: the code indent
"""
in_code = False
lang = None
indent = None
for l in lines:
m = _CODE_MARK.match(l)
if m is not None:
if not in_code:
if m.groups()[1].lower() in _LANGS:
lang = m.groups()[1].lower()
indent = len(m.groups()[0])
in_code = True
yield (l, in_code, lang, indent)
else:
yield (l, in_code, lang, indent)
lang = None
indent = None
in_code = False
else:
yield (l, in_code, lang, indent)
def _get_lang_selection_btn(langs):
active = True
btngroup = '<div class="text-center">\n<div class="btn-group opt-group" role="group">'
for l in langs:
btngroup += '<button type="button" class="btn btn-default opt %s">%s</button>\n' % (
'active' if active else '', l[0].upper()+l[1:].lower())
active = False
btngroup += '</div>\n</div> <script type="text/javascript" src="../../_static/js/options.js"></script>'
return btngroup
def _get_blocks(lines):
"""split lines into code and non-code blocks
Returns
-------
iterator of (bool, str, list of str)
- if it is a code block
- source language
- lines of source
"""
cur_block = []
pre_lang = None
pre_in_code = None
for (l, in_code, cur_lang, _) in _parse_code_lines(lines):
if in_code != pre_in_code:
if pre_in_code and len(cur_block) >= 2:
cur_block = cur_block[1:-1] # remove ```
# remove empty lines at head
while len(cur_block) > 0:
if len(cur_block[0]) == 0:
cur_block.pop(0)
else:
break
# remove empty lines at tail
while len(cur_block) > 0:
if len(cur_block[-1]) == 0:
cur_block.pop()
else:
break
if len(cur_block):
yield (pre_in_code, pre_lang, cur_block)
cur_block = []
cur_block.append(l)
pre_lang = cur_lang
pre_in_code = in_code
if len(cur_block):
yield (pre_in_code, pre_lang, cur_block)
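# Illustrative sketch (not part of the original file): what _get_blocks yields for a
# small markdown fragment. A fenced block keeps its language tag and loses the ```
# fence lines and surrounding blank lines; everything else comes back as prose blocks.
def _example_get_blocks():
    lines = ['# Title', '', '```python', 'x = 1', '```', 'tail text']
    return list(_get_blocks(lines))
    # -> [(False, None, ['# Title']),
    #     (True, 'python', ['x = 1']),
    #     (False, None, ['tail text'])]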
def _get_mk_code_block(src, lang):
"""Return a markdown code block
E.g.
```python
import mxnet
```
"""
if lang is None:
lang = ''
return '```'+lang+'\n'+src.rstrip()+'\n'+'```\n'
@contextlib.contextmanager
def _string_io():
oldout = sys.stdout
olderr = sys.stderr
strio = StringIO()
sys.stdout = strio
sys.stderr = strio
yield strio
sys.stdout = oldout
sys.stderr = olderr
def _get_python_block_output(src, global_dict, local_dict):
"""Evaluate python source codes
Returns
(bool, str):
- True if success
- output
"""
src = '\n'.join([l for l in src.split('\n')
if not l.startswith('%') and not 'plt.show()' in l])
ret_status = True
err = ''
with _string_io() as s:
try:
exec(src, global_dict, global_dict)
except Exception as e:
err = str(e)
ret_status = False
return (ret_status, s.getvalue()+err)
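# Illustrative sketch (not part of the original file): the evaluator captures stdout
# of the executed block, or the exception text when execution fails.
def _example_python_block_output():
    ok, out = _get_python_block_output("print(1 + 1)", {}, {})
    # ok is True, out == "2\n"
    bad, err = _get_python_block_output("1 / 0", {}, {})
    # bad is False, err ends with "division by zero"
    return ok, out, bad, err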
def _get_jupyter_notebook(lang, all_lines):
cells = []
# Exclude lines containing <!--notebook-skip-line-->
filtered_lines = [line for line in all_lines if "<!--notebook-skip-line-->" not in line]
for in_code, blk_lang, lines in _get_blocks(filtered_lines):
if blk_lang != lang:
in_code = False
src = '\n'.join(lines)
cell = {
"cell_type": "code" if in_code else "markdown",
"metadata": {},
"source": src
}
if in_code:
cell.update({
"outputs": [],
"execution_count": None,
})
cells.append(cell)
ipynb = {"nbformat" : 4,
"nbformat_minor" : 2,
"metadata" : {"language":lang, "display_name":'', "name":''},
"cells" : cells}
return ipynb
def _get_source(lang, lines):
cmt = _LANGS[lang][1] + ' '
out = []
for in_code, blk_lang, lines in _get_blocks(lines):
    if blk_lang != lang:
        in_code = False
if in_code:
out.append('')
for l in lines:
if in_code:
if '%matplotlib' not in l:
out.append(l)
else:
if ('<div>' in l or '</div>' in l or
'<script>' in l or '</script>' in l or
'<!--' in l or '-->' in l or
'%matplotlib' in l ):
continue
out.append(cmt+l)
if in_code:
out.append('')
return out
def _get_src_download_btn(out_prefix, langs, lines):
btn = '<div class="btn-group" role="group">\n'
for lang in langs:
ipynb = out_prefix
if lang == 'python':
ipynb += '.ipynb'
else:
ipynb += '_' + lang + '.ipynb'
with open(ipynb, 'w') as f:
json.dump(_get_jupyter_notebook(lang, lines), f)
f = ipynb.split('/')[-1]
btn += '<div class="download-btn"><a href="%s" download="%s">' \
'<span class="glyphicon glyphicon-download-alt"></span> %s</a></div>' % (f, f, f)
btn += '</div>\n'
return btn
def add_buttons(app, docname, source):
out_prefix = app.builder.outdir + '/' + docname
dirname = os.path.dirname(out_prefix)
if not os.path.exists(dirname):
os.makedirs(dirname)
for i,j in enumerate(source):
local_dict = {}
global_dict = {}
lines = j.split('\n')
langs = set([l for (_, _, l, _) in _parse_code_lines(lines)
if l is not None and l in _LANGS])
# first convert
for k,l in enumerate(lines):
if _SRC_DOWNLOAD_MARK in l:
lines[k] = _get_src_download_btn(
out_prefix, langs, lines)
# # then add lang buttons
# for k,l in enumerate(lines):
# if _LANG_SELECTION_MARK in l:
# lines[k] = _get_lang_selection_btn(langs)
output = ''
for in_code, lang, lines in _get_blocks(lines):
src = '\n'.join(lines)+'\n'
if in_code:
output += _get_mk_code_block(src, lang)
if lang == 'python' and any([w in docname for w in _EVAL_WHITELIST]):
status, blk_out = _get_python_block_output(src, global_dict, local_dict)
if len(blk_out):
output += '<div class=\"cell-results-header\">Output:</div>\n\n'
output += _get_mk_code_block(blk_out, 'results')
else:
output += src
source[i] = output
# source[i] = '\n'.join(lines)
def copy_artifacts(app):
"""Copies artifacts needed for website presentation"""
dest_path = app.builder.outdir + '/error'
source_path = app.builder.srcdir + '/build_version_doc/artifacts'
_run_cmd('cd ' + app.builder.srcdir)
_run_cmd('rm -rf ' + dest_path)
_run_cmd('mkdir -p ' + dest_path)
_run_cmd('cp ' + source_path + '/404.html ' + dest_path)
_run_cmd('cp ' + source_path + '/api.html ' + dest_path)
dest_path = app.builder.outdir + '/_static'
_run_cmd('rm -rf ' + dest_path)
_run_cmd('mkdir -p ' + dest_path)
_run_cmd('cp ' + app.builder.srcdir + '/_static/mxnet.css ' + dest_path)
def setup(app):
# If MXNET_DOCS_BUILD_MXNET is set to something other than '1',
# skip the MXNet build step
if os.getenv('MXNET_DOCS_BUILD_MXNET') == '1' or _MXNET_DOCS_BUILD_MXNET:
print("Building MXNet!")
app.connect("builder-inited", build_mxnet)
if _DOXYGEN_DOCS:
print("Building Doxygen!")
app.connect("builder-inited", generate_doxygen)
if _SCALA_DOCS or _CLOJURE_DOCS:
print("Building Scala!")
app.connect("builder-inited", build_scala)
if _SCALA_DOCS:
print("Building Scala Docs!")
app.connect("builder-inited", build_scala_docs)
if _JAVA_DOCS:
print("Building Java Docs!")
app.connect("builder-inited", build_java_docs)
if _CLOJURE_DOCS:
print("Building Clojure Docs!")
app.connect("builder-inited", build_clojure_docs)
if _JULIA_DOCS:
print("Building Julia Docs!")
app.connect("builder-inited", build_julia_docs)
if _R_DOCS:
print("Building R Docs!")
app.connect("builder-inited", build_r_docs)
if _ARTIFACTS:
print("Copying Artifacts!")
app.connect("builder-inited", copy_artifacts)
app.connect('source-read', convert_table)
app.connect('source-read', add_buttons)
app.add_config_value('recommonmark_config', {
'url_resolver': lambda url: 'http://mxnet.io/' + url,
'enable_eval_rst': True,
}, True)
app.add_transform(transform.AutoStructify)
|
# coding: utf-8
#
# Copyright 2022 :Barry-Thomas-Paul: Moss
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http: // www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Service Class
# this is a auto generated file generated by Cheetah
# Libre Office Version: 7.3
# Namespace: com.sun.star.sheet
from ...lo.sheet.spreadsheet_view_panes_enumeration import SpreadsheetViewPanesEnumeration as SpreadsheetViewPanesEnumeration
__all__ = ['SpreadsheetViewPanesEnumeration']
|
import os
import sqlite3
from typing import List, Optional
from .storage_base import Storage
class SqliteStorage(Storage):
def __init__(self, db_path):
"""
Init table "data" with the attribute "key" being the primary key
:param db_path: str. Path to database file
"""
super().__init__()
db_path = os.path.expanduser(db_path)
if len(os.path.dirname(db_path)) > 0 and not os.path.exists(os.path.dirname(db_path)):
try:
os.makedirs(os.path.dirname(db_path))
except PermissionError:
raise PermissionError(f'Could not create database directory: {db_path}') from None
self.conn = sqlite3.connect(os.path.expanduser(db_path))
c = self.conn.cursor()
c.execute("""
CREATE TABLE IF NOT EXISTS data (
key BLOB PRIMARY KEY,
value BLOB,
expire_time_ms INTEGER
)
""")
self.conn.commit()
def _put(self, key: bytes, value: bytes, expire_time_ms=None):
"""
Insert value and its expiration time into sqlite3, overwrite if already exists.
:param key: bytes.
:param value: bytes.
:param expire_time_ms: Optional[int]. Value is not fresh if expire_time_ms is not specified.
"""
c = self.conn.cursor()
c.execute('INSERT OR REPLACE INTO data (key, value, expire_time_ms) VALUES (?, ?, ?)',
(key, value, expire_time_ms))
self.conn.commit()
def _put_batch(self, keys: List[bytes], values: List[bytes], expire_time_mss:List[Optional[int]]):
"""
Batch insert.
:param keys: List[bytes].
:param values: List[bytes].
:param expire_time_mss: List[Optional[int]].
"""
c = self.conn.cursor()
c.executemany('INSERT OR REPLACE INTO data (key, value, expire_time_ms) VALUES (?, ?, ?)',
zip(keys, values, expire_time_mss))
self.conn.commit()
def _get(self, key: bytes, can_be_prefix=False, must_be_fresh=False) -> Optional[bytes]:
"""
Get value from sqlite3.
:param key: bytes.
:param can_be_prefix: bool.
:param must_be_fresh: bool.
:return: bytes.
"""
c = self.conn.cursor()
query = 'SELECT value FROM data WHERE '
if must_be_fresh:
query += f'(expire_time_ms > {time_ms()}) AND '
if can_be_prefix:
query += 'hex(key) LIKE ?'
c.execute(query, (key.hex() + '%', ))
else:
query += 'key = ?'
c.execute(query, (key, ))
ret = c.fetchone()
return ret[0] if ret else None
def _remove(self, key: bytes) -> bool:
"""
Remove value from sqlite. Return whether removal is successful.
:param key: bytes.
:return: bool.
"""
c = self.conn.cursor()
n_removed = c.execute('DELETE FROM data WHERE key = ?', (key, )).rowcount
self.conn.commit()
return n_removed > 0
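# Illustrative usage sketch (not part of the original file), assuming the Storage base
# class takes no constructor arguments, as the bare super().__init__() call above
# suggests. An in-memory database keeps the example self-contained; real callers would
# normally go through the public Storage API rather than the underscored methods.
def _example_sqlite_storage():
    storage = SqliteStorage(':memory:')
    storage._put(b'/demo/key', b'value')
    assert storage._get(b'/demo/key') == b'value'
    assert storage._get(b'/demo', can_be_prefix=True) == b'value'
    assert storage._remove(b'/demo/key') is True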
|
'''
Serve up a fake REST server acting as device REST API
'''
import sys
import argparse
from pathlib import Path
import ssl
import logging
from aiohttp import web
import xmltodict
def get_filename_from_cmd(cmd_dict: dict) -> str:
"""Build filename from an xml command"""
keys = []
def recursive_items(cmd_dict: dict):
for key, value in cmd_dict.items():
if isinstance(value, dict):
# do not use list entry for filename
if key != "entry":
keys.append(key)
recursive_items(value)
elif value:
# do not use property names for filename
if not key.startswith("@"):
keys.append(key)
keys.append(value)
else:
keys.append(key)
recursive_items(cmd_dict)
return "_".join(keys).replace("-", "_")
def run_server(port=443, input_dir: str = None):
"""Run sim rest server for the given nos, version and hostname"""
api_key = "xxXYHqhFIAWAlWTulizbXtfVfvV5ETfNynHxAlV3ZEUTtrUNKZBDY3aKmCFC"
auth_response = f"""<response status="success">
<result>
<key>{api_key}</key>
</result>
</response>"""
username = password = "vagrant"
routes = web.RouteTableDef()
@routes.get('/api/')
async def panos_cmd(request):
req_type = request.query.get("type", "")
req_auth_key = request.headers.get("X-PAN-KEY", "") or \
request.query.get("key", "")
# authentication with user and pass to get api key
if req_type == "keygen":
user = request.query.get("user", "")
passwd = request.query.get("password", "")
if user == username and passwd == password:
return web.Response(text=auth_response)
raise web.HTTPForbidden()
# cmd queries
if req_type == "op" and req_auth_key == api_key:
xml_cmd = request.query.get("cmd", "")
if xml_cmd == "":
raise web.HTTPBadRequest()
cmd_dict = xmltodict.parse(xml_cmd)
cmd = get_filename_from_cmd(cmd_dict)
return web.FileResponse(f"{input_dir}/{cmd}.xml")
ssl_context = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
ssl_context.load_cert_chain(
'suzieq/config/etc/cert.pem', 'suzieq/config/etc/key.pem')
app = web.Application()
app.add_routes(routes)
logging.basicConfig(level=logging.DEBUG)
web.run_app(app, ssl_context=ssl_context, port=port)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
"-p", "--listening-port", type=int, default=10000,
help="Listening port of the ssh server (default: 10000)")
parser.add_argument(
"-d", "--input-dir", type=str, default=None,
help="Input dir to search for host files")
args = parser.parse_args()
if not Path(args.input_dir).exists():
print(f'ERROR: Path {args.input_dir} does not exist, aborting')
sys.exit(1)
run_server(port=args.listening_port, input_dir=args.input_dir)
|
# -*- coding: utf-8 -*-
'''
otsu.fun - SSWA Resources Docs String
@version: 0.1
@author: PurePeace
@time: 2020-01-07
@describe: docs string for api resources!!!
'''
demo = \
'''
Demo endpoint
Parameters: { foo }
---
- Nothing here
```
null
```
'''
# run? not.
if __name__ == '__main__':
print("only docs, so it doesn't work")
|
# _*_ coding: utf-8 _*_
"""
-------------------------------------------------
File Name: __init__.py
Description :
Author : ericdoug
date:2021/3/7
-------------------------------------------------
Change Activity:
2021/3/7: created
-------------------------------------------------
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import absolute_import
# sys packages
import os
# third packages
# my packages
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
#
# Copyright (c) 2020 University of Dundee.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
# OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Version: 1.0
#
import time
import tempfile
import tarfile
import numpy
import os
import zarr
import dask.array as da
import omero.clients
from omero.gateway import BlitzGateway
from getpass import getpass
from ilastik import app
from ilastik.applets.dataSelection.opDataSelection import PreloadedArrayDatasetInfo # noqa
import vigra
# Connect to the server
def connect(hostname, username, password):
conn = BlitzGateway(username, password,
host=hostname, secure=True)
conn.connect()
return conn
# Load-images
def load_images(conn, dataset_id):
return conn.getObjects('Image', opts={'dataset': dataset_id})
# Create-dataset
def create_dataset(conn, dataset_id):
dataset = omero.model.DatasetI()
v = "ilastik_probabilities_from_dataset_%s" % dataset_id
dataset.setName(omero.rtypes.rstring(v))
v = "ilatisk results probabilities from Dataset:%s" % dataset_id
dataset.setDescription(omero.rtypes.rstring(v))
return conn.getUpdateService().saveAndReturnObject(dataset)
# Load-data
def load_numpy_array(image, path, extension=".tar", resolution=0):
# load annotation linked to the image. Download in a tmp dir
for ann in image.listAnnotations():
if isinstance(ann, omero.gateway.FileAnnotationWrapper):
name = ann.getFile().getName()
ns = ann.getNs()
if (name.endswith(".zip") or name.endswith(".tar")) and ns is None:
file_path = os.path.join(path, name)
f_path = os.path.join(path, name.strip(extension))
with open(str(file_path), 'wb') as f:
for chunk in ann.getFileInChunks():
f.write(chunk)
# extract the file
if extension == ".tar":
tf = tarfile.open(file_path)
tf.extractall(path)
tf.close()
data = zarr.open(f_path)
values = data[resolution][:]
# from tczyx to tzyxc
values = values.swapaxes(1, 2).swapaxes(2, 3)
values = values.swapaxes(3, 4)
return values
else:
data = zarr.open(file_path)
return data[:]
return None
def load_from_s3(image, resolution='0'):
id = image.getId()
endpoint_url = 'https://minio-dev.openmicroscopy.org/'
root = 'idr/outreach/%s.zarr/' % id
# data.shape is (t, c, z, y, x) by convention
data = da.from_zarr(endpoint_url + root)
values = data[:]
values = values.swapaxes(1, 2).swapaxes(2, 3).swapaxes(3, 4)
return numpy.asarray(values)
# Analyze-data
def analyze(conn, images, model, new_dataset, extension=".tar", resolution=0):
# Prepare ilastik
    # temporary directory to download the files to
path = tempfile.mkdtemp()
if not os.path.exists(path):
os.makedirs(path)
os.environ["LAZYFLOW_THREADS"] = "2"
os.environ["LAZYFLOW_TOTAL_RAM_MB"] = "2000"
args = app.parse_args([])
args.headless = True
args.project = model
args.readonly = True
shell = app.main(args)
start = time.time()
for image in images:
        # load_from_s3 only needs the image; the tmp path is only used by load_numpy_array
        input_data = load_from_s3(image)
# run ilastik headless
print('running ilastik using %s and %s' % (model, image.getName()))
data = [ {"Raw Data": PreloadedArrayDatasetInfo(preloaded_array=input_data, axistags=vigra.defaultAxistags("tzyxc"))}] # noqa
shell.workflow.batchProcessingApplet.run_export(data, export_to_array=True) # noqa
elapsed = time.time() - start
print(elapsed)
# Save-results
def save_results(conn, image, data, dataset, path):
filename, file_extension = os.path.splitext(image.getName())
# Save the probabilities file as an image
print("Saving Probabilities as zarr file attached to the original Image")
name = filename + "_Probabilities_zarr.zip"
desc = "ilastik probabilities from Image:%s" % image.getId()
# Re-organise array from tzyxc to zctyx order expected by OMERO
# data = data.swapaxes(0, 1).swapaxes(3, 4).swapaxes(2, 3).swapaxes(1, 2)
namespace = "ilastik.zarr.demo"
fp = os.path.join(path, name)
with zarr.ZipStore(fp, mode='w') as store:
zarr.array(data, store=store, dtype='int16',
compressor=zarr.Blosc(cname='zstd'))
ann = conn.createFileAnnfromLocalFile(fp, mimetype="application/zip",
ns=namespace, desc=desc)
image.linkAnnotation(ann)
# Disconnect
def disconnect(conn):
conn.close()
# main
def main():
# Collect user credentials
try:
host = input("Host [wss://outreach.openmicroscopy.org/omero-ws]: ") or 'wss://outreach.openmicroscopy.org/omero-ws' # noqa
username = input("Username [trainer-1]: ") or 'trainer-1'
password = getpass("Password: ")
dataset_id = input("Dataset ID [6161]: ") or '6161'
# Connect to the server
conn = connect(host, username, password)
conn.c.enableKeepAlive(60)
# path to the ilastik project
ilastik_project = "../notebooks/pipelines/pixel-class-133.ilp"
# Load the images in the dataset
images = load_images(conn, dataset_id)
new_dataset = create_dataset(conn, dataset_id)
analyze(conn, images, ilastik_project, new_dataset)
finally:
disconnect(conn)
print("done")
if __name__ == "__main__":
main()
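# Note: save_results() above is defined but never called in this workflow. A hedged
# sketch of how it could be wired into the per-image loop in analyze(), assuming
# run_export() returns the probability arrays (the names below are illustrative):
#
#   predictions = shell.workflow.batchProcessingApplet.run_export(data, export_to_array=True)
#   save_results(conn, image, predictions[0], new_dataset, path)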
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = [
'DeploymentConfigMinimumHealthyHostsArgs',
'DeploymentConfigTrafficRoutingConfigArgs',
'DeploymentConfigTrafficRoutingConfigTimeBasedCanaryArgs',
'DeploymentConfigTrafficRoutingConfigTimeBasedLinearArgs',
'DeploymentGroupAlarmConfigurationArgs',
'DeploymentGroupAutoRollbackConfigurationArgs',
'DeploymentGroupBlueGreenDeploymentConfigArgs',
'DeploymentGroupBlueGreenDeploymentConfigDeploymentReadyOptionArgs',
'DeploymentGroupBlueGreenDeploymentConfigGreenFleetProvisioningOptionArgs',
'DeploymentGroupBlueGreenDeploymentConfigTerminateBlueInstancesOnDeploymentSuccessArgs',
'DeploymentGroupDeploymentStyleArgs',
'DeploymentGroupEc2TagFilterArgs',
'DeploymentGroupEc2TagSetArgs',
'DeploymentGroupEc2TagSetEc2TagFilterArgs',
'DeploymentGroupEcsServiceArgs',
'DeploymentGroupLoadBalancerInfoArgs',
'DeploymentGroupLoadBalancerInfoElbInfoArgs',
'DeploymentGroupLoadBalancerInfoTargetGroupInfoArgs',
'DeploymentGroupLoadBalancerInfoTargetGroupPairInfoArgs',
'DeploymentGroupLoadBalancerInfoTargetGroupPairInfoProdTrafficRouteArgs',
'DeploymentGroupLoadBalancerInfoTargetGroupPairInfoTargetGroupArgs',
'DeploymentGroupLoadBalancerInfoTargetGroupPairInfoTestTrafficRouteArgs',
'DeploymentGroupOnPremisesInstanceTagFilterArgs',
'DeploymentGroupTriggerConfigurationArgs',
]
@pulumi.input_type
class DeploymentConfigMinimumHealthyHostsArgs:
def __init__(__self__, *,
type: Optional[pulumi.Input[str]] = None,
value: Optional[pulumi.Input[int]] = None):
"""
:param pulumi.Input[str] type: The type can either be `FLEET_PERCENT` or `HOST_COUNT`.
:param pulumi.Input[int] value: The value when the type is `FLEET_PERCENT` represents the minimum number of healthy instances as
a percentage of the total number of instances in the deployment. If you specify FLEET_PERCENT, at the start of the
               deployment, AWS CodeDeploy converts the percentage to the equivalent number of instances and rounds up fractional instances.
When the type is `HOST_COUNT`, the value represents the minimum number of healthy instances as an absolute value.
"""
if type is not None:
pulumi.set(__self__, "type", type)
if value is not None:
pulumi.set(__self__, "value", value)
@property
@pulumi.getter
def type(self) -> Optional[pulumi.Input[str]]:
"""
The type can either be `FLEET_PERCENT` or `HOST_COUNT`.
"""
return pulumi.get(self, "type")
@type.setter
def type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "type", value)
@property
@pulumi.getter
def value(self) -> Optional[pulumi.Input[int]]:
"""
The value when the type is `FLEET_PERCENT` represents the minimum number of healthy instances as
a percentage of the total number of instances in the deployment. If you specify FLEET_PERCENT, at the start of the
        deployment, AWS CodeDeploy converts the percentage to the equivalent number of instances and rounds up fractional instances.
When the type is `HOST_COUNT`, the value represents the minimum number of healthy instances as an absolute value.
"""
return pulumi.get(self, "value")
@value.setter
def value(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "value", value)
@pulumi.input_type
class DeploymentConfigTrafficRoutingConfigArgs:
def __init__(__self__, *,
time_based_canary: Optional[pulumi.Input['DeploymentConfigTrafficRoutingConfigTimeBasedCanaryArgs']] = None,
time_based_linear: Optional[pulumi.Input['DeploymentConfigTrafficRoutingConfigTimeBasedLinearArgs']] = None,
type: Optional[pulumi.Input[str]] = None):
"""
:param pulumi.Input['DeploymentConfigTrafficRoutingConfigTimeBasedCanaryArgs'] time_based_canary: The time based canary configuration information. If `type` is `TimeBasedLinear`, use `time_based_linear` instead.
:param pulumi.Input['DeploymentConfigTrafficRoutingConfigTimeBasedLinearArgs'] time_based_linear: The time based linear configuration information. If `type` is `TimeBasedCanary`, use `time_based_canary` instead.
:param pulumi.Input[str] type: Type of traffic routing config. One of `TimeBasedCanary`, `TimeBasedLinear`, `AllAtOnce`.
"""
if time_based_canary is not None:
pulumi.set(__self__, "time_based_canary", time_based_canary)
if time_based_linear is not None:
pulumi.set(__self__, "time_based_linear", time_based_linear)
if type is not None:
pulumi.set(__self__, "type", type)
@property
@pulumi.getter(name="timeBasedCanary")
def time_based_canary(self) -> Optional[pulumi.Input['DeploymentConfigTrafficRoutingConfigTimeBasedCanaryArgs']]:
"""
The time based canary configuration information. If `type` is `TimeBasedLinear`, use `time_based_linear` instead.
"""
return pulumi.get(self, "time_based_canary")
@time_based_canary.setter
def time_based_canary(self, value: Optional[pulumi.Input['DeploymentConfigTrafficRoutingConfigTimeBasedCanaryArgs']]):
pulumi.set(self, "time_based_canary", value)
@property
@pulumi.getter(name="timeBasedLinear")
def time_based_linear(self) -> Optional[pulumi.Input['DeploymentConfigTrafficRoutingConfigTimeBasedLinearArgs']]:
"""
The time based linear configuration information. If `type` is `TimeBasedCanary`, use `time_based_canary` instead.
"""
return pulumi.get(self, "time_based_linear")
@time_based_linear.setter
def time_based_linear(self, value: Optional[pulumi.Input['DeploymentConfigTrafficRoutingConfigTimeBasedLinearArgs']]):
pulumi.set(self, "time_based_linear", value)
@property
@pulumi.getter
def type(self) -> Optional[pulumi.Input[str]]:
"""
Type of traffic routing config. One of `TimeBasedCanary`, `TimeBasedLinear`, `AllAtOnce`.
"""
return pulumi.get(self, "type")
@type.setter
def type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "type", value)
@pulumi.input_type
class DeploymentConfigTrafficRoutingConfigTimeBasedCanaryArgs:
def __init__(__self__, *,
interval: Optional[pulumi.Input[int]] = None,
percentage: Optional[pulumi.Input[int]] = None):
"""
:param pulumi.Input[int] interval: The number of minutes between the first and second traffic shifts of a `TimeBasedCanary` deployment.
:param pulumi.Input[int] percentage: The percentage of traffic to shift in the first increment of a `TimeBasedCanary` deployment.
"""
if interval is not None:
pulumi.set(__self__, "interval", interval)
if percentage is not None:
pulumi.set(__self__, "percentage", percentage)
@property
@pulumi.getter
def interval(self) -> Optional[pulumi.Input[int]]:
"""
The number of minutes between the first and second traffic shifts of a `TimeBasedCanary` deployment.
"""
return pulumi.get(self, "interval")
@interval.setter
def interval(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "interval", value)
@property
@pulumi.getter
def percentage(self) -> Optional[pulumi.Input[int]]:
"""
The percentage of traffic to shift in the first increment of a `TimeBasedCanary` deployment.
"""
return pulumi.get(self, "percentage")
@percentage.setter
def percentage(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "percentage", value)
@pulumi.input_type
class DeploymentConfigTrafficRoutingConfigTimeBasedLinearArgs:
def __init__(__self__, *,
interval: Optional[pulumi.Input[int]] = None,
percentage: Optional[pulumi.Input[int]] = None):
"""
:param pulumi.Input[int] interval: The number of minutes between each incremental traffic shift of a `TimeBasedLinear` deployment.
:param pulumi.Input[int] percentage: The percentage of traffic that is shifted at the start of each increment of a `TimeBasedLinear` deployment.
"""
if interval is not None:
pulumi.set(__self__, "interval", interval)
if percentage is not None:
pulumi.set(__self__, "percentage", percentage)
@property
@pulumi.getter
def interval(self) -> Optional[pulumi.Input[int]]:
"""
The number of minutes between each incremental traffic shift of a `TimeBasedLinear` deployment.
"""
return pulumi.get(self, "interval")
@interval.setter
def interval(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "interval", value)
@property
@pulumi.getter
def percentage(self) -> Optional[pulumi.Input[int]]:
"""
The percentage of traffic that is shifted at the start of each increment of a `TimeBasedLinear` deployment.
"""
return pulumi.get(self, "percentage")
@percentage.setter
def percentage(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "percentage", value)
@pulumi.input_type
class DeploymentGroupAlarmConfigurationArgs:
def __init__(__self__, *,
alarms: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
enabled: Optional[pulumi.Input[bool]] = None,
ignore_poll_alarm_failure: Optional[pulumi.Input[bool]] = None):
"""
:param pulumi.Input[Sequence[pulumi.Input[str]]] alarms: A list of alarms configured for the deployment group. _A maximum of 10 alarms can be added to a deployment group_.
:param pulumi.Input[bool] enabled: Indicates whether the alarm configuration is enabled. This option is useful when you want to temporarily deactivate alarm monitoring for a deployment group without having to add the same alarms again later.
:param pulumi.Input[bool] ignore_poll_alarm_failure: Indicates whether a deployment should continue if information about the current state of alarms cannot be retrieved from CloudWatch. The default value is `false`.
* `true`: The deployment will proceed even if alarm status information can't be retrieved.
* `false`: The deployment will stop if alarm status information can't be retrieved.
"""
if alarms is not None:
pulumi.set(__self__, "alarms", alarms)
if enabled is not None:
pulumi.set(__self__, "enabled", enabled)
if ignore_poll_alarm_failure is not None:
pulumi.set(__self__, "ignore_poll_alarm_failure", ignore_poll_alarm_failure)
@property
@pulumi.getter
def alarms(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
A list of alarms configured for the deployment group. _A maximum of 10 alarms can be added to a deployment group_.
"""
return pulumi.get(self, "alarms")
@alarms.setter
def alarms(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "alarms", value)
@property
@pulumi.getter
def enabled(self) -> Optional[pulumi.Input[bool]]:
"""
Indicates whether the alarm configuration is enabled. This option is useful when you want to temporarily deactivate alarm monitoring for a deployment group without having to add the same alarms again later.
"""
return pulumi.get(self, "enabled")
@enabled.setter
def enabled(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "enabled", value)
@property
@pulumi.getter(name="ignorePollAlarmFailure")
def ignore_poll_alarm_failure(self) -> Optional[pulumi.Input[bool]]:
"""
Indicates whether a deployment should continue if information about the current state of alarms cannot be retrieved from CloudWatch. The default value is `false`.
* `true`: The deployment will proceed even if alarm status information can't be retrieved.
* `false`: The deployment will stop if alarm status information can't be retrieved.
"""
return pulumi.get(self, "ignore_poll_alarm_failure")
@ignore_poll_alarm_failure.setter
def ignore_poll_alarm_failure(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "ignore_poll_alarm_failure", value)
@pulumi.input_type
class DeploymentGroupAutoRollbackConfigurationArgs:
def __init__(__self__, *,
enabled: Optional[pulumi.Input[bool]] = None,
events: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):
"""
:param pulumi.Input[bool] enabled: Indicates whether a defined automatic rollback configuration is currently enabled for this Deployment Group. If you enable automatic rollback, you must specify at least one event type.
:param pulumi.Input[Sequence[pulumi.Input[str]]] events: The event type or types that trigger a rollback. Supported types are `DEPLOYMENT_FAILURE` and `DEPLOYMENT_STOP_ON_ALARM`.
"""
if enabled is not None:
pulumi.set(__self__, "enabled", enabled)
if events is not None:
pulumi.set(__self__, "events", events)
@property
@pulumi.getter
def enabled(self) -> Optional[pulumi.Input[bool]]:
"""
Indicates whether a defined automatic rollback configuration is currently enabled for this Deployment Group. If you enable automatic rollback, you must specify at least one event type.
"""
return pulumi.get(self, "enabled")
@enabled.setter
def enabled(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "enabled", value)
@property
@pulumi.getter
def events(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
The event type or types that trigger a rollback. Supported types are `DEPLOYMENT_FAILURE` and `DEPLOYMENT_STOP_ON_ALARM`.
"""
return pulumi.get(self, "events")
@events.setter
def events(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "events", value)
@pulumi.input_type
class DeploymentGroupBlueGreenDeploymentConfigArgs:
def __init__(__self__, *,
deployment_ready_option: Optional[pulumi.Input['DeploymentGroupBlueGreenDeploymentConfigDeploymentReadyOptionArgs']] = None,
green_fleet_provisioning_option: Optional[pulumi.Input['DeploymentGroupBlueGreenDeploymentConfigGreenFleetProvisioningOptionArgs']] = None,
terminate_blue_instances_on_deployment_success: Optional[pulumi.Input['DeploymentGroupBlueGreenDeploymentConfigTerminateBlueInstancesOnDeploymentSuccessArgs']] = None):
"""
:param pulumi.Input['DeploymentGroupBlueGreenDeploymentConfigDeploymentReadyOptionArgs'] deployment_ready_option: Information about the action to take when newly provisioned instances are ready to receive traffic in a blue/green deployment (documented below).
:param pulumi.Input['DeploymentGroupBlueGreenDeploymentConfigGreenFleetProvisioningOptionArgs'] green_fleet_provisioning_option: Information about how instances are provisioned for a replacement environment in a blue/green deployment (documented below).
:param pulumi.Input['DeploymentGroupBlueGreenDeploymentConfigTerminateBlueInstancesOnDeploymentSuccessArgs'] terminate_blue_instances_on_deployment_success: Information about whether to terminate instances in the original fleet during a blue/green deployment (documented below).
"""
if deployment_ready_option is not None:
pulumi.set(__self__, "deployment_ready_option", deployment_ready_option)
if green_fleet_provisioning_option is not None:
pulumi.set(__self__, "green_fleet_provisioning_option", green_fleet_provisioning_option)
if terminate_blue_instances_on_deployment_success is not None:
pulumi.set(__self__, "terminate_blue_instances_on_deployment_success", terminate_blue_instances_on_deployment_success)
@property
@pulumi.getter(name="deploymentReadyOption")
def deployment_ready_option(self) -> Optional[pulumi.Input['DeploymentGroupBlueGreenDeploymentConfigDeploymentReadyOptionArgs']]:
"""
Information about the action to take when newly provisioned instances are ready to receive traffic in a blue/green deployment (documented below).
"""
return pulumi.get(self, "deployment_ready_option")
@deployment_ready_option.setter
def deployment_ready_option(self, value: Optional[pulumi.Input['DeploymentGroupBlueGreenDeploymentConfigDeploymentReadyOptionArgs']]):
pulumi.set(self, "deployment_ready_option", value)
@property
@pulumi.getter(name="greenFleetProvisioningOption")
def green_fleet_provisioning_option(self) -> Optional[pulumi.Input['DeploymentGroupBlueGreenDeploymentConfigGreenFleetProvisioningOptionArgs']]:
"""
Information about how instances are provisioned for a replacement environment in a blue/green deployment (documented below).
"""
return pulumi.get(self, "green_fleet_provisioning_option")
@green_fleet_provisioning_option.setter
def green_fleet_provisioning_option(self, value: Optional[pulumi.Input['DeploymentGroupBlueGreenDeploymentConfigGreenFleetProvisioningOptionArgs']]):
pulumi.set(self, "green_fleet_provisioning_option", value)
@property
@pulumi.getter(name="terminateBlueInstancesOnDeploymentSuccess")
def terminate_blue_instances_on_deployment_success(self) -> Optional[pulumi.Input['DeploymentGroupBlueGreenDeploymentConfigTerminateBlueInstancesOnDeploymentSuccessArgs']]:
"""
Information about whether to terminate instances in the original fleet during a blue/green deployment (documented below).
"""
return pulumi.get(self, "terminate_blue_instances_on_deployment_success")
@terminate_blue_instances_on_deployment_success.setter
def terminate_blue_instances_on_deployment_success(self, value: Optional[pulumi.Input['DeploymentGroupBlueGreenDeploymentConfigTerminateBlueInstancesOnDeploymentSuccessArgs']]):
pulumi.set(self, "terminate_blue_instances_on_deployment_success", value)
@pulumi.input_type
class DeploymentGroupBlueGreenDeploymentConfigDeploymentReadyOptionArgs:
def __init__(__self__, *,
action_on_timeout: Optional[pulumi.Input[str]] = None,
wait_time_in_minutes: Optional[pulumi.Input[int]] = None):
"""
:param pulumi.Input[str] action_on_timeout: When to reroute traffic from an original environment to a replacement environment in a blue/green deployment.
* `CONTINUE_DEPLOYMENT`: Register new instances with the load balancer immediately after the new application revision is installed on the instances in the replacement environment.
* `STOP_DEPLOYMENT`: Do not register new instances with load balancer unless traffic is rerouted manually. If traffic is not rerouted manually before the end of the specified wait period, the deployment status is changed to Stopped.
:param pulumi.Input[int] wait_time_in_minutes: The number of minutes to wait before the status of a blue/green deployment changed to Stopped if rerouting is not started manually. Applies only to the `STOP_DEPLOYMENT` option for `action_on_timeout`.
"""
if action_on_timeout is not None:
pulumi.set(__self__, "action_on_timeout", action_on_timeout)
if wait_time_in_minutes is not None:
pulumi.set(__self__, "wait_time_in_minutes", wait_time_in_minutes)
@property
@pulumi.getter(name="actionOnTimeout")
def action_on_timeout(self) -> Optional[pulumi.Input[str]]:
"""
When to reroute traffic from an original environment to a replacement environment in a blue/green deployment.
* `CONTINUE_DEPLOYMENT`: Register new instances with the load balancer immediately after the new application revision is installed on the instances in the replacement environment.
* `STOP_DEPLOYMENT`: Do not register new instances with load balancer unless traffic is rerouted manually. If traffic is not rerouted manually before the end of the specified wait period, the deployment status is changed to Stopped.
"""
return pulumi.get(self, "action_on_timeout")
@action_on_timeout.setter
def action_on_timeout(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "action_on_timeout", value)
@property
@pulumi.getter(name="waitTimeInMinutes")
def wait_time_in_minutes(self) -> Optional[pulumi.Input[int]]:
"""
The number of minutes to wait before the status of a blue/green deployment changed to Stopped if rerouting is not started manually. Applies only to the `STOP_DEPLOYMENT` option for `action_on_timeout`.
"""
return pulumi.get(self, "wait_time_in_minutes")
@wait_time_in_minutes.setter
def wait_time_in_minutes(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "wait_time_in_minutes", value)
@pulumi.input_type
class DeploymentGroupBlueGreenDeploymentConfigGreenFleetProvisioningOptionArgs:
def __init__(__self__, *,
action: Optional[pulumi.Input[str]] = None):
"""
:param pulumi.Input[str] action: The method used to add instances to a replacement environment.
* `DISCOVER_EXISTING`: Use instances that already exist or will be created manually.
* `COPY_AUTO_SCALING_GROUP`: Use settings from a specified **Auto Scaling** group to define and create instances in a new Auto Scaling group. _Exactly one Auto Scaling group must be specified_ when selecting `COPY_AUTO_SCALING_GROUP`. Use `autoscaling_groups` to specify the Auto Scaling group.
"""
if action is not None:
pulumi.set(__self__, "action", action)
@property
@pulumi.getter
def action(self) -> Optional[pulumi.Input[str]]:
"""
The method used to add instances to a replacement environment.
* `DISCOVER_EXISTING`: Use instances that already exist or will be created manually.
* `COPY_AUTO_SCALING_GROUP`: Use settings from a specified **Auto Scaling** group to define and create instances in a new Auto Scaling group. _Exactly one Auto Scaling group must be specified_ when selecting `COPY_AUTO_SCALING_GROUP`. Use `autoscaling_groups` to specify the Auto Scaling group.
"""
return pulumi.get(self, "action")
@action.setter
def action(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "action", value)
@pulumi.input_type
class DeploymentGroupBlueGreenDeploymentConfigTerminateBlueInstancesOnDeploymentSuccessArgs:
def __init__(__self__, *,
action: Optional[pulumi.Input[str]] = None,
termination_wait_time_in_minutes: Optional[pulumi.Input[int]] = None):
"""
:param pulumi.Input[str] action: The action to take on instances in the original environment after a successful blue/green deployment.
* `TERMINATE`: Instances are terminated after a specified wait time.
* `KEEP_ALIVE`: Instances are left running after they are deregistered from the load balancer and removed from the deployment group.
:param pulumi.Input[int] termination_wait_time_in_minutes: The number of minutes to wait after a successful blue/green deployment before terminating instances from the original environment.
"""
if action is not None:
pulumi.set(__self__, "action", action)
if termination_wait_time_in_minutes is not None:
pulumi.set(__self__, "termination_wait_time_in_minutes", termination_wait_time_in_minutes)
@property
@pulumi.getter
def action(self) -> Optional[pulumi.Input[str]]:
"""
The action to take on instances in the original environment after a successful blue/green deployment.
* `TERMINATE`: Instances are terminated after a specified wait time.
* `KEEP_ALIVE`: Instances are left running after they are deregistered from the load balancer and removed from the deployment group.
"""
return pulumi.get(self, "action")
@action.setter
def action(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "action", value)
@property
@pulumi.getter(name="terminationWaitTimeInMinutes")
def termination_wait_time_in_minutes(self) -> Optional[pulumi.Input[int]]:
"""
The number of minutes to wait after a successful blue/green deployment before terminating instances from the original environment.
"""
return pulumi.get(self, "termination_wait_time_in_minutes")
@termination_wait_time_in_minutes.setter
def termination_wait_time_in_minutes(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "termination_wait_time_in_minutes", value)
@pulumi.input_type
class DeploymentGroupDeploymentStyleArgs:
def __init__(__self__, *,
deployment_option: Optional[pulumi.Input[str]] = None,
deployment_type: Optional[pulumi.Input[str]] = None):
"""
:param pulumi.Input[str] deployment_option: Indicates whether to route deployment traffic behind a load balancer. Valid Values are `WITH_TRAFFIC_CONTROL` or `WITHOUT_TRAFFIC_CONTROL`. Default is `WITHOUT_TRAFFIC_CONTROL`.
:param pulumi.Input[str] deployment_type: Indicates whether to run an in-place deployment or a blue/green deployment. Valid Values are `IN_PLACE` or `BLUE_GREEN`. Default is `IN_PLACE`.
"""
if deployment_option is not None:
pulumi.set(__self__, "deployment_option", deployment_option)
if deployment_type is not None:
pulumi.set(__self__, "deployment_type", deployment_type)
@property
@pulumi.getter(name="deploymentOption")
def deployment_option(self) -> Optional[pulumi.Input[str]]:
"""
Indicates whether to route deployment traffic behind a load balancer. Valid Values are `WITH_TRAFFIC_CONTROL` or `WITHOUT_TRAFFIC_CONTROL`. Default is `WITHOUT_TRAFFIC_CONTROL`.
"""
return pulumi.get(self, "deployment_option")
@deployment_option.setter
def deployment_option(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "deployment_option", value)
@property
@pulumi.getter(name="deploymentType")
def deployment_type(self) -> Optional[pulumi.Input[str]]:
"""
Indicates whether to run an in-place deployment or a blue/green deployment. Valid Values are `IN_PLACE` or `BLUE_GREEN`. Default is `IN_PLACE`.
"""
return pulumi.get(self, "deployment_type")
@deployment_type.setter
def deployment_type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "deployment_type", value)
@pulumi.input_type
class DeploymentGroupEc2TagFilterArgs:
def __init__(__self__, *,
key: Optional[pulumi.Input[str]] = None,
type: Optional[pulumi.Input[str]] = None,
value: Optional[pulumi.Input[str]] = None):
"""
:param pulumi.Input[str] key: The key of the tag filter.
:param pulumi.Input[str] type: The type of the tag filter, either `KEY_ONLY`, `VALUE_ONLY`, or `KEY_AND_VALUE`.
:param pulumi.Input[str] value: The value of the tag filter.
"""
if key is not None:
pulumi.set(__self__, "key", key)
if type is not None:
pulumi.set(__self__, "type", type)
if value is not None:
pulumi.set(__self__, "value", value)
@property
@pulumi.getter
def key(self) -> Optional[pulumi.Input[str]]:
"""
The key of the tag filter.
"""
return pulumi.get(self, "key")
@key.setter
def key(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "key", value)
@property
@pulumi.getter
def type(self) -> Optional[pulumi.Input[str]]:
"""
The type of the tag filter, either `KEY_ONLY`, `VALUE_ONLY`, or `KEY_AND_VALUE`.
"""
return pulumi.get(self, "type")
@type.setter
def type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "type", value)
@property
@pulumi.getter
def value(self) -> Optional[pulumi.Input[str]]:
"""
The value of the tag filter.
"""
return pulumi.get(self, "value")
@value.setter
def value(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "value", value)
@pulumi.input_type
class DeploymentGroupEc2TagSetArgs:
def __init__(__self__, *,
ec2_tag_filters: Optional[pulumi.Input[Sequence[pulumi.Input['DeploymentGroupEc2TagSetEc2TagFilterArgs']]]] = None):
"""
:param pulumi.Input[Sequence[pulumi.Input['DeploymentGroupEc2TagSetEc2TagFilterArgs']]] ec2_tag_filters: Tag filters associated with the deployment group. See the AWS docs for details.
"""
if ec2_tag_filters is not None:
pulumi.set(__self__, "ec2_tag_filters", ec2_tag_filters)
@property
@pulumi.getter(name="ec2TagFilters")
def ec2_tag_filters(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['DeploymentGroupEc2TagSetEc2TagFilterArgs']]]]:
"""
Tag filters associated with the deployment group. See the AWS docs for details.
"""
return pulumi.get(self, "ec2_tag_filters")
@ec2_tag_filters.setter
def ec2_tag_filters(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['DeploymentGroupEc2TagSetEc2TagFilterArgs']]]]):
pulumi.set(self, "ec2_tag_filters", value)
@pulumi.input_type
class DeploymentGroupEc2TagSetEc2TagFilterArgs:
def __init__(__self__, *,
key: Optional[pulumi.Input[str]] = None,
type: Optional[pulumi.Input[str]] = None,
value: Optional[pulumi.Input[str]] = None):
"""
:param pulumi.Input[str] key: The key of the tag filter.
:param pulumi.Input[str] type: The type of the tag filter, either `KEY_ONLY`, `VALUE_ONLY`, or `KEY_AND_VALUE`.
:param pulumi.Input[str] value: The value of the tag filter.
"""
if key is not None:
pulumi.set(__self__, "key", key)
if type is not None:
pulumi.set(__self__, "type", type)
if value is not None:
pulumi.set(__self__, "value", value)
@property
@pulumi.getter
def key(self) -> Optional[pulumi.Input[str]]:
"""
The key of the tag filter.
"""
return pulumi.get(self, "key")
@key.setter
def key(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "key", value)
@property
@pulumi.getter
def type(self) -> Optional[pulumi.Input[str]]:
"""
The type of the tag filter, either `KEY_ONLY`, `VALUE_ONLY`, or `KEY_AND_VALUE`.
"""
return pulumi.get(self, "type")
@type.setter
def type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "type", value)
@property
@pulumi.getter
def value(self) -> Optional[pulumi.Input[str]]:
"""
The value of the tag filter.
"""
return pulumi.get(self, "value")
@value.setter
def value(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "value", value)
@pulumi.input_type
class DeploymentGroupEcsServiceArgs:
def __init__(__self__, *,
cluster_name: pulumi.Input[str],
service_name: pulumi.Input[str]):
"""
:param pulumi.Input[str] cluster_name: The name of the ECS cluster.
:param pulumi.Input[str] service_name: The name of the ECS service.
"""
pulumi.set(__self__, "cluster_name", cluster_name)
pulumi.set(__self__, "service_name", service_name)
@property
@pulumi.getter(name="clusterName")
def cluster_name(self) -> pulumi.Input[str]:
"""
The name of the ECS cluster.
"""
return pulumi.get(self, "cluster_name")
@cluster_name.setter
def cluster_name(self, value: pulumi.Input[str]):
pulumi.set(self, "cluster_name", value)
@property
@pulumi.getter(name="serviceName")
def service_name(self) -> pulumi.Input[str]:
"""
The name of the ECS service.
"""
return pulumi.get(self, "service_name")
@service_name.setter
def service_name(self, value: pulumi.Input[str]):
pulumi.set(self, "service_name", value)
@pulumi.input_type
class DeploymentGroupLoadBalancerInfoArgs:
def __init__(__self__, *,
elb_infos: Optional[pulumi.Input[Sequence[pulumi.Input['DeploymentGroupLoadBalancerInfoElbInfoArgs']]]] = None,
target_group_infos: Optional[pulumi.Input[Sequence[pulumi.Input['DeploymentGroupLoadBalancerInfoTargetGroupInfoArgs']]]] = None,
target_group_pair_info: Optional[pulumi.Input['DeploymentGroupLoadBalancerInfoTargetGroupPairInfoArgs']] = None):
"""
:param pulumi.Input[Sequence[pulumi.Input['DeploymentGroupLoadBalancerInfoElbInfoArgs']]] elb_infos: The Classic Elastic Load Balancer to use in a deployment. Conflicts with `target_group_info` and `target_group_pair_info`.
:param pulumi.Input[Sequence[pulumi.Input['DeploymentGroupLoadBalancerInfoTargetGroupInfoArgs']]] target_group_infos: The (Application/Network Load Balancer) target group to use in a deployment. Conflicts with `elb_info` and `target_group_pair_info`.
:param pulumi.Input['DeploymentGroupLoadBalancerInfoTargetGroupPairInfoArgs'] target_group_pair_info: The (Application/Network Load Balancer) target group pair to use in a deployment. Conflicts with `elb_info` and `target_group_info`.
"""
if elb_infos is not None:
pulumi.set(__self__, "elb_infos", elb_infos)
if target_group_infos is not None:
pulumi.set(__self__, "target_group_infos", target_group_infos)
if target_group_pair_info is not None:
pulumi.set(__self__, "target_group_pair_info", target_group_pair_info)
@property
@pulumi.getter(name="elbInfos")
def elb_infos(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['DeploymentGroupLoadBalancerInfoElbInfoArgs']]]]:
"""
The Classic Elastic Load Balancer to use in a deployment. Conflicts with `target_group_info` and `target_group_pair_info`.
"""
return pulumi.get(self, "elb_infos")
@elb_infos.setter
def elb_infos(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['DeploymentGroupLoadBalancerInfoElbInfoArgs']]]]):
pulumi.set(self, "elb_infos", value)
@property
@pulumi.getter(name="targetGroupInfos")
def target_group_infos(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['DeploymentGroupLoadBalancerInfoTargetGroupInfoArgs']]]]:
"""
The (Application/Network Load Balancer) target group to use in a deployment. Conflicts with `elb_info` and `target_group_pair_info`.
"""
return pulumi.get(self, "target_group_infos")
@target_group_infos.setter
def target_group_infos(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['DeploymentGroupLoadBalancerInfoTargetGroupInfoArgs']]]]):
pulumi.set(self, "target_group_infos", value)
@property
@pulumi.getter(name="targetGroupPairInfo")
def target_group_pair_info(self) -> Optional[pulumi.Input['DeploymentGroupLoadBalancerInfoTargetGroupPairInfoArgs']]:
"""
The (Application/Network Load Balancer) target group pair to use in a deployment. Conflicts with `elb_info` and `target_group_info`.
"""
return pulumi.get(self, "target_group_pair_info")
@target_group_pair_info.setter
def target_group_pair_info(self, value: Optional[pulumi.Input['DeploymentGroupLoadBalancerInfoTargetGroupPairInfoArgs']]):
pulumi.set(self, "target_group_pair_info", value)
@pulumi.input_type
class DeploymentGroupLoadBalancerInfoElbInfoArgs:
def __init__(__self__, *,
name: Optional[pulumi.Input[str]] = None):
"""
:param pulumi.Input[str] name: The name of the load balancer that will be used to route traffic from original instances to replacement instances in a blue/green deployment. For in-place deployments, the name of the load balancer that instances are deregistered from so they are not serving traffic during a deployment, and then re-registered with after the deployment completes.
"""
if name is not None:
pulumi.set(__self__, "name", name)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the load balancer that will be used to route traffic from original instances to replacement instances in a blue/green deployment. For in-place deployments, the name of the load balancer that instances are deregistered from so they are not serving traffic during a deployment, and then re-registered with after the deployment completes.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@pulumi.input_type
class DeploymentGroupLoadBalancerInfoTargetGroupInfoArgs:
def __init__(__self__, *,
name: Optional[pulumi.Input[str]] = None):
"""
:param pulumi.Input[str] name: The name of the target group that instances in the original environment are deregistered from, and instances in the replacement environment registered with. For in-place deployments, the name of the target group that instances are deregistered from, so they are not serving traffic during a deployment, and then re-registered with after the deployment completes.
"""
if name is not None:
pulumi.set(__self__, "name", name)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the target group that instances in the original environment are deregistered from, and instances in the replacement environment registered with. For in-place deployments, the name of the target group that instances are deregistered from, so they are not serving traffic during a deployment, and then re-registered with after the deployment completes.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@pulumi.input_type
class DeploymentGroupLoadBalancerInfoTargetGroupPairInfoArgs:
def __init__(__self__, *,
prod_traffic_route: pulumi.Input['DeploymentGroupLoadBalancerInfoTargetGroupPairInfoProdTrafficRouteArgs'],
target_groups: pulumi.Input[Sequence[pulumi.Input['DeploymentGroupLoadBalancerInfoTargetGroupPairInfoTargetGroupArgs']]],
test_traffic_route: Optional[pulumi.Input['DeploymentGroupLoadBalancerInfoTargetGroupPairInfoTestTrafficRouteArgs']] = None):
"""
:param pulumi.Input['DeploymentGroupLoadBalancerInfoTargetGroupPairInfoProdTrafficRouteArgs'] prod_traffic_route: Configuration block for the production traffic route (documented below).
:param pulumi.Input[Sequence[pulumi.Input['DeploymentGroupLoadBalancerInfoTargetGroupPairInfoTargetGroupArgs']]] target_groups: Configuration blocks for a target group within a target group pair (documented below).
:param pulumi.Input['DeploymentGroupLoadBalancerInfoTargetGroupPairInfoTestTrafficRouteArgs'] test_traffic_route: Configuration block for the test traffic route (documented below).
"""
pulumi.set(__self__, "prod_traffic_route", prod_traffic_route)
pulumi.set(__self__, "target_groups", target_groups)
if test_traffic_route is not None:
pulumi.set(__self__, "test_traffic_route", test_traffic_route)
@property
@pulumi.getter(name="prodTrafficRoute")
def prod_traffic_route(self) -> pulumi.Input['DeploymentGroupLoadBalancerInfoTargetGroupPairInfoProdTrafficRouteArgs']:
"""
Configuration block for the production traffic route (documented below).
"""
return pulumi.get(self, "prod_traffic_route")
@prod_traffic_route.setter
def prod_traffic_route(self, value: pulumi.Input['DeploymentGroupLoadBalancerInfoTargetGroupPairInfoProdTrafficRouteArgs']):
pulumi.set(self, "prod_traffic_route", value)
@property
@pulumi.getter(name="targetGroups")
def target_groups(self) -> pulumi.Input[Sequence[pulumi.Input['DeploymentGroupLoadBalancerInfoTargetGroupPairInfoTargetGroupArgs']]]:
"""
Configuration blocks for a target group within a target group pair (documented below).
"""
return pulumi.get(self, "target_groups")
@target_groups.setter
def target_groups(self, value: pulumi.Input[Sequence[pulumi.Input['DeploymentGroupLoadBalancerInfoTargetGroupPairInfoTargetGroupArgs']]]):
pulumi.set(self, "target_groups", value)
@property
@pulumi.getter(name="testTrafficRoute")
def test_traffic_route(self) -> Optional[pulumi.Input['DeploymentGroupLoadBalancerInfoTargetGroupPairInfoTestTrafficRouteArgs']]:
"""
Configuration block for the test traffic route (documented below).
"""
return pulumi.get(self, "test_traffic_route")
@test_traffic_route.setter
def test_traffic_route(self, value: Optional[pulumi.Input['DeploymentGroupLoadBalancerInfoTargetGroupPairInfoTestTrafficRouteArgs']]):
pulumi.set(self, "test_traffic_route", value)
@pulumi.input_type
class DeploymentGroupLoadBalancerInfoTargetGroupPairInfoProdTrafficRouteArgs:
def __init__(__self__, *,
listener_arns: pulumi.Input[Sequence[pulumi.Input[str]]]):
"""
:param pulumi.Input[Sequence[pulumi.Input[str]]] listener_arns: List of Amazon Resource Names (ARNs) of the load balancer listeners.
"""
pulumi.set(__self__, "listener_arns", listener_arns)
@property
@pulumi.getter(name="listenerArns")
def listener_arns(self) -> pulumi.Input[Sequence[pulumi.Input[str]]]:
"""
List of Amazon Resource Names (ARNs) of the load balancer listeners.
"""
return pulumi.get(self, "listener_arns")
@listener_arns.setter
def listener_arns(self, value: pulumi.Input[Sequence[pulumi.Input[str]]]):
pulumi.set(self, "listener_arns", value)
@pulumi.input_type
class DeploymentGroupLoadBalancerInfoTargetGroupPairInfoTargetGroupArgs:
def __init__(__self__, *,
name: pulumi.Input[str]):
"""
:param pulumi.Input[str] name: Name of the target group.
"""
pulumi.set(__self__, "name", name)
@property
@pulumi.getter
def name(self) -> pulumi.Input[str]:
"""
Name of the target group.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: pulumi.Input[str]):
pulumi.set(self, "name", value)
@pulumi.input_type
class DeploymentGroupLoadBalancerInfoTargetGroupPairInfoTestTrafficRouteArgs:
def __init__(__self__, *,
listener_arns: pulumi.Input[Sequence[pulumi.Input[str]]]):
"""
:param pulumi.Input[Sequence[pulumi.Input[str]]] listener_arns: List of Amazon Resource Names (ARNs) of the load balancer listeners.
"""
pulumi.set(__self__, "listener_arns", listener_arns)
@property
@pulumi.getter(name="listenerArns")
def listener_arns(self) -> pulumi.Input[Sequence[pulumi.Input[str]]]:
"""
List of Amazon Resource Names (ARNs) of the load balancer listeners.
"""
return pulumi.get(self, "listener_arns")
@listener_arns.setter
def listener_arns(self, value: pulumi.Input[Sequence[pulumi.Input[str]]]):
pulumi.set(self, "listener_arns", value)
@pulumi.input_type
class DeploymentGroupOnPremisesInstanceTagFilterArgs:
def __init__(__self__, *,
key: Optional[pulumi.Input[str]] = None,
type: Optional[pulumi.Input[str]] = None,
value: Optional[pulumi.Input[str]] = None):
"""
:param pulumi.Input[str] key: The key of the tag filter.
:param pulumi.Input[str] type: The type of the tag filter, either `KEY_ONLY`, `VALUE_ONLY`, or `KEY_AND_VALUE`.
:param pulumi.Input[str] value: The value of the tag filter.
"""
if key is not None:
pulumi.set(__self__, "key", key)
if type is not None:
pulumi.set(__self__, "type", type)
if value is not None:
pulumi.set(__self__, "value", value)
@property
@pulumi.getter
def key(self) -> Optional[pulumi.Input[str]]:
"""
The key of the tag filter.
"""
return pulumi.get(self, "key")
@key.setter
def key(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "key", value)
@property
@pulumi.getter
def type(self) -> Optional[pulumi.Input[str]]:
"""
The type of the tag filter, either `KEY_ONLY`, `VALUE_ONLY`, or `KEY_AND_VALUE`.
"""
return pulumi.get(self, "type")
@type.setter
def type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "type", value)
@property
@pulumi.getter
def value(self) -> Optional[pulumi.Input[str]]:
"""
The value of the tag filter.
"""
return pulumi.get(self, "value")
@value.setter
def value(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "value", value)
@pulumi.input_type
class DeploymentGroupTriggerConfigurationArgs:
def __init__(__self__, *,
trigger_events: pulumi.Input[Sequence[pulumi.Input[str]]],
trigger_name: pulumi.Input[str],
trigger_target_arn: pulumi.Input[str]):
"""
:param pulumi.Input[Sequence[pulumi.Input[str]]] trigger_events: The event type or types for which notifications are triggered. Some values that are supported: `DeploymentStart`, `DeploymentSuccess`, `DeploymentFailure`, `DeploymentStop`, `DeploymentRollback`, `InstanceStart`, `InstanceSuccess`, `InstanceFailure`. See [the CodeDeploy documentation](http://docs.aws.amazon.com/codedeploy/latest/userguide/monitoring-sns-event-notifications-create-trigger.html) for all possible values.
:param pulumi.Input[str] trigger_name: The name of the notification trigger.
:param pulumi.Input[str] trigger_target_arn: The ARN of the SNS topic through which notifications are sent.
"""
pulumi.set(__self__, "trigger_events", trigger_events)
pulumi.set(__self__, "trigger_name", trigger_name)
pulumi.set(__self__, "trigger_target_arn", trigger_target_arn)
@property
@pulumi.getter(name="triggerEvents")
def trigger_events(self) -> pulumi.Input[Sequence[pulumi.Input[str]]]:
"""
The event type or types for which notifications are triggered. Some values that are supported: `DeploymentStart`, `DeploymentSuccess`, `DeploymentFailure`, `DeploymentStop`, `DeploymentRollback`, `InstanceStart`, `InstanceSuccess`, `InstanceFailure`. See [the CodeDeploy documentation](http://docs.aws.amazon.com/codedeploy/latest/userguide/monitoring-sns-event-notifications-create-trigger.html) for all possible values.
"""
return pulumi.get(self, "trigger_events")
@trigger_events.setter
def trigger_events(self, value: pulumi.Input[Sequence[pulumi.Input[str]]]):
pulumi.set(self, "trigger_events", value)
@property
@pulumi.getter(name="triggerName")
def trigger_name(self) -> pulumi.Input[str]:
"""
The name of the notification trigger.
"""
return pulumi.get(self, "trigger_name")
@trigger_name.setter
def trigger_name(self, value: pulumi.Input[str]):
pulumi.set(self, "trigger_name", value)
@property
@pulumi.getter(name="triggerTargetArn")
def trigger_target_arn(self) -> pulumi.Input[str]:
"""
The ARN of the SNS topic through which notifications are sent.
"""
return pulumi.get(self, "trigger_target_arn")
@trigger_target_arn.setter
def trigger_target_arn(self, value: pulumi.Input[str]):
pulumi.set(self, "trigger_target_arn", value)
|
from .checker import CSPChecker
from .checkerro import CSPReportOnlyChecker
from securityheaders.checkers import Finding,FindingType,FindingSeverity
class CSPReportOnlyNoCSPChecker(CSPReportOnlyChecker, CSPChecker):
def check(self, headers, opt_options=dict()):
rocsp = CSPReportOnlyChecker.getcsp(self,headers)
csp = CSPChecker.getcsp(self,headers)
if not csp and rocsp:
description = "The CSP is not enforced as only the content-security-policy-report-only header is present. Can you set the content-security-policy?"
return [Finding(rocsp.headerkey, FindingType.REPORT_ONLY,description,FindingSeverity.INFO, None, None)]
return []
|
from corehq.apps.app_manager.dbaccessors import (
get_brief_apps_in_domain,
get_latest_released_app,
get_latest_released_app_versions_by_app_id,
)
from corehq.apps.linked_domain.models import DomainLink
from corehq.apps.linked_domain.remote_accessors import (
get_brief_apps,
get_latest_released_versions_by_app_id,
get_released_app,
)
def get_master_app_briefs(domain_link, family_id):
if domain_link.is_remote:
apps = get_brief_apps(domain_link)
else:
apps = get_brief_apps_in_domain(domain_link.master_domain, include_remote=False)
# Ignore deleted, linked and remote apps
return [app for app in apps if family_id in [app._id, app.family_id] and app.doc_type == 'Application']
def get_latest_master_app_release(domain_link, app_id):
master_domain = domain_link.master_domain
linked_domain = domain_link.linked_domain
if domain_link.is_remote:
return get_released_app(master_domain, app_id, linked_domain, domain_link.remote_details)
else:
return get_latest_released_app(master_domain, app_id)
def get_latest_master_releases_versions(domain_link):
if domain_link.is_remote:
return get_latest_released_versions_by_app_id(domain_link)
else:
return get_latest_released_app_versions_by_app_id(domain_link.master_domain)
def create_linked_app(master_domain, master_id, target_domain, target_name, remote_details=None):
from corehq.apps.app_manager.models import LinkedApplication
linked_app = LinkedApplication(
name=target_name,
domain=target_domain,
)
return link_app(linked_app, master_domain, master_id, remote_details)
def link_app(linked_app, master_domain, master_id, remote_details=None):
DomainLink.link_domains(linked_app.domain, master_domain, remote_details)
linked_app.family_id = master_id
linked_app.doc_type = 'LinkedApplication'
linked_app.save()
return linked_app
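# Hedged usage sketch (hypothetical domain names and master app id, not taken
# from this codebase): creating a linked copy of a master app in another domain.
#
#   linked_app = create_linked_app(
#       master_domain='master-project',
#       master_id='abc123master',
#       target_domain='downstream-project',
#       target_name='Downstream App',
#   )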
|
import numpy as np
import matplotlib.pyplot as plt
import WDRT.shortTermExtreme as ecm
import WDRT.fatigue as fatigue
method = 1
# 1 - All peaks Weibull
# 2 - Weibull tail fit
# 3 - Peaks over threshold
# 4 - Block maxima GEV
# 5 - Block maxima Gumbel
# load global peaks
t_peaks = np.loadtxt(r'C:\full\filepath\to\WDRT\examples\data\t.dat')
peaks = np.loadtxt(r'C:\full\filepath\to\WDRT\examples\data\peaks.dat')/1000.
# get the 1-hour extreme distribution using the method selected above
x_e = np.linspace(0, 2 * np.max(peaks), 10000)
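# t_x: total record length, i.e. the span of the peak times plus (roughly) one
# average inter-peak spacing; t_st is the 1-hour short-term period in seconds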
t_x = (t_peaks[-1]-t_peaks[0]) + ((t_peaks[-1]-t_peaks[0])/(1.*len(peaks)))
t_st = 1. * 60. * 60.
if method==1:
stextreme_dist, peaks_dist, _ = ecm.extremeDistribution_Weibull(x=peaks, x_e=x_e, t_x=t_x, t_st=t_st)
elif method==2:
stextreme_dist, peaks_dist, _, _, _ = ecm.extremeDistribution_WeibullTailFit(x=peaks, x_e=x_e, t_x=t_x, t_st=t_st)
elif method==3:
thresh = np.mean(peaks) + 1.4*np.std(peaks)
thresh_x = np.min(x_e[x_e>thresh])
stextreme_dist, peaks_dist, pot_dist, _ = ecm.extremeDistribution_peaksOverThreshold(x=peaks, x_e=x_e, t_x=t_x, t_st=t_st, u=thresh)
elif method==4:
stextreme_dist,_,bm = ecm.extremeDistribution_blockMaximaGEV(x=peaks, t=t_peaks, t_st=t_st)
elif method == 5:
stextreme_dist,_,bm = ecm.extremeDistribution_blockMaximaGumb(x=peaks, t=t_peaks, t_st=t_st)
# goodness of fit plots
if method==1 or method==2:
bm = ecm.blockMaxima(x=peaks, t=t_peaks, t_st=t_st)
_ = ecm.goodnessOfFitPlots(data=peaks, prob_func=peaks_dist, np_return=1000001, x_pdf=x_e, bins_pdf=20, response_name='PTO Force', response_name_2='Peaks',response_units='kN')
if not method==3:
fig_gof = ecm.goodnessOfFitPlots(data=bm, prob_func=stextreme_dist, np_return=10001, x_pdf=x_e, bins_pdf=20, response_name='PTO Force', response_name_2='1-hr Extreme',response_units='kN')
if method==3:
bm = ecm.blockMaxima(x=peaks, t=t_peaks, t_st=t_st)
_ = ecm.goodnessOfFitPlots(data=peaks[peaks>thresh_x], prob_func=peaks_dist, np_return=100001, x_pdf=x_e[x_e>thresh_x], bins_pdf=20,m_prob=1.*len(peaks[peaks<thresh_x]), response_name='PTO Force', response_name_2='Peaks',response_units='kN')
_ = ecm.goodnessOfFitPlots(data=peaks[peaks>thresh]-thresh, prob_func=pot_dist, np_return=100001, x_pdf=x_e[x_e>thresh]-thresh, bins_pdf=20, response_name='PTO Force', response_name_2='Peaks Over Threshold',response_units='kN')
fig_gof = ecm.goodnessOfFitPlots(data=bm, prob_func=stextreme_dist, np_return=10001, x_pdf=x_e[x_e>thresh_x], bins_pdf=20, response_name='PTO Force', response_name_2='1-hr Extreme',response_units='kN')
# plot
plt.figure()
if method==3:
plt.plot(t_peaks[peaks<thresh], peaks[peaks<thresh], 'ko', alpha=0.2)
plt.plot(t_peaks[peaks>thresh], peaks[peaks>thresh], 'go')
plt.plot([0, t_peaks[-1]], [thresh, thresh], 'r--')
else:
plt.plot(t_peaks, peaks, 'go')
plt.plot([0, t_peaks[-1]], [0, 0], 'k--')
plt.xlabel('Time, $t$ [s]')
plt.ylabel('Response, $x$')
plt.xlim([0,3600*2])
plt.grid(True)
plt.ticklabel_format(style='sci', axis='y', scilimits=(0, 0))
plt.figure()
ax = plt.subplot(2, 1, 1)
if method==1 or method==2:
plt.plot(x_e, peaks_dist.pdf(x_e), 'g-', label='Peak distribution')
if not method==3:
plt.plot(x_e, stextreme_dist.pdf(x_e), 'r-', label='Extreme distribution')
if method==3:
plt.plot(x_e[x_e>thresh_x], peaks_dist.pdf(x_e[x_e>thresh_x]), 'g-', label='Peak distribution')
plt.plot(x_e[x_e>thresh_x], stextreme_dist.pdf(x_e[x_e>thresh_x]), 'r-', label='Extreme distribution')
xlim = ax.get_xlim()
ylim = ax.get_ylim()
if method==3:
plt.plot([thresh, thresh], [0, ylim[1]], 'k--')
plt.ylim([0,ylim[1]])
plt.xlim([0,xlim[1]])
plt.xlabel('Response, $x$')
plt.ylabel('$PDF(x)$')
plt.grid(True)
plt.ticklabel_format(style='sci', axis='x', scilimits=(0, 0))
plt.ticklabel_format(style='sci', axis='y', scilimits=(0, 0))
plt.legend()
ax = plt.subplot(2, 1, 2)
if method==1 or method==2:
plt.plot(x_e, peaks_dist.cdf(x_e), 'g-')
if not method==3:
plt.plot(x_e, stextreme_dist.cdf(x_e), 'r-')
if method==3:
plt.plot(x_e[x_e>thresh_x], peaks_dist.cdf(x_e[x_e>thresh_x]), 'g-')
plt.plot(x_e[x_e>thresh_x], stextreme_dist.cdf(x_e[x_e>thresh_x]), 'r-')
xlim = ax.get_xlim()
ylim = ax.get_ylim()
if method==3:
plt.plot([thresh, thresh], [0, ylim[1]], 'k--')
plt.ylim([0,ylim[1]])
plt.xlim([0,xlim[1]])
plt.xlabel('Response, $x$')
plt.ylabel('$CDF(x)$')
plt.grid(True)
plt.ticklabel_format(style='sci', axis='x', scilimits=(0, 0))
plt.show()
|
from .nginx import Nginx, NginxConfig
from . import tfserving_pb2 as tfserving_config
from . import tfserving_api
from ._base import TFSModelVersion, TFSModelConfig
from .tfserving_pb2 import TFSConfig
from .tfserving_api import TFSModelEndpoint, TFServeModel
__all__ = [
'Nginx',
'NginxConfig',
'tfserving_config',
'tfserving_pb2',
'tfserving_api',
'TFSModelVersion',
'TFSModelConfig',
'TFSConfig',
'TFSModelEndpoint',
'TFServeModel'
]
|
print("We Created Sub_Package for Numbers")
|
import itertools
import pandas as pd
from typing import Dict, Set, Hashable
def upset_from_dict_of_sets(inputs: Dict[Hashable, Set[Hashable]]):
''' Given a dictionary of sets, produce input ready for `upsetplot` python package
We produce this input by computing set intersections of all relevant combinations
of sets interacting with one another.
Example:
```python
import upsetplot
from maayanlab_bioinformatics.plotting import upset_from_dict_of_sets
upsetplot.plot(upset_from_dict_of_sets({
'A': {'a', 'b', 'c'},
'B': {'b', 'c', 'd'},
'C': {'d', 'e', 'f'},
}))
```
:param inputs: (Dict[Hashable, Set[Hashable]]) Several named sets
:return: (pd.DataFrame) in a form ready for `upsetplot.plot`
'''
sets = []
for n in range(1, len(inputs)+1):
if n == 1:
it = [[k] for k in inputs.keys()]
else:
it = map(list, itertools.combinations(inputs.keys(), n))
for V in it:
size = len(inputs[V[0]] if n == 1 else set.intersection(*[inputs[v] for v in V]))
if size > 0:
sets.append(dict({vv: vv in V for vv in inputs.keys()}, size=size))
return pd.DataFrame(sets).groupby(list(inputs.keys()))['size'].sum()
|
#Write a program that has a list called numeros and two functions called sorteio() and somaPar(). The first function draws 5 random numbers and puts them into the list, and the second function shows the sum of all the EVEN values drawn by the previous function.
#def sorteio(numeros):
# for i in range(0,5):
# numeros.append(0,100)
# print(numeros)
#def somaPar(numeros):
# s = 0
# while len(numeros)!=0:
# if numeros[0]%2==0:
# s += numeros[0]
# numeros.remove()
#print(f'The sum of the even numbers is {s}')
#numeros =[4,2,3]
#somaPar(numeros)
from random import randint
from time import sleep
def sorteia(lista):
    print('Drawing 5 values for the list ', end='')
for cont in range(0,5):
n = randint(1,10)
lista.append(n)
print(f'{n} ',end='', flush=True)
sleep(1)
    print('DONE!')
def somaPar(lista):
soma = 0
for valor in lista:
if valor%2 ==0:
soma+=valor
    print(f'Adding up the even values in {lista}, we get {soma}')
numeros = []
sorteia(numeros)
somaPar(numeros)
|
import numpy as np
import cv2
import os
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
from keras.preprocessing.image import ImageDataGenerator
from keras.utils.np_utils import to_categorical
from keras.models import Sequential
from keras.layers import Activation, Dense, Dropout, Flatten
from keras.layers import Conv2D, MaxPooling2D
from keras.optimizers import Adam
import pickle
# PARAMETERS
path = 'numbers/train2'
testRatio = 0.2
valRatio = 0.2
imageDimensions= (28,28,3)
batchSizeVal= 6
epochsVal = 10
stepsPerEpochVal = 2000
# IMPORTING DATA/IMAGES FROM FOLDERS
count = 0
images = [] # LIST CONTAINING ALL THE IMAGES
classNo = [] # LIST CONTAINING ALL THE CORRESPONDING CLASS ID OF IMAGES
myList = os.listdir(path)
print("Total Classes Detected:",len(myList))
noOfClasses = len(myList)
print("Importing Classes .......")
for x in range (0,noOfClasses):
myPicList = os.listdir(path+"/"+str(x))
for y in myPicList:
curImg = cv2.imread(path+"/"+str(x)+"/"+y)
curImg = cv2.resize(curImg,(28,28))
images.append(curImg)
classNo.append(x)
print(x,end= " ")
print(" ")
print("Total Images in Images List = ",len(images))
print("Total IDS in classNo List= ",len(classNo))
# CONVERT TO NUMPY ARRAY
images = np.array(images)
classNo = np.array(classNo)
print(images.shape)
print(classNo.shape)
# SPLITTING THE DATA
X_train,X_test,y_train,y_test = train_test_split(images,classNo,test_size=testRatio)
X_train,X_validation,y_train,y_validation = train_test_split(X_train,y_train,test_size=valRatio)
print(X_train.shape)
print(X_test.shape)
print(X_validation.shape)
# PLOT BAR CHART FOR DISTRIBUTION OF IMAGES
numOfSamples= []
for x in range(0,noOfClasses):
#print(len(np.where(y_train==x)[0]))
numOfSamples.append(len(np.where(y_train==x)[0]))
print(numOfSamples)
plt.figure(figsize=(10,5))
plt.bar(range(0,noOfClasses),numOfSamples)
plt.title("No of Images for each Class")
plt.xlabel("Class ID")
plt.ylabel("Number of Images")
plt.show()
# PREPOSSESSING FUNCTION FOR IMAGES FOR TRAINING
def preProcessing(img):
img = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
img = cv2.equalizeHist(img)
img = img/255
return img
#img = preProcessing(X_train[30])
#img = cv2.resize(img,(300,300))
#cv2.imshow("PreProcesssed",img)
#cv2.waitKey(0)
X_train= np.array(list(map(preProcessing,X_train)))
X_test= np.array(list(map(preProcessing,X_test)))
X_validation= np.array(list(map(preProcessing,X_validation)))
# RESHAPE IMAGES
X_train = X_train.reshape(X_train.shape[0],X_train.shape[1],X_train.shape[2],1)
X_test = X_test.reshape(X_test.shape[0],X_test.shape[1],X_test.shape[2],1)
X_validation = X_validation.reshape(X_validation.shape[0],X_validation.shape[1],X_validation.shape[2],1)
# IMAGE AUGMENTATION
dataGen = ImageDataGenerator(width_shift_range=0.1,
height_shift_range=0.1,
zoom_range=0.2,
shear_range=0.1,
rotation_range=10)
dataGen.fit(X_train)
# ONE HOT ENCODING OF MATRICES
y_train = to_categorical(y_train,noOfClasses)
y_test = to_categorical(y_test,noOfClasses)
y_validation = to_categorical(y_validation,noOfClasses)
# CREATING THE MODEL
def myModel():
model = Sequential()
model.add(Conv2D(64, kernel_size=(3,3), input_shape= (28, 28, 1)))
model.add(Activation("relu"))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(64, kernel_size=(3,3)))
model.add(Activation("relu"))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(64, kernel_size=(3,3)))
model.add(Activation("relu"))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(64))
model.add(Activation("relu"))
model.add(Dense(32))
model.add(Activation("relu"))
model.add(Dense(10))
model.add(Activation("softmax"))
    model.compile(optimizer=Adam(learning_rate=0.001), loss='categorical_crossentropy', metrics=['accuracy'])
return model
model = myModel()
print(model.summary())
#### STARTING THE TRAINING PROCESS
# model.fit accepts generators directly; fit_generator is deprecated.
history = model.fit(dataGen.flow(X_train, y_train, batch_size=batchSizeVal),
                    steps_per_epoch=stepsPerEpochVal,
                    epochs=epochsVal,
                    validation_data=(X_validation, y_validation),
                    shuffle=True)
# PLOT THE RESULTS
plt.figure(1)
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.legend(['training','validation'])
plt.title('Loss')
plt.xlabel('epoch')
plt.figure(2)
plt.plot(history.history['accuracy'])
plt.plot(history.history['val_accuracy'])
plt.legend(['training','validation'])
plt.title('Accuracy')
plt.xlabel('epoch')
plt.show()
# EVALUATE USING TEST IMAGES
score = model.evaluate(X_test,y_test,verbose=0)
print('Test Score = ',score[0])
print('Test Accuracy =', score[1])
# SAVE THE TRAINED MODEL
model.save('model11.h5')
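# LOAD THE SAVED MODEL AND PREDICT ONE TEST IMAGE
# (sketch: assumes this runs in the same session, so the preprocessed/reshaped
# X_test above is available and 'model11.h5' is the file saved just above)
from tensorflow.keras.models import load_model
loaded = load_model('model11.h5')
sample = X_test[:1]                       # already grayscale, equalized, scaled, reshaped
probs = loaded.predict(sample)
print('Predicted class:', np.argmax(probs, axis=1)[0])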
|
"""
CryptoAPIs
Crypto APIs 2.0 is a complex and innovative infrastructure layer that radically simplifies the development of any Blockchain and Crypto related applications. Organized around REST, Crypto APIs 2.0 can assist both novice Bitcoin/Ethereum enthusiasts and crypto experts with the development of their blockchain applications. Crypto APIs 2.0 provides unified endpoints and data, raw data, automatic tokens and coins forwardings, callback functionalities, and much more. # noqa: E501
The version of the OpenAPI document: 2.0.0
Contact: developers@cryptoapis.io
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from cryptoapis.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
OpenApiModel
)
from cryptoapis.exceptions import ApiAttributeError
def lazy_import():
from cryptoapis.model.delete_automatic_tokens_forwarding_e403 import DeleteAutomaticTokensForwardingE403
globals()['DeleteAutomaticTokensForwardingE403'] = DeleteAutomaticTokensForwardingE403
class InlineResponse40382(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
@cached_property
def additional_properties_type():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
"""
lazy_import()
return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
lazy_import()
return {
'api_version': (str,), # noqa: E501
'request_id': (str,), # noqa: E501
'error': (DeleteAutomaticTokensForwardingE403,), # noqa: E501
'context': (str,), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'api_version': 'apiVersion', # noqa: E501
'request_id': 'requestId', # noqa: E501
'error': 'error', # noqa: E501
'context': 'context', # noqa: E501
}
read_only_vars = {
}
_composed_schemas = {}
@classmethod
@convert_js_args_to_python_args
def _from_openapi_data(cls, api_version, request_id, error, *args, **kwargs): # noqa: E501
"""InlineResponse40382 - a model defined in OpenAPI
Args:
api_version (str): Specifies the version of the API that incorporates this endpoint.
request_id (str): Defines the ID of the request. The `requestId` is generated by Crypto APIs and it's unique for every request.
error (DeleteAutomaticTokensForwardingE403):
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
context (str): In batch situations the user can use the context to correlate responses with requests. This property is present regardless of whether the response was successful or returned as an error. `context` is specified by the user.. [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
self = super(OpenApiModel, cls).__new__(cls)
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.api_version = api_version
self.request_id = request_id
self.error = error
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
return self
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, api_version, request_id, error, *args, **kwargs): # noqa: E501
"""InlineResponse40382 - a model defined in OpenAPI
Args:
api_version (str): Specifies the version of the API that incorporates this endpoint.
request_id (str): Defines the ID of the request. The `requestId` is generated by Crypto APIs and it's unique for every request.
error (DeleteAutomaticTokensForwardingE403):
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
context (str): In batch situations the user can use the context to correlate responses with requests. This property is present regardless of whether the response was successful or returned as an error. `context` is specified by the user.. [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.api_version = api_version
self.request_id = request_id
self.error = error
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
if var_name in self.read_only_vars:
raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
f"class with read only attributes.")
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, TYPE_CHECKING
from azure.core.configuration import Configuration
from azure.core.pipeline import policies
from azure.mgmt.core.policies import ARMHttpLoggingPolicy, AsyncARMChallengeAuthenticationPolicy
from .._version import VERSION
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials_async import AsyncTokenCredential
class DeploymentScriptsClientConfiguration(Configuration): # pylint: disable=too-many-instance-attributes
"""Configuration for DeploymentScriptsClient.
Note that all parameters used to create this instance are saved as instance
attributes.
:param credential: Credential needed for the client to connect to Azure.
:type credential: ~azure.core.credentials_async.AsyncTokenCredential
:param subscription_id: Subscription Id which forms part of the URI for every service call.
:type subscription_id: str
:keyword api_version: Api Version. Default value is "2019-10-01-preview". Note that overriding
this default value may result in unsupported behavior.
:paramtype api_version: str
"""
def __init__(
self,
credential: "AsyncTokenCredential",
subscription_id: str,
**kwargs: Any
) -> None:
super(DeploymentScriptsClientConfiguration, self).__init__(**kwargs)
api_version = kwargs.pop('api_version', "2019-10-01-preview") # type: str
if credential is None:
raise ValueError("Parameter 'credential' must not be None.")
if subscription_id is None:
raise ValueError("Parameter 'subscription_id' must not be None.")
self.credential = credential
self.subscription_id = subscription_id
self.api_version = api_version
self.credential_scopes = kwargs.pop('credential_scopes', ['https://management.azure.com/.default'])
kwargs.setdefault('sdk_moniker', 'mgmt-resource/{}'.format(VERSION))
self._configure(**kwargs)
def _configure(
self,
**kwargs: Any
) -> None:
self.user_agent_policy = kwargs.get('user_agent_policy') or policies.UserAgentPolicy(**kwargs)
self.headers_policy = kwargs.get('headers_policy') or policies.HeadersPolicy(**kwargs)
self.proxy_policy = kwargs.get('proxy_policy') or policies.ProxyPolicy(**kwargs)
self.logging_policy = kwargs.get('logging_policy') or policies.NetworkTraceLoggingPolicy(**kwargs)
self.http_logging_policy = kwargs.get('http_logging_policy') or ARMHttpLoggingPolicy(**kwargs)
self.retry_policy = kwargs.get('retry_policy') or policies.AsyncRetryPolicy(**kwargs)
self.custom_hook_policy = kwargs.get('custom_hook_policy') or policies.CustomHookPolicy(**kwargs)
self.redirect_policy = kwargs.get('redirect_policy') or policies.AsyncRedirectPolicy(**kwargs)
self.authentication_policy = kwargs.get('authentication_policy')
if self.credential and not self.authentication_policy:
self.authentication_policy = AsyncARMChallengeAuthenticationPolicy(self.credential, *self.credential_scopes, **kwargs)
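# Example sketch (not part of the generated code): how this configuration is
# typically constructed. Assumes the `azure-identity` package is installed; the
# subscription id below is a placeholder.
def _example_configuration() -> "DeploymentScriptsClientConfiguration":
    from azure.identity.aio import DefaultAzureCredential
    return DeploymentScriptsClientConfiguration(
        credential=DefaultAzureCredential(),
        subscription_id="00000000-0000-0000-0000-000000000000",
    )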
|
from drink_robot.controllers.index import bp as index
from drink_robot.controllers.recipe import bp as recipe
from drink_robot.controllers.bottle import bp as bottle
from drink_robot.controllers.pour import bp as pour
import pigpio
def init_pins(app):
if app.config['DEBUG']:
return
gpio = pigpio.pi()
for pin in app.config['PINS'].values():
gpio.set_mode(pin, pigpio.OUTPUT)
gpio.write(pin, 1)
def init_all_blueprints(app):
app.register_blueprint(index)
app.register_blueprint(recipe)
app.register_blueprint(bottle)
app.register_blueprint(pour)
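# Minimal app-factory sketch (illustrative only): how these helpers are typically
# wired together. Assumes a Flask application whose config provides DEBUG and PINS.
def create_app(config_object=None):
    from flask import Flask
    app = Flask(__name__)
    if config_object is not None:
        app.config.from_object(config_object)
    init_pins(app)
    init_all_blueprints(app)
    return app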
|
from setuptools import setup
__version__ = "1.0.0"
with open('README.rst') as f:
long_description = f.read()
setup(
name = "django-spgateway",
version = __version__,
description = 'Django support for Spgateway',
keywords = "django, spgateway",
url = "https://github.com/superbil/django-spgateway",
license = "MIT",
packages = ["spgateway"],
include_package_data = True,
install_requires = ["django>=1.10", "pycrypto>=2.6.1"],
classifiers = [
"Development Status :: 4 - Beta",
"Programming Language :: Python",
"Programming Language :: Python :: 3.7",
"Topic :: Software Development :: Libraries :: Python Modules",
"Framework :: Django",
"Framework :: Django :: 2.2",
"License :: OSI Approved :: MIT License",
"Natural Language :: English",
"Environment :: Web Environment",
],
long_description = long_description,
    project_urls = {
'Bug Reports': 'https://github.com/Superbil/django-spgateway/issues',
'Source': 'https://github.com/superbil/django-spgateway',
}
)
|
"""Setup titiler."""
from setuptools import find_packages, setup
with open("README.md") as f:
long_description = f.read()
inst_reqs = [
"brotli-asgi>=1.0.0",
"cogeo-mosaic>=3.0.0rc2,<3.1",
"fastapi==0.63.0",
"geojson-pydantic",
"jinja2>=2.11.2,<3.0.0",
"morecantile",
"numpy",
"pydantic",
"python-dotenv",
"rasterio",
"rio-cogeo>=2.1,<2.2",
"rio-tiler>=2.0.5,<2.1",
"uvicorn[standard]>=0.12.0,<0.14.0",
# Additional requirements for python 3.6
"dataclasses;python_version<'3.7'",
"async_exit_stack>=1.0.1,<2.0.0;python_version<'3.7'",
"async_generator>=1.10,<2.0.0;python_version<'3.7'",
]
extra_reqs = {
"dev": ["pytest", "pytest-cov", "pytest-asyncio", "pre-commit", "requests"],
"test": ["pytest", "pytest-cov", "pytest-asyncio", "requests"],
"docs": ["nbconvert", "mkdocs", "mkdocs-material", "mkdocs-jupyter", "pygments"],
}
setup(
name="titiler",
version="0.2.0",
description=u"A modern dynamic tile server built on top of FastAPI and Rasterio/GDAL.",
long_description=long_description,
long_description_content_type="text/markdown",
python_requires=">=3.6",
classifiers=[
"Intended Audience :: Information Technology",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: BSD License",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
],
keywords="COG STAC MosaicJSON FastAPI",
author=u"Vincent Sarago",
author_email="vincent@developmentseed.org",
url="https://github.com/developmentseed/titiler",
license="MIT",
packages=find_packages(exclude=["tests*"]),
package_data={"titiler": ["templates/*.html", "templates/*.xml"]},
include_package_data=True,
zip_safe=False,
install_requires=inst_reqs,
extras_require=extra_reqs,
)
|
#
# Copyright 2019 Xilinx Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from enum import auto, unique, Enum
from typing import Any
from nndct_shared.base import NNDCT_OP
from nndct_shared.nndct_graph.base_operator import (AutoName, NndctIrAttr,
OccurenceType, Operation)
from nndct_shared.nndct_graph.base_tensor import Tensor
import numpy as np
class Conv1d(Operation):
@unique
class AttrName(AutoName):
KERNEL = auto()
STRIDE = auto()
DILATION = auto()
PAD_MODE = auto()
PAD = auto()
GROUP = auto()
BIAS_TERM = auto()
IN_DIM = auto()
OUT_DIM = auto()
@unique
class ParamName(AutoName):
WEIGHTS = auto()
BIAS = auto()
def __init__(self, *args, **kwargs) -> None:
super(Conv1d, self).__init__(*args, **kwargs)
# allocate memory for attr value
self._attr_value_mem = {
self.AttrName.KERNEL: [None],
self.AttrName.STRIDE: [None],
self.AttrName.DILATION: [None],
self.AttrName.PAD_MODE: [None],
self.AttrName.PAD: [None, None],
self.AttrName.GROUP: [None],
self.AttrName.BIAS_TERM: [None],
self.AttrName.IN_DIM: [None],
self.AttrName.OUT_DIM: [None],
}
self._attrs[self.AttrName.KERNEL] = NndctIrAttr(
name=self.AttrName.KERNEL,
value_type=int,
size=1,
value_mem=self._attr_value_mem[self.AttrName.KERNEL],
occurence_type=OccurenceType.REQUIRED,
annotation=r"""kernel size, [kernel_w, kernel_h]""")
self._attrs[self.AttrName.STRIDE] = NndctIrAttr(
name=self.AttrName.STRIDE,
value_type=int,
size=1,
value_mem=self._attr_value_mem[self.AttrName.STRIDE],
occurence_type=OccurenceType.REQUIRED,
annotation=r"""stride [stride_w, stride_h]""")
self._attrs[self.AttrName.DILATION] = NndctIrAttr(
name=self.AttrName.DILATION,
value_type=int,
size=1,
value_mem=self._attr_value_mem[self.AttrName.DILATION],
occurence_type=OccurenceType.OPTIONAL,
default_value=[1],
annotation=r"""dilation, [dilation_w, dilation_h]""")
self._attrs[self.AttrName.PAD_MODE] = NndctIrAttr(
name=self.AttrName.PAD_MODE,
value_type=str,
size=1,
value_mem=self._attr_value_mem[self.AttrName.PAD_MODE],
occurence_type=OccurenceType.REQUIRED,
annotation=r"""padding mode, 0-PADDING, 1-SAME, 2-VALID, 3-CEIL
for the FUTURE. use attr pad. SAME, make output with same
width and height as input. VALID, no padding""")
self._attrs[self.AttrName.PAD] = NndctIrAttr(
name=self.AttrName.PAD,
value_type=int,
size=2,
value_mem=self._attr_value_mem[self.AttrName.PAD],
occurence_type=OccurenceType.OPTIONAL,
default_value=[0,0],
annotation=r"""padding size, only effective when pad mode is PADDING, ["
"left, right, top, bottom],""")
self._attrs[self.AttrName.GROUP] = NndctIrAttr(
name=self.AttrName.GROUP,
value_type=int,
size=1,
value_mem=self._attr_value_mem[self.AttrName.GROUP],
occurence_type=OccurenceType.OPTIONAL,
default_value=1,
annotation=r"""group""")
self._attrs[self.AttrName.BIAS_TERM] = NndctIrAttr(
name=self.AttrName.BIAS_TERM,
value_type=bool,
size=1,
value_mem=self._attr_value_mem[self.AttrName.BIAS_TERM],
occurence_type=OccurenceType.REQUIRED,
annotation=r"""whether bias exist""")
self._attrs[self.AttrName.IN_DIM] = NndctIrAttr(
name=self.AttrName.IN_DIM,
value_type=int,
size=1,
value_mem=self._attr_value_mem[self.AttrName.IN_DIM],
occurence_type=OccurenceType.REQUIRED,
annotation=r"""in_channels""")
self._attrs[self.AttrName.OUT_DIM] = NndctIrAttr(
name=self.AttrName.OUT_DIM,
value_type=int,
size=1,
value_mem=self._attr_value_mem[self.AttrName.OUT_DIM],
occurence_type=OccurenceType.REQUIRED,
annotation=r"""out_channels""")
class Conv2d(Operation):
@unique
class AttrName(AutoName):
KERNEL = auto()
STRIDE = auto()
DILATION = auto()
PAD_MODE = auto()
PAD = auto()
GROUP = auto()
BIAS_TERM = auto()
IN_DIM = auto()
OUT_DIM = auto()
@unique
class ParamName(AutoName):
WEIGHTS = auto()
BIAS = auto()
def __init__(self, *args, **kwargs) -> None:
super(Conv2d, self).__init__(*args, **kwargs)
# allocate memory for attr value
self._attr_value_mem = {
self.AttrName.KERNEL: [None, None],
self.AttrName.STRIDE: [None, None],
self.AttrName.DILATION: [None, None],
self.AttrName.PAD_MODE: [None],
self.AttrName.PAD: [None, None, None, None],
self.AttrName.GROUP: [None],
self.AttrName.BIAS_TERM: [None],
self.AttrName.IN_DIM: [None],
self.AttrName.OUT_DIM: [None],
}
self._attrs[self.AttrName.KERNEL] = NndctIrAttr(
name=self.AttrName.KERNEL,
value_type=int,
size=2,
value_mem=self._attr_value_mem[self.AttrName.KERNEL],
occurence_type=OccurenceType.REQUIRED,
annotation=r"""kernel size, [kernel_w, kernel_h]""")
self._attrs[self.AttrName.STRIDE] = NndctIrAttr(
name=self.AttrName.STRIDE,
value_type=int,
size=2,
value_mem=self._attr_value_mem[self.AttrName.STRIDE],
occurence_type=OccurenceType.REQUIRED,
annotation=r"""stride [stride_w, stride_h]""")
self._attrs[self.AttrName.DILATION] = NndctIrAttr(
name=self.AttrName.DILATION,
value_type=int,
size=2,
value_mem=self._attr_value_mem[self.AttrName.DILATION],
occurence_type=OccurenceType.OPTIONAL,
default_value=[1, 1],
annotation=r"""dilation, [dilation_w, dilation_h]""")
self._attrs[self.AttrName.PAD_MODE] = NndctIrAttr(
name=self.AttrName.PAD_MODE,
value_type=int,
size=1,
value_mem=self._attr_value_mem[self.AttrName.PAD_MODE],
occurence_type=OccurenceType.REQUIRED,
annotation=r"""padding mode, 0-PADDING, 1-SAME, 2-VALID, 3-CEIL
for the FUTURE. use attr pad. SAME, make output with same
width and height as input. VALID, no padding""")
self._attrs[self.AttrName.PAD] = NndctIrAttr(
name=self.AttrName.PAD,
value_type=int,
size=4,
value_mem=self._attr_value_mem[self.AttrName.PAD],
occurence_type=OccurenceType.OPTIONAL,
default_value=[0, 0, 0, 0],
annotation=r"""padding size, only effective when pad mode is PADDING, ["
"left, right, top, bottom],""")
self._attrs[self.AttrName.GROUP] = NndctIrAttr(
name=self.AttrName.GROUP,
value_type=int,
size=1,
value_mem=self._attr_value_mem[self.AttrName.GROUP],
occurence_type=OccurenceType.OPTIONAL,
default_value=1,
annotation=r"""group""")
self._attrs[self.AttrName.BIAS_TERM] = NndctIrAttr(
name=self.AttrName.BIAS_TERM,
value_type=bool,
size=1,
value_mem=self._attr_value_mem[self.AttrName.BIAS_TERM],
occurence_type=OccurenceType.REQUIRED,
annotation=r"""whether bias exist""")
self._attrs[self.AttrName.IN_DIM] = NndctIrAttr(
name=self.AttrName.IN_DIM,
value_type=int,
size=1,
value_mem=self._attr_value_mem[self.AttrName.IN_DIM],
occurence_type=OccurenceType.REQUIRED,
annotation=r"""in_channels""")
self._attrs[self.AttrName.OUT_DIM] = NndctIrAttr(
name=self.AttrName.OUT_DIM,
value_type=int,
size=1,
value_mem=self._attr_value_mem[self.AttrName.OUT_DIM],
occurence_type=OccurenceType.REQUIRED,
annotation=r"""out_channels""")
class Conv3d(Operation):
@unique
class AttrName(AutoName):
KERNEL = auto()
STRIDE = auto()
DILATION = auto()
GROUP = auto()
PAD_MODE = auto()
PAD = auto()
BIAS_TERM = auto()
IN_DIM = auto()
OUT_DIM = auto()
OUTPUT_PAD = auto()
@unique
class ParamName(AutoName):
WEIGHTS = auto()
BIAS = auto()
def __init__(self, *args, **kwargs) -> None:
super().__init__(*args, **kwargs)
# allocate memory for attr value
self._attr_value_mem = {
self.AttrName.KERNEL: [None, None, None],
self.AttrName.STRIDE: [None, None, None],
self.AttrName.DILATION: [None, None, None],
self.AttrName.GROUP: [None],
self.AttrName.PAD_MODE: [None],
self.AttrName.PAD: [None, None, None, None, None, None],
self.AttrName.OUTPUT_PAD: [None, None, None, None, None, None],
self.AttrName.BIAS_TERM: [None],
self.AttrName.IN_DIM: [None],
self.AttrName.OUT_DIM: [None],
}
self._attrs[self.AttrName.KERNEL] = NndctIrAttr(
name=self.AttrName.KERNEL,
value_type=int,
size=3,
value_mem=self._attr_value_mem[self.AttrName.KERNEL],
occurence_type=OccurenceType.REQUIRED,
annotation=r"""kernel size, [kernel_w, kernel_h, kernel_d]""")
self._attrs[self.AttrName.STRIDE] = NndctIrAttr(
name=self.AttrName.STRIDE,
value_type=int,
size=None,
value_mem=self._attr_value_mem[self.AttrName.STRIDE],
occurence_type=OccurenceType.REQUIRED,
annotation=r"""stride [stride_w, stride_h, stride_d]""")
self._attrs[self.AttrName.DILATION] = NndctIrAttr(
name=self.AttrName.DILATION,
value_type=int,
size=None,
value_mem=self._attr_value_mem[self.AttrName.DILATION],
occurence_type=OccurenceType.OPTIONAL,
default_value=[1, 1, 1],
annotation=r"""dilation, [dilation_w, dilation_h, dilation_d]""")
self._attrs[self.AttrName.GROUP] = NndctIrAttr(
name=self.AttrName.GROUP,
value_type=int,
size=1,
value_mem=self._attr_value_mem[self.AttrName.GROUP],
occurence_type=OccurenceType.OPTIONAL,
default_value=1,
annotation=r"""group""")
self._attrs[self.AttrName.PAD_MODE] = NndctIrAttr(
name=self.AttrName.PAD_MODE,
value_type=str,
size=1,
value_mem=self._attr_value_mem[self.AttrName.PAD_MODE],
occurence_type=OccurenceType.REQUIRED,
annotation=r"""We support 4 padding mode: `FLOOR, CEIL, SAME, VALID`. "
"For example, when you parsing models from other frameworks, "
"`caffe, pytorch->\"FLOOR\", tensorflow->\"SAME\" or \"VALID\"`""")
self._attrs[self.AttrName.PAD] = NndctIrAttr(
name=self.AttrName.PAD,
value_type=int,
size=6,
value_mem=self._attr_value_mem[self.AttrName.PAD],
occurence_type=OccurenceType.OPTIONAL,
default_value=[0, 0, 0, 0, 0, 0],
annotation=r"""padding size, only effective when pad mode is PADDING, ["
"left, right, top, bottom, near, far],""")
self._attrs[self.AttrName.OUTPUT_PAD] = NndctIrAttr(
name=self.AttrName.OUTPUT_PAD,
value_type=int,
size=6,
value_mem=self._attr_value_mem[self.AttrName.OUTPUT_PAD],
occurence_type=OccurenceType.OPTIONAL,
default_value=[0, 0, 0, 0, 0, 0],
annotation=r"""additional size added to one side of each dimension in the output, ["
"left, right, top, bottom, near, far],""")
self._attrs[self.AttrName.BIAS_TERM] = NndctIrAttr(
name=self.AttrName.BIAS_TERM,
value_type=bool,
size=None,
value_mem=self._attr_value_mem[self.AttrName.BIAS_TERM],
occurence_type=OccurenceType.REQUIRED,
annotation=r"""whether bias exist""")
self._attrs[self.AttrName.IN_DIM] = NndctIrAttr(
name=self.AttrName.IN_DIM,
value_type=int,
size=None,
value_mem=self._attr_value_mem[self.AttrName.IN_DIM],
occurence_type=OccurenceType.REQUIRED,
annotation=r"""in_channels""")
self._attrs[self.AttrName.OUT_DIM] = NndctIrAttr(
name=self.AttrName.OUT_DIM,
value_type=int,
size=None,
value_mem=self._attr_value_mem[self.AttrName.OUT_DIM],
occurence_type=OccurenceType.REQUIRED,
annotation=r"""out_channels""")
class BatchNorm(Operation):
@unique
class AttrName(AutoName):
EPSILON = auto()
SCALE = auto()
CENTER = auto()
OUT_DIM = auto()
AXIS = auto()
@unique
class ParamName(AutoName):
GAMMA = auto()
BETA = auto()
MOVING_MEAN = auto()
MOVING_VAR = auto()
def __init__(self, *args, **kwargs) -> None:
super(BatchNorm, self).__init__(*args, **kwargs)
# allocate memory for attr value
self._attr_value_mem = {
self.AttrName.EPSILON: [None],
self.AttrName.SCALE: [None],
self.AttrName.CENTER: [None],
self.AttrName.OUT_DIM: [None],
self.AttrName.AXIS: [None]
}
self._attrs[self.AttrName.EPSILON] = NndctIrAttr(
name=self.AttrName.EPSILON,
value_type=float,
size=1,
value_mem=self._attr_value_mem[self.AttrName.EPSILON],
occurence_type=OccurenceType.REQUIRED,
annotation=r"""epsilon""")
self._attrs[self.AttrName.SCALE] = NndctIrAttr(
name=self.AttrName.SCALE,
value_type=bool,
size=1,
value_mem=self._attr_value_mem[self.AttrName.SCALE],
occurence_type=OccurenceType.REQUIRED,
annotation=r"""scale""")
self._attrs[self.AttrName.CENTER] = NndctIrAttr(
name=self.AttrName.CENTER,
value_type=bool,
size=1,
value_mem=self._attr_value_mem[self.AttrName.CENTER],
occurence_type=OccurenceType.REQUIRED,
annotation=r"""center""")
self._attrs[self.AttrName.OUT_DIM] = NndctIrAttr(
name=self.AttrName.OUT_DIM,
value_type=int,
size=1,
value_mem=self._attr_value_mem[self.AttrName.OUT_DIM],
occurence_type=OccurenceType.REQUIRED,
annotation=r"""num features""")
self._attrs[self.AttrName.AXIS] = NndctIrAttr(
name=self.AttrName.AXIS,
value_type=int,
size=1,
value_mem=self._attr_value_mem[self.AttrName.AXIS],
occurence_type=OccurenceType.REQUIRED,
annotation=r"""the axis of the input to implement batchnorm""")
class Dense(Operation):
@unique
class AttrName(AutoName):
BIAS_TERM = auto()
IN_DIM = auto()
OUT_DIM = auto()
@unique
class ParamName(AutoName):
WEIGHTS = auto()
BIAS = auto()
def __init__(self, *args, **kwargs) -> None:
super(Dense, self).__init__(*args, **kwargs)
# allocate memory for attr value
self._attr_value_mem = {
self.AttrName.BIAS_TERM: [None],
self.AttrName.IN_DIM: [None],
self.AttrName.OUT_DIM: [None],
}
self._attrs[self.AttrName.BIAS_TERM] = NndctIrAttr(
name=self.AttrName.BIAS_TERM,
value_type=bool,
size=1,
value_mem=self._attr_value_mem[self.AttrName.BIAS_TERM],
occurence_type=OccurenceType.REQUIRED,
annotation=r"""whether bias exist""")
self._attrs[self.AttrName.IN_DIM] = NndctIrAttr(
name=self.AttrName.IN_DIM,
value_type=int,
size=1,
value_mem=self._attr_value_mem[self.AttrName.IN_DIM],
occurence_type=OccurenceType.REQUIRED,
annotation=r"""in_channels""")
self._attrs[self.AttrName.OUT_DIM] = NndctIrAttr(
name=self.AttrName.OUT_DIM,
value_type=int,
size=1,
value_mem=self._attr_value_mem[self.AttrName.OUT_DIM],
occurence_type=OccurenceType.REQUIRED,
annotation=r"""out_channels""")
class Concat(Operation):
@unique
class AttrName(AutoName):
AXIS = auto()
def __init__(self, *args, **kwargs) -> None:
super(Concat, self).__init__(*args, **kwargs)
# allocate memory for attr value
self._attr_value_mem = {
self.AttrName.AXIS: [None],
}
self._attrs[self.AttrName.AXIS] = NndctIrAttr(
name=self.AttrName.AXIS,
value_type=int,
size=1,
value_mem=self._attr_value_mem[self.AttrName.AXIS],
occurence_type=OccurenceType.REQUIRED,
annotation=r"""specified axis""")
class Shape(Operation):
@unique
class AttrName(AutoName):
AXIS = auto()
def __init__(self, *args, **kwargs) -> None:
super(Shape, self).__init__(*args, **kwargs)
# allocate memory for attr value
self._attr_value_mem = {
self.AttrName.AXIS: [None],
}
self._attrs[self.AttrName.AXIS] = NndctIrAttr(
name=self.AttrName.AXIS,
value_type=int,
size=1,
value_mem=self._attr_value_mem[self.AttrName.AXIS],
occurence_type=OccurenceType.REQUIRED,
annotation=r"""specified axis""")
class Reshape(Operation):
@unique
class AttrName(AutoName):
SHAPE = auto()
def __init__(self, *args, **kwargs) -> None:
super(Reshape, self).__init__(*args, **kwargs)
# allocate memory for attr value
self._attr_value_mem = {
self.AttrName.SHAPE: [], # possible any length
}
self._attrs[self.AttrName.SHAPE] = NndctIrAttr(
name=self.AttrName.SHAPE,
value_type=(int, Tensor),
size=None,
value_mem=self._attr_value_mem[self.AttrName.SHAPE],
occurence_type=OccurenceType.REQUIRED,
annotation=r"""the target shape""")
class MaxPool(Operation):
@unique
class AttrName(AutoName):
KERNEL = auto()
STRIDE = auto()
DILATION = auto()
PAD_MODE = auto()
PAD = auto()
GLOBAL = auto()
def __init__(self, *args, **kwargs) -> None:
super(MaxPool, self).__init__(*args, **kwargs)
# allocate memory for attr value
self._attr_value_mem = {
self.AttrName.KERNEL: [None, None],
self.AttrName.STRIDE: [None, None],
self.AttrName.PAD_MODE: [None],
self.AttrName.PAD: [None, None, None, None],
self.AttrName.GLOBAL: [None],
}
self._attrs[self.AttrName.KERNEL] = NndctIrAttr(
name=self.AttrName.KERNEL,
value_type=int,
size=2,
value_mem=self._attr_value_mem[self.AttrName.KERNEL],
occurence_type=OccurenceType.REQUIRED,
annotation=r"""kernel size, [kernel_w, kernel_h]""")
self._attrs[self.AttrName.STRIDE] = NndctIrAttr(
name=self.AttrName.STRIDE,
value_type=int,
size=2,
value_mem=self._attr_value_mem[self.AttrName.STRIDE],
occurence_type=OccurenceType.REQUIRED,
annotation=r"""stride [stride_w, stride_h]""")
self._attrs[self.AttrName.PAD_MODE] = NndctIrAttr(
name=self.AttrName.PAD_MODE,
value_type=int,
size=1,
value_mem=self._attr_value_mem[self.AttrName.PAD_MODE],
occurence_type=OccurenceType.REQUIRED,
annotation=r"""padding mode, 0-PADDING, 1-SAME, 2-VALID, 3-CEIL
for the FUTURE. use attr pad. SAME, make output with same
width and height as input. VALID, no padding""")
self._attrs[self.AttrName.PAD] = NndctIrAttr(
name=self.AttrName.PAD,
value_type=int,
size=4,
value_mem=self._attr_value_mem[self.AttrName.PAD],
occurence_type=OccurenceType.OPTIONAL,
default_value=[0, 0, 0, 0],
annotation=r"""padding size, only effective when pad mode is PADDING, ["
"left, right, top, bottom],""")
self._attrs[self.AttrName.GLOBAL] = NndctIrAttr(
name=self.AttrName.GLOBAL,
value_type=bool,
size=1,
value_mem=self._attr_value_mem[self.AttrName.GLOBAL],
occurence_type=OccurenceType.OPTIONAL,
default_value=False,
annotation=r"""global""")
class MaxPool1d(Operation):
@unique
class AttrName(AutoName):
KERNEL = auto()
STRIDE = auto()
DILATION = auto()
PAD_MODE = auto()
PAD = auto()
GLOBAL = auto()
COUNT_INCLUDE_PAD = auto()
def __init__(self, *args, **kwargs) -> None:
super(MaxPool1d, self).__init__(*args, **kwargs)
# allocate memory for attr value
self._attr_value_mem = {
self.AttrName.KERNEL: [None],
self.AttrName.STRIDE: [None],
self.AttrName.PAD_MODE: [None],
self.AttrName.PAD: [None, None],
self.AttrName.GLOBAL: [None],
self.AttrName.COUNT_INCLUDE_PAD: [None]
}
self._attrs[self.AttrName.KERNEL] = NndctIrAttr(
name=self.AttrName.KERNEL,
value_type=int,
size=1,
value_mem=self._attr_value_mem[self.AttrName.KERNEL],
occurence_type=OccurenceType.REQUIRED,
annotation=r"""kernel size, [kernel_w, kernel_h]""")
self._attrs[self.AttrName.STRIDE] = NndctIrAttr(
name=self.AttrName.STRIDE,
value_type=int,
size=1,
value_mem=self._attr_value_mem[self.AttrName.STRIDE],
occurence_type=OccurenceType.REQUIRED,
annotation=r"""stride [stride_w, stride_h]""")
self._attrs[self.AttrName.PAD_MODE] = NndctIrAttr(
name=self.AttrName.PAD_MODE,
value_type=int,
size=1,
value_mem=self._attr_value_mem[self.AttrName.PAD_MODE],
occurence_type=OccurenceType.REQUIRED,
annotation=r"""padding mode, 0-PADDING, 1-SAME, 2-VALID, 3-CEIL
for the FUTURE. use attr pad. SAME, make output with same
width and height as input. VALID, no padding""")
self._attrs[self.AttrName.PAD] = NndctIrAttr(
name=self.AttrName.PAD,
value_type=int,
size=2,
value_mem=self._attr_value_mem[self.AttrName.PAD],
occurence_type=OccurenceType.OPTIONAL,
default_value=[0, 0],
annotation=r"""padding size, only effective when pad mode is PADDING, ["
"left, right, top, bottom],""")
self._attrs[self.AttrName.GLOBAL] = NndctIrAttr(
name=self.AttrName.GLOBAL,
value_type=bool,
size=1,
value_mem=self._attr_value_mem[self.AttrName.GLOBAL],
occurence_type=OccurenceType.OPTIONAL,
default_value=False,
annotation=r"""global""")
self._attrs[self.AttrName.COUNT_INCLUDE_PAD] = NndctIrAttr(
name=self.AttrName.COUNT_INCLUDE_PAD,
value_type=bool,
size=1,
value_mem=self._attr_value_mem[self.AttrName.COUNT_INCLUDE_PAD],
occurence_type=OccurenceType.OPTIONAL,
default_value=[True],
annotation=r"""when True, will include the zero-padding in the averaging calculation""")
class AvgPool(Operation):
@unique
class AttrName(AutoName):
KERNEL = auto()
STRIDE = auto()
PAD_MODE = auto()
PAD = auto()
GLOBAL = auto()
COUNT_INCLUDE_PAD = auto()
def __init__(self, *args, **kwargs) -> None:
super(AvgPool, self).__init__(*args, **kwargs)
# allocate memory for attr value
self._attr_value_mem = {
self.AttrName.KERNEL: [None, None],
self.AttrName.STRIDE: [None, None],
self.AttrName.PAD_MODE: [None],
self.AttrName.PAD: [None, None, None, None],
self.AttrName.GLOBAL: [None],
self.AttrName.COUNT_INCLUDE_PAD: [None]
}
self._attrs[self.AttrName.KERNEL] = NndctIrAttr(
name=self.AttrName.KERNEL,
value_type=int,
size=2,
value_mem=self._attr_value_mem[self.AttrName.KERNEL],
occurence_type=OccurenceType.REQUIRED,
annotation=r"""kernel size, [kernel_w, kernel_h]""")
self._attrs[self.AttrName.STRIDE] = NndctIrAttr(
name=self.AttrName.STRIDE,
value_type=int,
size=2,
value_mem=self._attr_value_mem[self.AttrName.STRIDE],
occurence_type=OccurenceType.REQUIRED,
annotation=r"""stride [stride_w, stride_h]""")
self._attrs[self.AttrName.PAD_MODE] = NndctIrAttr(
name=self.AttrName.PAD_MODE,
value_type=int,
size=1,
value_mem=self._attr_value_mem[self.AttrName.PAD_MODE],
occurence_type=OccurenceType.REQUIRED,
annotation=r"""padding mode, 0-PADDING, 1-SAME, 2-VALID, 3-CEIL
for the FUTURE. use attr pad. SAME, make output with same
width and height as input. VALID, no padding""")
self._attrs[self.AttrName.PAD] = NndctIrAttr(
name=self.AttrName.PAD,
value_type=int,
size=4,
value_mem=self._attr_value_mem[self.AttrName.PAD],
occurence_type=OccurenceType.OPTIONAL,
default_value=[0, 0, 0, 0],
annotation=r"""padding size, only effective when pad mode is PADDING, ["
"left, right, top, bottom],""")
self._attrs[self.AttrName.GLOBAL] = NndctIrAttr(
name=self.AttrName.GLOBAL,
value_type=bool,
size=1,
value_mem=self._attr_value_mem[self.AttrName.GLOBAL],
occurence_type=OccurenceType.OPTIONAL,
default_value=False,
annotation=r"""global""")
self._attrs[self.AttrName.COUNT_INCLUDE_PAD] = NndctIrAttr(
name=self.AttrName.COUNT_INCLUDE_PAD,
value_type=bool,
size=1,
value_mem=self._attr_value_mem[self.AttrName.COUNT_INCLUDE_PAD],
occurence_type=OccurenceType.OPTIONAL,
default_value=[True],
annotation=r"""when True, will include the zero-padding in the averaging calculation""")
class Flatten(Operation):
@unique
class AttrName(AutoName):
START_DIM = "start_axis"
END_DIM = "end_axis"
def __init__(self, *args, **kwargs) -> None:
super(Flatten, self).__init__(*args, **kwargs)
# allocate memory for attr value
self._attr_value_mem = {
self.AttrName.START_DIM: [None],
self.AttrName.END_DIM: [None],
}
self._attrs[self.AttrName.START_DIM] = NndctIrAttr(
name=self.AttrName.START_DIM,
value_type=int,
size=1,
value_mem=self._attr_value_mem[self.AttrName.START_DIM],
occurence_type=OccurenceType.REQUIRED,
annotation=r"""the first dim to flatten""")
self._attrs[self.AttrName.END_DIM] = NndctIrAttr(
name=self.AttrName.END_DIM,
value_type=int,
size=1,
value_mem=self._attr_value_mem[self.AttrName.END_DIM],
occurence_type=OccurenceType.REQUIRED,
annotation=r"""the last dim to flatten""")
# including mean, max, min etc.
class PermuteInvariantOp(Operation):
@unique
class AttrName(AutoName):
DIMS = "axis"
KEEP_DIMS = auto()
def __init__(self, *args, **kwargs) -> None:
super().__init__(*args, **kwargs)
# allocate memory for attr value
self._attr_value_mem = {
self.AttrName.DIMS: [],
self.AttrName.KEEP_DIMS: [None],
}
self._attrs[self.AttrName.DIMS] = NndctIrAttr(
name=self.AttrName.DIMS,
value_type=int,
size=None,
value_mem=self._attr_value_mem[self.AttrName.DIMS],
occurence_type=OccurenceType.REQUIRED,
annotation=r"""The dimensions to reduce. List of integers""")
self._attrs[self.AttrName.KEEP_DIMS] = NndctIrAttr(
name=self.AttrName.KEEP_DIMS,
value_type=bool,
size=1,
value_mem=self._attr_value_mem[self.AttrName.KEEP_DIMS],
occurence_type=OccurenceType.REQUIRED,
annotation=r"""specify whether the reduced dimension is kept or not.""")
class Permute(Operation):
@unique
class AttrName(AutoName):
ORDER = auto()
def __init__(self, op_type, *args, **kwargs) -> None:
super(Permute, self).__init__(op_type)
# allocate memory for attr value
self._attr_value_mem = {
self.AttrName.ORDER: [],
}
self._attrs[self.AttrName.ORDER] = NndctIrAttr(
name=self.AttrName.ORDER,
value_type=(int, Tensor),
size=None,
value_mem=self._attr_value_mem[self.AttrName.ORDER],
occurence_type=OccurenceType.REQUIRED,
annotation=r"""The dimensions to reduce. List of integers""")
class Softmax(Operation):
@unique
class AttrName(AutoName):
AXIS = auto()
def __init__(self) -> None:
super(Softmax, self).__init__(NNDCT_OP.SOFTMAX)
# allocate memory for attr value
self._attr_value_mem = {
self.AttrName.AXIS: [None],
}
self._attrs[self.AttrName.AXIS] = NndctIrAttr(
name=self.AttrName.AXIS,
value_type=int,
size=1,
value_mem=self._attr_value_mem[self.AttrName.AXIS],
occurence_type=OccurenceType.REQUIRED,
annotation=r"""the dimension softmax would be performed on. default
is the last dimension.""")
class Lstm(Operation):
@unique
class AttrName(AutoName):
INPUT_SIZE = auto()
HIDDEN_SIZE = auto()
BIDIRECTIONAL = auto()
NUM_LAYERS = auto()
BATCH_FIRST = auto()
@unique
class ParamName(AutoName):
WEIGHT_IH = auto()
WEIGHT_HH = auto()
WEIGHT_IH_REVERSE = auto()
WEIGHT_HH_REVERSE = auto()
BIAS = auto()
BIAS_REVERSE = auto()
def __init__(self, *args, **kwargs) -> None:
super(Lstm, self).__init__(*args, **kwargs)
# allocate memory for attr value
self._attr_value_mem = {
self.AttrName.INPUT_SIZE: [None],
self.AttrName.HIDDEN_SIZE: [None],
self.AttrName.BIDIRECTIONAL: [None],
self.AttrName.NUM_LAYERS: [None],
self.AttrName.BATCH_FIRST: [None],
}
self._attrs[self.AttrName.INPUT_SIZE] = NndctIrAttr(
name=self.AttrName.INPUT_SIZE,
value_type=int,
size=1,
value_mem=self._attr_value_mem[self.AttrName.INPUT_SIZE],
occurence_type=OccurenceType.REQUIRED,
annotation=r"""input size of LSTM.""")
self._attrs[self.AttrName.HIDDEN_SIZE] = NndctIrAttr(
name=self.AttrName.HIDDEN_SIZE,
value_type=int,
size=1,
value_mem=self._attr_value_mem[self.AttrName.HIDDEN_SIZE],
occurence_type=OccurenceType.REQUIRED,
annotation=r"""hidden size of LSTM.""")
self._attrs[self.AttrName.BIDIRECTIONAL] = NndctIrAttr(
name=self.AttrName.BIDIRECTIONAL,
value_type=bool,
size=1,
value_mem=self._attr_value_mem[self.AttrName.BIDIRECTIONAL],
occurence_type=OccurenceType.REQUIRED,
annotation=r""" If True, means a bidirectional LSTM.""")
self._attrs[self.AttrName.NUM_LAYERS] = NndctIrAttr(
name=self.AttrName.NUM_LAYERS,
value_type=int,
size=1,
value_mem=self._attr_value_mem[self.AttrName.NUM_LAYERS],
occurence_type=OccurenceType.REQUIRED,
annotation=r"""Number of recurrent layers""")
self._attrs[self.AttrName.BATCH_FIRST] = NndctIrAttr(
name=self.AttrName.BATCH_FIRST,
value_type=bool,
size=1,
value_mem=self._attr_value_mem[self.AttrName.BATCH_FIRST],
occurence_type=OccurenceType.REQUIRED,
annotation=r""" If True, then the input and output tensors are provided as (batch, seq, feature)"""
)
class Gru(Operation):
@unique
class AttrName(AutoName):
INPUT_SIZE = auto()
HIDDEN_SIZE = auto()
BIDIRECTIONAL = auto()
NUM_LAYERS = auto()
BATCH_FIRST = auto()
@unique
class ParamName(AutoName):
WEIGHT_IH = auto()
WEIGHT_HH = auto()
WEIGHT_IH_REVERSE = auto()
WEIGHT_HH_REVERSE = auto()
BIAS_IH = auto()
BIAS_HH = auto()
BIAS_IH_REVERSE = auto()
BIAS_HH_REVERSE = auto()
def __init__(self, *args, **kwargs) -> None:
super(Gru, self).__init__(*args, **kwargs)
# allocate memory for attr value
self._attr_value_mem = {
self.AttrName.INPUT_SIZE: [None],
self.AttrName.HIDDEN_SIZE: [None],
self.AttrName.BIDIRECTIONAL: [None],
self.AttrName.NUM_LAYERS: [None],
self.AttrName.BATCH_FIRST: [None],
}
self._attrs[self.AttrName.INPUT_SIZE] = NndctIrAttr(
name=self.AttrName.INPUT_SIZE,
value_type=int,
size=1,
value_mem=self._attr_value_mem[self.AttrName.INPUT_SIZE],
occurence_type=OccurenceType.REQUIRED,
annotation=r"""input size of GRU.""")
self._attrs[self.AttrName.HIDDEN_SIZE] = NndctIrAttr(
name=self.AttrName.HIDDEN_SIZE,
value_type=int,
size=1,
value_mem=self._attr_value_mem[self.AttrName.HIDDEN_SIZE],
occurence_type=OccurenceType.REQUIRED,
annotation=r"""hidden size of GRU.""")
self._attrs[self.AttrName.BIDIRECTIONAL] = NndctIrAttr(
name=self.AttrName.BIDIRECTIONAL,
value_type=bool,
size=1,
value_mem=self._attr_value_mem[self.AttrName.BIDIRECTIONAL],
occurence_type=OccurenceType.REQUIRED,
annotation=r""" If True, means a bidirectional GRU.""")
self._attrs[self.AttrName.NUM_LAYERS] = NndctIrAttr(
name=self.AttrName.NUM_LAYERS,
value_type=int,
size=1,
value_mem=self._attr_value_mem[self.AttrName.NUM_LAYERS],
occurence_type=OccurenceType.REQUIRED,
annotation=r"""Number of recurrent layers""")
self._attrs[self.AttrName.BATCH_FIRST] = NndctIrAttr(
name=self.AttrName.BATCH_FIRST,
value_type=bool,
size=1,
value_mem=self._attr_value_mem[self.AttrName.BATCH_FIRST],
occurence_type=OccurenceType.REQUIRED,
annotation=r""" If True, then the input and output tensors are provided as (batch, seq, feature)"""
)
class StridedSlice(Operation):
@unique
class AttrName(AutoName):
BEGIN = auto()
END = auto()
STRIDES = auto()
BEGIN_MASK = auto()
END_MASK = auto()
ELLIPSIS_MASK = auto()
NEW_AXIS_MASK = auto()
SHRINK_AXIS_MASK = auto()
def __init__(self) -> None:
super(StridedSlice, self).__init__(NNDCT_OP.STRIDED_SLICE)
# allocate memory for attr value
self._attr_value_mem = {
self.AttrName.BEGIN: [],
self.AttrName.END: [],
self.AttrName.STRIDES: [],
self.AttrName.BEGIN_MASK: [None],
self.AttrName.END_MASK: [None],
self.AttrName.ELLIPSIS_MASK: [None],
self.AttrName.NEW_AXIS_MASK: [None],
self.AttrName.SHRINK_AXIS_MASK: [None]
}
self._attrs[self.AttrName.BEGIN] = NndctIrAttr(
name=self.AttrName.BEGIN,
value_type=int,
size=None,
value_mem=self._attr_value_mem[self.AttrName.BEGIN],
occurence_type=OccurenceType.REQUIRED,
annotation=r"""start location of slicing (included)""")
self._attrs[self.AttrName.END] = NndctIrAttr(
name=self.AttrName.END,
value_type=int,
size=None,
value_mem=self._attr_value_mem[self.AttrName.END],
occurence_type=OccurenceType.REQUIRED,
annotation=r"""end location of slicing (excluded)""")
self._attrs[self.AttrName.STRIDES] = NndctIrAttr(
name=self.AttrName.STRIDES,
value_type=int,
size=None,
value_mem=self._attr_value_mem[self.AttrName.STRIDES],
occurence_type=OccurenceType.REQUIRED,
annotation=r"""strides of slicing""")
self._attrs[self.AttrName.BEGIN_MASK] = NndctIrAttr(
name=self.AttrName.BEGIN_MASK,
value_type=int,
size=1,
value_mem=self._attr_value_mem[self.AttrName.BEGIN_MASK],
default_value=0,
occurence_type=OccurenceType.OPTIONAL,
annotation=r"""If the ith bit of begin_mask is set, begin[i] is ignored
and the fullest possible range in that dimension is used
instead.""")
self._attrs[self.AttrName.END_MASK] = NndctIrAttr(
name=self.AttrName.END_MASK,
value_type=int,
size=1,
value_mem=self._attr_value_mem[self.AttrName.END_MASK],
default_value=0,
occurence_type=OccurenceType.OPTIONAL,
annotation=r"""If the ith bit of end_mask is set, end[i] is ignored and
the fullest possible range in that dimension is used
instead, except with the end range.""")
self._attrs[self.AttrName.ELLIPSIS_MASK] = NndctIrAttr(
name=self.AttrName.ELLIPSIS_MASK,
value_type=int,
size=1,
value_mem=self._attr_value_mem[self.AttrName.ELLIPSIS_MASK],
default_value=0,
occurence_type=OccurenceType.OPTIONAL,
annotation=r"""If the ith bit of ellipsis_mask is set, as many
unspecified dimensions as needed will be inserted between
other dimensions. Only one non-zero bit is allowed in
ellipsis_mask.""")
self._attrs[self.AttrName.NEW_AXIS_MASK] = NndctIrAttr(
name=self.AttrName.NEW_AXIS_MASK,
value_type=int,
size=1,
value_mem=self._attr_value_mem[self.AttrName.NEW_AXIS_MASK],
default_value=0,
occurence_type=OccurenceType.OPTIONAL,
annotation=r"""If the ith bit of new_axis_mask is set, then begin, end,
and stride are ignored and a new length 1 dimension is
added at this point in the output tensor.""")
self._attrs[self.AttrName.SHRINK_AXIS_MASK] = NndctIrAttr(
name=self.AttrName.SHRINK_AXIS_MASK,
value_type=int,
size=1,
value_mem=self._attr_value_mem[self.AttrName.SHRINK_AXIS_MASK],
default_value=0,
occurence_type=OccurenceType.OPTIONAL,
annotation=r"""If the ith bit of shrink_axis_mask is set, it implies that
taking on the value at index begin[i]. end[i] and
strides[i] are ignored in this case.""")
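# Worked example of the begin_mask semantics documented above, using a plain
# Python list slice as an analogy (not the IR itself):
def _begin_mask_analogy():
  x = [0, 1, 2, 3, 4]
  assert x[1:3] == [1, 2]      # begin=[1], end=[3], strides=[1]
  assert x[:3] == [0, 1, 2]    # bit 0 of begin_mask set -> begin[0] is ignored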
class BinaryOp(Operation):
@unique
class AttrName(AutoName):
INPUT = auto()
OTHER = auto()
def __init__(self, *args, **kwargs) -> None:
super().__init__(*args, **kwargs)
# allocate memory for attr value
self._attr_value_mem = {
self.AttrName.INPUT: [None],
self.AttrName.OTHER: [None],
}
self._attrs[self.AttrName.INPUT] = NndctIrAttr(
name=self.AttrName.INPUT,
value_type=(int, float, bool, Tensor, np.ndarray),
size=1,
value_mem=self._attr_value_mem[self.AttrName.INPUT],
occurence_type=OccurenceType.REQUIRED,
map_to_xir=False,
annotation=r"""the first input tensor.""")
self._attrs[self.AttrName.OTHER] = NndctIrAttr(
name=self.AttrName.OTHER,
value_type=(int, float, Tensor, np.ndarray),
size=1,
value_mem=self._attr_value_mem[self.AttrName.OTHER],
occurence_type=OccurenceType.REQUIRED,
map_to_xir=False,
annotation=r"""the second input tensor.""")
class Sub(Operation):
@unique
class AttrName(AutoName):
INPUT = auto()
OTHER = auto()
def __init__(self, *args, **kwargs) -> None:
super().__init__(*args, **kwargs)
# allocate memory for attr value
self._attr_value_mem = {
self.AttrName.INPUT: [None],
self.AttrName.OTHER: [None],
}
self._attrs[self.AttrName.INPUT] = NndctIrAttr(
name=self.AttrName.INPUT,
value_type=(int, float, Tensor, np.ndarray),
size=1,
value_mem=self._attr_value_mem[self.AttrName.INPUT],
occurence_type=OccurenceType.REQUIRED,
annotation=r"""the first input tensor.""")
self._attrs[self.AttrName.OTHER] = NndctIrAttr(
name=self.AttrName.OTHER,
value_type=(int, float, Tensor, np.ndarray),
size=1,
value_mem=self._attr_value_mem[self.AttrName.OTHER],
occurence_type=OccurenceType.REQUIRED,
annotation=r"""the second input tensor.""")
class Pad(Operation):
@unique
class AttrName(AutoName):
PAD_WITH = "paddings"
MODE = auto()
CONSTANT_VALUES = auto()
def __init__(self) -> None:
super().__init__(NNDCT_OP.PAD)
# allocate memory for attr value
self._attr_value_mem = {
self.AttrName.PAD_WITH: [None, None, None, None, None, None, None, None],
self.AttrName.MODE: [None],
self.AttrName.CONSTANT_VALUES: [None, None, None, None, None, None, None, None]
}
self._attrs[self.AttrName.PAD_WITH] = NndctIrAttr(
name=self.AttrName.PAD_WITH,
value_type=int,
size=8,
value_mem=self._attr_value_mem[self.AttrName.PAD_WITH],
occurence_type=OccurenceType.REQUIRED,
annotation=r"""0 , 0 , left, right, top, bottom, 0, 0""")
self._attrs[self.AttrName.MODE] = NndctIrAttr(
name=self.AttrName.MODE,
value_type=int,
size=1,
value_mem=self._attr_value_mem[self.AttrName.MODE],
occurence_type=OccurenceType.REQUIRED,
annotation=r"""The padding mode. 0:'CONSTANT', 1:'REFLECT', 2:'SYMMETRIC']."""
)
self._attrs[self.AttrName.CONSTANT_VALUES] = NndctIrAttr(
name=self.AttrName.CONSTANT_VALUES,
value_type=float,
size=8,
value_mem=self._attr_value_mem[self.AttrName.CONSTANT_VALUES],
occurence_type=OccurenceType.REQUIRED,
annotation=r"""the value set into the padded locations""")
class LeakyReLU(Operation):
@unique
class AttrName(AutoName):
ALPHA = auto()
def __init__(self) -> None:
super().__init__(NNDCT_OP.LEAKY_RELU)
# allocate memory for attr value
self._attr_value_mem = {
self.AttrName.ALPHA: [None],
}
self._attrs[self.AttrName.ALPHA] = NndctIrAttr(
name=self.AttrName.ALPHA,
value_type=float,
size=1,
value_mem=self._attr_value_mem[self.AttrName.ALPHA],
occurence_type=OccurenceType.REQUIRED,
annotation=r"""negative slope""")
class Resize(Operation):
@unique
class AttrName(AutoName):
SIZE = auto()
SCALE = auto()
ALIGN_CORNERS = auto()
HALF_PIXEL_CENTERS = auto()
MODE = auto()
def __init__(self) -> None:
super().__init__(NNDCT_OP.RESIZE)
# allocate memory for attr value
self._attr_value_mem = {
self.AttrName.SIZE: [None, None],
self.AttrName.SCALE: [None, None],
self.AttrName.ALIGN_CORNERS: [None],
self.AttrName.HALF_PIXEL_CENTERS: [None],
self.AttrName.MODE: [None],
}
self._attrs[self.AttrName.SIZE] = NndctIrAttr(
name=self.AttrName.SIZE,
value_type=(int, Tensor),
size=2,
value_mem=self._attr_value_mem[self.AttrName.SIZE],
default_value=[0, 0],
occurence_type=OccurenceType.OPTIONAL,
annotation=r"""output spatial size, [size_w, size_h]""")
self._attrs[self.AttrName.SCALE] = NndctIrAttr(
name=self.AttrName.SCALE,
value_type=float,
size=2,
value_mem=self._attr_value_mem[self.AttrName.SCALE],
default_value=[1.0, 1.0],
occurence_type=OccurenceType.OPTIONAL,
annotation=r"""New size = Origin size * scale. {scale_w, scale_h}.""")
self._attrs[self.AttrName.ALIGN_CORNERS] = NndctIrAttr(
name=self.AttrName.ALIGN_CORNERS,
value_type=bool,
size=1,
value_mem=self._attr_value_mem[self.AttrName.ALIGN_CORNERS],
default_value=False,
occurence_type=OccurenceType.OPTIONAL,
annotation=r"""It must be set When mode is 3.If true, the centers of
the 4 corner pixels of the input and output tensors are
aligned, preserving the values at the corner pixels.
Defaults to false.""")
self._attrs[self.AttrName.HALF_PIXEL_CENTERS] = NndctIrAttr(
name=self.AttrName.HALF_PIXEL_CENTERS,
value_type=bool,
size=1,
value_mem=self._attr_value_mem[self.AttrName.HALF_PIXEL_CENTERS],
default_value=False,
occurence_type=OccurenceType.OPTIONAL,
annotation=r"""half_pixel_centers is false by default in,
tf.resize_bilinear() and tf.resize_nearest_neighbor().
is true by default in tf.upsampling2d(), but the version
of tf should be > r1.13""")
self._attrs[self.AttrName.MODE] = NndctIrAttr(
name=self.AttrName.MODE,
value_type=str,
size=1,
value_mem=self._attr_value_mem[self.AttrName.MODE],
occurence_type=OccurenceType.REQUIRED,
annotation=r"""OPENCV-NEAREST -> 0, OPENCV-BILINEAR -> 1,
Tensorflow-NEAREST -> 2, Tensorflow-BILINEAR -> 3,
To be improved!""")
class Resize3d(Operation):
@unique
class AttrName(AutoName):
SIZE = auto()
SCALE = auto()
ALIGN_CORNERS = auto()
HALF_PIXEL_CENTERS = auto()
MODE = auto()
def __init__(self) -> None:
super().__init__(NNDCT_OP.RESIZE_3D)
# allocate memory for attr value
self._attr_value_mem = {
self.AttrName.SIZE: [None, None, None],
self.AttrName.SCALE: [None, None, None],
self.AttrName.ALIGN_CORNERS: [None],
self.AttrName.HALF_PIXEL_CENTERS: [None],
self.AttrName.MODE: [None],
}
self._attrs[self.AttrName.SIZE] = NndctIrAttr(
name=self.AttrName.SIZE,
value_type=(int, Tensor),
size=3,
value_mem=self._attr_value_mem[self.AttrName.SIZE],
default_value=[0, 0, 0],
occurence_type=OccurenceType.OPTIONAL,
annotation=r"""output spatial size, [size_h, size_w, size_d]""")
self._attrs[self.AttrName.SCALE] = NndctIrAttr(
name=self.AttrName.SCALE,
value_type=float,
size=3,
value_mem=self._attr_value_mem[self.AttrName.SCALE],
default_value=[1.0, 1.0, 1.0],
occurence_type=OccurenceType.OPTIONAL,
annotation=r"""New size = Origin size * scale. {scale_h, scale_w, scale_d}.""")
self._attrs[self.AttrName.ALIGN_CORNERS] = NndctIrAttr(
name=self.AttrName.ALIGN_CORNERS,
value_type=bool,
size=1,
value_mem=self._attr_value_mem[self.AttrName.ALIGN_CORNERS],
default_value=False,
occurence_type=OccurenceType.OPTIONAL,
annotation=r"""It must be set When mode is 3.If true, the centers of
the 4 corner pixels of the input and output tensors are
aligned, preserving the values at the corner pixels.
Defaults to false.""")
self._attrs[self.AttrName.HALF_PIXEL_CENTERS] = NndctIrAttr(
name=self.AttrName.HALF_PIXEL_CENTERS,
value_type=bool,
size=1,
value_mem=self._attr_value_mem[self.AttrName.HALF_PIXEL_CENTERS],
default_value=False,
occurence_type=OccurenceType.OPTIONAL,
annotation=r"""half_pixel_centers is false by default in,
tf.resize_bilinear() and tf.resize_nearest_neighbor().
is true by default in tf.upsampling2d(), but the version
of tf should be > r1.13""")
self._attrs[self.AttrName.MODE] = NndctIrAttr(
name=self.AttrName.MODE,
value_type=str,
size=1,
value_mem=self._attr_value_mem[self.AttrName.MODE],
occurence_type=OccurenceType.REQUIRED,
annotation=r"""Trilinear""")
class Constant(Operation):
@unique
class AttrName(AutoName):
DATA = auto()
def __init__(self, nndct_op_type) -> None:
super().__init__(nndct_op_type)
# allocate memory for attr value
self._attr_value_mem = {
self.AttrName.DATA: [],
}
self._attrs[self.AttrName.DATA] = NndctIrAttr(
name=self.AttrName.DATA,
value_type=(int, float, list, Tensor),
size=None,
value_mem=self._attr_value_mem[self.AttrName.DATA],
occurence_type=OccurenceType.REQUIRED,
annotation=r"""Constant Parameter""")
class Squeeze(Operation):
@unique
class AttrName(AutoName):
DIMS = "axis"
def __init__(self) -> None:
super().__init__(NNDCT_OP.SQUEEZE)
# allocate memory for attr value
self._attr_value_mem = {
self.AttrName.DIMS: [],
}
self._attrs[self.AttrName.DIMS] = NndctIrAttr(
name=self.AttrName.DIMS,
value_type=int,
size=None,
value_mem=self._attr_value_mem[self.AttrName.DIMS],
occurence_type=OccurenceType.REQUIRED,
annotation=r"""The dimensions to be squeezed. The dimension index " //
"starts at 0.""")
class EmbeddingBag(Operation):
@unique
class ParamName(AutoName):
WEIGHT = auto()
class LayerNorm(Operation):
@unique
class ParamName(AutoName):
GAMMA = auto()
BETA = auto()
# e.g. ones, zeros
class ConstFromShape(Operation):
@unique
class AttrName(AutoName):
SHAPE = auto()
def __init__(self, *args, **kwargs) -> None:
super().__init__(*args, **kwargs)
# allocate memory for attr value
self._attr_value_mem = {
self.AttrName.SHAPE: [],
}
self._attrs[self.AttrName.SHAPE] = NndctIrAttr(
name=self.AttrName.SHAPE,
value_type=(int, Tensor),
size=None,
value_mem=self._attr_value_mem[self.AttrName.SHAPE],
occurence_type=OccurenceType.REQUIRED,
annotation=r"""the target shape""")
class UnaryOp(Operation):
@unique
class AttrName(AutoName):
INPUT = auto()
def __init__(self, *args, **kwargs) -> None:
super().__init__(*args, **kwargs)
# allocate memory for attr value
self._attr_value_mem = {
self.AttrName.INPUT: [None],
}
self._attrs[self.AttrName.INPUT] = NndctIrAttr(
name=self.AttrName.INPUT,
value_type=(int, str, float, bool, Tensor, np.ndarray),
size=1,
value_mem=self._attr_value_mem[self.AttrName.INPUT],
occurence_type=OccurenceType.REQUIRED,
map_to_xir=False,
annotation=r"""the first input tensor.""")
class Reorg(Operation):
@unique
class AttrName(AutoName):
SCALE = auto()
REVERSE = auto()
def __init__(self, nndct_op_type) -> None:
super().__init__(nndct_op_type)
# allocate memory for attr value
self._attr_value_mem = {
self.AttrName.SCALE: [None],
self.AttrName.REVERSE: [None],
}
self._attrs[self.AttrName.SCALE] = NndctIrAttr(
name=self.AttrName.SCALE,
value_type=(int, Tensor),
size=1,
value_mem=self._attr_value_mem[self.AttrName.SCALE],
occurence_type=OccurenceType.REQUIRED,
annotation=r"""scale for reorg""")
self._attrs[self.AttrName.REVERSE] = NndctIrAttr(
name=self.AttrName.REVERSE,
value_type=bool,
size=1,
value_mem=self._attr_value_mem[self.AttrName.REVERSE],
occurence_type=OccurenceType.REQUIRED,
annotation=r"""reverse""")
class Gstiling(Operation):
@unique
class AttrName(AutoName):
STRIDE = auto()
REVERSE = auto()
def __init__(self, nndct_op_type) -> None:
super().__init__(nndct_op_type)
# allocate memory for attr value
self._attr_value_mem = {
self.AttrName.STRIDE: [None],
self.AttrName.REVERSE: [None],
}
self._attrs[self.AttrName.STRIDE] = NndctIrAttr(
name=self.AttrName.STRIDE,
value_type=(int, Tensor),
size=1,
value_mem=self._attr_value_mem[self.AttrName.STRIDE],
occurence_type=OccurenceType.REQUIRED,
annotation=r"""stride for feature maps""")
self._attrs[self.AttrName.REVERSE] = NndctIrAttr(
name=self.AttrName.REVERSE,
value_type=bool,
size=1,
value_mem=self._attr_value_mem[self.AttrName.REVERSE],
occurence_type=OccurenceType.REQUIRED,
annotation=r"""reverse""")
class PixelShuffle(Operation):
@unique
class AttrName(AutoName):
SCALE = auto()
UPSCALE = auto()
def __init__(self, nndct_op_type) -> None:
super().__init__(nndct_op_type)
# allocate memory for attr value
self._attr_value_mem = {
self.AttrName.SCALE: [None],
self.AttrName.UPSCALE: [None],
}
self._attrs[self.AttrName.SCALE] = NndctIrAttr(
name=self.AttrName.SCALE,
value_type=(int, Tensor),
size=1,
value_mem=self._attr_value_mem[self.AttrName.SCALE],
occurence_type=OccurenceType.REQUIRED,
annotation=r"""scale for feature maps""")
self._attrs[self.AttrName.UPSCALE] = NndctIrAttr(
name=self.AttrName.UPSCALE,
value_type=bool,
size=1,
value_mem=self._attr_value_mem[self.AttrName.UPSCALE],
occurence_type=OccurenceType.REQUIRED,
annotation=r"""upscale or downscale PixelShuffle.""")
class Embedding(Operation):
@unique
class ParamName(AutoName):
WEIGHT = auto()
class CustomOp(Operation):
AttrName = Enum("AttrName", '')
def __init__(self, nndct_op_type) -> None:
super().__init__(nndct_op_type)
self._attr_value_mem = {}
self.is_custom_op = True
def get_attr_name_from_str(self, attr_name):
attr_names = [(name, val.value) for name, val in self.AttrName.__members__.items()]
    if (not attr_names) or all(attr_name != attr[1] for attr in attr_names):
attr_names += [(attr_name.upper(), attr_name.lower())]
self.AttrName = Enum("AttrName", attr_names)
return getattr(self.AttrName, attr_name.upper())
def _register_attr_by_name(self, attr_name):
if attr_name in self.AttrName.__members__:
return
attr_name = self.get_attr_name_from_str(attr_name)
self._attr_value_mem[attr_name] = [None]
self._attrs[attr_name] = NndctIrAttr(
name=attr_name,
value_type=Any,
size=None,
occurence_type=OccurenceType.REQUIRED,
value_mem=self._attr_value_mem[attr_name])
def set_attr_by_name(self, attr_name, value):
if attr_name not in self.AttrName.__members__:
self._register_attr_by_name(attr_name)
attr_name = self.get_attr_name_from_str(attr_name)
self.set_attr(attr_name, value)
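# Hypothetical usage sketch (not part of this module): CustomOp grows its AttrName
# enum on the fly, so attributes unknown at class-definition time can still be set
# by name. Assuming some NNDCT_OP type `MY_CUSTOM` exists:
#   op = CustomOp(NNDCT_OP.MY_CUSTOM)
#   op.set_attr_by_name("epsilon", 1e-5)      # registers AttrName.EPSILON, then sets it
#   eps = op.get_attr(op.AttrName.EPSILON)    # assumes Operation exposes get_attr()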
|
from string import ascii_lowercase
from itertools import product
import gizeh
import numpy as np
import random
random.seed(1234)
alphabet = ascii_lowercase + "_"
bigrams = [''.join(bigram) for bigram in product(alphabet, repeat=2)]
random.shuffle(bigrams)
scale = 2
width = 512 * scale
height = 512 * scale
def draw(bs, name, theta_offset=0):
    """Draw the strings in `bs` evenly spaced around a circle and save to gen/<name>.png."""
    surface = gizeh.Surface(width=width, height=height)
    r = width / 2 * 3 / 4
    offset = [width / 2, height / 2]
    theta_step = (2 * np.pi) / len(bs)
    # theta_offset=1 rotates the ring by half a step so two rings can interleave
    for i, theta in enumerate(np.linspace(0, 2 * np.pi, len(bs) + 1)[:-1]):
        t = theta + (theta_offset * theta_step / 2)
        xy = [r * np.sin(t) + offset[0], r * np.cos(t) + offset[1]]
        text = gizeh.text(
            bs[i],
            fontfamily="monospace",
            fontsize=20 * scale,
            fill=(0, 0, 0),
            xy=xy,
            angle=0
        )
        text.draw(surface)
    surface.write_to_png("gen/" + name + ".png")
even = bigrams[:16]
even0 = [x for i, x in enumerate(even) if i % 2 == 0]
even1 = [x for i, x in enumerate(even) if i % 2 == 1]
bigrams = bigrams[16:]
draw(even, "even")
draw(even0, "even0")
draw(even1, "even1", theta_offset=1)
odd = bigrams[:15]
bigrams = bigrams[15:]
draw(odd, "odd")
|
# Copyright 2019-present MongoDB Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for metrics_output_parser"""
import unittest
import genny.metrics_output_parser as parser
import tests.parser_test_lib as test_lib
class GennyOutputParserTest(unittest.TestCase):
def raises_parse_error(self, input_str):
with self.assertRaises(parser.ParseError):
test_lib.parse_string(input_str)
def test_no_clocks(self):
self.raises_parse_error("""
Timers
1234,A.0.o,345
""")
def test_missing_clocks(self):
self.raises_parse_error("""
Clocks
Timers
1234,A.0.o,345
""")
def test_timers_before_clocks(self):
self.raises_parse_error("""
Timers
1234,A.0.o,345
Clocks
SystemTime,23439048
MetricsTime,303947
""")
def test_csv_no_sections_have_data(self):
self.assertEqual(
test_lib.parse_string("""
Clocks
Gauges
Counters
Timers
""").timers(), {})
def test_empty_input(self):
self.assertEqual(test_lib.parse_string("").timers(), {})
def test_fixture1(self):
actual = test_lib.parse_fixture('csvoutput1').timers()
self.assertEqual(
actual, {
'InsertTest.output': {
'mean': 1252307.75,
'n': 4,
'threads': {'id-0', 'id-1'},
'started': 1537814141061109,
'ended': 1537814143687260
},
'HelloTest.output': {
'mean': 55527.25,
'n': 4,
'threads': {'0', '1'},
'started': 1537814141061476,
'ended': 1537814143457943
}
})
def test_fixture2(self):
actual = test_lib.parse_fixture('csvoutput2').timers()
self.assertEqual(
actual, {
'InsertRemoveTest.remove': {
'n':
823,
'threads':
                    set(str(x) for x in range(100)),
'started':
1540233103870294,
'ended':
1540233383199723,
'mean':
4297048.190765492
},
'InsertRemoveTest.insert': {
'n':
823,
'threads':
                    set(str(x) for x in range(100)),
'started':
1540233073074953,
'ended':
1540233380763649,
'mean':
8656706.69744836
},
'Genny.Setup': {
'n': 1,
'threads': {'0'},
'started': 1540233035593684,
'ended': 1540233044288445,
'mean': 8694761.0
}
})
def test_fixture3(self):
actual = test_lib.parse_fixture('csvoutput3').timers()
self.assertEqual(
actual, {
'InsertRemoveTest.remove.op-time': {
'n': 6,
'threads': {'id-99'},
'started': 1547004939911848888,
'ended': 1547004939932315379,
'mean': 1814961.8333333333
}
})
|
import math
import chainer
import chainer.functions as F
import chainer.links as L
import numpy as np
from .sn_convolution_2d import SNConvolution2D, SNDeconvolution2D
from .sn_linear import SNLinear
def _upsample(x):
    # 2x nearest-neighbour spatial upsampling via unpooling
    h, w = x.shape[2:]
    return F.unpooling_2d(x, 2, outsize=(h * 2, w * 2))
def _downsample(x):
    # 2x spatial downsampling by average pooling
    return F.average_pooling_2d(x, 2)
def upsample_conv(x, conv):
return conv(_upsample(x))
def _upsample_frq(x):
h, w = x.shape[2:]
return F.unpooling_2d(x, (1,2), outsize=(h, w * 2))
def _downsample_frq(x):
return F.average_pooling_2d(x, (1,2))
def upsample_conv_frq(x, conv):
return conv(_upsample_frq(x))
class ResBlock(chainer.Chain):
def __init__(self, in_channels, out_channels, ksize=3, pad=1, activation=F.leaky_relu, mode='none', bn=False, dr=None):
super(ResBlock, self).__init__()
initializer = chainer.initializers.GlorotUniform()
initializer_sc = chainer.initializers.GlorotUniform()
self.activation = activation
self.mode = _downsample if mode == 'down' else _upsample if mode == 'up' else None
self.learnable_sc = in_channels != out_channels
self.dr = dr
self.bn = bn
with self.init_scope():
self.c1 = L.Convolution2D(in_channels, out_channels, ksize=ksize, pad=pad, initialW=initializer, nobias=bn)
self.c2 = L.Convolution2D(out_channels, out_channels, ksize=ksize, pad=pad, initialW=initializer, nobias=bn)
if bn:
self.b1 = L.BatchNormalization(out_channels)
self.b2 = L.BatchNormalization(out_channels)
if self.learnable_sc:
self.c_sc = L.Convolution2D(in_channels, out_channels, ksize=1, pad=0, initialW=initializer_sc)
def residual(self, x):
h = x
h = self.c1(h)
if self.bn:
h = self.b1(h)
if self.activation:
h = self.activation(h)
if self.mode:
h = self.mode(h)
if self.dr:
with chainer.using_config('train', True):
h = F.dropout(h, self.dr)
h = self.c2(h)
if self.bn:
h = self.b2(h)
if self.activation:
h = self.activation(h)
return h
def shortcut(self, x):
if self.mode:
x = self.mode(x)
if self.learnable_sc:
x = self.c_sc(x)
return x
def __call__(self, x):
return self.residual(x) + self.shortcut(x)
class ConvBlock(chainer.Chain):
def __init__(self, in_channels, out_channels, mode='none', activation=F.leaky_relu, bn=False, dr=None):
super(ConvBlock, self).__init__()
# initializer = chainer.initializers.GlorotUniform()
initializer = chainer.initializers.HeUniform()
self.activation = activation
self.bn = bn
self.dr = dr
with self.init_scope():
if mode == 'none':
self.c = L.Convolution2D(in_channels, out_channels, ksize=3, stride=1, pad=1, initialW=initializer, nobias=bn)
elif mode == 'none-7':
self.c = L.Convolution2D(in_channels, out_channels, ksize=(7,7), stride=1, pad=(3,3), initialW=initializer, nobias=bn)
elif mode == 'down':
self.c = L.Convolution2D(in_channels, out_channels, ksize=4, stride=2, pad=1, initialW=initializer, nobias=bn)
elif mode == 'up':
self.c = L.Deconvolution2D(in_channels, out_channels, ksize=4, stride=2, pad=1, initialW=initializer, nobias=bn)
elif mode == 'full-down':
self.c = L.Convolution2D(in_channels, out_channels, ksize=4, stride=1, pad=0, initialW=initializer, nobias=bn)
elif mode == 'frq':
self.c = L.Convolution2D(in_channels, out_channels, ksize=(1,9), stride=1, pad=(0,4), initialW=initializer, nobias=bn)
elif mode == 'frq-down':
self.c = L.Convolution2D(in_channels, out_channels, ksize=(1,9), stride=1, pad=(0,4), initialW=initializer, nobias=bn)
self.activation = lambda x: activation(_downsample(x))
elif mode == 'frq-up':
self.c = L.Convolution2D(in_channels, out_channels, ksize=(1,9), stride=1, pad=(0,4), initialW=initializer, nobias=bn)
self.activation = lambda x: activation(_upsample(x))
elif mode == 'pad':
self.c = L.Convolution2D(in_channels, out_channels, ksize=3, stride=1, pad=2, initialW=initializer, nobias=bn)
elif mode == 'trim':
self.c = L.Convolution2D(in_channels, out_channels, ksize=3, stride=1, pad=0, initialW=initializer, nobias=bn)
else:
raise Exception('mode is missing')
if bn:
self.b = L.BatchNormalization(out_channels)
def __call__(self, h):
if self.dr:
with chainer.using_config('train', True):
h = F.dropout(h, self.dr)
h = self.c(h)
if self.bn:
h = self.b(h)
if self.activation:
h = self.activation(h)
return h
class CoPSBlock(chainer.Chain):
def __init__(self, in_channels, out_channels, activation=F.leaky_relu, bn=True):
super(CoPSBlock, self).__init__()
initializer = chainer.initializers.GlorotUniform()
self.activation = activation
self.bn = bn
with self.init_scope():
self.ps = L.Convolution2D(in_channels, in_channels*4, ksize=1, stride=1, initialW=initializer)
self.c = L.Convolution2D(in_channels, out_channels, ksize=3, stride=1, pad=1, initialW=initializer)
if bn:
self.b = L.BatchNormalization(out_channels)
    def pixel_shuffle(self, x):
        # Depth-to-space by a factor of 2: the 1x1 conv expands channels 4x and the
        # reshape/transpose below rearrange them into a 2x2 spatial neighbourhood.
        out = self.ps(x)
b = out.shape[0]
c = out.shape[1]
h = out.shape[2]
w = out.shape[3]
out = F.reshape(out, (b, 2, 2, c//4, h, w))
out = F.transpose(out, (0, 3, 4, 1, 5, 2))
out = F.reshape(out, (b, c//4, h*2, w*2))
return out
def __call__(self, h):
h = self.pixel_shuffle(h)
h = self.c(h)
if self.bn:
h = self.b(h)
if self.activation:
h = self.activation(h)
return h
class SNResBlock(chainer.Chain):
def __init__(self, in_channels, out_channels, activation=F.leaky_relu, sample='none', dr=None):
super(SNResBlock, self).__init__()
initializer = chainer.initializers.GlorotUniform()
initializer_sc = chainer.initializers.GlorotUniform()
self.activation = activation
self.dr = dr
self.sample = _downsample if sample == 'down' else _upsample if sample == 'up' else None
self.learnable_sc = in_channels != out_channels or sample == 'down' or sample == 'up'
with self.init_scope():
self.c1 = SNConvolution2D(in_channels, out_channels, ksize=3, pad=1, initialW=initializer)
self.c2 = SNConvolution2D(out_channels, out_channels, ksize=3, pad=1, initialW=initializer)
if self.learnable_sc:
self.c_sc = SNConvolution2D(in_channels, out_channels, ksize=1, pad=0, initialW=initializer_sc)
def residual(self, x):
h = x
h = self.activation(h)
h = self.c1(h)
if self.sample:
h = self.sample(h)
if self.dr:
with chainer.using_config('train', True):
h = F.dropout(h, self.dr)
h = self.activation(h)
h = self.c2(h)
return h
def shortcut(self, x):
if self.learnable_sc:
x = self.c_sc(x)
if self.sample:
return self.sample(x)
else:
return x
else:
return x
def __call__(self, x):
return self.residual(x) + self.shortcut(x)
class SNConvBlock(chainer.Chain):
def __init__(self, in_channels, out_channels, mode='none', activation=F.leaky_relu, bn=False, dr=None):
super(SNConvBlock, self).__init__()
# initializer = chainer.initializers.GlorotUniform()
initializer = chainer.initializers.HeUniform()
self.activation = activation
self.bn = bn
self.dr = dr
with self.init_scope():
if mode == 'none':
self.c = SNConvolution2D(in_channels, out_channels, ksize=3, stride=1, pad=1, initialW=initializer, nobias=bn)
elif mode == 'none-7':
self.c = SNConvolution2D(in_channels, out_channels, ksize=(7,7), stride=1, pad=(3,3), initialW=initializer, nobias=bn)
elif mode == 'down':
self.c = SNConvolution2D(in_channels, out_channels, ksize=4, stride=2, pad=1, initialW=initializer, nobias=bn)
elif mode == 'up':
self.c = SNDeconvolution2D(in_channels, out_channels, ksize=4, stride=2, pad=1, initialW=initializer, nobias=bn)
elif mode == 'full-down':
self.c = SNConvolution2D(in_channels, out_channels, ksize=4, stride=1, pad=0, initialW=initializer, nobias=bn)
elif mode == 'frq':
self.c = SNConvolution2D(in_channels, out_channels, ksize=(1,9), stride=1, pad=(0,4), initialW=initializer, nobias=bn)
elif mode == 'frq-down':
self.c = SNConvolution2D(in_channels, out_channels, ksize=(1,9), stride=1, pad=(0,4), initialW=initializer, nobias=bn)
self.activation = lambda x: activation(_downsample(x))
elif mode == 'frq-up':
self.c = SNConvolution2D(in_channels, out_channels, ksize=(1,9), stride=1, pad=(0,4), initialW=initializer, nobias=bn)
self.activation = lambda x: activation(_upsample(x))
else:
raise Exception('mode is missing')
if bn:
self.b = L.BatchNormalization(out_channels)
def __call__(self, h):
if self.dr:
with chainer.using_config('train', True):
h = F.dropout(h, self.dr)
h = self.c(h)
if self.bn:
h = self.b(h)
if self.activation:
h = self.activation(h)
return h
class SNLinearBlock(chainer.Chain):
def __init__(self, in_channels, out_channels, activation=F.leaky_relu, dr=None):
super(SNLinearBlock, self).__init__()
initializer = chainer.initializers.GlorotUniform()
self.activation = activation
self.dr = dr
if type(out_channels) is tuple:
self.out_shape = (-1,)+out_channels
else:
self.out_shape = None
with self.init_scope():
self.l = SNLinear(in_channels, np.prod(out_channels), initialW=initializer)
def __call__(self, x):
if self.dr:
x = F.dropout(x, self.dr)
x = self.l(x)
x = self.activation(x)
if self.out_shape:
x = F.reshape(x, self.out_shape)
return x
class SNMDBlock(chainer.Chain):
def __init__(self, in_channels, in_size=4, B=100, C=5, gap=True, dr=None):
super(SNMDBlock, self).__init__()
# initializer = chainer.initializers.GlorotUniform()
initializer = chainer.initializers.HeUniform()
self.B = B
self.C = C
self.dr = dr
self.gap = gap
if gap:
in_size = 1
if type(in_size) is int:
in_size = (in_size, in_size)
with self.init_scope():
self.l = SNLinear(in_size[0] * in_size[1] * in_channels + B, 1, initialW=initializer)
self.md = SNLinear(in_size[0] * in_size[1] * in_channels, B * C, initialW=initializer)
def __call__(self, x):
if self.dr:
with chainer.using_config('train', True):
x = F.dropout(x, self.dr)
if self.gap:
x = F.sum(x, axis=(2,3))
N = x.shape[0]
        # Minibatch discrimination (code below copied from
        # https://github.com/pfnet-research/chainer-gan-lib/blob/master/minibatch_discrimination/net.py):
        # project each sample to B*C features, take pairwise L1 distances across the
        # batch over the C dimension, and append the resulting B similarity statistics.
feature = F.reshape(F.leaky_relu(x), (N, -1))
m = F.reshape(self.md(feature), (N, self.B * self.C, 1))
m0 = F.broadcast_to(m, (N, self.B * self.C, N))
m1 = F.transpose(m0, (2, 1, 0))
d = F.absolute(F.reshape(m0 - m1, (N, self.B, self.C, N)))
d = F.sum(F.exp(-F.sum(d, axis=2)), axis=2) - 1
h = F.concat([feature, d])
h = self.l(h)
return h
class SNL1DBlock(chainer.Chain):
def __init__(self, in_ch, out_ch, width, activation=F.leaky_relu, dr=None):
super(SNL1DBlock, self).__init__()
initializer = chainer.initializers.GlorotUniform()
self.activation = activation
self.dr = dr
self.out_ch = out_ch
with self.init_scope():
self.l = SNLinear(in_ch*width, out_ch*width, initialW=initializer)
def __call__(self, x):
if self.dr:
x = F.dropout(x, self.dr)
x = F.transpose(x, (0, 2, 1, 3))
out_shape = list(x.shape)
x = F.reshape(x, (-1, x.shape[2]*x.shape[3]))
x = self.l(x)
x = self.activation(x)
out_shape[2] = self.out_ch
x = F.reshape(x, out_shape)
x = F.transpose(x, (0, 2, 1, 3))
return x
class L1DBlock(chainer.Chain):
def __init__(self, in_ch, out_ch, width, activation=F.leaky_relu, dr=None):
super(L1DBlock, self).__init__()
# initializer = chainer.initializers.GlorotUniform()
initializer = chainer.initializers.HeUniform()
self.activation = activation
self.dr = dr
self.out_ch = out_ch
with self.init_scope():
self.l = L.Linear(in_ch*width, out_ch*width, initialW=initializer)
def __call__(self, x):
if self.dr:
x = F.dropout(x, self.dr)
x = F.transpose(x, (0, 2, 1, 3))
out_shape = list(x.shape)
x = F.reshape(x, (-1, x.shape[2]*x.shape[3]))
x = self.l(x)
x = self.activation(x)
out_shape[2] = self.out_ch
x = F.reshape(x, out_shape)
x = F.transpose(x, (0, 2, 1, 3))
return x
class CLBlock(chainer.Chain):
def __init__(self, in_ch, out_ch, width, activation=F.leaky_relu, liner_out_ch=1, dr=None):
super(CLBlock, self).__init__()
self.dr = dr
if out_ch - liner_out_ch <= 0:
raise Exception('out_ch <= liner_out_ch!')
with self.init_scope():
self.c = ConvBlock(in_ch, out_ch-liner_out_ch, activation=activation)
self.l = L1DBlock(in_ch, liner_out_ch, width, activation)
def __call__(self, x):
h = x
if self.dr:
h = F.dropout(h, self.dr)
h1 = self.c(h)
h2 = self.l(h)
h = F.concat([h1,h2])
return h
class SNCLBlock(chainer.Chain):
def __init__(self, in_ch, out_ch, width, activation=F.leaky_relu, dr=None):
super(SNCLBlock, self).__init__()
self.dr = dr
with self.init_scope():
self.c = SNConvBlock(in_ch, out_ch-1, activation=activation)
self.l = SNL1DBlock(in_ch, 1, width, activation)
def __call__(self, x):
h = x
if self.dr:
h = F.dropout(h, self.dr)
h1 = self.c(h)
h2 = self.l(h)
h = F.concat([h1,h2])
return h
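# Hypothetical usage sketch (not part of this module): chaining the blocks above
# into a small encoder, assuming an input batch of shape (N, 1, 64, 64).
#   class ExampleEncoder(chainer.Chain):
#       def __init__(self):
#           super(ExampleEncoder, self).__init__()
#           with self.init_scope():
#               self.c1 = ConvBlock(1, 32, mode='down')   # -> (N, 32, 32, 32)
#               self.r1 = ResBlock(32, 64, mode='down')   # -> (N, 64, 16, 16)
#       def __call__(self, x):
#           return self.r1(self.c1(x))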
|
#!/usr/bin/env python3
"""Combine logs from multiple swyft nodes as well as the test_framework log.
This streams the combined log output to stdout. Use combine_logs.py > outputfile
to write to an outputfile."""
import argparse
from collections import defaultdict, namedtuple
import heapq
import itertools
import os
import re
import sys
# Matches on the date format at the start of the log event
TIMESTAMP_PATTERN = re.compile(r"^\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d{6}Z")
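# e.g. it matches the leading "2018-09-24T18:35:41.061109Z" of a log line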
LogEvent = namedtuple('LogEvent', ['timestamp', 'source', 'event'])
def main():
"""Main function. Parses args, reads the log files and renders them as text or html."""
parser = argparse.ArgumentParser(usage='%(prog)s [options] <test temporary directory>', description=__doc__)
parser.add_argument('-c', '--color', dest='color', action='store_true', help='outputs the combined log with events colored by source (requires posix terminal colors. Use less -r for viewing)')
parser.add_argument('--html', dest='html', action='store_true', help='outputs the combined log as html. Requires jinja2. pip install jinja2')
args, unknown_args = parser.parse_known_args()
if args.color and os.name != 'posix':
print("Color output requires posix terminal colors.")
sys.exit(1)
if args.html and args.color:
print("Only one out of --color or --html should be specified")
sys.exit(1)
# There should only be one unknown argument - the path of the temporary test directory
if len(unknown_args) != 1:
print("Unexpected arguments" + str(unknown_args))
sys.exit(1)
log_events = read_logs(unknown_args[0])
print_logs(log_events, color=args.color, html=args.html)
def read_logs(tmp_dir):
"""Reads log files.
Delegates to generator function get_log_events() to provide individual log events
for each of the input log files."""
files = [("test", "%s/test_framework.log" % tmp_dir)]
for i in itertools.count():
logfile = "{}/node{}/regtest/debug.log".format(tmp_dir, i)
if not os.path.isfile(logfile):
break
files.append(("node%d" % i, logfile))
return heapq.merge(*[get_log_events(source, f) for source, f in files])
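# Note: heapq.merge compares LogEvent tuples by their first field (the timestamp),
# and only interleaves correctly because each get_log_events() generator yields
# events in the order they appear in its already time-sorted log file, so the
# combined stream comes out globally sorted without reading whole logs into memory.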
def get_log_events(source, logfile):
"""Generator function that returns individual log events.
Log events may be split over multiple lines. We use the timestamp
regex match as the marker for a new log event."""
try:
with open(logfile, 'r') as infile:
event = ''
timestamp = ''
for line in infile:
# skip blank lines
if line == '\n':
continue
# if this line has a timestamp, it's the start of a new log event.
time_match = TIMESTAMP_PATTERN.match(line)
if time_match:
if event:
yield LogEvent(timestamp=timestamp, source=source, event=event.rstrip())
event = line
timestamp = time_match.group()
# if it doesn't have a timestamp, it's a continuation line of the previous log.
else:
event += "\n" + line
# Flush the final event
yield LogEvent(timestamp=timestamp, source=source, event=event.rstrip())
except FileNotFoundError:
print("File %s could not be opened. Continuing without it." % logfile, file=sys.stderr)
def print_logs(log_events, color=False, html=False):
"""Renders the iterator of log events into text or html."""
if not html:
colors = defaultdict(lambda: '')
if color:
colors["test"] = "\033[0;36m" # CYAN
colors["node0"] = "\033[0;34m" # BLUE
colors["node1"] = "\033[0;32m" # GREEN
colors["node2"] = "\033[0;31m" # RED
colors["node3"] = "\033[0;33m" # YELLOW
colors["reset"] = "\033[0m" # Reset font color
for event in log_events:
print("{0} {1: <5} {2} {3}".format(colors[event.source.rstrip()], event.source, event.event, colors["reset"]))
else:
try:
import jinja2
except ImportError:
print("jinja2 not found. Try `pip install jinja2`")
sys.exit(1)
print(jinja2.Environment(loader=jinja2.FileSystemLoader('./'))
.get_template('combined_log_template.html')
.render(title="Combined Logs from testcase", log_events=[event._asdict() for event in log_events]))
if __name__ == '__main__':
main()
|
# coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: release-1.21
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from kubernetes.client.configuration import Configuration
class V1beta1PodSecurityPolicyList(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'api_version': 'str',
'items': 'list[V1beta1PodSecurityPolicy]',
'kind': 'str',
'metadata': 'V1ListMeta'
}
attribute_map = {
'api_version': 'apiVersion',
'items': 'items',
'kind': 'kind',
'metadata': 'metadata'
}
def __init__(self, api_version=None, items=None, kind=None, metadata=None, local_vars_configuration=None): # noqa: E501
"""V1beta1PodSecurityPolicyList - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._api_version = None
self._items = None
self._kind = None
self._metadata = None
self.discriminator = None
if api_version is not None:
self.api_version = api_version
self.items = items
if kind is not None:
self.kind = kind
if metadata is not None:
self.metadata = metadata
@property
def api_version(self):
"""Gets the api_version of this V1beta1PodSecurityPolicyList. # noqa: E501
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
:return: The api_version of this V1beta1PodSecurityPolicyList. # noqa: E501
:rtype: str
"""
return self._api_version
@api_version.setter
def api_version(self, api_version):
"""Sets the api_version of this V1beta1PodSecurityPolicyList.
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
:param api_version: The api_version of this V1beta1PodSecurityPolicyList. # noqa: E501
:type: str
"""
self._api_version = api_version
@property
def items(self):
"""Gets the items of this V1beta1PodSecurityPolicyList. # noqa: E501
items is a list of schema objects. # noqa: E501
:return: The items of this V1beta1PodSecurityPolicyList. # noqa: E501
:rtype: list[V1beta1PodSecurityPolicy]
"""
return self._items
@items.setter
def items(self, items):
"""Sets the items of this V1beta1PodSecurityPolicyList.
items is a list of schema objects. # noqa: E501
:param items: The items of this V1beta1PodSecurityPolicyList. # noqa: E501
:type: list[V1beta1PodSecurityPolicy]
"""
if self.local_vars_configuration.client_side_validation and items is None: # noqa: E501
raise ValueError("Invalid value for `items`, must not be `None`") # noqa: E501
self._items = items
@property
def kind(self):
"""Gets the kind of this V1beta1PodSecurityPolicyList. # noqa: E501
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
:return: The kind of this V1beta1PodSecurityPolicyList. # noqa: E501
:rtype: str
"""
return self._kind
@kind.setter
def kind(self, kind):
"""Sets the kind of this V1beta1PodSecurityPolicyList.
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
:param kind: The kind of this V1beta1PodSecurityPolicyList. # noqa: E501
:type: str
"""
self._kind = kind
@property
def metadata(self):
"""Gets the metadata of this V1beta1PodSecurityPolicyList. # noqa: E501
:return: The metadata of this V1beta1PodSecurityPolicyList. # noqa: E501
:rtype: V1ListMeta
"""
return self._metadata
@metadata.setter
def metadata(self, metadata):
"""Sets the metadata of this V1beta1PodSecurityPolicyList.
:param metadata: The metadata of this V1beta1PodSecurityPolicyList. # noqa: E501
:type: V1ListMeta
"""
self._metadata = metadata
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1beta1PodSecurityPolicyList):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1beta1PodSecurityPolicyList):
return True
return self.to_dict() != other.to_dict()
|
"""
Copyright (c) 2020 Autonomous Vision Group (AVG), Max Planck Institute for Intelligent Systems, Tuebingen, Germany
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import json
import os
from tqdm import tqdm
import cv2
import torch
import itertools
from data import DataAdapterFactory
from experiment_state import ExperimentState
from experiment_settings import ExperimentSettings, recursive_dict_update
from optimization import optimize
from higo import higo_baseline
import general_settings
from evaluation import evaluate_state
from utils.logging import error
settings_dict_base = {
'data_settings': {
'data_type': "XIMEA",
'center_view': None,
'nr_neighbours': 40,
'base_input_path': "<input_data_base_folder>/",
'object_name': None,
'base_output_path': "<output_base_folder>/",
'gt_scan_folder': "<gt_scan_folder>/",
'calibration_path_geometric': "<calibration_base_folder>/geometric/",
'vignetting_file': '<calibration_base_folder>/photometric/vignetting.npz',
'depth_folder': 'tsdf-fusion-depth_oldCV_40_views',
'center_stride': 2,
'depth_scale': 1e-3,
'light_scale': 1e0,
'lazy_image_loading': False,
'input_reup_sample': 1.,
'input_down_sample': 1.,
'manual_center_view_crop': None,
},
'parametrization_settings': {
'locations': 'depth map',
'normals': 'per point',
'materials': 'base specular materials',
'brdf': 'cook torrance F1',
'observation_poses': 'quaternion',
'lights': 'point light',
},
'initialization_settings': {
'normals': 'from_closed_form',
'diffuse': 'from_closed_form',
'specular': 'hardcoded',
'lights': 'precalibrated',
'light_calibration_files': {
"positions": "<calibration_base_folder>/photometric/lights_array.pkl",
"intensities": "<calibration_base_folder>/photometric/LED_light_intensities.npy",
"attenuations": "<calibration_base_folder>/photometric/LED_angular_dependency.npy",
}
},
'default_optimization_settings': {
'parameters': [
'locations',
'poses',
'normals',
'vignetting',
'diffuse_materials',
'specular_weights',
'specular_materials',
'light_positions',
'light_intensities',
'light_attenuations',
],
'losses': {
"photoconsistency L1": 1e-4,
"geometric consistency": 1e1,
"depth compatibility": 1e10,
"normal smoothness": 1e-1,
"material sparsity": 1e-1,
"material smoothness": 1e0,
},
"iterations": 1000,
'visualize_initial': False,
'visualize_results': True,
'target_set': "training",
},
'optimization_steps': [],
}
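# The variants below are derived from settings_dict_base via recursive_dict_update,
# which presumably deep-merges nested dicts so only the listed keys are overridden,
# e.g. (hypothetical): recursive_dict_update({'a': {'x': 1, 'y': 2}}, {'a': {'y': 3}})
# would yield {'a': {'x': 1, 'y': 3}}.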
settings_dict_higo = recursive_dict_update(
settings_dict_base,
{
'data_settings': {
'output_path_suffix': 'higo',
},
'higo_baseline': {
'step_size': 0.001, #1 mm
'step_radius': 25,
'eta': 10,
'lambda_n': 7.5,
'lambda_s': 3.0,
'lambda_1': 0.1,
'lambda_2': 0.5,
},
}
)
disjoint_iteration = [
{
'parameters': ['specular_weights'],
"iterations": 40,
},
{
'parameters': ['diffuse_materials', 'specular_materials'],
"iterations": 40,
},
{
'parameters': ['normals'],
"iterations": 60,
},
{
'parameters': ['locations', 'observation_poses'],
"iterations": 60,
},
]
settings_dict_disjoint = recursive_dict_update(
settings_dict_base,
{
'data_settings': {
'output_path_suffix': 'disjoint',
},
'default_optimization_settings': {
},
'optimization_steps': [
*(disjoint_iteration * 5),
# {
# 'iterations': 1000,
# 'parameters': [
# 'observation_poses',
# ],
# 'visualize_initial': True,
# 'target_set': "testing",
# },
]
}
)
settings_dict_proposed = recursive_dict_update(
settings_dict_base,
{
'data_settings': {
'output_path_suffix': 'proposed',
},
'optimization_steps': [
{
'parameters': [
'diffuse_materials',
'specular_weights',
'specular_materials',
'normals',
'observation_poses',
'locations',
],
'visualize_initial': True,
},
# {
# 'parameters': [
# 'observation_poses',
# ],
# 'visualize_initial': True,
# 'target_set': "testing",
# },
]
}
)
def run_experiment(settings_dict):
experiment_settings = ExperimentSettings(settings_dict)
# localize to the current computer as required
experiment_settings.localize()
# create an empty experiment object, with the correct parametrizations
experiment_settings.check_stored("parametrization_settings")
experiment_state = ExperimentState.create(experiment_settings.get('parametrization_settings'))
experiment_settings.save("parametrization_settings")
# create the data adapter
experiment_settings.check_stored("data_settings")
data_adapter = DataAdapterFactory(
experiment_settings.get('data_settings')['data_type']
)(
experiment_settings.get('local_data_settings')
)
if not experiment_settings.get('data_settings')['lazy_image_loading']:
device = torch.device(general_settings.device_name)
image_tensors = [
observation.get_image()
for observation in tqdm(data_adapter.images, desc="Preloading training images")
]
# now compact all observations into a few big tensors and remove the old tensors
# this makes for much faster access/operations
data_adapter.compound_image_tensors = {}
data_adapter.compound_image_tensor_sizes = {}
training_indices_batches, _ = data_adapter.get_training_info()
testing_indices_batches, _ = data_adapter.get_testing_info()
for batch in itertools.chain(training_indices_batches, testing_indices_batches):
compound_H = max([image_tensors[i].shape[-2] for i in batch])
compound_W = max([image_tensors[i].shape[-1] for i in batch])
C = len(batch)
compound_images = torch.zeros(C, 3, compound_H, compound_W, dtype=torch.float, device=device)
compound_sizes = torch.zeros(C, 2, dtype=torch.long, device=device)
for idx, image_idx in enumerate(batch):
src_tensor = image_tensors[image_idx]
compound_images[idx,:,:src_tensor.shape[-2], :src_tensor.shape[-1]] = src_tensor
compound_sizes[idx,0] = src_tensor.shape[-1]
compound_sizes[idx,1] = src_tensor.shape[-2]
del data_adapter.images[image_idx]._image
data_adapter.compound_image_tensors[batch] = compound_images
data_adapter.compound_image_tensor_sizes[batch] = compound_sizes
del image_tensors
experiment_settings.save("data_settings")
# initialize the parametrizations with the requested values, if the initialization is not available on disk
initialization_state_folder = experiment_settings.get_state_folder("initialization")
if experiment_settings.check_stored("initialization_settings"):
experiment_state.load(initialization_state_folder)
else:
experiment_state.initialize(data_adapter, experiment_settings.get('local_initialization_settings'))
experiment_state.save(initialization_state_folder)
experiment_settings.save("initialization_settings")
# evaluate_state("initialization",
# experiment_settings.get('data_settings')['object_name'],
# experiment_settings.get('local_data_settings')['gt_scan_folder'],
# experiment_state)
experiment_state.visualize_statics(
experiment_settings.get_output_path(),
data_adapter
)
if experiment_settings.get("higo_baseline") is not None:
higo_state_folder = experiment_settings.get_state_folder("higo")
if not experiment_settings.check_stored("higo_baseline"):
higo_experiment_state = higo_baseline(
experiment_state,
data_adapter,
higo_state_folder,
experiment_settings.get('higo_baseline')
)
higo_experiment_state.visualize(
experiment_settings.get_output_path(),
"higo_baseline",
data_adapter,
losses = [],
shadows_occlusions=False
)
higo_experiment_state.save(higo_state_folder)
experiment_settings.save("higo_baseline")
else:
higo_experiment_state = ExperimentState.copy(experiment_state)
higo_experiment_state.load(higo_state_folder)
evaluate_state("higo baseline",
experiment_settings.get('data_settings')['object_name'],
experiment_settings.get('local_data_settings')['gt_scan_folder'],
higo_experiment_state)
optimization_step_settings = experiment_settings.get('default_optimization_settings')
experiment_settings.check_stored("default_optimization_settings")
experiment_settings.save("default_optimization_settings")
for step_index in range(len(experiment_settings.get('optimization_steps'))):
step_state_folder = experiment_settings.get_state_folder("optimization_steps", step_index)
optimization_settings = experiment_settings.get("optimization_steps", step_index)
shorthand = experiment_settings.get_shorthand("optimization_steps", step_index)
set_name = "%02d_%s" % (step_index, shorthand)
if optimization_settings['visualize_initial']:
experiment_state.visualize(
experiment_settings.get_output_path(),
"%02d__initial" % step_index,
data_adapter,
optimization_settings['losses']
)
if experiment_settings.check_stored("optimization_steps", step_index):
experiment_state.load(step_state_folder)
else:
optimize(
experiment_state,
data_adapter,
optimization_settings,
output_path_structure=os.path.join(
experiment_settings.get_output_path(),
"evolution_%%s_%s.png" % set_name
)
)
experiment_state.save(step_state_folder)
experiment_settings.save("optimization_steps", step_index)
if optimization_settings['visualize_results']:
experiment_state.visualize(
experiment_settings.get_output_path(),
set_name,
data_adapter,
optimization_settings['losses']
)
evaluate_state(experiment_settings.get('data_settings').get('output_path_suffix', 'proposed'),
experiment_settings.get('data_settings')['object_name'],
experiment_settings.get('local_data_settings')['gt_scan_folder'],
experiment_state)
if __name__ == "__main__":
for object_name, center_view in [
# ("peter", 1171),
# ("peter", 566),
# ("teapot", 451),
# ("teapot", 999),
# ("gnome", 308),
# ("gnome", 488),
# ("girl", 882),
# ("girl", 1059),
("duck", 826),
# ("duck", 49),
# ("fire_hydrant", 582),
# ("fire_hydrant", 704),
# ("pineapple", 401),
# ("pineapple", 536),
# ("bunny", 670),
# ("bunny", 180),
]:
for settings_dict in [settings_dict_proposed, settings_dict_higo, settings_dict_disjoint]:
settings_dict['data_settings']['object_name'] = object_name
settings_dict['data_settings']['center_view'] = center_view
run_experiment(settings_dict)
|
""" Definition for RHEAS Datasets decorators.
.. module:: datasets.decorators
:synopsis: Definition of the Datasets decorators
.. moduleauthor:: Kostas Andreadis <kandread@jpl.nasa.gov>
"""
from functools import wraps
import tempfile
import shutil
import urllib
from datetime import datetime
from ftplib import FTP
import re
from pydap.client import open_url
import netCDF4 as netcdf4
import numpy as np
from osgeo import gdal
import datasets
def resetDatetime(dt):
"""Set time to 00:00 to align with daily data."""
return datetime(dt.year, dt.month, dt.day, 0, 0)
def path(fetch):
"""Decorator for getting files from local path."""
@wraps(fetch)
def wrapper(*args, **kwargs):
url, bbox, dt = fetch(*args, **kwargs)
outpath = tempfile.mkdtemp()
filename = url.format(dt.year, dt.month, dt.day)
try:
shutil.copy(filename, outpath)
lfilename = filename.split("/")[-1]
except:
lfilename = None
return outpath, lfilename, bbox, dt
return wrapper
def http(fetch):
"""Decorator for downloading files from HTTP sites."""
@wraps(fetch)
def wrapper(*args, **kwargs):
url, bbox, dt = fetch(*args, **kwargs)
outpath = tempfile.mkdtemp()
filename = url.format(dt.year, dt.month, dt.day)
try:
lfilename = filename.split("/")[-1]
urllib.urlcleanup()
urllib.urlretrieve(filename, "{0}/{1}".format(outpath, lfilename))
except:
lfilename = None
return outpath, lfilename, bbox, dt
return wrapper
def ftp(fetch):
"""Decorator for downloading files from FTP sites."""
@wraps(fetch)
def wrapper(*args, **kwargs):
url, bbox, dt = fetch(*args, **kwargs)
ftpurl = url.split("/")[2]
outpath = tempfile.mkdtemp()
try:
conn = FTP(ftpurl)
conn.login()
conn.cwd("/".join(url.split("/")[3:-1]).format(dt.year, dt.month, dt.day))
name = url.split("/")[-1].format(dt.year, dt.month, dt.day)
filenames = [f for f in conn.nlst() if re.match(r".*{0}.*".format(name), f) is not None]
if len(filenames) > 0:
filename = filenames[0]
with open("{0}/{1}".format(outpath, filename), 'wb') as f:
conn.retrbinary("RETR {0}".format(filename), f.write)
filenames.append("{0}/{1}".format(outpath, filename))
else:
filename = None
except:
filename = None
return outpath, filename, bbox, dt
return wrapper
def opendap(fetch):
"""Decorator for fetching data from Opendap servers."""
@wraps(fetch)
def wrapper(*args, **kwargs):
url, varname, bbox, dt = fetch(*args, **kwargs)
ds = open_url(url)
for var in ds.keys():
if var.lower().startswith("lon") or var.lower() == "x":
lonvar = var
if var.lower().startswith("lat") or var.lower() == "y":
latvar = var
if var.lower().startswith("time") or var.lower() == "t":
timevar = var
lat = ds[latvar][:].data
lon = ds[lonvar][:].data
lon[lon > 180] -= 360
res = abs(lat[0]-lat[1]) # assume rectangular grid
i1, i2, j1, j2 = datasets.spatialSubset(np.sort(lat)[::-1], np.sort(lon), res, bbox)
t = ds[timevar]
tt = netcdf4.num2date(t[:].data, units=t.units)
ti = [tj for tj in range(len(tt)) if resetDatetime(tt[tj]) >= dt[0] and resetDatetime(tt[tj]) <= dt[1]]
if len(ti) > 0:
lati = np.argsort(lat)[::-1][i1:i2]
loni = np.argsort(lon)[j1:j2]
            if len(ds[varname].data[0].shape) > 3:
                data = ds[varname].data[0][ti[0]:ti[-1]+1, 0, lati[0]:lati[-1]+1, loni[0]:loni[-1]+1]
            else:
                # 3-D variable (time, lat, lon): no level dimension to index
                data = ds[varname].data[0][ti[0]:ti[-1]+1, lati[0]:lati[-1]+1, loni[0]:loni[-1]+1]
dt = tt[ti]
else:
data = None
dt = None
lat = np.sort(lat)[::-1][i1:i2]
lon = np.sort(lon)[j1:j2]
return data, lat, lon, dt
return wrapper
def netcdf(fetch):
"""Decorator for fetching NetCDF files (local or from Opendap servers)."""
@wraps(fetch)
def wrapper(*args, **kwargs):
url, varname, bbox, dt = fetch(*args, **kwargs)
ds = netcdf4.Dataset(url)
for var in ds.variables:
if var.lower().startswith("lon") or var.lower() == "x":
lonvar = var
if var.lower().startswith("lat") or var.lower() == "y":
latvar = var
if var.lower().startswith("time") or var.lower() == "t":
timevar = var
lat = ds.variables[latvar][:]
lon = ds.variables[lonvar][:]
lon[lon > 180] -= 360
res = abs(lat[0]-lat[1]) # assume rectangular grid
i1, i2, j1, j2 = datasets.spatialSubset(np.sort(lat)[::-1], np.sort(lon), res, bbox)
t = ds.variables[timevar]
tt = netcdf4.num2date(t[:], units=t.units)
ti = [tj for tj in range(len(tt)) if resetDatetime(tt[tj]) >= dt[0] and resetDatetime(tt[tj]) <= dt[1]]
if len(ti) > 0:
lati = np.argsort(lat)[::-1][i1:i2]
loni = np.argsort(lon)[j1:j2]
if len(ds.variables[varname].shape) > 3:
data = ds.variables[varname][ti, 0, lati, loni]
else:
data = ds.variables[varname][ti, lati, loni]
dt = tt[ti]
else:
data = None
dt = None
lat = np.sort(lat)[::-1][i1:i2]
lon = np.sort(lon)[j1:j2]
return data, lat, lon, dt
return wrapper
def geotiff(fetch):
"""Decorator for reading data from raster files."""
@wraps(fetch)
def wrapper(*args, **kwargs):
outpath, filename, bbox, dt = fetch(*args, **kwargs)
if filename is not None:
lfilename = datasets.uncompress(filename, outpath)
f = gdal.Open("{0}/{1}".format(outpath, lfilename))
xul, xres, _, yul, _, yres = f.GetGeoTransform()
data = f.ReadAsArray()
nr, nc = data.shape
lat = np.arange(yul + yres/2.0, yul + yres * nr, yres)
lon = np.arange(xul + xres/2.0, xul + xres * nc, xres)
i1, i2, j1, j2 = datasets.spatialSubset(lat, lon, xres, bbox)
data = data[i1:i2, j1:j2]
lat = lat[i1:i2]
lon = lon[j1:j2]
shutil.rmtree(outpath)
else:
data = lat = lon = None
return data, lat, lon, dt
return wrapper
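# Hypothetical usage sketch (not part of this module): a dataset module would stack
# these decorators over a fetch() that returns its URL template, bounding box and
# date, e.g.
#   @geotiff
#   @http
#   def fetch(dt, bbox):
#       url = "http://example.com/product/{0}/{1:02d}/{2:02d}.tif.gz"
#       return url, bbox, dt
# http() then downloads the file for the requested date and geotiff() reads,
# uncompresses and spatially subsets it, returning (data, lat, lon, dt).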
|
"""Tests the `debug_print` option for Nonlinear solvers."""
import os
import re
import sys
import shutil
import tempfile
import unittest
from distutils.version import LooseVersion
from io import StringIO
import numpy as np
import openmdao.api as om
from openmdao.test_suite.scripts.circuit_analysis import Circuit
from openmdao.utils.general_utils import run_model
from openmdao.utils.general_utils import printoptions
try:
from parameterized import parameterized
except ImportError:
from openmdao.utils.assert_utils import SkipParameterized as parameterized
nonlinear_solvers = [
om.NonlinearBlockGS,
om.NonlinearBlockJac,
om.NewtonSolver,
om.BroydenSolver
]
class TestNonlinearSolvers(unittest.TestCase):
def setUp(self):
import re
import os
from tempfile import mkdtemp
# perform test in temporary directory
self.startdir = os.getcwd()
self.tempdir = mkdtemp(prefix='test_solver')
os.chdir(self.tempdir)
# iteration coordinate, file name and variable data are common for all tests
coord = 'rank0:root._solve_nonlinear|0|NLRunOnce|0|circuit._solve_nonlinear|0'
self.filename = 'solver_errors.0.out'
self.expected_data = '\n'.join([
"",
"# Inputs and outputs at start of iteration '%s':" % coord,
"",
"# nonlinear inputs",
"{'circuit.D1.V_in': array([ 1.]),",
" 'circuit.D1.V_out': array([ 0.]),",
" 'circuit.R1.V_in': array([ 1.]),",
" 'circuit.R1.V_out': array([ 0.]),",
" 'circuit.R2.V_in': array([ 1.]),",
" 'circuit.R2.V_out': array([ 1.]),",
" 'circuit.n1.I_in:0': array([ 0.1]),",
" 'circuit.n1.I_out:0': array([ 1.]),",
" 'circuit.n1.I_out:1': array([ 1.]),",
" 'circuit.n2.I_in:0': array([ 1.]),",
" 'circuit.n2.I_out:0': array([ 1.])}",
"",
"# nonlinear outputs",
"{'circuit.D1.I': array([ 1.]),",
" 'circuit.R1.I': array([ 1.]),",
" 'circuit.R2.I': array([ 1.]),",
" 'circuit.n1.V': array([ 10.]),",
" 'circuit.n2.V': array([ 0.001])}",
""
])
def tearDown(self):
import os
from shutil import rmtree
# clean up the temporary directory
os.chdir(self.startdir)
try:
rmtree(self.tempdir)
except OSError:
pass
@parameterized.expand([
[solver.__name__, solver] for solver in nonlinear_solvers
])
def test_solver_debug_print(self, name, solver):
p = om.Problem()
model = p.model
model.add_subsystem('ground', om.IndepVarComp('V', 0., units='V'))
model.add_subsystem('source', om.IndepVarComp('I', 0.1, units='A'))
model.add_subsystem('circuit', Circuit())
model.connect('source.I', 'circuit.I_in')
model.connect('ground.V', 'circuit.Vg')
p.setup()
nl = model.circuit.nonlinear_solver = solver()
nl.options['debug_print'] = True
nl.options['err_on_non_converge'] = True
if name == 'NonlinearBlockGS':
nl.options['use_apply_nonlinear'] = True
if name == 'NewtonSolver':
nl.options['solve_subsystems'] = True
# suppress solver output for test
nl.options['iprint'] = model.circuit.linear_solver.options['iprint'] = -1
        # For BroydenSolver, don't compute the Jacobian
try:
nl.options['compute_jacobian'] = False
except KeyError:
pass
# set some poor initial guesses so that we don't converge
p['circuit.n1.V'] = 10.
p['circuit.n2.V'] = 1e-3
opts = {}
# formatting has changed in numpy 1.14 and beyond.
if LooseVersion(np.__version__) >= LooseVersion("1.14"):
opts["legacy"] = '1.13'
with printoptions(**opts):
# run the model and check for expected output file
output = run_model(p, ignore_exception=True)
expected_output = '\n'.join([
self.expected_data,
"Inputs and outputs at start of iteration "
"have been saved to '%s'.\n" % self.filename
])
self.assertEqual(output, expected_output)
with open(self.filename, 'r') as f:
self.assertEqual(f.read(), self.expected_data)
# setup & run again to make sure there is no error due to existing file
p.setup()
with printoptions(**opts):
run_model(p, ignore_exception=False)
def test_solver_debug_print_feature(self):
from distutils.version import LooseVersion
import numpy as np
import openmdao.api as om
from openmdao.test_suite.scripts.circuit_analysis import Circuit
from openmdao.utils.general_utils import printoptions
p = om.Problem()
model = p.model
model.add_subsystem('circuit', Circuit())
p.setup()
nl = model.circuit.nonlinear_solver = om.NewtonSolver(solve_subsystems=False)
nl.options['iprint'] = 2
nl.options['debug_print'] = True
nl.options['err_on_non_converge'] = True
# set some poor initial guesses so that we don't converge
p.set_val('circuit.I_in', 0.1, units='A')
p.set_val('circuit.Vg', 0.0, units='V')
p.set_val('circuit.n1.V', 10.)
p.set_val('circuit.n2.V', 1e-3)
opts = {}
# formatting has changed in numpy 1.14 and beyond.
if LooseVersion(np.__version__) >= LooseVersion("1.14"):
opts["legacy"] = '1.13'
with printoptions(**opts):
# run the model
try:
p.run_model()
except om.AnalysisError:
pass
with open(self.filename, 'r') as f:
self.assertEqual(f.read(), self.expected_data)
class TestNonlinearSolversIsolated(unittest.TestCase):
"""
This test needs to run isolated to preclude interactions in the underlying
`warnings` module that is used to raise the singular entry error.
"""
ISOLATED = True
def setUp(self):
# perform test in temporary directory
self.startdir = os.getcwd()
self.tempdir = tempfile.mkdtemp(prefix='test_solver')
os.chdir(self.tempdir)
def tearDown(self):
# clean up the temporary directory
os.chdir(self.startdir)
try:
shutil.rmtree(self.tempdir)
except OSError:
pass
def test_debug_after_raised_error(self):
prob = om.Problem()
model = prob.model
comp = om.IndepVarComp()
comp.add_output('dXdt:TAS', val=1.0)
comp.add_output('accel_target', val=2.0)
model.add_subsystem('des_vars', comp, promotes=['*'])
teg = model.add_subsystem('thrust_equilibrium_group', subsys=om.Group())
teg.add_subsystem('dynamics', om.ExecComp('z = 2.0*thrust'), promotes=['*'])
thrust_bal = om.BalanceComp()
thrust_bal.add_balance(name='thrust', val=1207.1, lhs_name='dXdt:TAS',
rhs_name='accel_target', eq_units='m/s**2', lower=-10.0, upper=10000.0)
teg.add_subsystem(name='thrust_bal', subsys=thrust_bal,
promotes_inputs=['dXdt:TAS', 'accel_target'],
promotes_outputs=['thrust'])
teg.linear_solver = om.DirectSolver()
teg.nonlinear_solver = om.NewtonSolver()
teg.nonlinear_solver.options['solve_subsystems'] = True
teg.nonlinear_solver.options['max_sub_solves'] = 1
teg.nonlinear_solver.options['atol'] = 1e-4
teg.nonlinear_solver.options['debug_print'] = True
prob.setup()
prob.set_solver_print(level=0)
stdout = sys.stdout
strout = StringIO()
sys.stdout = strout
with self.assertRaises(RuntimeError) as cm:
prob.run_model()
sys.stdout = stdout
output = strout.getvalue()
target = "'thrust_equilibrium_group.thrust_bal.thrust'"
self.assertTrue(target in output, msg=target + "NOT FOUND IN" + output)
# Make sure exception is unchanged.
expected_msg = "Singular entry found in Group (thrust_equilibrium_group) for row associated with state/residual 'thrust' ('thrust_equilibrium_group.thrust_bal.thrust') index 0."
self.assertEqual(expected_msg, str(cm.exception))
if __name__ == "__main__":
unittest.main()
|
import logging
import os
import shutil
import sys
from string import Template
from galaxy import eggs
eggs.require( 'MarkupSafe' )
import markupsafe
from galaxy.util import nice_size, unicodify
log = logging.getLogger( __name__ )
CHUNK_SIZE = 2 ** 20 # 1Mb
INSTALLATION_LOG = 'INSTALLATION.log'
# Set no activity timeout to 60 minutes (3600 seconds).
NO_OUTPUT_TIMEOUT = 3600.0
MAXDIFFSIZE = 8000
MAX_DISPLAY_SIZE = 32768
DOCKER_IMAGE_TEMPLATE = '''
# Galaxy Docker image
FROM bgruening/galaxy-stable
MAINTAINER Bjoern A. Gruning, bjoern.gruening@gmail.com
WORKDIR /galaxy-central
${selected_repositories}
# Mark folders as imported from the host.
VOLUME ["/export/", "/data/", "/var/lib/docker"]
# Expose port 80 (webserver), 21 (FTP server), 8800 (Proxy), 9001 (Galaxy report app)
EXPOSE :80
EXPOSE :21
EXPOSE :8800
EXPOSE :9001
# Autostart script that is invoked during container start
CMD ["/usr/bin/startup"]
'''
SELECTED_REPOSITORIES_TEMPLATE = '''
RUN install-repository "--url ${tool_shed_url} -o ${repository_owner} --name ${repository_name}"
'''
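# Illustration of how the ${...} placeholders above can be filled in with
# string.Template (a sketch only -- the real rendering is done elsewhere in the
# tool shed code, and the URL/owner/name values below are made up):
#
#     repo_lines = Template( SELECTED_REPOSITORIES_TEMPLATE ).safe_substitute(
#         tool_shed_url='https://toolshed.g2.bx.psu.edu',
#         repository_owner='devteam',
#         repository_name='bwa' )
#     dockerfile_text = Template( DOCKER_IMAGE_TEMPLATE ).safe_substitute(
#         selected_repositories=repo_lines )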
def evaluate_template( text, install_environment ):
"""
Substitute variables defined in XML blocks from dependencies file. The value of the received
repository_install_dir is the root installation directory of the repository that contains the
tool dependency. The value of the received install_dir is the root installation directory of
the tool_dependency.
"""
return Template( text ).safe_substitute( get_env_var_values( install_environment ) )
def get_env_var_values( install_environment ):
"""
Return a dictionary of values, some of which enable substitution of reserved words for the values.
The received install_environment object has 3 important attributes for reserved word substitution:
install_environment.tool_shed_repository_install_dir is the root installation directory of the repository
that contains the tool dependency being installed, install_environment.install_dir is the root
installation directory of the tool dependency, and install_environment.tmp_work_dir is the
temporary directory where the tool dependency compilation/installation is being processed.
"""
env_var_dict = {}
env_var_dict[ 'REPOSITORY_INSTALL_DIR' ] = install_environment.tool_shed_repository_install_dir
env_var_dict[ 'INSTALL_DIR' ] = install_environment.install_dir
env_var_dict[ 'TMP_WORK_DIR' ] = install_environment.tmp_work_dir
env_var_dict[ 'system_install' ] = install_environment.install_dir
# If the Python interpreter is 64bit then we can safely assume that the underlying system is also 64bit.
env_var_dict[ '__is64bit__' ] = sys.maxsize > 2 ** 32
return env_var_dict
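# Example of the reserved-word substitution performed by get_env_var_values()
# and evaluate_template(), using a minimal stand-in for install_environment
# (the attribute names come from the docstring above, the paths are made up):
#
#     class _StubInstallEnvironment( object ):
#         tool_shed_repository_install_dir = '/shed/repos/owner/package/rev'
#         install_dir = '/deps/package/1.0'
#         tmp_work_dir = '/tmp/tmp-toolshed-work'
#
#     evaluate_template( 'cd $INSTALL_DIR && ./configure --prefix=$INSTALL_DIR',
#                        _StubInstallEnvironment() )
#     # -> 'cd /deps/package/1.0 && ./configure --prefix=/deps/package/1.0'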
def get_file_type_str( changeset_revision, file_type ):
if file_type == 'zip':
file_type_str = '%s.zip' % changeset_revision
elif file_type == 'bz2':
file_type_str = '%s.tar.bz2' % changeset_revision
elif file_type == 'gz':
file_type_str = '%s.tar.gz' % changeset_revision
else:
file_type_str = ''
return file_type_str
def move_file( current_dir, source, destination, rename_to=None ):
source_path = os.path.abspath( os.path.join( current_dir, source ) )
source_file = os.path.basename( source_path )
if rename_to is not None:
destination_file = rename_to
destination_directory = os.path.join( destination )
destination_path = os.path.join( destination_directory, destination_file )
else:
destination_directory = os.path.join( destination )
destination_path = os.path.join( destination_directory, source_file )
if not os.path.exists( destination_directory ):
os.makedirs( destination_directory )
shutil.move( source_path, destination_path )
def remove_dir( dir ):
"""Attempt to remove a directory from disk."""
if dir:
if os.path.exists( dir ):
try:
shutil.rmtree( dir )
except:
pass
def size_string( raw_text, size=MAX_DISPLAY_SIZE ):
"""Return a subset of a string (up to MAX_DISPLAY_SIZE) translated to a safe string for display in a browser."""
if raw_text and len( raw_text ) >= size:
large_str = '\nFile contents truncated because file size is larger than maximum viewing size of %s\n' % nice_size( size )
raw_text = '%s%s' % ( raw_text[ 0:size ], large_str )
return raw_text or ''
def stringify( list ):
if list:
return ','.join( list )
return ''
def strip_path( fpath ):
"""Attempt to strip the path from a file name."""
if not fpath:
return fpath
try:
file_path, file_name = os.path.split( fpath )
except:
file_name = fpath
return file_name
def to_html_string( text ):
"""Translates the characters in text to an html string"""
if text:
try:
text = unicodify( text )
except UnicodeDecodeError, e:
return "Error decoding string: %s" % str( e )
text = unicode( markupsafe.escape( text ) )
text = text.replace( '\n', '<br/>' )
text = text.replace( '    ', '&nbsp;&nbsp;&nbsp;&nbsp;' )
text = text.replace( ' ', '&nbsp;' )
return text
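# Example of the escaping performed by to_html_string() (output assumes the
# whitespace replacements above):
#
#     to_html_string( 'x < 1 & y > 2\nok' )
#     # -> 'x&nbsp;&lt;&nbsp;1&nbsp;&amp;&nbsp;y&nbsp;&gt;&nbsp;2<br/>ok'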
|
# pylint: disable=invalid-name
import glob
import os
import re
import time
import torch
import pytest
from allennlp.common.testing import AllenNlpTestCase
from allennlp.training.trainer import Trainer, sparse_clip_norm, is_sparse
from allennlp.data import Vocabulary
from allennlp.common.params import Params
from allennlp.common.checks import ConfigurationError
from allennlp.models.simple_tagger import SimpleTagger
from allennlp.data.iterators import BasicIterator
from allennlp.data.dataset_readers import SequenceTaggingDatasetReader
class TestTrainer(AllenNlpTestCase):
def setUp(self):
super(TestTrainer, self).setUp()
self.instances = SequenceTaggingDatasetReader().read('tests/fixtures/data/sequence_tagging.tsv')
vocab = Vocabulary.from_instances(self.instances)
self.vocab = vocab
self.model_params = Params({
"text_field_embedder": {
"tokens": {
"type": "embedding",
"embedding_dim": 5
}
},
"encoder": {
"type": "lstm",
"input_size": 5,
"hidden_size": 7,
"num_layers": 2
}
})
self.model = SimpleTagger.from_params(self.vocab, self.model_params)
self.optimizer = torch.optim.SGD(self.model.parameters(), 0.01)
self.iterator = BasicIterator(batch_size=2)
self.iterator.index_with(vocab)
def test_trainer_can_run(self):
trainer = Trainer(model=self.model,
optimizer=self.optimizer,
iterator=self.iterator,
train_dataset=self.instances,
validation_dataset=self.instances,
num_epochs=2)
metrics = trainer.train()
assert 'best_validation_loss' in metrics
assert isinstance(metrics['best_validation_loss'], float)
assert 'best_epoch' in metrics
assert isinstance(metrics['best_epoch'], int)
# Making sure that both increasing and decreasing validation metrics work.
trainer = Trainer(model=self.model,
optimizer=self.optimizer,
iterator=self.iterator,
train_dataset=self.instances,
validation_dataset=self.instances,
validation_metric='+loss',
num_epochs=2)
metrics = trainer.train()
assert 'best_validation_loss' in metrics
assert isinstance(metrics['best_validation_loss'], float)
assert 'best_epoch' in metrics
assert isinstance(metrics['best_epoch'], int)
@pytest.mark.skipif(not torch.cuda.is_available(), reason="No CUDA device registered.")
def test_trainer_can_run_cuda(self):
trainer = Trainer(self.model, self.optimizer,
self.iterator, self.instances, num_epochs=2,
cuda_device=0)
trainer.train()
@pytest.mark.skipif(torch.cuda.device_count() < 2,
reason="Need multiple GPUs.")
def test_trainer_can_run_multiple_gpu(self):
multigpu_iterator = BasicIterator(batch_size=4)
multigpu_iterator.index_with(self.vocab)
trainer = Trainer(self.model, self.optimizer,
multigpu_iterator, self.instances, num_epochs=2,
cuda_device=[0, 1])
trainer.train()
def test_trainer_can_resume_training(self):
trainer = Trainer(self.model, self.optimizer,
self.iterator, self.instances,
validation_dataset=self.instances,
num_epochs=1, serialization_dir=self.TEST_DIR)
trainer.train()
new_trainer = Trainer(self.model, self.optimizer,
self.iterator, self.instances,
validation_dataset=self.instances,
num_epochs=3, serialization_dir=self.TEST_DIR)
epoch, val_metrics_per_epoch = new_trainer._restore_checkpoint() # pylint: disable=protected-access
assert epoch == 1
assert len(val_metrics_per_epoch) == 1
assert isinstance(val_metrics_per_epoch[0], float)
assert val_metrics_per_epoch[0] != 0.
new_trainer.train()
def test_should_stop_early_with_increasing_metric(self):
new_trainer = Trainer(self.model, self.optimizer,
self.iterator, self.instances,
validation_dataset=self.instances,
num_epochs=3, serialization_dir=self.TEST_DIR,
patience=5, validation_metric="+test")
assert new_trainer._should_stop_early([.5, .3, .2, .1, .4, .4]) # pylint: disable=protected-access
assert not new_trainer._should_stop_early([.3, .3, .3, .2, .5, .1]) # pylint: disable=protected-access
def test_should_stop_early_with_decreasing_metric(self):
new_trainer = Trainer(self.model, self.optimizer,
self.iterator, self.instances,
validation_dataset=self.instances,
num_epochs=3, serialization_dir=self.TEST_DIR,
patience=5, validation_metric="-test")
assert new_trainer._should_stop_early([.02, .3, .2, .1, .4, .4]) # pylint: disable=protected-access
assert not new_trainer._should_stop_early([.3, .3, .2, .1, .4, .5]) # pylint: disable=protected-access
def test_train_driver_raises_on_model_with_no_loss_key(self):
class FakeModel(torch.nn.Module):
def forward(self, **kwargs): # pylint: disable=arguments-differ,unused-argument
return {}
with pytest.raises(ConfigurationError):
trainer = Trainer(FakeModel(), self.optimizer,
self.iterator, self.instances,
num_epochs=2, serialization_dir=self.TEST_DIR)
trainer.train()
def test_trainer_can_log_histograms(self):
# enable activation logging
for module in self.model.modules():
module.should_log_activations = True
trainer = Trainer(self.model, self.optimizer,
self.iterator, self.instances, num_epochs=3,
serialization_dir=self.TEST_DIR,
histogram_interval=2)
trainer.train()
def test_trainer_respects_num_serialized_models_to_keep(self):
trainer = Trainer(self.model, self.optimizer,
self.iterator, self.instances, num_epochs=5,
serialization_dir=self.TEST_DIR,
num_serialized_models_to_keep=3)
trainer.train()
# Now check the serialized files
for prefix in ['model_state_epoch_*', 'training_state_epoch_*']:
file_names = glob.glob(os.path.join(self.TEST_DIR, prefix))
epochs = [int(re.search(r"_([0-9])\.th", fname).group(1))
for fname in file_names]
assert sorted(epochs) == [2, 3, 4]
def test_trainer_respects_keep_serialized_model_every_num_seconds(self):
# To test:
# Create an iterator that sleeps for 0.5 seconds per epoch, so the total training
# time for one epoch is slightly greater than 0.5 seconds.
# Run for 6 epochs, keeping the last 2 models, models also kept every 1 second.
# Check the resulting checkpoints. Should then have models at epochs
# 2, 4, plus the last two at 5 and 6.
class WaitingIterator(BasicIterator):
# pylint: disable=arguments-differ
def _create_batches(self, *args, **kwargs):
time.sleep(0.5)
return super(WaitingIterator, self)._create_batches(*args, **kwargs)
iterator = WaitingIterator(batch_size=2)
iterator.index_with(self.vocab)
trainer = Trainer(self.model, self.optimizer,
iterator, self.instances, num_epochs=6,
serialization_dir=self.TEST_DIR,
num_serialized_models_to_keep=2,
keep_serialized_model_every_num_seconds=1)
trainer.train()
# Now check the serialized files
for prefix in ['model_state_epoch_*', 'training_state_epoch_*']:
file_names = glob.glob(os.path.join(self.TEST_DIR, prefix))
epochs = [int(re.search(r"_([0-9])\.th", fname).group(1))
for fname in file_names]
# epoch N has N-1 in file name
assert sorted(epochs) == [1, 3, 4, 5]
def test_trainer_saves_models_at_specified_interval(self):
iterator = BasicIterator(batch_size=4)
iterator.index_with(self.vocab)
trainer = Trainer(self.model, self.optimizer,
iterator, self.instances, num_epochs=2,
serialization_dir=self.TEST_DIR,
model_save_interval=0.0001)
trainer.train()
# Now check the serialized files for models saved during the epoch.
prefix = 'model_state_epoch_*'
file_names = sorted(glob.glob(os.path.join(self.TEST_DIR, prefix)))
epochs = [re.search(r"_([0-9\.\-]+)\.th", fname).group(1)
for fname in file_names]
# We should have checkpoints at the end of each epoch and during each, e.g.
# [0.timestamp, 0, 1.timestamp, 1]
assert len(epochs) == 4
assert epochs[3] == '1'
assert '.' in epochs[0]
# Now make certain we can restore from timestamped checkpoint.
# To do so, remove the end-of-epoch checkpoints for epochs 1 and 2, so
# that we are forced to restore from the timestamped checkpoints.
for k in range(2):
os.remove(os.path.join(self.TEST_DIR, 'model_state_epoch_{}.th'.format(k)))
os.remove(os.path.join(self.TEST_DIR, 'training_state_epoch_{}.th'.format(k)))
os.remove(os.path.join(self.TEST_DIR, 'best.th'))
restore_trainer = Trainer(self.model, self.optimizer,
self.iterator, self.instances, num_epochs=2,
serialization_dir=self.TEST_DIR,
model_save_interval=0.0001)
epoch, _ = restore_trainer._restore_checkpoint() # pylint: disable=protected-access
assert epoch == 2
# One batch per epoch.
assert restore_trainer._batch_num_total == 2 # pylint: disable=protected-access
class TestSparseClipGrad(AllenNlpTestCase):
def test_sparse_clip_grad(self):
# create a sparse embedding layer, then take gradient
embedding = torch.nn.Embedding(100, 16, sparse=True)
embedding.zero_grad()
ids = torch.autograd.Variable((torch.rand(17) * 100).long())
# Set some of the ids to the same value so that the sparse gradient
# has repeated indices. This tests some additional logic.
ids[:5] = 5
loss = embedding(ids).sum()
loss.backward()
assert is_sparse(embedding.weight.grad)
# Now try to clip the gradients.
_ = sparse_clip_norm([embedding.weight], 1.5)
# Final norm should be 1.5
grad = embedding.weight.grad.data.coalesce()
self.assertAlmostEqual(grad._values().norm(2.0), 1.5, places=5) # pylint: disable=protected-access
|
# pylint: disable=redefined-outer-name
from mock import MagicMock
import pytest
import posixpath
import ftplib
from ftplib import FTP
from kiwi.store.artifact.artifact_repository_registry import get_artifact_repository
from kiwi.store.artifact.ftp_artifact_repo import FTPArtifactRepository
@pytest.fixture
def ftp_mock():
return MagicMock(autospec=FTP)
def test_artifact_uri_factory():
repo = get_artifact_repository("ftp://user:pass@test_ftp:123/some/path")
assert isinstance(repo, FTPArtifactRepository)
def test_list_artifacts_empty(ftp_mock):
repo = FTPArtifactRepository("ftp://test_ftp/some/path")
repo.get_ftp_client = MagicMock()
call_mock = MagicMock(return_value=ftp_mock)
repo.get_ftp_client.return_value = MagicMock(__enter__=call_mock)
ftp_mock.nlst = MagicMock(return_value=[])
assert repo.list_artifacts() == []
ftp_mock.nlst.assert_called_once_with("/some/path")
def test_list_artifacts(ftp_mock):
artifact_root_path = "/experiment_id/run_id/"
repo = FTPArtifactRepository("ftp://test_ftp"+artifact_root_path)
repo.get_ftp_client = MagicMock()
call_mock = MagicMock(return_value=ftp_mock)
repo.get_ftp_client.return_value = MagicMock(__enter__=call_mock)
# mocked file structure
# |- file
# |- model
# |- model.pb
file_path = "file"
file_size = 678
dir_path = "model"
ftp_mock.cwd = MagicMock(side_effect=[None, ftplib.error_perm, None])
ftp_mock.nlst = MagicMock(return_value=[file_path, dir_path])
ftp_mock.size = MagicMock(return_value=file_size)
artifacts = repo.list_artifacts(path=None)
ftp_mock.nlst.assert_called_once_with(artifact_root_path)
ftp_mock.size.assert_called_once_with(artifact_root_path + file_path)
assert len(artifacts) == 2
assert artifacts[0].path == file_path
assert artifacts[0].is_dir is False
assert artifacts[0].file_size == file_size
assert artifacts[1].path == dir_path
assert artifacts[1].is_dir is True
assert artifacts[1].file_size is None
def test_list_artifacts_with_subdir(ftp_mock):
artifact_root_path = "/experiment_id/run_id/"
repo = FTPArtifactRepository("sftp://test_sftp"+artifact_root_path)
repo.get_ftp_client = MagicMock()
call_mock = MagicMock(return_value=ftp_mock)
repo.get_ftp_client.return_value = MagicMock(__enter__=call_mock)
# mocked file structure
# |- model
# |- model.pb
# |- variables
dir_name = 'model'
# list artifacts at sub directory level
file_path = 'model.pb'
file_size = 345
subdir_name = 'variables'
ftp_mock.nlst = MagicMock(return_value=[file_path, subdir_name])
ftp_mock.cwd = MagicMock(side_effect=[None, ftplib.error_perm, None])
ftp_mock.size = MagicMock(return_value=file_size)
artifacts = repo.list_artifacts(path=dir_name)
ftp_mock.nlst.assert_called_once_with(artifact_root_path + dir_name)
ftp_mock.size.assert_called_once_with(artifact_root_path + dir_name + '/' + file_path)
assert len(artifacts) == 2
assert artifacts[0].path == dir_name + '/' + file_path
assert artifacts[0].is_dir is False
assert artifacts[0].file_size == file_size
assert artifacts[1].path == dir_name + '/' + subdir_name
assert artifacts[1].is_dir is True
assert artifacts[1].file_size is None
def test_log_artifact(ftp_mock, tmpdir):
repo = FTPArtifactRepository("ftp://test_ftp/some/path")
repo.get_ftp_client = MagicMock()
call_mock = MagicMock(return_value=ftp_mock)
repo.get_ftp_client.return_value = MagicMock(__enter__=call_mock)
d = tmpdir.mkdir("data")
f = d.join("test.txt")
f.write("hello world!")
fpath = d + '/test.txt'
fpath = fpath.strpath
ftp_mock.cwd = MagicMock(side_effect=[ftplib.error_perm, None])
repo.log_artifact(fpath)
ftp_mock.mkd.assert_called_once_with('/some/path')
ftp_mock.cwd.assert_called_with('/some/path')
ftp_mock.storbinary.assert_called_once()
assert ftp_mock.storbinary.call_args_list[0][0][0] == 'STOR test.txt'
def test_log_artifact_multiple_calls(ftp_mock, tmpdir):
repo = FTPArtifactRepository("ftp://test_ftp/some/path")
repo.get_ftp_client = MagicMock()
call_mock = MagicMock(return_value=ftp_mock)
repo.get_ftp_client.return_value = MagicMock(__enter__=call_mock)
d = tmpdir.mkdir("data")
file1 = d.join("test1.txt")
file1.write("hello world!")
fpath1 = d + '/test1.txt'
fpath1 = fpath1.strpath
file2 = d.join("test2.txt")
file2.write("hello world!")
fpath2 = d + '/test2.txt'
fpath2 = fpath2.strpath
ftp_mock.cwd = MagicMock(side_effect=[
ftplib.error_perm,
None,
ftplib.error_perm,
None,
None,
None
])
repo.log_artifact(fpath1)
ftp_mock.mkd.assert_called_once_with('/some/path')
ftp_mock.cwd.assert_called_with('/some/path')
ftp_mock.storbinary.assert_called()
assert ftp_mock.storbinary.call_args_list[0][0][0] == 'STOR test1.txt'
ftp_mock.reset_mock()
repo.log_artifact(fpath1, "subdir")
ftp_mock.mkd.assert_called_once_with('/some/path/subdir')
ftp_mock.cwd.assert_called_with('/some/path/subdir')
ftp_mock.storbinary.assert_called()
assert ftp_mock.storbinary.call_args_list[0][0][0] == 'STOR test1.txt'
ftp_mock.reset_mock()
repo.log_artifact(fpath2)
ftp_mock.mkd.assert_not_called()
ftp_mock.cwd.assert_called_with('/some/path')
ftp_mock.storbinary.assert_called()
assert ftp_mock.storbinary.call_args_list[0][0][0] == 'STOR test2.txt'
def test_log_artifacts(ftp_mock, tmpdir):
repo = FTPArtifactRepository("ftp://test_ftp/some/path")
repo.get_ftp_client = MagicMock()
call_mock = MagicMock(return_value=ftp_mock)
repo.get_ftp_client.return_value = MagicMock(__enter__=call_mock)
subd = tmpdir.mkdir("data").mkdir("subdir")
subd.join("a.txt").write("A")
subd.join("b.txt").write("B")
subd.join("c.txt").write("C")
ftp_mock.cwd = MagicMock(side_effect=[ftplib.error_perm, None, None, None, None, None])
repo.log_artifacts(subd.strpath)
ftp_mock.mkd.assert_any_call('/some/path/subdir')
ftp_mock.cwd.assert_any_call('/some/path/subdir')
assert ftp_mock.storbinary.call_count == 3
storbinary_call_args = sorted([ftp_mock.storbinary.call_args_list[i][0][0] for i in range(3)])
assert storbinary_call_args == ['STOR a.txt', 'STOR b.txt', 'STOR c.txt']
def test_download_artifacts_single(ftp_mock):
repo = FTPArtifactRepository("ftp://test_ftp/some/path")
repo.get_ftp_client = MagicMock()
call_mock = MagicMock(return_value=ftp_mock)
repo.get_ftp_client.return_value = MagicMock(__enter__=call_mock)
ftp_mock.cwd = MagicMock(side_effect=ftplib.error_perm)
repo.download_artifacts("test.txt")
ftp_mock.retrbinary.assert_called_once()
assert ftp_mock.retrbinary.call_args_list[0][0][0] == 'RETR /some/path/test.txt'
def test_download_artifacts(ftp_mock):
artifact_root_path = "/some/path"
repo = FTPArtifactRepository("ftp://test_ftp" + artifact_root_path)
repo.get_ftp_client = MagicMock()
call_mock = MagicMock(return_value=ftp_mock)
repo.get_ftp_client.return_value = MagicMock(__enter__=call_mock)
# mocked file structure
# |- model
# |- model.pb
# |- empty_dir
# |- variables
# |- test.txt
dir_path = posixpath.join(artifact_root_path, 'model')
# list artifacts at sub directory level
model_file_path_sub = 'model.pb'
model_file_path_full = posixpath.join(dir_path, model_file_path_sub)
empty_dir_name = "empty_dir"
empty_dir_path = posixpath.join(dir_path, empty_dir_name)
subdir_name = 'variables'
subdir_path_full = posixpath.join(dir_path, subdir_name)
subfile_name = 'test.txt'
subfile_path_full = posixpath.join(artifact_root_path, subdir_path_full, subfile_name)
is_dir_mapping = {
dir_path: True,
empty_dir_path: True,
model_file_path_full: False,
subdir_path_full: True,
subfile_path_full: False,
}
is_dir_call_args = [
dir_path, model_file_path_full, empty_dir_path, subdir_path_full,
model_file_path_full,
subdir_path_full, subfile_path_full,
subfile_path_full
]
def cwd_side_effect(call_arg):
if not is_dir_mapping[call_arg]:
raise ftplib.error_perm
ftp_mock.cwd = MagicMock(side_effect=cwd_side_effect)
def nlst_side_effect(call_arg):
if call_arg == dir_path:
return [model_file_path_sub, subdir_name, empty_dir_name]
elif call_arg == subdir_path_full:
return [subfile_name]
elif call_arg == empty_dir_path:
return []
else:
raise Exception("should never call nlst for non-directories {}".format(call_arg))
ftp_mock.nlst = MagicMock(side_effect=nlst_side_effect)
repo.download_artifacts("model")
cwd_call_args = [arg_entry[0][0] for arg_entry in ftp_mock.cwd.call_args_list]
assert set(cwd_call_args) == set(is_dir_call_args)
assert ftp_mock.nlst.call_count == 3
assert ftp_mock.retrbinary.call_args_list[0][0][0] == 'RETR ' + model_file_path_full
assert ftp_mock.retrbinary.call_args_list[1][0][0] == 'RETR ' + subfile_path_full
def test_log_artifact_reuse_ftp_client(ftp_mock, tmpdir):
repo = FTPArtifactRepository("ftp://test_ftp/some/path")
repo.get_ftp_client = MagicMock()
call_mock = MagicMock(return_value=ftp_mock)
repo.get_ftp_client.return_value = MagicMock(__enter__=call_mock)
d = tmpdir.mkdir("data")
file = d.join("test.txt")
file.write("hello world!")
fpath = file.strpath
repo.log_artifact(fpath)
repo.log_artifact(fpath, "subdir1/subdir2")
repo.log_artifact(fpath, "subdir3")
assert repo.get_ftp_client.call_count == 3
|
import os
import librosa
import numpy as np
import soundfile as sf
import torch
from tqdm import tqdm
from utils import data, spectrogram, spectrogram_clean
from models.hifi_gan import Generator
from models.wavenet import WaveNet
from utils.hparams import hparams as hp
def inference(audio_clip):
original_file = audio_clip
save_dir = './uploads'
checkpoint_path = './saved_model/latest_checkpoint.pt'
# Inference is run on the CPU.
# Load checkpoint
checkpoint = torch.load(checkpoint_path, map_location=torch.device('cpu'))
# Initialize the generator model and load the trained weights
model = Generator(wavenet=WaveNet())
model.to('cpu')
model.load_state_dict(checkpoint['generator_state_dict'])
model.eval()
inference_files = [original_file]
with torch.no_grad():
for file in inference_files:
filename = os.path.splitext(os.path.split(file)[1])[0]
x, _ = librosa.load(file, sr=16000, mono=True)
target_length = len(x)
x = torch.tensor(x).to('cpu')
x = data.preprocess_inference_data(x,
hp.inference.batched,
hp.inference.batch_size,
hp.inference.sequence_length,
16000)
# run inference and create spectrograms
y = []
for x_batch in tqdm(x):
spect_orig = np.array(x_batch[0])
spectrogram.create_spectrogram(spect_orig)
clean_temp = model.inference(x_batch)
y.append(clean_temp)
spectrogram_clean.create_spectrogram(np.array(clean_temp))
y = data.postprocess_inference_data(y, hp.inference.batched,
16000)
y = y[:target_length].detach().cpu().numpy()
sf.write(os.path.join(save_dir, f'{filename}_denoised.wav'),
y.astype(np.float32), samplerate=hp.dsp.sample_rate)
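# Illustrative call (the input path is made up; a checkpoint must exist at
# ./saved_model/latest_checkpoint.pt):
#
#     inference('./uploads/noisy_clip.wav')
#     # -> writes ./uploads/noisy_clip_denoised.wav at hp.dsp.sample_rate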
|
from AbstractRequest import AbstractRequest
class SearchRequest(AbstractRequest):
name = ""
sequence = ""
sequence_option = ""
sequence_length = ""
n_terminus_id = ""
c_terminus_id = ""
target_group_id = ""
target_object_id = ""
synthesis_type = ""
kingdom_id = ""
bond_id = ""
unusual_amino_acid_id = ""
author_id = ""
journal_id = ""
article_year = ""
article_title = ""
complexity = "monomer"
target_species_id = ""
non_standart_experimental_conditions = "false"
hemolytic_and_cytotoxic_activities = "false"
def query_type(self):
return "search"
def get_parameters(self):
params = {}
params["complexity"] = self.complexity
params["name"] = self.name
params["sequence"] = self.sequence
params["sequence_option"] = self.sequence_option
params["sequence_length"] = self.sequence_length
params["n_terminus_id"] = self.n_terminus_id
params["c_terminus_id"] = self.c_terminus_id
params["target_group_id"] = self.target_group_id
params["target_object_id"] = self.target_object_id
params["synthesis_type"] = self.synthesis_type
params["kingdom_id"] = self.kingdom_id
params["bond_id"] = self.bond_id
params["unusual_amino_acid_id"] = self.unusual_amino_acid_id
params["author_id"] = self.author_id
params["journal_id"] = self.journal_id
params["article_year"] = self.article_year
params["article_title"] = self.article_title
params["target_species_id"] = self.target_species_id
params["non_standart_experimental_conditions"] = self.non_standart_experimental_conditions
params["hemolytic_and_cytotoxic_activities"] = self.hemolytic_and_cytotoxic_activities
return params
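# Illustrative usage (a sketch only; the field values are made up and
# AbstractRequest is assumed to be instantiable without extra arguments):
#
#     request = SearchRequest()
#     request.name = "defensin"
#     request.sequence = "GIGKF"
#     request.query_type()      # -> "search"
#     request.get_parameters()  # -> {'complexity': 'monomer', 'name': 'defensin', ...}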
|
# TRAINS - Example of Plotly integration and reporting
#
from trains import Task
import plotly.express as px
task = Task.init('examples', 'plotly reporting')
print('reporting plotly figures')
# Iris dataset
df = px.data.iris()
# create complex plotly figure
fig = px.scatter(df, x="sepal_width", y="sepal_length", color="species", marginal_y="rug", marginal_x="histogram")
# report the plotly figure
task.get_logger().report_plotly(title="iris", series="sepal", iteration=0, figure=fig)
print('done')
|
###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2021, John McNamara, jmcnamara@cpan.org
#
from ..excel_comparison_test import ExcelComparisonTest
import codecs
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename('optimize10.xlsx')
self.set_text_file('unicode_polish_utf8.txt')
def test_create_file(self):
"""Test example file converting Unicode text."""
# Open the input file with the correct encoding.
textfile = codecs.open(self.txt_filename, 'r', 'utf-8')
# Create a new Excel file and convert the text data.
workbook = Workbook(self.got_filename, {'constant_memory': True, 'in_memory': False})
worksheet = workbook.add_worksheet()
# Widen the first column to make the text clearer.
worksheet.set_column('A:A', 50)
# Start from the first cell.
row = 0
col = 0
# Read the text file and write it to the worksheet.
for line in textfile:
# Ignore the comments in the sample file.
if line.startswith('#'):
continue
# Write any other lines to the worksheet.
worksheet.write(row, col, line.rstrip("\n"))
row += 1
workbook.close()
textfile.close()
self.assertExcelEqual()
|
# Copyright (c) 2017 Michel Betancourt
#
# This software is released under the MIT License.
# https://opensource.org/licenses/MIT
"""
menu.py
Created by Michel Betancourt on 2017.
Copyright (c) 2017 MIT. All rights reserved.
"""
from math import floor, ceil
from tebless.utils.styles import red
from tebless.devs import Widget, echo
from tebless.utils.keyboard import KEY_DOWN, KEY_UP
__all__ = ['Menu']
class Menu(Widget):
"""Widget show a list of elements.
:param cordx: Position on axis X
:param cordy: Position on axis Y
:param items: Element to show
:param is_menu: Is a menu or only show items
:param limit: Max items to show
:param header: Text header of table
:param footer: Text footer of table
:param selector: A function that return text to show on select
:param width: Width of table
:param empty: Whats show if table is empty
:param key: A function return text of object in list
"""
def __init__(self, items=None, *args, **kwargs):
super().__init__(items=items,
on_key_arrow=self._on_key_arrow,
*args, **kwargs)
self._items = items or []
self._len_items = len(self._items)
self._empty = kwargs.get('empty', ['No items'])
self._is_menu = kwargs.get('is_menu', True)
self._limit = round(kwargs.get('limit', 4))
if 'width' not in kwargs:
self._width = self.term.width
self._header = kwargs.get('header', '')
self._footer = kwargs.get('footer', '')
def selector(text, **kwargs):
return red('| ') + text if self.term.length(text) > 0 else text
self._selector = kwargs.get('selector', selector)
self._key = kwargs.get('key', lambda x: x)
self._formater = kwargs.get(
'formater', lambda text, **kw: ' ' + text[:self._width])
self._page = 1
self._index = 0
self._height = 0
def _on_key_arrow(self, key):
if key.code == KEY_DOWN:
self.index = (self.index + 1) % self._len_items
elif key.code == KEY_UP:
self.index = (self.index - 1) % self._len_items
def paint(self):
self._page = ceil((self._index + 1) / self._limit)
echo(self.term.move(self.y, self.x))
header_height, footer_height = 0, 0
if self._header != '':
header_height = len(self._header.split('\n'))
if self._footer != '':
footer_height = len(self._footer.split('\n'))
items = self.items if self.items else self._empty
first = floor(self._index / self._limit) * self._limit
max_page = ceil(len(items) / self._limit)
items = items[first:self._limit + first]
vars_op = {
'page': self._page,
'last': max_page,
'count': self._len_items
}
# Print header
if self._header != '':
echo(self._header.format(**vars_op) + '\n')
self._height = header_height
# Print elements
for idx, item in enumerate(items):
array_text = self._key(item)
if isinstance(array_text, str):
array_text = [array_text]
for index, text in enumerate(array_text):
echo(self.term.move_x(self.x))
tmp = self._formater(**{
'text': text,
'index': index,
'lenght': len(array_text)
})
pos = self._index % self._limit == idx
if pos and self._is_menu and text != '':
tmp = self._selector(**{
'text': text[:self.width],
'index': pos,
'lenght': len(array_text)
})
tmp += '\n'
self._height += tmp.count('\n')
echo(tmp)
# Print footer
if self._footer != '':
echo(self.term.move_x(self.x))
echo(self._footer.format(**vars_op))
self._height += footer_height
@property
def value(self):
return self.items[self._index]
@property
def index(self):
return self._index
@index.setter
def index(self, value):
self._index = value
self.on_change()
@property
def items(self):
return list(self._items)
@items.setter
def items(self, value):
self._index = 0
self._items = list(value)
self._len_items = len(self._items)
self.on_change()
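# Illustrative construction (a sketch only; in practice the widget is created
# and driven inside the tebless window/event machinery, which lives outside
# this module):
#
#     menu = Menu(items=['alpha', 'beta', 'gamma'],
#                 header='Options ({count})',
#                 footer='page {page}/{last}',
#                 limit=2)
#     menu.index  # -> 0 (first element selected)
#     menu.value  # -> 'alpha'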
|
import os
from typing import Dict
import numpy as np
from utils.log import logger
def write_results(filename, results_dict: Dict, data_type: str):
if not filename:
return
path = os.path.dirname(filename)
if not os.path.exists(path):
os.makedirs(path)
if data_type in ('mot', 'mcmot', 'lab'):
save_format = '{frame},{id},{x1},{y1},{w},{h},1,-1,-1,-1\n'
elif data_type == 'kitti':
save_format = '{frame} {id} pedestrian -1 -1 -10 {x1} {y1} {x2} {y2} -1 -1 -1 -1000 -1000 -1000 -10 {score}\n'
else:
raise ValueError(data_type)
with open(filename, 'w') as f:
for frame_id, frame_data in results_dict.items():
if data_type == 'kitti':
frame_id -= 1
for tlwh, track_id in frame_data:
if track_id < 0:
continue
x1, y1, w, h = tlwh
x2, y2 = x1 + w, y1 + h
line = save_format.format(frame=frame_id, id=track_id, x1=x1, y1=y1, x2=x2, y2=y2, w=w, h=h, score=1.0)
f.write(line)
logger.info('Save results to {}'.format(filename))
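# Expected shape of results_dict for the 'mot'/'mcmot'/'lab' formats (values
# below are made up): each frame id maps to a list of (tlwh, track_id) pairs.
#
#     results_dict = {
#         1: [((561.0, 193.0, 68.0, 175.0), 2)],
#         2: [((565.0, 195.0, 68.0, 175.0), 2)],
#     }
#     write_results('output/MOT16-02.txt', results_dict, data_type='mot')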
def read_results(filename, data_type: str, is_gt=False, is_ignore=False):
if data_type in ('mot', 'lab'):
read_fun = read_mot_results
else:
raise ValueError('Unknown data type: {}'.format(data_type))
return read_fun(filename, is_gt, is_ignore)
"""
labels={'ped', ... % 1
'person_on_vhcl', ... % 2
'car', ... % 3
'bicycle', ... % 4
'mbike', ... % 5
'non_mot_vhcl', ... % 6
'static_person', ... % 7
'distractor', ... % 8
'occluder', ... % 9
'occluder_on_grnd', ... %10
'occluder_full', ... % 11
'reflection', ... % 12
'crowd' ... % 13
};
"""
def read_mot_results(filename, is_gt, is_ignore):
valid_labels = {1}
ignore_labels = {2, 7, 8, 12}
results_dict = dict()
if os.path.isfile(filename):
with open(filename, 'r') as f:
for line in f.readlines():
linelist = line.split(',')
if len(linelist) < 7:
continue
fid = int(linelist[0])
if fid < 1:
continue
results_dict.setdefault(fid, list())
if is_gt:
if 'MOT16-' in filename or 'MOT17-' in filename:
label = int(float(linelist[7]))
mark = int(float(linelist[6]))
if mark == 0 or label not in valid_labels:
continue
score = 1
elif is_ignore:
if 'MOT16-' in filename or 'MOT17-' in filename:
label = int(float(linelist[7]))
vis_ratio = float(linelist[8])
if label not in ignore_labels and vis_ratio >= 0:
continue
else:
continue
score = 1
else:
score = float(linelist[6])
tlwh = tuple(map(float, linelist[2:6]))
target_id = int(linelist[1])
results_dict[fid].append((tlwh, target_id, score))
return results_dict
def unzip_objs(objs):
if len(objs) > 0:
tlwhs, ids, scores = zip(*objs)
else:
tlwhs, ids, scores = [], [], []
tlwhs = np.asarray(tlwhs, dtype=float).reshape(-1, 4)
return tlwhs, ids, scores
|
import rhinoscriptsyntax as rs
from compas.datastructures import Mesh
from compas.geometry import add_vectors
from compas.geometry import centroid_points
from compas.geometry import subtract_vectors
from compas.geometry import mesh_smooth_area
from compas.geometry import mesh_smooth_centroid
from compas_rhino.helpers import mesh_from_guid
from compas_rhino.artists.meshartist import MeshArtist
if __name__ == '__main__':
# select rhino mesh
guid = rs.GetObject("Select mesh", 32)
# create compas mesh object from rhino mesh
mesh = mesh_from_guid(Mesh,guid)
# set vertices on boundary as fixed
fixed = set(mesh.vertices_on_boundary())
# number of iterations
kmax = 20
for k in range(kmax):
updated = {}
# loop over all vertices
for key in mesh.vertices():
pt = mesh.vertex_coordinates(key)
if key in fixed:
# don't alter pt coordinates if fixed
updated[key] = pt
else:
# get neighboring keys
nbrs = mesh.vertex_neighbors(key)
# get neighboring points
pts_nbrs = [mesh.vertex_coordinates(nbr) for nbr in nbrs]
# compute barycenter for neighboring points
cent = centroid_points(pts_nbrs)
# store new coordinates
updated[key] = cent
# update coordinates of all mesh vertices
for key, attr in mesh.vertices(True):
x, y, z = updated[key]
attr['x'] = x
attr['y'] = y
attr['z'] = z
# draw mesh
artist = MeshArtist(mesh, layer='relaxed_mesh_laplacian')
artist.draw()
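# Note: each iteration above moves every free vertex to the barycenter of its
# neighbors, i.e. p_i <- (1 / |N(i)|) * sum_{j in N(i)} p_j, which is the
# classic umbrella (Laplacian) smoothing update with fixed boundary vertices.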
|
import copy
from typing import List, Optional, Tuple
from . import boards, enums, moves
JournalEntry = Tuple[Optional[moves.Move], boards.Board]
class Journal:
"""A journal of all previous Move and Board states."""
def __init__(self, board: boards.Board):
"""
Create a journal.
:param board: a Board to use as the initial state
"""
self._log: List[JournalEntry] = [(None, board.copy())]
@property
def current_turn_number(self) -> int:
"""
Return the current turn number.
The first move returns 1.
:return: the current turn number
"""
return len(self._log)
@property
def current_team(self) -> enums.Team:
"""
Return the current team based on the current turn number.
:return: the current Team
"""
return enums.Team.ONE if self.current_turn_number % 2 != 0 else enums.Team.TWO
@property
def current_board(self) -> boards.Board:
"""
Return a copy of the current board.
:return: the current board
"""
return self._log[-1][1].copy()
def apply(self, move: moves.Move) -> None:
"""
Apply a Move to the current_board and save it to the journal.
This method assumes that the move has been validated.
:param move: a Move
"""
board = self.current_board
board.apply(move)
self._log.append((move, board))
def copy(self) -> "Journal":
"""
Return a deep copy of this journal.
:return: a Journal
"""
return copy.deepcopy(self)
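# Illustrative usage (a sketch; how boards.Board and moves.Move instances are
# constructed depends on the rest of the package):
#
#     journal = Journal(boards.Board())
#     journal.current_turn_number  # -> 1 (no move applied yet)
#     journal.current_team         # -> Team.ONE (odd turn numbers belong to team one)
#     journal.apply(validated_move)
#     journal.current_turn_number  # -> 2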
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2021-03-30 10:49
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('orchestration', '0008_auto_20190805_1134'),
]
operations = [
migrations.RenameField(
model_name='route',
old_name='async',
new_name='run_async',
),
migrations.AlterField(
model_name='route',
name='backend',
field=models.CharField(choices=[('Apache2Traffic', '[M] Apache 2 Traffic'), ('ApacheTrafficByName', '[M] ApacheTrafficByName'), ('DokuWikiMuTraffic', '[M] DokuWiki MU Traffic'), ('DovecotMaildirDisk', '[M] Dovecot Maildir size'), ('Exim4Traffic', '[M] Exim4 traffic'), ('MailmanSubscribers', '[M] Mailman subscribers'), ('MailmanTraffic', '[M] Mailman traffic'), ('MysqlDisk', '[M] MySQL disk'), ('PostfixMailscannerTraffic', '[M] Postfix-Mailscanner traffic'), ('ProxmoxOpenVZTraffic', '[M] ProxmoxOpenVZTraffic'), ('UNIXUserDisk', '[M] UNIX user disk'), ('VsFTPdTraffic', '[M] VsFTPd traffic'), ('WordpressMuTraffic', '[M] Wordpress MU Traffic'), ('NextCloudDiskQuota', '[M] nextCloud SaaS Disk Quota'), ('NextcloudTraffic', '[M] nextCloud SaaS Traffic'), ('OwnCloudDiskQuota', '[M] ownCloud SaaS Disk Quota'), ('OwncloudTraffic', '[M] ownCloud SaaS Traffic'), ('PhpListTraffic', '[M] phpList SaaS Traffic'), ('Apache2Controller', '[S] Apache 2'), ('BSCWController', '[S] BSCW SaaS'), ('Bind9MasterDomainController', '[S] Bind9 master domain'), ('Bind9SlaveDomainController', '[S] Bind9 slave domain'), ('DokuWikiMuController', '[S] DokuWiki multisite'), ('DrupalMuController', '[S] Drupal multisite'), ('GitLabSaaSController', '[S] GitLab SaaS'), ('LetsEncryptController', "[S] Let's encrypt!"), ('LxcController', '[S] LxcController'), ('AutoresponseController', '[S] Mail autoresponse'), ('MailmanController', '[S] Mailman'), ('MailmanVirtualDomainController', '[S] Mailman virtdomain-only'), ('MoodleController', '[S] Moodle'), ('MoodleWWWRootController', '[S] Moodle WWWRoot (required)'), ('MoodleMuController', '[S] Moodle multisite'), ('MySQLController', '[S] MySQL database'), ('MySQLUserController', '[S] MySQL user'), ('PHPController', '[S] PHP FPM/FCGID'), ('PostfixAddressController', '[S] Postfix address'), ('PostfixAddressVirtualDomainController', '[S] Postfix address virtdomain-only'), ('ProxmoxOVZ', '[S] ProxmoxOVZ'), ('uWSGIPythonController', '[S] Python uWSGI'), ('RoundcubeIdentityController', '[S] Roundcube Identity Controller'), ('StaticController', '[S] Static'), ('SymbolicLinkController', '[S] Symbolic link webapp'), ('UNIXUserMaildirController', '[S] UNIX maildir user'), ('UNIXUserController', '[S] UNIX user'), ('WebalizerAppController', '[S] Webalizer App'), ('WebalizerController', '[S] Webalizer Content'), ('WordPressForceSSLController', '[S] WordPress Force SSL'), ('WordPressURLController', '[S] WordPress URL'), ('WordPressController', '[S] Wordpress'), ('WordpressMuController', '[S] Wordpress multisite'), ('NextCloudController', '[S] nextCloud SaaS'), ('OwnCloudController', '[S] ownCloud SaaS'), ('PhpListSaaSController', '[S] phpList SaaS')], max_length=256, verbose_name='backend'),
),
]
|
"""Tests for module bregman on OT with bregman projections """
# Author: Remi Flamary <remi.flamary@unice.fr>
# Kilian Fatras <kilian.fatras@irisa.fr>
#
# License: MIT License
import numpy as np
import ot
import pytest
def test_sinkhorn():
# test sinkhorn
n = 100
rng = np.random.RandomState(0)
x = rng.randn(n, 2)
u = ot.utils.unif(n)
M = ot.dist(x, x)
G = ot.sinkhorn(u, u, M, 1, stopThr=1e-10)
# check constraints
np.testing.assert_allclose(
u, G.sum(1), atol=1e-05) # cf convergence sinkhorn
np.testing.assert_allclose(
u, G.sum(0), atol=1e-05) # cf convergence sinkhorn
def test_sinkhorn_empty():
# test sinkhorn
n = 100
rng = np.random.RandomState(0)
x = rng.randn(n, 2)
u = ot.utils.unif(n)
M = ot.dist(x, x)
G, log = ot.sinkhorn([], [], M, 1, stopThr=1e-10, verbose=True, log=True)
# check constraints
np.testing.assert_allclose(u, G.sum(1), atol=1e-05)
np.testing.assert_allclose(u, G.sum(0), atol=1e-05)
G, log = ot.sinkhorn([], [], M, 1, stopThr=1e-10,
method='sinkhorn_stabilized', verbose=True, log=True)
# check constraints
np.testing.assert_allclose(u, G.sum(1), atol=1e-05)
np.testing.assert_allclose(u, G.sum(0), atol=1e-05)
G, log = ot.sinkhorn(
[], [], M, 1, stopThr=1e-10, method='sinkhorn_epsilon_scaling',
verbose=True, log=True)
# check constraints
np.testing.assert_allclose(u, G.sum(1), atol=1e-05)
np.testing.assert_allclose(u, G.sum(0), atol=1e-05)
def test_sinkhorn_variants():
# test sinkhorn
n = 100
rng = np.random.RandomState(0)
x = rng.randn(n, 2)
u = ot.utils.unif(n)
M = ot.dist(x, x)
G0 = ot.sinkhorn(u, u, M, 1, method='sinkhorn', stopThr=1e-10)
Gs = ot.sinkhorn(u, u, M, 1, method='sinkhorn_stabilized', stopThr=1e-10)
Ges = ot.sinkhorn(
u, u, M, 1, method='sinkhorn_epsilon_scaling', stopThr=1e-10)
G_green = ot.sinkhorn(u, u, M, 1, method='greenkhorn', stopThr=1e-10)
# check values
np.testing.assert_allclose(G0, Gs, atol=1e-05)
np.testing.assert_allclose(G0, Ges, atol=1e-05)
np.testing.assert_allclose(G0, G_green, atol=1e-5)
print(G0, G_green)
def test_sinkhorn_variants_log():
# test sinkhorn
n = 100
rng = np.random.RandomState(0)
x = rng.randn(n, 2)
u = ot.utils.unif(n)
M = ot.dist(x, x)
G0, log0 = ot.sinkhorn(u, u, M, 1, method='sinkhorn', stopThr=1e-10, log=True)
Gs, logs = ot.sinkhorn(u, u, M, 1, method='sinkhorn_stabilized', stopThr=1e-10, log=True)
Ges, loges = ot.sinkhorn(
u, u, M, 1, method='sinkhorn_epsilon_scaling', stopThr=1e-10, log=True)
G_green, loggreen = ot.sinkhorn(u, u, M, 1, method='greenkhorn', stopThr=1e-10, log=True)
# check values
np.testing.assert_allclose(G0, Gs, atol=1e-05)
np.testing.assert_allclose(G0, Ges, atol=1e-05)
np.testing.assert_allclose(G0, G_green, atol=1e-5)
print(G0, G_green)
@pytest.mark.parametrize("method", ["sinkhorn", "sinkhorn_stabilized"])
def test_barycenter(method):
n_bins = 100 # nb bins
# Gaussian distributions
a1 = ot.datasets.make_1D_gauss(n_bins, m=30, s=10) # m= mean, s= std
a2 = ot.datasets.make_1D_gauss(n_bins, m=40, s=10)
# creating matrix A containing all distributions
A = np.vstack((a1, a2)).T
# loss matrix + normalization
M = ot.utils.dist0(n_bins)
M /= M.max()
alpha = 0.5 # 0<=alpha<=1
weights = np.array([1 - alpha, alpha])
# wasserstein
reg = 1e-2
bary_wass = ot.bregman.barycenter(A, M, reg, weights, method=method)
np.testing.assert_allclose(1, np.sum(bary_wass))
ot.bregman.barycenter(A, M, reg, log=True, verbose=True)
def test_barycenter_stabilization():
n_bins = 100 # nb bins
# Gaussian distributions
a1 = ot.datasets.make_1D_gauss(n_bins, m=30, s=10) # m= mean, s= std
a2 = ot.datasets.make_1D_gauss(n_bins, m=40, s=10)
# creating matrix A containing all distributions
A = np.vstack((a1, a2)).T
# loss matrix + normalization
M = ot.utils.dist0(n_bins)
M /= M.max()
alpha = 0.5 # 0<=alpha<=1
weights = np.array([1 - alpha, alpha])
# wasserstein
reg = 1e-2
bar_stable = ot.bregman.barycenter(A, M, reg, weights,
method="sinkhorn_stabilized",
stopThr=1e-8)
bar = ot.bregman.barycenter(A, M, reg, weights, method="sinkhorn",
stopThr=1e-8)
np.testing.assert_allclose(bar, bar_stable)
def test_wasserstein_bary_2d():
size = 100 # size of a square image
a1 = np.random.randn(size, size)
a1 -= a1.min()
a1 = a1 / np.sum(a1)
a2 = np.random.randn(size, size)
a2 -= a2.min()
a2 = a2 / np.sum(a2)
# creating matrix A containing all distributions
A = np.zeros((2, size, size))
A[0, :, :] = a1
A[1, :, :] = a2
# wasserstein
reg = 1e-2
bary_wass = ot.bregman.convolutional_barycenter2d(A, reg)
np.testing.assert_allclose(1, np.sum(bary_wass))
# help in checking if log and verbose do not bug the function
ot.bregman.convolutional_barycenter2d(A, reg, log=True, verbose=True)
def test_unmix():
n_bins = 50 # nb bins
# Gaussian distributions
a1 = ot.datasets.make_1D_gauss(n_bins, m=20, s=10) # m= mean, s= std
a2 = ot.datasets.make_1D_gauss(n_bins, m=40, s=10)
a = ot.datasets.make_1D_gauss(n_bins, m=30, s=10)
# creating matrix A containing all distributions
D = np.vstack((a1, a2)).T
# loss matrix + normalization
M = ot.utils.dist0(n_bins)
M /= M.max()
M0 = ot.utils.dist0(2)
M0 /= M0.max()
h0 = ot.unif(2)
# wasserstein
reg = 1e-3
um = ot.bregman.unmix(a, D, M, M0, h0, reg, 1, alpha=0.01,)
np.testing.assert_allclose(1, np.sum(um), rtol=1e-03, atol=1e-03)
np.testing.assert_allclose([0.5, 0.5], um, rtol=1e-03, atol=1e-03)
ot.bregman.unmix(a, D, M, M0, h0, reg,
1, alpha=0.01, log=True, verbose=True)
def test_empirical_sinkhorn():
# test sinkhorn
n = 100
a = ot.unif(n)
b = ot.unif(n)
X_s = np.reshape(np.arange(n), (n, 1))
X_t = np.reshape(np.arange(0, n), (n, 1))
M = ot.dist(X_s, X_t)
M_m = ot.dist(X_s, X_t, metric='minkowski')
G_sqe = ot.bregman.empirical_sinkhorn(X_s, X_t, 1)
sinkhorn_sqe = ot.sinkhorn(a, b, M, 1)
G_log, log_es = ot.bregman.empirical_sinkhorn(X_s, X_t, 0.1, log=True)
sinkhorn_log, log_s = ot.sinkhorn(a, b, M, 0.1, log=True)
G_m = ot.bregman.empirical_sinkhorn(X_s, X_t, 1, metric='minkowski')
sinkhorn_m = ot.sinkhorn(a, b, M_m, 1)
loss_emp_sinkhorn = ot.bregman.empirical_sinkhorn2(X_s, X_t, 1)
loss_sinkhorn = ot.sinkhorn2(a, b, M, 1)
# check constraints
np.testing.assert_allclose(
sinkhorn_sqe.sum(1), G_sqe.sum(1), atol=1e-05) # metric sqeuclidean
np.testing.assert_allclose(
sinkhorn_sqe.sum(0), G_sqe.sum(0), atol=1e-05) # metric sqeuclidean
np.testing.assert_allclose(
sinkhorn_log.sum(1), G_log.sum(1), atol=1e-05) # log
np.testing.assert_allclose(
sinkhorn_log.sum(0), G_log.sum(0), atol=1e-05) # log
np.testing.assert_allclose(
sinkhorn_m.sum(1), G_m.sum(1), atol=1e-05) # metric minkowski
np.testing.assert_allclose(
sinkhorn_m.sum(0), G_m.sum(0), atol=1e-05) # metric minkowski
np.testing.assert_allclose(loss_emp_sinkhorn, loss_sinkhorn, atol=1e-05)
def test_empirical_sinkhorn_divergence():
# Test sinkhorn divergence
n = 10
a = ot.unif(n)
b = ot.unif(n)
X_s = np.reshape(np.arange(n), (n, 1))
X_t = np.reshape(np.arange(0, n * 2, 2), (n, 1))
M = ot.dist(X_s, X_t)
M_s = ot.dist(X_s, X_s)
M_t = ot.dist(X_t, X_t)
emp_sinkhorn_div = ot.bregman.empirical_sinkhorn_divergence(X_s, X_t, 1)
sinkhorn_div = (ot.sinkhorn2(a, b, M, 1) - 1 / 2 * ot.sinkhorn2(a, a, M_s, 1) - 1 / 2 * ot.sinkhorn2(b, b, M_t, 1))
emp_sinkhorn_div_log, log_es = ot.bregman.empirical_sinkhorn_divergence(X_s, X_t, 1, log=True)
sink_div_log_ab, log_s_ab = ot.sinkhorn2(a, b, M, 1, log=True)
sink_div_log_a, log_s_a = ot.sinkhorn2(a, a, M_s, 1, log=True)
sink_div_log_b, log_s_b = ot.sinkhorn2(b, b, M_t, 1, log=True)
sink_div_log = sink_div_log_ab - 1 / 2 * (sink_div_log_a + sink_div_log_b)
# check constraints
np.testing.assert_allclose(
emp_sinkhorn_div, sinkhorn_div, atol=1e-05) # cf conv emp sinkhorn
np.testing.assert_allclose(
emp_sinkhorn_div_log, sink_div_log, atol=1e-05) # cf conv emp sinkhorn
def test_stabilized_vs_sinkhorn_multidim():
# test if stable version matches sinkhorn
# for multidimensional inputs
n = 100
# Gaussian distributions
a = ot.datasets.make_1D_gauss(n, m=20, s=5) # m= mean, s= std
b1 = ot.datasets.make_1D_gauss(n, m=60, s=8)
b2 = ot.datasets.make_1D_gauss(n, m=30, s=4)
# creating matrix A containing all distributions
b = np.vstack((b1, b2)).T
M = ot.utils.dist0(n)
M /= np.median(M)
epsilon = 0.1
G, log = ot.bregman.sinkhorn(a, b, M, reg=epsilon,
method="sinkhorn_stabilized",
log=True)
G2, log2 = ot.bregman.sinkhorn(a, b, M, epsilon,
method="sinkhorn", log=True)
np.testing.assert_allclose(G, G2)
def test_implemented_methods():
IMPLEMENTED_METHODS = ['sinkhorn', 'sinkhorn_stabilized']
ONLY_1D_methods = ['greenkhorn', 'sinkhorn_epsilon_scaling']
NOT_VALID_TOKENS = ['foo']
# test generalized sinkhorn for unbalanced OT barycenter
n = 3
rng = np.random.RandomState(42)
x = rng.randn(n, 2)
a = ot.utils.unif(n)
# make dists unbalanced
b = ot.utils.unif(n)
A = rng.rand(n, 2)
M = ot.dist(x, x)
epsilon = 1.
for method in IMPLEMENTED_METHODS:
ot.bregman.sinkhorn(a, b, M, epsilon, method=method)
ot.bregman.sinkhorn2(a, b, M, epsilon, method=method)
ot.bregman.barycenter(A, M, reg=epsilon, method=method)
with pytest.raises(ValueError):
for method in set(NOT_VALID_TOKENS):
ot.bregman.sinkhorn(a, b, M, epsilon, method=method)
ot.bregman.sinkhorn2(a, b, M, epsilon, method=method)
ot.bregman.barycenter(A, M, reg=epsilon, method=method)
for method in ONLY_1D_methods:
ot.bregman.sinkhorn(a, b, M, epsilon, method=method)
with pytest.raises(ValueError):
ot.bregman.sinkhorn2(a, b, M, epsilon, method=method)
|
from django.shortcuts import render, get_object_or_404
from .models import Post, Comment
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from .forms import EmailPostForm, CommentForm, SearchForm
from django.core.mail import send_mail
from taggit.models import Tag
from django.db.models import Count
from django.contrib.postgres.search import (SearchVector, SearchQuery, SearchRank)
from django.contrib.auth.decorators import login_required
from django.contrib.auth.mixins import LoginRequiredMixin
from django.views.generic import ListView, FormView, CreateView, UpdateView, DeleteView
from django.urls import reverse_lazy
from django.utils.text import slugify
@login_required()
def post_list(request, tag_slug=None):
#posts = Post.published.all()
object_list = Post.published.all()
tag = None
if tag_slug:
tag = get_object_or_404(Tag, slug=tag_slug)
object_list = object_list.filter(tags__in=[tag])
paginator = Paginator(object_list, 2)
page = request.GET.get('page')
try:
posts = paginator.page(page)  # ask the paginator for the requested page
except PageNotAnInteger:
posts = paginator.page(1)  # if the page parameter is not an integer, deliver the first page
except EmptyPage:
posts = paginator.page(paginator.num_pages)
return render(request, 'blog/post/list.html', {'posts': posts, 'page': page, 'tag': tag})  # return an HTTP response
class PostListView(LoginRequiredMixin, ListView):
queryset = Post.published.all()
context_object_name = 'posts'
paginate_by = 2
template_name = 'blog/post/list.html'
def get_queryset(self):
qs = super().get_queryset()
tag_slug = self.kwargs.get('tag_slug')
if tag_slug:
tag = get_object_or_404(Tag, slug=tag_slug)
qs = qs.filter(tags__in=[tag])
self.tag = tag
else:
self.tag = None
return qs
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
if self.tag:
context['tag'] = self.tag
return context
@login_required()
def post_detail(request, year, month, day, post):
post = get_object_or_404(Post, slug=post, status='published', publish__year=year, publish__month=month, publish__day=day)
comments = post.comments.filter(active=True)
new_comment = None
if request.method == 'POST':
comment_form = CommentForm(data=request.POST)
if comment_form.is_valid():
new_comment = comment_form.save(commit=False)
new_comment.post = post
new_comment.save()
else:
comment_form = CommentForm()
post_tags_ids = post.tags.values_list('id', flat=True)  # get a flat list of the post's tag ids, e.g. (1, 2, 3)
similar_posts = Post.published.filter(tags__in=post_tags_ids).exclude(id=post.id)
similar_posts = similar_posts.annotate(same_tags=Count('tags')).order_by('-same_tags', '-publish')[:4]
return render(request, 'blog/post/detail.html', {'post': post, 'comments': comments, 'new_comment': new_comment, 'comment_form': comment_form, 'similar_posts': similar_posts})
class PostDetailView(LoginRequiredMixin, FormView):
form_class = CommentForm
template_name = 'blog/post/detail.html'
def get_initial(self):
pk = self.kwargs.get('pk')
slug = self.kwargs.get('slug')
self.post = get_object_or_404(Post, pk=pk, slug=slug)
self.comments = self.post.comments.filter(active=True)
self.new_comment = None
post_tags_ids = self.post.tags.values_list('id', flat=True)  # get a flat list of the post's tag ids, e.g. (1, 2, 3)
similar_posts = Post.published.filter(tags__in=post_tags_ids).exclude(id=self.post.id)
self.similar_posts = similar_posts.annotate(same_tags=Count('tags')).order_by('-same_tags', '-publish')[:4]
return super().get_initial()
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['post'] = self.post
context['comments'] = self.comments
context['similar_posts'] = self.similar_posts
return context
def form_valid(self, form):
new_comment = form.save(commit=False)
new_comment.post = self.post
new_comment.save()
context = self.get_context_data()
context['new_comment'] = new_comment
return render(self.request, self.template_name, context=context)
@login_required()
def post_share(request, post_id):
post = get_object_or_404(Post, id=post_id, status='published')
sent = False
if request.method == 'POST':
form = EmailPostForm(request.POST)  # bound form with the submitted data
if form.is_valid():
cd = form.cleaned_data
post_url = request.build_absolute_uri(post.get_absolute_url())
subject = f"{cd['name']} recommends you read {post.title}" #f itu untuk format bisa terima variable name, title
message = (f"Read {post.title} at {post_url}\n\n"
f"{cd['name']} comments: {cd['comments']}")
send_mail(subject, message, 'django.patronus@gmail.com', [cd['to'],])
sent = True
else:
form = EmailPostForm()  # new empty form
return render(request, 'blog/post/share.html', { 'post' : post, 'form' : form, 'sent' : sent })
@login_required()
def post_search(request):
form = SearchForm()
query = None
results = []
if 'query' in request.GET:
form = SearchForm(request.GET)
if form.is_valid():
query = form.cleaned_data['query']
# results = Post.published.annotate(search=SearchVector('title', 'body')).filter(search=query)  # SearchVector allows searching across several fields at once
search_vector = SearchVector('title', 'body')
search_query = SearchQuery(query)
results = Post.published.annotate(search=search_vector, rank=SearchRank(search_vector, search_query)).filter(search=search_query).order_by('-rank')
return render(request, 'blog/post/search.html', {'form': form, 'query': query, 'results': results})
class PostCreateView(LoginRequiredMixin, CreateView):
model = Post
fields = ['title', 'body', 'tags']
template_name = 'blog/post/post_form.html'
def form_valid(self, form):
form.instance.author = self.request.user
form.instance.status = 'published'
form.instance.slug = slugify(form.instance.title, allow_unicode=True)
return super().form_valid(form)
class PostUpdateView(LoginRequiredMixin, UpdateView):
model = Post
fields = ['title', 'body', 'tags']
template_name = 'blog/post/post_form.html'
query_pk_and_slug = True
def get_queryset(self):
qs = super().get_queryset()
return qs.filter(author = self.request.user)
def form_valid(self, form):
form.instance.slug = slugify(form.instance.title, allow_unicode=True)
return super().form_valid(form)
class PostDeleteView(LoginRequiredMixin, DeleteView):
model = Post
template_name = 'blog/post/post_confirm_delete.html'
success_url = reverse_lazy('blog:post_list')
query_pk_and_slug = True
def get_queryset(self):
qs = super().get_queryset()
return qs.filter(author = self.request.user)
|
## -
## - asSeq2Int
## -
## - Created by Chris Ng 20201214
## - input the user SoI here: - ##
## - post-CIBR version will prompt the user in the terminal for an SoI
user_input_SoI = "mxxt"
## - The following can be used to test the code
## SxSxSSXXSXSS
## s.sXs
## S.s
## m.xT
## - #### Initialize empty lists
lst_rawFASTA = []
proteinSeqOnly = []
lst_FASTAentry = []
lst_masterFASTA = []
lst_results = [['Protein Name', 'Sequence Hit', 'Position of Hit']]
## - #### Part 1
## - Part 1 reformat the FASTA file into list of list for Part 2
## - It first opens the file
with open('dummy1.FASTA') as FASTA_file: #Actual code will ask for the name of the .FASTA
FASTA_data = FASTA_file.read()
## - File can close now
## - Splits the file into a list using "*\n>" as the delimiter
## - lst_rawFASTA is a list of each protein entry, containing protein name, sequence, etc, in a single string
lst_rawFASTA = FASTA_data.split('*\n>')
## - relevant info will be pulled out from each protein entry
## - We are interested in the protein name and protein sequence
for protein_entry in lst_rawFASTA:
## - The protein name is pulled from each FASTA entry and saved into FASTAproteinNames
## - This is done by indexing from 0 to the first comma (which delimits name from the next entry)
FASTAproteinNames = protein_entry[0: protein_entry.find(',')]
## - Next, the protein sequence is pulled from protein_entry. Reformatting is required.
## - The protein sequence in protein_entry is separated by "\n"
## - First, '\n' is replaced with ", "
FASTAprotein_comma = protein_entry.replace("\n", ", ")
## - Next, the beginning of the protein sequence (and end of the previous item) is marked by '*$'
for_protein_seqjoin = FASTAprotein_comma.replace('\",', '*$')
## - Then, only the string with sequence info is saved into proteinSeqOnly
## - This is done by indexing from where ("*$" is located + 3) till the end of the string
proteinSeqOnly = for_protein_seqjoin[for_protein_seqjoin.find("*$")+3:]
## - Finally, the protein sequence is joined together by removing ', '
## - proteinSeqFinal is the protein sequence of each protein entry
proteinSeqFinal = proteinSeqOnly.replace(', ', '')
## - Now, we put protein name and protein sequence together into a list for each entry
## - First, the protein names are separated into genomic name, common name, and ID name
lst_FASTAentry = FASTAproteinNames.split(' ')
## - Next, the protein sequence is appended to end of the names.
lst_FASTAentry.append (proteinSeqFinal)
## - Finally, the list for the entry is appended to the growing list of protein entries from the FASTA.
lst_masterFASTA.append (lst_FASTAentry)
## - At this point, lst_masterFASTA contains artifacts from the reformatting above.
## - Namely, ">" remains at the start of the very first field and "*" at the end of the very last field.
## - These artifacts are removed in the following 2 lines.
(lst_masterFASTA[0])[0] = (lst_masterFASTA[0])[0].replace('>', '')
(lst_masterFASTA[-1])[-1] = (lst_masterFASTA[-1])[-1].replace('*', '')
# print(lst_masterFASTA)
## - This list of sublists of protein entries will be used in Part 2.
## - This list will be searched for the user-input SoI to output the name of proteins containing the SoI
## - #### Part 2
## - Part 2 will search the lst_masterFASTA from Part 1 for the user-input SoI
## - Regular Expression (re) is imported and will be used for the search.
import re
## - the user-input SoI is uppercased with .upper(), in case the user typed it in lowercase
upper_SoI = user_input_SoI.upper()
## - Then, if the user used x to denote "any", it is replaced by "." (the RegEx wildcard)
regex_SoI = upper_SoI.replace("X", ".")
## - the reformatted user input is then wrapped in RegEx pattern-match syntax
pattern = '^' + regex_SoI + "$"
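## - For example (illustrative values): with user_input_SoI = "mxxt", upper_SoI is "MXXT",
## - regex_SoI becomes "M..T", and pattern becomes "^M..T$", which re.match later
## - compares against each 4-character block of a protein sequence.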
## - The protein sequence in each protein entry in lst_masterFASTA will be iterated through, searching for the SoI
for protein_entry in lst_masterFASTA:
## protein_entry looks like:
#protein_entry = ['YAL001C', 'TFC3', 'SGDID:S000000001', 'MVLTIYPDELVQIVSDKI..."
protein_seq = protein_entry[3]
## - The following cuts each protein sequence into blocks with the length of the input SoI
for i in range(len(protein_seq)-(len(user_input_SoI)-1)):
protein_sequence_block = protein_seq[i:(i + len(user_input_SoI))]
# The following commented code checks to see if I cut the blocks correctly
# print(protein_seq[i:(i+len(pattern)-2)])
result = re.match(pattern, protein_sequence_block)
## - If a protein sequence block matches the user-input sequence, it will print:
if result:
print(upper_SoI + " found in:")
print(protein_entry[1]) # the protein name and ...
print("as " + protein_seq[i:(i + len(pattern) - 2)] + ' which starts at position: ' + str(i+1) + "\n")
# The following appends to a list that will be used to populate the output .txt file
lst_results.append([protein_entry[1], protein_sequence_block, str(i+1)])
## - the actual sequence it matched, and which amino acid position the match started in the protein sequence
else:
continue
## - lst_results is then written into temp_results.csv with csv module
import csv
with open('temp_results.csv', 'w', newline='') as temp_results_file:
writer = csv.writer(temp_results_file)
writer.writerows(lst_results)
## - #### The .py code is over for the purposes of the CIBR final project
## - #### Next, temp_results.csv will be copied and renamed to the date-time.csv
## - #### Finally, this renamed csv will be rsync'ed
## - #### (currently set to a temp_dir in the current dir, but can be changed to a local dir).
|
# Copyright 2018 Tensorforce Team. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import tensorflow as tf
from tensorforce import TensorforceError, util
from tensorforce.core import Module
class Optimizer(Module):
"""
Base class for optimizers which minimize a not yet further specified expression, usually some
kind of loss function. More generally, an optimizer can be considered as some method of
updating a set of variables.
"""
def __init__(self, name, summary_labels=None):
super().__init__(name=name, l2_regularization=0.0, summary_labels=summary_labels)
def tf_step(self, variables, **kwargs):
"""
Creates the TensorFlow operations for performing an optimization step on the given
variables, including actually changing the values of the variables.
Args:
variables: List of variables to optimize.
**kwargs: Additional arguments depending on the specific optimizer implementation.
For instance, often includes `fn_loss` if a loss function is optimized.
Returns:
List of delta tensors corresponding to the updates for each optimized variable.
"""
raise NotImplementedError
def tf_apply_step(self, variables, deltas):
"""
Applies the given (and already calculated) step deltas to the variable values.
Args:
variables: List of variables.
deltas: List of deltas of same length.
Returns:
The step-applied operation. A tf.group of tf.assign_add ops.
"""
if len(variables) != len(deltas):
raise TensorforceError("Invalid variables and deltas lists.")
assignments = list()
for variable, delta in zip(variables, deltas):
assignments.append(tf.assign_add(ref=variable, value=delta))
with tf.control_dependencies(control_inputs=assignments):
return util.no_operation()
def tf_minimize(self, variables, **kwargs):
"""
Performs an optimization step.
Args:
variables: List of variables to optimize.
**kwargs: Additional optimizer-specific arguments. The following arguments are used
by some optimizers:
- arguments: Dict of arguments for callables, like fn_loss.
- fn_loss: A callable returning the loss of the current model.
- fn_reference: A callable returning the reference values, in case of a comparative
loss.
- fn_kl_divergence: A callable returning the KL-divergence relative to the
current model.
- sampled_loss: A sampled loss (integer).
- return_estimated_improvement: Returns the estimated improvement resulting from
the natural gradient calculation if true.
- source_variables: List of source variables to synchronize with.
- global_variables: List of global variables to apply the proposed optimization
step to.
Returns:
The optimization operation.
"""
deltas = self.step(variables=variables, **kwargs)
for n in range(len(variables)):
name = variables[n].name
if name[-2:] != ':0':
raise TensorforceError.unexpected()
deltas[n] = self.add_summary(
label=('updates', 'updates-full'), name=(name[:-2] + '-update'), tensor=deltas[n],
mean_variance=True
)
deltas[n] = self.add_summary(
label='updates-full', name=(name[:-2] + '-update'), tensor=deltas[n]
)
with tf.control_dependencies(control_inputs=deltas):
return util.no_operation()
def add_variable(self, name, dtype, shape, is_trainable=False, initializer='zeros'):
if is_trainable:
raise TensorforceError("Invalid trainable variable.")
return super().add_variable(
name=name, dtype=dtype, shape=shape, is_trainable=is_trainable, initializer=initializer
)
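# A minimal sketch (not part of Tensorforce itself) of how a concrete optimizer could subclass
# Optimizer by implementing tf_step with plain gradient descent. The class name, the
# `learning_rate` argument and the use of tf.gradients are illustrative assumptions, not the
# library's actual built-in optimizer.
class SketchGradientDescent(Optimizer):
    def __init__(self, name, learning_rate, summary_labels=None):
        super().__init__(name=name, summary_labels=summary_labels)
        self.learning_rate = learning_rate

    def tf_step(self, variables, arguments, fn_loss, **kwargs):
        # Differentiate the loss w.r.t. the variables and scale by the learning rate.
        loss = fn_loss(**arguments)
        gradients = tf.gradients(ys=loss, xs=variables)
        deltas = [-self.learning_rate * gradient for gradient in gradients]
        # Apply the deltas via the base-class helper, then return them.
        applied = self.apply_step(variables=variables, deltas=deltas)
        with tf.control_dependencies(control_inputs=(applied,)):
            return [tf.identity(delta) for delta in deltas]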
|
#!/usr/bin/env python3
import pandas as pd
import sys
input_file = sys.argv[1]
output_file = sys.argv[2]
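# Read the input CSV, strip the '$' sign from the Cost column so it can be compared numerically,
# then keep rows whose Supplier Name contains 'Z' or whose Cost exceeds 600.0 and write them
# to the output CSV.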
data_frame = pd.read_csv(input_file)
data_frame['Cost'] = data_frame['Cost'].str.strip('$').astype(float)
data_frame_value_meets_condition = data_frame.loc[(data_frame['Supplier Name']\
.str.contains('Z')) | (data_frame['Cost'] > 600.0), :]
data_frame_value_meets_condition.to_csv(output_file, index=False)
|
import os
import random
import shutil
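# Split each SuperGLUE task's train.jsonl into a new train/dev pair by holding out
# TASK_DEV_SIZES[task] randomly sampled lines as val.jsonl; the task's original val.jsonl
# is copied over as test.jsonl under split_data/<task>/.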
# TASK_DEV_SIZES = {"boolq": 500, "cb": 50, "copa": 50, "multirc": 50, "record": 7500, "rte": 250, "wic": 100, "wsc": 50}
TASK_DEV_SIZES = {"BoolQ": 500, "CB": 50, "COPA": 50, "MultiRC": 50, "ReCoRD": 7500, "RTE": 250, "WiC": 100, "WSC": 50}
def file_len(fname):
count = 0
with open(fname) as file:
for line in file:
if not line:
break
else:
count += 1
return count
if __name__ == "__main__":
for task_name, size in TASK_DEV_SIZES.items():
try:
os.makedirs(os.path.join("split_data", task_name.lower()))
except FileExistsError:
pass
train_file_path = os.path.join("data", task_name, "train.jsonl")
test_file_path = os.path.join("data", task_name, "val.jsonl")
new_train_file_path = os.path.join("split_data", task_name.lower(), "train.jsonl")
dev_file_path = os.path.join("split_data", task_name.lower(), "val.jsonl")
new_test_file_path = os.path.join("split_data", task_name.lower(), "test.jsonl")
total_lines = file_len(train_file_path)
print(f"{task_name}: {size} out of {total_lines}")
indexes = list(range(total_lines))
dev_indices = random.sample(indexes, size)
with open(train_file_path, encoding="utf8") as f, open(new_train_file_path, 'w', encoding="utf8") as g, open(
dev_file_path, 'w', encoding="utf8") as h:
for i, line in enumerate(f):
if i in dev_indices:
h.write(line)
else:
g.write(line)
shutil.copy(test_file_path, new_test_file_path)
|
# -*- coding: utf-8 -*-
from PyQt5 import QtWidgets, QtCore
from PyQt5.QtCore import Qt
from PyQt5.QtGui import QCursor
from PyQt5.QtWidgets import QVBoxLayout, QToolBar, QSplitter
from src.constant.conn_dialog_constant import ADD_CONN_MENU, EDIT_CONN_MENU
from src.constant.main_constant import LOCATION_BUTTON, TAB_ID_SEPARATOR, TREE_TOP_TEXT
from src.function.db.conn_sqlite import Connection
from src.ui.async_func.async_reopen_item import AsyncReopenManager
from src.ui.button.label_button import LabelButton
from src.ui.dialog.conn.conn_dialog import ConnDialog
from src.ui.func.common import keep_center, close_sqlite
from src.ui.func.menu_bar import fill_menu_bar
from src.ui.func.tool_bar import fill_tool_bar
from src.ui.func.tree import tree_node_factory, Context
from src.ui.tab.tab_bar import MyTabBar
from src.ui.tab.tab_widget import MyTabWidget
from src.ui.tree.my_tree import MyTreeWidget
_author_ = 'luwt'
_date_ = '2021/10/31 17:39'
class MainWindow(QtWidgets.QMainWindow):
def __init__(self, screen_rect):
super().__init__()
# Resolution of the current screen
self.desktop_screen_rect = screen_rect
# Create the main widget that holds all content
self.main_widget = QtWidgets.QWidget()
# Layout inside the main widget
self.main_layout = QVBoxLayout()
# Central widget
self.central_widget = QtWidgets.QWidget()
self.central_widget.setObjectName("central_widget")
# The central widget uses a horizontal layout
self.horizontalLayout = QtWidgets.QHBoxLayout(self.central_widget)
self.horizontalLayout.setObjectName("horizontalLayout")
self.main_splitter = QSplitter()
self.main_splitter.setOrientation(Qt.Horizontal)
self.main_splitter.setObjectName("main_splitter")
self.horizontalLayout.addWidget(self.main_splitter)
self.horizontalLayout.setSpacing(0)
# Frame for the tree structure on the left
self.tree_frame = QtWidgets.QFrame(self.main_splitter)
self.tree_frame.setObjectName("tree_frame")
self.tree_layout = QtWidgets.QVBoxLayout(self.tree_frame)
self.tree_layout.setObjectName("tree_layout")
self.tree_layout.setSpacing(0)
self.tree_layout.setContentsMargins(0, 0, 0, 0)
# Toolbar at the top of the tree
self.tree_header_widget = QtWidgets.QWidget(self.tree_frame)
self.tree_layout.addWidget(self.tree_header_widget)
self.tree_header_layout = QtWidgets.QGridLayout(self.tree_header_widget)
# Set the left, right and top margins to 0
self.tree_header_layout.setContentsMargins(0, 0, 0, self.tree_header_layout.contentsMargins().bottom())
# Tree title
self.tree_tool_header = QtWidgets.QLabel(self.tree_header_widget)
self.tree_tool_header.setText(TREE_TOP_TEXT)
self.tree_header_layout.addWidget(self.tree_tool_header, 0, 0, 1, 8)
# Locate button
self.tree_tool_location_button = LabelButton(self.tree_header_widget)
self.tree_tool_location_button.setObjectName("tree_tool_location_button")
# Disabled by default
self.tree_tool_location_button.setEnabled(False)
self.tree_header_layout.addWidget(self.tree_tool_location_button, 0, 8, 1, 1)
# Tree structure on the left
self.tree_widget = MyTreeWidget(self.tree_frame, self)
self.tree_widget.setObjectName("tree_widget")
self.tree_widget.headerItem().setHidden(True)
self.tree_layout.addWidget(self.tree_widget)
# Frame for the tab area on the right
self.tab_frame = QtWidgets.QFrame(self.main_splitter)
self.tab_frame.setObjectName("tab_frame")
self.tab_layout = QtWidgets.QVBoxLayout(self.tab_frame)
self.tab_layout.setObjectName("tab_layout")
# Tab area on the right
self.tab_widget = MyTabWidget(self.tab_frame, main_window=self)
self.tab_widget.setObjectName("tab_widget")
self.tab_bar = MyTabBar(self.tab_widget)
self.tab_bar.setObjectName("tab_bar")
self.tab_widget.setTabBar(self.tab_bar)
self.tab_layout.addWidget(self.tab_widget)
self.tab_layout.setSpacing(0)
self.tab_layout.setContentsMargins(0, 0, 0, 0)
# Menu bar
self.menubar = QtWidgets.QMenuBar(self)
self.menubar.setObjectName("menubar")
self.setMenuBar(self.menubar)
# Tool bar
self.toolBar = QToolBar(self)
self.toolBar.setObjectName("toolBar")
self.addToolBar(Qt.TopToolBarArea, self.toolBar)
# Status bar
self.statusbar = QtWidgets.QStatusBar(self)
self.statusbar.setObjectName("statusbar")
self.setStatusBar(self.statusbar)
self._translate = QtCore.QCoreApplication.translate
self.reopen_manager = ...
self.setup_ui()
self.translate_ui()
self.bind_action()
def setup_ui(self):
self.setObjectName("MainWindow")
self.setWindowFlags(Qt.WindowTitleHint)
# Size the window according to the current screen resolution
self.resize(self.desktop_screen_rect.width() * 0.65, self.desktop_screen_rect.height() * 0.7)
# Center the window
keep_center(self, self.desktop_screen_rect)
# Set the spacing of all widgets to 0
self.main_layout.setSpacing(0)
self.main_layout.setContentsMargins(0, 0, 0, 0)
self.main_widget.setLayout(self.main_layout)
self.setCentralWidget(self.main_widget)
self.main_splitter.setStretchFactor(0, 2)
self.main_splitter.setStretchFactor(1, 9)
# Populate the menu bar
fill_menu_bar(self)
# Populate the tool bar
fill_tool_bar(self)
# Show the text under the icons (by default only the icon is shown)
self.toolBar.setToolButtonStyle(Qt.ToolButtonTextUnderIcon)
self.main_layout.addWidget(self.central_widget)
def bind_action(self):
# Asynchronously reopen the working state from the last exit
self.reopen_manager = AsyncReopenManager(self, self.tree_widget, self.tab_widget)
self.reopen_manager.start()
# Double-click event on a tree node
self.tree_widget.doubleClicked.connect(self.get_tree_list)
# Right-click event
self.tree_widget.setContextMenuPolicy(Qt.CustomContextMenu)
self.tree_widget.customContextMenuRequested.connect(self.right_click_menu)
# Signals for tabs being cleared or a tab being opened
self.tab_widget.opened_tab_signal.connect(lambda: self.tree_tool_location_button.setEnabled(True))
self.tab_widget.clear_tabs_signal.connect(lambda: self.tree_tool_location_button.setEnabled(False))
# Locate
self.tree_tool_location_button.clicked.connect(self.location_method)
def connect_rest_signal(self):
# Clicking, expanding or collapsing a node should resize the columns to fit the content, so the horizontal scroll bar stays usable
self.tree_widget.doubleClicked.connect(self.handle_expanded_changed)
self.tree_widget.expanded.connect(self.handle_expanded_changed)
self.tree_widget.collapsed.connect(self.handle_expanded_changed)
def translate_ui(self):
self.setWindowTitle(self._translate("MainWindow", "Dubbo-test-client"))
self.tree_tool_location_button.setText(LOCATION_BUTTON)
def add_conn(self):
"""打开添加连接窗口"""
conn_info = Connection(*((None,) * len(Connection._fields)))
conn_dialog = ConnDialog(conn_info, ADD_CONN_MENU, self.geometry(), self.tree_widget)
conn_dialog.exec()
def edit_conn(self, conn_info, tree_item):
"""打开编辑连接窗口"""
conn_dialog = ConnDialog(conn_info, EDIT_CONN_MENU, self.geometry(), tree_item=tree_item)
conn_dialog.exec()
def get_tree_list(self):
"""获取树的子节点,双击触发,连接 -> service -> method,按顺序读取出来"""
item = self.tree_widget.currentItem()
node = tree_node_factory(item)
Context(node).open_item(item, self)
def handle_expanded_changed(self, index):
# Adjust the column width according to the current content
self.tree_widget.tree_column_resize()
item = self.tree_widget.itemFromIndex(index)
# Method nodes have no expanded attribute and need no handling; listening on conn and service nodes is enough
if item.parent() is None or item.parent().parent() is None:
expanded = self.tree_widget.itemFromIndex(index).isExpanded()
self.tree_widget.update_expanded(item.text(1), expanded, item)
def right_click_menu(self, pos):
"""
Context-menu feature: pop up a menu on right click
:param pos: coordinates of the right click
"""
# Get the current item; the menu is only shown when the cursor is on an item
item = self.tree_widget.itemAt(pos)
if item:
# Build the context menu
menu = QtWidgets.QMenu()
node = tree_node_factory(item)
menu_names = Context(node).get_menu_names(item, self)
[menu.addAction(QtWidgets.QAction(option, menu)) for option in menu_names]
# Click event of the context menu
menu.triggered.connect(lambda action: Context(node).handle_menu_func(item, action.text(), self))
# Show the context menu at the cursor position
menu.exec_(QCursor.pos())
def del_history(self):
item = self.tree_widget.currentItem()
node = tree_node_factory(item)
Context(node).del_history(item, self)
def location_method(self):
tab_id = self.tab_widget.currentWidget().property("tab_id")
# Based on how tab_id is constructed, locate the item level by level; after splitting on the separator the parts should be connection id, interface name and method name
self.tree_widget.tab_id_splits = tab_id.split(TAB_ID_SEPARATOR)
conn_id = self.tree_widget.tab_id_splits[0]
# Iterate over the top-level nodes
for conn_idx in range(self.tree_widget.topLevelItemCount()):
conn_item = self.tree_widget.topLevelItem(conn_idx)
# Once the matching connection node is found, traverse it
if int(conn_id) == eval(conn_item.text(2)).get("id"):
method_item = self.tree_widget.recursive_search_item(conn_item)
self.tree_widget.set_selected_focus(method_item)
def close(self):
close_sqlite()
self.tab_widget.close()
super().close()
|
# Copyright 2010-2011, RTLCores. All rights reserved.
# http://rtlcores.com
# See LICENSE.txt
from VerilogSim import VerilogSim
class IcarusVerilog(VerilogSim):
"""
Icarus Verilog class to build a compile command and a
simulation command.
Inherits VerilogSim
Defaults:
TIMESCALE : 1ns / 10ps
OUTFILE : sim
DUMPFILE : dump.vcd
WARN : all
"""
def __init__(self, cfg):
"""
"""
VerilogSim.__init__(self, cfg)
self.cfg = cfg
## Default flags specific to Icarus Verilog
## and any required list comprehension commands
# self['compcmd'] = ['iverilog']
# self['simcmd'] = ['vvp'] # -n = non-interactive mode
self['builddir'] = ['run']
self['warn'] = ['all']
self['warn'].cmd = lambda x: self._prepend('-W', x)
self['outfile'] = [self.cfg.outfile]
self['outfile'].cmd = lambda x: self._prepend('-o', x)
## Run the populate method to do the cfg conversion
## Populate will overwrite any flags defined locally if they exist
## in the config file
self.populate()
# The test_files need to come before the rtl_files on the command
# line or it exposes a strange thing where Icarus will detect an
# rtl file that doesn't exist, but not return a non-zero exit code.
# This causes the sim executable to be generated and run but there
# is nothing to run, so it looks like it completes successfully.
def buildCompCmd(self):
self.comp_cmd = self['compcmd'] + \
self['warn'].conv() + \
self['outfile'].conv() + \
self['defines'].conv() + \
self['rtl_inc_dirs'].conv() + \
self['test_inc_dirs'].conv() + \
self['test_files'].conv() + \
self['rtl_files'].conv() + \
[self.cfg.auto_test]
# [self.cfg['auto_test']]
self.cmds.append(self.comp_cmd)
def buildSimCmd(self):
log = '-l' + self.cfg.build_path + '/' + self.cfg['logfile']
self.sim_cmd = self['simcmd'] + \
['-n'] + \
[log] + \
self['outfile'] + \
self['plusargs'].conv()
self.cmds.append(self.sim_cmd)
|
CLIENT_ID = "a126d5791c694dac84956d88bdeab74f"
CLIENT_SECRET = "18f7ba3185ae43df90092e87aedf0b31"
REDIRECT_URI = "http://127.0.01:8000/spotify/redirect"
|
from numpy.testing import assert_equal
from spacy.language import Language
from spacy.training import Example
from spacy.util import fix_random_seed, registry
SPAN_KEY = "labeled_spans"
TRAIN_DATA = [
("Who is Shaka Khan?", {"spans": {SPAN_KEY: [(7, 17, "PERSON")]}}),
(
"I like London and Berlin.",
{"spans": {SPAN_KEY: [(7, 13, "LOC"), (18, 24, "LOC")]}},
),
]
def make_get_examples(nlp):
train_examples = []
for t in TRAIN_DATA:
eg = Example.from_dict(nlp.make_doc(t[0]), t[1])
train_examples.append(eg)
def get_examples():
return train_examples
return get_examples
def test_simple_train():
fix_random_seed(0)
nlp = Language()
spancat = nlp.add_pipe("spancat", config={"spans_key": SPAN_KEY})
get_examples = make_get_examples(nlp)
nlp.initialize(get_examples)
sgd = nlp.create_optimizer()
assert len(spancat.labels) != 0
for i in range(40):
losses = {}
nlp.update(list(get_examples()), losses=losses, drop=0.1, sgd=sgd)
doc = nlp("I like London and Berlin.")
assert doc.spans[spancat.key] == doc.spans[SPAN_KEY]
assert len(doc.spans[spancat.key]) == 2
assert doc.spans[spancat.key][0].text == "London"
scores = nlp.evaluate(get_examples())
assert f"spans_{SPAN_KEY}_f" in scores
assert scores[f"spans_{SPAN_KEY}_f"] == 1.0
def test_ngram_suggester(en_tokenizer):
# test different n-gram lengths
for size in [1, 2, 3]:
ngram_suggester = registry.misc.get("ngram_suggester.v1")(sizes=[size])
docs = [
en_tokenizer(text)
for text in [
"a",
"a b",
"a b c",
"a b c d",
"a b c d e",
"a " * 100,
]
]
ngrams = ngram_suggester(docs)
# span sizes are correct
for s in ngrams.data:
assert s[1] - s[0] == size
# spans are within docs
offset = 0
for i, doc in enumerate(docs):
spans = ngrams.dataXd[offset : offset + ngrams.lengths[i]]
spans_set = set()
for span in spans:
assert 0 <= span[0] < len(doc)
assert 0 < span[1] <= len(doc)
spans_set.add((span[0], span[1]))
# spans are unique
assert spans.shape[0] == len(spans_set)
offset += ngrams.lengths[i]
# the number of spans is correct
assert_equal(ngrams.lengths, [max(0, len(doc) - (size - 1)) for doc in docs])
# test 1-3-gram suggestions
ngram_suggester = registry.misc.get("ngram_suggester.v1")(sizes=[1, 2, 3])
docs = [
en_tokenizer(text) for text in ["a", "a b", "a b c", "a b c d", "a b c d e"]
]
ngrams = ngram_suggester(docs)
assert_equal(ngrams.lengths, [1, 3, 6, 9, 12])
assert_equal(
ngrams.data,
[
# doc 0
[0, 1],
# doc 1
[0, 1],
[1, 2],
[0, 2],
# doc 2
[0, 1],
[1, 2],
[2, 3],
[0, 2],
[1, 3],
[0, 3],
# doc 3
[0, 1],
[1, 2],
[2, 3],
[3, 4],
[0, 2],
[1, 3],
[2, 4],
[0, 3],
[1, 4],
# doc 4
[0, 1],
[1, 2],
[2, 3],
[3, 4],
[4, 5],
[0, 2],
[1, 3],
[2, 4],
[3, 5],
[0, 3],
[1, 4],
[2, 5],
],
)
# test some empty docs
ngram_suggester = registry.misc.get("ngram_suggester.v1")(sizes=[1])
docs = [en_tokenizer(text) for text in ["", "a", ""]]
ngrams = ngram_suggester(docs)
assert_equal(ngrams.lengths, [len(doc) for doc in docs])
# test all empty docs
ngram_suggester = registry.misc.get("ngram_suggester.v1")(sizes=[1])
docs = [en_tokenizer(text) for text in ["", "", ""]]
ngrams = ngram_suggester(docs)
assert_equal(ngrams.lengths, [len(doc) for doc in docs])
|
import time
from optparse import OptionParser
def build_option_parser():
parser = OptionParser()
parser.add_option("-t", "--time", dest="given_time", type="string", help="Use HH:MM format for timer")
return parser.parse_args()
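# Example invocation (illustrative script name): `python timer.py -t 01:30` counts down
# from 1 hour 30 minutes, printing the remaining time as HH:MM:SS once per second.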
def countdown_timer(given_time_seconds):
while given_time_seconds:
minutes, seconds = divmod(given_time_seconds, 60)
hours, minutes = divmod(minutes, 60)
time_format = '{:02d}:{:02d}:{:02d}'.format(hours, minutes, seconds)
print(time_format, end='\r')
time.sleep(1)
given_time_seconds -= 1
def main():
(options, args) = build_option_parser()
given_time = options.given_time
if given_time:
hours = int(given_time.split(':')[0])
minutes = int(given_time.split(':')[1])
given_time_seconds = (hours * 3600) + (minutes * 60)
countdown_timer(given_time_seconds)
else:
print("Use -h option to view help\n Developer: Aditya Kamble (adityakamble49.com)")
if __name__ == '__main__':
main()
|
from django import forms
from .utils import PYTHON_PATH
class EditorForm(forms.Form):
code = forms.CharField(
widget=forms.Textarea,
required=False,
)
file_name = forms.CharField(
required=False,
)
dir_name = forms.CharField(
required=False,
)
select_python = forms.ChoiceField(
choices= PYTHON_PATH,
)
pip_name = forms.CharField(
required=False,
)
def __init__(self, request, *args, **kwargs):
super().__init__(*args, **kwargs)
open_file_path = request.GET.get('open_file_path')
if open_file_path:
try:
self.fields['code'].initial = open(open_file_path, 'rb').read().decode('utf-8')
# The currently open file has been deleted
except FileNotFoundError:
self.fields['code'].initial = ''
else:
self.fields['code'].initial = ''
venv_path = request.GET.get('venv_path')
if venv_path:
choices = [(venv_path, 'venv')] + PYTHON_PATH
self.fields['select_python'] = forms.ChoiceField(choices=choices)
class ChkForm(forms.Form):
labels = ['Machine Learning','Visualize', 'other',]
CHOICE = [
('keras','Keras'),
('tensorflow','Tensorflow'),
('chainer','Chainer')]
CHOICE1 = [
('matplotlib','Matplotlib'),
('seaborn','Seaborn'),
]
CHOICE2 = [
('numpy','numpy'),
('pandas','pandas'),
('opencv-python','opencv'),
]
one = forms.MultipleChoiceField(
label=labels[0],
required=False,
disabled=False,
initial=[],
choices=CHOICE,
widget=forms.CheckboxSelectMultiple(attrs={
'id': 'one','class': 'form-check-input'}))
two = forms.MultipleChoiceField(
label=labels[1],
required=False,
disabled=False,
initial=[],
choices=CHOICE1,
widget=forms.CheckboxSelectMultiple(attrs={
'id': 'two','class': 'form-check-input'}))
three = forms.MultipleChoiceField(
label=labels[2],
required=False,
disabled=False,
initial=[],
choices=CHOICE2,
widget=forms.CheckboxSelectMultiple(attrs={
'id': 'three','class': 'form-check-input'}))
|
#!/usr/bin/env python
import QtTesting
object1 = 'pqClientMainWindow/menubar/menuSources'
QtTesting.playCommand(object1, 'activate', 'RTAnalyticSource')
object2 = 'pqClientMainWindow/propertiesDock/propertiesPanel/Accept'
QtTesting.playCommand(object2, 'activate', '')
object3 = 'pqClientMainWindow/menubar/menuFilters/Common'
QtTesting.playCommand(object3, 'activate', 'Contour')
QtTesting.playCommand(object2, 'activate', '')
object5 = 'pqClientMainWindow/propertiesDock/propertiesPanel/scrollArea/qt_scrollarea_viewport/scrollAreaWidgetContents/PropertiesFrame/ProxyPanel/ContourValues/ScalarValueList'
QtTesting.setProperty(object5, 'scalars', '120')
QtTesting.playCommand(object2, 'activate', '')
QtTesting.setProperty(object5, 'scalars', '120;130;140;150')
QtTesting.playCommand(object2, 'activate', '')
object1 = 'pqClientMainWindow/menubar/menu_Edit'
QtTesting.playCommand(object1, 'activate', 'actionEditUndo')
# Need to wait a moment to allow the GUI to update.
import time
time.sleep(1)
val = QtTesting.getProperty(object5, 'scalars')
if val != "120":
import exceptions
raise exceptions.RuntimeError, "Expecting 120, received: " + val
else:
print "Value comparison successful -- Test passed."
|
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: pyatv/protocols/mrp/protobuf/RemoteTextInputMessage.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from pyatv.protocols.mrp.protobuf import ProtocolMessage_pb2 as pyatv_dot_protocols_dot_mrp_dot_protobuf_dot_ProtocolMessage__pb2
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n9pyatv/protocols/mrp/protobuf/RemoteTextInputMessage.proto\x1a\x32pyatv/protocols/mrp/protobuf/ProtocolMessage.proto\"J\n\x16RemoteTextInputMessage\x12\x11\n\ttimestamp\x18\x01 \x01(\x01\x12\x0f\n\x07version\x18\x02 \x01(\x04\x12\x0c\n\x04\x64\x61ta\x18\x03 \x01(\x0c:I\n\x16remoteTextInputMessage\x12\x10.ProtocolMessage\x18G \x01(\x0b\x32\x17.RemoteTextInputMessage')
REMOTETEXTINPUTMESSAGE_FIELD_NUMBER = 71
remoteTextInputMessage = DESCRIPTOR.extensions_by_name['remoteTextInputMessage']
_REMOTETEXTINPUTMESSAGE = DESCRIPTOR.message_types_by_name['RemoteTextInputMessage']
RemoteTextInputMessage = _reflection.GeneratedProtocolMessageType('RemoteTextInputMessage', (_message.Message,), {
'DESCRIPTOR' : _REMOTETEXTINPUTMESSAGE,
'__module__' : 'pyatv.protocols.mrp.protobuf.RemoteTextInputMessage_pb2'
# @@protoc_insertion_point(class_scope:RemoteTextInputMessage)
})
_sym_db.RegisterMessage(RemoteTextInputMessage)
if _descriptor._USE_C_DESCRIPTORS == False:
pyatv_dot_protocols_dot_mrp_dot_protobuf_dot_ProtocolMessage__pb2.ProtocolMessage.RegisterExtension(remoteTextInputMessage)
DESCRIPTOR._options = None
_REMOTETEXTINPUTMESSAGE._serialized_start=113
_REMOTETEXTINPUTMESSAGE._serialized_end=187
# @@protoc_insertion_point(module_scope)
|
from flask import Blueprint
from werkzeug.exceptions import NotFound, InternalServerError, TooManyRequests
from mldictionary_api.const import API_PREFIX
from mldictionary_api.resources.response import ResponseAPI
from mldictionary_api.resources.const import (
ENGLISH_REPR,
ENGLISH_TO_PORTUGUESE_REPR,
PORTUGUESE_REPR,
PORTUGUESE_TO_ENGLISH,
SPANISH_REPR,
)
api = Blueprint('mldictionary_api', __name__, url_prefix=API_PREFIX)
@api.route('/dictionary/en/<word>/')
def english(word: str):
return ResponseAPI().get_meanings(ENGLISH_REPR, word)
@api.route('/dictionary/pt/<word>/')
def portuguese(word: str):
return ResponseAPI().get_meanings(PORTUGUESE_REPR, word)
@api.route('/dictionary/es/<word>/')
def spanish(word: str):
return ResponseAPI().get_meanings(SPANISH_REPR, word)
@api.route('/translator/en-pt/<word>/')
def english_to_portuguese(word: str):
return ResponseAPI().get_meanings(ENGLISH_TO_PORTUGUESE_REPR, word)
@api.route('/translator/pt-en/<word>/')
def portuguese_to_english(word: str):
return ResponseAPI().get_meanings(PORTUGUESE_TO_ENGLISH, word)
@api.app_errorhandler(NotFound)
def not_found(err):
return ResponseAPI().handle_error(err)
@api.app_errorhandler(TooManyRequests)
def too_many_requests(err):
return ResponseAPI().handle_error(err)
@api.app_errorhandler(InternalServerError)
def internal_error(err):
return ResponseAPI().handle_error(err)
@api.app_errorhandler(Exception)
def general_exception(err):
err.description = 'Unrecognized error'
err.code = 500
return ResponseAPI().handle_error(err)
|
import sys
import os
import configparser
import requests
import pandas as pd
import hashlib
from io import StringIO
from datetime import datetime, timezone
## Django Setup
import django
import pymysql
pymysql.install_as_MySQLdb()
conffile = os.path.join(os.path.dirname(__file__), "../../conf/insert2db.conf")
conf = configparser.SafeConfigParser()
conf.read(conffile)
sys.path.append(conf.get('exist', 'syspath'))
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'intelligence.settings')
django.setup()
from apps.reputation.models import blacklist
import django.utils.timezone as tzone
from django.db import IntegrityError
## Logger Setup
from logging import getLogger, DEBUG, NullHandler
logger = getLogger(__name__)
logger.addHandler(NullHandler())
logger.setLevel(DEBUG)
logger.propagate = True
DataDir = os.path.join(os.path.dirname(__file__), '../data/')
class Tracker():
def __init__(self):
self.name = 'Dshield_Medium'
self.ID = 222
self.URL = 'https://www.dshield.org/feeds/suspiciousdomains_Medium.txt'
self.DataFilePath = DataDir + 'dshield/suspiciousdomains_Medium.txt'
self.header = [
'domain',
]
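# cmpFiles below diffs the freshly downloaded feed against the previously saved copy and
# returns only the new lines; delComment then drops comment lines and the "Site" header.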
def cmpFiles(self, oldfile, newtext):
diffline = ''
if not os.path.exists(oldfile):
f = open(oldfile, 'w')
f.close()
oldsets = set(open(oldfile).readlines())
newsets = set(newtext.replace('\r\n','\n').splitlines(True))
results = newsets.difference(oldsets)
for result in results:
diffline += result
return diffline[:-1]
def delComment(self, s):
result = ''
for line in s.splitlines(True):
if not line.startswith('#') \
and line != "Site\n":
result += line
return result
def makeDataframe(self):
df = pd.DataFrame()
newline = ''
try:
res = requests.get(self.URL)
if res.status_code != 200:
return df
newline = self.cmpFiles(self.DataFilePath, res.text)
newline = self.delComment(newline)
except Exception as e:
logger.error(e)
if not newline == '':
open(self.DataFilePath, 'w').write(res.text)
df = pd.read_csv(StringIO(newline), names=self.header)
return df
def parse(self):
logger.info("start parsing: %s", self.name)
df = self.makeDataframe()
queries = []
if not df.empty:
for i, v in df.iterrows():
line = str(self.ID) + ","
line += str(v.values)
md5 = hashlib.md5(line.encode('utf-8')).hexdigest()
try:
query = blacklist(
id = md5,
domain = v.domain,
datetime = tzone.now(),
source = self.ID,
referrer = 'https://www.dshield.org/feeds/suspiciousdomains_Medium.txt',
)
except Exception as e:
    logger.error("%s: %s", e, line)
    continue
queries.append(query)
else:
logger.info("no update")
logger.info("done parsing: %s, %s queries were parsed", self.name, len(queries))
return queries
|
import os
from celery import Celery
from celery.schedules import crontab
# set the default Django settings module for the 'celery' program.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'reminders.settings')
app = Celery('reminders')
# Using a string here means the worker doesn't have to serialize
# the configuration object to child processes.
# - namespace='CELERY' means all celery-related configuration keys
# should have a `CELERY_` prefix.
app.config_from_object('django.conf:settings', namespace='CELERY')
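# Both scheduled tasks below run every 10 seconds; the 'add-every-60-seconds' key name is
# only a label and does not affect the actual 10-second interval.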
app.conf.beat_schedule = {
'add-every-60-seconds': {
'task': 'schedule.tasks.remind_by_mail',
'schedule': 10.0
},
'test_task_every_10_seconds': {
'task': 'schedule.tasks.print_test',
'schedule': 10.0
},
}
app.conf.timezone = 'UTC'
# Load task modules from all registered Django app configs.
app.autodiscover_tasks()
@app.task(bind=True)
def debug_task(self):
print(f'Request: {self.request!r}')
|
def rotcir(ns):
lista = [ns]
for i in range(len(ns) - 1):
a = ns[0]
ns = ns[1:len(ns)+1]
ns += a
lista.append(ns)
return(lista)
def cyclic_number(ns):
rotaciones = rotcir(ns)
for n in range(1, len(ns)):
Ns = str(n*int(ns))
while len(Ns) != len(ns):
Ns = '0' + Ns
if Ns not in rotaciones:
return False
return True
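# Example: cyclic_number('142857') is True, since 142857 * 2 = 285714, * 3 = 428571,
# * 4 = 571428 and * 5 = 714285 are all rotations of the original digits (shorter products
# are zero-padded by the while loop above before the comparison).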
import itertools
def per_n(n):
lista1 = list(itertools.product('0123456789',repeat=n))
lista2 = []
for i in lista1:
n = ''
for j in range(len(i)):
n += i[j]
lista2.append(n)
return(lista2)
i = 8
agregado = per_n(i)
for j in agregado:
ns = '00000000137' + j + '56789'
if cyclic_number(ns):
print(ns)
break
|
"""
Do not change anything here unless you know how to handle it;
otherwise it may mess up the server.
"""
from navycut.core import AppSister
from navycut.utils import path
__basedir__ = path.abspath(__file__).parent
class AniketSister(AppSister):
name = "aniket"
template_folder = __basedir__ / "templates"
static_folder = __basedir__ / "static"
static_url_path = "/static"
url_prefix = "/aniket"
import_app_feature = True
|
#!/usr/bin/env python3
#
# wikipedia.py
"""
Sphinx extension to create links to Wikipedia articles.
.. versionadded:: 0.2.0
.. extensions:: sphinx_toolbox.wikipedia
Configuration
--------------
.. latex:vspace:: -5px
.. confval:: wikipedia_lang
:type: :class:`str`
:required: False
:default: ``'en'``
The Wikipedia language to use for :rst:role:`wikipedia` roles.
.. versionadded:: 0.2.0
Usage
------
.. latex:vspace:: -5px
.. rst:role:: wikipedia
Role which shows a link to the given article on Wikipedia.
The title and language can be customised.
:bold-title:`Example`
.. rest-example::
:wikipedia:`Sphinx`
:wikipedia:`mythical creature <Sphinx>`
:wikipedia:`Answer to the Ultimate Question of Life, the Universe, and Everything <:de:42 (Antwort)>`
.. only:: html
.. rest-example::
:wikipedia:`:zh:斯芬克斯`
API Reference
----------------
"""
#
# Copyright © 2020-2021 Dominic Davis-Foster <dominic@davis-foster.co.uk>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
# OR OTHER DEALINGS IN THE SOFTWARE.
#
# Based on https://github.com/quiver/sphinx-ext-wikipedia
# BSD Licensed
#
# Parts of the docstrings based on https://docutils.sourceforge.io/docs/howto/rst-roles.html
#
# stdlib
import re
from typing import Dict, List, Tuple
from urllib.parse import quote
# 3rd party
from apeye.url import URL
from docutils import nodes
from docutils.nodes import system_message
from docutils.parsers.rst.states import Inliner
from sphinx.application import Sphinx
from sphinx.util.nodes import split_explicit_title
# this package
from sphinx_toolbox.utils import SphinxExtMetadata, metadata_add_version
__all__ = ["make_wikipedia_link", "setup"]
base_url = "https://%s.wikipedia.org/wiki"
_wiki_lang_re = re.compile(":(.*?):(.*)")
def _get_wikipedia_lang(inliner: Inliner): # pragma: no cover
return inliner.document.settings.env.config.wikipedia_lang
def make_wikipedia_link(
name: str,
rawtext: str,
text: str,
lineno: int,
inliner: Inliner,
options: Dict = {},
content: List[str] = []
) -> Tuple[List[nodes.reference], List[system_message]]:
"""
Adds a link to the given article on :wikipedia:`Wikipedia`.
:param name: The local name of the interpreted role, the role name actually used in the document.
:param rawtext: A string containing the entire interpreted text input, including the role and markup.
:param text: The interpreted text content.
:param lineno: The line number where the interpreted text begins.
:param inliner: The :class:`docutils.parsers.rst.states.Inliner` object that called :func:`~.source_role`.
It contains several attributes useful for error reporting and document tree access.
:param options: A dictionary of directive options for customization (from the ``role`` directive),
to be interpreted by the function.
Used for additional attributes for the generated elements and other functionality.
:param content: A list of strings, the directive content for customization (from the ``role`` directive).
To be interpreted by the function.
:return: A list containing the created node, and a list containing any messages generated during the function.
"""
text = nodes.unescape(text)
has_explicit, title, target = split_explicit_title(text)
m = _wiki_lang_re.match(target)
if m:
lang, target = m.groups()
if not has_explicit:
title = target
else:
lang = _get_wikipedia_lang(inliner)
ref = URL(base_url % lang) / quote(target.replace(' ', '_'), safe='')
node = nodes.reference(rawtext, title, refuri=str(ref), **options)
return [node], []
@metadata_add_version
def setup(app: Sphinx) -> SphinxExtMetadata:
"""
Setup :mod:`sphinx_toolbox.wikipedia`.
.. versionadded:: 1.0.0
:param app: The Sphinx application.
"""
app.add_role("wikipedia", make_wikipedia_link)
app.add_config_value("wikipedia_lang", "en", "env", [str])
return {"parallel_read_safe": True}
|