# -*- coding: utf-8 -*-
# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the MIT License.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# MIT License for more details.
"""AutoGate top-k version Stage2 TrainerCallback."""
import logging
import pandas as pd
from vega.common import ClassFactory, ClassType
from vega.common import FileOps
from vega.algorithms.nas.fis.ctr_trainer_callback import CtrTrainerCallback
from vega.core.pipeline.conf import ModelConfig
logger = logging.getLogger(__name__)
@ClassFactory.register(ClassType.CALLBACK)
class AutoGateS2TrainerCallback(CtrTrainerCallback):
"""AutoGateS2TrainerCallback module."""
def __init__(self):
"""Construct AutoGateS2TrainerCallback class."""
super(CtrTrainerCallback, self).__init__()
self.sieve_board = pd.DataFrame(
columns=['selected_feature_pairs', 'score'])
self.selected_pairs = list()
logging.info("init autogate s2 trainer callback")
def before_train(self, logs=None):
"""Call before_train of the managed callbacks."""
super().before_train(logs)
"""Be called before the training process."""
hpo_result = FileOps.load_pickle(FileOps.join_path(
self.trainer.local_output_path, 'best_config.pickle'))
logging.info("loading stage1_hpo_result \n{}".format(hpo_result))
feature_interaction_score = hpo_result['feature_interaction_score']
print('feature_interaction_score:', feature_interaction_score)
sorted_pairs = sorted(feature_interaction_score.items(),
key=lambda x: abs(x[1]), reverse=True)
if ModelConfig.model_desc:
fis_ratio = ModelConfig.model_desc["custom"]["fis_ratio"]
else:
fis_ratio = 1.0
top_k = int(len(feature_interaction_score) * min(1.0, fis_ratio))
self.selected_pairs = list(map(lambda x: x[0], sorted_pairs[:top_k]))
# add selected_pairs
setattr(ModelConfig.model_desc['custom'], 'selected_pairs', self.selected_pairs)
def after_train(self, logs=None):
"""Call after_train of the managed callbacks."""
curr_auc = float(self.trainer.valid_metrics.results['auc'])
self.sieve_board = self.sieve_board.append(
{
'selected_feature_pairs': self.selected_pairs,
'score': curr_auc
}, ignore_index=True)
result_file = FileOps.join_path(
self.trainer.local_output_path, '{}_result.csv'.format(self.trainer.__worker_id__))
self.sieve_board.to_csv(result_file, sep='\t')
|
from setuptools import setup, find_packages
with open("README.md", "r", encoding="utf-8") as f:
long_description = f.read()
setup_requirements = [
"wheel>=0.35.1",
]
requirements = ["Pillow>=7.2.0"]
test_requirements = [
"flake8>=3.8.3",
"pytest>=5.4.3",
]
dev_requirements = [
*setup_requirements,
*test_requirements,
]
extra_requirements = {
"setup": setup_requirements,
"test": test_requirements,
"all": [*requirements, *dev_requirements,],
}
setup(
name="image-scramble",
version="2.0.1",
author="catsital",
author_email="catshital@gmail.com",
description="Split image into tiles and scramble/unscramble them with seed.",
entry_points={"console_scripts": ["pycasso=pycasso.__main__:main"],},
install_requires=requirements,
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/catsital/pycasso",
project_urls={
"Bug Tracker": "https://github.com/catsital/pycasso/issues",
},
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
packages=find_packages(),
setup_requires=setup_requirements,
tests_require=test_requirements,
extras_require=extra_requirements,
zip_safe=False
)
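# A hedged usage note: with the extras defined above, the package and its optional
# dependency groups could be installed roughly like this (the "test" and "all"
# extra names come from extra_requirements above):
#
#   pip install .              # runtime dependencies only (Pillow)
#   pip install .[test]        # adds flake8 and pytest
#   pip install .[all]         # runtime + setup + test dependencies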
|
from collections import OrderedDict
import pytest
from company.constants import RegistrationNumberChoices
from company.models import Country
from company.serialisers import CompanySerialiser
from .factories import CompanyFactory, IndustryCodeFactory, PrimaryIndustryCodeFactory, RegistrationNumberFactory
@pytest.mark.django_db
def test_company_serialiser():
company = CompanyFactory(**{
'duns_number': '123456789',
'primary_name': 'Test Company 1',
'trading_names': ['ACME trading corp'],
'global_ultimate_duns_number': '888888888',
'global_ultimate_primary_name': 'global primary name',
'domain': 'www.e-corp.corp',
'is_out_of_business': False,
'address_line_1': 'The Old Test Mill 1',
'address_line_2': '100 Test Rd',
'address_town': 'Cheshire',
'address_county': 'address county',
'address_area_name': 'address area name',
'address_area_abbrev_name': 'abr',
'address_postcode': 'address postcode',
'address_country': Country.objects.get(iso_alpha2='GB'),
'registered_address_line_1': 'reg address line 1',
'registered_address_line_2': 'reg address line 2',
'registered_address_town': 'reg address town',
'registered_address_county': 'reg address county',
'registered_address_area_name': 'reg address area name',
'registered_address_area_abbrev_name': 'abr',
'registered_address_country': Country.objects.get(iso_alpha2='GB'),
'registered_address_postcode': 'reg postcode',
'annual_sales': 51806612000,
'annual_sales_currency': 'USD',
'is_annual_sales_estimated': None,
'employee_number': 24,
'year_started': 2000,
'is_employees_number_estimated': False,
'legal_status': 'foreign_company'
})
RegistrationNumberFactory(**{
'company': company,
'registration_type': RegistrationNumberChoices.uk_vat_number,
'registration_number': '12341234',
})
IndustryCodeFactory(**{
'company': company,
'code': '517919',
'description': 'All Other Telecommunications',
'typeDescription': 'North American Industry Classification System 2017',
'typeDnBCode': 30832,
'priority': 2
})
IndustryCodeFactory(**{
'company': company,
'code': '423690',
'description': 'Other Electronic Parts and Equipment Merchant Wholesalers',
'typeDescription': 'North American Industry Classification System 2017',
'typeDnBCode': 30832,
'priority': 1
})
PrimaryIndustryCodeFactory(**{
'company': company,
'usSicV4': '5065',
'usSicV4Description': 'Whol electronic parts/equipment'
})
assert CompanySerialiser(company).data == {
'last_updated': None,
'duns_number': '123456789',
'primary_name': 'Test Company 1',
'trading_names': ['ACME trading corp'],
'registration_numbers': [
OrderedDict(
[
('registration_type', 'VAT Registration number'),
('registration_number', '12341234'),
]
)
],
'global_ultimate_duns_number': '888888888',
'global_ultimate_primary_name': 'global primary name',
'domain': 'www.e-corp.corp',
'is_out_of_business': False,
'address_line_1': 'The Old Test Mill 1',
'address_line_2': '100 Test Rd',
'address_town': 'Cheshire',
'address_county': 'address county',
'address_area_name': 'address area name',
'address_area_abbrev_name': 'abr',
'address_postcode': 'address postcode',
'address_country': 'GB',
'registered_address_line_1': 'reg address line 1',
'registered_address_line_2': 'reg address line 2',
'registered_address_town': 'reg address town',
'registered_address_county': 'reg address county',
'registered_address_area_name': 'reg address area name',
'registered_address_area_abbrev_name': 'abr',
'registered_address_country': 'GB',
'registered_address_postcode': 'reg postcode',
'annual_sales': 51806612000.0,
'annual_sales_currency': 'USD',
'is_annual_sales_estimated': None,
'employee_number': 24,
'is_employees_number_estimated': False,
'primary_industry_codes': [
OrderedDict([
('usSicV4', '5065'),
('usSicV4Description', 'Whol electronic parts/equipment'),
])
],
'industry_codes': [
OrderedDict(
[
('code', '423690'),
('description', 'Other Electronic Parts and Equipment Merchant Wholesalers'),
('priority', 1),
('typeDescription', 'North American Industry Classification System 2017'),
('typeDnBCode', '30832'),
]
),
OrderedDict(
[
('code', '517919'),
('description', 'All Other Telecommunications'),
('priority', 2),
('typeDescription', 'North American Industry Classification System 2017'),
('typeDnBCode', '30832'),
]
)
],
'line_of_business': '',
'year_started': 2000,
'legal_status': 'foreign_company'
}
|
"""
Django settings for drf_sample project.
Generated by 'django-admin startproject' using Django 1.10.1.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import sys
import os
sys.path.append('/fan/')
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'ov0!!1=grqmn-1^gdcm87a+=al3)(t9xnionsx)*&oe&3l+x4*'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'drf_app',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'fan.contrib.django.FanMiddleware',
]
ROOT_URLCONF = 'drf_sample.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'drf_sample.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'standard': {
'format': '%(asctime)s [%(levelname)s] %(name)s: %(message)s'
},
},
'handlers': {
'default': {
'level': 'DEBUG',
'class': 'logging.FileHandler',
'filename': 'default.log',
'formatter': 'standard',
},
},
'loggers': {
'': {
'handlers': ['default'],
'level': 'DEBUG',
'propagate': True,
},
},
}
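# A hedged note on the LOGGING config above: the root ('') logger is wired to the
# 'default' FileHandler, so module-level loggers write to default.log, e.g.:
#
#   import logging
#   logging.getLogger(__name__).debug("this message lands in default.log")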
|
import os
from flask import render_template, url_for, redirect
from werkzeug.utils import secure_filename
from app import app
from app.forms import ScriptForm
from Script_reader import table_creator
@app.route('/', methods=['POST', 'GET'])
@app.route('/index', methods=['POST', 'GET'])
def index():
form = ScriptForm()
if form.validate_on_submit():
f = form.script.data
filename = secure_filename(f.filename)
file_path = os.path.join(app.instance_path, 'scripts', filename)
f.save(os.path.join(app.instance_path, 'scripts', filename))
table = table_creator(file_path).get_html_string()
os.remove(file_path)
return table
return render_template('index.html', title='Home', form=form)
@app.route('/locations', methods=['POST', 'GET'])
def locations():
pass
#Perhaps use http://flask.pocoo.org/docs/0.12/api/#flask.send_from_directory to allow a CSV to be downloaded.
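# A minimal sketch of the idea in the comment above, assuming the generated CSV is
# written to an 'exports' folder under app.instance_path; the route and filename
# below are hypothetical:
#
#   from flask import send_from_directory
#
#   @app.route('/locations/download')
#   def download_locations():
#       return send_from_directory(
#           os.path.join(app.instance_path, 'exports'),
#           'locations.csv', as_attachment=True)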
|
import pandas as pd
import numpy as np
from collections import defaultdict
import RemovingDataSolns as s
# Question 1
def prop_sals_test(prop_sals):
'''
INPUT prop_sals - a float giving the proportion of missing values in the salary column
Prints statement related to the correctness of the solution of the proportion
'''
if np.allclose(prop_sals, s.prop_sals):
print("Nice job! That looks right!")
else:
print("Oops! Make sure your value is for the proportion of nan values in only the Salary column.")
# Question 2
def sal_rm_test(sal_rm):
'''
INPUT sal_rm - a pandas dataframe with all rows that are missing a value in the salary column removed. The dataframe should only have the columns of num_vars (quantitative variables)
Prints statement related to the correctness of the solution of the dataframe
'''
if sal_rm.equals(s.sal_rm):
print("Nice job! That looks right!")
else:
print("That wasn't quite as expected. Try again, this should be the num_vars dataframe with salary removed.")
# Question 3
def question3_check(question3_solution):
'''
INPUT question3_solution - the letter (a, b, or c) corresponding to the statement that best describes what happened when fitting your model.
Prints statement related to the correctness of the letter chosen.
'''
if question3_solution == s.question3_solution:
print("Nice job! That's right! Those missing values in the X matrix will still not allow us to predict the response.")
else:
print("Oops! That wasn't what we were expecting. Your solution should be either a, b, or c for the string that best relates to what happened.")
# Question 4
def all_rm_test(all_rm):
'''
INPUT all_rm - a pandas dataframe with all rows that are missing a value in any column removed from num_vars (only the numeric columns)
Prints statement related to the correctness of the solution of the dataframe
'''
if all_rm.equals(s.all_rm):
print("Nice job! That looks right. The default is to drop any row with a missing value in any column, so we didn't need to specify any arguments in this case.")
else:
print("Oops! That doesn't look like what we were expecting. Make sure you are working with only the numeric columns, and you have dropped any rows with missing values.")
# Question 5
def question5_check(question5_solution):
'''
INPUT question5_solution - the letter (a, b, or c) corresponding to the statement that best describes what happened when fitting your model.
Prints statement related to the correctness of the letter chosen.
'''
if question5_solution == s.question5_solution:
print("Nice job! That's right! Python isn't exactly magic, but sometimes it feels like it is!")
else:
print("Oops! Your solution should have worked. In which case, no output should have printed. This solution should follow just as in the screencast.")
# Question 6
def r2_test_check(r2_test):
'''
INPUT r2_test - the rsquared value from fitting a model with all nan values dropped and only using quantitative variables.
Prints statement related to the correctness of the rsquared value matching the solution.
'''
if r2_test == s.r2_test:
print("Nice job! That's right! Your rsquared matches the solution.")
else:
print("Oops! That wasn't the value that was expected. You should fit your model using the training data, predict on the X_test data, and then score comparing the y_test and your predicted values.")
# Question 7
def question7_check(question7_solution):
'''
INPUT question7_solution - a dictionary with statements of takeaways from the rest of the notebook. The values should be the variables a, b, c, d, e, f, or g
Prints statement related to the correctness of the solution of the dictionary
'''
if question7_solution == s.question7_solution:
print("Nice job! That looks right to me! We would really like to predict for anyone who provides a salary, but our model right now definitely has some limitations.")
elif question7_solution['The number of reported salaries in the original dataset'] != s.question7_solution['The number of reported salaries in the original dataset']:
print("The number of reported salaries in the original dataset doesn't look quite right.")
elif question7_solution['The number of test salaries predicted using our model'] != s.question7_solution['The number of test salaries predicted using our model']:
print("The number of salaries predicted using our model doesn't look quite right.")
elif question7_solution['If an individual does not rate stackoverflow, but has a salary'] != s.question7_solution['If an individual does not rate stackoverflow, but has a salary']:
print("Whether an individual rates stackoverflow or has a job satisfaction we would still like to predict the salary if we can.")
elif question7_solution['If an individual does not have a a job satisfaction, but has a salary'] != s.question7_solution['If an individual does not have a a job satisfaction, but has a salary']:
print("Whether an individual rates stackoverflow or has a job satisfaction we would still like to predict the salary if we can.")
elif question7_solution['Our model predicts salaries for the two individuals described above.'] != s.question7_solution['Our model predicts salaries for the two individuals described above.']:
print("Unfortunately, our current model will not predict for anyone who has missing values in any column - even if they do have a salary!")
|
# import sharpy.utils.settings as settings
# import sharpy.utils.exceptions as exceptions
# import sharpy.utils.cout_utils as cout
import numpy as np
import importlib
import unittest
import os
import sharpy.utils.cout_utils as cout
class TestCoupledPrescribed(unittest.TestCase):
"""
"""
@classmethod
def setUpClass(cls):
# run all the cases generators
# case = 'smith_2deg_prescribed'
# mod = importlib.import_module('tests.coupled.prescribed.' + case + '.generate_' + case)
# case = 'rotating_wing'
# mod1 = importlib.import_module('tests.coupled.prescribed.' + case + '.generate_' + case)
pass
@classmethod
def tearDownClass(cls):
pass
# def test_smith2deg_prescribed(self):
# import sharpy.sharpy_main
# solver_path = os.path.abspath(os.path.dirname(os.path.realpath(__file__)) +
# '/smith_2deg_prescribed/smith_2deg_prescribed.sharpy')
# sharpy.sharpy_main.main(['', solver_path])
#
# # read output and compare
# output_path = os.path.dirname(solver_path) + 'output/aero/'
# forces_data = np.genfromtxt(output_path + 'smith_2deg_prescribed_aeroforces.csv')
# self.assertAlmostEqual(forces_data[-1, 3], -3.728e1, 1)
def test_rotating_wing(self):
# import sharpy.sharpy_main
# solver_path = os.path.abspath(os.path.dirname(os.path.realpath(__file__)) +
# '/rotating_wing/rotating_wing.sharpy')
# sharpy.sharpy_main.main(['', solver_path])
cout.cout_wrap('No tests for prescribed dynamic configurations (yet)!', 1)
pass
|
# coding: utf-8
"""
Isilon SDK
Isilon SDK - Language bindings for the OneFS API # noqa: E501
OpenAPI spec version: 9
Contact: sdk@isilon.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from isi_sdk_8_2_2.models.cloud_proxy import CloudProxy # noqa: F401,E501
class CloudProxyCreateParams(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'host': 'str',
'name': 'str',
'password': 'str',
'port': 'int',
'type': 'str',
'username': 'str'
}
attribute_map = {
'host': 'host',
'name': 'name',
'password': 'password',
'port': 'port',
'type': 'type',
'username': 'username'
}
def __init__(self, host=None, name=None, password=None, port=None, type=None, username=None): # noqa: E501
"""CloudProxyCreateParams - a model defined in Swagger""" # noqa: E501
self._host = None
self._name = None
self._password = None
self._port = None
self._type = None
self._username = None
self.discriminator = None
self.host = host
self.name = name
if password is not None:
self.password = password
self.port = port
self.type = type
if username is not None:
self.username = username
@property
def host(self):
"""Gets the host of this CloudProxyCreateParams. # noqa: E501
A host name or network address for connecting to this proxy # noqa: E501
:return: The host of this CloudProxyCreateParams. # noqa: E501
:rtype: str
"""
return self._host
@host.setter
def host(self, host):
"""Sets the host of this CloudProxyCreateParams.
A host name or network address for connecting to this proxy # noqa: E501
:param host: The host of this CloudProxyCreateParams. # noqa: E501
:type: str
"""
if host is None:
raise ValueError("Invalid value for `host`, must not be `None`") # noqa: E501
self._host = host
@property
def name(self):
"""Gets the name of this CloudProxyCreateParams. # noqa: E501
A unique friendly name for this proxy configuration # noqa: E501
:return: The name of this CloudProxyCreateParams. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this CloudProxyCreateParams.
A unique friendly name for this proxy configuration # noqa: E501
:param name: The name of this CloudProxyCreateParams. # noqa: E501
:type: str
"""
if name is None:
raise ValueError("Invalid value for `name`, must not be `None`") # noqa: E501
self._name = name
@property
def password(self):
"""Gets the password of this CloudProxyCreateParams. # noqa: E501
The password to connect to this proxy if required (write-only) # noqa: E501
:return: The password of this CloudProxyCreateParams. # noqa: E501
:rtype: str
"""
return self._password
@password.setter
def password(self, password):
"""Sets the password of this CloudProxyCreateParams.
The password to connect to this proxy if required (write-only) # noqa: E501
:param password: The password of this CloudProxyCreateParams. # noqa: E501
:type: str
"""
self._password = password
@property
def port(self):
"""Gets the port of this CloudProxyCreateParams. # noqa: E501
The port used to connect to this proxy # noqa: E501
:return: The port of this CloudProxyCreateParams. # noqa: E501
:rtype: int
"""
return self._port
@port.setter
def port(self, port):
"""Sets the port of this CloudProxyCreateParams.
The port used to connect to this proxy # noqa: E501
:param port: The port of this CloudProxyCreateParams. # noqa: E501
:type: int
"""
if port is None:
raise ValueError("Invalid value for `port`, must not be `None`") # noqa: E501
self._port = port
@property
def type(self):
"""Gets the type of this CloudProxyCreateParams. # noqa: E501
The type of connection used to connect to this proxy # noqa: E501
:return: The type of this CloudProxyCreateParams. # noqa: E501
:rtype: str
"""
return self._type
@type.setter
def type(self, type):
"""Sets the type of this CloudProxyCreateParams.
The type of connection used to connect to this proxy # noqa: E501
:param type: The type of this CloudProxyCreateParams. # noqa: E501
:type: str
"""
if type is None:
raise ValueError("Invalid value for `type`, must not be `None`") # noqa: E501
allowed_values = ["socks_4", "socks_5", "http"] # noqa: E501
if type not in allowed_values:
raise ValueError(
"Invalid value for `type` ({0}), must be one of {1}" # noqa: E501
.format(type, allowed_values)
)
self._type = type
@property
def username(self):
"""Gets the username of this CloudProxyCreateParams. # noqa: E501
The username to connect to this proxy if required # noqa: E501
:return: The username of this CloudProxyCreateParams. # noqa: E501
:rtype: str
"""
return self._username
@username.setter
def username(self, username):
"""Sets the username of this CloudProxyCreateParams.
The username to connect to this proxy if required # noqa: E501
:param username: The username of this CloudProxyCreateParams. # noqa: E501
:type: str
"""
self._username = username
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, CloudProxyCreateParams):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
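# A hedged usage sketch (not part of the generated model): constructing the model
# with its required fields and one of the allowed `type` values, then serialising
# it. The host, name, and port values are illustrative only.
#
#   params = CloudProxyCreateParams(
#       host="proxy.example.com", name="my-proxy", port=1080, type="socks_5")
#   print(params.to_dict())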
|
from django.urls import include, path
from rest_framework import routers
from . import views
from .views import *
router = routers.DefaultRouter()
router.register(r"tracks", views.TrackViewSet)
urlpatterns = [
path("", include(router.urls)),
path('playlist/add', PlaylistAPIView.as_view()),
path('allplaylist/', PlayListViewSet.as_view({'get': 'list'})),
path('playlist/<id>', PlaylistAPIView.as_view()),
path('playlist/delete/<id>', PlaylistAPIView.as_view()),
path('playlist/addTrack/<id>', PlaylistAPIView.as_view(),name="addTrack"),
path('playlist/removeTrack/<id>', PlaylistAPIView.as_view(),name="removeTrack"),
]
|
# This example displays the images myImage_1 and myImage_2.
# Create a "smiley face" image.
myImage_1 = [ 0b00111100, #
0b01000010, #
0b10100101, #
0b10000001, #
0b10100101, #
0b10011001, #
0b01000010, #
0b00111100 ] #
# Create a "TV set" image.
myImage_2 = [ 0b01000100, #
0b00101000, #
0b00010000, #
0b11111111, #
0b10000011, #
0b10000011, #
0b10000011, #
0b11111111 ] #
from pyiArduinoI2Cmatrix import * # Import the library for working with the 8x8 LED matrix.
from time import sleep # Import the wait (sleep) function.
disp = pyiArduinoI2Cmatrix(0x09) # Declare the disp object for working with the 8x8 LED matrix, specifying its address on the I2C bus.
#
try: # Enter the exception-handling block.
while True: # Enter an infinite loop.
disp.drawImage(myImage_1) # Show the image from the myImage_1 list on the display
sleep(2) # and wait a couple of seconds.
disp.drawImage(myImage_2) # Show the image from the myImage_2 list on the display
sleep(2) # and wait a couple of seconds.
except: # If an exception is raised (for example, the script was interrupted from the keyboard),
disp.reset() # reset the module's parameters.
|
#!/usr/bin/env python3
# Copyright (c) 2016 Anki, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License in the file LICENSE.txt or at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''"If This Then That" sports example
This example demonstrates how "If This Then That" (http://ifttt.com) can be used to
make Cozmo respond when there is an in-game or final score update for the team
you specify. Instructions below will lead you through setting up an applet on
the IFTTT website. When the applet trigger is called (which sends a web request
received by the web server started in this example), Cozmo will play an animation,
show an image on his face, and speak the in-game update.
Please place Cozmo on the charger for this example. When necessary, he will be
rolled off and back on.
Follow these steps to set up and run the example:
1) Provide a static IP, URL or similar that can be reached from the "If This
Then That" server. One easy way to do this is with ngrok, which sets up
a secure tunnel to localhost running on your machine.
To set up ngrok:
a) Follow instructions here to download and install:
https://ngrok.com/download
b) Run this command to create a secure public URL for port 8080:
./ngrok http 8080
c) Note the HTTP forwarding address shown in the terminal (e.g., http://55e57164.ngrok.io).
You will use this address in your applet, below.
WARNING: Using ngrok exposes your local web server to the internet. See the ngrok
documentation for more information: https://ngrok.com/docs
2) Set up your applet on the "If This Then That" website.
a) Sign up and sign into https://ifttt.com
b) Create an applet: https://ifttt.com/create
c) Set up your trigger.
1. Click "this".
2. Select "ESPN" as your service.
3. Under "Choose a Trigger", select "New in-game update".
4. In section "Complete Trigger Fields", enter your sport and team and click "Create Trigger".
d) Set up your action.
1. Click "that".
2. Select "Maker Webhooks" to set it as your action channel. Connect to the Maker channel if prompted.
3. Click "Make a web request" and fill out the fields as follows. Remember your publicly
accessible URL from above (e.g., http://55e57164.ngrok.io) and use it in the URL field,
followed by "/iftttSports" as shown below:
URL: http://55e57164.ngrok.io/iftttSports
Method: POST
Content Type: application/json
Body: {"AlertBody":"{{AlertBody}}"}
4. Click "Create Action" then "Finish".
3) Test your applet.
a) Run this script at the command line: ./ifttt_sports.py
b) On ifttt.com, on your applet page, click "Check now". See that IFTTT confirms that the applet
was checked.
c) Wait for new in-game updates for your team and see Cozmo react! Cozmo should roll off the charger, raise
and lower his lift, show an image on his face and speak the in-game update.
'''
import asyncio
import sys
try:
from aiohttp import web
except ImportError:
sys.exit("Cannot import from aiohttp. Do `pip3 install --user aiohttp` to install")
import cozmo
from common import IFTTTRobot
app = web.Application()
async def serve_sports(request):
'''Define an HTTP POST handler for receiving requests from If This Then That.
You may modify this method to change how Cozmo reacts to
an in-game update from IFTTT.
'''
json_object = await request.json()
# Extract the text for the in-game update.
alert_body = json_object["AlertBody"]
robot = request.app['robot']
async def read_name():
try:
async with robot.perform_off_charger():
'''If necessary, Move Cozmo's Head and Lift to make it easy to see Cozmo's face.'''
await robot.get_in_position()
# First, have Cozmo play an animation
await robot.play_anim_trigger(cozmo.anim.Triggers.ReactToPokeStartled).wait_for_completed()
# Next, have Cozmo speak the text from the in-game update.
await robot.say_text(alert_body).wait_for_completed()
# Last, have Cozmo display a sports image on his face.
robot.display_image_file_on_face("../face_images/ifttt_sports.png")
except cozmo.RobotBusy:
cozmo.logger.warning("Robot was busy so didn't read update: '" + alert_body +"'")
# Perform Cozmo's task in the background so the HTTP server responds immediately.
asyncio.ensure_future(read_name())
return web.Response(text="OK")
# Attach the function as an HTTP handler.
app.router.add_post('/iftttSports', serve_sports)
if __name__ == '__main__':
cozmo.setup_basic_logging()
cozmo.robot.Robot.drive_off_charger_on_connect = False
# Use our custom robot class with extra helper methods
cozmo.conn.CozmoConnection.robot_factory = IFTTTRobot
try:
app_loop = asyncio.get_event_loop()
sdk_conn = cozmo.connect_on_loop(app_loop)
# Wait for the robot to become available and add it to the app object.
app['robot'] = app_loop.run_until_complete(sdk_conn.wait_for_robot())
except cozmo.ConnectionError as e:
sys.exit("A connection error occurred: %s" % e)
web.run_app(app)
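# A hedged local test (assuming the server is running on aiohttp's default port
# 8080, matching the ngrok setup in the docstring): simulate the IFTTT web request
# with curl instead of waiting for a real in-game update.
#
#   curl -X POST http://localhost:8080/iftttSports \
#        -H "Content-Type: application/json" \
#        -d '{"AlertBody": "Test in-game update"}'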
|
from django.http import HttpResponse
from django.views.decorators.http import require_http_methods
from django.shortcuts import render
import re
# Create your views here.
@require_http_methods(['GET', 'POST'])
def echo_0(request):
if request.method == 'GET':
return render(request, 'echo.html')
elif request.method in ['POST', 'PUT']:
return HttpResponse(status=405)
def parser(string):
result = re.match(r'[a-zA-Z]+', string)
return result.group(0)
# def echo(request):
# try:
# if (request.method == 'GET'):
# meta = parser(request.META['QUERY_STRING'])
# return render(request, 'echo.html', context={
# 'get_letters': meta,
# 'get_value': request.GET.get(meta),
# 'get_tag': request.META.get('HTTP_X_PRINT_STATEMENT'),
# 'request_method': request.META['REQUEST_METHOD'].lower()
# })
# elif request.method == 'POST':
# meta = parser(request.META['QUERY_STRING'])
# return render(request, 'echo.html', context={
# 'get_letters': meta,
# 'get_value': request.POST.get(meta),
# 'get_tag': request.META.get('HTTP_X_PRINT_STATEMENT'),
# 'request_method': request.META['REQUEST_METHOD'].lower()
# })
# except:
# return HttpResponse(status=404)
# def echo(request):
# if (request.method == 'GET'):
# meta = parser(request.META['QUERY_STRING'])
# return render(request, 'echo.html', context={
# 'get_letters': meta,
# 'get_value': request.GET.get(meta),
# 'get_tag': request.META.get('HTTP_X_PRINT_STATEMENT'),
# 'request_method': request.META['REQUEST_METHOD'].lower()
# })
# elif request.method == 'POST':
# #print(request.META['QUERY_STRING'])
# print(request.POST)
# return render(request, 'echo.html', context={
# 'get_letters':'a',
# 'get_value': 1,
# 'get_tag': request.META.get('HTTP_X_PRINT_STATEMENT'),
# 'request_method': request.META['REQUEST_METHOD'].lower()
# })
def echo(request):
context = {
'get' : request.GET,
'post' : request.POST,
'meta' : request.META
}
return render(request,"echo.html",context = context)
def filters(request):
return render(request, 'filters.html', context={
'a': request.GET.get('a', 1),
'b': request.GET.get('b', 1)
})
# <!-- {% extends base.html%} -->
#
def extend(request):
return render(request, 'extend.html', context={
'a': request.GET.get('a'),
'b': request.GET.get('b')
})
#
# <!--DOCTYPE html -->
# <html>
# <body>
# {% if 'QUERY_STRING' in request.META %}
# <h1> {{ request_method }} {{ get_letter }}: {{ get_value }} statement is empty </h1>
# {% elif 'HTTP_X_PRINT_STATEMENT' in request.META %}
# <h2> statement is {{get_tag}} </h2>
# {% endif %}
# </body>
# </html>
|
from kivy.app import App
from kivy.factory import Factory
from kivy.properties import ObjectProperty
from kivy.lang import Builder
from kivy.clock import Clock
from kivy.uix.label import Label
from electrum_commercium_gui.kivy.i18n import _
from datetime import datetime
from electrum_commercium.util import InvalidPassword
Builder.load_string('''
<TxDialog>
id: popup
title: _('Transaction')
is_mine: True
can_sign: False
can_broadcast: False
fee_str: ''
date_str: ''
date_label:''
amount_str: ''
tx_hash: ''
status_str: ''
description: ''
outputs_str: ''
BoxLayout:
orientation: 'vertical'
ScrollView:
GridLayout:
height: self.minimum_height
size_hint_y: None
cols: 1
spacing: '10dp'
padding: '10dp'
GridLayout:
height: self.minimum_height
size_hint_y: None
cols: 1
spacing: '10dp'
BoxLabel:
text: _('Status')
value: root.status_str
BoxLabel:
text: _('Description') if root.description else ''
value: root.description
BoxLabel:
text: root.date_label
value: root.date_str
BoxLabel:
text: _('Amount sent') if root.is_mine else _('Amount received')
value: root.amount_str
BoxLabel:
text: _('Transaction fee') if root.fee_str else ''
value: root.fee_str
TopLabel:
text: _('Outputs') + ':'
OutputList:
height: self.minimum_height
size_hint: 1, None
id: output_list
TopLabel:
text: _('Transaction ID') + ':' if root.tx_hash else ''
TxHashLabel:
data: root.tx_hash
name: _('Transaction ID')
Widget:
size_hint: 1, 0.1
BoxLayout:
size_hint: 1, None
height: '48dp'
Button:
size_hint: 0.5, None
height: '48dp'
text: _('Sign') if root.can_sign else _('Broadcast') if root.can_broadcast else ''
disabled: not(root.can_sign or root.can_broadcast)
opacity: 0 if self.disabled else 1
on_release:
if root.can_sign: root.do_sign()
if root.can_broadcast: root.do_broadcast()
IconButton:
size_hint: 0.5, None
height: '48dp'
icon: 'atlas://gui/kivy/theming/light/qrcode'
on_release: root.show_qr()
Button:
size_hint: 0.5, None
height: '48dp'
text: _('Close')
on_release: root.dismiss()
''')
class TxDialog(Factory.Popup):
def __init__(self, app, tx):
Factory.Popup.__init__(self)
self.app = app
self.wallet = self.app.wallet
self.tx = tx
def on_open(self):
self.update()
def update(self):
format_amount = self.app.format_amount_and_units
tx_hash, self.status_str, self.description, self.can_broadcast, amount, fee, height, conf, timestamp, exp_n = self.wallet.get_tx_info(self.tx)
self.tx_hash = tx_hash or ''
if timestamp:
self.date_label = _('Date')
self.date_str = datetime.fromtimestamp(timestamp).isoformat(' ')[:-3]
elif exp_n:
self.date_label = _('Mempool depth')
self.date_str = _('{} from tip').format('%.2f MB'%(exp_n/1000000))
else:
self.date_label = ''
self.date_str = ''
if amount is None:
self.amount_str = _("Transaction unrelated to your wallet")
elif amount > 0:
self.is_mine = False
self.amount_str = format_amount(amount)
else:
self.is_mine = True
self.amount_str = format_amount(-amount)
self.fee_str = format_amount(fee) if fee is not None else _('unknown')
self.can_sign = self.wallet.can_sign(self.tx)
self.ids.output_list.update(self.tx.outputs())
def do_sign(self):
self.app.protected(_("Enter your PIN code in order to sign this transaction"), self._do_sign, ())
def _do_sign(self, password):
self.status_str = _('Signing') + '...'
Clock.schedule_once(lambda dt: self.__do_sign(password), 0.1)
def __do_sign(self, password):
try:
self.app.wallet.sign_transaction(self.tx, password)
except InvalidPassword:
self.app.show_error(_("Invalid PIN"))
self.update()
def do_broadcast(self):
self.app.broadcast(self.tx)
def show_qr(self):
from electrum_commercium.bitcoin import base_encode, bfh
text = bfh(str(self.tx))
text = base_encode(text, base=43)
self.app.qr_dialog(_("Raw Transaction"), text)
|
import math
from typing import List
import numpy
from allennlp.common.util import JsonDict, sanitize
from allennlp.interpret.saliency_interpreters.saliency_interpreter import SaliencyInterpreter
from allennlp.nn import util
@SaliencyInterpreter.register("simple-gradient")
class SimpleGradient(SaliencyInterpreter):
"""
Registered as a `SaliencyInterpreter` with name "simple-gradient".
"""
def saliency_interpret_from_json(self, inputs: JsonDict) -> JsonDict:
"""
Interprets the model's prediction for inputs. Gets the gradients of the loss with respect
to the input and returns those gradients normalized and sanitized.
"""
labeled_instances = self.predictor.json_to_labeled_instances(inputs)
# List of embedding inputs, used for multiplying gradient by the input for normalization
embeddings_list: List[numpy.ndarray] = []
instances_with_grads = dict()
for idx, instance in enumerate(labeled_instances):
# Hook used for saving embeddings
handle = self._register_forward_hook(embeddings_list)
grads = self.predictor.get_gradients([instance])[0]
handle.remove()
# Gradients come back in the reverse order that they were sent into the network
embeddings_list.reverse()
for key, grad in grads.items():
# Get number at the end of every gradient key (they look like grad_input_[int],
# we're getting this [int] part and subtracting 1 for zero-based indexing).
# This is then used as an index into the reversed input array to match up the
# gradient and its respective embedding.
input_idx = int(key[-1]) - 1
# The [0] here is undo-ing the batching that happens in get_gradients.
emb_grad = numpy.sum(grad[0] * embeddings_list[input_idx], axis=1)
norm = numpy.linalg.norm(emb_grad, ord=1)
normalized_grad = [math.fabs(e) / norm for e in emb_grad]
grads[key] = normalized_grad
instances_with_grads["instance_" + str(idx + 1)] = grads
return sanitize(instances_with_grads)
def _register_forward_hook(self, embeddings_list: List):
"""
Finds all of the TextFieldEmbedders, and registers a forward hook onto them. When forward()
is called, embeddings_list is filled with the embedding values. This is necessary because
our normalization scheme multiplies the gradient by the embedding value.
"""
def forward_hook(module, inputs, output):
embeddings_list.append(output.squeeze(0).clone().detach().numpy())
embedding_layer = util.find_embedding_layer(self.predictor._model)
handle = embedding_layer.register_forward_hook(forward_hook)
return handle
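# A hedged usage sketch (the model path and input key are assumptions; any
# registered AllenNLP predictor that accepts JSON inputs would work similarly):
#
#   from allennlp.predictors import Predictor
#   predictor = Predictor.from_path("path/to/model.tar.gz")
#   interpreter = SimpleGradient(predictor)
#   saliency = interpreter.saliency_interpret_from_json({"sentence": "a test input"})
#   # saliency -> {"instance_1": {"grad_input_1": [...normalized scores...], ...}}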
|
# coding: utf-8
"""
FlashArray REST API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: 2.5
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re
import six
import typing
from ....properties import Property
if typing.TYPE_CHECKING:
from pypureclient.flasharray.FA_2_5 import models
class Qos(object):
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'bandwidth_limit': 'int',
'iops_limit': 'int'
}
attribute_map = {
'bandwidth_limit': 'bandwidth_limit',
'iops_limit': 'iops_limit'
}
required_args = {
}
def __init__(
self,
bandwidth_limit=None, # type: int
iops_limit=None, # type: int
):
"""
Keyword args:
bandwidth_limit (int): The maximum QoS bandwidth limit for the volume. Whenever throughput exceeds the bandwidth limit, throttling occurs. Measured in bytes per second. Maximum limit is 512 GB/s.
iops_limit (int): The QoS IOPs limit for the volume.
"""
if bandwidth_limit is not None:
self.bandwidth_limit = bandwidth_limit
if iops_limit is not None:
self.iops_limit = iops_limit
def __setattr__(self, key, value):
if key not in self.attribute_map:
raise KeyError("Invalid key `{}` for `Qos`".format(key))
if key == "bandwidth_limit" and value is not None:
if value > 549755813888:
raise ValueError("Invalid value for `bandwidth_limit`, value must be less than or equal to `549755813888`")
if value < 1048576:
raise ValueError("Invalid value for `bandwidth_limit`, must be a value greater than or equal to `1048576`")
if key == "iops_limit" and value is not None:
if value > 104857600:
raise ValueError("Invalid value for `iops_limit`, value must be less than or equal to `104857600`")
if value < 100:
raise ValueError("Invalid value for `iops_limit`, must be a value greater than or equal to `100`")
self.__dict__[key] = value
def __getattribute__(self, item):
value = object.__getattribute__(self, item)
if isinstance(value, Property):
raise AttributeError
else:
return value
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
if hasattr(self, attr):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(Qos, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, Qos):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
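# A hedged usage sketch: values must fall inside the ranges enforced by
# __setattr__ above (bandwidth_limit in bytes per second, iops_limit in IOPS).
#
#   qos = Qos(bandwidth_limit=104857600, iops_limit=1000)  # 100 MiB/s, 1000 IOPS
#   print(qos.to_dict())  # {'bandwidth_limit': 104857600, 'iops_limit': 1000}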
|
# Copyright 2019 The Pigweed Authors
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
"""Pigweed build environment for bazel."""
DEBUGGING = [
"-g",
]
# Standard compiler flags to reduce output binary size.
REDUCED_SIZE_COPTS = [
"-fno-common",
"-fno-exceptions",
"-ffunction-sections",
"-fdata-sections",
]
STRICT_WARNINGS_COPTS = [
"-Wall",
"-Wextra",
# Make all warnings errors, except for the exemptions below.
"-Werror",
"-Wno-error=cpp", # preprocessor #warning statement
"-Wno-error=deprecated-declarations", # [[deprecated]] attribute
]
CPP17_COPTS = [
"-std=c++17",
"-fno-rtti",
"-Wnon-virtual-dtor",
# Allow uses of the register keyword, which may appear in C headers.
"-Wno-register",
]
DISABLE_PENDING_WORKAROUND_OPTS = [
"-Wno-private-header",
]
PW_DEFAULT_COPTS = (
DEBUGGING +
REDUCED_SIZE_COPTS +
STRICT_WARNINGS_COPTS +
DISABLE_PENDING_WORKAROUND_OPTS
)
PW_DEFAULT_LINKOPTS = []
def _add_defaults(kwargs):
"""Adds default arguments suitable for both C and C++ code to kwargs."""
kwargs["copts"] = kwargs.get("copts", []) + PW_DEFAULT_COPTS
kwargs["linkopts"] = kwargs.get("linkopts", []) + PW_DEFAULT_LINKOPTS
# Set linkstatic to avoid building .so files.
kwargs["linkstatic"] = True
kwargs.setdefault("features", [])
# Crosstool--adding this line to features disables header modules, which
# don't work with -fno-rtti. Note: this is not a command-line argument,
# it's "minus use_header_modules".
kwargs["features"].append("-use_header_modules")
def _default_cc_and_c_kwargs(kwargs):
_add_defaults(kwargs)
kwargs.setdefault("srcs", [])
cc = dict(kwargs.items())
cc["srcs"] = [src for src in kwargs["srcs"] if not src.endswith(".c")]
cc["copts"] = cc["copts"] + CPP17_COPTS
c_srcs = [src for src in kwargs["srcs"] if src.endswith(".c")]
if c_srcs:
c = dict(kwargs.items())
c["name"] += "_c"
c["srcs"] = c_srcs + [src for src in kwargs["srcs"] if src.endswith(".h")]
cc["deps"] = cc.get("deps", []) + [":" + c["name"]]
return cc, c
return cc, None
def _add_cc_and_c_targets(target, kwargs):
cc_kwargs, c_kwargs = _default_cc_and_c_kwargs(kwargs)
if c_kwargs:
native.cc_library(**c_kwargs)
target(**cc_kwargs)
def pw_cc_binary(**kwargs):
_add_cc_and_c_targets(native.cc_binary, kwargs)
def pw_cc_library(**kwargs):
_add_cc_and_c_targets(native.cc_library, kwargs)
def pw_cc_test(**kwargs):
kwargs["deps"] = kwargs.get("deps", []) + ["//pw_unit_test:main"]
_add_cc_and_c_targets(native.cc_test, kwargs)
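# A hedged BUILD-file sketch (load path, target names, and file names are
# assumptions): the macros above forward to native.cc_library/cc_binary/cc_test
# after appending the default copts/linkopts, so they are used like the native rules:
#
#   load("//pw_build:pigweed.bzl", "pw_cc_library", "pw_cc_test")
#
#   pw_cc_library(
#       name = "my_module",
#       srcs = ["my_module.cc"],
#       hdrs = ["my_module.h"],
#   )
#
#   pw_cc_test(
#       name = "my_module_test",
#       srcs = ["my_module_test.cc"],
#       deps = [":my_module"],
#   )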
|
# -*- coding: utf-8 -*-
from framework.routing import Rule, json_renderer
from website.addons.github import views
settings_routes = {
'rules': [
# Configuration
Rule(
[
'/project/<pid>/github/settings/',
'/project/<pid>/node/<nid>/github/settings/',
],
'post',
views.config.github_set_config,
json_renderer,
),
Rule(
[
'/project/<pid>/github/settings/',
'/project/<pid>/node/<nid>/github/settings/',
],
'get',
views.config.github_get_config,
json_renderer,
),
Rule(
[
'/project/<pid>/github/settings/',
'/project/<pid>/node/<nid>/github/settings/',
'/project/<pid>/github/config/',
'/project/<pid>/node/<nid>/github/config/',
],
'delete',
views.config.github_remove_node_settings,
json_renderer,
),
Rule(
[
'/project/<pid>/github/repos/',
'/project/<pid>/node/<nid>/github/repos/',
],
'get',
views.config.github_repo_list,
json_renderer,
),
Rule(
[
'/project/<pid>/github/tarball/',
'/project/<pid>/node/<nid>/github/tarball/',
],
'get',
views.crud.github_download_starball,
json_renderer,
{'archive': 'tar'},
endpoint_suffix='__tar',
),
Rule(
[
'/project/<pid>/github/zipball/',
'/project/<pid>/node/<nid>/github/zipball/',
],
'get',
views.crud.github_download_starball,
json_renderer,
{'archive': 'zip'},
endpoint_suffix='__zip',
),
Rule(
[
'/project/<pid>/github/hook/',
'/project/<pid>/node/<nid>/github/hook/',
],
'post',
views.hooks.github_hook_callback,
json_renderer,
),
# OAuth: User
Rule(
'/settings/github/oauth/',
'get',
views.auth.github_oauth_start,
json_renderer,
endpoint_suffix='__user',
),
Rule(
'/settings/github/oauth/',
'delete',
views.auth.github_oauth_delete_user,
json_renderer,
),
# OAuth: Node
Rule(
[
'/project/<pid>/github/oauth/',
'/project/<pid>/node/<nid>/github/oauth/',
],
'get',
views.auth.github_oauth_start,
json_renderer,
),
Rule(
[
'/project/<pid>/github/user_auth/',
'/project/<pid>/node/<nid>/github/user_auth/',
],
'post',
views.auth.github_add_user_auth,
json_renderer,
),
Rule(
[
'/project/<pid>/github/oauth/',
'/project/<pid>/node/<nid>/github/oauth/',
'/project/<pid>/github/config/',
'/project/<pid>/node/<nid>/github/config/'
],
'delete',
views.auth.github_oauth_deauthorize_node,
json_renderer,
),
# OAuth: General
Rule(
[
'/addons/github/callback/<uid>/',
'/addons/github/callback/<uid>/<nid>/',
],
'get',
views.auth.github_oauth_callback,
json_renderer,
),
],
'prefix': '/api/v1',
}
api_routes = {
'rules': [
Rule(
[
'/project/<pid>/github/newrepo/',
'/project/<pid>/node/<nid>/github/newrepo/',
],
'post',
views.repos.github_create_repo,
json_renderer,
),
Rule(
[
'/project/<pid>/github/hgrid/',
'/project/<pid>/node/<nid>/github/hgrid/',
'/project/<pid>/github/hgrid/<path:path>/',
'/project/<pid>/node/<nid>/github/hgrid/<path:path>/',
],
'get',
views.hgrid.github_hgrid_data_contents,
json_renderer,
),
Rule(
[
'/project/<pid>/github/hgrid/root/',
'/project/<pid>/node/<nid>/github/hgrid/root/',
],
'get',
views.hgrid.github_root_folder_public,
json_renderer,
),
],
'prefix': '/api/v1'
}
|
import matplotlib.pyplot as plt
from math import pi
import numpy as np
|
# coding=utf-8
# Copyright 2017 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base class for problem/dataset definitions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import os
import random
# Dependency imports
import six
from tensor2tensor.data_generators import generator_utils
from tensor2tensor.data_generators import text_encoder
from tensor2tensor.utils import metrics
from tensor2tensor.utils import registry
import tensorflow as tf
class SpaceID(object):
"""Input and target space ids. Add more as needed."""
# Generic / unknown output space (default)
GENERIC = 0
# Image labels
IMAGE_LABEL = 1
# English characters
EN_CHR = 2
# English tokens
EN_TOK = 3
# English bpe tokens
EN_BPE_TOK = 4
# French characters
FR_CHR = 5
# French tokens
FR_TOK = 6
# German characters
DE_CHR = 7
# German tokens
DE_TOK = 8
# German bpe tokens
DE_BPE_TOK = 9
# Digit cipher lexicon 0
DIGIT_0 = 10
# Digit cipher lexicon 1
DIGIT_1 = 11
# Audio waveform domain
AUDIO_WAV = 12
# Audio spectral domain
AUDIO_SPECTRAL = 13
# Parse characters
PARSE_CHR = 14
# Parse tokens
PARSE_TOK = 15
# Chinese tokens
ZH_TOK = 16
# Icelandic characters
ICE_CHAR = 17
# Icelandic tokens
ICE_TOK = 18
# Icelandic parse tokens
ICE_PARSE_TOK = 19
# Macedonian tokens
MK_TOK = 20
# Czech tokens
CS_TOK = 21
# Czech characters
CS_CHR = 22
# Genetic bases (ACTG)
DNA = 23
# Real numbers
REAL = 24
# Images
IMAGE = 25
# Peptide
PEPTIDE = 26
# Python
PY_TOK = 27
# C++
CPP_TOK = 28
# Strokes
STROKES = 29
# Pickled Python
PICKLED_PYTHON = 30
def default_model_hparams():
return tf.contrib.training.HParams(
max_input_seq_length=0,
max_target_seq_length=0,
prepend_mode="none",
data_dir=None)
def preprocess_example_common(example, hparams, mode):
"""Preprocessing steps common to all models."""
if hparams.max_input_seq_length > 0:
example["inputs"] = example["inputs"][:hparams.max_input_seq_length]
if hparams.max_target_seq_length > 0:
example["targets"] = example["targets"][:hparams.max_target_seq_length]
if hparams.prepend_mode != "none":
if mode == tf.estimator.ModeKeys.PREDICT:
example["partial_targets"] = tf.concat([example["inputs"], [0]], 0)
else:
example["targets"] = tf.concat(
[example["inputs"], [0], example["targets"]], 0)
return example
class Problem(object):
"""Problem base class. Specifies a T2T problem.
Problems unify the specification of a problem for data generation, training,
and inference.
New problems are specified by the following methods:
Data generation:
* generate_data(data_dir, tmp_dir)
- Generate training and dev datasets into data_dir.
- Additional files, e.g. vocabulary files, should also be written to
data_dir. Vocab files are newline-separated files with each line
containing a token. The standard convention for the filename is to
set it to be
${Problem.vocab_name}.${Problem.targeted_vocab_size}
- Downloads and other files can be written to tmp_dir
- If you have a training and dev generator, you can generate the
training and dev datasets with
generator_utils.generate_dataset_and_shuffle.
- Use the self.training_filepaths and self.dev_filepaths functions to
get sharded filenames. If shuffled=False, the filenames will contain
an "unshuffled" suffix; you should then shuffle the data
shard-by-shard with generator_utils.shuffle_dataset.
- The number of shards may optionally be specified (it can be omitted).
- Subclasses must override
* dataset_filename()
- Base filename for problem.
- Defaults to registered name (self.name).
Training:
* hparams(defaults, model_hparams)
- Specify the problem hyperparameters (see _default_hparams)
- Mutate defaults as needed
* example_reading_spec
- Specify the names and types of the features on disk.
- Specify tf.contrib.slim.tfexample_decoder
* preprocess_example(example, mode)
- Preprocess the example feature dict from feature name to Tensor or
SparseTensor.
- Used in training, eval, and inference (specified by mode).
Eval:
* eval_metrics
- Specify the set of evaluation metrics for this problem.
Inference:
* feature_encoders(data_dir)
- Return a dict of <feature name, TextEncoder> for encoding and decoding
inference input/output.
- Defaults to TextEncoder for inputs and targets.
"""
# ============================================================================
# BEGIN SUBCLASS INTERFACE
# ============================================================================
def generate_data(self, data_dir, tmp_dir, task_id=-1):
raise NotImplementedError()
def hparams(self, defaults, model_hparams):
pass
def dataset_filename(self):
return self.name
def feature_encoders(self, data_dir):
del data_dir
return {
"inputs": text_encoder.TextEncoder(),
"targets": text_encoder.TextEncoder()
}
def example_reading_spec(self):
data_fields = {
"inputs": tf.VarLenFeature(tf.int64),
"targets": tf.VarLenFeature(tf.int64)
}
data_items_to_decoders = None
return (data_fields, data_items_to_decoders)
def preprocess_example(self, example, mode, hparams):
return preprocess_example_common(example, hparams, mode)
def eval_metrics(self):
return [
metrics.Metrics.ACC, metrics.Metrics.ACC_TOP5,
metrics.Metrics.ACC_PER_SEQ, metrics.Metrics.NEG_LOG_PERPLEXITY
]
# ============================================================================
# END SUBCLASS INTERFACE
# ============================================================================
def training_filepaths(self, data_dir, num_shards, shuffled):
file_basename = self.dataset_filename()
if not shuffled:
file_basename += generator_utils.UNSHUFFLED_SUFFIX
return generator_utils.train_data_filenames(file_basename, data_dir,
num_shards)
def dev_filepaths(self, data_dir, num_shards, shuffled):
file_basename = self.dataset_filename()
if not shuffled:
file_basename += generator_utils.UNSHUFFLED_SUFFIX
return generator_utils.dev_data_filenames(file_basename, data_dir,
num_shards)
def test_filepaths(self, data_dir, num_shards, shuffled):
file_basename = self.dataset_filename()
if not shuffled:
file_basename += generator_utils.UNSHUFFLED_SUFFIX
return generator_utils.test_data_filenames(file_basename, data_dir,
num_shards)
def filepattern(self, data_dir, mode, shard=None):
"""Get filepattern for data files for mode.
Matches mode to a suffix.
* TRAIN: train
* EVAL: dev
* PREDICT: dev
* test: test
Args:
data_dir: str, data directory.
mode: tf.estimator.ModeKeys or "test".
shard: int, if provided, will only read data from the specified shard.
Returns:
filepattern str
"""
path = os.path.join(data_dir, self.dataset_filename())
shard_str = "-%05d" % shard if shard is not None else ""
if mode == tf.estimator.ModeKeys.TRAIN:
suffix = "train"
elif mode in [tf.estimator.ModeKeys.EVAL, tf.estimator.ModeKeys.PREDICT]:
suffix = "dev"
else:
assert mode == "test"
suffix = "test"
return "%s-%s%s*" % (path, suffix, shard_str)
def __init__(self, was_reversed=False, was_copy=False):
"""Create a Problem.
Args:
was_reversed: bool, whether to reverse inputs and targets.
was_copy: bool, whether to copy inputs to targets. Can be composed with
was_reversed so that if both are true, the targets become the inputs,
which are then copied to targets so that the task is targets->targets.
"""
self._was_reversed = was_reversed
self._was_copy = was_copy
self._encoders = None
self._hparams = None
self._feature_info = None
def get_feature_encoders(self, data_dir=None):
if self._encoders is None:
self._encoders = self.feature_encoders(data_dir)
return self._encoders
def get_hparams(self, model_hparams=None):
"""Returns problem_hparams."""
if self._hparams is not None:
return self._hparams
if self._encoders is None:
data_dir = (model_hparams and model_hparams.data_dir) or None
self.get_feature_encoders(data_dir)
hp = _default_hparams()
ret = self.hparams(hp, model_hparams)
if ret is not None:
raise ValueError("The Problem subclass hparams function should mutate "
"the defaults passed in and return None.")
hp.add_hparam("vocabulary", self._encoders)
hp.add_hparam("was_reversed", self._was_reversed)
hp.add_hparam("was_copy", self._was_copy)
if self._was_reversed:
_reverse_problem_hparams(hp)
if self._was_copy:
_copy_problem_hparams(hp)
self._hparams = hp
return self._hparams
def maybe_reverse_features(self, feature_map):
if not self._was_reversed:
return
inputs, targets = feature_map["inputs"], feature_map["targets"]
feature_map["inputs"], feature_map["targets"] = targets, inputs
def maybe_copy_features(self, feature_map):
if not self._was_copy:
return
feature_map["targets"] = feature_map["inputs"]
def dataset(self,
mode,
data_dir=None,
num_threads=None,
output_buffer_size=None,
shuffle_files=None,
hparams=None,
preprocess=True,
dataset_split=None,
shard=None):
"""Build a Dataset for this problem.
Args:
mode: tf.estimator.ModeKeys; determines which files to read from.
data_dir: directory that contains data files.
num_threads: int, number of threads to use for decode and preprocess
Dataset.map calls.
output_buffer_size: int, how many elements to prefetch in Dataset.map
calls.
shuffle_files: whether to shuffle input files. Default behavior (i.e. when
shuffle_files=None) is to shuffle if mode == TRAIN.
hparams: tf.contrib.training.HParams; hparams to be passed to
Problem.preprocess_example and Problem.hparams. If None, will use a
default set that is a no-op.
preprocess: bool, whether to map the Dataset through
Problem.preprocess_example.
dataset_split: tf.estimator.ModeKeys + ["test"], which split to read data
from (TRAIN:"-train", EVAL:"-dev", "test":"-test"). Defaults to mode.
shard: int, if provided, will only read data from the specified shard.
Returns:
Dataset containing dict<feature name, Tensor>.
"""
dataset_split = dataset_split or mode
assert data_dir
if hparams is None:
hparams = default_model_hparams()
if not hasattr(hparams, "data_dir"):
hparams.add_hparam("data_dir", data_dir)
if not hparams.data_dir:
hparams.data_dir = data_dir
# Construct the Problem's hparams so that items within it are accessible
_ = self.get_hparams(hparams)
data_fields, data_items_to_decoders = self.example_reading_spec()
if data_items_to_decoders is None:
data_items_to_decoders = {
field: tf.contrib.slim.tfexample_decoder.Tensor(field)
for field in data_fields
}
is_training = mode == tf.estimator.ModeKeys.TRAIN
data_filepattern = self.filepattern(data_dir, dataset_split, shard=shard)
tf.logging.info("Reading data files from %s", data_filepattern)
data_files = tf.contrib.slim.parallel_reader.get_data_files(
data_filepattern)
if shuffle_files or (shuffle_files is None and is_training):
random.shuffle(data_files)
dataset = tf.contrib.data.TFRecordDataset(data_files)
def decode_record(record):
"""Serialized Example to dict of <feature name, Tensor>."""
decoder = tf.contrib.slim.tfexample_decoder.TFExampleDecoder(
data_fields, data_items_to_decoders)
decode_items = list(data_items_to_decoders)
decoded = decoder.decode(record, items=decode_items)
return dict(zip(decode_items, decoded))
def _preprocess(example):
example = self.preprocess_example(example, mode, hparams)
self.maybe_reverse_features(example)
self.maybe_copy_features(example)
return example
dataset = dataset.map(decode_record, num_threads=num_threads)
if preprocess:
dataset = dataset.map(
_preprocess,
num_threads=num_threads,
output_buffer_size=output_buffer_size)
return dataset
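# Usage sketch (hypothetical problem name): reading the training split of a
# registered problem through this method.
#
#   problem = registry.problem("my_problem")
#   dataset = problem.dataset(tf.estimator.ModeKeys.TRAIN, data_dir="/data",
#                             num_threads=4, output_buffer_size=256)
#   features = dataset.make_one_shot_iterator().get_next()
#   # features is a dict, e.g. {"inputs": <int64 Tensor>, "targets": <int64 Tensor>}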
@property
def has_inputs(self):
return "inputs" in self.get_feature_encoders()
@property
def feature_info(self):
"""Retrieve dict<feature name, FeatureInfo>.
Must first call Problem.get_hparams or Problem.dataset to have the problem's
internal hparams already constructed.
Returns:
dict<feature name, FeatureInfo>
"""
if self._feature_info is not None:
return self._feature_info
assert self._hparams is not None
hp = self.get_hparams()
input_mods = hp.input_modality
target_mod = hp.target_modality
vocabs = hp.vocabulary
if self.has_inputs:
in_id = hp.input_space_id
out_id = hp.target_space_id
features = collections.defaultdict(FeatureInfo)
for name, mod_spec in six.iteritems(input_mods):
mod, vocab_size = mod_spec
finfo = features[name]
finfo.modality = mod
finfo.vocab_size = vocab_size
mod, vocab_size = target_mod
features["targets"].modality = mod
features["targets"].vocab_size = vocab_size
for name, encoder in six.iteritems(vocabs):
features[name].encoder = encoder
if self.has_inputs:
features["inputs"].space_id = in_id
features["targets"].space_id = out_id
self._feature_info = features
return features
class FeatureInfo(object):
def __init__(self,
encoder=None,
modality=None,
vocab_size=None,
space_id=None):
self.encoder = encoder
self.modality = modality
self.vocab_size = vocab_size
self.space_id = space_id
def _copy_problem_hparams(p_hparams):
"""Use input modality, vocab, and space id for target."""
p = p_hparams
# Duplicate input modality.
p.target_modality = p.input_modality["inputs"]
# Duplicate input vocabulary.
p.vocabulary["targets"] = p.vocabulary["inputs"]
# Duplicate input space ids.
p.target_space_id = p.input_space_id
# Mark that p was copied.
p.was_copy = True
def _reverse_problem_hparams(p_hparams):
"""Swap input/output modalities, vocab, and space ids."""
p = p_hparams
# Swap modalities.
input_modality = p.input_modality["inputs"]
target_modality = p.target_modality
p.input_modality["inputs"] = target_modality
p.target_modality = input_modality
# Swap vocabularies.
input_vocabulary = p.vocabulary["inputs"]
target_vocabulary = p.vocabulary["targets"]
p.vocabulary["inputs"] = target_vocabulary
p.vocabulary["targets"] = input_vocabulary
# Swap input/target space ids.
input_space_id = p.input_space_id
target_space_id = p.target_space_id
p.input_space_id = target_space_id
p.target_space_id = input_space_id
# Mark that p was reversed.
p.was_reversed = True
def _default_hparams():
"""A set of basic model hyperparameters."""
return tf.contrib.training.HParams(
# Use this parameter to get comparable perplexity numbers with different
# tokenizations. This value should be set to the ratio of the number of
# tokens in the test set according to the tokenization used to the number
# of tokens in the test set in the "official" tokenization. For
# example, if we are using a word-piece based model and we want to
# compute per-word perplexity, then we set loss_multiplier to the number
# of wordpieces per word in the test set.
loss_multiplier=1.0,
# Use this parameter to allow for larger sequences in the batch. Without
# the use of this parameter, the size of the inner two dimensions will
# be used to judge the sequence length.
batch_size_multiplier=1,
# To make queues of the right capacity, it's good to know the maximal
# expected batch size, as it can vary a lot. It only affects performance
# of input readers and memory use. The defaults should be safe and fast,
# but decrease if your reader uses a lot of memory and increase if slow.
max_expected_batch_size_per_shard=64,
# During inference for autoregressive problems, if the batch_size is 1,
# the inference will stop when the model predicts a text_encoder.EOS_ID
# token.
stop_at_eos=False,
# Modalities used to map from input features to a space compatible with
# chosen model architecture. One modality spec (which is a 2-tuple,
# (modality_full_name, vocab_size)) per feature key. modality_full_name
# is a string type:name, e.g. class_label:class_label_2d. Leaving off
# the name uses the default modality for that type (e.g. class_label ==
# class_label:default).
input_modality={},
# Modality used to map from hidden representation to the target space.
# Specified as a modality spec, a 2-tuple described above.
target_modality=None,
# Identifiers used to tell the model which input/target space will be
# expected. For example, it can tell that we expect French as characters
# as output, or Spanish as sound. Spaces defined as constants in SpaceID
# class.
input_space_id=SpaceID.GENERIC,
target_space_id=SpaceID.GENERIC)
class Text2TextProblem(Problem):
"""Base class for text-to-text problems."""
@property
def is_character_level(self):
"""Whether the inputs and targets are sequences of characters."""
raise NotImplementedError()
@property
def targeted_vocab_size(self):
raise NotImplementedError() # Not needed if self.is_character_level.
def generator(self, data_dir, tmp_dir, is_training):
"""Generator for the training and evaluation data.
Args:
data_dir: The directory in which to place assets, e.g. the vocab file.
tmp_dir: A scratch directory (if needed).
is_training: A boolean indicating if we should generate training data
(True) or dev set data (False).
Yields:
dicts with keys "inputs" and "targets", with values being lists of token
ids.
"""
raise NotImplementedError()
@property
def use_train_shards_for_dev(self):
"""If true, we only generate training data and hold out shards for dev."""
return False
@property
def input_space_id(self):
raise NotImplementedError()
@property
def target_space_id(self):
raise NotImplementedError()
@property
def num_shards(self):
raise NotImplementedError()
@property
def num_dev_shards(self):
return 1
@property
def vocab_name(self):
raise NotImplementedError()
@property
def vocab_file(self):
return "%s.%d" % (self.vocab_name, self.targeted_vocab_size)
@property
def use_subword_tokenizer(self):
raise NotImplementedError()
@property
def has_inputs(self):
return True # Set to False for language models.
def generate_data(self, data_dir, tmp_dir, task_id=-1):
train_paths = self.training_filepaths(
data_dir, self.num_shards, shuffled=False)
dev_paths = self.dev_filepaths(
data_dir, self.num_dev_shards, shuffled=False)
if self.use_train_shards_for_dev:
all_paths = train_paths + dev_paths
generator_utils.generate_files(
self.generator(data_dir, tmp_dir, True), all_paths)
generator_utils.shuffle_dataset(all_paths)
else:
generator_utils.generate_dataset_and_shuffle(
self.generator(data_dir, tmp_dir, True), train_paths,
self.generator(data_dir, tmp_dir, False), dev_paths)
def feature_encoders(self, data_dir):
if self.is_character_level:
encoder = text_encoder.ByteTextEncoder()
elif self.use_subword_tokenizer:
vocab_filename = os.path.join(data_dir, self.vocab_file)
encoder = text_encoder.SubwordTextEncoder(vocab_filename)
else:
vocab_filename = os.path.join(data_dir, self.vocab_file)
encoder = text_encoder.TokenTextEncoder(vocab_filename)
if self.has_inputs:
return {"inputs": encoder, "targets": encoder}
return {"targets": encoder}
def hparams(self, defaults, unused_model_hparams):
p = defaults
p.stop_at_eos = int(True)
if self.has_inputs:
source_vocab_size = self._encoders["inputs"].vocab_size
p.input_modality = {
"inputs": (registry.Modalities.SYMBOL, source_vocab_size)
}
target_vocab_size = self._encoders["targets"].vocab_size
p.target_modality = (registry.Modalities.SYMBOL, target_vocab_size)
if self.has_inputs:
p.input_space_id = self.input_space_id
p.target_space_id = self.target_space_id
if self.is_character_level:
p.loss_multiplier = 2.0
def eval_metrics(self):
return [
metrics.Metrics.ACC, metrics.Metrics.ACC_TOP5,
metrics.Metrics.ACC_PER_SEQ, metrics.Metrics.NEG_LOG_PERPLEXITY,
metrics.Metrics.APPROX_BLEU, metrics.Metrics.ROUGE_2_F,
metrics.Metrics.ROUGE_L_F
]
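# A minimal, hypothetical Text2TextProblem subclass is sketched below to show how
# the abstract properties above fit together. The vocab size, shard count, space
# ids and toy generator are illustrative assumptions, not part of the original
# module, and the class is deliberately not registered.
class _ExampleText2TextProblem(Text2TextProblem):
  """Toy text-to-text problem (sketch only)."""

  @property
  def is_character_level(self):
    return False

  @property
  def targeted_vocab_size(self):
    return 2**13  # assumed vocabulary size

  @property
  def vocab_name(self):
    return "vocab.example"

  @property
  def use_subword_tokenizer(self):
    return True

  @property
  def num_shards(self):
    return 10

  @property
  def input_space_id(self):
    return SpaceID.GENERIC

  @property
  def target_space_id(self):
    return SpaceID.GENERIC

  def generator(self, data_dir, tmp_dir, is_training):
    del data_dir, tmp_dir, is_training
    # A real problem would read a corpus from tmp_dir and encode it with the
    # vocabulary; here we just emit already-encoded toy token ids.
    for i in range(100):
      tokens = [i % 7 + 2, i % 5 + 2]
      yield {"inputs": tokens, "targets": tokens}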
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright 2019, 2020 Matt Post <post@cs.jhu.edu>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Ingests data into the Anthology. It takes a list of one or more
ACLPUB proceedings/ directories and does the following:
- executes some basic sanity checks
- applies normalization to names and titles (e.g., fixed-case protection)
- generates the nested XML in the Anthology repository
- copies the PDFs and attachments into place for rsyncing to the server
Updated in March 2020, this script replaces:
- the old ingest.py (which converted the old ACLPUB flat XML format)
- anthologize.pl in ACLPUB
- anthology_xml.py in ACLPUB
"""
import argparse
import iso639
import os
import re
import readline
import shutil
import sys
import lxml.etree as etree
from collections import defaultdict, OrderedDict
from datetime import datetime
from normalize_anth import normalize
from anthology.bibtex import read_bibtex
from anthology.index import AnthologyIndex
from anthology.people import PersonName
from anthology.sigs import SIGIndex
from anthology.utils import (
make_simple_element,
build_anthology_id,
deconstruct_anthology_id,
indent,
compute_hash_from_file,
)
from anthology.venues import VenueIndex
from itertools import chain
from typing import Dict, Any
from slugify import slugify
def log(text: str, fake: bool = False):
message = "[DRY RUN] " if fake else ""
print(f"{message}{text}", file=sys.stderr)
def read_meta(path: str) -> Dict[str, Any]:
meta = {"chairs": []}
with open(path) as instream:
for line in instream:
if re.match(r"^\s*$", line):
continue
key, value = line.rstrip().split(" ", maxsplit=1)
if key.startswith("chair"):
meta["chairs"].append(value)
else:
meta[key] = value
if "volume" in meta and re.match(rf"^[a-z0-1]+$", meta["volume"]) is None:
raise Exception(f"Invalid volume key '{meta['volume']}' in {path}")
return meta
def maybe_copy(source_path, dest_path):
"""Copies the file if it's different from the target."""
if not os.path.exists(dest_path) or compute_hash_from_file(
source_path
) != compute_hash_from_file(dest_path):
log(f"Copying {source_path} -> {dest_path}", args.dry_run)
shutil.copyfile(source_path, dest_path)
def bib2xml(bibfilename, anthology_id):
"""
Moved here from ACLPUB's anthology_xml.py script.
"""
fields = [
'title',
'author',
'editor',
'booktitle',
'month',
'year',
'address',
'publisher',
'pages',
'abstract',
'url',
'doi',
'language',
]
try:
collection_id, volume_name, paper_no = deconstruct_anthology_id(anthology_id)
except ValueError:
print(f"Couldn't split {anthology_id}", file=sys.stderr)
sys.exit(1)
if paper_no == '':
return # skip the master bib file; we only process the individual files
bibdata = read_bibtex(bibfilename)
if len(bibdata.entries) != 1:
log(f"more than one entry in {bibfilename}")
bibkey, bibentry = bibdata.entries.items()[0]
if len(bibentry.fields) == 0:
log(f"parsing bib of paper {paper_no} failed")
sys.exit(1)
paper = make_simple_element("paper", attrib={"id": paper_no})
for field in list(bibentry.fields) + list(bibentry.persons):
if field not in fields:
log(f"unknown field {field}")
for field in fields:
if field in ['author', 'editor']:
if field in bibentry.persons:
for person in bibentry.persons[field]:
first_text = ' '.join(person.bibtex_first_names)
last_text = ' '.join(person.prelast_names + person.last_names)
if person.lineage_names:
last_text += ', ' + ' '.join(person.lineage_names)
# Don't distinguish between authors that have only a first name
# vs. authors that have only a last name; always make it a last name.
if last_text.strip() in [
'',
'-',
]: # Some START users have '-' for null
last_text = first_text
first_text = ''
name_node = make_simple_element(field, parent=paper)
make_simple_element("first", first_text, parent=name_node)
make_simple_element("last", last_text, parent=name_node)
else:
if field == 'url':
value = f"{anthology_id}"
elif field in bibentry.fields:
value = bibentry.fields[field]
elif field == 'bibtype':
value = bibentry.type
elif field == 'bibkey':
value = bibkey
else:
continue
try:
make_simple_element(field, text=value, parent=paper)
except:
print(
f"Couldn't process {bibfilename} for {anthology_id}", file=sys.stderr
)
sys.exit(2)
return paper
def main(args):
collections = defaultdict(OrderedDict)
volumes = {}
anthology_datadir = os.path.join(os.path.dirname(sys.argv[0]), "..", "data")
venue_index = VenueIndex(srcdir=anthology_datadir)
venue_keys = [venue["slug"].lower() for _, venue in venue_index.items()]
sig_index = SIGIndex(srcdir=anthology_datadir)
# Build list of volumes, confirm uniqueness
unseen_venues = []
for proceedings in args.proceedings:
meta = read_meta(os.path.join(proceedings, "meta"))
venue_abbrev = meta["abbrev"]
venue_slug = venue_index.get_slug(venue_abbrev)
if str(datetime.now().year) in venue_abbrev:
print(f"Fatal: Venue assembler put year in acronym: '{venue_abbrev}'")
sys.exit(1)
if re.match(r".*\d$", venue_abbrev) is not None:
print(
f"WARNING: Venue {venue_abbrev} ends in a number, this is probably a mistake"
)
if venue_slug not in venue_keys:
unseen_venues.append((venue_slug, venue_abbrev, meta["title"]))
meta["path"] = proceedings
meta["collection_id"] = collection_id = meta["year"] + "." + venue_slug
volume_name = meta["volume"].lower()
volume_full_id = f"{collection_id}-{volume_name}"
if volume_full_id in volumes:
print("Error: ")
collections[collection_id][volume_name] = {}
volumes[volume_full_id] = meta
if "sig" in meta:
print(
f"Add this line to {anthology_datadir}/sigs/{meta['sig'].lower()}.yaml:"
)
print(f" - {meta['year']}:")
print(f" - {volume_full_id} # {meta['booktitle']}")
# Make sure all venues exist
if len(unseen_venues) > 0:
for venue in unseen_venues:
slug, abbrev, title = venue
print(f"Creating venue '{abbrev}' ({title})")
venue_index.add_venue(abbrev, title)
venue_index.dump(directory=anthology_datadir)
# Copy over the PDFs and attachments
for volume, meta in volumes.items():
root_path = os.path.join(meta["path"], "cdrom")
collection_id = meta["collection_id"]
venue_name = meta["abbrev"].lower()
volume_name = meta["volume"].lower()
year = meta["year"]
pdfs_dest_dir = os.path.join(args.pdfs_dir, venue_name)
if not os.path.exists(pdfs_dest_dir):
os.makedirs(pdfs_dest_dir)
# copy the book
book_dest_path = (
os.path.join(pdfs_dest_dir, f"{collection_id}-{volume_name}") + ".pdf"
)
# try the standard filename, e.g., 2021.naacl-main.pdf
book_src_filename = f'{year}.{meta["abbrev"]}-{volume_name}.pdf'
book_src_path = os.path.join(root_path, book_src_filename)
if not os.path.exists(book_src_path):
# try a different filename, e.g., "NLP4CALL-2021.pdf"
book_src_filename = f'{meta["abbrev"]}-{year}.pdf'
book_src_path = os.path.join(root_path, book_src_filename)
if os.path.exists(book_src_path) and not args.dry_run:
maybe_copy(book_src_path, book_dest_path)
# copy the paper PDFs
pdf_src_dir = os.path.join(root_path, "pdf")
for pdf_file in os.listdir(pdf_src_dir):
# Skip . files
if os.path.basename(pdf_file).startswith("."):
continue
# names are {abbrev}{number}.pdf
match = re.match(rf".*\.(\d+)\.pdf", pdf_file)
if match is not None:
paper_num = int(match[1])
paper_id_full = f"{collection_id}-{volume_name}.{paper_num}"
bib_path = os.path.join(
root_path,
"bib",
pdf_file.replace("/pdf", "/bib/").replace(".pdf", ".bib"),
)
pdf_src_path = os.path.join(pdf_src_dir, pdf_file)
pdf_dest_path = os.path.join(
pdfs_dest_dir, f"{collection_id}-{volume_name}.{paper_num}.pdf"
)
if not args.dry_run:
maybe_copy(pdf_src_path, pdf_dest_path)
collections[collection_id][volume_name][paper_num] = {
"anthology_id": paper_id_full,
"bib": bib_path,
"pdf": pdf_dest_path,
"attachments": [],
}
# copy the attachments
if os.path.exists(os.path.join(root_path, "additional")):
attachments_dest_dir = os.path.join(args.attachments_dir, venue_name)
if not os.path.exists(attachments_dest_dir):
os.makedirs(attachments_dest_dir)
for attachment_file in os.listdir(os.path.join(root_path, "additional")):
if os.path.basename(attachment_file).startswith("."):
continue
attachment_file_path = os.path.join(
root_path, "additional", attachment_file
)
match = re.match(
rf"{year}\.{venue_name}-\w+\.(\d+)_?(\w+)\.(\w+)$", attachment_file
)
if match is None:
print(
f"* Warning: no attachment match for {attachment_file}",
file=sys.stderr,
)
sys.exit(2)
paper_num, type_, ext = match.groups()
paper_num = int(paper_num)
file_name = f"{collection_id}-{volume_name}.{paper_num}.{type_}.{ext}"
dest_path = os.path.join(attachments_dest_dir, file_name)
if not args.dry_run and not os.path.exists(dest_path):
log(f"Copying {attachment_file} -> {dest_path}", args.dry_run)
shutil.copyfile(attachment_file_path, dest_path)
collections[collection_id][volume_name][paper_num]["attachments"].append(
(dest_path, type_)
)
people = AnthologyIndex(None, srcdir=anthology_datadir)
def correct_caps(person, name_node, anth_id):
"""
Many people submit their names in "ALL CAPS" or "all lowercase".
Correct this with heuristics.
"""
name = name_node.text
if name.islower() or name.isupper():
# capitalize all parts
corrected = " ".join(list(map(lambda x: x.capitalize(), name.split())))
print(
f"-> Correcting capitalization of '{name}' to '{corrected}'",
file=sys.stderr,
)
name_node.text = corrected
def disambiguate_name(node, anth_id):
name = PersonName.from_element(node)
ids = people.get_ids(name)
if len(ids) > 1:
choice = -1
while choice < 0 or choice >= len(ids):
print(
f"({anth_id}): ambiguous author {name}; Please choose from the following:"
)
for i, id_ in enumerate(ids):
print(f"[{i}] {id_} ({people.get_comment(id_)})")
choice = int(input("--> "))
node.attrib["id"] = ids[choice]
for collection_id, collection in collections.items():
# Newly added volumes, so we can normalize and name-disambig later
newly_added_volumes = []
collection_file = os.path.join(
args.anthology_dir, "data", "xml", f"{collection_id}.xml"
)
if os.path.exists(collection_file):
root_node = etree.parse(collection_file).getroot()
else:
root_node = make_simple_element("collection", attrib={"id": collection_id})
for volume_id, volume in collection.items():
volume_node = make_simple_element(
"volume",
attrib={"id": volume_id, "ingest-date": args.ingest_date},
)
# Replace the existing one if present
existing_volume_node = root_node.find(f"./volume[@id='{volume_id}']")
for i, child in enumerate(root_node):
if child.attrib["id"] == volume_id:
root_node[i] = volume_node
break
else:
root_node.append(volume_node)
meta_node = None
for paper_num, paper in sorted(volume.items()):
paper_id_full = paper["anthology_id"]
bibfile = paper["bib"]
paper_node = bib2xml(bibfile, paper_id_full)
if paper_node.attrib["id"] == "0":
# create metadata subtree
meta_node = make_simple_element("meta", parent=volume_node)
title_node = paper_node.find("title")
title_node.tag = "booktitle"
meta_node.append(title_node)
for author_or_editor in chain(
paper_node.findall("./author"), paper_node.findall("./editor")
):
meta_node.append(author_or_editor)
author_or_editor.tag = "editor"
meta_node.append(paper_node.find("publisher"))
meta_node.append(paper_node.find("address"))
meta_node.append(paper_node.find("month"))
meta_node.append(paper_node.find("year"))
if book_dest_path is not None:
make_simple_element(
"url",
text=f"{collection_id}-{volume_name}",
attrib={"hash": compute_hash_from_file(book_dest_path)},
parent=meta_node,
)
# modify frontmatter tag
paper_node.tag = "frontmatter"
del paper_node.attrib["id"]
else:
# remove unneeded fields
for child in paper_node:
if child.tag in [
"editor",
"address",
"booktitle",
"publisher",
"year",
"month",
]:
paper_node.remove(child)
url = paper_node.find("./url")
if url is not None:
url.attrib["hash"] = compute_hash_from_file(paper["pdf"])
for path, type_ in paper["attachments"]:
make_simple_element(
"attachment",
text=os.path.basename(path),
attrib={
"type": type_,
"hash": compute_hash_from_file(path),
},
parent=paper_node,
)
if len(paper_node) > 0:
volume_node.append(paper_node)
# Normalize
for oldnode in paper_node:
normalize(oldnode, informat="latex")
# Adjust the language tag
language_node = paper_node.find("./language")
if language_node is not None:
try:
lang = iso639.languages.get(name=language_node.text)
except KeyError:
raise Exception(f"Can't find language '{language_node.text}'")
language_node.text = lang.part3
print(language_node.text)
# Fix author names
for name_node in chain(
paper_node.findall("./author"), paper_node.findall("./editor")
):
disambiguate_name(name_node, paper_id_full)
person = PersonName.from_element(name_node)
for name_part in name_node:
correct_caps(person, name_part, paper_id_full)
# Other data from the meta file
if "isbn" in meta:
make_simple_element("isbn", meta["isbn"], parent=meta_node)
indent(root_node)
tree = etree.ElementTree(root_node)
tree.write(
collection_file, encoding="UTF-8", xml_declaration=True, with_tail=True
)
if __name__ == "__main__":
now = datetime.now()
today = f"{now.year}-{now.month:02d}-{now.day:02d}"
parser = argparse.ArgumentParser()
parser.add_argument(
"proceedings", nargs="+", help="List of paths to ACLPUB proceedings/ directories."
)
parser.add_argument(
"--ingest-date",
"-d",
type=str,
default=today,
help="Ingestion date as YYYY-MM-DD. Default: %(default)s.",
)
anthology_path = os.path.join(os.path.dirname(sys.argv[0]), "..")
parser.add_argument(
"--anthology-dir",
"-r",
default=anthology_path,
help="Root path of ACL Anthology Github repo. Default: %(default)s.",
)
pdfs_path = os.path.join(os.environ["HOME"], "anthology-files", "pdf")
parser.add_argument(
"--pdfs-dir", "-p", default=pdfs_path, help="Root path for placement of PDF files"
)
attachments_path = os.path.join(os.environ["HOME"], "anthology-files", "attachments")
parser.add_argument(
"--attachments-dir",
"-a",
default=attachments_path,
help="Root path for placement of PDF files",
)
parser.add_argument(
"--dry-run", "-n", action="store_true", help="Don't actually copy anything."
)
args = parser.parse_args()
main(args)
|
import tensorflow as tf
import numpy as np
import os,glob,cv2
import sys,argparse
# First, pass the path of the image
dir_path = os.path.dirname(os.path.realpath(__file__))
image_path=sys.argv[1]
filename = dir_path + '/' + image_path
image_size=128
num_channels=3
images = []
# Reading the image using OpenCV
image = cv2.imread(filename)
# Resizing the image to our desired size and preprocessing will be done exactly as done during training
image = cv2.resize(image, (image_size, image_size), interpolation=cv2.INTER_LINEAR)
images.append(image)
images = np.array(images, dtype=np.uint8)
images = images.astype('float32')
images = np.multiply(images, 1.0/255.0)
#The input to the network is of shape [None image_size image_size num_channels]. Hence we reshape.
x_batch = images.reshape(1, image_size,image_size,num_channels)
## Let us restore the saved model
sess = tf.Session()
# Step-1: Recreate the network graph. At this step only graph is created.
saver = tf.train.import_meta_graph('ore-mine-model.meta')
# Step-2: Now let's load the weights saved using the restore method.
saver.restore(sess, tf.train.latest_checkpoint('./'))
# Accessing the default graph which we have restored
graph = tf.get_default_graph()
# Now, let's get hold of the op that needs to be processed to get the output.
# In the original network y_pred is the tensor that is the prediction of the network
y_pred = graph.get_tensor_by_name("y_pred:0")
## Let's feed the images to the input placeholders
x= graph.get_tensor_by_name("x:0")
y_true = graph.get_tensor_by_name("y_true:0")
y_test_images = np.zeros((1, len(os.listdir('training_data'))))
### Creating the feed_dict that is required to be fed to calculate y_pred
feed_dict_testing = {x: x_batch, y_true: y_test_images}
result=sess.run(y_pred, feed_dict=feed_dict_testing)
# result is an array of class probabilities, e.g. [probability_of_class_0 probability_of_class_1 ...]
print(result)
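# Optional (hedged) post-processing: map the raw probabilities to folder names.
# This assumes the training script derived its label order from the sorted class
# sub-directories of 'training_data', which is a guess about this pipeline.
class_names = sorted(os.listdir('training_data'))
for class_name, prob in zip(class_names, result[0]):
    print('{}: {:.4f}'.format(class_name, prob))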
|
"""
ARIA -- Ambiguous Restraints for Iterative Assignment
A software for automated NOE assignment
Version 2.3
Copyright (C) Benjamin Bardiaux, Michael Habeck, Therese Malliavin,
Wolfgang Rieping, and Michael Nilges
All rights reserved.
NO WARRANTY. This software package is provided 'as is' without warranty of
any kind, expressed or implied, including, but not limited to the implied
warranties of merchantability and fitness for a particular purpose or
a warranty of non-infringement.
Distribution of substantively modified versions of this module is
prohibited without the explicit permission of the copyright holders.
$Author: bardiaux $
$Revision: 1.1.1.1 $
$Date: 2010/03/23 15:27:24 $
"""
from aria.ariabase import *
#import numpy as N
from numpy import *
from aria.Settings import Settings
class NOEModel(AriaBaseClass):
"""
The main class for calculating NOE spectra from structures.
Update: Malliavin/Bardiaux
Becomes an abstract class to distinguish between ISPA and RelaxationMatrix
and to include the spin-diffusion correction of distances.
"""
def __init__(self):
AriaBaseClass.__init__(self)
from aria.Contribution import ContributionEvaluator
self.__evaluator = ContributionEvaluator()
self.is_spin_diff = None
class ISPA(NOEModel):
def __init__(self):
NOEModel.__init__(self)
from aria.Contribution import ContributionEvaluator
self.__evaluator = ContributionEvaluator()
def calculatePeaksize(self, peak, ensemble):
"""
For the given peak (AriaPeak) this method computes
the intensity of a simulated NOE w.r.t. the instance's ensemble
of structures.
n_c: number of contributions
c_i: i-th contribution
<c_i>: ensemble-average of the i-th contribution's effective distance.
NOE = \sum_{i=1}^{n_c} <c_i>^{-6}
i.e. the NOE is the sum over ensemble-averaged contributions.
"""
check_type(peak, 'AriaPeak')
check_type(ensemble, 'StructureEnsemble')
if not peak:
self.error(ValueError, 'No contributions in xpk: %d' %
peak.getId())
from aria.mathutils import average
self.__evaluator.setStructureEnsemble(ensemble)
## for each structure: calculate effective distance
## for contribution, i.e. distances between atoms
## of every spinpair are averaged according to the
## type of the given contribution.
f = self.__evaluator.effective_distances
avg_distances = [f(c) for c in peak.getContributions()]
## for each contribution: calculate ensemble-average
## TODO: average -> _average, probably faster
avg_distances = average(avg_distances, axis = 1)
## calculate NOEs
d = power(avg_distances, -6.)
## NOE is sum over partial NOEs
return sum(d)
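# Worked example (illustrative numbers only): with two contributions whose
# ensemble-averaged effective distances are 2.5 A and 3.0 A,
#   NOE = 2.5**-6 + 3.0**-6 ~= 4.1e-3 + 1.4e-3 ~= 5.5e-3
# i.e. the shorter distance dominates because of the r^-6 dependence.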
class SpinDiffusionCorrection(NOEModel):
def __init__(self):
NOEModel.__init__(self)
from aria.Contribution import ContributionEvaluator
self.__evaluator = ContributionEvaluator()
self.__intensity_matrix = {}
def prepare(self, molecule, ensemble):
from aria.Relaxation import Relaxation
self.relaxation = Relaxation()
self.relaxation.initialize(molecule, ensemble)
self._spin_first_atom = self.relaxation.getNonEquivalentSpinList()
self.spin_ids = self.relaxation.spin_list_id
self._spin_multiplicity = self.relaxation.getSpinMultiplicity()
def setIntensityMatrix(self, spectrum):
m = self.relaxation.calculateIntensityMatrix(spectrum)
spectrum_name = spectrum.getName()
self.__intensity_matrix[spectrum_name] = m
def getIntensityMatrix(self, name):
return self.__intensity_matrix[name]
def calculatePeaksize(self, peak, ensemble):
## Malliavin 2005/2006
"""
For the given peak (AriaPeak) this method computes
the intensity of a simulated NOE w.r.t. the instance's ensemble
of structures.
n_c: number of contributions
c_i: i-th contribution
<c_i>: ensemble-average of the i-th contribution's effective distance.
NOE = \sum_{i=1}^{n_c} <c_i>^{-6}
i.e. the NOE is the sum over ensemble-averaged contributions.
"""
check_type(peak, 'AriaPeak')
check_type(ensemble, 'StructureEnsemble')
if not peak:
self.error(ValueError, 'No contributions in xpk: %d' %
peak.getId())
from aria.mathutils import average
from time import clock
self.__evaluator.setStructureEnsemble(ensemble)
# Modification Therese Malliavin, December 16, 2005
spectrum = peak.getReferencePeak().getSpectrum()
spectrum_name = spectrum.getName()
intensities = self.getIntensityMatrix(spectrum_name)
atoms = [tuple(sp.getAtoms()) for c in peak.getContributions() for sp in c.getSpinPairs()]
lstintens = []
spsys = [c.getSpinSystems() for c in peak.getContributions()]
atoms = [(s[0].getAtoms()[0], s[1].getAtoms()[0]) for s in spsys]
for a1, a2 in atoms:
sp1 = self.spin_ids[a1.getId()]
sp2 = self.spin_ids[a2.getId()]
lstintens.append(intensities[sp1,sp2])
## for a1, a2 in atoms:
## #uu = [sp.count(a1) for sp in SpinFirstAtom]
## #sp1 = uu.index(1)
## #sp1 = self._get_spin_first_atom_id(a1)
## sp1 = self.spin_ids[a1.getId()]
## if self._spin_multiplicity[sp1] > 1 and a1 != self._spin_first_atom[sp1][0]:
## sp1 = 0
## #sp2 = self._get_spin_first_atom_id(a2)
## sp2 = self.spin_ids[a2.getId()]
## #uu = [sp.count(a2) for sp in SpinFirstAtom]
## #sp2 = uu.index(1)
## if self._spin_multiplicity[sp2] > 1 and a2 != self._spin_first_atom[sp2][0]:
## sp2 = 0
## if sp1 != 0 and sp2 != 0:
## lstintens.append(intensities[sp1,sp2])
## for a1, a2 in atoms:
## sp1 = self.spin_ids[a1.getId()]
## sp2 = self.spin_ids[a2.getId()]
## lstintens.append(intensities[sp1,sp2])
int_aria_pk = sum(lstintens)
peak.setTheoricVolume(int_aria_pk)
## TEST ISPA
ispa = []
for a1, a2 in atoms:
sp1 = self.spin_ids[a1.getId()]
sp2 = self.spin_ids[a2.getId()]
ispa.append(self.relaxation.distance_matrix[sp1,sp2])
peak.setIspa(sum(ispa))
return int_aria_pk
def _get_spin_first_atom_id(self, a):
for i in range(len(self._spin_first_atom)):
if a in self._spin_first_atom[i]: return i
|
import numpy as np
import torch
import torch.nn.functional as F
import torch.nn as nn
from sklearn.utils import class_weight
from utils.lovasz_losses import lovasz_softmax
import pdb
def make_one_hot(labels, classes):
one_hot = torch.FloatTensor(labels.size()[0], classes, labels.size()[2], labels.size()[3]).zero_().to(labels.device)
target = one_hot.scatter_(1, labels.data, 1)
return target
def get_weights(target):
t_np = target.view(-1).data.cpu().numpy()
classes, counts = np.unique(t_np, return_counts=True)
cls_w = np.median(counts) / counts
#cls_w = class_weight.compute_class_weight('balanced', classes, t_np)
weights = np.ones(7)
weights[classes] = cls_w
return torch.from_numpy(weights).float().cuda()
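# Worked example: for a target batch containing classes [0, 3] with pixel counts
# [900, 100], median(counts) = 500, so cls_w = [500/900, 500/100] ~= [0.56, 5.0];
# classes absent from the batch keep the default weight of 1.0.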
class CrossEntropyLoss2d(nn.Module):
def __init__(self, weight=None, ignore_index=255, reduction='mean'):
super(CrossEntropyLoss2d, self).__init__()
self.CE = nn.CrossEntropyLoss(weight=weight, ignore_index=ignore_index, reduction=reduction)
def forward(self, output, target):
loss = self.CE(output, target)
return loss
class DiceLoss(nn.Module):
def __init__(self, smooth=1., ignore_index=255):
super(DiceLoss, self).__init__()
self.ignore_index = ignore_index
self.smooth = smooth
def forward(self, output, target):
if self.ignore_index not in range(target.min(), target.max()):
if (target == self.ignore_index).sum() > 0:
target[target == self.ignore_index] = target.min()
target = make_one_hot(target.unsqueeze(dim=1), classes=output.size()[1])
output = F.softmax(output, dim=1)
output_flat = output.contiguous().view(-1)
target_flat = target.contiguous().view(-1)
intersection = (output_flat * target_flat).sum()
loss = 1 - ((2. * intersection + self.smooth) /
(output_flat.sum() + target_flat.sum() + self.smooth))
return loss
class FocalLoss(nn.Module):
def __init__(self, gamma=2, alpha=None, ignore_index=255, size_average=True):
super(FocalLoss, self).__init__()
self.gamma = gamma
self.size_average = size_average
self.CE_loss = nn.CrossEntropyLoss(reduce=False, ignore_index=ignore_index, weight=alpha)
def forward(self, output, target):
logpt = self.CE_loss(output, target)
pt = torch.exp(-logpt)
loss = ((1-pt)**self.gamma) * logpt
if self.size_average:
return loss.mean()
return loss.sum()
class CE_DiceLoss(nn.Module):
def __init__(self, smooth=1, reduction='mean', ignore_index=255, weight=None):
super(CE_DiceLoss, self).__init__()
self.smooth = smooth
self.dice = DiceLoss()
self.cross_entropy = nn.CrossEntropyLoss(weight=weight, reduction=reduction, ignore_index=ignore_index)
def forward(self, output, target):
CE_loss = self.cross_entropy(output, target)
dice_loss = self.dice(output, target)
return CE_loss + dice_loss
class LovaszSoftmax(nn.Module):
def __init__(self, classes='present', per_image=False, ignore_index=255):
super(LovaszSoftmax, self).__init__()
self.classes = classes
self.per_image = per_image
self.ignore_index = ignore_index
def forward(self, output, target):
logits = F.softmax(output, dim=1)
loss = lovasz_softmax(logits, target, classes=self.classes, per_image=self.per_image, ignore=self.ignore_index)
return loss
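if __name__ == '__main__':
    # Quick smoke test (sketch only): the shapes below are assumptions, not tied
    # to any particular dataset -- 2 images, 7 classes, 8x8 predictions.
    logits = torch.randn(2, 7, 8, 8)
    labels = torch.randint(0, 7, (2, 8, 8))
    for criterion in (CrossEntropyLoss2d(), DiceLoss(), FocalLoss(), CE_DiceLoss()):
        print(type(criterion).__name__, criterion(logits, labels).item())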
|
from collections import deque
from dataclasses import dataclass
from types import TracebackType
from typing import Deque, Optional, Tuple, Type
from warnings import warn
from ..lowlevel import cancel_shielded_checkpoint, checkpoint, checkpoint_if_cancelled
from ._compat import DeprecatedAwaitable
from ._eventloop import get_asynclib
from ._exceptions import BusyResourceError, WouldBlock
from ._tasks import CancelScope
from ._testing import TaskInfo, get_current_task
@dataclass(frozen=True)
class EventStatistics:
"""
:ivar int tasks_waiting: number of tasks waiting on :meth:`~.Event.wait`
"""
tasks_waiting: int
@dataclass(frozen=True)
class CapacityLimiterStatistics:
"""
:ivar int borrowed_tokens: number of tokens currently borrowed by tasks
:ivar float total_tokens: total number of available tokens
:ivar tuple borrowers: tasks or other objects currently holding tokens borrowed from this
limiter
:ivar int tasks_waiting: number of tasks waiting on :meth:`~.CapacityLimiter.acquire` or
:meth:`~.CapacityLimiter.acquire_on_behalf_of`
"""
borrowed_tokens: int
total_tokens: float
borrowers: Tuple[object, ...]
tasks_waiting: int
@dataclass(frozen=True)
class LockStatistics:
"""
:ivar bool locked: flag indicating if this lock is locked or not
:ivar ~anyio.TaskInfo owner: task currently holding the lock (or ``None`` if the lock is not
held by any task)
:ivar int tasks_waiting: number of tasks waiting on :meth:`~.Lock.acquire`
"""
locked: bool
owner: Optional[TaskInfo]
tasks_waiting: int
@dataclass(frozen=True)
class ConditionStatistics:
"""
:ivar int tasks_waiting: number of tasks blocked on :meth:`~.Condition.wait`
:ivar ~anyio.LockStatistics lock_statistics: statistics of the underlying :class:`~.Lock`
"""
tasks_waiting: int
lock_statistics: LockStatistics
@dataclass(frozen=True)
class SemaphoreStatistics:
"""
:ivar int tasks_waiting: number of tasks waiting on :meth:`~.Semaphore.acquire`
"""
tasks_waiting: int
class Event:
def __new__(cls):
return get_asynclib().Event()
def set(self) -> DeprecatedAwaitable:
"""Set the flag, notifying all listeners."""
raise NotImplementedError
def is_set(self) -> bool:
"""Return ``True`` if the flag is set, ``False`` if not."""
raise NotImplementedError
async def wait(self) -> bool:
"""
Wait until the flag has been set.
If the flag has already been set when this method is called, it returns immediately.
"""
raise NotImplementedError
def statistics(self) -> EventStatistics:
"""Return statistics about the current state of this event."""
raise NotImplementedError
class Lock:
_owner_task: Optional[TaskInfo] = None
def __init__(self):
self._waiters: Deque[Tuple[TaskInfo, Event]] = deque()
async def __aenter__(self):
await self.acquire()
async def __aexit__(self, exc_type: Optional[Type[BaseException]],
exc_val: Optional[BaseException],
exc_tb: Optional[TracebackType]) -> None:
self.release()
async def acquire(self) -> None:
"""Acquire the lock."""
await checkpoint_if_cancelled()
try:
self.acquire_nowait()
except WouldBlock:
task = get_current_task()
event = Event()
token = task, event
self._waiters.append(token)
try:
await event.wait()
except BaseException:
if not event.is_set():
self._waiters.remove(token)
raise
assert self._owner_task == task
else:
await cancel_shielded_checkpoint()
def acquire_nowait(self) -> None:
"""
Acquire the lock, without blocking.
:raises ~WouldBlock: if the operation would block
"""
task = get_current_task()
if self._owner_task == task:
raise RuntimeError('Attempted to acquire an already held Lock')
if self._owner_task is not None:
raise WouldBlock
self._owner_task = task
def release(self) -> DeprecatedAwaitable:
"""Release the lock."""
if self._owner_task != get_current_task():
raise RuntimeError('The current task is not holding this lock')
if self._waiters:
self._owner_task, event = self._waiters.popleft()
event.set()
else:
del self._owner_task
return DeprecatedAwaitable(self.release)
def locked(self) -> bool:
"""Return True if the lock is currently held."""
return self._owner_task is not None
def statistics(self) -> LockStatistics:
"""
Return statistics about the current state of this lock.
.. versionadded:: 3.0
"""
return LockStatistics(self.locked(), self._owner_task, len(self._waiters))
class Condition:
_owner_task: Optional[TaskInfo] = None
def __init__(self, lock: Optional[Lock] = None):
self._lock = lock or Lock()
self._waiters: Deque[Event] = deque()
async def __aenter__(self):
await self.acquire()
async def __aexit__(self, exc_type: Optional[Type[BaseException]],
exc_val: Optional[BaseException],
exc_tb: Optional[TracebackType]) -> None:
self.release()
def _check_acquired(self) -> None:
if self._owner_task != get_current_task():
raise RuntimeError('The current task is not holding the underlying lock')
async def acquire(self) -> None:
"""Acquire the underlying lock."""
await self._lock.acquire()
self._owner_task = get_current_task()
def acquire_nowait(self) -> None:
"""
Acquire the underlying lock, without blocking.
:raises ~WouldBlock: if the operation would block
"""
self._lock.acquire_nowait()
self._owner_task = get_current_task()
def release(self) -> DeprecatedAwaitable:
"""Release the underlying lock."""
self._lock.release()
return DeprecatedAwaitable(self.release)
def locked(self) -> bool:
"""Return True if the lock is set."""
return self._lock.locked()
def notify(self, n: int = 1) -> None:
"""Notify exactly n listeners."""
self._check_acquired()
for _ in range(n):
try:
event = self._waiters.popleft()
except IndexError:
break
event.set()
def notify_all(self) -> None:
"""Notify all the listeners."""
self._check_acquired()
for event in self._waiters:
event.set()
self._waiters.clear()
async def wait(self) -> None:
"""Wait for a notification."""
await checkpoint()
event = Event()
self._waiters.append(event)
self.release()
try:
await event.wait()
except BaseException:
if not event.is_set():
self._waiters.remove(event)
raise
finally:
with CancelScope(shield=True):
await self.acquire()
def statistics(self) -> ConditionStatistics:
"""
Return statistics about the current state of this condition.
.. versionadded:: 3.0
"""
return ConditionStatistics(len(self._waiters), self._lock.statistics())
class Semaphore:
def __init__(self, initial_value: int, *, max_value: Optional[int] = None):
if not isinstance(initial_value, int):
raise TypeError('initial_value must be an integer')
if initial_value < 0:
raise ValueError('initial_value must be >= 0')
if max_value is not None:
if not isinstance(max_value, int):
raise TypeError('max_value must be an integer or None')
if max_value < initial_value:
raise ValueError('max_value must be equal to or higher than initial_value')
self._value = initial_value
self._max_value = max_value
self._waiters: Deque[Event] = deque()
async def __aenter__(self) -> 'Semaphore':
await self.acquire()
return self
async def __aexit__(self, exc_type: Optional[Type[BaseException]],
exc_val: Optional[BaseException],
exc_tb: Optional[TracebackType]) -> None:
self.release()
async def acquire(self) -> None:
"""Decrement the semaphore value, blocking if necessary."""
await checkpoint_if_cancelled()
try:
self.acquire_nowait()
except WouldBlock:
event = Event()
self._waiters.append(event)
try:
await event.wait()
except BaseException:
if not event.is_set():
self._waiters.remove(event)
raise
else:
await cancel_shielded_checkpoint()
def acquire_nowait(self) -> None:
"""
Acquire the semaphore, without blocking.
:raises ~WouldBlock: if the operation would block
"""
if self._value == 0:
raise WouldBlock
self._value -= 1
def release(self) -> DeprecatedAwaitable:
"""Increment the semaphore value."""
if self._max_value is not None and self._value == self._max_value:
raise ValueError('semaphore released too many times')
if self._waiters:
self._waiters.popleft().set()
else:
self._value += 1
return DeprecatedAwaitable(self.release)
@property
def value(self) -> int:
"""The current value of the semaphore."""
return self._value
@property
def max_value(self) -> Optional[int]:
"""The maximum value of the semaphore."""
return self._max_value
def statistics(self) -> SemaphoreStatistics:
"""
Return statistics about the current state of this semaphore.
.. versionadded:: 3.0
"""
return SemaphoreStatistics(len(self._waiters))
class CapacityLimiter:
def __new__(cls, total_tokens: float):
return get_asynclib().CapacityLimiter(total_tokens)
async def __aenter__(self):
raise NotImplementedError
async def __aexit__(self, exc_type: Optional[Type[BaseException]],
exc_val: Optional[BaseException],
exc_tb: Optional[TracebackType]) -> Optional[bool]:
raise NotImplementedError
@property
def total_tokens(self) -> float:
"""
The total number of tokens available for borrowing.
This is a read-write property. If the total number of tokens is increased, the
proportionate number of tasks waiting on this limiter will be granted their tokens.
.. versionchanged:: 3.0
The property is now writable.
"""
raise NotImplementedError
@total_tokens.setter
def total_tokens(self, value: float) -> None:
raise NotImplementedError
async def set_total_tokens(self, value) -> None:
warn('CapacityLimiter.set_total_tokens has been deprecated. Set the value of the '
'"total_tokens" attribute directly.', DeprecationWarning)
self.total_tokens = value
@property
def borrowed_tokens(self) -> int:
"""The number of tokens that have currently been borrowed."""
raise NotImplementedError
@property
def available_tokens(self) -> float:
"""The number of tokens currently available to be borrowed"""
raise NotImplementedError
def acquire_nowait(self) -> DeprecatedAwaitable:
"""
Acquire a token for the current task without waiting for one to become available.
:raises ~anyio.WouldBlock: if there are no tokens available for borrowing
"""
raise NotImplementedError
def acquire_on_behalf_of_nowait(self, borrower) -> DeprecatedAwaitable:
"""
Acquire a token without waiting for one to become available.
:param borrower: the entity borrowing a token
:raises ~anyio.WouldBlock: if there are no tokens available for borrowing
"""
raise NotImplementedError
async def acquire(self) -> None:
"""
Acquire a token for the current task, waiting if necessary for one to become available.
"""
raise NotImplementedError
async def acquire_on_behalf_of(self, borrower) -> None:
"""
Acquire a token, waiting if necessary for one to become available.
:param borrower: the entity borrowing a token
"""
raise NotImplementedError
def release(self) -> None:
"""
Release the token held by the current task.
:raises RuntimeError: if the current task has not borrowed a token from this limiter.
"""
raise NotImplementedError
def release_on_behalf_of(self, borrower) -> None:
"""
Release the token held by the given borrower.
:raises RuntimeError: if the borrower has not borrowed a token from this limiter.
"""
raise NotImplementedError
def statistics(self) -> CapacityLimiterStatistics:
"""
Return statistics about the current state of this limiter.
.. versionadded:: 3.0
"""
raise NotImplementedError
def create_lock() -> Lock:
"""
Create an asynchronous lock.
:return: a lock object
.. deprecated:: 3.0
Use :class:`~Lock` directly.
"""
warn('create_lock() is deprecated -- use Lock() directly', DeprecationWarning)
return Lock()
def create_condition(lock: Optional[Lock] = None) -> Condition:
"""
Create an asynchronous condition.
:param lock: the lock to base the condition object on
:return: a condition object
.. deprecated:: 3.0
Use :class:`~Condition` directly.
"""
warn('create_condition() is deprecated -- use Condition() directly', DeprecationWarning)
return Condition(lock=lock)
def create_event() -> Event:
"""
Create an asynchronous event object.
:return: an event object
.. deprecated:: 3.0
Use :class:`~Event` directly.
"""
warn('create_event() is deprecated -- use Event() directly', DeprecationWarning)
return get_asynclib().Event()
def create_semaphore(value: int, *, max_value: Optional[int] = None) -> Semaphore:
"""
Create an asynchronous semaphore.
:param value: the semaphore's initial value
:param max_value: if set, makes this a "bounded" semaphore that raises :exc:`ValueError` if the
semaphore's value would exceed this number
:return: a semaphore object
.. deprecated:: 3.0
Use :class:`~Semaphore` directly.
"""
warn('create_semaphore() is deprecated -- use Semaphore() directly', DeprecationWarning)
return Semaphore(value, max_value=max_value)
def create_capacity_limiter(total_tokens: float) -> CapacityLimiter:
"""
Create a capacity limiter.
:param total_tokens: the total number of tokens available for borrowing (can be an integer or
:data:`math.inf`)
:return: a capacity limiter object
.. deprecated:: 3.0
Use :class:`~CapacityLimiter` directly.
"""
warn('create_capacity_limiter() is deprecated -- use CapacityLimiter() directly',
DeprecationWarning)
return get_asynclib().CapacityLimiter(total_tokens)
class ResourceGuard:
__slots__ = 'action', '_guarded'
def __init__(self, action: str):
self.action = action
self._guarded = False
def __enter__(self):
if self._guarded:
raise BusyResourceError(self.action)
self._guarded = True
def __exit__(self, exc_type, exc_val, exc_tb):
self._guarded = False
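# Usage sketch (illustrative, not part of this module): these primitives are
# normally reached through the public anyio API, roughly as follows.
#
#   import anyio
#
#   async def main():
#       lock = anyio.Lock()
#       semaphore = anyio.Semaphore(2)
#       async with lock:                     # __aenter__ -> acquire()
#           print(lock.statistics())         # LockStatistics(locked=True, ...)
#       async with semaphore:
#           print(semaphore.value)           # 1
#
#   anyio.run(main)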
|
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from webob import exc
from nova.api.openstack.api_version_request import \
MAX_IMAGE_META_PROXY_API_VERSION
from nova.api.openstack import common
from nova.api.openstack.compute.schemas import image_metadata
from nova.api.openstack import wsgi
from nova.api import validation
from nova import exception
from nova.i18n import _
import nova.image
class ImageMetadataController(wsgi.Controller):
"""The image metadata API controller for the OpenStack API."""
def __init__(self):
self.image_api = nova.image.API()
def _get_image(self, context, image_id):
try:
return self.image_api.get(context, image_id)
except exception.ImageNotAuthorized as e:
raise exc.HTTPForbidden(explanation=e.format_message())
except exception.ImageNotFound:
msg = _("Image not found.")
raise exc.HTTPNotFound(explanation=msg)
@wsgi.Controller.api_version("2.1", MAX_IMAGE_META_PROXY_API_VERSION)
@wsgi.expected_errors((403, 404))
def index(self, req, image_id):
"""Returns the list of metadata for a given instance."""
context = req.environ['nova.context']
metadata = self._get_image(context, image_id)['properties']
return dict(metadata=metadata)
@wsgi.Controller.api_version("2.1", MAX_IMAGE_META_PROXY_API_VERSION)
@wsgi.expected_errors((403, 404))
def show(self, req, image_id, id):
context = req.environ['nova.context']
metadata = self._get_image(context, image_id)['properties']
if id in metadata:
return {'meta': {id: metadata[id]}}
else:
raise exc.HTTPNotFound()
@wsgi.Controller.api_version("2.1", MAX_IMAGE_META_PROXY_API_VERSION)
@wsgi.expected_errors((400, 403, 404))
@validation.schema(image_metadata.create)
def create(self, req, image_id, body):
context = req.environ['nova.context']
image = self._get_image(context, image_id)
for key, value in body['metadata'].items():
image['properties'][key] = value
common.check_img_metadata_properties_quota(context,
image['properties'])
try:
image = self.image_api.update(context, image_id, image, data=None,
purge_props=True)
except exception.ImageNotAuthorized as e:
raise exc.HTTPForbidden(explanation=e.format_message())
return dict(metadata=image['properties'])
@wsgi.Controller.api_version("2.1", MAX_IMAGE_META_PROXY_API_VERSION)
@wsgi.expected_errors((400, 403, 404))
@validation.schema(image_metadata.update)
def update(self, req, image_id, id, body):
context = req.environ['nova.context']
meta = body['meta']
if id not in meta:
expl = _('Request body and URI mismatch')
raise exc.HTTPBadRequest(explanation=expl)
image = self._get_image(context, image_id)
image['properties'][id] = meta[id]
common.check_img_metadata_properties_quota(context,
image['properties'])
try:
self.image_api.update(context, image_id, image, data=None,
purge_props=True)
except exception.ImageNotAuthorized as e:
raise exc.HTTPForbidden(explanation=e.format_message())
return dict(meta=meta)
@wsgi.Controller.api_version("2.1", MAX_IMAGE_META_PROXY_API_VERSION)
@wsgi.expected_errors((400, 403, 404))
@validation.schema(image_metadata.update_all)
def update_all(self, req, image_id, body):
context = req.environ['nova.context']
image = self._get_image(context, image_id)
metadata = body['metadata']
common.check_img_metadata_properties_quota(context, metadata)
image['properties'] = metadata
try:
self.image_api.update(context, image_id, image, data=None,
purge_props=True)
except exception.ImageNotAuthorized as e:
raise exc.HTTPForbidden(explanation=e.format_message())
return dict(metadata=metadata)
@wsgi.Controller.api_version("2.1", MAX_IMAGE_META_PROXY_API_VERSION)
@wsgi.expected_errors((403, 404))
@wsgi.response(204)
def delete(self, req, image_id, id):
context = req.environ['nova.context']
image = self._get_image(context, image_id)
if id not in image['properties']:
msg = _("Invalid metadata key")
raise exc.HTTPNotFound(explanation=msg)
image['properties'].pop(id)
try:
self.image_api.update(context, image_id, image, data=None,
purge_props=True)
except exception.ImageNotAuthorized as e:
raise exc.HTTPForbidden(explanation=e.format_message())
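# For reference, this controller backs the (deprecated) image-metadata proxy
# endpoints of the compute API; the route wiring lives elsewhere, so the mapping
# below is an approximation:
#   GET    /images/{image_id}/metadata        -> index()
#   GET    /images/{image_id}/metadata/{key}  -> show()
#   POST   /images/{image_id}/metadata        -> create()
#   PUT    /images/{image_id}/metadata/{key}  -> update()
#   PUT    /images/{image_id}/metadata        -> update_all()
#   DELETE /images/{image_id}/metadata/{key}  -> delete()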
|
"""
Data structures for sparse float data. Life is made simpler by dealing only
with float64 data
"""
from __future__ import division
# pylint: disable=E1101,E1103,W0231,E0202
import warnings
from pandas.compat import lmap
from pandas import compat
import numpy as np
from pandas.core.dtypes.missing import isna, notna
from pandas.core.dtypes.cast import maybe_upcast, find_common_type
from pandas.core.dtypes.common import ensure_platform_int, is_scipy_sparse
from pandas.compat.numpy import function as nv
from pandas.core.index import Index, MultiIndex, ensure_index
from pandas.core.series import Series
from pandas.core.frame import DataFrame, extract_index, _prep_ndarray
import pandas.core.algorithms as algos
from pandas.core.internals import (BlockManager,
create_block_manager_from_arrays)
import pandas.core.generic as generic
from pandas.core.sparse.series import SparseSeries, SparseArray
from pandas._libs.sparse import BlockIndex, get_blocks
from pandas.util._decorators import Appender
import pandas.core.ops as ops
import pandas.core.common as com
import pandas.core.indexes.base as ibase
_shared_doc_kwargs = dict(klass='SparseDataFrame')
class SparseDataFrame(DataFrame):
"""
DataFrame containing sparse floating point data in the form of SparseSeries
objects
Parameters
----------
data : same types as can be passed to DataFrame or scipy.sparse.spmatrix
.. versionchanged:: 0.23.0
If data is a dict, argument order is maintained for Python 3.6
and later.
index : array-like, optional
columns : array-like, optional
default_kind : {'block', 'integer'}, default 'block'
Default sparse kind for converting Series to SparseSeries. Will not
override SparseSeries passed into constructor
default_fill_value : float
Default fill_value for converting Series to SparseSeries
(default: nan). Will not override SparseSeries passed in.
"""
_subtyp = 'sparse_frame'
def __init__(self, data=None, index=None, columns=None, default_kind=None,
default_fill_value=None, dtype=None, copy=False):
# pick up the defaults from the Sparse structures
if isinstance(data, SparseDataFrame):
if index is None:
index = data.index
if columns is None:
columns = data.columns
if default_fill_value is None:
default_fill_value = data.default_fill_value
if default_kind is None:
default_kind = data.default_kind
elif isinstance(data, (SparseSeries, SparseArray)):
if index is None:
index = data.index
if default_fill_value is None:
default_fill_value = data.fill_value
if columns is None and hasattr(data, 'name'):
columns = [data.name]
if columns is None:
raise Exception("cannot pass a series w/o a name or columns")
data = {columns[0]: data}
if default_fill_value is None:
default_fill_value = np.nan
if default_kind is None:
default_kind = 'block'
self._default_kind = default_kind
self._default_fill_value = default_fill_value
if is_scipy_sparse(data):
mgr = self._init_spmatrix(data, index, columns, dtype=dtype,
fill_value=default_fill_value)
elif isinstance(data, dict):
mgr = self._init_dict(data, index, columns, dtype=dtype)
elif isinstance(data, (np.ndarray, list)):
mgr = self._init_matrix(data, index, columns, dtype=dtype)
elif isinstance(data, SparseDataFrame):
mgr = self._init_mgr(data._data,
dict(index=index, columns=columns),
dtype=dtype, copy=copy)
elif isinstance(data, DataFrame):
mgr = self._init_dict(data, data.index, data.columns, dtype=dtype)
elif isinstance(data, Series):
mgr = self._init_dict(data.to_frame(), data.index,
columns=None, dtype=dtype)
elif isinstance(data, BlockManager):
mgr = self._init_mgr(data, axes=dict(index=index, columns=columns),
dtype=dtype, copy=copy)
elif data is None:
data = DataFrame()
if index is None:
index = Index([])
else:
index = ensure_index(index)
if columns is None:
columns = Index([])
else:
for c in columns:
data[c] = SparseArray(np.nan, index=index,
kind=self._default_kind,
fill_value=self._default_fill_value)
mgr = to_manager(data, columns, index)
if dtype is not None:
mgr = mgr.astype(dtype)
else:
msg = ('SparseDataFrame called with unknown type "{data_type}" '
'for data argument')
raise TypeError(msg.format(data_type=type(data).__name__))
generic.NDFrame.__init__(self, mgr)
@property
def _constructor(self):
return SparseDataFrame
_constructor_sliced = SparseSeries
def _init_dict(self, data, index, columns, dtype=None):
# pre-filter out columns if we passed it
if columns is not None:
columns = ensure_index(columns)
data = {k: v for k, v in compat.iteritems(data) if k in columns}
else:
keys = com._dict_keys_to_ordered_list(data)
columns = Index(keys)
if index is None:
index = extract_index(list(data.values()))
def sp_maker(x):
return SparseArray(x, kind=self._default_kind,
fill_value=self._default_fill_value,
copy=True, dtype=dtype)
sdict = {}
for k, v in compat.iteritems(data):
if isinstance(v, Series):
# Force alignment, no copy necessary
if not v.index.equals(index):
v = v.reindex(index)
if not isinstance(v, SparseSeries):
v = sp_maker(v.values)
elif isinstance(v, SparseArray):
v = v.copy()
else:
if isinstance(v, dict):
v = [v.get(i, np.nan) for i in index]
v = sp_maker(v)
sdict[k] = v
# TODO: figure out how to handle this case, all nan's?
# add in any other columns we want to have (completeness)
nan_arr = np.empty(len(index), dtype='float64')
nan_arr.fill(np.nan)
nan_arr = sp_maker(nan_arr)
sdict.update((c, nan_arr) for c in columns if c not in sdict)
return to_manager(sdict, columns, index)
def _init_matrix(self, data, index, columns, dtype=None):
""" Init self from ndarray or list of lists """
data = _prep_ndarray(data, copy=False)
index, columns = self._prep_index(data, index, columns)
data = {idx: data[:, i] for i, idx in enumerate(columns)}
return self._init_dict(data, index, columns, dtype)
def _init_spmatrix(self, data, index, columns, dtype=None,
fill_value=None):
""" Init self from scipy.sparse matrix """
index, columns = self._prep_index(data, index, columns)
data = data.tocoo()
N = len(index)
# Construct a dict of SparseSeries
sdict = {}
values = Series(data.data, index=data.row, copy=False)
for col, rowvals in values.groupby(data.col):
# get_blocks expects int32 row indices in sorted order
rowvals = rowvals.sort_index()
rows = rowvals.index.values.astype(np.int32)
blocs, blens = get_blocks(rows)
sdict[columns[col]] = SparseSeries(
rowvals.values, index=index,
fill_value=fill_value,
sparse_index=BlockIndex(N, blocs, blens))
# Add any columns that were empty and thus not grouped on above
sdict.update({column: SparseSeries(index=index,
fill_value=fill_value,
sparse_index=BlockIndex(N, [], []))
for column in columns
if column not in sdict})
return self._init_dict(sdict, index, columns, dtype)
def _prep_index(self, data, index, columns):
N, K = data.shape
if index is None:
index = ibase.default_index(N)
if columns is None:
columns = ibase.default_index(K)
if len(columns) != K:
raise ValueError('Column length mismatch: {columns} vs. {K}'
.format(columns=len(columns), K=K))
if len(index) != N:
raise ValueError('Index length mismatch: {index} vs. {N}'
.format(index=len(index), N=N))
return index, columns
def to_coo(self):
"""
Return the contents of the frame as a sparse SciPy COO matrix.
.. versionadded:: 0.20.0
Returns
-------
coo_matrix : scipy.sparse.spmatrix
If the caller is heterogeneous and contains booleans or objects,
the result will be of dtype=object. See Notes.
Notes
-----
The dtype will be the lowest-common-denominator type (implicit
upcasting); that is to say if the dtypes (even of numeric types)
are mixed, the one that accommodates all will be chosen.
e.g. If the dtypes are float16 and float32, dtype will be upcast to
        float32. By numpy.find_common_type convention, mixing int64 and
        uint64 will result in a float64 dtype.
"""
try:
from scipy.sparse import coo_matrix
except ImportError:
raise ImportError('Scipy is not installed')
dtype = find_common_type(self.dtypes)
cols, rows, datas = [], [], []
for col, name in enumerate(self):
s = self[name]
row = s.sp_index.to_int_index().indices
cols.append(np.repeat(col, len(row)))
rows.append(row)
datas.append(s.sp_values.astype(dtype, copy=False))
cols = np.concatenate(cols)
rows = np.concatenate(rows)
datas = np.concatenate(datas)
return coo_matrix((datas, (rows, cols)), shape=self.shape)
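    # Usage sketch for to_coo (illustrative, requires scipy):
    #
    #   coo = sdf.to_coo()           # scipy.sparse.coo_matrix
    #   coo.row, coo.col, coo.data   # positions and values of the explicitly
    #                                # stored (non-fill) points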
def __array_wrap__(self, result):
return self._constructor(
result, index=self.index, columns=self.columns,
default_kind=self._default_kind,
default_fill_value=self._default_fill_value).__finalize__(self)
def __getstate__(self):
# pickling
return dict(_typ=self._typ, _subtyp=self._subtyp, _data=self._data,
_default_fill_value=self._default_fill_value,
_default_kind=self._default_kind)
def _unpickle_sparse_frame_compat(self, state):
""" original pickle format """
series, cols, idx, fv, kind = state
if not isinstance(cols, Index): # pragma: no cover
from pandas.io.pickle import _unpickle_array
columns = _unpickle_array(cols)
else:
columns = cols
if not isinstance(idx, Index): # pragma: no cover
from pandas.io.pickle import _unpickle_array
index = _unpickle_array(idx)
else:
index = idx
series_dict = DataFrame()
for col, (sp_index, sp_values) in compat.iteritems(series):
series_dict[col] = SparseSeries(sp_values, sparse_index=sp_index,
fill_value=fv)
self._data = to_manager(series_dict, columns, index)
self._default_fill_value = fv
self._default_kind = kind
def to_dense(self):
"""
Convert to dense DataFrame
Returns
-------
df : DataFrame
"""
data = {k: v.to_dense() for k, v in compat.iteritems(self)}
return DataFrame(data, index=self.index, columns=self.columns)
def _apply_columns(self, func):
""" get new SparseDataFrame applying func to each columns """
new_data = {}
for col, series in compat.iteritems(self):
new_data[col] = func(series)
return self._constructor(
data=new_data, index=self.index, columns=self.columns,
default_fill_value=self.default_fill_value).__finalize__(self)
def astype(self, dtype):
return self._apply_columns(lambda x: x.astype(dtype))
def copy(self, deep=True):
"""
Make a copy of this SparseDataFrame
"""
result = super(SparseDataFrame, self).copy(deep=deep)
result._default_fill_value = self._default_fill_value
result._default_kind = self._default_kind
return result
@property
def default_fill_value(self):
return self._default_fill_value
@property
def default_kind(self):
return self._default_kind
@property
def density(self):
"""
Ratio of non-sparse points to total (dense) data points
represented in the frame
"""
tot_nonsparse = sum(ser.sp_index.npoints
for _, ser in compat.iteritems(self))
tot = len(self.index) * len(self.columns)
return tot_nonsparse / float(tot)
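    # Worked example (sketch): one column of 4 values with 2 NaNs and a NaN
    # fill_value stores 2 points, so density == 2 / (4 * 1) == 0.5.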
def fillna(self, value=None, method=None, axis=0, inplace=False,
limit=None, downcast=None):
new_self = super(SparseDataFrame,
self).fillna(value=value, method=method, axis=axis,
inplace=inplace, limit=limit,
downcast=downcast)
if not inplace:
self = new_self
# set the fill value if we are filling as a scalar with nothing special
# going on
if (value is not None and value == value and method is None and
limit is None):
self._default_fill_value = value
if not inplace:
return self
# ----------------------------------------------------------------------
# Support different internal representation of SparseDataFrame
def _sanitize_column(self, key, value, **kwargs):
"""
Creates a new SparseArray from the input value.
Parameters
----------
key : object
value : scalar, Series, or array-like
kwargs : dict
Returns
-------
sanitized_column : SparseArray
"""
def sp_maker(x, index=None):
return SparseArray(x, index=index,
fill_value=self._default_fill_value,
kind=self._default_kind)
if isinstance(value, SparseSeries):
clean = value.reindex(self.index).as_sparse_array(
fill_value=self._default_fill_value, kind=self._default_kind)
elif isinstance(value, SparseArray):
if len(value) != len(self.index):
raise AssertionError('Length of values does not match '
'length of index')
clean = value
elif hasattr(value, '__iter__'):
if isinstance(value, Series):
clean = value.reindex(self.index)
if not isinstance(value, SparseSeries):
clean = sp_maker(clean)
else:
if len(value) != len(self.index):
raise AssertionError('Length of values does not match '
'length of index')
clean = sp_maker(value)
# Scalar
else:
clean = sp_maker(value, self.index)
# always return a SparseArray!
return clean
def get_value(self, index, col, takeable=False):
"""
Quickly retrieve single value at passed column and index
.. deprecated:: 0.21.0
Please use .at[] or .iat[] accessors.
Parameters
----------
index : row label
col : column label
takeable : interpret the index/col as indexers, default False
Returns
-------
value : scalar value
"""
warnings.warn("get_value is deprecated and will be removed "
"in a future release. Please use "
".at[] or .iat[] accessors instead", FutureWarning,
stacklevel=2)
return self._get_value(index, col, takeable=takeable)
def _get_value(self, index, col, takeable=False):
if takeable is True:
series = self._iget_item_cache(col)
else:
series = self._get_item_cache(col)
return series._get_value(index, takeable=takeable)
_get_value.__doc__ = get_value.__doc__
def set_value(self, index, col, value, takeable=False):
"""
Put single value at passed column and index
.. deprecated:: 0.21.0
Please use .at[] or .iat[] accessors.
Parameters
----------
index : row label
col : column label
value : scalar value
takeable : interpret the index/col as indexers, default False
Notes
-----
This method *always* returns a new object. It is currently not
particularly efficient (and potentially very expensive) but is provided
for API compatibility with DataFrame
Returns
-------
frame : DataFrame
"""
warnings.warn("set_value is deprecated and will be removed "
"in a future release. Please use "
".at[] or .iat[] accessors instead", FutureWarning,
stacklevel=2)
return self._set_value(index, col, value, takeable=takeable)
def _set_value(self, index, col, value, takeable=False):
dense = self.to_dense()._set_value(
index, col, value, takeable=takeable)
return dense.to_sparse(kind=self._default_kind,
fill_value=self._default_fill_value)
_set_value.__doc__ = set_value.__doc__
def _slice(self, slobj, axis=0, kind=None):
if axis == 0:
new_index = self.index[slobj]
new_columns = self.columns
else:
new_index = self.index
new_columns = self.columns[slobj]
return self.reindex(index=new_index, columns=new_columns)
def xs(self, key, axis=0, copy=False):
"""
Returns a row (cross-section) from the SparseDataFrame as a Series
object.
Parameters
----------
key : some index contained in the index
Returns
-------
xs : Series
"""
if axis == 1:
data = self[key]
return data
i = self.index.get_loc(key)
data = self.take([i]).get_values()[0]
return Series(data, index=self.columns)
# ----------------------------------------------------------------------
# Arithmetic-related methods
def _combine_frame(self, other, func, fill_value=None, level=None):
this, other = self.align(other, join='outer', level=level, copy=False)
new_index, new_columns = this.index, this.columns
if level is not None:
raise NotImplementedError("'level' argument is not supported")
if self.empty and other.empty:
return self._constructor(index=new_index).__finalize__(self)
new_data = {}
if fill_value is not None:
# TODO: be a bit more intelligent here
for col in new_columns:
if col in this and col in other:
dleft = this[col].to_dense()
dright = other[col].to_dense()
result = dleft._binop(dright, func, fill_value=fill_value)
result = result.to_sparse(fill_value=this[col].fill_value)
new_data[col] = result
else:
for col in new_columns:
if col in this and col in other:
new_data[col] = func(this[col], other[col])
# if the fill values are the same use them? or use a valid one
new_fill_value = None
other_fill_value = getattr(other, 'default_fill_value', np.nan)
if self.default_fill_value == other_fill_value:
new_fill_value = self.default_fill_value
elif np.isnan(self.default_fill_value) and not np.isnan(
other_fill_value):
new_fill_value = other_fill_value
elif not np.isnan(self.default_fill_value) and np.isnan(
other_fill_value):
new_fill_value = self.default_fill_value
return self._constructor(data=new_data, index=new_index,
columns=new_columns,
default_fill_value=new_fill_value
).__finalize__(self)
def _combine_match_index(self, other, func, level=None):
new_data = {}
if level is not None:
raise NotImplementedError("'level' argument is not supported")
new_index = self.index.union(other.index)
this = self
if self.index is not new_index:
this = self.reindex(new_index)
if other.index is not new_index:
other = other.reindex(new_index)
for col, series in compat.iteritems(this):
new_data[col] = func(series.values, other.values)
# fill_value is a function of our operator
fill_value = None
if isna(other.fill_value) or isna(self.default_fill_value):
fill_value = np.nan
else:
fill_value = func(np.float64(self.default_fill_value),
np.float64(other.fill_value))
return self._constructor(
new_data, index=new_index, columns=self.columns,
default_fill_value=fill_value).__finalize__(self)
def _combine_match_columns(self, other, func, level=None, try_cast=True):
# patched version of DataFrame._combine_match_columns to account for
# NumPy circumventing __rsub__ with float64 types, e.g.: 3.0 - series,
# where 3.0 is numpy.float64 and series is a SparseSeries. Still
# possible for this to happen, which is bothersome
if level is not None:
raise NotImplementedError("'level' argument is not supported")
new_data = {}
union = intersection = self.columns
if not union.equals(other.index):
union = other.index.union(self.columns)
intersection = other.index.intersection(self.columns)
for col in intersection:
new_data[col] = func(self[col], float(other[col]))
return self._constructor(
new_data, index=self.index, columns=union,
default_fill_value=self.default_fill_value).__finalize__(self)
def _combine_const(self, other, func, errors='raise', try_cast=True):
return self._apply_columns(lambda x: func(x, other))
def _reindex_index(self, index, method, copy, level, fill_value=np.nan,
limit=None, takeable=False):
if level is not None:
raise TypeError('Reindex by level not supported for sparse')
if self.index.equals(index):
if copy:
return self.copy()
else:
return self
if len(self.index) == 0:
return self._constructor(
index=index, columns=self.columns).__finalize__(self)
indexer = self.index.get_indexer(index, method, limit=limit)
indexer = ensure_platform_int(indexer)
mask = indexer == -1
need_mask = mask.any()
new_series = {}
for col, series in self.iteritems():
if mask.all():
continue
values = series.values
# .take returns SparseArray
new = values.take(indexer)
if need_mask:
new = new.values
# convert integer to float if necessary. need to do a lot
# more than that, handle boolean etc also
new, fill_value = maybe_upcast(new, fill_value=fill_value)
np.putmask(new, mask, fill_value)
new_series[col] = new
return self._constructor(
new_series, index=index, columns=self.columns,
default_fill_value=self._default_fill_value).__finalize__(self)
def _reindex_columns(self, columns, method, copy, level, fill_value=None,
limit=None, takeable=False):
if level is not None:
raise TypeError('Reindex by level not supported for sparse')
if notna(fill_value):
raise NotImplementedError("'fill_value' argument is not supported")
if limit:
raise NotImplementedError("'limit' argument is not supported")
if method is not None:
raise NotImplementedError("'method' argument is not supported")
# TODO: fill value handling
sdict = {k: v for k, v in compat.iteritems(self) if k in columns}
return self._constructor(
sdict, index=self.index, columns=columns,
default_fill_value=self._default_fill_value).__finalize__(self)
def _reindex_with_indexers(self, reindexers, method=None, fill_value=None,
limit=None, copy=False, allow_dups=False):
if method is not None or limit is not None:
raise NotImplementedError("cannot reindex with a method or limit "
"with sparse")
if fill_value is None:
fill_value = np.nan
reindexers = {self._get_axis_number(a): val
for (a, val) in compat.iteritems(reindexers)}
index, row_indexer = reindexers.get(0, (None, None))
columns, col_indexer = reindexers.get(1, (None, None))
if columns is None:
columns = self.columns
new_arrays = {}
for col in columns:
if col not in self:
continue
if row_indexer is not None:
new_arrays[col] = algos.take_1d(self[col].get_values(),
row_indexer,
fill_value=fill_value)
else:
new_arrays[col] = self[col]
return self._constructor(new_arrays, index=index,
columns=columns).__finalize__(self)
def _join_compat(self, other, on=None, how='left', lsuffix='', rsuffix='',
sort=False):
if on is not None:
raise NotImplementedError("'on' keyword parameter is not yet "
"implemented")
return self._join_index(other, how, lsuffix, rsuffix)
def _join_index(self, other, how, lsuffix, rsuffix):
if isinstance(other, Series):
if other.name is None:
raise ValueError('Other Series must have a name')
other = SparseDataFrame(
{other.name: other},
default_fill_value=self._default_fill_value)
join_index = self.index.join(other.index, how=how)
this = self.reindex(join_index)
other = other.reindex(join_index)
this, other = this._maybe_rename_join(other, lsuffix, rsuffix)
from pandas import concat
return concat([this, other], axis=1, verify_integrity=True)
def _maybe_rename_join(self, other, lsuffix, rsuffix):
to_rename = self.columns.intersection(other.columns)
if len(to_rename) > 0:
if not lsuffix and not rsuffix:
raise ValueError('columns overlap but no suffix specified: '
'{to_rename}'.format(to_rename=to_rename))
def lrenamer(x):
if x in to_rename:
return '{x}{lsuffix}'.format(x=x, lsuffix=lsuffix)
return x
def rrenamer(x):
if x in to_rename:
return '{x}{rsuffix}'.format(x=x, rsuffix=rsuffix)
return x
this = self.rename(columns=lrenamer)
other = other.rename(columns=rrenamer)
else:
this = self
return this, other
def transpose(self, *args, **kwargs):
"""
Returns a DataFrame with the rows/columns switched.
"""
nv.validate_transpose(args, kwargs)
return self._constructor(
self.values.T, index=self.columns, columns=self.index,
default_fill_value=self._default_fill_value,
default_kind=self._default_kind).__finalize__(self)
T = property(transpose)
@Appender(DataFrame.count.__doc__)
def count(self, axis=0, **kwds):
if axis is None:
axis = self._stat_axis_number
return self.apply(lambda x: x.count(), axis=axis)
def cumsum(self, axis=0, *args, **kwargs):
"""
Return SparseDataFrame of cumulative sums over requested axis.
Parameters
----------
axis : {0, 1}
0 for row-wise, 1 for column-wise
Returns
-------
y : SparseDataFrame
"""
nv.validate_cumsum(args, kwargs)
if axis is None:
axis = self._stat_axis_number
return self.apply(lambda x: x.cumsum(), axis=axis)
@Appender(generic._shared_docs['isna'] % _shared_doc_kwargs)
def isna(self):
return self._apply_columns(lambda x: x.isna())
isnull = isna
@Appender(generic._shared_docs['notna'] % _shared_doc_kwargs)
def notna(self):
return self._apply_columns(lambda x: x.notna())
notnull = notna
def apply(self, func, axis=0, broadcast=None, reduce=None,
result_type=None):
"""
Analogous to DataFrame.apply, for SparseDataFrame
Parameters
----------
func : function
Function to apply to each column
axis : {0, 1, 'index', 'columns'}
broadcast : bool, default False
For aggregation functions, return object of same size with values
propagated
.. deprecated:: 0.23.0
This argument will be removed in a future version, replaced
by result_type='broadcast'.
reduce : boolean or None, default None
Try to apply reduction procedures. If the DataFrame is empty,
apply will use reduce to determine whether the result should be a
Series or a DataFrame. If reduce is None (the default), apply's
            return value will be guessed by calling func on an empty Series (note:
while guessing, exceptions raised by func will be ignored). If
reduce is True a Series will always be returned, and if False a
DataFrame will always be returned.
.. deprecated:: 0.23.0
This argument will be removed in a future version, replaced
by result_type='reduce'.
        result_type : {'expand', 'reduce', 'broadcast', None}
            These only act when axis=1 (columns):
* 'expand' : list-like results will be turned into columns.
* 'reduce' : return a Series if possible rather than expanding
list-like results. This is the opposite to 'expand'.
* 'broadcast' : results will be broadcast to the original shape
of the frame, the original index & columns will be retained.
The default behaviour (None) depends on the return value of the
applied function: list-like results will be returned as a Series
of those. However if the apply function returns a Series these
are expanded to columns.
.. versionadded:: 0.23.0
Returns
-------
applied : Series or SparseDataFrame
"""
if not len(self.columns):
return self
axis = self._get_axis_number(axis)
if isinstance(func, np.ufunc):
new_series = {}
for k, v in compat.iteritems(self):
applied = func(v)
applied.fill_value = func(v.fill_value)
new_series[k] = applied
return self._constructor(
new_series, index=self.index, columns=self.columns,
default_fill_value=self._default_fill_value,
default_kind=self._default_kind).__finalize__(self)
from pandas.core.apply import frame_apply
op = frame_apply(self,
func=func,
axis=axis,
reduce=reduce,
broadcast=broadcast,
result_type=result_type)
return op.get_result()
def applymap(self, func):
"""
Apply a function to a DataFrame that is intended to operate
elementwise, i.e. like doing map(func, series) for each series in the
DataFrame
Parameters
----------
func : function
Python function, returns a single value from a single value
Returns
-------
applied : DataFrame
"""
return self.apply(lambda x: lmap(func, x))
def to_manager(sdf, columns, index):
""" create and return the block manager from a dataframe of series,
columns, index
"""
# from BlockManager perspective
axes = [ensure_index(columns), ensure_index(index)]
return create_block_manager_from_arrays(
[sdf[c] for c in columns], columns, axes)
def stack_sparse_frame(frame):
"""
Only makes sense when fill_value is NaN
"""
lengths = [s.sp_index.npoints for _, s in compat.iteritems(frame)]
nobs = sum(lengths)
# this is pretty fast
minor_labels = np.repeat(np.arange(len(frame.columns)), lengths)
inds_to_concat = []
vals_to_concat = []
# TODO: Figure out whether this can be reached.
# I think this currently can't be reached because you can't build a
# SparseDataFrame with a non-np.NaN fill value (fails earlier).
for _, series in compat.iteritems(frame):
if not np.isnan(series.fill_value):
raise TypeError('This routine assumes NaN fill value')
int_index = series.sp_index.to_int_index()
inds_to_concat.append(int_index.indices)
vals_to_concat.append(series.sp_values)
major_labels = np.concatenate(inds_to_concat)
stacked_values = np.concatenate(vals_to_concat)
index = MultiIndex(levels=[frame.index, frame.columns],
labels=[major_labels, minor_labels],
verify_integrity=False)
lp = DataFrame(stacked_values.reshape((nobs, 1)), index=index,
columns=['foo'])
return lp.sort_index(level=0)
def homogenize(series_dict):
"""
Conform a set of SparseSeries (with NaN fill_value) to a common SparseIndex
corresponding to the locations where they all have data
Parameters
----------
series_dict : dict or DataFrame
Notes
-----
Using the dumbest algorithm I could think of. Should put some more thought
into this
Returns
-------
homogenized : dict of SparseSeries
"""
index = None
need_reindex = False
for _, series in compat.iteritems(series_dict):
if not np.isnan(series.fill_value):
raise TypeError('this method is only valid with NaN fill values')
if index is None:
index = series.sp_index
elif not series.sp_index.equals(index):
need_reindex = True
index = index.intersect(series.sp_index)
if need_reindex:
output = {}
for name, series in compat.iteritems(series_dict):
if not series.sp_index.equals(index):
series = series.sparse_reindex(index)
output[name] = series
else:
output = series_dict
return output
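# Illustrative sketch of homogenize (not part of the original source): given
# two NaN-filled SparseSeries, each value in the result is reindexed to the
# intersection of the input sparse indexes.
#
#   common = homogenize({'a': sp_a, 'b': sp_b})
#   # common['a'].sp_index.equals(common['b'].sp_index) -> True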
# use unaccelerated ops for sparse objects
ops.add_flex_arithmetic_methods(SparseDataFrame)
ops.add_special_arithmetic_methods(SparseDataFrame)
|
import time
import telebot
from Responses import TELE_HI_GREET, TELE_CLASS_CODE
import BacaPdf as pdf
import csvHandler as csvH
with open('API_KEY.txt') as API_KEY:
    bot = telebot.TeleBot(API_KEY.read().strip())  # strip any trailing newline from the token
#Message type check
#ClassCode, TimeInterval, Status, Feedback
messageBool = [False, False, False, False]
def Echooo(themessage):
for ID in csvH.AllID():
bot.send_message(ID, themessage)
def Greet(message):
print(message.text)
if (message.text).lower() in TELE_HI_GREET:
return True
return False
def ClassCode(message):
if (message.text).lower() in TELE_CLASS_CODE:
return True
return False
def TimeInterval(message):
message = (message.text).lower()
if message.isdigit():
return True
return False
def feedbackCatch(message):
if messageBool[3]:
return True
return False
#Commands
@bot.message_handler(commands=['start'])
def start(message):
bot.reply_to(message,"HEY! Welcome to bot Ukrida")
if csvH.checkID(message.chat.id) == 0:
classCom(message,True)
csvH.newID(message.chat.id,
message.chat.first_name,
message.chat.username,
"1PEEA", 10, 'active')
@bot.message_handler(commands=['classcode'])
def classCom(message, first = False):
global messageBool
messageBool = [True, False, False, False]
if first:
bot.send_message(message.chat.id, "Ketik kode kelasmu,\n(Contoh 1Peea):")
else:
bot.send_message(message.chat.id, "Ketik kode kelasmu, atau /cancel untuk membatalkan\n(Contoh 1Peea):")
@bot.message_handler(commands=['cancel'])
def cancelCom(message):
global messageBool
for x in messageBool:
if x:
messageBool = [False, False, False, False]
bot.send_message(message.chat.id, "OK :)")
return
@bot.message_handler(commands=['feedback'])
def feedbackCom(message):
global messageBool
messageBool = [False, False, False, True]
bot.send_message(message.chat.id, "Feedback, atau laporan error:")
@bot.message_handler(commands=['schedules'])
def schedulesCom(message,classCode=0):
if classCode == 0:
classCode = csvH.checkClass(message.chat.id)
queryClass = pdf.openFile(classCode)
if len(queryClass) > 0:
for kelas in queryClass:
sendTo = "Matkul: "+kelas[0]+"\n"
sendTo += "Waktu: "+kelas[1]+", "+kelas[2]+kelas[3]+"\n"
sendTo += "Dosen: "+kelas[4]+"\n"
if kelas[5] == "PTM":
sendTo += "Room:" + kelas[5]
elif kelas[5] == "Meet":
sendTo += "Room:" +'G'+ kelas[5]
            else:  # numeric meeting ID
sendTo += "MeetID: "+kelas[5]+"\n"
sendTo += "Pass: "+kelas[6]
bot.send_message(message.chat.id, sendTo)
bot.send_message(message.chat.id, "Selamat Kuliah!")
else:
bot.send_message(message.chat.id, "Maaf, kode kelas "+classCode.upper()+" belum ada di list.")
@bot.message_handler(commands=['timer', 'help'])
def notyetCom(message):
bot.send_message(message.chat.id, "Under Construction")
#Commands Child
@bot.message_handler(func=Greet)
def GreetCH(message):
bot.send_message(message.chat.id, "Halo "+message.chat.first_name+" :)")
@bot.message_handler(func=feedbackCatch)
def feedbackCH(message):
with open('feedback.txt','a') as f:
f.write(message.text)
#bot.send_message(895523970, str(message.chat.first_name)+":"+message.text)
bot.send_message(message.chat.id, "Pesan terkirim :)")
@bot.message_handler(func=ClassCode)
def ClassCH(message):
if messageBool[0]:
bot.send_message(message.chat.id, "OK, kelasmu tercatat: "+(message.text).upper())
schedulesCom(message,message.text)
csvH.changeClass(csvH.checkID(message.chat.id), (message.text).upper())
messageBool[0] = False
else:
bot.send_message(message.chat.id, "Ketik /classcode untuk mengganti kode kelas, atau /schedules untuk melihat jadwal kelasmu")
if __name__ == "__main__":
Echooo("Hi! Server On 7-12 Maret 2022")
# bot.infinity_polling()
# time.sleep(2)
|
#!/usr/bin/python3
# encoding: utf-8
### python script to interact with Complice
### giovanni, Saturday, April 10, 2021, 2:11 PM
### March 2022 updated to Python 3
import sys
from config import POMOLENGTH, TIMERLENGTH, TOKEN
import complice_post
myInput = sys.argv[1]
if myInput == "startTimer":
complice_post.start_hourglass()
if myInput == "startPomo":
complice_post.start_pomo()
if myInput == "Timer30":
complice_post.start_custom_hourglass30()
if myInput == "Timer60":
complice_post.start_custom_hourglass60()
if myInput == "runningTimerPause":
complice_post.pause_timer()
if myInput == "runningTimerCancel":
complice_post.cancel_timer()
if myInput == "pausedTimerCancel":
complice_post.cancel_timer()
if myInput == "restartTimer":
complice_post.restart_hourglass()
if myInput == "runningPomo":
complice_post.cancel_timer()
if myInput == "breaking":
complice_post.cancel_timer()
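# Usage sketch (hypothetical file name; the literals above are the only
# actions handled):
#
#   python3 complice_actions.py startPomo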
|
import unittest
from troposphere import Tags, Template
from troposphere.s3 import Filter, Rules, S3Key
from troposphere.serverless import (
Api, DeadLetterQueue, DeploymentPreference, Function, FunctionForPackaging,
LayerVersion, S3Event, S3Location, SimpleTable,
)
class TestServerless(unittest.TestCase):
def test_exactly_one_code(self):
serverless_func = Function(
"SomeHandler",
Handler="index.handler",
Runtime="nodejs",
CodeUri=S3Location(
Bucket="mybucket",
Key="mykey",
),
InlineCode="",
)
t = Template()
t.add_resource(serverless_func)
with self.assertRaises(ValueError):
t.to_json()
def test_s3_location(self):
serverless_func = Function(
"SomeHandler",
Handler="index.handler",
Runtime="nodejs",
CodeUri=S3Location(
Bucket="mybucket",
Key="mykey",
)
)
t = Template()
t.add_resource(serverless_func)
t.to_json()
def test_tags(self):
serverless_func = Function(
"SomeHandler",
Handler="index.handler",
Runtime="nodejs",
CodeUri="s3://bucket/handler.zip",
Tags=Tags({
'Tag1': 'TagValue1',
'Tag2': 'TagValue2'
})
)
t = Template()
t.add_resource(serverless_func)
t.to_json()
def test_DLQ(self):
serverless_func = Function(
"SomeHandler",
Handler="index.handler",
Runtime="nodejs",
CodeUri="s3://bucket/handler.zip",
DeadLetterQueue=DeadLetterQueue(
Type='SNS',
TargetArn='arn:aws:sns:us-east-1:000000000000:SampleTopic'
)
)
t = Template()
t.add_resource(serverless_func)
t.to_json()
def test_required_function(self):
serverless_func = Function(
"SomeHandler",
Handler="index.handler",
Runtime="nodejs",
CodeUri="s3://bucket/handler.zip"
)
t = Template()
t.add_resource(serverless_func)
t.to_json()
def test_optional_auto_publish_alias(self):
serverless_func = Function(
"SomeHandler",
Handler="index.handler",
Runtime="nodejs",
CodeUri="s3://bucket/handler.zip",
AutoPublishAlias="alias"
)
t = Template()
t.add_resource(serverless_func)
t.to_json()
def test_optional_deployment_preference(self):
serverless_func = Function(
"SomeHandler",
Handler="index.handler",
Runtime="nodejs",
CodeUri="s3://bucket/handler.zip",
AutoPublishAlias="alias",
DeploymentPreference=DeploymentPreference(
Type="AllAtOnce"
)
)
t = Template()
t.add_resource(serverless_func)
t.to_json()
def test_required_api_definitionuri(self):
serverless_api = Api(
"SomeApi",
StageName='test',
DefinitionUri='s3://bucket/swagger.yml',
)
t = Template()
t.add_resource(serverless_api)
t.to_json()
swagger = {
"swagger": "2.0",
"info": {
"title": "swagger test",
},
"paths": {
"/test": {
"get": {
},
},
},
}
def test_required_api_both(self):
serverless_api = Api(
"SomeApi",
StageName='test',
DefinitionUri='s3://bucket/swagger.yml',
DefinitionBody=self.swagger,
)
t = Template()
t.add_resource(serverless_api)
with self.assertRaises(ValueError):
t.to_json()
def test_required_api_definitionbody(self):
serverless_api = Api(
"SomeApi",
StageName='test',
DefinitionBody=self.swagger,
)
t = Template()
t.add_resource(serverless_api)
t.to_json()
def test_api_no_definition(self):
serverless_api = Api(
"SomeApi",
StageName='test',
)
t = Template()
t.add_resource(serverless_api)
t.to_json()
def test_simple_table(self):
serverless_table = SimpleTable(
"SomeTable"
)
t = Template()
t.add_resource(serverless_table)
t.to_json()
def test_layer_version(self):
layer_version = LayerVersion(
"SomeLayer",
ContentUri="someuri",
)
t = Template()
t.add_resource(layer_version)
t.to_json()
layer_version = LayerVersion(
"SomeLayer",
)
t = Template()
t.add_resource(layer_version)
with self.assertRaises(ValueError):
t.to_json()
def test_s3_filter(self):
t = Template()
t.add_resource(
Function(
"ProcessorFunction",
Handler='process_file.handler',
CodeUri='.',
Runtime='python3.6',
Policies='AmazonS3FullAccess',
Events={
'FileUpload': S3Event(
'FileUpload',
Bucket="bucket",
Events=['s3:ObjectCreated:*'],
Filter=Filter(S3Key=S3Key(
Rules=[
Rules(Name="prefix", Value="upload/"),
Rules(Name="suffix", Value=".txt"),
],
))
)
}
)
)
t.to_json()
def test_policy_document(self):
t = Template()
t.add_resource(
Function(
"ProcessorFunction",
Handler='process_file.handler',
CodeUri='.',
Runtime='python3.6',
Policies="AmazonS3ReadOnly"
)
)
t.to_json()
t = Template()
t.add_resource(
Function(
"ProcessorFunction",
Handler='process_file.handler',
CodeUri='.',
Runtime='python3.6',
Policies=["AmazonS3FullAccess", "AmazonDynamoDBFullAccess"]
)
)
t.to_json()
t = Template()
t.add_resource(
Function(
"ProcessorFunction",
Handler='process_file.handler',
CodeUri='.',
Runtime='python3.6',
Policies={
"Statement": [{
"Effect": "Allow",
"Action": ["s3:GetObject", "s3:PutObject"],
"Resource": ["arn:aws:s3:::bucket/*"],
}]
},
)
)
t.to_json()
def test_packaging(self):
# test for no CodeUri or InlineCode
t = Template()
t.add_resource(
FunctionForPackaging(
"ProcessorFunction",
Handler='process_file.handler',
Runtime='python3.6',
Policies={
"Statement": [{
"Effect": "Allow",
"Action": ["s3:GetObject", "s3:PutObject"],
"Resource": ["arn:aws:s3:::bucket/*"],
}]
},
)
)
t.to_json()
if __name__ == '__main__':
unittest.main()
|
import sys
import csv
import time
import calendar
import matplotlib.pyplot as plt
from collections import OrderedDict
from datetime import date, timedelta as td
if __name__ == '__main__':
if len(sys.argv) < 2:
print 'Please insert processed twitdata.csv'
exit()
twitReader = csv.DictReader(open(sys.argv[1], 'rb'), delimiter = ',', fieldnames=['uid','lat','lon','province','province_abbr','province_abbr_index','epoch','date','time'])
hourFreq = [0 for x in range(24)]
hourFreqDays = [[0 for x in range(24)] for y in range(7)]
dayFreq = [0 for x in range(7)]
everyDayFreq = {}
twitReader.next()
for twit in twitReader:
tTime = time.strptime(twit['time'],'%H:%M:%S')
tDay = time.strptime(twit['date'], '%Y-%m-%d')
tWeekDay = calendar.weekday(tDay.tm_year, tDay.tm_mon, tDay.tm_mday)
hourFreq[tTime.tm_hour] += 1
hourFreqDays[tWeekDay][tTime.tm_hour] += 1
dayFreq[tWeekDay] += 1
tDayTuple = str(tDay.tm_year)+'-'+str(tDay.tm_mon)+'-'+str(tDay.tm_mday)
if tDayTuple not in everyDayFreq:
everyDayFreq[tDayTuple] = 1
else:
everyDayFreq[tDayTuple] += 1
everyDayFreq = OrderedDict(sorted(everyDayFreq.items()))
dStart = time.strptime(everyDayFreq.keys()[0], '%Y-%m-%d')
dStart = date(dStart.tm_year, dStart.tm_mon, dStart.tm_mday)
dEnd = time.strptime(everyDayFreq.keys()[-1], '%Y-%m-%d')
dEnd = date(dEnd.tm_year, dEnd.tm_mon, dEnd.tm_mday)
delta = dEnd - dStart
fullEveryDayFreq = OrderedDict()
weekDayCount = OrderedDict()
for i in range(7):
weekDayCount[i] = []
for i in range(delta.days+1):
tempDay = time.strptime(str(dStart + td(days=i)), '%Y-%m-%d')
dayStr = str(tempDay.tm_year)+'-'+str(tempDay.tm_mon)+'-'+str(tempDay.tm_mday)
fullEveryDayFreq[dayStr] = 0
weekDay = calendar.weekday(tempDay.tm_year, tempDay.tm_mon, tempDay.tm_mday)
if dayStr not in weekDayCount[weekDay]:
weekDayCount[weekDay].append(dayStr)
for day in everyDayFreq.items():
fullEveryDayFreq[day[0]] = day[1]
for i in range(7):
try:
dayFreq[i] /= len(weekDayCount[i])
except:
pass
# Plot tweet freq by day hour
weekDayNames = ['Mon','Tue','Wed','Thu','Fri','Sat','Sun']
    # Separate by day
for f1 in range(len(hourFreqDays)):
for f2 in range(len(hourFreqDays[f1])):
try:
hourFreqDays[f1][f2] /= len(weekDayCount[f1])
except:
pass
plt.plot(hourFreqDays[f1], label = weekDayNames[f1])
plt.legend(loc='best')
plt.xticks(range(24))
plt.xlabel('Hour during the day')
    plt.ylabel('Mean number of tweets')
plt.title('Tweet frequency during the day')
plt.show()
# Plot tweet freq by day in week
plt.plot(dayFreq, 'b-', dayFreq, 'ro')
plt.xticks(range(len(weekDayNames)), weekDayNames)
plt.xlabel('Day')
    plt.ylabel('Mean number of tweets')
plt.title('Tweet frequency by day in week')
plt.show()
# Plot tweet freq in every day of collected data
plt.plot(fullEveryDayFreq.values(), 'b-', fullEveryDayFreq.values(), 'ro')
plt.xticks(range(len(fullEveryDayFreq.values())), fullEveryDayFreq.keys(), rotation = 'vertical')
plt.xlabel('Date')
    plt.ylabel('Number of tweets')
plt.title('Tweet frequency of overall collected data')
plt.show()
|
from django.shortcuts import render
from django.http import JsonResponse
from django.http import HttpResponse, HttpResponseNotFound
from django.contrib.auth.decorators import login_required
from floweditor.models import B1if
from easywebdavbiffy import *
import xmltodict
import StringIO
from zipfile import ZipFile
from xml.dom.minidom import parseString
# Renders work area, hands over list of b1if servers
@login_required
def index(request):
account_id = request.user.biffyuser.account.id
b1if_servers = B1if.objects.filter(account_id=account_id).order_by('name')
context = {
'b1if_servers': b1if_servers
}
return render(request, 'floweditor/workarea.html', context)
# Gets a list of scenarios - .vPac is the content for un-assigned flows
@login_required
def getScenarios(request):
b1if_server = B1if.objects.get(id=request.POST['server'])
    if b1if_server.account != request.user.biffyuser.account:
        return HttpResponseNotFound()
#b1if_server = B1if.objects.get(id=1)
webdav = ewdconnect(b1if_server.server, port=b1if_server.port, username=b1if_server.user, password=b1if_server.password)
#print b1if_server.server+":"+b1if_server.port
folders = webdav.ls(b1if_server.path)
scenarios = []
for f in folders:
fname = f.name.rsplit('/')[-1]
# Folders starting with vPac. are scenarios, don't include SAP generated scenarios
if 'vPac.' in fname and not 'vPac.sap.' in fname:
scenarios.append(fname)
return JsonResponse({'scenarios':scenarios,'path':b1if_server.path})
# JSON Returns a list of flows for a scenario - read from the vBIU list in the scenario vPac file
@login_required
def getScenarioFlows(request):
b1if_server = B1if.objects.get(id=request.POST['server'])
    if b1if_server.account != request.user.biffyuser.account:
        return HttpResponseNotFound()
webdav = ewdconnect(b1if_server.server, port=b1if_server.port, username=b1if_server.user, password=b1if_server.password)
path = b1if_server.path+request.POST['scenario']+'/vPac.xml'
virtual_file = StringIO.StringIO()
webdav.download(path,virtual_file)
file_contents = virtual_file.getvalue()
flows = []
doc = xmltodict.parse(file_contents)
#print doc['vPac']['vBIUList']
for vbiu in doc['vPac']['vBIUList']['vBIU']:
flows.append(vbiu['@Id'])
return JsonResponse({'flows':flows,'path':b1if_server.path,})
# JSON Returns a list of files for a scenario flow
@login_required
def getFlowFiles(request):
b1if_server = B1if.objects.get(id=request.POST['server'])
    if b1if_server.account != request.user.biffyuser.account:
        return HttpResponseNotFound()
webdav = ewdconnect(b1if_server.server, port=b1if_server.port, username=b1if_server.user, password=b1if_server.password)
path = b1if_server.path+'vBIU.'+request.POST['flow']
folders = webdav.ls(path)
files = []
for f in folders:
fname = f.name.rsplit('/')[-1]
files.append(fname)
return JsonResponse({'files':files,'path':path})
# JSON Returns a files content
@login_required
def getFlowFileContent(request):
b1if_server = B1if.objects.get(id=request.POST['server'])
    if b1if_server.account != request.user.biffyuser.account:
        return HttpResponseNotFound()
webdav = ewdconnect(b1if_server.server, port=b1if_server.port, username=b1if_server.user, password=b1if_server.password)
path = b1if_server.path+'vBIU.'+request.POST['flow']+'/'+request.POST['file']
virtual_file = StringIO.StringIO()
webdav.download(path,virtual_file)
return JsonResponse({'file_content':virtual_file.getvalue(),'path':path})
# JSON Saves a files content - returns True/False
# Writes the new file to .floweditor.xml (pro tip, the webdav server will Base64 encode
# your file if it doesn't end in .xml or .xsl)
# Will bail if the upload fails instead of overwriting the old file
# with a blank new file (severely painful past experience here)
# Deletes the old file and moves the new file to the old name
# Deletes old move files first
@login_required
def saveFlowFileContent(request):
b1if_server = B1if.objects.get(id=request.POST['server'])
    if b1if_server.account != request.user.biffyuser.account:
        return HttpResponseNotFound()
webdav = ewdconnect(b1if_server.server, port=b1if_server.port, username=b1if_server.user, password=b1if_server.password)
path = b1if_server.path+'vBIU.'+request.POST['flow']+'/'+request.POST['file']
temp_path = b1if_server.path+'vBIU.'+request.POST['flow']+'/floweditor.'+request.POST['file']
new_file_content = request.POST['file_content']
if webdav.exists(temp_path)==True:
webdav.delete(temp_path)
virtual_file = StringIO.StringIO()
virtual_file.write(new_file_content)
webdav.upload(virtual_file,temp_path)
response = False
if webdav.exists(temp_path)==True:
webdav.delete(path)
webdav.move(temp_path,path)
response = True
return JsonResponse({'success':response})
@login_required
def downloadScenarioZip(request):
b1if_server = B1if.objects.get(id=request.POST['server'])
    if b1if_server.account != request.user.biffyuser.account:
        return HttpResponseNotFound()
scenario = request.POST['scenario']
webdav = ewdconnect(b1if_server.server, port=b1if_server.port, username=b1if_server.user, password=b1if_server.password)
path = b1if_server.path+scenario
files = webdav.ls(path)
zipOutputFile = StringIO.StringIO()
zipFile = ZipFile(zipOutputFile, 'w')
#zipFile.writestr('/b1ifident.xml', 'test')
zipFile.writestr('/b1ifident.xml', '<?xml version="1.0" encoding="UTF-8"?><b1ifident xmlns:bfa="urn:com.sap.b1i.bizprocessor:bizatoms"><id>'+str(scenario.replace('vPac.',''))+'</id><type>vPac</type><ver>1.0.0</ver></b1ifident>')
for f in files:
virtual_file = StringIO.StringIO()
webdav.download(f.name,virtual_file)
zipFile.writestr(f.name.replace('/B1iXcellerator/exec/webdav',''), virtual_file.getvalue())
path = b1if_server.path+scenario+'/vPac.xml'
virtual_file = StringIO.StringIO()
webdav.download(path,virtual_file)
file_contents = virtual_file.getvalue()
doc = xmltodict.parse(file_contents)
#print doc['vPac']['vBIUList']
for vbiu in doc['vPac']['vBIUList']['vBIU']:
flow = vbiu['@Id']
path = b1if_server.path+'vBIU.'+flow
folders = webdav.ls(path)
for f in folders:
virtual_file = StringIO.StringIO()
webdav.download(f.name,virtual_file)
zipFile.writestr(f.name.replace('/B1iXcellerator/exec/webdav',''), virtual_file.getvalue())
zipFile.close()
zipOutputFile.seek(0)
#response = HttpResponse(zipOutputFile.read())
response = HttpResponse(zipOutputFile.getvalue())
response['Content-Disposition'] = 'attachment; filename=%s.zip' %(scenario)
response['Content-Type'] = 'application/x-zip'
return response
@login_required
def formatXML(request):
input_xml = request.POST['xml']
error = []
#xmlDom = xml.dom.minidom.parseString(input_xml)
formatted_xml = '\n'.join([line for line in parseString(input_xml).toprettyxml(indent=' '*2).split('\n') if line.strip()])
#formatted_xml = lambda formatted_xml: '\n'.join([line for line in parseString(formatted_xml).toprettyxml(indent=' '*2).split('\n') if line.strip()])
return JsonResponse({'formatted_xml':formatted_xml,'error':error})
|
# -*- coding: utf-8 -*-
# Copyright 2011 Google Inc. All Rights Reserved.
# Copyright 2011, Nexenta Systems Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implementation of Unix-like cat command for cloud storage providers."""
from __future__ import absolute_import
import re
from gslib.cat_helper import CatHelper
from gslib.command import Command
from gslib.command_argument import CommandArgument
from gslib.cs_api_map import ApiSelector
from gslib.exception import CommandException
from gslib.util import NO_MAX
_SYNOPSIS = """
gsutil cat [-h] url...
"""
_DETAILED_HELP_TEXT = ("""
<B>SYNOPSIS</B>
""" + _SYNOPSIS + """
<B>DESCRIPTION</B>
The cat command outputs the contents of one or more URLs to stdout.
It is equivalent to doing:
gsutil cp url... -
(The final '-' causes gsutil to stream the output to stdout.)
<B>WARNING: DATA INTEGRITY CHECKING NOT DONE</B>
The gsutil cat command does not compute a checksum of the downloaded data.
Therefore, we recommend that users either perform their own validation of the
output of gsutil cat or use gsutil cp or rsync (both of which perform
integrity checking automatically).
<B>OPTIONS</B>
-h Prints short header for each object. For example:
gsutil cat -h gs://bucket/meeting_notes/2012_Feb/*.txt
This would print a header with the object name before the contents
of each text object that matched the wildcard.
-r range Causes gsutil to output just the specified byte range of the
              object. Ranges can be of these forms:
start-end (e.g., -r 256-5939)
start- (e.g., -r 256-)
-numbytes (e.g., -r -5)
where offsets start at 0, start-end means to return bytes start
through end (inclusive), start- means to return bytes start
through the end of the object, and -numbytes means to return the
last numbytes of the object. For example:
gsutil cat -r 256-939 gs://bucket/object
returns bytes 256 through 939, while:
gsutil cat -r -5 gs://bucket/object
returns the final 5 bytes of the object.
""")
class CatCommand(Command):
"""Implementation of gsutil cat command."""
# Command specification. See base class for documentation.
command_spec = Command.CreateCommandSpec(
'cat',
command_name_aliases=[],
usage_synopsis=_SYNOPSIS,
min_args=1,
max_args=NO_MAX,
supported_sub_args='hr:',
file_url_ok=False,
provider_url_ok=False,
urls_start_arg=0,
gs_api_support=[ApiSelector.XML, ApiSelector.JSON],
gs_default_api=ApiSelector.JSON,
argparse_arguments=[
CommandArgument.MakeZeroOrMoreCloudURLsArgument()
]
)
# Help specification. See help_provider.py for documentation.
help_spec = Command.HelpSpec(
help_name='cat',
help_name_aliases=[],
help_type='command_help',
help_one_line_summary='Concatenate object content to stdout',
help_text=_DETAILED_HELP_TEXT,
subcommand_help_text={},
)
# Command entry point.
def RunCommand(self):
"""Command entry point for the cat command."""
show_header = False
request_range = None
start_byte = 0
end_byte = None
if self.sub_opts:
for o, a in self.sub_opts:
if o == '-h':
show_header = True
elif o == '-r':
request_range = a.strip()
range_matcher = re.compile(
'^(?P<start>[0-9]+)-(?P<end>[0-9]*)$|^(?P<endslice>-[0-9]+)$')
range_match = range_matcher.match(request_range)
if not range_match:
raise CommandException('Invalid range (%s)' % request_range)
if range_match.group('start'):
start_byte = long(range_match.group('start'))
if range_match.group('end'):
end_byte = long(range_match.group('end'))
if range_match.group('endslice'):
start_byte = long(range_match.group('endslice'))
else:
self.RaiseInvalidArgumentException()
return CatHelper(self).CatUrlStrings(self.args,
show_header=show_header,
start_byte=start_byte,
end_byte=end_byte)
|
from math import sqrt
from math import pi
import json
import tf
from geometry_msgs.msg import Quaternion
def dynamic_euclid_dist(a, b):
o = 0
for i in range(len(a)):
o += (a[i]-b[i])**2
return sqrt(o)
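# Quick check (sketch): dynamic_euclid_dist((0, 0), (3, 4)) == 5.0; the inputs
# may have any matching dimensionality, e.g. 3-D points.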
def quaternion_from_euler(roll, pitch, yaw):
'''
From HSR's utils.py
'''
q = tf.transformations.quaternion_from_euler(roll / 180.0 * pi,
pitch / 180.0 * pi,
yaw / 180.0 * pi, 'rxyz')
return Quaternion(q[0], q[1], q[2], q[3])
def euler_from_quaternion(q):
q = tf.transformations.euler_from_quaternion([q.x, q.y, q.z, q.w], 'rxyz')
return (q[0]/pi * 180, q[1]/pi * 180, q[2]/pi * 180)
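# Round-trip sketch (degrees in, degrees out, 'rxyz' convention as above):
#
#   q = quaternion_from_euler(0.0, 0.0, 90.0)   # pure yaw of 90 degrees
#   euler_from_quaternion(q)                    # approximately (0.0, 0.0, 90.0)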
def str_to_obj(string):
"""
Converts JSON string to data structure
Args:
string (str): valid JSON string
Raises:
        ValueError: if input isn't a valid JSON string
    Returns:
        The parsed data structure (dict, list, or scalar value)
"""
try:
return json.loads(string)
except ValueError as e:
raise ValueError("ValueError occured when loading JSON string: {}, the input was: {}".format(e, string))
def obj_to_str(obj):
return json.dumps(obj)
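# Round-trip sketch:
#
#   obj_to_str({'answer': 42})      # -> '{"answer": 42}'
#   str_to_obj('{"answer": 42}')    # -> {'answer': 42}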
|
import re
from anchore_engine.apis.authorization import (
ActionBoundPermission,
Permission,
RequestingAccountValue,
get_authorizer,
)
from anchore_engine.apis.context import ApiRequestContextProxy
from anchore_engine.apis.exceptions import BadRequest
from anchore_engine.clients.services import internal_client_for
from anchore_engine.clients.services.catalog import CatalogClient
from anchore_engine.common.helpers import make_response_error
from anchore_engine.configuration.localconfig import (
ADMIN_ACCOUNT_NAME,
GLOBAL_RESOURCE_DOMAIN,
)
authorizer = get_authorizer()
digest_regex = re.compile("^sha256:[abcdef0-9]+$")
def handle_proxy_response(resp):
if issubclass(Exception, resp.__class__):
if hasattr(resp, "httpcode"):
return make_response_error(resp, in_httpcode=resp.httpcode), resp.httpcode
else:
return make_response_error(resp, in_httpcode=500), 500
else:
return resp, 200
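# The handlers below all follow the same pattern (sketch): hand either the
# successful catalog client result or the caught exception to
# handle_proxy_response and return the resulting (body, http_code) pair.
#
#   body, code = handle_proxy_response(client.list_archives())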
@authorizer.requires([ActionBoundPermission(domain=RequestingAccountValue())])
def list_archives():
"""
GET /archives
:return: JSON object for archive summary
"""
client = internal_client_for(CatalogClient, ApiRequestContextProxy.namespace())
try:
return handle_proxy_response(client.list_archives())
except Exception as ex:
return handle_proxy_response(ex)
@authorizer.requires([ActionBoundPermission(domain=RequestingAccountValue())])
def list_analysis_archive_rules(system_global=True):
"""
GET /archives/rules
:return:
"""
client = internal_client_for(CatalogClient, ApiRequestContextProxy.namespace())
try:
return handle_proxy_response(
client.list_analysis_archive_rules(system_global=system_global)
)
except Exception as ex:
return handle_proxy_response(ex)
@authorizer.requires([ActionBoundPermission(domain=RequestingAccountValue())])
def create_analysis_archive_rule(rule):
"""
POST /archives/rules
:param rule: the rule's json object definition
:return:
"""
# Permission check on the system_global field, only admins
if rule.get("system_global"):
perm = Permission(GLOBAL_RESOURCE_DOMAIN, "createArchiveTransitionRule", "*")
# Will raise exception if unauthorized
authorizer.authorize(ApiRequestContextProxy.identity(), [perm])
# Validation for max_images_per_account
if (
not rule.get("system_global")
and rule.get("max_images_per_account", None) is not None
):
raise BadRequest(
"Cannot set max_images_per_account on a rule that isn't system_global", {}
)
client = internal_client_for(CatalogClient, ApiRequestContextProxy.namespace())
try:
return handle_proxy_response(client.add_analysis_archive_rule(rule))
except Exception as ex:
return handle_proxy_response(ex)
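# Illustrative (partial) rule payload, limited to the fields referenced above;
# any other fields required by the archive rule schema are not shown here:
#
#   rule = {
#       "system_global": True,
#       "max_images_per_account": 100,
#   }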
@authorizer.requires([ActionBoundPermission(domain=RequestingAccountValue())])
def delete_analysis_archive_rule(ruleId):
"""
DELETE /archives/rules/{ruleId}
:param ruleId:
:return:
"""
client = internal_client_for(CatalogClient, ApiRequestContextProxy.namespace())
try:
resp1 = handle_proxy_response(client.delete_analysis_archive_rule(ruleId))
if resp1[1] == 404 and ApiRequestContextProxy.namespace() != ADMIN_ACCOUNT_NAME:
# Yes, this is a bit ugly
# Get the rule, check if a global rule and adjust error code appropriately
try:
c2 = internal_client_for(CatalogClient, ADMIN_ACCOUNT_NAME)
r2 = c2.get_analysis_archive_rule(ruleId)
if r2 and r2.get("system_global", False):
return (
make_response_error(
"Non-admins cannot modify/delete system global rules",
in_httpcode=403,
),
403,
)
except Exception as ex:
pass
return resp1
except Exception as ex:
return handle_proxy_response(ex)
@authorizer.requires([ActionBoundPermission(domain=RequestingAccountValue())])
def get_analysis_archive_rule(ruleId):
"""
GET /archives/rules/{ruleId}
:param ruleId:
:return:
"""
client = internal_client_for(CatalogClient, ApiRequestContextProxy.namespace())
try:
resp1 = handle_proxy_response(client.get_analysis_archive_rule(ruleId))
if resp1[1] == 404 and ApiRequestContextProxy.namespace() != ADMIN_ACCOUNT_NAME:
# Yes, this is a bit ugly
# Get the rule, check if a global rule
try:
c2 = internal_client_for(CatalogClient, ADMIN_ACCOUNT_NAME)
r2 = handle_proxy_response(c2.get_analysis_archive_rule(ruleId))
if r2 and r2[1] == 200 and r2[0].get("system_global", False):
# Allow it
return handle_proxy_response(r2)
except Exception as ex:
pass
return resp1
except Exception as ex:
return handle_proxy_response(ex)
# @authorizer.requires([ActionBoundPermission(domain=RequestingAccountValue())])
# def get_analysis_archive_rule_history(ruleId):
# """
#
# GET /archives/rules/{ruleId}/history
#
# :param ruleId:
# :return: list of events for the rule
# """
# client = internal_client_for(CatalogClient, ApiRequestContextProxy.namespace())
# try:
# resp1 = handle_proxy_response(client.get_analysis_archive_rule_history(ruleId))
# if resp1[1] == 404 and ApiRequestContextProxy.namespace() != ADMIN_ACCOUNT_NAME:
# # Yes, this is a bit ugly
# # Get the rule, check if a global rule and adjust error code appropriately
# try:
# c2 = internal_client_for(CatalogClient, ADMIN_ACCOUNT_NAME)
# r2 = handle_proxy_response(c2.get_analysis_archive_rule(ruleId))
# if r2 and r2[1] == 200 and r2[0].get('system_global', False):
# return make_response_error('Non-admins cannot modify/delete system global rules', in_httpcode=403), 403
# except Exception as ex:
# pass
# return resp1
# except Exception as ex:
# return handle_proxy_response(ex)
@authorizer.requires([ActionBoundPermission(domain=RequestingAccountValue())])
def list_analysis_archive():
"""
GET /archives/images
    :return: array of archived image JSON objects
"""
client = internal_client_for(CatalogClient, ApiRequestContextProxy.namespace())
try:
return handle_proxy_response(client.list_archived_analyses())
except Exception as ex:
return handle_proxy_response(ex)
@authorizer.requires([ActionBoundPermission(domain=RequestingAccountValue())])
def archive_image_analysis(imageReferences):
"""
POST /archives/images
:param imageReferences: list of json object that reference images to archive
:return:
"""
client = internal_client_for(CatalogClient, ApiRequestContextProxy.namespace())
try:
return handle_proxy_response(client.archive_analyses(imageReferences))
except Exception as ex:
return handle_proxy_response(ex)
@authorizer.requires([ActionBoundPermission(domain=RequestingAccountValue())])
def get_archived_analysis(imageDigest):
"""
GET /archives/images/{imageDigest}
:param imageDigest:
:return:
"""
client = internal_client_for(CatalogClient, ApiRequestContextProxy.namespace())
try:
return handle_proxy_response(client.get_archived_analysis(imageDigest))
except Exception as ex:
return handle_proxy_response(ex)
@authorizer.requires([ActionBoundPermission(domain=RequestingAccountValue())])
def delete_archived_analysis(imageDigest):
"""
DELETE /archives/images/{imageDigest}
:param imageDigest:
:return:
"""
client = internal_client_for(CatalogClient, ApiRequestContextProxy.namespace())
try:
return handle_proxy_response(client.delete_archived_analysis(imageDigest))
except Exception as e:
return handle_proxy_response(e)
|
import io
import re
import sys
from setuptools import setup
if sys.version_info < (3, 5):
sys.exit('Sorry, Python < 3.5 is not supported.')
with io.open('README.rst', 'rt', encoding='utf8') as f:
readme = f.read()
with io.open('pygclip/__init__.py', 'rt', encoding='utf8') as f:
version = re.search(r'__version__ = \'(.*?)\'', f.read()).group(1)
setup(
name='pygclip',
version=version,
license='MIT',
author='Bryan Lee McKelvey',
author_email='bryan.mckelvey@gmail.com',
url='https://github.com/brymck/pygclip',
description='Pygmentize to clipboard for macOS.',
long_description=readme,
classifiers=[
'License :: OSI Approved :: MIT License',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
],
packages=['pygclip'],
include_package_data=True,
zip_safe=False,
install_requires=[
'pygments',
],
setup_requires=[
'pytest-runner',
],
tests_require=[
'pytest',
],
extras_require={
'dev': [
'coverage',
'flake8',
'flake8-mypy',
'pytest>=3',
'tox',
],
},
entry_points={
'console_scripts': [
'pygclip = pygclip.cli:main',
],
},
)
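# Illustrative usage note (not part of the original file): with the extras
# declared above, a local development install would typically look like
#   pip install -e .[dev]
# which also exposes the `pygclip` console script declared in entry_points.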
|
from setuptools import setup
setup(
name='unsolve',
version='0.1',
py_modules=['unsolve'],
install_requires=[
'Click',
],
entry_points='''
[console_scripts]
unsolve=unsolve:cli
''',
)
|
#!/usr/bin/env python3
import sys
import re
import configparser
def app(specs, spec):
if not specs:
return spec
else:
delimiter = "\n\n"
return delimiter.join((specs, spec))
# TODO: for Python 3.5 or higher: z = {**x, **y}
def merge_two_dicts(x, y):
z = dict(x)
z.update(y)
return z
def subst(text, key, val):
return text.replace('{' + key.upper() + '}', val)
def safe_get(config, section):
if section in config:
return config[section]
else:
return {}
def inherit_get(config, section):
if not section:
return safe_get(config, 'root')
else:
parent = inherit_get(config, '-'.join(section.split('-')[:-1]))
current = safe_get(config, section)
merged = merge_two_dicts(parent, current) # TODO: for Python 3.5 or higher: {**parent, **current}
for key in list(merged.keys()):
if key.startswith('+'):
merged[key[1:]] += merged[key]
del merged[key]
return merged
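# Worked example (illustrative only, hypothetical .ini content): given
#   [root]
#   timeout = 10
#   [proof]
#   tags = basic
#   [proof-fast]
#   +tags = , fast
# inherit_get(config, 'proof-fast') walks root -> proof -> proof-fast, and the
# '+tags' key is appended to the inherited 'tags' value, yielding
#   {'timeout': '10', 'tags': 'basic, fast'}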
def gen(spec_template, rule_template, spec_ini, spec_name, rule_name_list):
spec_config = configparser.ConfigParser(comment_prefixes=(';'))
spec_config.read(spec_ini)
if 'pgm' not in spec_config:
print('''Must specify a "pgm" section in the .ini file.''')
sys.exit(1)
pgm_config = spec_config['pgm']
rule_spec_list = []
for name in rule_name_list:
rule_spec = rule_template
for config in [ inherit_get(spec_config, name)
, pgm_config
]:
for key in config:
rule_spec = subst(rule_spec, key, config[key].strip())
rule_spec = subst(rule_spec, "rulename", name)
rule_spec_list.append(rule_spec)
delimiter = "\n"
rules = delimiter.join(rule_spec_list)
genspec = subst(spec_template, 'module', spec_name.upper())
genspec = subst(genspec, 'rules', rules)
print(genspec)
# genspec = template
# for config in [ inherit_get(spec_config, name)
# , {'module': name.upper()}
# , pgm_config['DEFAULT']
# ]:
# for key in config:
# genspec = subst(genspec, key, config[key].strip())
# print(genspec)
if __name__ == '__main__':
if len(sys.argv) < 6:
print("usage: <cmd> <spec-template> <rule-template> <spec_ini> <spec_name> <rule_name_list>")
sys.exit(1)
spec_template = open(sys.argv[1], "r").read()
rule_template = open(sys.argv[2], "r").read()
gen(spec_template, rule_template, sys.argv[3], sys.argv[4], sys.argv[5:])
|
import logging
import mist.api.users.models
import mist.api.auth.models
import mist.api.tag.models
log = logging.getLogger(__name__)
class AuthContext(object):
def __init__(self, user, token):
assert isinstance(user, mist.api.users.models.User)
self.user = user
assert isinstance(token, mist.api.auth.models.AuthToken)
self.token = token
assert (
hasattr(token, 'org') and
isinstance(token.org, mist.api.users.models.Organization)
)
self.org = token.org
# For backwards compatibility.
self.owner = self.org
def is_owner(self):
return True
def _raise(self, rtype, action, rid='', rtags=''):
pass
def check_perm(self, rtype, action, rid):
return {}, {}
def get_security_tags(self):
return []
def get_allowed_resources(self, action='read', rtype=None):
return {}
def _get_matching_tags(self, rtype, action):
return {}
def _get_matching_constraints(self, rtype, action):
return {}
def serialize(self):
"""This returns the basic context info in a dict of strings and can
safely be passed to dramatiq tasks etc. To recreate the context, just
feed it to AuthContext.deserialize"""
return {
'user_id': self.user.id,
'token_id': str(self.token.id) if self.token is not None else None,
}
@staticmethod
def deserialize(serialized):
if not isinstance(serialized, dict):
raise TypeError("Expected serialized AuthContext as dict, "
"got %r instead." % serialized)
user_id = serialized.get('user_id')
token_id = serialized.get('token_id')
user = mist.api.users.models.User.objects.get(id=user_id)
if token_id:
token = mist.api.auth.models.AuthToken.objects.get(id=token_id)
else:
token = None
return AuthContext(user, token)
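# Illustrative round-trip sketch (not part of the original module): assuming an
# existing User `user` and AuthToken `token`,
#   ctx = AuthContext(user, token)
#   payload = ctx.serialize()            # {'user_id': ..., 'token_id': ...}
#   restored = AuthContext.deserialize(payload)
# gives back an equivalent context, since deserialize() re-fetches both
# documents by the ids stored in the dict.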
def filter_logs(auth_context, kwargs):
log.warning('Call to dummy.filter_logs')
return
|
import json
import base64
from tests import TestBase
class TestAuth(TestBase):
def setUp(self):
super(TestAuth, self).setUp()
self.data_user = {
'username': 'nicolas',
'password': 'password',
}
def test_auth(self):
self.app.post(
'/api/v1.0/users',
data=json.dumps(self.data_user),
headers={"content-type": "application/json"},
)
response = self.app.get(
'/api/v1.0/token',
data=json.dumps(self.data_user),
headers={
'content-type': 'application/json',
'Authorization': 'Basic {}'.format(
base64.b64encode(
'{username}:{password}'.format(
username=self.data_user['username'],
password=self.data_user['password'],
).encode('utf-8')
).decode('ascii')
)
},
)
self.assertEqual(
response.status_code,
200,
)
response_data = json.loads(response.data)
token = response_data['token']
response = self.app.get(
'/api/v1.0/token/check',
headers={
'content-type': 'application/json',
'Authorization': 'Basic {}'.format(
base64.b64encode(
'{username}:noused'.format(username=token).encode('utf-8')
).decode('ascii')
)
}
)
|
from django.contrib import auth
from django.conf import settings
from django.utils.deprecation import MiddlewareMixin
class FakeLoginMiddleware(MiddlewareMixin):
"""
Login using ?fakeuser=USERNAME as long as settings.DEBUG is true.
This is for DEVELOPMENT ONLY.
"""
def process_request(self, request):
if settings.DEBUG and 'fakeuser' in request.GET:
username = request.GET['fakeuser']
# If the user is already authenticated and that user is the user we are
# getting passed in the headers, then the correct user is already
# persisted in the session and we don't need to continue.
if request.user.is_authenticated:
if request.user.username == username:
return
# We are seeing this user for the first time in this session, attempt
# to authenticate the user.
user = auth.authenticate(username=username, password='test')
if user:
# User is valid. Set request.user and persist user in the session
# by logging the user in.
request.user = user
auth.login(request, user)
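# Illustrative usage (assumption, not part of the original project): enable the
# class only in a development settings module, e.g.
#   MIDDLEWARE = [..., 'myproject.middleware.FakeLoginMiddleware']  # hypothetical dotted path
# then visit a page such as http://localhost:8000/?fakeuser=alice, where the
# user "alice" exists and can authenticate with password "test".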
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
# @Author: oesteban
# @Date: 2017-06-13 09:42:38
import os.path as op
import sys
def main():
from mriqc.__about__ import __version__
print(__version__)
if __name__ == '__main__':
sys.path.insert(0, op.abspath('.'))
main()
|
import claripy
import logging
from archinfo.arch_arm import is_arm_arch
l = logging.getLogger(name=__name__)
#l.setLevel(logging.DEBUG)
# pylint: disable=R0911
# pylint: disable=W0613
# pylint: disable=W0612
# pylint: disable=invalid-unary-operand-type
###############
### Helpers ###
###############
# There might be a better way of doing this
def calc_paritybit(state, p, msb=7, lsb=0):
if len(p) > msb:
p_part = p[msb:lsb]
else:
p_part = p
b = state.solver.BVV(1, 1)
for i in range(p_part.size()):
b = b ^ p_part[i]
return b
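# Worked example (illustrative): for a result whose low byte is 0b0000_0011 the
# loop XORs the initial 1 with two set bits, so calc_paritybit returns 1 --
# matching the x86 convention that PF is set when the low byte has even parity.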
def calc_zerobit(state, p):
return state.solver.If(p == 0, state.solver.BVV(1, 1), state.solver.BVV(0, 1))
def boolean_extend(state, O, a, b, size):
return state.solver.If(O(a, b), state.solver.BVV(1, size), state.solver.BVV(0, size))
def flag_concretize(state, flag):
return state.solver.eval(flag)
#return state.solver.eval_one(flag)
##################
### x86* data ###
##################
data = {
'AMD64': {
'CondTypes': { },
'CondBitOffsets': { },
'CondBitMasks': { },
'OpTypes': { }
}, 'X86': {
'CondTypes': { },
'CondBitOffsets': { },
'CondBitMasks': { },
'OpTypes': { }
}
}
# condition types
data['AMD64']['CondTypes']['CondO'] = 0 # /* overflow */
data['AMD64']['CondTypes']['CondNO'] = 1 # /* no overflow */
data['AMD64']['CondTypes']['CondB'] = 2 # /* below */
data['AMD64']['CondTypes']['CondNB'] = 3 # /* not below */
data['AMD64']['CondTypes']['CondZ'] = 4 # /* zero */
data['AMD64']['CondTypes']['CondNZ'] = 5 # /* not zero */
data['AMD64']['CondTypes']['CondBE'] = 6 # /* below or equal */
data['AMD64']['CondTypes']['CondNBE'] = 7 # /* not below or equal */
data['AMD64']['CondTypes']['CondS'] = 8 # /* negative */
data['AMD64']['CondTypes']['CondNS'] = 9 # /* not negative */
data['AMD64']['CondTypes']['CondP'] = 10 # /* parity even */
data['AMD64']['CondTypes']['CondNP'] = 11 # /* not parity even */
data['AMD64']['CondTypes']['CondL'] = 12 # /* jump less */
data['AMD64']['CondTypes']['CondNL'] = 13 # /* not less */
data['AMD64']['CondTypes']['CondLE'] = 14 # /* less or equal */
data['AMD64']['CondTypes']['CondNLE'] = 15 # /* not less or equal */
# condition bit offsets
data['AMD64']['CondBitOffsets']['G_CC_SHIFT_O'] = 11
data['AMD64']['CondBitOffsets']['G_CC_SHIFT_S'] = 7
data['AMD64']['CondBitOffsets']['G_CC_SHIFT_Z'] = 6
data['AMD64']['CondBitOffsets']['G_CC_SHIFT_A'] = 4
data['AMD64']['CondBitOffsets']['G_CC_SHIFT_C'] = 0
data['AMD64']['CondBitOffsets']['G_CC_SHIFT_P'] = 2
# masks
data['AMD64']['CondBitMasks']['G_CC_MASK_O'] = (1 << data['AMD64']['CondBitOffsets']['G_CC_SHIFT_O'])
data['AMD64']['CondBitMasks']['G_CC_MASK_S'] = (1 << data['AMD64']['CondBitOffsets']['G_CC_SHIFT_S'])
data['AMD64']['CondBitMasks']['G_CC_MASK_Z'] = (1 << data['AMD64']['CondBitOffsets']['G_CC_SHIFT_Z'])
data['AMD64']['CondBitMasks']['G_CC_MASK_A'] = (1 << data['AMD64']['CondBitOffsets']['G_CC_SHIFT_A'])
data['AMD64']['CondBitMasks']['G_CC_MASK_C'] = (1 << data['AMD64']['CondBitOffsets']['G_CC_SHIFT_C'])
data['AMD64']['CondBitMasks']['G_CC_MASK_P'] = (1 << data['AMD64']['CondBitOffsets']['G_CC_SHIFT_P'])
# operation types
data['AMD64']['OpTypes']['G_CC_OP_COPY'] = 0
data['AMD64']['OpTypes']['G_CC_OP_ADDB'] = 1
data['AMD64']['OpTypes']['G_CC_OP_ADDW'] = 2
data['AMD64']['OpTypes']['G_CC_OP_ADDL'] = 3
data['AMD64']['OpTypes']['G_CC_OP_ADDQ'] = 4
data['AMD64']['OpTypes']['G_CC_OP_SUBB'] = 5
data['AMD64']['OpTypes']['G_CC_OP_SUBW'] = 6
data['AMD64']['OpTypes']['G_CC_OP_SUBL'] = 7
data['AMD64']['OpTypes']['G_CC_OP_SUBQ'] = 8
data['AMD64']['OpTypes']['G_CC_OP_ADCB'] = 9
data['AMD64']['OpTypes']['G_CC_OP_ADCW'] = 10
data['AMD64']['OpTypes']['G_CC_OP_ADCL'] = 11
data['AMD64']['OpTypes']['G_CC_OP_ADCQ'] = 12
data['AMD64']['OpTypes']['G_CC_OP_SBBB'] = 13
data['AMD64']['OpTypes']['G_CC_OP_SBBW'] = 14
data['AMD64']['OpTypes']['G_CC_OP_SBBL'] = 15
data['AMD64']['OpTypes']['G_CC_OP_SBBQ'] = 16
data['AMD64']['OpTypes']['G_CC_OP_LOGICB'] = 17
data['AMD64']['OpTypes']['G_CC_OP_LOGICW'] = 18
data['AMD64']['OpTypes']['G_CC_OP_LOGICL'] = 19
data['AMD64']['OpTypes']['G_CC_OP_LOGICQ'] = 20
data['AMD64']['OpTypes']['G_CC_OP_INCB'] = 21
data['AMD64']['OpTypes']['G_CC_OP_INCW'] = 22
data['AMD64']['OpTypes']['G_CC_OP_INCL'] = 23
data['AMD64']['OpTypes']['G_CC_OP_INCQ'] = 24
data['AMD64']['OpTypes']['G_CC_OP_DECB'] = 25
data['AMD64']['OpTypes']['G_CC_OP_DECW'] = 26
data['AMD64']['OpTypes']['G_CC_OP_DECL'] = 27
data['AMD64']['OpTypes']['G_CC_OP_DECQ'] = 28
data['AMD64']['OpTypes']['G_CC_OP_SHLB'] = 29
data['AMD64']['OpTypes']['G_CC_OP_SHLW'] = 30
data['AMD64']['OpTypes']['G_CC_OP_SHLL'] = 31
data['AMD64']['OpTypes']['G_CC_OP_SHLQ'] = 32
data['AMD64']['OpTypes']['G_CC_OP_SHRB'] = 33
data['AMD64']['OpTypes']['G_CC_OP_SHRW'] = 34
data['AMD64']['OpTypes']['G_CC_OP_SHRL'] = 35
data['AMD64']['OpTypes']['G_CC_OP_SHRQ'] = 36
data['AMD64']['OpTypes']['G_CC_OP_ROLB'] = 37
data['AMD64']['OpTypes']['G_CC_OP_ROLW'] = 38
data['AMD64']['OpTypes']['G_CC_OP_ROLL'] = 39
data['AMD64']['OpTypes']['G_CC_OP_ROLQ'] = 40
data['AMD64']['OpTypes']['G_CC_OP_RORB'] = 41
data['AMD64']['OpTypes']['G_CC_OP_RORW'] = 42
data['AMD64']['OpTypes']['G_CC_OP_RORL'] = 43
data['AMD64']['OpTypes']['G_CC_OP_RORQ'] = 44
data['AMD64']['OpTypes']['G_CC_OP_UMULB'] = 45
data['AMD64']['OpTypes']['G_CC_OP_UMULW'] = 46
data['AMD64']['OpTypes']['G_CC_OP_UMULL'] = 47
data['AMD64']['OpTypes']['G_CC_OP_UMULQ'] = 48
data['AMD64']['OpTypes']['G_CC_OP_SMULB'] = 49
data['AMD64']['OpTypes']['G_CC_OP_SMULW'] = 50
data['AMD64']['OpTypes']['G_CC_OP_SMULL'] = 51
data['AMD64']['OpTypes']['G_CC_OP_SMULQ'] = 52
data['AMD64']['OpTypes']['G_CC_OP_NUMBER'] = 53
data['X86']['CondTypes']['CondO'] = 0
data['X86']['CondTypes']['CondNO'] = 1
data['X86']['CondTypes']['CondB'] = 2
data['X86']['CondTypes']['CondNB'] = 3
data['X86']['CondTypes']['CondZ'] = 4
data['X86']['CondTypes']['CondNZ'] = 5
data['X86']['CondTypes']['CondBE'] = 6
data['X86']['CondTypes']['CondNBE'] = 7
data['X86']['CondTypes']['CondS'] = 8
data['X86']['CondTypes']['CondNS'] = 9
data['X86']['CondTypes']['CondP'] = 10
data['X86']['CondTypes']['CondNP'] = 11
data['X86']['CondTypes']['CondL'] = 12
data['X86']['CondTypes']['CondNL'] = 13
data['X86']['CondTypes']['CondLE'] = 14
data['X86']['CondTypes']['CondNLE'] = 15
data['X86']['CondTypes']['CondAlways'] = 16
data['X86']['CondBitOffsets']['G_CC_SHIFT_O'] = 11
data['X86']['CondBitOffsets']['G_CC_SHIFT_S'] = 7
data['X86']['CondBitOffsets']['G_CC_SHIFT_Z'] = 6
data['X86']['CondBitOffsets']['G_CC_SHIFT_A'] = 4
data['X86']['CondBitOffsets']['G_CC_SHIFT_C'] = 0
data['X86']['CondBitOffsets']['G_CC_SHIFT_P'] = 2
# masks
data['X86']['CondBitMasks']['G_CC_MASK_O'] = (1 << data['X86']['CondBitOffsets']['G_CC_SHIFT_O'])
data['X86']['CondBitMasks']['G_CC_MASK_S'] = (1 << data['X86']['CondBitOffsets']['G_CC_SHIFT_S'])
data['X86']['CondBitMasks']['G_CC_MASK_Z'] = (1 << data['X86']['CondBitOffsets']['G_CC_SHIFT_Z'])
data['X86']['CondBitMasks']['G_CC_MASK_A'] = (1 << data['X86']['CondBitOffsets']['G_CC_SHIFT_A'])
data['X86']['CondBitMasks']['G_CC_MASK_C'] = (1 << data['X86']['CondBitOffsets']['G_CC_SHIFT_C'])
data['X86']['CondBitMasks']['G_CC_MASK_P'] = (1 << data['X86']['CondBitOffsets']['G_CC_SHIFT_P'])
data['X86']['OpTypes']['G_CC_OP_COPY'] = 0
data['X86']['OpTypes']['G_CC_OP_ADDB'] = 1
data['X86']['OpTypes']['G_CC_OP_ADDW'] = 2
data['X86']['OpTypes']['G_CC_OP_ADDL'] = 3
data['X86']['OpTypes']['G_CC_OP_SUBB'] = 4
data['X86']['OpTypes']['G_CC_OP_SUBW'] = 5
data['X86']['OpTypes']['G_CC_OP_SUBL'] = 6
data['X86']['OpTypes']['G_CC_OP_ADCB'] = 7
data['X86']['OpTypes']['G_CC_OP_ADCW'] = 8
data['X86']['OpTypes']['G_CC_OP_ADCL'] = 9
data['X86']['OpTypes']['G_CC_OP_SBBB'] = 10
data['X86']['OpTypes']['G_CC_OP_SBBW'] = 11
data['X86']['OpTypes']['G_CC_OP_SBBL'] = 12
data['X86']['OpTypes']['G_CC_OP_LOGICB'] = 13
data['X86']['OpTypes']['G_CC_OP_LOGICW'] = 14
data['X86']['OpTypes']['G_CC_OP_LOGICL'] = 15
data['X86']['OpTypes']['G_CC_OP_INCB'] = 16
data['X86']['OpTypes']['G_CC_OP_INCW'] = 17
data['X86']['OpTypes']['G_CC_OP_INCL'] = 18
data['X86']['OpTypes']['G_CC_OP_DECB'] = 19
data['X86']['OpTypes']['G_CC_OP_DECW'] = 20
data['X86']['OpTypes']['G_CC_OP_DECL'] = 21
data['X86']['OpTypes']['G_CC_OP_SHLB'] = 22
data['X86']['OpTypes']['G_CC_OP_SHLW'] = 23
data['X86']['OpTypes']['G_CC_OP_SHLL'] = 24
data['X86']['OpTypes']['G_CC_OP_SHRB'] = 25
data['X86']['OpTypes']['G_CC_OP_SHRW'] = 26
data['X86']['OpTypes']['G_CC_OP_SHRL'] = 27
data['X86']['OpTypes']['G_CC_OP_ROLB'] = 28
data['X86']['OpTypes']['G_CC_OP_ROLW'] = 29
data['X86']['OpTypes']['G_CC_OP_ROLL'] = 30
data['X86']['OpTypes']['G_CC_OP_RORB'] = 31
data['X86']['OpTypes']['G_CC_OP_RORW'] = 32
data['X86']['OpTypes']['G_CC_OP_RORL'] = 33
data['X86']['OpTypes']['G_CC_OP_UMULB'] = 34
data['X86']['OpTypes']['G_CC_OP_UMULW'] = 35
data['X86']['OpTypes']['G_CC_OP_UMULL'] = 36
data['X86']['OpTypes']['G_CC_OP_SMULB'] = 37
data['X86']['OpTypes']['G_CC_OP_SMULW'] = 38
data['X86']['OpTypes']['G_CC_OP_SMULL'] = 39
data['X86']['OpTypes']['G_CC_OP_NUMBER'] = 40
data['X86']['OpTypes']['G_CC_OP_SMULQ'] = None
data['X86']['OpTypes']['G_CC_OP_UMULQ'] = None
data['X86']['OpTypes']['G_CC_OP_RORQ'] = None
data['X86']['OpTypes']['G_CC_OP_ROLQ'] = None
data['X86']['OpTypes']['G_CC_OP_SHRQ'] = None
data['X86']['OpTypes']['G_CC_OP_SHLQ'] = None
data['X86']['OpTypes']['G_CC_OP_DECQ'] = None
data['X86']['OpTypes']['G_CC_OP_INCQ'] = None
data['X86']['OpTypes']['G_CC_OP_LOGICQ'] = None
data['X86']['OpTypes']['G_CC_OP_SBBQ'] = None
data['X86']['OpTypes']['G_CC_OP_ADCQ'] = None
data['X86']['OpTypes']['G_CC_OP_SUBQ'] = None
data['X86']['OpTypes']['G_CC_OP_ADDQ'] = None
data_inverted = { k_arch: { k_data_class: {y:x for (x,y) in d_data_class.items()} for k_data_class, d_data_class in d_arch.items() } for k_arch,d_arch in data.items() }
data['AMD64']['size'] = 64
data['X86']['size'] = 32
#
# AMD64 internal helpers
#
def pc_preamble(state, nbits, platform=None):
data_mask = state.solver.BVV(2 ** nbits - 1, nbits)
sign_mask = 1 << (nbits - 1)
return data_mask, sign_mask
def pc_make_rdata(nbits, cf, pf, af, zf, sf, of, platform=None):
return cf, pf, af, zf, sf, of
def pc_make_rdata_if_necessary(nbits, cf, pf, af, zf, sf, of, platform=None):
vec = [(data[platform]['CondBitOffsets']['G_CC_SHIFT_C'], cf),
(data[platform]['CondBitOffsets']['G_CC_SHIFT_P'], pf),
(data[platform]['CondBitOffsets']['G_CC_SHIFT_A'], af),
(data[platform]['CondBitOffsets']['G_CC_SHIFT_Z'], zf),
(data[platform]['CondBitOffsets']['G_CC_SHIFT_S'], sf),
(data[platform]['CondBitOffsets']['G_CC_SHIFT_O'], of)]
vec.sort(reverse=True)
return _concat_flags(nbits, vec)
def pc_actions_ADD(state, nbits, arg_l, arg_r, cc_ndep, platform=None):
data_mask, sign_mask = pc_preamble(state, nbits, platform=platform)
res = arg_l + arg_r
cf = state.solver.If(state.solver.ULT(res, arg_l), state.solver.BVV(1, 1), state.solver.BVV(0, 1))
pf = calc_paritybit(state, res)
af = (res ^ arg_l ^ arg_r)[data[platform]['CondBitOffsets']['G_CC_SHIFT_A']]
zf = calc_zerobit(state, res)
sf = res[nbits - 1:nbits - 1]
of = ((arg_l ^ arg_r ^ data_mask) & (arg_l ^ res))[nbits - 1:nbits - 1]
return pc_make_rdata(data[platform]['size'], cf, pf, af, zf, sf, of, platform=platform)
def pc_actions_SUB(state, nbits, arg_l, arg_r, cc_ndep, platform=None):
data_mask, sign_mask = pc_preamble(state, nbits, platform=platform)
res = arg_l - arg_r
cf = state.solver.If(state.solver.ULT(arg_l, arg_r), state.solver.BVV(1, 1), state.solver.BVV(0, 1))
pf = calc_paritybit(state, res)
af = (res ^ arg_l ^ arg_r)[data[platform]['CondBitOffsets']['G_CC_SHIFT_A']]
zf = calc_zerobit(state, res)
sf = res[nbits - 1:nbits - 1]
of = ((arg_l ^ arg_r) & (arg_l ^ res))[nbits - 1:nbits - 1]
return pc_make_rdata(data[platform]['size'], cf, pf, af, zf, sf, of, platform=platform)
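# Worked example (illustrative): pc_actions_SUB with nbits=8, arg_l=5, arg_r=7
# gives res=0xFE, so cf=1 (unsigned borrow), zf=0, sf=1, of=0, af=1 and pf=0 --
# the same flag values a real x86 `sub` of 5 - 7 would produce.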
def pc_actions_LOGIC(state, nbits, arg_l, arg_r, cc_ndep, platform=None):
data_mask, sign_mask = pc_preamble(state, nbits, platform=platform)
cf = state.solver.BVV(0, 1)
pf = calc_paritybit(state, arg_l)
af = state.solver.BVV(0, 1)
zf = calc_zerobit(state, arg_l)
sf = arg_l[nbits-1]
of = state.solver.BVV(0, 1)
return pc_make_rdata(data[platform]['size'], cf, pf, af, zf, sf, of, platform=platform)
def pc_actions_DEC(state, nbits, res, _, cc_ndep, platform=None):
data_mask, sign_mask = pc_preamble(state, nbits, platform=platform)
arg_l = res + 1
arg_r = 1
cf = (cc_ndep & data[platform]['CondBitMasks']['G_CC_MASK_C'])[data[platform]['CondBitOffsets']['G_CC_SHIFT_C']]
pf = calc_paritybit(state, res)
af = (res ^ arg_l ^ 1)[data[platform]['CondBitOffsets']['G_CC_SHIFT_A']]
zf = calc_zerobit(state, res)
sf = res[nbits-1]
of = state.solver.If(sf == arg_l[nbits-1], state.solver.BVV(0, 1), state.solver.BVV(1, 1))
return pc_make_rdata(data[platform]['size'], cf, pf, af, zf, sf, of, platform=platform)
def pc_actions_ADC(state, nbits, cc_dep1, cc_dep2, cc_ndep, platform=None):
old_c = cc_ndep & data[platform]['CondBitMasks']['G_CC_MASK_C']
arg_l = cc_dep1
arg_r = cc_dep2 ^ old_c
res = (arg_l + arg_r) + old_c
cf = state.solver.If(
old_c != 0,
state.solver.If(res <= arg_l, state.solver.BVV(1, 1), state.solver.BVV(0, 1)),
state.solver.If(res < arg_l, state.solver.BVV(1, 1), state.solver.BVV(0, 1))
)
pf = calc_paritybit(state, res)
af = (res ^ arg_l ^ arg_r)[data[platform]['CondBitOffsets']['G_CC_SHIFT_A']]
zf = calc_zerobit(state, res)
sf = res[nbits - 1]
of = ((arg_l ^ arg_r ^ -1) & (arg_l ^ res))[nbits-1]
return pc_make_rdata(data[platform]['size'], cf, pf, af, zf, sf, of, platform=platform)
def pc_actions_SBB(state, nbits, cc_dep1, cc_dep2, cc_ndep, platform=None):
data_mask, sign_mask = pc_preamble(state, nbits, platform=platform)
old_c = cc_ndep[data[platform]['CondBitOffsets']['G_CC_SHIFT_C']].zero_extend(nbits-1)
arg_l = cc_dep1
arg_r = cc_dep2 ^ old_c
res = (arg_l - arg_r) - old_c
cf_c = state.solver.If(state.solver.ULE(arg_l, arg_r), state.solver.BVV(1, 1), state.solver.BVV(0, 1))
cf_noc = state.solver.If(state.solver.ULT(arg_l, arg_r), state.solver.BVV(1, 1), state.solver.BVV(0, 1))
cf = state.solver.If(old_c == 1, cf_c, cf_noc)
pf = calc_paritybit(state, res)
af = (res ^ arg_l ^ arg_r)[data[platform]['CondBitOffsets']['G_CC_SHIFT_A']]
zf = calc_zerobit(state, res)
sf = res[nbits-1]
of = ((arg_l ^ arg_r) & (arg_l ^ res))[nbits-1]
return pc_make_rdata(data[platform]['size'], cf, pf, af, zf, sf, of, platform=platform)
def pc_actions_INC(state, nbits, res, _, cc_ndep, platform=None):
data_mask, sign_mask = pc_preamble(state, nbits, platform=platform)
arg_l = res - 1
arg_r = 1
cf = (cc_ndep & data[platform]['CondBitMasks']['G_CC_MASK_C'])[data[platform]['CondBitOffsets']['G_CC_SHIFT_C']]
pf = calc_paritybit(state, res)
af = (res ^ arg_l ^ 1)[data[platform]['CondBitOffsets']['G_CC_SHIFT_A']]
zf = calc_zerobit(state, res)
sf = res[nbits-1]
of = state.solver.If(sf == arg_l[nbits-1], state.solver.BVV(0, 1), state.solver.BVV(1, 1))
return pc_make_rdata(data[platform]['size'], cf, pf, af, zf, sf, of, platform=platform)
def pc_actions_SHL(state, nbits, remaining, shifted, cc_ndep, platform=None):
cf = ((remaining >> (nbits - 1)) & data[platform]['CondBitMasks']['G_CC_MASK_C'])[data[platform]['CondBitOffsets']['G_CC_SHIFT_C']]
pf = calc_paritybit(state, remaining[7:0])
af = state.solver.BVV(0, 1)
zf = calc_zerobit(state, remaining)
sf = remaining[nbits-1]
of = (remaining[0] ^ shifted[0])[0]
return pc_make_rdata(data[platform]['size'], cf, pf, af, zf, sf, of, platform=platform)
def pc_actions_SHR(state, nbits, remaining, shifted, cc_ndep, platform=None):
cf = state.solver.If(shifted & 1 != 0, state.solver.BVV(1, 1), state.solver.BVV(0, 1))
pf = calc_paritybit(state, remaining[7:0])
af = state.solver.BVV(0, 1)
zf = calc_zerobit(state, remaining)
sf = remaining[nbits-1]
of = (remaining[0] ^ shifted[0])[0]
return pc_make_rdata(data[platform]['size'], cf, pf, af, zf, sf, of, platform=platform)
def pc_actions_ROL(state, nbits, res, _, cc_ndep, platform=None):
cf = res[0]
pf = (cc_ndep & data[platform]['CondBitMasks']['G_CC_MASK_P'])[data[platform]['CondBitOffsets']['G_CC_SHIFT_P']]
af = (cc_ndep & data[platform]['CondBitMasks']['G_CC_MASK_A'])[data[platform]['CondBitOffsets']['G_CC_SHIFT_A']]
zf = (cc_ndep & data[platform]['CondBitMasks']['G_CC_MASK_Z'])[data[platform]['CondBitOffsets']['G_CC_SHIFT_Z']]
sf = (cc_ndep & data[platform]['CondBitMasks']['G_CC_MASK_S'])[data[platform]['CondBitOffsets']['G_CC_SHIFT_S']]
of = (state.solver.LShR(res, nbits-1) ^ res)[0]
return pc_make_rdata(data[platform]['size'], cf, pf, af, zf, sf, of, platform=platform)
def pc_actions_ROR(state, nbits, res, _, cc_ndep, platform=None):
cf = res[nbits-1]
pf = (cc_ndep & data[platform]['CondBitMasks']['G_CC_MASK_P'])[data[platform]['CondBitOffsets']['G_CC_SHIFT_P']]
af = (cc_ndep & data[platform]['CondBitMasks']['G_CC_MASK_A'])[data[platform]['CondBitOffsets']['G_CC_SHIFT_A']]
zf = (cc_ndep & data[platform]['CondBitMasks']['G_CC_MASK_Z'])[data[platform]['CondBitOffsets']['G_CC_SHIFT_Z']]
sf = (cc_ndep & data[platform]['CondBitMasks']['G_CC_MASK_S'])[data[platform]['CondBitOffsets']['G_CC_SHIFT_S']]
of = (res[nbits-1] ^ res[nbits-2])
return pc_make_rdata(data[platform]['size'], cf, pf, af, zf, sf, of, platform=platform)
def pc_actions_UMUL(state, nbits, cc_dep1, cc_dep2, cc_ndep, platform=None):
lo = (cc_dep1 * cc_dep2)[nbits - 1:0]
rr = lo
hi = (rr >> nbits)[nbits - 1:0]
cf = state.solver.If(hi != 0, state.solver.BVV(1, 1), state.solver.BVV(0, 1))
zf = calc_zerobit(state, lo)
pf = calc_paritybit(state, lo)
af = state.solver.BVV(0, 1)
sf = lo[nbits - 1]
of = cf
return pc_make_rdata(data[platform]['size'], cf, pf, af, zf, sf, of, platform=platform)
def pc_actions_UMULQ(*args, **kwargs):
l.error("Unsupported flag action UMULQ")
raise SimCCallError("Unsupported flag action. Please implement or bug Yan.")
def pc_actions_SMUL(state, nbits, cc_dep1, cc_dep2, cc_ndep, platform=None):
lo = (cc_dep1 * cc_dep2)[nbits - 1:0]
rr = lo
hi = (rr >> nbits)[nbits - 1:0]
cf = state.solver.If(hi != (lo >> (nbits - 1)), state.solver.BVV(1, 1), state.solver.BVV(0, 1))
zf = calc_zerobit(state, lo)
pf = calc_paritybit(state, lo)
af = state.solver.BVV(0, 1)
sf = lo[nbits - 1]
of = cf
return pc_make_rdata(data[platform]['size'], cf, pf, af, zf, sf, of, platform=platform)
def pc_actions_SMULQ(*args, **kwargs):
l.error("Unsupported flag action SMULQ")
raise SimCCallError("Unsupported flag action. Please implement or bug Yan.")
def pc_calculate_rdata_all_WRK(state, cc_op, cc_dep1_formal, cc_dep2_formal, cc_ndep_formal, platform=None):
# sanity check
if not isinstance(cc_op, int):
cc_op = flag_concretize(state, cc_op)
if cc_op == data[platform]['OpTypes']['G_CC_OP_COPY']:
l.debug("cc_op == data[platform]['OpTypes']['G_CC_OP_COPY']")
return cc_dep1_formal & (data[platform]['CondBitMasks']['G_CC_MASK_O'] | data[platform]['CondBitMasks']['G_CC_MASK_S'] | data[platform]['CondBitMasks']['G_CC_MASK_Z']
| data[platform]['CondBitMasks']['G_CC_MASK_A'] | data[platform]['CondBitMasks']['G_CC_MASK_C'] | data[platform]['CondBitMasks']['G_CC_MASK_P'])
cc_str = data_inverted[platform]['OpTypes'][cc_op]
nbits = _get_nbits(cc_str)
l.debug("nbits == %d", nbits)
cc_dep1_formal = cc_dep1_formal[nbits-1:0]
cc_dep2_formal = cc_dep2_formal[nbits-1:0]
cc_ndep_formal = cc_ndep_formal[nbits-1:0]
if cc_str in [ 'G_CC_OP_ADDB', 'G_CC_OP_ADDW', 'G_CC_OP_ADDL', 'G_CC_OP_ADDQ' ]:
l.debug("cc_str: ADD")
return pc_actions_ADD(state, nbits, cc_dep1_formal, cc_dep2_formal, cc_ndep_formal, platform=platform)
if cc_str in [ 'G_CC_OP_ADCB', 'G_CC_OP_ADCW', 'G_CC_OP_ADCL', 'G_CC_OP_ADCQ' ]:
l.debug("cc_str: ADC")
return pc_actions_ADC(state, nbits, cc_dep1_formal, cc_dep2_formal, cc_ndep_formal, platform=platform)
if cc_str in [ 'G_CC_OP_SUBB', 'G_CC_OP_SUBW', 'G_CC_OP_SUBL', 'G_CC_OP_SUBQ' ]:
l.debug("cc_str: SUB")
return pc_actions_SUB(state, nbits, cc_dep1_formal, cc_dep2_formal, cc_ndep_formal, platform=platform)
if cc_str in [ 'G_CC_OP_SBBB', 'G_CC_OP_SBBW', 'G_CC_OP_SBBL', 'G_CC_OP_SBBQ' ]:
l.debug("cc_str: SBB")
return pc_actions_SBB(state, nbits, cc_dep1_formal, cc_dep2_formal, cc_ndep_formal, platform=platform)
if cc_str in [ 'G_CC_OP_LOGICB', 'G_CC_OP_LOGICW', 'G_CC_OP_LOGICL', 'G_CC_OP_LOGICQ' ]:
l.debug("cc_str: LOGIC")
return pc_actions_LOGIC(state, nbits, cc_dep1_formal, cc_dep2_formal, cc_ndep_formal, platform=platform)
if cc_str in [ 'G_CC_OP_INCB', 'G_CC_OP_INCW', 'G_CC_OP_INCL', 'G_CC_OP_INCQ' ]:
l.debug("cc_str: INC")
return pc_actions_INC(state, nbits, cc_dep1_formal, cc_dep2_formal, cc_ndep_formal, platform=platform)
if cc_str in [ 'G_CC_OP_DECB', 'G_CC_OP_DECW', 'G_CC_OP_DECL', 'G_CC_OP_DECQ' ]:
l.debug("cc_str: DEC")
return pc_actions_DEC(state, nbits, cc_dep1_formal, cc_dep2_formal, cc_ndep_formal, platform=platform)
if cc_str in [ 'G_CC_OP_SHLB', 'G_CC_OP_SHLW', 'G_CC_OP_SHLL', 'G_CC_OP_SHLQ' ]:
l.debug("cc_str: SHL")
return pc_actions_SHL(state, nbits, cc_dep1_formal, cc_dep2_formal, cc_ndep_formal, platform=platform)
if cc_str in [ 'G_CC_OP_SHRB', 'G_CC_OP_SHRW', 'G_CC_OP_SHRL', 'G_CC_OP_SHRQ' ]:
l.debug("cc_str: SHR")
return pc_actions_SHR(state, nbits, cc_dep1_formal, cc_dep2_formal, cc_ndep_formal, platform=platform)
if cc_str in [ 'G_CC_OP_ROLB', 'G_CC_OP_ROLW', 'G_CC_OP_ROLL', 'G_CC_OP_ROLQ' ]:
l.debug("cc_str: ROL")
return pc_actions_ROL(state, nbits, cc_dep1_formal, cc_dep2_formal, cc_ndep_formal, platform=platform)
if cc_str in [ 'G_CC_OP_RORB', 'G_CC_OP_RORW', 'G_CC_OP_RORL', 'G_CC_OP_RORQ' ]:
l.debug("cc_str: ROR")
return pc_actions_ROR(state, nbits, cc_dep1_formal, cc_dep2_formal, cc_ndep_formal, platform=platform)
if cc_str in [ 'G_CC_OP_UMULB', 'G_CC_OP_UMULW', 'G_CC_OP_UMULL', 'G_CC_OP_UMULQ' ]:
l.debug("cc_str: UMUL")
return pc_actions_UMUL(state, nbits, cc_dep1_formal, cc_dep2_formal, cc_ndep_formal, platform=platform)
if cc_str == 'G_CC_OP_UMULQ':
l.debug("cc_str: UMULQ")
return pc_actions_UMULQ(state, nbits, cc_dep1_formal, cc_dep2_formal, cc_ndep_formal, platform=platform)
if cc_str in [ 'G_CC_OP_SMULB', 'G_CC_OP_SMULW', 'G_CC_OP_SMULL', 'G_CC_OP_SMULQ' ]:
l.debug("cc_str: SMUL")
return pc_actions_SMUL(state, nbits, cc_dep1_formal, cc_dep2_formal, cc_ndep_formal, platform=platform)
if cc_str == 'G_CC_OP_SMULQ':
l.debug("cc_str: SMULQ")
return pc_actions_SMULQ(state, nbits, cc_dep1_formal, cc_dep2_formal, cc_ndep_formal, platform=platform)
l.error("Unsupported cc_op %d in in pc_calculate_rdata_all_WRK", cc_op)
raise SimCCallError("Unsupported cc_op in pc_calculate_rdata_all_WRK")
# This function returns all the data
def pc_calculate_rdata_all(state, cc_op, cc_dep1, cc_dep2, cc_ndep, platform=None):
rdata_all = pc_calculate_rdata_all_WRK(state, cc_op, cc_dep1, cc_dep2, cc_ndep, platform=platform)
if isinstance(rdata_all, tuple):
return pc_make_rdata_if_necessary(data[platform]['size'], *rdata_all, platform=platform), [ ]
else:
return rdata_all, [ ]
# This function takes a condition that is being checked (ie, zero bit), and basically
# returns that bit
def pc_calculate_condition(state, cond, cc_op, cc_dep1, cc_dep2, cc_ndep, platform=None):
rdata_all = pc_calculate_rdata_all_WRK(state, cc_op, cc_dep1, cc_dep2, cc_ndep, platform=platform)
if isinstance(rdata_all, tuple):
cf, pf, af, zf, sf, of = rdata_all
if state.solver.symbolic(cond):
raise SimError("Hit a symbolic 'cond' in pc_calculate_condition. Panic.")
v = flag_concretize(state, cond)
inv = v & 1
l.debug("inv: %d", inv)
if v in [ data[platform]['CondTypes']['CondO'], data[platform]['CondTypes']['CondNO'] ]:
l.debug("CondO")
#of = state.solver.LShR(rdata, data[platform]['G_CC_SHIFT_O'])
r = 1 & (inv ^ of)
elif v in [ data[platform]['CondTypes']['CondZ'], data[platform]['CondTypes']['CondNZ'] ]:
l.debug("CondZ")
#zf = state.solver.LShR(rdata, data[platform]['G_CC_SHIFT_Z'])
r = 1 & (inv ^ zf)
elif v in [ data[platform]['CondTypes']['CondB'], data[platform]['CondTypes']['CondNB'] ]:
l.debug("CondB")
#cf = state.solver.LShR(rdata, data[platform]['G_CC_SHIFT_C'])
r = 1 & (inv ^ cf)
elif v in [ data[platform]['CondTypes']['CondBE'], data[platform]['CondTypes']['CondNBE'] ]:
l.debug("CondBE")
#cf = state.solver.LShR(rdata, data[platform]['G_CC_SHIFT_C'])
#zf = state.solver.LShR(rdata, data[platform]['G_CC_SHIFT_Z'])
r = 1 & (inv ^ (cf | zf))
elif v in [ data[platform]['CondTypes']['CondS'], data[platform]['CondTypes']['CondNS'] ]:
l.debug("CondS")
#sf = state.solver.LShR(rdata, data[platform]['G_CC_SHIFT_S'])
r = 1 & (inv ^ sf)
elif v in [ data[platform]['CondTypes']['CondP'], data[platform]['CondTypes']['CondNP'] ]:
l.debug("CondP")
#pf = state.solver.LShR(rdata, data[platform]['G_CC_SHIFT_P'])
r = 1 & (inv ^ pf)
elif v in [ data[platform]['CondTypes']['CondL'], data[platform]['CondTypes']['CondNL'] ]:
l.debug("CondL")
#sf = state.solver.LShR(rdata, data[platform]['G_CC_SHIFT_S'])
#of = state.solver.LShR(rdata, data[platform]['G_CC_SHIFT_O'])
r = 1 & (inv ^ (sf ^ of))
elif v in [ data[platform]['CondTypes']['CondLE'], data[platform]['CondTypes']['CondNLE'] ]:
l.debug("CondLE")
#sf = state.solver.LShR(rdata, data[platform]['G_CC_SHIFT_S'])
#of = state.solver.LShR(rdata, data[platform]['G_CC_SHIFT_O'])
#zf = state.solver.LShR(rdata, data[platform]['G_CC_SHIFT_Z'])
r = 1 & (inv ^ ((sf ^ of) | zf))
return state.solver.Concat(state.solver.BVV(0, state.arch.bits-1), r), [ ]
else:
rdata = rdata_all
if state.solver.symbolic(cond):
raise SimError("Hit a symbolic 'cond' in pc_calculate_condition. Panic.")
v = flag_concretize(state, cond)
inv = v & 1
l.debug("inv: %d", inv)
# THIS IS A FUCKING HACK
if v == 0xe:
# jle
pass
if v in [data[platform]['CondTypes']['CondO'], data[platform]['CondTypes']['CondNO']]:
l.debug("CondO")
of = state.solver.LShR(rdata, data[platform]['CondBitOffsets']['G_CC_SHIFT_O'])
return 1 & (inv ^ of), []
if v in [data[platform]['CondTypes']['CondZ'], data[platform]['CondTypes']['CondNZ']]:
l.debug("CondZ")
zf = state.solver.LShR(rdata, data[platform]['CondBitOffsets']['G_CC_SHIFT_Z'])
return 1 & (inv ^ zf), []
if v in [data[platform]['CondTypes']['CondB'], data[platform]['CondTypes']['CondNB']]:
l.debug("CondB")
cf = state.solver.LShR(rdata, data[platform]['CondBitOffsets']['G_CC_SHIFT_C'])
return 1 & (inv ^ cf), []
if v in [data[platform]['CondTypes']['CondBE'], data[platform]['CondTypes']['CondNBE']]:
l.debug("CondBE")
cf = state.solver.LShR(rdata, data[platform]['CondBitOffsets']['G_CC_SHIFT_C'])
zf = state.solver.LShR(rdata, data[platform]['CondBitOffsets']['G_CC_SHIFT_Z'])
return 1 & (inv ^ (cf | zf)), []
if v in [data[platform]['CondTypes']['CondS'], data[platform]['CondTypes']['CondNS']]:
l.debug("CondS")
sf = state.solver.LShR(rdata, data[platform]['CondBitOffsets']['G_CC_SHIFT_S'])
return 1 & (inv ^ sf), []
if v in [data[platform]['CondTypes']['CondP'], data[platform]['CondTypes']['CondNP']]:
l.debug("CondP")
pf = state.solver.LShR(rdata, data[platform]['CondBitOffsets']['G_CC_SHIFT_P'])
return 1 & (inv ^ pf), []
if v in [data[platform]['CondTypes']['CondL'], data[platform]['CondTypes']['CondNL']]:
l.debug("CondL")
sf = state.solver.LShR(rdata, data[platform]['CondBitOffsets']['G_CC_SHIFT_S'])
of = state.solver.LShR(rdata, data[platform]['CondBitOffsets']['G_CC_SHIFT_O'])
return 1 & (inv ^ (sf ^ of)), []
if v in [data[platform]['CondTypes']['CondLE'], data[platform]['CondTypes']['CondNLE']]:
l.debug("CondLE")
sf = state.solver.LShR(rdata, data[platform]['CondBitOffsets']['G_CC_SHIFT_S'])
of = state.solver.LShR(rdata, data[platform]['CondBitOffsets']['G_CC_SHIFT_O'])
zf = state.solver.LShR(rdata, data[platform]['CondBitOffsets']['G_CC_SHIFT_Z'])
return 1 & (inv ^ ((sf ^ of) | zf)), []
l.error("Unsupported condition %d in in pc_calculate_condition", v)
raise SimCCallError("Unrecognized condition in pc_calculate_condition")
#
# Simplified CCalls
#
# Simplified CCalls (whose names look like `pc_actions_<operation>_<condition>`) are a bunch of methods that generate
# straight-forward ASTs based on the operation and the condition, instead of blindly following the way that a CPU does
# the conditional flags calculation and generating messy and meaningless ASTs. It allows us to have a meaningful AST
# for each conditional flag, which greatly helps static analysis (like VSA).
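# Example (illustrative): for a `cmp`/`je` pair, cc_op is one of the SUB
# variants and cond is CondZ, so the simplified path dispatches to
# pc_actions_SUB_CondZ below and yields the clean AST
#   If(arg_l == arg_r, BVV(1, 1), BVV(0, 1))
# instead of rebuilding the whole flags word and extracting ZF from it.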
def _cond_flag(state, condition):
return state.solver.If(condition, state.solver.BVV(1, 1), state.solver.BVV(0, 1))
# TODO: Implement the missing ones
# General ops
def pc_actions_op_SUB(arg_l, arg_r, cc_ndep):
return arg_l - arg_r
def pc_actions_op_DEC(arg_l, arg_r, cc_ndep):
return arg_l - 1
def pc_actions_op_INC(arg_l, arg_r, cc_ndep):
return arg_l + 1
def pc_actions_op_SHR(arg_l, arg_r, cc_ndep):
return arg_l >> arg_r
def pc_actions_op_SHL(arg_l, arg_r, cc_ndep):
return arg_l << arg_r
def pc_actions_op_ADD(arg_l, arg_r, cc_ndep):
return arg_l + arg_r
def pc_actions_op_LOGIC(arg_l, arg_r, cc_ndep):
return arg_l
# General conditions
def pc_actions_cond_CondZ(state, cc_expr):
return _cond_flag(state, cc_expr == 0)
def pc_actions_cond_CondNZ(state, cc_expr):
return _cond_flag(state, cc_expr != 0)
def pc_actions_cond_CondS(state, cc_expr):
return _cond_flag(state, state.solver.SLT(cc_expr, 0))
def pc_actions_cond_CondB(state, cc_expr):
return _cond_flag(state, state.solver.ULT(cc_expr, 0))
def pc_actions_cond_CondBE(state, cc_expr):
return _cond_flag(state, state.solver.ULE(cc_expr, 0))
def pc_actions_cond_CondNBE(state, cc_expr):
return _cond_flag(state, state.solver.UGT(cc_expr, 0))
def pc_actions_cond_CondL(state, cc_expr):
return _cond_flag(state, state.solver.SLT(cc_expr, 0))
def pc_actions_cond_CondLE(state, cc_expr):
return _cond_flag(state, state.solver.SLE(cc_expr, 0))
def pc_actions_cond_CondNLE(state, cc_expr):
return _cond_flag(state, state.solver.SGT(cc_expr, 0))
# Specialized versions of (op,cond) to make claripy happy
def pc_actions_SUB_CondZ(state, arg_l, arg_r, cc_ndep):
return _cond_flag(state, arg_l == arg_r)
def pc_actions_SUB_CondNZ(state, arg_l, arg_r, cc_ndep):
return _cond_flag(state, arg_l != arg_r)
def pc_actions_SUB_CondB(state, arg_l, arg_r, cc_ndep):
return _cond_flag(state, state.solver.ULT(arg_l, arg_r))
def pc_actions_SUB_CondBE(state, arg_l, arg_r, cc_ndep):
return _cond_flag(state, state.solver.ULE(arg_l, arg_r))
def pc_actions_SUB_CondNBE(state, arg_l, arg_r, cc_ndep):
return _cond_flag(state, state.solver.UGT(arg_l, arg_r))
def pc_actions_SUB_CondL(state, arg_l, arg_r, cc_ndep):
return _cond_flag(state, state.solver.SLT(arg_l, arg_r))
def pc_actions_SUB_CondLE(state, arg_l, arg_r, cc_ndep):
return _cond_flag(state, state.solver.SLE(arg_l, arg_r))
def pc_actions_SUB_CondNLE(state, arg_l, arg_r, cc_ndep):
return _cond_flag(state, state.solver.SGT(arg_l, arg_r))
def pc_calculate_condition_simple(state, cond, cc_op, cc_dep1, cc_dep2, cc_ndep, platform=None):
"""
A simplified version of pc_calculate_condition(). Please refer to the documentation of Simplified CCalls above.
Limitation: symbolic flags are not supported for now.
"""
if state.solver.symbolic(cond):
raise SimError("Hit a symbolic 'cond' in pc_calculate_condition. Panic.")
v = flag_concretize(state, cond)
# Extract the operation
cc_op = flag_concretize(state, cc_op)
if cc_op == data[platform]['OpTypes']['G_CC_OP_COPY']:
raise SimCCallError("G_CC_OP_COPY is not supported in pc_calculate_condition_simple(). Consider implementing.")
if cc_op == data[platform]['OpTypes']['G_CC_OP_NUMBER']:
raise SimCCallError("G_CC_OP_NUMBER is not supported in pc_calculate_condition_simple(). Consider implementing.")
op = data_inverted[platform]['OpTypes'][cc_op]
nbits = _get_nbits(op)
op = op[8 : -1]
# Extract the condition
cond = None
# TODO: Convert it to a table-lookup later
for key, cond_val in data[platform]['CondTypes'].items():
if cond_val == v:
cond = key
break
cc_dep1_nbits = cc_dep1[nbits-1:0]
cc_dep2_nbits = cc_dep2[nbits-1:0]
# check for a specialized version first
funcname = "pc_actions_%s_%s" % (op, cond)
if funcname in globals():
r = globals()[funcname](state, cc_dep1_nbits, cc_dep2_nbits, cc_ndep)
else:
op_funcname = "pc_actions_op_%s" % op
cond_funcname = "pc_actions_cond_%s" % cond
if op_funcname in globals() and cond_funcname in globals():
cc_expr = globals()[op_funcname](cc_dep1_nbits, cc_dep2_nbits, cc_ndep)
r = globals()[cond_funcname](state, cc_expr)
else:
l.warning('Operation %s with condition %s is not supported in pc_calculate_condition_simple(). Consider implementing.', op, cond)
raise SimCCallError('Operation %s with condition %s not found.' % (op, cond))
return state.solver.Concat(state.solver.BVV(0, state.arch.bits - 1), r), []
def pc_calculate_rdata_c(state, cc_op, cc_dep1, cc_dep2, cc_ndep, platform=None):
cc_op = flag_concretize(state, cc_op)
if cc_op == data[platform]['OpTypes']['G_CC_OP_COPY']:
return state.solver.LShR(cc_dep1, data[platform]['CondBitOffsets']['G_CC_SHIFT_C']) & 1, [ ] # TODO: actual constraints
elif cc_op in ( data[platform]['OpTypes']['G_CC_OP_LOGICQ'], data[platform]['OpTypes']['G_CC_OP_LOGICL'], data[platform]['OpTypes']['G_CC_OP_LOGICW'], data[platform]['OpTypes']['G_CC_OP_LOGICB'] ):
return state.solver.BVV(0, state.arch.bits), [ ] # TODO: actual constraints
rdata_all = pc_calculate_rdata_all_WRK(state, cc_op,cc_dep1,cc_dep2,cc_ndep, platform=platform)
if isinstance(rdata_all, tuple):
cf, pf, af, zf, sf, of = rdata_all
return state.solver.Concat(state.solver.BVV(0, state.arch.bits-1), cf & 1), [ ]
else:
return state.solver.LShR(rdata_all, data[platform]['CondBitOffsets']['G_CC_SHIFT_C']) & 1, []
def generic_rotate_with_carry(state, left, arg, rot_amt, carry_bit_in, sz):
# returns cf, of, result
# make sure sz is not symbolic
if sz.symbolic:
raise SimError('Hit a symbolic "sz" in an x86 rotate with carry instruction. Panic.')
# convert sz to concrete value
sz = state.solver.eval_one(sz)
bits = sz * 8
bits_in = len(arg)
# construct bitvec to use for rotation amount - 9/17/33/65 bits
if bits > len(rot_amt):
raise SimError("Got a rotate instruction for data larger than the provided word size. Panic.")
if bits == len(rot_amt):
sized_amt = (rot_amt & (bits_in - 1)).zero_extend(1)
else:
sized_amt = (rot_amt & (bits_in - 1))[bits:0]
assert len(sized_amt) == bits + 1
# construct bitvec to use for rotating value - 9/17/33/65 bits
sized_arg_in = arg[bits-1:0]
rotatable_in = carry_bit_in.concat(sized_arg_in)
# compute and extract
op = state.solver.RotateLeft if left else state.solver.RotateRight
rotatable_out = op(rotatable_in, sized_amt)
sized_arg_out = rotatable_out[bits-1:0]
carry_bit_out = rotatable_out[bits]
arg_out = sized_arg_out.zero_extend(bits_in - bits)
if left:
overflow_bit_out = carry_bit_out ^ sized_arg_out[bits-1]
else:
overflow_bit_out = sized_arg_out[bits-1] ^ sized_arg_out[bits-2]
# construct final answer
return carry_bit_out, overflow_bit_out, arg_out
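# Worked example (illustrative): for an 8-bit rotate-left-through-carry of
# arg=0x80 by 1 with carry_bit_in=0, the 9-bit working value is CF:arg =
# 0b0_10000000; rotating left once gives 0b1_00000000, so carry_bit_out=1,
# arg_out=0x00 and overflow_bit_out = 1 ^ 0 = 1, matching x86 RCL semantics.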
###########################
### AMD64-specific ones ###
###########################
# https://github.com/angr/vex/blob/master/priv/guest_amd64_helpers.c#L2272
def amd64g_check_ldmxcsr(state, mxcsr):
# /* Decide on a rounding mode. mxcsr[14:13] holds it. */
# /* NOTE, encoded exactly as per enum IRRoundingMode. */
rmode = (mxcsr >> 13) & 3
# /* Detect any required emulation warnings. */
ew = EmNote_NONE
if ((mxcsr & 0x1F80) != 0x1F80).is_true:
# /* unmasked exceptions! */
ew = EmWarn_X86_sseExns
elif (mxcsr & (1 << 15)).is_true:
# /* FZ is set */
ew = EmWarn_X86_fz
elif (mxcsr & (1 << 6)).is_true:
# /* DAZ is set */
ew = EmWarn_X86_daz
return (ew << 32) | rmode, []
# https://github.com/angr/vex/blob/master/priv/guest_amd64_helpers.c#L2304
def amd64g_create_mxcsr(state, sseround):
sseround &= 3
return 0x1F80 | (sseround << 13), []
# https://github.com/angr/vex/blob/master/priv/guest_amd64_helpers.c#L2316
def amd64g_check_fldcw(state, fpucw):
rmode = (fpucw >> 10) & 3
ew = EmNote_NONE
if ((fpucw & 0x3f) != 0x3f).is_true:
# unmasked exceptions
ew = EmWarn_X86_x87exns
elif (((fpucw >> 8) & 3) != 3).is_true:
ew = EmWarn_X86_x87precision
return (ew << 32) | rmode, []
# https://github.com/angr/vex/blob/master/priv/guest_amd64_helpers.c#L2342
def amd64g_create_fpucw(state, fpround):
fpround &= 3
return 0x037f | (fpround << 10), []
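# Worked example (illustrative): amd64g_create_fpucw with fpround=2 yields
# 0x037f | (2 << 10) = 0x0b7f; feeding that back through amd64g_check_fldcw
# decodes rmode = (0x0b7f >> 10) & 3 = 2 with ew = EmNote_NONE, since all
# exception mask bits (0x3f) are set and the precision field is 3.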
def amd64g_calculate_RCL(state, arg, rot_amt, eflags_in, sz):
want_flags = state.solver.is_true(state.solver.SLT(sz, 0))
if want_flags: sz = -sz
carry_bit_in = eflags_in[data['AMD64']['CondBitOffsets']['G_CC_SHIFT_C']]
carry_bit_out, overflow_bit_out, arg_out = generic_rotate_with_carry(state, True, arg, rot_amt, carry_bit_in, sz)
if want_flags:
cf = carry_bit_out.zero_extend(63)
of = overflow_bit_out.zero_extend(63)
eflags_out = eflags_in
eflags_out &= ~(data['AMD64']['CondBitMasks']['G_CC_MASK_C'] | data['AMD64']['CondBitMasks']['G_CC_MASK_O'])
eflags_out |= (cf << data['AMD64']['CondBitOffsets']['G_CC_SHIFT_C']) | \
(of << data['AMD64']['CondBitOffsets']['G_CC_SHIFT_O'])
return eflags_out, []
else:
return arg_out, []
def amd64g_calculate_RCR(state, arg, rot_amt, eflags_in, sz):
want_flags = state.solver.is_true(state.solver.SLT(sz, 0))
if want_flags: sz = -sz
carry_bit_in = eflags_in[data['AMD64']['CondBitOffsets']['G_CC_SHIFT_C']]
carry_bit_out, overflow_bit_out, arg_out = generic_rotate_with_carry(state, False, arg, rot_amt, carry_bit_in, sz)
if want_flags:
cf = carry_bit_out.zero_extend(63)
of = overflow_bit_out.zero_extend(63)
eflags_out = eflags_in
eflags_out &= ~(data['AMD64']['CondBitMasks']['G_CC_MASK_C'] | data['AMD64']['CondBitMasks']['G_CC_MASK_O'])
eflags_out |= (cf << data['AMD64']['CondBitOffsets']['G_CC_SHIFT_C']) | \
(of << data['AMD64']['CondBitOffsets']['G_CC_SHIFT_O'])
return eflags_out, []
else:
return arg_out, []
def amd64g_calculate_mmx_pmaddwd(_state, xx, yy):
xx_3, xx_2, xx_1, xx_0 = xx.chop(16)
yy_3, yy_2, yy_1, yy_0 = yy.chop(16)
xx_3 = xx_3.sign_extend(16)
xx_2 = xx_2.sign_extend(16)
xx_1 = xx_1.sign_extend(16)
xx_0 = xx_0.sign_extend(16)
yy_3 = yy_3.sign_extend(16)
yy_2 = yy_2.sign_extend(16)
yy_1 = yy_1.sign_extend(16)
yy_0 = yy_0.sign_extend(16)
res_1 = xx_3 * yy_3 + xx_2 * yy_2
res_0 = xx_1 * yy_1 + xx_0 * yy_0
return claripy.Concat(res_1, res_0), []
def amd64g_calculate_condition(state, cond, cc_op, cc_dep1, cc_dep2, cc_ndep):
if USE_SIMPLIFIED_CCALLS in state.options:
try:
return pc_calculate_condition_simple(state, cond, cc_op, cc_dep1, cc_dep2, cc_ndep, platform='AMD64')
except KeyError:
pass
return pc_calculate_condition(state, cond, cc_op, cc_dep1, cc_dep2, cc_ndep, platform='AMD64')
def amd64g_calculate_rflags_all(state, cc_op, cc_dep1, cc_dep2, cc_ndep):
return pc_calculate_rdata_all(state, cc_op, cc_dep1, cc_dep2, cc_ndep, platform='AMD64')
def amd64g_calculate_rflags_c(state, cc_op, cc_dep1, cc_dep2, cc_ndep):
return pc_calculate_rdata_c(state, cc_op, cc_dep1, cc_dep2, cc_ndep, platform='AMD64')
###########################
### X86-specific ones ###
###########################
def x86g_calculate_RCL(state, arg, rot_amt, eflags_in, sz):
carry_bit_in = eflags_in[data['X86']['CondBitOffsets']['G_CC_SHIFT_C']]
carry_bit_out, overflow_bit_out, arg_out = generic_rotate_with_carry(state, True, arg, rot_amt, carry_bit_in, sz)
cf = carry_bit_out.zero_extend(31)
of = overflow_bit_out.zero_extend(31)
eflags_out = eflags_in
eflags_out &= ~(data['X86']['CondBitMasks']['G_CC_MASK_C'] | data['X86']['CondBitMasks']['G_CC_MASK_O'])
eflags_out |= (cf << data['X86']['CondBitOffsets']['G_CC_SHIFT_C']) | \
(of << data['X86']['CondBitOffsets']['G_CC_SHIFT_O'])
return eflags_out.concat(arg_out), []
def x86g_calculate_RCR(state, arg, rot_amt, eflags_in, sz):
carry_bit_in = eflags_in[data['X86']['CondBitOffsets']['G_CC_SHIFT_C']]
carry_bit_out, overflow_bit_out, arg_out = generic_rotate_with_carry(state, False, arg, rot_amt, carry_bit_in, sz)
cf = carry_bit_out.zero_extend(31)
of = overflow_bit_out.zero_extend(31)
eflags_out = eflags_in
eflags_out &= ~(data['X86']['CondBitMasks']['G_CC_MASK_C'] | data['X86']['CondBitMasks']['G_CC_MASK_O'])
eflags_out |= (cf << data['X86']['CondBitOffsets']['G_CC_SHIFT_C']) | \
(of << data['X86']['CondBitOffsets']['G_CC_SHIFT_O'])
return eflags_out.concat(arg_out), []
def x86g_calculate_condition(state, cond, cc_op, cc_dep1, cc_dep2, cc_ndep):
if USE_SIMPLIFIED_CCALLS in state.options:
return pc_calculate_condition_simple(state, cond, cc_op, cc_dep1, cc_dep2, cc_ndep, platform='X86')
else:
return pc_calculate_condition(state, cond, cc_op, cc_dep1, cc_dep2, cc_ndep, platform='X86')
def x86g_calculate_eflags_all(state, cc_op, cc_dep1, cc_dep2, cc_ndep):
return pc_calculate_rdata_all(state, cc_op, cc_dep1, cc_dep2, cc_ndep, platform='X86')
def x86g_calculate_eflags_c(state, cc_op, cc_dep1, cc_dep2, cc_ndep):
return pc_calculate_rdata_c(state, cc_op, cc_dep1, cc_dep2, cc_ndep, platform='X86')
def x86g_check_fldcw(state, fpucw):
return ((fpucw >> 10) & 3).zero_extend(32), ()
def x86g_create_fpucw(state, fpround):
return 0x037f | ((fpround & 3) << 10), ()
def x86g_calculate_daa_das_aaa_aas(state, flags_and_AX, opcode):
assert len(flags_and_AX) == 32
assert not opcode.symbolic
opcode = state.solver.eval(opcode)
r_O = flags_and_AX[data['X86']['CondBitOffsets']['G_CC_SHIFT_O'] + 16].zero_extend(31)
r_S = flags_and_AX[data['X86']['CondBitOffsets']['G_CC_SHIFT_S'] + 16].zero_extend(31)
r_Z = flags_and_AX[data['X86']['CondBitOffsets']['G_CC_SHIFT_Z'] + 16].zero_extend(31)
r_A = flags_and_AX[data['X86']['CondBitOffsets']['G_CC_SHIFT_A'] + 16].zero_extend(31)
r_C = flags_and_AX[data['X86']['CondBitOffsets']['G_CC_SHIFT_C'] + 16].zero_extend(31)
r_P = flags_and_AX[data['X86']['CondBitOffsets']['G_CC_SHIFT_P'] + 16].zero_extend(31)
r_AL = (flags_and_AX >> 0) & 0xFF
r_AH = (flags_and_AX >> 8) & 0xFF
zero = state.solver.BVV(0, 32)
one = state.solver.BVV(1, 32)
if opcode == 0x27: # DAA
old_AL = r_AL
old_C = r_C
condition = state.solver.Or((r_AL & 0xF) > 9, r_A == 1)
r_AL = state.solver.If(condition, r_AL + 6, old_AL)
r_C = state.solver.If(condition, state.solver.If(r_AL >= 0x100, one, old_C), zero)
r_A = state.solver.If(condition, one, zero)
condition = state.solver.Or(old_AL > 0x99, old_C == 1)
r_AL = state.solver.If(condition, r_AL + 0x60, r_AL)
r_C = state.solver.If(condition, one, zero)
r_AL = r_AL&0xFF
r_O = zero
r_S = state.solver.If((r_AL & 0x80) != 0, one, zero)
r_Z = state.solver.If(r_AL == 0, one, zero)
r_P = calc_paritybit(state, r_AL).zero_extend(31)
elif opcode == 0x2F: # DAS
old_AL = r_AL
old_C = r_C
condition = state.solver.Or((r_AL & 0xF) > 9, r_A == 1)
r_AL = state.solver.If(condition, r_AL - 6, old_AL)
r_C = state.solver.If(condition, state.solver.If(r_AL < 6, one, zero), zero)
r_A = state.solver.If(condition, one, zero)
condition = state.solver.Or(old_AL > 0x99, old_C == 1)
r_AL = state.solver.If(condition, r_AL - 0x60, r_AL)
r_C = state.solver.If(condition, one, zero)
r_AL &= 0xFF
r_O = zero
r_S = state.solver.If((r_AL & 0x80) != 0, one, zero)
r_Z = state.solver.If(r_AL == 0, one, zero)
r_P = calc_paritybit(state, r_AL).zero_extend(31)
elif opcode == 0x37: # AAA
nudge = r_AL > 0xF9
condition = state.solver.Or((r_AL & 0xF) > 9, r_A == 1)
r_AL = state.solver.If(condition, (r_AL + 6) & 0xF, r_AL & 0xF)
r_AH = state.solver.If(condition, state.solver.If(nudge, r_AH + 2, r_AH + 1), r_AH)
r_A = state.solver.If(condition, one, zero)
r_C = state.solver.If(condition, one, zero)
r_O = r_S = r_Z = r_P = 0
elif opcode == 0x3F: # AAS
nudge = r_AL < 0x06
condition = state.solver.Or((r_AL & 0xF) > 9, r_A == 1)
r_AL = state.solver.If(condition, (r_AL - 6) & 0xF, r_AL & 0xF)
r_AH = state.solver.If(condition, state.solver.If(nudge, r_AH - 2, r_AH - 1), r_AH)
r_A = state.solver.If(condition, one, zero)
r_C = state.solver.If(condition, one, zero)
r_O = r_S = r_Z = r_P = 0
result = ( (r_O & 1) << (16 + data['X86']['CondBitOffsets']['G_CC_SHIFT_O']) ) \
| ( (r_S & 1) << (16 + data['X86']['CondBitOffsets']['G_CC_SHIFT_S']) ) \
| ( (r_Z & 1) << (16 + data['X86']['CondBitOffsets']['G_CC_SHIFT_Z']) ) \
| ( (r_A & 1) << (16 + data['X86']['CondBitOffsets']['G_CC_SHIFT_A']) ) \
| ( (r_C & 1) << (16 + data['X86']['CondBitOffsets']['G_CC_SHIFT_C']) ) \
| ( (r_P & 1) << (16 + data['X86']['CondBitOffsets']['G_CC_SHIFT_P']) ) \
| ( (r_AH & 0xFF) << 8 ) \
| ( (r_AL & 0xFF) << 0 )
return result, []
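# Worked example (illustrative): DAA (opcode 0x27) with AL=0x0f, AF=0, CF=0
# (e.g. after adding the BCD digits 0x08 + 0x07) takes only the first branch:
# AL becomes 0x15, AF is set and CF stays 0, the standard BCD adjustment.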
def x86g_calculate_aad_aam(state, flags_and_AX, opcode):
assert len(flags_and_AX) == 32
assert not opcode.symbolic
opcode = state.solver.eval(opcode)
r_AL = (flags_and_AX >> 0) & 0xFF
r_AH = (flags_and_AX >> 8) & 0xFF
if opcode == 0xD4: # AAM
r_AH = r_AL // 10
r_AL = r_AL % 10
elif opcode == 0xD5: # AAD
r_AL = ((r_AH * 10) + r_AL) & 0xff
r_AH = state.solver.BVV(0, 32)
else:
raise SimCCallError("Unknown opcode %#x in AAD/AAM ccall" % opcode)
r_O = state.solver.BVV(0, 32)
r_C = state.solver.BVV(0, 32)
r_A = state.solver.BVV(0, 32)
r_S = r_AL[7].zero_extend(31)
r_Z = state.solver.If(r_AL == 0, state.solver.BVV(1, 32), state.solver.BVV(0, 32))
r_P = calc_paritybit(state, r_AL).zero_extend(31)
result = ( (r_O & 1) << (16 + data['X86']['CondBitOffsets']['G_CC_SHIFT_O']) ) \
| ( (r_S & 1) << (16 + data['X86']['CondBitOffsets']['G_CC_SHIFT_S']) ) \
| ( (r_Z & 1) << (16 + data['X86']['CondBitOffsets']['G_CC_SHIFT_Z']) ) \
| ( (r_A & 1) << (16 + data['X86']['CondBitOffsets']['G_CC_SHIFT_A']) ) \
| ( (r_C & 1) << (16 + data['X86']['CondBitOffsets']['G_CC_SHIFT_C']) ) \
| ( (r_P & 1) << (16 + data['X86']['CondBitOffsets']['G_CC_SHIFT_P']) ) \
| ( (r_AH & 0xFF) << 8 ) \
| ( (r_AL & 0xFF) << 0 )
return result, []
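# Worked example (illustrative): AAM (opcode 0xD4) with AL=0x2a (42) splits the
# value into decimal digits, giving AH=4 and AL=2; ZF and SF are then
# recomputed from the new AL as in the code above.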
#
# x86 segment selection
#
# Reference for the GDT entry layout
# http://wiki.osdev.org/Global_Descriptor_Table
def get_segdescr_base(state, descriptor):
lo = descriptor[31:16]
mid = descriptor[39:32]
hi = descriptor[63:56]
return state.solver.Concat(hi, mid, lo)
def get_segdescr_limit(state, descriptor):
granularity = descriptor[55]
lo = descriptor[15:0]
hi = descriptor[51:48]
limit = state.solver.Concat(hi, lo).zero_extend(12)
if state.solver.is_true(granularity == 0):
return limit
else:
return (limit << 12) | 0xfff
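# Worked example (illustrative): for the classic flat 4 GiB code descriptor
# 0x00cf9a000000ffff, get_segdescr_base() concatenates bits [63:56], [39:32]
# and [31:16] into base 0x00000000, while get_segdescr_limit() reads the raw
# limit 0xfffff with the granularity bit [55] set, returning
# (0xfffff << 12) | 0xfff = 0xffffffff.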
def x86g_use_seg_selector(state, ldt, gdt, seg_selector, virtual_addr):
# TODO Read/write/exec bit handling
def bad(msg):
if msg:
l.warning("x86g_use_seg_selector: %s", msg)
return state.solver.BVV(1 << 32, state.arch.bits).zero_extend(32), ()
if state.solver.is_true(seg_selector & ~0xFFFF != 0):
return bad("invalid selector (" + str(seg_selector) + ")")
if virtual_addr.length == 16:
virtual_addr = virtual_addr.zero_extend(16)
# are we in real mode?
if state.arch.vex_archinfo['x86_cr0'] & 1 == 0:
return ((seg_selector << 4) + virtual_addr).zero_extend(32), ()
seg_selector &= 0x0000FFFF
segment_selector_val = state.solver.eval(seg_selector >> 3)
if state.project.simos.name == "Win32" and segment_selector_val == 0x6 and state.project.concrete_target is not None:
return bad("angr doesn't support Windows Heaven's gate calls http://rce.co/knockin-on-heavens-gate-dynamic-processor-mode-switching/ \n"
"Please use the native 32 bit libs (not WoW64) or implement a simprocedure to avoid executing these instructions"
)
# RPL=11 check
#if state.solver.is_true((seg_selector & 3) != 3):
# return bad()
tiBit = (seg_selector >> 2) & 1
if state.solver.is_true(tiBit == 0):
# GDT access
gdt_value = state.solver.eval_one(gdt)
if gdt_value == 0:
return ((seg_selector << 16) + virtual_addr).zero_extend(32), ()
seg_selector >>= 3 # bits 3 to 15 are the index in the table
seg_selector = seg_selector.zero_extend(32)
gdt_limit = gdt[15:0]
if state.solver.is_true(seg_selector >= gdt_limit.zero_extend(48)):
return bad("index out of range")
gdt_base = gdt[47:16]
gdt_base_value = state.solver.eval_one(gdt_base)
descriptor = state.memory.load(gdt_base_value + seg_selector * 8, 8, endness='Iend_LE')
else:
# LDT access
ldt_value = state.solver.eval_one(ldt)
if ldt_value == 0:
return ((seg_selector << 16) + virtual_addr).zero_extend(32), ()
seg_selector >>= 3 # bits 3 to 15 are the index in the table
seg_selector = seg_selector.zero_extend(32)
ldt_limit = ldt[15:0]
if state.solver.is_true(seg_selector >= ldt_limit.zero_extend(48)):
return bad("index out of range")
ldt_base = ldt[47:16]
ldt_base_value = state.solver.eval_one(ldt_base)
descriptor = state.memory.load(ldt_base_value + seg_selector * 8, 8, endness='Iend_LE')
present = descriptor[47]
if state.solver.is_true(present == 0):
return bad("present bit set to 0")
base = get_segdescr_base(state, descriptor)
limit = get_segdescr_limit(state, descriptor)
# When a concrete target is set and memory is read directly from the process sometimes a negative offset
# from a segment register is used
# if state.solver.is_true(virtual_addr >= limit) and state.project.concrete_target is None:
# return bad("virtual_addr >= limit")
r = (base + virtual_addr).zero_extend(32)
l.debug("x86g_use_seg_selector: addr=%s", str(r))
return r, ()
#
# other amd64 craziness
#
EmNote_NONE = 0
EmWarn_X86_x87exns = 1
EmWarn_X86_x87precision = 2
EmWarn_X86_sseExns = 3
EmWarn_X86_fz = 4
EmWarn_X86_daz = 5
EmWarn_X86_acFlag = 6
EmWarn_PPCexns = 7
EmWarn_PPC64_redir_overflow = 8
EmWarn_PPC64_redir_underflow = 9
EmWarn_S390X_fpext_rounding = 10
EmWarn_S390X_invalid_rounding = 11
EmFail_S390X_stfle = 12
EmFail_S390X_stckf = 13
EmFail_S390X_ecag = 14
EmFail_S390X_pfpo = 15
EmFail_S390X_DFP_insn = 16
EmFail_S390X_fpext = 17
EmFail_S390X_invalid_PFPO_rounding_mode = 18
EmFail_S390X_invalid_PFPO_function = 19
def amd64g_create_mxcsr(state, sseround):
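    # 0x1F80 sets the six SSE exception mask bits (MXCSR bits 12:7); the guest's
    # rounding mode is placed in bits 14:13.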
return 0x1F80 | ((sseround & 3) << 13), ()
def amd64g_check_ldmxcsr(state, mxcsr):
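    # Recover the rounding mode from MXCSR bits 14:13 and emit an emulation
    # warning if any SSE exception is unmasked or the FTZ/DAZ bits are set.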
rmode = state.solver.LShR(mxcsr, 13) & 3
ew = state.solver.If(
(mxcsr & 0x1F80) != 0x1F80,
state.solver.BVV(EmWarn_X86_sseExns, 64),
state.solver.If(
mxcsr & (1<<15) != 0,
state.solver.BVV(EmWarn_X86_fz, 64),
state.solver.If(
mxcsr & (1<<6) != 0,
state.solver.BVV(EmWarn_X86_daz, 64),
state.solver.BVV(EmNote_NONE, 64)
)
)
)
return (ew << 32) | rmode, ()
#################
### ARM Flags ###
#################
ARMCondEQ = 0 # /* equal : Z=1 */
ARMCondNE = 1 # /* not equal : Z=0 */
ARMCondHS = 2 # /* >=u (higher or same) : C=1 */
ARMCondLO = 3 # /* <u (lower) : C=0 */
ARMCondMI = 4 # /* minus (negative) : N=1 */
ARMCondPL = 5 # /* plus (zero or +ve) : N=0 */
ARMCondVS = 6 # /* overflow : V=1 */
ARMCondVC = 7 # /* no overflow : V=0 */
ARMCondHI = 8 # /* >u (higher) : C=1 && Z=0 */
ARMCondLS = 9 # /* <=u (lower or same) : C=0 || Z=1 */
ARMCondGE = 10 # /* >=s (signed greater or equal) : N=V */
ARMCondLT = 11 # /* <s (signed less than) : N!=V */
ARMCondGT = 12 # /* >s (signed greater) : Z=0 && N=V */
ARMCondLE = 13 # /* <=s (signed less or equal) : Z=1 || N!=V */
ARMCondAL = 14 # /* always (unconditional) : 1 */
ARMCondNV = 15 # /* never (unconditional): : 0 */
ARMG_CC_OP_COPY = 0 # /* DEP1 = NZCV in 31:28, DEP2 = 0, DEP3 = 0 just copy DEP1 to output */
ARMG_CC_OP_ADD = 1 # /* DEP1 = argL (Rn), DEP2 = argR (shifter_op), DEP3 = 0 */
ARMG_CC_OP_SUB = 2 # /* DEP1 = argL (Rn), DEP2 = argR (shifter_op), DEP3 = 0 */
ARMG_CC_OP_ADC = 3 # /* DEP1 = argL (Rn), DEP2 = arg2 (shifter_op), DEP3 = oldC (in LSB) */
ARMG_CC_OP_SBB = 4 # /* DEP1 = argL (Rn), DEP2 = arg2 (shifter_op), DEP3 = oldC (in LSB) */
ARMG_CC_OP_LOGIC = 5 # /* DEP1 = result, DEP2 = shifter_carry_out (in LSB), DEP3 = old V flag (in LSB) */
ARMG_CC_OP_MUL = 6 # /* DEP1 = result, DEP2 = 0, DEP3 = oldC:old_V (in bits 1:0) */
ARMG_CC_OP_MULL = 7 # /* DEP1 = resLO32, DEP2 = resHI32, DEP3 = oldC:old_V (in bits 1:0) */
ARMG_CC_OP_NUMBER = 8
ARMG_CC_SHIFT_N = 31
ARMG_CC_SHIFT_Z = 30
ARMG_CC_SHIFT_C = 29
ARMG_CC_SHIFT_V = 28
ARMG_CC_SHIFT_Q = 27
ARMG_NBITS = 32
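# VEX models ARM flags lazily as a "thunk": cc_op records which operation last
# set the flags and cc_dep1..cc_dep3 hold its operands, so the helpers below
# recompute each flag bit on demand instead of storing NZCV eagerly.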
def armg_calculate_flag_n(state, cc_op, cc_dep1, cc_dep2, cc_dep3):
concrete_op = flag_concretize(state, cc_op)
flag = None
if concrete_op == ARMG_CC_OP_COPY:
flag = state.solver.LShR(cc_dep1, ARMG_CC_SHIFT_N) & 1
elif concrete_op == ARMG_CC_OP_ADD:
res = cc_dep1 + cc_dep2
flag = state.solver.LShR(res, 31)
elif concrete_op == ARMG_CC_OP_SUB:
res = cc_dep1 - cc_dep2
flag = state.solver.LShR(res, 31)
elif concrete_op == ARMG_CC_OP_ADC:
res = cc_dep1 + cc_dep2 + cc_dep3
flag = state.solver.LShR(res, 31)
elif concrete_op == ARMG_CC_OP_SBB:
res = cc_dep1 - cc_dep2 - (cc_dep3^1)
flag = state.solver.LShR(res, 31)
elif concrete_op == ARMG_CC_OP_LOGIC:
flag = state.solver.LShR(cc_dep1, 31)
elif concrete_op == ARMG_CC_OP_MUL:
flag = state.solver.LShR(cc_dep1, 31)
elif concrete_op == ARMG_CC_OP_MULL:
flag = state.solver.LShR(cc_dep2, 31)
if flag is not None: return flag, [ cc_op == concrete_op ]
l.error("Unknown cc_op %s (armg_calculate_flag_n)", cc_op)
raise SimCCallError("Unknown cc_op %s" % cc_op)
def arm_zerobit(state, x):
return calc_zerobit(state, x).zero_extend(31)
def armg_calculate_flag_z(state, cc_op, cc_dep1, cc_dep2, cc_dep3):
concrete_op = flag_concretize(state, cc_op)
flag = None
if concrete_op == ARMG_CC_OP_COPY:
flag = state.solver.LShR(cc_dep1, ARMG_CC_SHIFT_Z) & 1
elif concrete_op == ARMG_CC_OP_ADD:
res = cc_dep1 + cc_dep2
flag = arm_zerobit(state, res)
elif concrete_op == ARMG_CC_OP_SUB:
res = cc_dep1 - cc_dep2
flag = arm_zerobit(state, res)
elif concrete_op == ARMG_CC_OP_ADC:
res = cc_dep1 + cc_dep2 + cc_dep3
flag = arm_zerobit(state, res)
elif concrete_op == ARMG_CC_OP_SBB:
res = cc_dep1 - cc_dep2 - (cc_dep3^1)
flag = arm_zerobit(state, res)
elif concrete_op == ARMG_CC_OP_LOGIC:
flag = arm_zerobit(state, cc_dep1)
elif concrete_op == ARMG_CC_OP_MUL:
flag = arm_zerobit(state, cc_dep1)
elif concrete_op == ARMG_CC_OP_MULL:
flag = arm_zerobit(state, cc_dep1 | cc_dep2)
if flag is not None: return flag, [ cc_op == concrete_op ]
l.error("Unknown cc_op %s (armg_calculate_flag_z)", concrete_op)
raise SimCCallError("Unknown cc_op %s" % concrete_op)
def armg_calculate_flag_c(state, cc_op, cc_dep1, cc_dep2, cc_dep3):
concrete_op = flag_concretize(state, cc_op)
flag = None
if concrete_op == ARMG_CC_OP_COPY:
flag = state.solver.LShR(cc_dep1, ARMG_CC_SHIFT_C) & 1
elif concrete_op == ARMG_CC_OP_ADD:
res = cc_dep1 + cc_dep2
flag = boolean_extend(state, state.solver.ULT, res, cc_dep1, 32)
elif concrete_op == ARMG_CC_OP_SUB:
flag = boolean_extend(state, state.solver.UGE, cc_dep1, cc_dep2, 32)
elif concrete_op == ARMG_CC_OP_ADC:
res = cc_dep1 + cc_dep2 + cc_dep3
flag = state.solver.If(cc_dep3 != 0, boolean_extend(state, state.solver.ULE, res, cc_dep1, 32), boolean_extend(state, state.solver.ULT, res, cc_dep1, 32))
elif concrete_op == ARMG_CC_OP_SBB:
flag = state.solver.If(cc_dep3 != 0, boolean_extend(state, state.solver.UGE, cc_dep1, cc_dep2, 32), boolean_extend(state, state.solver.UGT, cc_dep1, cc_dep2, 32))
elif concrete_op == ARMG_CC_OP_LOGIC:
flag = cc_dep2
elif concrete_op == ARMG_CC_OP_MUL:
flag = (state.solver.LShR(cc_dep3, 1)) & 1
elif concrete_op == ARMG_CC_OP_MULL:
flag = (state.solver.LShR(cc_dep3, 1)) & 1
if flag is not None: return flag, [ cc_op == concrete_op ]
l.error("Unknown cc_op %s (armg_calculate_flag_c)", cc_op)
raise SimCCallError("Unknown cc_op %s" % cc_op)
def armg_calculate_flag_v(state, cc_op, cc_dep1, cc_dep2, cc_dep3):
concrete_op = flag_concretize(state, cc_op)
flag = None
if concrete_op == ARMG_CC_OP_COPY:
flag = state.solver.LShR(cc_dep1, ARMG_CC_SHIFT_V) & 1
elif concrete_op == ARMG_CC_OP_ADD:
res = cc_dep1 + cc_dep2
v = ((res ^ cc_dep1) & (res ^ cc_dep2))
flag = state.solver.LShR(v, 31)
elif concrete_op == ARMG_CC_OP_SUB:
res = cc_dep1 - cc_dep2
v = ((cc_dep1 ^ cc_dep2) & (cc_dep1 ^ res))
flag = state.solver.LShR(v, 31)
elif concrete_op == ARMG_CC_OP_ADC:
res = cc_dep1 + cc_dep2 + cc_dep3
v = ((res ^ cc_dep1) & (res ^ cc_dep2))
flag = state.solver.LShR(v, 31)
elif concrete_op == ARMG_CC_OP_SBB:
res = cc_dep1 - cc_dep2 - (cc_dep3^1)
v = ((cc_dep1 ^ cc_dep2) & (cc_dep1 ^ res))
flag = state.solver.LShR(v, 31)
elif concrete_op == ARMG_CC_OP_LOGIC:
flag = cc_dep3
elif concrete_op == ARMG_CC_OP_MUL:
flag = cc_dep3 & 1
elif concrete_op == ARMG_CC_OP_MULL:
flag = cc_dep3 & 1
if flag is not None: return flag, [ cc_op == concrete_op ]
l.error("Unknown cc_op %s (armg_calculate_flag_v)", cc_op)
raise SimCCallError("Unknown cc_op %s" % cc_op)
def armg_calculate_flags_nzcv(state, cc_op, cc_dep1, cc_dep2, cc_dep3):
# NOTE: adding constraints afterwards works here *only* because the constraints are actually useless, because we require
# cc_op to be unique. If we didn't, we'd need to pass the constraints into any functions called after the constraints were
# created.
n, c1 = armg_calculate_flag_n(state, cc_op, cc_dep1, cc_dep2, cc_dep3)
z, c2 = armg_calculate_flag_z(state, cc_op, cc_dep1, cc_dep2, cc_dep3)
c, c3 = armg_calculate_flag_c(state, cc_op, cc_dep1, cc_dep2, cc_dep3)
v, c4 = armg_calculate_flag_v(state, cc_op, cc_dep1, cc_dep2, cc_dep3)
vec = [(ARMG_CC_SHIFT_N, state.solver.Extract(0, 0, n)),
(ARMG_CC_SHIFT_Z, state.solver.Extract(0, 0, z)),
(ARMG_CC_SHIFT_C, state.solver.Extract(0, 0, c)),
(ARMG_CC_SHIFT_V, state.solver.Extract(0, 0, v))]
return _concat_flags(ARMG_NBITS, vec), c1 + c2 + c3 + c4
def armg_calculate_condition(state, cond_n_op, cc_dep1, cc_dep2, cc_dep3):
cond = state.solver.LShR(cond_n_op, 4)
cc_op = cond_n_op & 0xF
inv = cond & 1
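    # ARM condition codes come in complementary pairs (EQ/NE, HS/LO, ...), so the
    # low bit of the condition number simply inverts the underlying flag test.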
concrete_cond = flag_concretize(state, cond)
flag = None
c1,c2,c3 = [ ], [ ], [ ]
# NOTE: adding constraints afterwards works here *only* because the constraints are actually useless, because we require
# cc_op to be unique. If we didn't, we'd need to pass the constraints into any functions called after the constraints were
# created.
if concrete_cond == ARMCondAL:
flag = state.solver.BVV(1, 32)
elif concrete_cond in [ ARMCondEQ, ARMCondNE ]:
zf, c1 = armg_calculate_flag_z(state, cc_op, cc_dep1, cc_dep2, cc_dep3)
flag = inv ^ zf
elif concrete_cond in [ ARMCondHS, ARMCondLO ]:
cf, c1 = armg_calculate_flag_c(state, cc_op, cc_dep1, cc_dep2, cc_dep3)
flag = inv ^ cf
elif concrete_cond in [ ARMCondMI, ARMCondPL ]:
nf, c1 = armg_calculate_flag_n(state, cc_op, cc_dep1, cc_dep2, cc_dep3)
flag = inv ^ nf
elif concrete_cond in [ ARMCondVS, ARMCondVC ]:
vf, c1 = armg_calculate_flag_v(state, cc_op, cc_dep1, cc_dep2, cc_dep3)
flag = inv ^ vf
elif concrete_cond in [ ARMCondHI, ARMCondLS ]:
cf, c1 = armg_calculate_flag_c(state, cc_op, cc_dep1, cc_dep2, cc_dep3)
zf, c2 = armg_calculate_flag_z(state, cc_op, cc_dep1, cc_dep2, cc_dep3)
flag = inv ^ (cf & ~zf)
elif concrete_cond in [ ARMCondGE, ARMCondLT ]:
nf, c1 = armg_calculate_flag_n(state, cc_op, cc_dep1, cc_dep2, cc_dep3)
vf, c2 = armg_calculate_flag_v(state, cc_op, cc_dep1, cc_dep2, cc_dep3)
flag = inv ^ (1 & ~(nf ^ vf))
elif concrete_cond in [ ARMCondGT, ARMCondLE ]:
nf, c1 = armg_calculate_flag_n(state, cc_op, cc_dep1, cc_dep2, cc_dep3)
vf, c2 = armg_calculate_flag_v(state, cc_op, cc_dep1, cc_dep2, cc_dep3)
zf, c3 = armg_calculate_flag_z(state, cc_op, cc_dep1, cc_dep2, cc_dep3)
flag = inv ^ (1 & ~(zf | (nf ^ vf)))
if flag is not None: return flag, [ cond == concrete_cond ] + c1 + c2 + c3
l.error("Unrecognized condition %d in armg_calculate_condition", concrete_cond)
raise SimCCallError("Unrecognized condition %d in armg_calculate_condition" % concrete_cond)
ARM64G_CC_SHIFT_N = 31
ARM64G_CC_SHIFT_Z = 30
ARM64G_CC_SHIFT_C = 29
ARM64G_CC_SHIFT_V = 28
ARM64G_CC_OP_COPY=0 #/* DEP1 = NZCV in 31:28, DEP2 = 0, DEP3 = 0 just copy DEP1 to output */
ARM64G_CC_OP_ADD32=1 #/* DEP1 = argL (Rn), DEP2 = argR (shifter_op), DEP3 = 0 */
ARM64G_CC_OP_ADD64=2 #/* DEP1 = argL (Rn), DEP2 = argR (shifter_op), DEP3 = 0 */
ARM64G_CC_OP_SUB32=3 #/* DEP1 = argL (Rn), DEP2 = argR (shifter_op), DEP3 = 0 */
ARM64G_CC_OP_SUB64=4 #/* DEP1 = argL (Rn), DEP2 = argR (shifter_op), DEP3 = 0 */
ARM64G_CC_OP_ADC32=5 #/* DEP1 = argL (Rn), DEP2 = arg2 (shifter_op), DEP3 = oldC (in LSB) */
ARM64G_CC_OP_ADC64=6 #/* DEP1 = argL (Rn), DEP2 = arg2 (shifter_op), DEP3 = oldC (in LSB) */
ARM64G_CC_OP_SBC32=7 #/* DEP1 = argL (Rn), DEP2 = arg2 (shifter_op), DEP3 = oldC (in LSB) */
ARM64G_CC_OP_SBC64=8 #/* DEP1 = argL (Rn), DEP2 = arg2 (shifter_op), DEP3 = oldC (in LSB) */
ARM64G_CC_OP_LOGIC32=9 #/* DEP1 = result, DEP2 = 0, DEP3 = 0 */
ARM64G_CC_OP_LOGIC64=10 #/* DEP1 = result, DEP2 = 0, DEP3 = 0 */
ARM64G_CC_OP_NUMBER=11 #
ARM64CondEQ = 0 #/* equal : Z=1 */
ARM64CondNE = 1 #/* not equal : Z=0 */
ARM64CondCS = 2 #/* >=u (higher or same) (aka HS) : C=1 */
ARM64CondCC = 3 #/* <u (lower) (aka LO) : C=0 */
ARM64CondMI = 4 #/* minus (negative) : N=1 */
ARM64CondPL = 5 #/* plus (zero or +ve) : N=0 */
ARM64CondVS = 6 #/* overflow : V=1 */
ARM64CondVC = 7 #/* no overflow : V=0 */
ARM64CondHI = 8 #/* >u (higher) : C=1 && Z=0 */
ARM64CondLS = 9 #/* <=u (lower or same) : C=0 || Z=1 */
ARM64CondGE = 10 #/* >=s (signed greater or equal) : N=V */
ARM64CondLT = 11 #/* <s (signed less than) : N!=V */
ARM64CondGT = 12 #/* >s (signed greater) : Z=0 && N=V */
ARM64CondLE = 13 #/* <=s (signed less or equal) : Z=1 || N!=V */
ARM64CondAL = 14 #/* always (unconditional) : 1 */
ARM64CondNV = 15 #/* always (unconditional) : 1 */
ARM64G_NBITS = 64
def arm64g_calculate_flag_n(state, cc_op, cc_dep1, cc_dep2, cc_dep3):
concrete_op = flag_concretize(state, cc_op)
flag = None
cc_dep1, cc_dep2, cc_dep3 = arm64g_32bit_truncate_operands(concrete_op, cc_dep1, cc_dep2, cc_dep3)
if concrete_op == ARM64G_CC_OP_COPY:
flag = state.solver.LShR(cc_dep1, ARM64G_CC_SHIFT_N) & 1
elif concrete_op == ARM64G_CC_OP_ADD32:
res = cc_dep1 + cc_dep2
flag = state.solver.LShR(res, 31)
elif concrete_op == ARM64G_CC_OP_ADD64:
res = cc_dep1 + cc_dep2
flag = state.solver.LShR(res, 63)
elif concrete_op == ARM64G_CC_OP_SUB32:
res = cc_dep1 - cc_dep2
flag = state.solver.LShR(res, 31)
elif concrete_op == ARM64G_CC_OP_SUB64:
res = cc_dep1 - cc_dep2
flag = state.solver.LShR(res, 63)
elif concrete_op == ARM64G_CC_OP_ADC32:
res = cc_dep1 + cc_dep2 + cc_dep3
flag = state.solver.LShR(res, 31)
elif concrete_op == ARM64G_CC_OP_ADC64:
res = cc_dep1 + cc_dep2 + cc_dep3
flag = state.solver.LShR(res, 63)
elif concrete_op == ARM64G_CC_OP_SBC32:
res = cc_dep1 - cc_dep2 - (cc_dep3^1)
flag = state.solver.LShR(res, 31)
elif concrete_op == ARM64G_CC_OP_SBC64:
res = cc_dep1 - cc_dep2 - (cc_dep3^1)
flag = state.solver.LShR(res, 63)
elif concrete_op == ARM64G_CC_OP_LOGIC32:
flag = state.solver.LShR(cc_dep1, 31)
elif concrete_op == ARM64G_CC_OP_LOGIC64:
flag = state.solver.LShR(cc_dep1, 63)
if flag is not None: return flag, [ cc_op == concrete_op ]
l.error("Unknown cc_op %s (arm64g_calculate_flag_n)", cc_op)
raise SimCCallError("Unknown cc_op %s" % cc_op)
def arm64_zerobit(state, x):
return calc_zerobit(state, x).zero_extend(63)
def u64_to_u32(n):
return n[31:0]
def arm64g_32bit_truncate_operands(cc_op, cc_dep1, cc_dep2, cc_dep3):
# Truncate operands if in 32-bit mode
if cc_op in {ARM64G_CC_OP_ADD32, ARM64G_CC_OP_SUB32}:
cc_dep1 = u64_to_u32(cc_dep1)
cc_dep2 = u64_to_u32(cc_dep2)
elif cc_op in {ARM64G_CC_OP_ADC32, ARM64G_CC_OP_SBC32}:
cc_dep1 = u64_to_u32(cc_dep1)
cc_dep2 = u64_to_u32(cc_dep2)
cc_dep3 = u64_to_u32(cc_dep3)
elif cc_op == ARM64G_CC_OP_LOGIC32:
cc_dep1 = u64_to_u32(cc_dep1)
return cc_dep1, cc_dep2, cc_dep3
def arm64g_calculate_flag_z(state, cc_op, cc_dep1, cc_dep2, cc_dep3):
concrete_op = flag_concretize(state, cc_op)
flag = None
cc_dep1, cc_dep2, cc_dep3 = arm64g_32bit_truncate_operands(concrete_op, cc_dep1, cc_dep2, cc_dep3)
if concrete_op == ARM64G_CC_OP_COPY:
flag = state.solver.LShR(cc_dep1, ARM64G_CC_SHIFT_Z) & 1
elif concrete_op in (ARM64G_CC_OP_ADD32, ARM64G_CC_OP_ADD64):
res = cc_dep1 + cc_dep2
flag = arm64_zerobit(state, res)
elif concrete_op in (ARM64G_CC_OP_SUB32, ARM64G_CC_OP_SUB64):
res = cc_dep1 - cc_dep2
flag = arm64_zerobit(state, res)
elif concrete_op in (ARM64G_CC_OP_ADC32, ARM64G_CC_OP_ADC64):
res = cc_dep1 + cc_dep2 + cc_dep3
flag = arm64_zerobit(state, res)
elif concrete_op in (ARM64G_CC_OP_SBC32, ARM64G_CC_OP_SBC64):
res = cc_dep1 - cc_dep2 - (cc_dep3^1)
flag = arm64_zerobit(state, res)
elif concrete_op in (ARM64G_CC_OP_LOGIC32, ARM64G_CC_OP_LOGIC64):
flag = arm64_zerobit(state, cc_dep1)
if flag is not None: return flag, [ cc_op == concrete_op ]
l.error("Unknown cc_op %s (arm64g_calculate_flag_z)", concrete_op)
raise SimCCallError("Unknown cc_op %s" % concrete_op)
def arm64g_calculate_flag_c(state, cc_op, cc_dep1, cc_dep2, cc_dep3):
concrete_op = flag_concretize(state, cc_op)
flag = None
cc_dep1, cc_dep2, cc_dep3 = arm64g_32bit_truncate_operands(concrete_op, cc_dep1, cc_dep2, cc_dep3)
if concrete_op == ARM64G_CC_OP_COPY:
flag = state.solver.LShR(cc_dep1, ARM64G_CC_SHIFT_C) & 1
elif concrete_op in (ARM64G_CC_OP_ADD32, ARM64G_CC_OP_ADD64):
res = cc_dep1 + cc_dep2
flag = boolean_extend(state, state.solver.ULT, res, cc_dep1, 64)
elif concrete_op in (ARM64G_CC_OP_SUB32, ARM64G_CC_OP_SUB64):
flag = boolean_extend(state, state.solver.UGE, cc_dep1, cc_dep2, 64)
elif concrete_op in (ARM64G_CC_OP_ADC32, ARM64G_CC_OP_ADC64):
res = cc_dep1 + cc_dep2 + cc_dep3
        # the old carry (cc_dep3) selects whether the unsigned check is <= or <
        flag = state.solver.If(cc_dep3 != 0, boolean_extend(state, state.solver.ULE, res, cc_dep1, 64), boolean_extend(state, state.solver.ULT, res, cc_dep1, 64))
elif concrete_op in (ARM64G_CC_OP_SBC32, ARM64G_CC_OP_SBC64):
        # likewise, the old carry (cc_dep3) selects between >= and > for the borrow check
        flag = state.solver.If(cc_dep3 != 0, boolean_extend(state, state.solver.UGE, cc_dep1, cc_dep2, 64), boolean_extend(state, state.solver.UGT, cc_dep1, cc_dep2, 64))
elif concrete_op in (ARM64G_CC_OP_LOGIC32, ARM64G_CC_OP_LOGIC64):
flag = state.solver.BVV(0, 64) # C after logic is zero on arm64
if flag is not None: return flag, [ cc_op == concrete_op ]
l.error("Unknown cc_op %s (arm64g_calculate_flag_c)", cc_op)
raise SimCCallError("Unknown cc_op %s" % cc_op)
def arm64g_calculate_flag_v(state, cc_op, cc_dep1, cc_dep2, cc_dep3):
concrete_op = flag_concretize(state, cc_op)
flag = None
cc_dep1, cc_dep2, cc_dep3 = arm64g_32bit_truncate_operands(concrete_op, cc_dep1, cc_dep2, cc_dep3)
if concrete_op == ARM64G_CC_OP_COPY:
flag = state.solver.LShR(cc_dep1, ARM64G_CC_SHIFT_V) & 1
elif concrete_op == ARM64G_CC_OP_ADD32:
cc_dep1 = cc_dep1[31:0]
cc_dep2 = cc_dep2[31:0]
res = cc_dep1 + cc_dep2
v = ((res ^ cc_dep1) & (res ^ cc_dep2))
flag = state.solver.LShR(v, 31).zero_extend(32)
elif concrete_op == ARM64G_CC_OP_ADD64:
res = cc_dep1 + cc_dep2
v = ((res ^ cc_dep1) & (res ^ cc_dep2))
flag = state.solver.LShR(v, 63)
elif concrete_op == ARM64G_CC_OP_SUB32:
cc_dep1 = cc_dep1[31:0]
cc_dep2 = cc_dep2[31:0]
res = cc_dep1 - cc_dep2
v = ((cc_dep1 ^ cc_dep2) & (cc_dep1 ^ res))
flag = state.solver.LShR(v, 31).zero_extend(32)
elif concrete_op == ARM64G_CC_OP_SUB64:
res = cc_dep1 - cc_dep2
v = ((cc_dep1 ^ cc_dep2) & (cc_dep1 ^ res))
flag = state.solver.LShR(v, 63)
elif concrete_op == ARM64G_CC_OP_ADC32:
cc_dep1 = cc_dep1[31:0]
cc_dep2 = cc_dep2[31:0]
res = cc_dep1 + cc_dep2 + cc_dep3
v = ((res ^ cc_dep1) & (res ^ cc_dep2))
flag = state.solver.LShR(v, 31).zero_extend(32)
elif concrete_op == ARM64G_CC_OP_ADC64:
res = cc_dep1 + cc_dep2 + cc_dep3
v = ((res ^ cc_dep1) & (res ^ cc_dep2))
flag = state.solver.LShR(v, 63)
elif concrete_op == ARM64G_CC_OP_SBC32:
cc_dep1 = cc_dep1[31:0]
cc_dep2 = cc_dep2[31:0]
res = cc_dep1 - cc_dep2 - (cc_dep3^1)
v = ((cc_dep1 ^ cc_dep2) & (cc_dep1 ^ res))
flag = state.solver.LShR(v, 31).zero_extend(32)
elif concrete_op == ARM64G_CC_OP_SBC64:
res = cc_dep1 - cc_dep2 - (cc_dep3^1)
v = ((cc_dep1 ^ cc_dep2) & (cc_dep1 ^ res))
flag = state.solver.LShR(v, 63)
elif concrete_op in (ARM64G_CC_OP_LOGIC32, ARM64G_CC_OP_LOGIC64):
flag = state.solver.BVV(0, 64)
if flag is not None: return flag, [ cc_op == concrete_op ]
l.error("Unknown cc_op %s (arm64g_calculate_flag_v)", cc_op)
raise SimCCallError("Unknown cc_op %s" % cc_op)
def arm64g_calculate_data_nzcv(state, cc_op, cc_dep1, cc_dep2, cc_dep3):
# NOTE: adding constraints afterwards works here *only* because the constraints are actually useless, because we require
# cc_op to be unique. If we didn't, we'd need to pass the constraints into any functions called after the constraints were
# created.
n, c1 = arm64g_calculate_flag_n(state, cc_op, cc_dep1, cc_dep2, cc_dep3)
z, c2 = arm64g_calculate_flag_z(state, cc_op, cc_dep1, cc_dep2, cc_dep3)
c, c3 = arm64g_calculate_flag_c(state, cc_op, cc_dep1, cc_dep2, cc_dep3)
v, c4 = arm64g_calculate_flag_v(state, cc_op, cc_dep1, cc_dep2, cc_dep3)
vec = [(ARM64G_CC_SHIFT_N, state.solver.Extract(0, 0, n)),
(ARM64G_CC_SHIFT_Z, state.solver.Extract(0, 0, z)),
(ARM64G_CC_SHIFT_C, state.solver.Extract(0, 0, c)),
(ARM64G_CC_SHIFT_V, state.solver.Extract(0, 0, v))]
return _concat_flags(ARM64G_NBITS, vec), c1 + c2 + c3 + c4
def arm64g_calculate_condition(state, cond_n_op, cc_dep1, cc_dep2, cc_dep3):
cond = state.solver.LShR(cond_n_op, 4)
cc_op = cond_n_op & 0xF
inv = cond & 1
concrete_cond = flag_concretize(state, cond)
flag = None
c1,c2,c3 = [ ], [ ], [ ]
if concrete_cond in (ARM64CondAL, ARM64CondNV):
flag = state.solver.BVV(1, 64)
elif concrete_cond in (ARM64CondEQ, ARM64CondNE):
zf, c1 = arm64g_calculate_flag_z(state, cc_op, cc_dep1, cc_dep2, cc_dep3)
flag = inv ^ zf
elif concrete_cond in (ARM64CondCS, ARM64CondCC):
cf, c1 = arm64g_calculate_flag_c(state, cc_op, cc_dep1, cc_dep2, cc_dep3)
flag = inv ^ cf
elif concrete_cond in (ARM64CondMI, ARM64CondPL):
nf, c1 = arm64g_calculate_flag_n(state, cc_op, cc_dep1, cc_dep2, cc_dep3)
flag = inv ^ nf
elif concrete_cond in (ARM64CondVS, ARM64CondVC):
vf, c1 = arm64g_calculate_flag_v(state, cc_op, cc_dep1, cc_dep2, cc_dep3)
flag = inv ^ vf
elif concrete_cond in (ARM64CondHI, ARM64CondLS):
cf, c1 = arm64g_calculate_flag_c(state, cc_op, cc_dep1, cc_dep2, cc_dep3)
zf, c2 = arm64g_calculate_flag_z(state, cc_op, cc_dep1, cc_dep2, cc_dep3)
flag = inv ^ (1 & (cf & ~zf))
elif concrete_cond in (ARM64CondGE, ARM64CondLT):
nf, c1 = arm64g_calculate_flag_n(state, cc_op, cc_dep1, cc_dep2, cc_dep3)
vf, c2 = arm64g_calculate_flag_v(state, cc_op, cc_dep1, cc_dep2, cc_dep3)
flag = inv ^ (1 & ~(nf ^ vf))
elif concrete_cond in (ARM64CondGT, ARM64CondLE):
nf, c1 = arm64g_calculate_flag_n(state, cc_op, cc_dep1, cc_dep2, cc_dep3)
vf, c2 = arm64g_calculate_flag_v(state, cc_op, cc_dep1, cc_dep2, cc_dep3)
zf, c3 = arm64g_calculate_flag_z(state, cc_op, cc_dep1, cc_dep2, cc_dep3)
flag = inv ^ (1 & ~(zf | (nf ^ vf)))
if flag is not None: return flag, [ cond == concrete_cond ] + c1 + c2 + c3
l.error("Unrecognized condition %d in arm64g_calculate_condition", concrete_cond)
raise SimCCallError("Unrecognized condition %d in arm64g_calculate_condition" % concrete_cond)
#
# Some helpers
#
def _get_flags(state):
if state.arch.name == 'X86':
return x86g_calculate_eflags_all(state, state.regs.cc_op, state.regs.cc_dep1, state.regs.cc_dep2, state.regs.cc_ndep)
elif state.arch.name == 'AMD64':
return amd64g_calculate_rflags_all(state, state.regs.cc_op, state.regs.cc_dep1, state.regs.cc_dep2, state.regs.cc_ndep)
elif is_arm_arch(state.arch):
return armg_calculate_flags_nzcv(state, state.regs.cc_op, state.regs.cc_dep1, state.regs.cc_dep2, state.regs.cc_ndep)
elif state.arch.name == 'AARCH64':
return arm64g_calculate_data_nzcv(state, state.regs.cc_op, state.regs.cc_dep1, state.regs.cc_dep2, state.regs.cc_ndep)
else:
l.warning("No such thing as a flags register for arch %s", state.arch.name)
return None
def _concat_flags(nbits, flags_vec):
"""
Concatenate different flag BVs to a single BV. Currently used for ARM, X86
and AMD64.
:param nbits : platform size in bits.
:param flags_vec: vector of flag BVs and their offset in the resulting BV.
:type nbits : int
:type flags_vec : list
:return : the resulting flag BV.
:rtype : claripy.BVV
"""
result = claripy.BVV(0, 0)
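    # Build the register MSB-first: pad with zeros down to each flag's bit
    # position, append the flag bit, then zero-fill the remaining low bits.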
for offset, bit in flags_vec:
current_position = nbits - 1 - result.length
result = result.concat(claripy.BVV(0, current_position - offset), bit)
result = result.concat(claripy.BVV(0, nbits - result.length))
return result
def _get_nbits(cc_str):
nbits = None
if cc_str.endswith('B'):
nbits = 8
elif cc_str.endswith('W'):
nbits = 16
elif cc_str.endswith('L'):
nbits = 32
elif cc_str.endswith('Q'):
nbits = 64
return nbits
from ...errors import SimError, SimCCallError
from ...sim_options import USE_SIMPLIFIED_CCALLS
|
# Copyright 2018, The TensorFlow Federated Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
from tensorflow_federated.python.common_libs import test
from tensorflow_federated.python.core.api import computation_types
from tensorflow_federated.python.core.impl import tree_to_cc_transformations
from tensorflow_federated.python.core.impl.compiler import building_block_factory
from tensorflow_federated.python.core.impl.compiler import building_blocks
from tensorflow_federated.python.core.impl.compiler import tensorflow_computation_factory
from tensorflow_federated.python.core.impl.compiler import transformation_utils
from tensorflow_federated.python.core.impl.compiler import tree_transformations
from tensorflow_federated.python.core.impl.context_stack import set_default_context
from tensorflow_federated.python.core.impl.executors import execution_context
from tensorflow_federated.python.core.impl.executors import executor_stacks
from tensorflow_federated.python.core.impl.wrappers import computation_wrapper_instances
def _create_compiled_computation(py_fn, parameter_type):
proto, type_signature = tensorflow_computation_factory.create_computation_for_py_fn(
py_fn, parameter_type)
return building_blocks.CompiledComputation(
proto, type_signature=type_signature)
def parse_tff_to_tf(comp):
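  """Parses a TFF building block into a single TF computation where possible.

  A brief sketch of the pipeline: TF identities are inserted at the leaves,
  called lambdas are replaced with blocks, block locals are inlined and
  selections from tuples are collapsed, after which TFParser folds the
  remaining structure into one CompiledComputation.
  """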
comp, _ = tree_transformations.insert_called_tf_identity_at_leaves(comp)
parser_callable = tree_to_cc_transformations.TFParser()
comp, _ = tree_transformations.replace_called_lambda_with_block(comp)
comp, _ = tree_transformations.inline_block_locals(comp)
comp, _ = tree_transformations.replace_selection_from_tuple_with_element(comp)
new_comp, transformed = transformation_utils.transform_postorder(
comp, parser_callable)
return new_comp, transformed
class ParseTFFToTFTest(test.TestCase):
def test_raises_on_none(self):
with self.assertRaises(TypeError):
parse_tff_to_tf(None)
def test_does_not_transform_standalone_intrinsic(self):
type_signature = computation_types.TensorType(tf.int32)
standalone_intrinsic = building_blocks.Intrinsic('dummy', type_signature)
non_transformed, _ = parse_tff_to_tf(standalone_intrinsic)
self.assertEqual(standalone_intrinsic.compact_representation(),
non_transformed.compact_representation())
def test_replaces_lambda_to_selection_from_called_graph_with_tf_of_same_type(
self):
identity_tf_block_type = computation_types.StructType(
[tf.int32, tf.float32])
identity_tf_block = building_block_factory.create_compiled_identity(
identity_tf_block_type)
tuple_ref = building_blocks.Reference('x', [tf.int32, tf.float32])
called_tf_block = building_blocks.Call(identity_tf_block, tuple_ref)
selection_from_call = building_blocks.Selection(called_tf_block, index=1)
lambda_wrapper = building_blocks.Lambda('x', [tf.int32, tf.float32],
selection_from_call)
parsed, modified = parse_tff_to_tf(lambda_wrapper)
exec_lambda = computation_wrapper_instances.building_block_to_computation(
lambda_wrapper)
exec_tf = computation_wrapper_instances.building_block_to_computation(
parsed)
self.assertIsInstance(parsed, building_blocks.CompiledComputation)
self.assertTrue(modified)
# TODO(b/157172423): change to assertEqual when Py container is preserved.
parsed.type_signature.check_equivalent_to(lambda_wrapper.type_signature)
self.assertEqual(exec_lambda([0, 1.]), exec_tf([0, 1.]))
def test_replaces_lambda_to_called_graph_with_tf_of_same_type(self):
identity_tf_block_type = computation_types.TensorType(tf.int32)
identity_tf_block = building_block_factory.create_compiled_identity(
identity_tf_block_type)
int_ref = building_blocks.Reference('x', tf.int32)
called_tf_block = building_blocks.Call(identity_tf_block, int_ref)
lambda_wrapper = building_blocks.Lambda('x', tf.int32, called_tf_block)
parsed, modified = parse_tff_to_tf(lambda_wrapper)
exec_lambda = computation_wrapper_instances.building_block_to_computation(
lambda_wrapper)
exec_tf = computation_wrapper_instances.building_block_to_computation(
parsed)
self.assertIsInstance(parsed, building_blocks.CompiledComputation)
self.assertTrue(modified)
# TODO(b/157172423): change to assertEqual when Py container is preserved.
parsed.type_signature.check_equivalent_to(lambda_wrapper.type_signature)
self.assertEqual(exec_lambda(2), exec_tf(2))
def test_replaces_lambda_to_called_graph_on_selection_from_arg_with_tf_of_same_type(
self):
identity_tf_block_type = computation_types.TensorType(tf.int32)
identity_tf_block = building_block_factory.create_compiled_identity(
identity_tf_block_type)
tuple_ref = building_blocks.Reference('x', [tf.int32, tf.float32])
selected_int = building_blocks.Selection(tuple_ref, index=0)
called_tf_block = building_blocks.Call(identity_tf_block, selected_int)
lambda_wrapper = building_blocks.Lambda('x', [tf.int32, tf.float32],
called_tf_block)
parsed, modified = parse_tff_to_tf(lambda_wrapper)
exec_lambda = computation_wrapper_instances.building_block_to_computation(
lambda_wrapper)
exec_tf = computation_wrapper_instances.building_block_to_computation(
parsed)
self.assertIsInstance(parsed, building_blocks.CompiledComputation)
self.assertTrue(modified)
# TODO(b/157172423): change to assertEqual when Py container is preserved.
parsed.type_signature.check_equivalent_to(lambda_wrapper.type_signature)
exec_lambda = computation_wrapper_instances.building_block_to_computation(
lambda_wrapper)
exec_tf = computation_wrapper_instances.building_block_to_computation(
parsed)
self.assertEqual(exec_lambda([3, 4.]), exec_tf([3, 4.]))
def test_replaces_lambda_to_called_graph_on_selection_from_arg_with_tf_of_same_type_with_names(
self):
identity_tf_block_type = computation_types.TensorType(tf.int32)
identity_tf_block = building_block_factory.create_compiled_identity(
identity_tf_block_type)
tuple_ref = building_blocks.Reference('x', [('a', tf.int32),
('b', tf.float32)])
selected_int = building_blocks.Selection(tuple_ref, index=0)
called_tf_block = building_blocks.Call(identity_tf_block, selected_int)
lambda_wrapper = building_blocks.Lambda('x', [('a', tf.int32),
('b', tf.float32)],
called_tf_block)
parsed, modified = parse_tff_to_tf(lambda_wrapper)
exec_lambda = computation_wrapper_instances.building_block_to_computation(
lambda_wrapper)
exec_tf = computation_wrapper_instances.building_block_to_computation(
parsed)
self.assertIsInstance(parsed, building_blocks.CompiledComputation)
self.assertTrue(modified)
self.assertEqual(parsed.type_signature, lambda_wrapper.type_signature)
self.assertEqual(exec_lambda({'a': 5, 'b': 6.}), exec_tf({'a': 5, 'b': 6.}))
def test_replaces_lambda_to_called_graph_on_tuple_of_selections_from_arg_with_tf_of_same_type(
self):
identity_tf_block_type = computation_types.StructType([tf.int32, tf.bool])
identity_tf_block = building_block_factory.create_compiled_identity(
identity_tf_block_type)
tuple_ref = building_blocks.Reference('x', [tf.int32, tf.float32, tf.bool])
selected_int = building_blocks.Selection(tuple_ref, index=0)
selected_bool = building_blocks.Selection(tuple_ref, index=2)
created_tuple = building_blocks.Struct([selected_int, selected_bool])
called_tf_block = building_blocks.Call(identity_tf_block, created_tuple)
lambda_wrapper = building_blocks.Lambda('x',
[tf.int32, tf.float32, tf.bool],
called_tf_block)
parsed, modified = parse_tff_to_tf(lambda_wrapper)
exec_lambda = computation_wrapper_instances.building_block_to_computation(
lambda_wrapper)
exec_tf = computation_wrapper_instances.building_block_to_computation(
parsed)
self.assertIsInstance(parsed, building_blocks.CompiledComputation)
self.assertTrue(modified)
# TODO(b/157172423): change to assertEqual when Py container is preserved.
parsed.type_signature.check_equivalent_to(lambda_wrapper.type_signature)
exec_lambda = computation_wrapper_instances.building_block_to_computation(
lambda_wrapper)
exec_tf = computation_wrapper_instances.building_block_to_computation(
parsed)
self.assertEqual(exec_lambda([7, 8., True]), exec_tf([7, 8., True]))
def test_replaces_lambda_to_called_graph_on_tuple_of_selections_from_arg_with_tf_of_same_type_with_names(
self):
identity_tf_block_type = computation_types.StructType([tf.int32, tf.bool])
identity_tf_block = building_block_factory.create_compiled_identity(
identity_tf_block_type)
tuple_ref = building_blocks.Reference('x', [('a', tf.int32),
('b', tf.float32),
('c', tf.bool)])
selected_int = building_blocks.Selection(tuple_ref, index=0)
selected_bool = building_blocks.Selection(tuple_ref, index=2)
created_tuple = building_blocks.Struct([selected_int, selected_bool])
called_tf_block = building_blocks.Call(identity_tf_block, created_tuple)
lambda_wrapper = building_blocks.Lambda('x', [('a', tf.int32),
('b', tf.float32),
('c', tf.bool)],
called_tf_block)
parsed, modified = parse_tff_to_tf(lambda_wrapper)
exec_lambda = computation_wrapper_instances.building_block_to_computation(
lambda_wrapper)
exec_tf = computation_wrapper_instances.building_block_to_computation(
parsed)
self.assertIsInstance(parsed, building_blocks.CompiledComputation)
self.assertTrue(modified)
self.assertEqual(parsed.type_signature, lambda_wrapper.type_signature)
exec_lambda = computation_wrapper_instances.building_block_to_computation(
lambda_wrapper)
exec_tf = computation_wrapper_instances.building_block_to_computation(
parsed)
self.assertEqual(
exec_lambda({
'a': 9,
'b': 10.,
'c': False
}), exec_tf({
'a': 9,
'b': 10.,
'c': False
}))
def test_replaces_lambda_to_unnamed_tuple_of_called_graphs_with_tf_of_same_type(
self):
int_tensor_type = computation_types.TensorType(tf.int32)
int_identity_tf_block = building_block_factory.create_compiled_identity(
int_tensor_type)
float_tensor_type = computation_types.TensorType(tf.float32)
float_identity_tf_block = building_block_factory.create_compiled_identity(
float_tensor_type)
tuple_ref = building_blocks.Reference('x', [tf.int32, tf.float32])
selected_int = building_blocks.Selection(tuple_ref, index=0)
selected_float = building_blocks.Selection(tuple_ref, index=1)
called_int_tf_block = building_blocks.Call(int_identity_tf_block,
selected_int)
called_float_tf_block = building_blocks.Call(float_identity_tf_block,
selected_float)
tuple_of_called_graphs = building_blocks.Struct(
[called_int_tf_block, called_float_tf_block])
lambda_wrapper = building_blocks.Lambda('x', [tf.int32, tf.float32],
tuple_of_called_graphs)
parsed, modified = parse_tff_to_tf(lambda_wrapper)
exec_lambda = computation_wrapper_instances.building_block_to_computation(
lambda_wrapper)
exec_tf = computation_wrapper_instances.building_block_to_computation(
parsed)
self.assertIsInstance(parsed, building_blocks.CompiledComputation)
self.assertTrue(modified)
# TODO(b/157172423): change to assertEqual when Py container is preserved.
parsed.type_signature.check_equivalent_to(lambda_wrapper.type_signature)
exec_lambda = computation_wrapper_instances.building_block_to_computation(
lambda_wrapper)
exec_tf = computation_wrapper_instances.building_block_to_computation(
parsed)
self.assertEqual(exec_lambda([11, 12.]), exec_tf([11, 12.]))
def test_replaces_lambda_to_named_tuple_of_called_graphs_with_tf_of_same_type(
self):
int_tensor_type = computation_types.TensorType(tf.int32)
int_identity_tf_block = building_block_factory.create_compiled_identity(
int_tensor_type)
float_tensor_type = computation_types.TensorType(tf.float32)
float_identity_tf_block = building_block_factory.create_compiled_identity(
float_tensor_type)
tuple_ref = building_blocks.Reference('x', [tf.int32, tf.float32])
selected_int = building_blocks.Selection(tuple_ref, index=0)
selected_float = building_blocks.Selection(tuple_ref, index=1)
called_int_tf_block = building_blocks.Call(int_identity_tf_block,
selected_int)
called_float_tf_block = building_blocks.Call(float_identity_tf_block,
selected_float)
tuple_of_called_graphs = building_blocks.Struct([('a', called_int_tf_block),
('b',
called_float_tf_block)])
lambda_wrapper = building_blocks.Lambda('x', [tf.int32, tf.float32],
tuple_of_called_graphs)
parsed, modified = parse_tff_to_tf(lambda_wrapper)
exec_lambda = computation_wrapper_instances.building_block_to_computation(
lambda_wrapper)
exec_tf = computation_wrapper_instances.building_block_to_computation(
parsed)
self.assertIsInstance(parsed, building_blocks.CompiledComputation)
self.assertTrue(modified)
# TODO(b/157172423): change to assertEqual when Py container is preserved.
parsed.type_signature.check_equivalent_to(lambda_wrapper.type_signature)
self.assertEqual(exec_lambda([13, 14.]), exec_tf([13, 14.]))
def test_replaces_lambda_to_called_composition_of_tf_blocks_with_tf_of_same_type_named_param(
self):
selection_type = computation_types.StructType([('a', tf.int32),
('b', tf.float32)])
selection_tf_block = _create_compiled_computation(lambda x: x[0],
selection_type)
add_one_int_type = computation_types.TensorType(tf.int32)
add_one_int_tf_block = _create_compiled_computation(lambda x: x + 1,
add_one_int_type)
int_ref = building_blocks.Reference('x', [('a', tf.int32),
('b', tf.float32)])
called_selection = building_blocks.Call(selection_tf_block, int_ref)
one_added = building_blocks.Call(add_one_int_tf_block, called_selection)
lambda_wrapper = building_blocks.Lambda('x', [('a', tf.int32),
('b', tf.float32)], one_added)
parsed, modified = parse_tff_to_tf(lambda_wrapper)
exec_lambda = computation_wrapper_instances.building_block_to_computation(
lambda_wrapper)
exec_tf = computation_wrapper_instances.building_block_to_computation(
parsed)
self.assertIsInstance(parsed, building_blocks.CompiledComputation)
self.assertTrue(modified)
# TODO(b/157172423): change to assertEqual when Py container is preserved.
parsed.type_signature.check_equivalent_to(lambda_wrapper.type_signature)
self.assertEqual(
exec_lambda({
'a': 15,
'b': 16.
}), exec_tf({
'a': 15,
'b': 16.
}))
def test_replaces_lambda_to_called_tf_block_with_replicated_lambda_arg_with_tf_block_of_same_type(
self):
sum_and_add_one_type = computation_types.StructType([tf.int32, tf.int32])
sum_and_add_one = _create_compiled_computation(lambda x: x[0] + x[1] + 1,
sum_and_add_one_type)
int_ref = building_blocks.Reference('x', tf.int32)
tuple_of_ints = building_blocks.Struct((int_ref, int_ref))
summed = building_blocks.Call(sum_and_add_one, tuple_of_ints)
lambda_wrapper = building_blocks.Lambda('x', tf.int32, summed)
parsed, modified = parse_tff_to_tf(lambda_wrapper)
exec_lambda = computation_wrapper_instances.building_block_to_computation(
lambda_wrapper)
exec_tf = computation_wrapper_instances.building_block_to_computation(
parsed)
self.assertIsInstance(parsed, building_blocks.CompiledComputation)
self.assertTrue(modified)
# TODO(b/157172423): change to assertEqual when Py container is preserved.
parsed.type_signature.check_equivalent_to(lambda_wrapper.type_signature)
self.assertEqual(exec_lambda(17), exec_tf(17))
if __name__ == '__main__':
factory = executor_stacks.local_executor_factory()
context = execution_context.ExecutionContext(executor_fn=factory)
set_default_context.set_default_context(context)
test.main()
|
from __future__ import print_function
import sys, os, time
sys.path.insert(1, os.path.join("..","..",".."))
import h2o
from tests import pyunit_utils
from h2o.targetencoder import TargetEncoder
"""
This test checks the Rapids wrapper for the Java TargetEncoder.
"""
def test_target_encoding_parameters():
print("Check arguments to TargetEncoder class")
targetEncoder = TargetEncoder(x=["teColumn1"])
assert targetEncoder._teColumns == ["teColumn1"]
def test_target_encoding_fit_method():
print("Check fit method of the TargetEncoder class")
targetColumnName = "survived"
foldColumnName = "kfold_column" # it is strange that we can't set name for generated kfold
teColumns = ["home.dest", "cabin", "embarked"]
targetEncoder = TargetEncoder(x= teColumns, y= targetColumnName,
fold_column= foldColumnName, blending_avg= True, inflection_point = 3, smoothing = 1)
trainingFrame = h2o.import_file(pyunit_utils.locate("smalldata/gbm_test/titanic.csv"), header=1)
trainingFrame[targetColumnName] = trainingFrame[targetColumnName].asfactor()
trainingFrame[foldColumnName] = trainingFrame.kfold_column(n_folds=5, seed=1234)
encodingMap = targetEncoder.fit(frame=trainingFrame)
assert encodingMap.map_keys['string'] == teColumns
assert encodingMap.frames[0]['num_rows'] == 583
def test_target_encoding_transform_kfold():
print("Check transform method (kfold strategy) of the TargetEncoder class")
targetColumnName = "survived"
foldColumnName = "kfold_column" # it is strange that we can't set name for generated kfold
teColumns = ["home.dest", "cabin", "embarked"]
targetEncoder = TargetEncoder(x= teColumns, y= targetColumnName,
fold_column= foldColumnName, blending_avg= True, inflection_point = 3, smoothing = 1)
trainingFrame = h2o.import_file(pyunit_utils.locate("smalldata/gbm_test/titanic.csv"), header=1)
trainingFrame[targetColumnName] = trainingFrame[targetColumnName].asfactor()
trainingFrame[foldColumnName] = trainingFrame.kfold_column(n_folds=5, seed=1234)
targetEncoder.fit(trainingFrame)
encodedFrame = targetEncoder.transform(frame=trainingFrame, holdout_type="kfold", seed=1234)
teColumnsEncoded = list(map(lambda x: x+"_te", teColumns))
frameWithEncodingsOnly = encodedFrame[teColumnsEncoded]
assert frameWithEncodingsOnly.ncols == 3
def test_target_encoding_transform_loo():
print("Check transform (loo strategy) of the TargetEncoder class")
targetColumnName = "survived"
teColumns = ["home.dest", "cabin", "embarked"]
targetEncoder = TargetEncoder(x= teColumns, y= targetColumnName,
fold_column='', blending_avg= True, inflection_point = 3, smoothing = 1)
trainingFrame = h2o.import_file(pyunit_utils.locate("smalldata/gbm_test/titanic.csv"), header=1)
trainingFrame[targetColumnName] = trainingFrame[targetColumnName].asfactor()
targetEncoder.fit(frame=trainingFrame)
encodedFrame = targetEncoder.transform(frame=trainingFrame, holdout_type="loo", seed=1234)
teColumnsEncoded = list(map(lambda x: x+"_te", teColumns))
frameWithEncodingsOnly = encodedFrame[teColumnsEncoded]
assert frameWithEncodingsOnly.ncols == 3
def test_target_encoding_transform_none():
print("Check transform (none strategy) of the TargetEncoder class")
targetColumnName = "survived"
teColumns = ["home.dest", "cabin", "embarked"]
targetEncoder = TargetEncoder(x= teColumns, y= targetColumnName,
blending_avg= True, inflection_point = 3, smoothing = 1)
trainingFrame = h2o.import_file(pyunit_utils.locate("smalldata/gbm_test/titanic.csv"), header=1)
trainingFrame[targetColumnName] = trainingFrame[targetColumnName].asfactor()
targetEncoder.fit(frame=trainingFrame)
encodedFrame = targetEncoder.transform(frame=trainingFrame, holdout_type="none", seed=1234)
teColumnsEncoded = list(map(lambda x: x+"_te", teColumns))
frameWithEncodingsOnly = encodedFrame[teColumnsEncoded]
assert frameWithEncodingsOnly.ncols == 3
def test_target_encoding_transform_none_blending():
print("Check none strategy with and without blending")
targetColumnName = "survived"
teColumns = ["home.dest", "cabin", "embarked"]
teColumnsEncoded = list(map(lambda x: x+"_te", teColumns))
trainingFrame = h2o.import_file(pyunit_utils.locate("smalldata/gbm_test/titanic.csv"), header=1)
trainingFrame[targetColumnName] = trainingFrame[targetColumnName].asfactor()
targetEncoderWithBlending = TargetEncoder(x= teColumns, y= targetColumnName,
blending_avg= True, inflection_point = 3, smoothing = 1)
targetEncoderWithBlending.fit(frame=trainingFrame)
encodedFrameWithBlending = targetEncoderWithBlending.transform(frame=trainingFrame, holdout_type="none", seed=1234)
frameWithBlendedEncodingsOnly = encodedFrameWithBlending[teColumnsEncoded]
targetEncoderWithoutBlending = TargetEncoder(x= teColumns, y= targetColumnName,
blending_avg= False, inflection_point = 3, smoothing = 1)
targetEncoderWithoutBlending.fit(frame=trainingFrame)
encodedFrameWithoutBlending = targetEncoderWithoutBlending.transform(frame=trainingFrame, holdout_type="none", seed=1234)
encodedFrameWithoutBlendingOnly = encodedFrameWithoutBlending[teColumnsEncoded]
try:
pyunit_utils.compare_frames(frameWithBlendedEncodingsOnly, encodedFrameWithoutBlendingOnly, 10, tol_time=0, tol_numeric=1e-6)
assert False
except AssertionError:
print('Good, encodings are different as expected. Hopefully because of the blending.')
def test_target_encoding_seed_is_working():
print("Check that seed is applied when we use noise. Noise is set to the same values. Only seed is different.")
noiseTest = 0.02
targetColumnName = "survived"
teColumns = ["home.dest", "cabin", "embarked"]
teColumnsEncoded = list(map(lambda x: x+"_te", teColumns))
trainingFrame = h2o.import_file(pyunit_utils.locate("smalldata/gbm_test/titanic.csv"), header=1)
trainingFrame[targetColumnName] = trainingFrame[targetColumnName].asfactor()
targetEncoder = TargetEncoder(x= teColumns, y= targetColumnName,
blending_avg= True, inflection_point = 3, smoothing = 1)
targetEncoder.fit(frame=trainingFrame)
encodedFrame = targetEncoder.transform(frame=trainingFrame, holdout_type="none", noise=noiseTest, seed=1234)
encodingsOnly = encodedFrame[teColumnsEncoded]
# Second transformation with the same seed 1234
encodedFrame2 = targetEncoder.transform(frame=trainingFrame, holdout_type="none", noise=noiseTest, seed=1234)
encodingsOnly2 = encodedFrame2[teColumnsEncoded]
# Third transformation with another seed 1235
encodedFrame3 = targetEncoder.transform(frame=trainingFrame, holdout_type="none", noise=noiseTest, seed=1235)
encodingsOnly3 = encodedFrame3[teColumnsEncoded]
# Comparing results
# First two encodings should be equal
assert pyunit_utils.compare_frames(encodingsOnly, encodingsOnly2, 10, tol_time=0, tol_numeric=1e-6)
# Third encoding should be different from the first two ones
try:
pyunit_utils.compare_frames(encodingsOnly, encodingsOnly3, 10, tol_time=0, tol_numeric=1e-6)
assert False
except AssertionError:
print('Good, encodings are different as expected. Seed is working.')
def test_target_encoding_default_noise_is_applied():
print("Check that seed is applied when we use noise. Noise is set to the same values. Only seed is different.")
targetColumnName = "survived"
teColumns = ["home.dest", "cabin", "embarked"]
teColumnsEncoded = list(map(lambda x: x+"_te", teColumns))
trainingFrame = h2o.import_file(pyunit_utils.locate("smalldata/gbm_test/titanic.csv"), header=1)
trainingFrame[targetColumnName] = trainingFrame[targetColumnName].asfactor()
targetEncoder = TargetEncoder(x= teColumns, y= targetColumnName,
blending_avg= True, inflection_point = 3, smoothing = 1)
targetEncoder.fit(frame=trainingFrame)
seedTest = 1234
encodedFrame = targetEncoder.transform(frame=trainingFrame, holdout_type="none", noise=0.0, seed=seedTest)
encodingsOnly = encodedFrame[teColumnsEncoded]
# Second transformation without specifying noise. Default will be applied.
encodedFrame2 = targetEncoder.transform(frame=trainingFrame, holdout_type="none", seed=seedTest)
encodingsOnly2 = encodedFrame2[teColumnsEncoded]
# Third transformation with zero noise
encodedFrame3 = targetEncoder.transform(frame=trainingFrame, holdout_type="none", noise=0.0, seed=seedTest)
encodingsOnly3 = encodedFrame3[teColumnsEncoded]
# Comparing results
# Third encoding should be equal to the first one since no noise is applied in both cases
assert pyunit_utils.compare_frames(encodingsOnly, encodingsOnly3, 10, tol_time=0, tol_numeric=1e-6)
# First two encodings should be different since default noise will be applied to the second transformation
try:
pyunit_utils.compare_frames(encodingsOnly, encodingsOnly2, 10, tol_time=0, tol_numeric=1e-6)
assert False
except AssertionError:
print('Good, encodings are different as expected. Default noise is working')
testList = [
test_target_encoding_parameters,
test_target_encoding_fit_method,
test_target_encoding_transform_kfold,
test_target_encoding_transform_loo,
test_target_encoding_transform_none,
test_target_encoding_transform_none_blending,
test_target_encoding_default_noise_is_applied,
test_target_encoding_seed_is_working
]
if __name__ == "__main__":
for test in testList: pyunit_utils.standalone_test(test)
else:
for test in testList: test()
|
import datasets.import_datasets as im
import pandas as pd
#Takes a very long time to run, probably not worth running when the output CSV (Dataset_details.csv) has already been generated
datasets = ["BMS1",
"BMS2",
"toydata"
"uci_retail",
"mushroom",
"Belgian_retail",
"chess",
"connect",
"mushroom",
"pumsb",
"pumsb_star",
"T40I10D100K",
"T10I4D100K",
"accidents",
"instacart"]
def main(datasets):
df = pd.DataFrame(columns=['Dataset Name',
'Number of transactions',
'Number of Unique items',
'Minimum Transaction Length',
'Maximum Transaction Length',
'Average Transaction Length'])
for dataset_name in datasets:
print("Analysing", dataset_name)
data = im.import_dataset(dataset_name)
data = data.astype('bool')
average = 0
minimum = 100000
maximum = 0
for _, row in data.iterrows():
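            #Since the frame was cast to bool, sum(row) counts the items present in this transaction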
transaction_len = sum(row)
#Minimum transaction length
if minimum > transaction_len:
minimum = transaction_len
#Maximum transaction length
if maximum < transaction_len:
maximum = transaction_len
#Average transaction length
average += transaction_len
new_row = {'Dataset Name':dataset_name,
'Number of transactions':data.shape[0],
'Number of Unique items':data.shape[1],
'Minimum Transaction Length':minimum,
'Maximum Transaction Length':maximum,
'Average Transaction Length':average/data.shape[0]
}
df = df.append(new_row, ignore_index=True)
print(df)
return df
main(datasets).to_csv('Dataset_details.csv')
|
import sys; sys.path.append('..')
from API.tools import EarlyStopping
from API.exp_basic import Exp_Basic
import numpy as np
import torch
import torch.nn as nn
from torch import optim
from ex_ablation.model import ConvUnet
from API.dataloader import load_data
import json
import os
import time
import logging
from tqdm import tqdm
from API.metrics import metric
from torch.optim.lr_scheduler import ReduceLROnPlateau
import nni
import warnings
warnings.filterwarnings('ignore')
class Exp_Traffic(Exp_Basic):
def __init__(self, args):
super(Exp_Traffic, self).__init__(args)
self.path = args.res_dir+'/{}'.format(args.ex_name)
if not os.path.exists(self.path):
os.makedirs(self.path)
self.checkpoints_path = os.path.join(self.path, 'checkpoints')
if not os.path.exists(self.checkpoints_path):
os.makedirs(self.checkpoints_path)
sv_param = os.path.join(self.path, 'model_param.json')
with open(sv_param, 'w') as file_obj:
json.dump(args.__dict__, file_obj)
for handler in logging.root.handlers[:]:
logging.root.removeHandler(handler)
        logging.basicConfig(level=logging.INFO,  # log level printed to the console
                            filename=self.path+'/log.log',  # 'log/{}_{}_{}.log'.format(args.gcn_type,args.graph_type,args.order_list)
                            filemode='a',  # mode: 'w' rewrites the log file on every run, overwriting the previous log;
                            # 'a' appends to the existing log and is the default when filemode is omitted
                            format='%(asctime)s - %(message)s'  # log format
                            )
self._get_data()
self._select_optimizer()
if self.args.epoch_s>0:
self._load(self.args.epoch_s-1)
def _build_model(self):
from ast import literal_eval as make_tuple
in_shape = tuple(self.args.in_shape)
# logging.info('{}'.format(self.args.in_shape))
model = ConvUnet(self.args,self.args.dataname,in_shape,self.args.hidC,self.args.hidT)
return model
def _get_data(self):
config = self.args.__dict__
self.train_loader, self.vali_loader, self.test_loader, self.data_mean, self.data_std = load_data(config['dataname'],config['batch_size'], config['val_batch_size'], config['data_root'],require_back=True)
if self.vali_loader is None:
self.vali_loader = self.test_loader
def _select_optimizer(self):
self.model_optim = optim.Adam(self.model.parameters(), lr=self.args.lr)
self.scheduler = ReduceLROnPlateau(self.model_optim, mode='min', patience=3,factor=0.8,verbose=True)
return self.model_optim
def _adjust_learning_rate(self,optimizer,epoch,args):
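        # Step decay: lr * 0.5 ** ((epoch - 1) // 2), i.e. the learning rate is halved every two epochs.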
lr_adjust = {epoch: args.lr * (0.5 ** ((epoch-1) // 2))}
if epoch in lr_adjust.keys():
lr = lr_adjust[epoch]
for param_group in optimizer.param_groups:
param_group['lr'] = lr
print('Updating learning rate to {}'.format(lr))
def _select_criterion(self):
criterion = nn.MSELoss()
return criterion
def _save(self,epoch):
torch.save(self.model.state_dict(), os.path.join(self.checkpoints_path, str(epoch) + '.pth'))
state=self.scheduler.state_dict()
with open(os.path.join(self.checkpoints_path, str(epoch) + '.json'), 'w') as file_obj:
json.dump(state, file_obj)
def _load(self,epoch):
self.model.load_state_dict(torch.load(os.path.join(self.checkpoints_path, str(epoch) + '.pth')))
state = json.load(open(self.checkpoints_path+'/'+str(epoch) + '.json','r'))
self.scheduler.load_state_dict(state)
def vali(self, vali_loader, criterion, name,epoch):
self.model.eval()
preds=[]
trues=[]
total_loss = []
vali_pbar = tqdm(vali_loader)
for i, (batch_x,batch_y,background) in enumerate(vali_pbar):
batch_x = batch_x.to(self.device)
batch_y = batch_y
background = background.float().to(self.device)
# pred_y, pred_b = self.model(batch_x,background)
pred_y = self.model(batch_x,background)
true = batch_y.detach().cpu()
pred_y = pred_y.detach().cpu()
loss = criterion(pred_y, true)
vali_pbar.set_description('vali loss: {:.4f}'.format(loss.item()))
total_loss.append(loss)
preds.append(pred_y.numpy())
trues.append(true.numpy())
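            # Cap validation at roughly 500 samples to keep evaluation fast.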
if i*batch_x.shape[0]>500:
break
total_loss = np.average(total_loss)
preds = np.concatenate(preds,axis=0)
trues = np.concatenate(trues,axis=0)
mae, mse, rmse, mape, mspe = metric(preds, trues,vali_loader.dataset.mean,vali_loader.dataset.std)
print('{}\tmse:{}, mae:{}, rmse:{}, mape:{} ,mspe:{}'.format(name,mse, mae, rmse, mape, mspe ))
logging.info('{}\tmse:{}, mae:{}, rmse:{}, mape:{} ,mspe:{}'.format(name,mse, mae, rmse, mape, mspe ))
self.model.train()
if name == 'vali':
nni.report_intermediate_result(mse)
return total_loss
def train(self, args):
config = args.__dict__
time_now = time.time()
train_steps = len(self.train_loader)
early_stopping = EarlyStopping(patience=self.args.patience, verbose=True)
model_optim = self._select_optimizer()
criterion = self._select_criterion()
for epoch in range(config['epoch_s'], config['epoch_e']):
iter_count = 0
train_loss = []
self.model.train()
train_pbar = tqdm(self.train_loader)
i=0
for batch_x,batch_y,background in train_pbar:
iter_count += 1
model_optim.zero_grad()
batch_x = batch_x.to(self.device) # [32,12,3,32,64]
batch_y = batch_y.to(self.device) # [32,12,3,32,64]
background = background.float().to(self.device)
# pred_y, pred_b = self.model(batch_x,background)
# loss = criterion(pred_y, batch_y)+criterion(pred_b, background)
pred_y = self.model(batch_x,background)
loss = criterion(pred_y, batch_y)
train_loss.append(loss.item())
train_pbar.set_description('train loss: {:.4f}'.format(loss.item()))
loss.backward()
model_optim.step()
i+=1
train_loss = np.average(train_loss)
if epoch % args.log_step == 0:
self._save(epoch)
vali_loss = self.vali(self.vali_loader, criterion,'vali',epoch)
test_loss = self.vali(self.test_loader, criterion,'test',epoch)
self.scheduler.step(test_loss)
# nni.report_intermediate_result(test_loss)
print("Epoch: {0}, Steps: {1} | Train Loss: {2:.7f} Vali Loss: {3:.7f} Test Loss: {4:.7f}\n".format(
epoch + 1, train_steps, train_loss, vali_loss, test_loss))
logging.info("Epoch: {0}, Steps: {1} | Train Loss: {2:.7f} Vali Loss: {3:.7f} Test Loss: {4:.7f}\n".format(
epoch + 1, train_steps, train_loss, vali_loss, test_loss))
early_stopping(vali_loss, self.model, self.path)
if early_stopping.early_stop:
print("Early stopping")
logging.info("Early stopping")
break
best_model_path = self.path+'/'+'checkpoint.pth'
self.model.load_state_dict(torch.load(best_model_path))
return self.model
def test(self,args):
self.model.eval()
preds = []
trues = []
for batch_x,batch_y,background in self.test_loader:
batch_x = batch_x.to(self.device)
batch_y = batch_y
background = background.to(self.device)
pred_y = self.model(batch_x,background)#.squeeze()
pred_y = pred_y.detach().cpu()
true = batch_y.detach().cpu().numpy()#.squeeze()
preds.append(pred_y)
trues.append(true)
preds = np.concatenate(preds,axis=0)
trues = np.concatenate(trues,axis=0)
print('test shape:', preds.shape, trues.shape)
logging.info('test shape:{}-{}'.format(preds.shape, trues.shape))
# result save
folder_path = self.path+'/results/{}/sv/'.format(args.ex_name)
if not os.path.exists(folder_path):
os.makedirs(folder_path)
mae, mse, rmse, mape, mspe = metric(preds, trues,self.test_loader.dataset.mean,self.test_loader.dataset.std)
print('mse:{}, mae:{}'.format(mse, mae))
logging.info('mse:{}, mae:{}'.format(mse, mae))
np.save(folder_path+'metrics.npy', np.array([mae, mse, rmse, mape, mspe]))
np.save(folder_path+'pred.npy', preds)
np.save(folder_path+'true.npy', trues)
return mse
|
from nose.exc import SkipTest
import os
import sys
import unittest
try:
maketrans = str.maketrans
except AttributeError:
from string import maketrans
import rdflib
"""
SWAP N3 parser test suite
"""
rdf = rdflib.Namespace("http://www.w3.org/1999/02/22-rdf-syntax-ns#")
rdfs = rdflib.Namespace("http://www.w3.org/2000/01/rdf-schema#")
xsd = rdflib.Namespace("http://www.w3.org/2001/XMLSchema#")
owl = rdflib.Namespace("http://www.w3.org/2002/07/owl#")
test = rdflib.Namespace("http://www.w3.org/2000/10/swap/test.n3#")
n3test = rdflib.Namespace("http://www.w3.org/2004/11/n3test#")
rdft = rdflib.Namespace("http://www.w3.org/2000/10/rdf-tests/rdfcore/testSchema#")
triage = rdflib.Namespace("http://www.w3.org/2000/10/swap/test/triage#")
mf = rdflib.Namespace("http://www.w3.org/2001/sw/DataAccess/tests/test-manifest#")
qt = rdflib.Namespace("http://www.w3.org/2001/sw/DataAccess/tests/test-query#")
# class TestSWAPN3(unittest.TestCase):
# """SWAP 2000/10/n3 tests"""
# def setUp(self):
# def test_foo(self):
# """footest"""
# self.graph.parse(os.getcwd()+'/test/swap-n3/n3-rdf.tests', format="n3")
# tfiles = []
# for tst in self.graph.subjects():
# files = [str(tfile).replace('http://www.w3.org/2000/10/', 'file://'+os.getcwd()+'/test/swap-n3/')
# for tfile in self.graph.objects(tst, rdflib.URIRef("http://www.w3.org/2004/11/n3test#inputDocument")) if tfile.endswith('n3')]
# tfiles += files
# for tfile in tfiles:
# self.graph.parse(tfile, format="n3")
skiptests = [
"syntax_neg_single_quote",
"syntax_neg_literal_predicate",
"syntax_this_quantifiers",
"syntax_trailing_semicolon",
"syntax_neg_thisadoc",
"syntax_equals1",
"syntax_equals2",
"syntax_this_rules",
"syntax_neg_keywords3",
"syntax_zero_objects",
"syntax_neg_formula_predicate",
"syntax_zero_predicates",
# 'syntax_qvars1',
# 'syntax_qvars2',
# 'contexts',
"syntax_too_nested",
]
class Envelope(object):
def __init__(self, n, f):
self.name = n
self.file = f
def __repr__(self):
return self.name
def generictest(e):
"""Documentation"""
if e.skip:
raise SkipTest("%s skipped, known issue" % e.name)
g = rdflib.Graph()
for i in [rdf, rdfs, xsd, owl, test, n3test, rdft, triage, mf, qt]:
g.bind(str(i), i)
g.parse(e.file, format="n3")
def dir_to_uri(directory, sep=os.path.sep):
"""
Convert a local path to a File URI.
>>> dir_to_uri('c:\\\\temp\\\\foo\\\\file.txt', sep='\\\\')
'file:///c:/temp/foo/file.txt'
>>> dir_to_uri('/tmp/foo/file.txt', sep='/')
'file:///tmp/foo/file.txt'
"""
items = directory.split(sep)
path = "/".join(items)
if path.startswith("/"):
path = path[1:]
return "file:///%s" % (path,)
def test_cases():
from copy import deepcopy
g = rdflib.Graph()
swap_dir = os.path.join(os.getcwd(), "test", "swap-n3")
g.parse(os.path.join(swap_dir, "n3-rdf.tests"), format="n3")
g.parse(os.path.join(swap_dir, "n3-full.tests"), format="n3")
tfiles = []
swap_dir_uri = dir_to_uri(swap_dir) + "/"
for tst in g.subjects():
files = [
str(tfile).replace("http://www.w3.org/2000/10/", swap_dir_uri)
for tfile in g.objects(
tst, rdflib.URIRef("http://www.w3.org/2004/11/n3test#inputDocument")
)
if tfile.endswith("n3")
]
tfiles += files
for tfile in set(tfiles):
gname = tfile.split("/swap-n3/swap/test/")[1][:-3].translate(
maketrans("-/", "__")
)
e = Envelope(gname, tfile)
if gname in skiptests:
e.skip = True
else:
e.skip = False
# e.skip = True
if sys.version_info[:2] == (2, 4):
import pickle
gjt = pickle.dumps(generictest)
gt = pickle.loads(gjt)
else:
gt = deepcopy(generictest)
gt.__doc__ = tfile
yield gt, e
if __name__ == "__main__":
test_cases()
# unittest.main()
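# Hedged usage sketch (not part of the original suite): test_cases() yields
# (test function, Envelope) pairs in nose's generator-test style, so running this
# module directly only builds the generators. The hand-rolled runner below shows
# how the pairs could be driven without nose; the function name is illustrative.
def _run_all_by_hand():
    for test_func, envelope in test_cases():
        try:
            test_func(envelope)
            print("PASS", envelope)
        except SkipTest as exc:
            print("SKIP", envelope, exc)
        except Exception as exc:  # parse failures from rdflib surface here
            print("FAIL", envelope, exc)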
|
"""
This module contains shared fixtures.
"""
import json
import pytest
import selenium.webdriver
@pytest.fixture(scope='session')
def config():
# Read JSON file
with open('config.json') as config_file:
config = json.load(config_file)
# Assert values are acceptable
assert config['browser'] in ['Firefox', 'Chrome', 'Headless Chrome']
    assert isinstance(config['implicit_wait'], int)
    assert config['implicit_wait'] > 0
    # Return config
return config
@pytest.fixture
def browser(config):
    # Initialize the WebDriver instance for the configured browser
if config['browser'] == 'Firefox':
b = selenium.webdriver.Firefox()
elif config['browser'] == 'Chrome':
b = selenium.webdriver.Chrome()
elif config['browser'] == 'Headless Chrome':
opts = selenium.webdriver.ChromeOptions()
opts.add_argument('headless')
b = selenium.webdriver.Chrome(options=opts)
else:
raise Exception(f'Browser "{config["browser"]}" is not supported')
    # Make its calls wait up to the configured number of seconds for elements to appear
b.implicitly_wait(config['implicit_wait'])
# Return the WebDriver instance for the setup
yield b
# Quit the WebDriver instance for the cleanup
b.quit()
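# Hedged usage sketch (not part of the original conftest): a test module placed
# alongside this file can request the fixtures by name. The URL and expected title
# below are illustrative assumptions; put the function in a test_*.py module so
# pytest collects it (conftest.py itself is not collected as a test module).
def test_example_page_loads(browser):
    browser.get('https://www.example.com')
    assert 'Example' in browser.title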
|
# coding: utf-8
import re
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class ComputeFlavorGroupsInfo:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'group_type': 'str',
'compute_flavors': 'list[ComputeFlavors]',
'offset': 'int',
'limit': 'int',
'total': 'int'
}
attribute_map = {
'group_type': 'groupType',
'compute_flavors': 'computeFlavors',
'offset': 'offset',
'limit': 'limit',
'total': 'total'
}
def __init__(self, group_type=None, compute_flavors=None, offset=None, limit=None, total=None):
"""ComputeFlavorGroupsInfo - a model defined in huaweicloud sdk"""
self._group_type = None
self._compute_flavors = None
self._offset = None
self._limit = None
self._total = None
self.discriminator = None
if group_type is not None:
self.group_type = group_type
if compute_flavors is not None:
self.compute_flavors = compute_flavors
if offset is not None:
self.offset = offset
if limit is not None:
self.limit = limit
if total is not None:
self.total = total
@property
def group_type(self):
"""Gets the group_type of this ComputeFlavorGroupsInfo.
        Compute resource architecture type; currently either X86 or ARM.
:return: The group_type of this ComputeFlavorGroupsInfo.
:rtype: str
"""
return self._group_type
@group_type.setter
def group_type(self, group_type):
"""Sets the group_type of this ComputeFlavorGroupsInfo.
        Compute resource architecture type; currently either X86 or ARM.
:param group_type: The group_type of this ComputeFlavorGroupsInfo.
:type: str
"""
self._group_type = group_type
@property
def compute_flavors(self):
"""Gets the compute_flavors of this ComputeFlavorGroupsInfo.
        Details of the compute flavors.
:return: The compute_flavors of this ComputeFlavorGroupsInfo.
:rtype: list[ComputeFlavors]
"""
return self._compute_flavors
@compute_flavors.setter
def compute_flavors(self, compute_flavors):
"""Sets the compute_flavors of this ComputeFlavorGroupsInfo.
        Details of the compute flavors.
:param compute_flavors: The compute_flavors of this ComputeFlavorGroupsInfo.
:type: list[ComputeFlavors]
"""
self._compute_flavors = compute_flavors
@property
def offset(self):
"""Gets the offset of this ComputeFlavorGroupsInfo.
        Pagination parameter: start offset.
:return: The offset of this ComputeFlavorGroupsInfo.
:rtype: int
"""
return self._offset
@offset.setter
def offset(self, offset):
"""Sets the offset of this ComputeFlavorGroupsInfo.
        Pagination parameter: start offset.
:param offset: The offset of this ComputeFlavorGroupsInfo.
:type: int
"""
self._offset = offset
@property
def limit(self):
"""Gets the limit of this ComputeFlavorGroupsInfo.
        Pagination parameter: number of items per page.
:return: The limit of this ComputeFlavorGroupsInfo.
:rtype: int
"""
return self._limit
@limit.setter
def limit(self, limit):
"""Sets the limit of this ComputeFlavorGroupsInfo.
        Pagination parameter: number of items per page.
:param limit: The limit of this ComputeFlavorGroupsInfo.
:type: int
"""
self._limit = limit
@property
def total(self):
"""Gets the total of this ComputeFlavorGroupsInfo.
        Total number of compute flavors.
:return: The total of this ComputeFlavorGroupsInfo.
:rtype: int
"""
return self._total
@total.setter
def total(self, total):
"""Sets the total of this ComputeFlavorGroupsInfo.
        Total number of compute flavors.
:param total: The total of this ComputeFlavorGroupsInfo.
:type: int
"""
self._total = total
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ComputeFlavorGroupsInfo):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
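# Hedged usage sketch (not part of the SDK model above): the object is normally
# populated by the SDK from API responses, but it can also be constructed directly.
# The values below are illustrative; compute_flavors is left empty because the
# ComputeFlavors model is defined elsewhere in the SDK.
if __name__ == "__main__":
    info = ComputeFlavorGroupsInfo(group_type="X86", compute_flavors=[], offset=0, limit=10, total=0)
    print(info.to_dict())  # plain dict built from openapi_types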
|
import project_RL.linear_sarsa.train
from project_RL.linear_sarsa.sarsa_lambda_agent import *
|
"""sc-githooks - Git routines
Copyright (c) 2021 Scott Lau
Portions Copyright (c) 2021 InnoGames GmbH
Portions Copyright (c) 2021 Emre Hasegeli
"""
from os.path import isabs, join as joinpath, normpath
from subprocess import check_output
from githooks.utils import get_exe_path, get_extension, decode_str
git_exe_path = get_exe_path('git')
class CommitList(list):
"""Routines on a list of sequential commits"""
ref_path = None
def __init__(self, other, branch_name):
super(CommitList, self).__init__(other)
self.branch_name = branch_name
def __str__(self):
name = '{}..{}'.format(self[0], self[-1])
if self.ref_path:
name += ' ({})'.format(self.branch_name)
return name
class Commit(object):
"""Routines on a single commit"""
null_commit_id = '0000000000000000000000000000000000000000'
def __init__(self, commit_id, commit_list=None):
self.commit_id = commit_id
self.commit_list = commit_list
self.content_fetched = False
self.changed_files = None
self.binary_files = None
def __str__(self):
return self.commit_id[:8]
def __bool__(self):
return self.commit_id != Commit.null_commit_id
def __nonzero__(self):
return self.__bool__()
def __eq__(self, other):
return isinstance(other, Commit) and self.commit_id == other.commit_id
def get_new_commit_list(self, branch_name):
"""Get the list of parent new commits in order"""
output = decode_str(check_output([
git_exe_path,
'rev-list',
self.commit_id,
'--not',
'--all',
'--reverse',
]))
commit_list = CommitList([], branch_name)
for commit_id in output.splitlines():
commit = Commit(commit_id, commit_list)
commit_list.append(commit)
return commit_list
def _fetch_content(self):
content = check_output(
[git_exe_path, 'cat-file', '-p', self.commit_id]
)
self._parents = []
self._message_lines = []
# The commit message starts after the empty line. We iterate until
# we find one, and then consume the rest as the message.
lines = iter(content.splitlines())
for line in lines:
if not line:
break
if line.startswith(b'parent '):
self._parents.append(Commit(line[len(b'parent '):].rstrip()))
elif line.startswith(b'author '):
self._author = Contributor.parse(line[len(b'author '):])
elif line.startswith(b'committer '):
self._committer = Contributor.parse(line[len(b'committer '):])
for line in lines:
self._message_lines.append(decode_str(line))
self.content_fetched = True
def get_parents(self):
if not self.content_fetched:
self._fetch_content()
return self._parents
def get_author(self):
if not self.content_fetched:
self._fetch_content()
return self._author
def get_committer(self):
if not self.content_fetched:
self._fetch_content()
return self._committer
    def get_contributors(self):
        yield self.get_author()
        yield self.get_committer()
def get_message_lines(self):
if not self.content_fetched:
self._fetch_content()
return self._message_lines
def get_summary(self):
return self.get_message_lines()[0]
def parse_tags(self):
tags = []
rest = self.get_summary()
while rest.startswith('[') and ']' in rest:
end_index = rest.index(']')
tags.append(rest[1:end_index])
rest = rest[end_index + 1:]
return tags, rest
def content_can_fail(self):
return not any(
t in ['HOTFIX', 'MESS', 'TEMP', 'WIP']
for t in self.parse_tags()[0]
)
def get_changed_files(self):
"""Return the list of added or modified files on a commit"""
if self.changed_files is None:
output = decode_str(check_output([
git_exe_path,
'diff-tree',
'-r',
'--root', # Get the initial commit as additions
'--no-commit-id', # We already know the commit id.
'--break-rewrites', # Get rewrites as additions
'--no-renames', # Get renames as additions
'--diff-filter=AM', # Only additions and modifications
self.commit_id,
]))
changed_files = []
for line in output.splitlines():
line_split = line.split(None, 5)
assert len(line_split) == 6
assert line_split[0].startswith(':')
file_mode = line_split[1]
# sc add object_id
object_id = line_split[3]
file_path = line_split[5]
changed_files.append(CommittedFile(file_path, self, file_mode, object_id))
self.changed_files = changed_files
return self.changed_files
def get_binary_files(self):
"""Return the binary files on a commit"""
if self.binary_files is None:
output = decode_str(check_output([
git_exe_path,
'log',
'--pretty=format:%H -M100%', # pretty format
'--numstat', # number state of the file
'--no-commit-id', # We already know the commit id.
'--break-rewrites', # Get rewrites as additions
'--no-renames', # Get renames as additions
'--diff-filter=AM', # Only additions and modifications
"{}^!".format(self.commit_id),
]))
binary_files = []
for line in output.splitlines():
line_split = line.split('\t')
if len(line_split) == 3:
if "-" == line_split[0] and "-" == line_split[1]:
binary_files.append(line_split[2])
self.binary_files = binary_files
return self.binary_files
class Contributor(object):
"""Routines on contribution properties of a commit"""
def __init__(self, name, email, timestamp):
self.name = name
self.email = email
self.timestamp = timestamp
@classmethod
def parse(cls, line):
"""Parse the contribution line as bytes"""
name, line = line.split(b' <', 1)
email, line = line.split(b'> ', 1)
timestamp, line = line.split(b' ', 1)
return cls(decode_str(name), decode_str(email), int(timestamp))
def get_email_domain(self):
return self.email.split('@', 1)[-1]
class CommittedFile(object):
"""Routines on a single committed file"""
def __init__(self, path, commit=None, mode=None, object_id=None):
self.path = path
self.commit = commit
assert mode is None or len(mode) == 6
self.mode = mode
self.content = None
# sc add object id
self.object_id = object_id
def __str__(self):
        return 'file {} at commit {}'.format(self.path, self.commit)
def __eq__(self, other):
return (
isinstance(other, CommittedFile) and
self.path == other.path and
self.commit == other.commit
)
def exists(self):
return bool(check_output([
git_exe_path,
'ls-tree',
'--name-only',
'-r',
self.commit.commit_id,
self.path,
]))
def changed(self):
return self in self.commit.get_changed_files()
def regular(self):
return self.mode[:2] == '10'
def symlink(self):
return self.mode[:2] == '12'
def owner_can_execute(self):
owner_bits = int(self.mode[-3])
return bool(owner_bits & 1)
def get_object_id(self):
return self.object_id
def get_filename(self):
return self.path.rsplit('/', 1)[-1]
def get_file_size(self):
output = check_output([
git_exe_path,
'cat-file',
'-s', # show file size
self.object_id,
])
return int(output)
def get_extension(self):
return get_extension(self.path)
def get_content(self):
"""Get the file content as binary"""
if self.content is None:
self.content = check_output([
git_exe_path, 'show', self.commit.commit_id + ':' + self.path
])
return self.content
def get_shebang(self):
"""Get the shebang from the file content"""
if not self.regular():
return None
content = self.get_content()
if not content.startswith(b'#!'):
return None
content = content[len(b'#!'):].strip()
return decode_str(content.split(None, 1)[0])
def get_shebang_exe(self):
"""Get the executable from the shebang"""
shebang = self.get_shebang()
if not shebang:
return None
if shebang == '/usr/bin/env':
rest = self.get_content().splitlines()[0][len(b'#!/usr/bin/env'):]
rest_split = rest.split(None, 1)
if rest_split:
return decode_str(rest_split[0])
return shebang.rsplit('/', 1)[-1]
def get_symlink_target(self):
"""Get the symlink target as same kind of instance
We just return None, if the target has no chance to be on
the repository."""
content = self.get_content()
if isabs(content):
return None
path = normpath(joinpath(self.path, '..', decode_str(content)))
if path.startswith('..'):
return None
return type(self)(path, self.commit)
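# Hedged usage sketch (not part of the original module): inspect the current HEAD
# commit of the repository this script is run from. It assumes a git work tree with
# at least one commit; 'HEAD' is only an illustrative revision.
if __name__ == '__main__':
    head_id = decode_str(check_output([git_exe_path, 'rev-parse', 'HEAD'])).strip()
    head = Commit(head_id)
    print('summary:', head.get_summary())
    for committed_file in head.get_changed_files():
        print(committed_file.path, committed_file.get_file_size())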
|
import numpy as np
import os
import sys
import cntk
from cntk.layers import Convolution2D, MaxPooling, Dense, Dropout
from utils import *
import argparse
from cntk.train.distributed import Communicator, mpi_communicator
# Hyperparams
EPOCHS = 1
BATCHSIZE = 64 * 4
LR = 0.01
MOMENTUM = 0.9
N_CLASSES = 10
def create_basic_model(input, out_dims):
with cntk.layers.default_options(init=cntk.glorot_uniform(), activation=cntk.relu):
net = cntk.layers.Convolution((5,5), 32, pad=True)(input)
net = cntk.layers.MaxPooling((3,3), strides=(2,2))(net)
net = cntk.layers.Convolution((5,5), 32, pad=True)(net)
net = cntk.layers.MaxPooling((3,3), strides=(2,2))(net)
net = cntk.layers.Convolution((5,5), 64, pad=True)(net)
net = cntk.layers.MaxPooling((3,3), strides=(2,2))(net)
net = cntk.layers.Dense(64)(net)
net = cntk.layers.Dense(out_dims, activation=None)(net)
return net
def init_model(m):
progress_writers = [cntk.logging.ProgressPrinter(
freq=int(BATCHSIZE / 2),
rank=cntk.train.distributed.Communicator.rank(),
num_epochs=EPOCHS)]
# Loss (dense labels); check if support for sparse labels
loss = cntk.cross_entropy_with_softmax(m, labels)
# Momentum SGD
# https://github.com/Microsoft/CNTK/blob/master/Manual/Manual_How_to_use_learners.ipynb
# unit_gain=False: momentum_direction = momentum*old_momentum_direction + gradient
# if unit_gain=True then ...(1-momentum)*gradient
local_learner = cntk.momentum_sgd(m.parameters,
lr=cntk.learning_rate_schedule(LR, cntk.UnitType.minibatch) ,
momentum=cntk.momentum_schedule(MOMENTUM),
unit_gain=False)
distributed_learner = cntk.train.distributed.data_parallel_distributed_learner(local_learner)
trainer = cntk.Trainer(m, (loss, cntk.classification_error(m, labels)), [distributed_learner], progress_writers)
return trainer, distributed_learner
parser = argparse.ArgumentParser()
parser.add_argument('--input_dir')
#parser.add_argument('--output_dir')
print(sys.argv)
args = parser.parse_args()
# Data into format for library
x_train, x_test, y_train, y_test = cifar_for_library(download_dir=args.input_dir, channel_first=True, one_hot=True)
# CNTK format
y_train = y_train.astype(np.float32)
y_test = y_test.astype(np.float32)
print(x_train.shape, x_test.shape, y_train.shape, y_test.shape)
print(x_train.dtype, x_test.dtype, y_train.dtype, y_test.dtype)
# Placeholders
features = cntk.input_variable((3, 32, 32), np.float32)
labels = cntk.input_variable(N_CLASSES, np.float32)
# Load symbol
sym = create_basic_model(features, N_CLASSES)
def save_model(model, learner, file_name):
if learner.communicator().is_main():
model.save(file_name)
trainer, learner = init_model(sym)
for j in range(EPOCHS):
for data, label in yield_mb(x_train, y_train, BATCHSIZE, shuffle=True):
trainer.train_minibatch({features: data, labels: label})
# Log (this is just last batch in epoch, not average of batches)
eval_error = trainer.previous_minibatch_evaluation_average
print("Epoch %d | Accuracy: %.6f" % (j+1, (1-eval_error)))
z = cntk.softmax(sym)
save_model(sym, learner, "{}/cifar_final.model".format(args.input_dir))
n_samples = (y_test.shape[0]//BATCHSIZE)*BATCHSIZE
y_guess = np.zeros(n_samples, dtype=np.int64)  # np.int is a deprecated/removed alias in newer NumPy
y_truth = np.argmax(y_test[:n_samples], axis=-1)
c = 0
for data, label in yield_mb(x_test, y_test, BATCHSIZE):
predicted_label_probs = z.eval({features : data})
y_guess[c*BATCHSIZE:(c+1)*BATCHSIZE] = np.argmax(predicted_label_probs, axis=-1)
c += 1
print("Accuracy: ", sum(y_guess == y_truth)/len(y_guess))
cntk.train.distributed.Communicator.finalize()
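# Hedged note (not part of the original script): because the learner is wrapped in
# data_parallel_distributed_learner, multi-GPU/multi-node runs are typically launched
# under MPI, e.g. something like `mpiexec -n 2 python <this_script>.py --input_dir <dir>`;
# a plain single-process invocation also works and simply uses one worker.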
|
# File : transformer.py
# Author : Zhengkun Tian
# Email : zhengkun.tian@outlook.com
import logging
import torch
import torch.nn as nn
from otrans.module.pos import MixedPositionalEncoding, RelPositionalEncoding
from otrans.module.ffn import PositionwiseFeedForward
from otrans.module.attention import MultiHeadedSelfAttention, MultiHeadedSelfAttentionWithRelPos
logger = logging.getLogger(__name__)
class TransformerEncoderLayer(nn.Module):
def __init__(self, n_heads, d_model, d_ff, slf_attn_dropout, ffn_dropout, residual_dropout,
normalize_before=False, concat_after=False, relative_positional=False, activation='relu'):
super(TransformerEncoderLayer, self).__init__()
self.relative_positional = relative_positional
if self.relative_positional:
self.slf_attn = MultiHeadedSelfAttentionWithRelPos(n_heads, d_model, slf_attn_dropout)
else:
self.slf_attn = MultiHeadedSelfAttention(n_heads, d_model, slf_attn_dropout)
self.feed_forward = PositionwiseFeedForward(d_model, d_ff, ffn_dropout, activation)
self.norm1 = nn.LayerNorm(d_model)
self.norm2 = nn.LayerNorm(d_model)
self.dropout1 = nn.Dropout(residual_dropout)
self.dropout2 = nn.Dropout(residual_dropout)
self.normalize_before = normalize_before
self.concat_after = concat_after
if self.concat_after:
self.concat_linear = nn.Linear(d_model * 2, d_model)
def forward(self, x, mask, pos=None):
if self.normalize_before:
x = self.norm1(x)
residual = x
if self.relative_positional:
slf_attn_out, slf_attn_weights = self.slf_attn(x, mask, pos)
else:
slf_attn_out, slf_attn_weights = self.slf_attn(x, mask)
if self.concat_after:
x = residual + self.concat_linear(torch.cat((x, slf_attn_out), dim=-1))
else:
x = residual + self.dropout1(slf_attn_out)
if not self.normalize_before:
x = self.norm1(x)
if self.normalize_before:
x = self.norm2(x)
residual = x
x = residual + self.dropout2(self.feed_forward(x))
if not self.normalize_before:
x = self.norm2(x)
return x, {'slf_attn_weights': slf_attn_weights}
def inference(self, x, mask, pos=None, cache=None):
if self.normalize_before:
x = self.norm1(x)
residual = x
if self.relative_positional:
slf_attn_out, slf_attn_weights, new_cache = self.slf_attn.inference(x, mask, cache, pos)
else:
slf_attn_out, slf_attn_weights, new_cache = self.slf_attn.inference(x, mask, cache)
if self.concat_after:
x = residual + self.concat_linear(torch.cat((x, slf_attn_out), dim=-1))
else:
x = residual + slf_attn_out
if not self.normalize_before:
x = self.norm1(x)
if self.normalize_before:
x = self.norm2(x)
residual = x
x = residual + self.feed_forward(x)
if not self.normalize_before:
x = self.norm2(x)
return x, new_cache, {'slf_attn_weights': slf_attn_weights}
class TransformerEncoder(nn.Module):
def __init__(self, d_model=256, n_heads=4, d_ff=2048, n_blocks=6, pos_dropout=0.0,
slf_attn_dropout=0.0, ffn_dropout=0.0, residual_dropout=0.1, normalize_before=False,
concat_after=False, relative_positional=False, activation='relu'):
super(TransformerEncoder, self).__init__()
self.normalize_before = normalize_before
self.relative_positional = relative_positional
if self.relative_positional:
self.pos_emb = RelPositionalEncoding(d_model, pos_dropout)
else:
self.pos_emb = MixedPositionalEncoding(d_model, pos_dropout)
self.blocks = nn.ModuleList([
TransformerEncoderLayer(
n_heads, d_model, d_ff, slf_attn_dropout, ffn_dropout,
residual_dropout=residual_dropout, normalize_before=normalize_before,
concat_after=concat_after, relative_positional=relative_positional, activation=activation) for _ in range(n_blocks)
])
if self.normalize_before:
self.norm = nn.LayerNorm(d_model)
def forward(self, inputs, mask):
enc_output, pos = self.pos_emb(inputs)
enc_output.masked_fill_(~mask.unsqueeze(2), 0.0)
attn_weights = {}
for i, block in enumerate(self.blocks):
enc_output, attn_weight = block(enc_output, mask.unsqueeze(1), pos)
attn_weights['enc_block_%d' % i] = attn_weight
if self.normalize_before:
enc_output = self.norm(enc_output)
return enc_output, mask, attn_weights
def inference(self, inputs, mask, cache=None):
enc_output, pos = self.pos_emb.inference(inputs)
enc_output.masked_fill_(~mask.unsqueeze(2), 0.0)
attn_weights = {}
new_caches = []
for i, block in enumerate(self.blocks):
enc_output, new_cache, attn_weight = block.inference(enc_output, mask.unsqueeze(1), pos, cache)
attn_weights['enc_block_%d' % i] = attn_weight
new_caches.append(new_cache)
if self.normalize_before:
enc_output = self.norm(enc_output)
return enc_output, mask, new_caches, attn_weights
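# Hedged usage sketch (not part of the original module): push a dummy batch through
# the encoder. Shapes follow forward() above: inputs are (batch, time, d_model) and
# mask is a boolean (batch, time) tensor marking valid frames; the positional
# encoding modules imported above are assumed to accept these shapes.
if __name__ == '__main__':
    encoder = TransformerEncoder(d_model=256, n_heads=4, d_ff=1024, n_blocks=2)
    dummy_inputs = torch.randn(2, 50, 256)
    dummy_mask = torch.ones(2, 50, dtype=torch.bool)
    enc_output, out_mask, attn_weights = encoder(dummy_inputs, dummy_mask)
    print(enc_output.shape, list(attn_weights.keys()))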
|
# -*- coding: utf-8 -*-
"""Application configuration.
Most configuration is set via environment variables.
For local development, use a .env file to set
environment variables.
"""
from os import path, urandom
class Config:
"""Application Configuration."""
from environs import Env
env = Env()
env.read_env()
BASE_DIR = path.dirname(path.dirname(__file__))
ENV = env.str("FLASK_ENV", default="production")
DEBUG = ENV == "development"
MONGODB_SETTINGS = [
{
"db": env.str("MONGODB_DB"),
"host": env.str("MONGODB_HOST"),
"port": env.int("MONGODB_PORT"),
}
]
UPLOAD_DIR = env.str("UPLOAD_DIR")
INSTALLED_RESOURCES = [
"song",
]
# To enable flask to catch package exceptions
PROPAGATE_EXCEPTIONS = True
SECRET_KEY = urandom(24)
SEND_FILE_MAX_AGE_DEFAULT = env.int("SEND_FILE_MAX_AGE_DEFAULT")
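# Hedged example .env (not part of the original file; values are illustrative,
# variable names are the ones read by the Config class above):
#
#   FLASK_ENV=development
#   MONGODB_DB=songs
#   MONGODB_HOST=localhost
#   MONGODB_PORT=27017
#   UPLOAD_DIR=/tmp/uploads
#   SEND_FILE_MAX_AGE_DEFAULT=300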
|
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('../../python'))
import sphinx_rtd_theme
# -- Project information -----------------------------------------------------
project = 'FlexFlow'
copyright = '2020, Stanford, LANL, CMU, Facebook'
author = 'Stanford, LANL, CMU, Facebook'
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx_rtd_theme',
'sphinx.ext.autodoc',
]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
"collapse_navigation" : False
}
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = 'en'  # must be a language code such as 'en'; free-form text like 'Python, C++' is not valid here
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
|
# Copyright 2017 StreamSets Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import json
import logging
import math
import os
import random
import string
import tempfile
import time
from collections import OrderedDict
import pytest
import sqlalchemy
import datetime
from streamsets.sdk.utils import Version
from streamsets.testframework.environments.databases import Db2Database, OracleDatabase, SQLServerDatabase, PostgreSqlDatabase
from streamsets.testframework.markers import credentialstore, database, sdc_min_version
from streamsets.testframework.utils import get_random_string
logger = logging.getLogger(__name__)
ROWS_IN_DATABASE = [
{'id': 1, 'name': 'Dima'},
{'id': 2, 'name': 'Jarcec'},
{'id': 3, 'name': 'Arvind'}
]
ROWS_TO_UPDATE = [
{'id': 2, 'name': 'Eddie'},
{'id': 4, 'name': 'Jarcec'}
]
LOOKUP_RAW_DATA = ['id'] + [str(row['id']) for row in ROWS_IN_DATABASE]
RAW_DATA = ['name'] + [row['name'] for row in ROWS_IN_DATABASE]
DEFAULT_DB2_SCHEMA = 'DB2INST1'
@database
def test_jdbc_multitable_consumer_origin_simple(sdc_builder, sdc_executor, database):
"""
Check if Jdbc Multi-table Origin can retrieve any records from a table.
Destination is Trash.
Verify input and output (via snapshot).
"""
src_table_prefix = get_random_string(string.ascii_lowercase, 6)
table_name = '{}_{}'.format(src_table_prefix, get_random_string(string.ascii_lowercase, 20))
pipeline_builder = sdc_builder.get_pipeline_builder()
jdbc_multitable_consumer = pipeline_builder.add_stage('JDBC Multitable Consumer')
jdbc_multitable_consumer.set_attributes(table_configs=[{"tablePattern": f'%{src_table_prefix}%'}])
trash = pipeline_builder.add_stage('Trash')
jdbc_multitable_consumer >> trash
pipeline = pipeline_builder.build().configure_for_environment(database)
metadata = sqlalchemy.MetaData()
table = sqlalchemy.Table(
table_name,
metadata,
sqlalchemy.Column('id', sqlalchemy.Integer, primary_key=True),
sqlalchemy.Column('name', sqlalchemy.String(32))
)
try:
logger.info('Creating table %s in %s database ...', table_name, database.type)
table.create(database.engine)
logger.info('Adding three rows into %s database ...', database.type)
connection = database.engine.connect()
connection.execute(table.insert(), ROWS_IN_DATABASE)
sdc_executor.add_pipeline(pipeline)
snapshot = sdc_executor.capture_snapshot(pipeline=pipeline, start_pipeline=True).snapshot
sdc_executor.stop_pipeline(pipeline)
# Column names are converted to lower case since Oracle database column names are in upper case.
tuples_to_lower_name = lambda tup: (tup[0].lower(), tup[1])
rows_from_snapshot = [tuples_to_lower_name(list(record.field.items())[1])
for record in snapshot[pipeline[0].instance_name].output]
assert rows_from_snapshot == [('name', row['name']) for row in ROWS_IN_DATABASE]
finally:
logger.info('Dropping table %s in %s database...', table_name, database.type)
table.drop(database.engine)
@database
def test_jdbc_consumer_offset_resume(sdc_builder, sdc_executor, database):
"""Ensure that the Query consumer can resume where it ended and stop the pipeline when it reads all the data."""
if isinstance(database, OracleDatabase):
        pytest.skip('This test does not support Oracle and its upper-casing of column names.')
metadata = sqlalchemy.MetaData()
table_name = get_random_string(string.ascii_lowercase, 20)
table = sqlalchemy.Table(
table_name,
metadata,
sqlalchemy.Column('id', sqlalchemy.Integer, primary_key=True),
sqlalchemy.Column('name', sqlalchemy.String(32))
)
pipeline_builder = sdc_builder.get_pipeline_builder()
origin = pipeline_builder.add_stage('JDBC Query Consumer')
origin.incremental_mode = True
origin.sql_query = 'SELECT * FROM {0} WHERE '.format(table_name) + 'id > ${OFFSET} ORDER BY id'
origin.initial_offset = '0'
origin.offset_column = 'id'
trash = pipeline_builder.add_stage('Trash')
origin >> trash
finisher = pipeline_builder.add_stage("Pipeline Finisher Executor")
origin >= finisher
pipeline = pipeline_builder.build().configure_for_environment(database)
sdc_executor.add_pipeline(pipeline)
try:
logger.info('Creating table %s in %s database ...', table_name, database.type)
table.create(database.engine)
connection = database.engine.connect()
for i in range(len(ROWS_IN_DATABASE)):
# Insert one row to the database
connection.execute(table.insert(), [ROWS_IN_DATABASE[i]])
snapshot = sdc_executor.capture_snapshot(pipeline=pipeline, start_pipeline=True).snapshot
assert len(snapshot[origin].output) == 1
assert snapshot[origin].output[0].get_field_data('/id') == i + 1
# TLKT-249: Add wait_for_finished to get_status object
sdc_executor.get_pipeline_status(pipeline).wait_for_status('FINISHED')
finally:
logger.info('Dropping table %s in %s database...', table_name, database.type)
table.drop(database.engine)
@database
def test_jdbc_consumer_non_incremental_mode(sdc_builder, sdc_executor, database):
"""Ensure that the Query consumer works properly in non-incremental mode."""
if database.type == 'Oracle':
pytest.skip("This test depends on proper case for column names that Oracle auto-uppers.")
metadata = sqlalchemy.MetaData()
table_name = get_random_string(string.ascii_lowercase, 20)
table = sqlalchemy.Table(
table_name,
metadata,
sqlalchemy.Column('id', sqlalchemy.Integer, primary_key=True),
sqlalchemy.Column('name', sqlalchemy.String(32))
)
pipeline_builder = sdc_builder.get_pipeline_builder()
origin = pipeline_builder.add_stage('JDBC Query Consumer')
origin.incremental_mode = False
origin.sql_query = 'SELECT * FROM {0}'.format(table_name)
trash = pipeline_builder.add_stage('Trash')
origin >> trash
finisher = pipeline_builder.add_stage("Pipeline Finisher Executor")
origin >= finisher
pipeline = pipeline_builder.build().configure_for_environment(database)
sdc_executor.add_pipeline(pipeline)
try:
logger.info('Creating table %s in %s database ...', table_name, database.type)
table.create(database.engine)
connection = database.engine.connect()
connection.execute(table.insert(), ROWS_IN_DATABASE)
# Run the pipeline N times, it should always read the same
for i in range(3):
snapshot = sdc_executor.capture_snapshot(pipeline=pipeline, start_pipeline=True).snapshot
assert len(snapshot[origin].output) == len(ROWS_IN_DATABASE)
assert snapshot[origin].output[0].get_field_data('/id') == 1
assert snapshot[origin].output[1].get_field_data('/id') == 2
assert snapshot[origin].output[2].get_field_data('/id') == 3
# TLKT-249: Add wait_for_finished to get_status object
sdc_executor.get_pipeline_status(pipeline).wait_for_status('FINISHED')
finally:
logger.info('Jdbc No More Data: Dropping table %s in %s database...', table_name, database.type)
table.drop(database.engine)
@database
def test_jdbc_multitable_consumer_with_finisher(sdc_builder, sdc_executor, database):
"""
Test reading with Multi-table JDBC, output to trash.
Test some table names that start with numbers (SDC-5381).
    Check if the Pipeline Finisher Executor works correctly.
"""
src_table_prefix = get_random_string(string.ascii_lowercase, 6)
pipeline_builder = sdc_builder.get_pipeline_builder()
jdbc_multitable_consumer = pipeline_builder.add_stage('JDBC Multitable Consumer')
jdbc_multitable_consumer.set_attributes(table_configs=[{"tablePattern": f'%{src_table_prefix}%'}])
finisher = pipeline_builder.add_stage('Pipeline Finisher Executor')
trash = pipeline_builder.add_stage('Trash')
jdbc_multitable_consumer >= finisher
jdbc_multitable_consumer >> trash
pipeline = pipeline_builder.build().configure_for_environment(database)
sdc_executor.add_pipeline(pipeline)
random.seed()
tables = []
metadata = sqlalchemy.MetaData()
try:
connection = database.engine.connect()
num_letters = 10
num_recs = 10
num_tables = 3
for i in range(0, num_tables):
if i % 2 == 1:
# table name starts with a number, contains mixed-case letters.
input_name = '{}_{}_{}'.format(str(i), src_table_prefix,
get_random_string(string.ascii_lowercase, num_letters))
else:
# table name comprised of mixed-case letters only.
input_name = '{}_{}'.format(src_table_prefix, get_random_string(string.ascii_lowercase, num_letters))
tables.append(sqlalchemy.Table(
input_name,
metadata,
sqlalchemy.Column('serial', sqlalchemy.Integer, primary_key=True),
sqlalchemy.Column('data', sqlalchemy.Integer)
))
tables[i].create(database.engine)
rows = [{'serial': j, 'data': random.randint(0, 2100000000)} for j in range(1, num_recs + 1)]
connection.execute(tables[i].insert(), rows)
sdc_executor.start_pipeline(pipeline).wait_for_finished()
finally:
for table in tables:
table.drop(database.engine)
# SDC-11009: Run away pipeline runners in JDBC Multithread origins when no-more-data generation delay is configured
@database
@sdc_min_version('3.2.0')
def test_jdbc_multitable_consumer_with_no_more_data_event_generation_delay(sdc_builder, sdc_executor, database):
"""
Make sure that when a delayed no-more-data is being processed, the pipeline properly waits on the processing to
finish before stopping.
source >> trash
>= delay (only for no-more-data) >> trash
"""
src_table = get_random_string(string.ascii_lowercase, 6)
pipeline_builder = sdc_builder.get_pipeline_builder()
jdbc_multitable_consumer = pipeline_builder.add_stage('JDBC Multitable Consumer')
jdbc_multitable_consumer.no_more_data_event_generation_delay_in_seconds = 1
jdbc_multitable_consumer.table_configs = [{"tablePattern": f'%{src_table}%'}]
trash = pipeline_builder.add_stage('Trash')
delay = pipeline_builder.add_stage('Delay')
delay.delay_between_batches = 10 * 1000
delay.stage_record_preconditions = ['${record:eventType() == "no-more-data"}']
trash_event = pipeline_builder.add_stage('Trash')
jdbc_multitable_consumer >> trash
jdbc_multitable_consumer >= delay
delay >> trash_event
pipeline = pipeline_builder.build().configure_for_environment(database)
sdc_executor.add_pipeline(pipeline)
metadata = sqlalchemy.MetaData()
try:
connection = database.engine.connect()
table = sqlalchemy.Table(
src_table,
metadata,
sqlalchemy.Column('serial', sqlalchemy.Integer, primary_key=True)
)
table.create(database.engine)
rows = [{'serial': 1}]
connection.execute(table.insert(), rows)
# We start the pipeline
sdc_executor.start_pipeline(pipeline)
# We wait three seconds - one second for the no-more-data to be generated and then some buffer time
time.sleep(3)
        # Then we try to stop the pipeline; it should not stop immediately and should in fact wait
sdc_executor.stop_pipeline(pipeline).wait_for_stopped()
current_status = sdc_executor.get_pipeline_status(pipeline).response.json().get('status')
assert current_status == 'STOPPED'
# Validate expected metrics
history = sdc_executor.get_pipeline_history(pipeline)
# Total number of input records
assert history.latest.metrics.counter('pipeline.batchInputRecords.counter').count == 1
# 1 record, 1 no-more-data (rest of events is discarded)
assert history.latest.metrics.counter('pipeline.batchOutputRecords.counter').count == 2
# The table itself contained only one record
assert history.latest.metrics.counter('stage.Trash_01.inputRecords.counter').count == 1
# Only no-more-data event should reach the destination
assert history.latest.metrics.counter('stage.Trash_02.inputRecords.counter').count == 1
        # The max batch time should be slightly more than 10 seconds (the delayed batch that we have caused)
# TODO: TLKT-167: Add access methods to metric objects
assert history.latest.metrics.timer('pipeline.batchProcessing.timer')._data.get('max') >= 10
finally:
if table is not None:
table.drop(database.engine)
def _get_random_name(database, prefix='', length=5):
"""Generate a random string to use as a database object name.
It handles letter case according to the database type, forcing upper-case (e.g. Oracle) or lower-case
(e.g. Postgres).
Args:
database: a :obj:`streamsets.testframework.environment.Database` object.
prefix: (:obj:`str`) add a prefix to the generated name. Default: ''.
length: (:obj:`int`) number of characters of the generated name (without counting ``prefix``).
"""
if isinstance(database, OracleDatabase):
name = '{}{}'.format(prefix.upper(), get_random_string(string.ascii_uppercase))
else:
name = '{}{}'.format(prefix.lower(), get_random_string(string.ascii_lowercase))
return name
def _create_table(table_name, database, schema_name=None):
"""Helper function to create a table with two columns: id (int, PK) and name (str).
Args:
table_name: (:obj:`str`) the name for the new table.
database: a :obj:`streamsets.testframework.environment.Database` object.
schema_name: (:obj:`str`, optional) when provided, create the new table in a specific schema; otherwise,
the default schema for the engine’s database connection is used.
Return:
The new table as a sqlalchemy.Table object.
"""
metadata = sqlalchemy.MetaData()
if type(database) == SQLServerDatabase:
table = sqlalchemy.Table(table_name,
metadata,
sqlalchemy.Column('name', sqlalchemy.String(32)),
sqlalchemy.Column('id', sqlalchemy.Integer, primary_key=True,
autoincrement=False),
schema=schema_name)
else:
table = sqlalchemy.Table(table_name,
metadata,
sqlalchemy.Column('name', sqlalchemy.String(32)),
sqlalchemy.Column('id', sqlalchemy.Integer, primary_key=True),
schema=schema_name)
logger.info('Creating table %s in %s database ...', table_name, database.type)
table.create(database.engine)
return table
def _create_schema(schema_name, database):
"""Create a new schema in the database.
For RDBMs with no distinction between schema and database (e.g. MySQL), it creates a new database. For Oracle, it
creates a new user. For databases with schema objects, it creates a new schema.
Use ``_drop_schema()`` to remove schemas created by this function, to handle properly each case.
Args:
schema_name: (:obj:`str`) the schema name.
database: a :obj:`streamsets.testframework.environment.Database` object.
"""
if isinstance(database, OracleDatabase):
database.engine.execute('CREATE USER {user} IDENTIFIED BY {pwd}'.format(user=schema_name, pwd=schema_name))
database.engine.execute('GRANT CONNECT, RESOURCE TO {user}'.format(user=schema_name))
else:
schema = sqlalchemy.schema.CreateSchema(schema_name)
database.engine.execute(schema)
def _drop_schema(schema_name, database):
"""Remove a schema from the given database.
Args:
schema_name: (:obj:`str`) name of the schema to remove.
database: a :obj:`streamsets.testframework.environment.Database` object.
"""
if isinstance(database, OracleDatabase):
database.engine.execute('DROP USER {user} CASCADE'.format(user=schema_name))
else:
sqlalchemy.schema.DropSchema(schema_name)
@credentialstore
@database
def test_jdbc_lookup_processor(sdc_builder, sdc_executor, database):
"""Simple JDBC Lookup processor test.
    The pipeline enriches records with the 'name' column by adding it as a 'FirstName' field.
The pipeline looks like:
dev_raw_data_source >> jdbc_lookup >> trash
"""
table_name = get_random_string(string.ascii_lowercase, 20)
table = _create_table(table_name, database)
logger.info('Adding %s rows into %s database ...', len(ROWS_IN_DATABASE), database.type)
connection = database.engine.connect()
connection.execute(table.insert(), ROWS_IN_DATABASE)
pipeline_builder = sdc_builder.get_pipeline_builder()
dev_raw_data_source = pipeline_builder.add_stage('Dev Raw Data Source')
dev_raw_data_source.set_attributes(data_format='DELIMITED',
header_line='WITH_HEADER',
raw_data='\n'.join(LOOKUP_RAW_DATA))
jdbc_lookup = pipeline_builder.add_stage('JDBC Lookup')
query_str = f"SELECT name FROM {table_name} WHERE id = '${{record:value('/id')}}'"
column_mappings = [dict(dataType='USE_COLUMN_TYPE',
columnName='name',
field='/FirstName')]
jdbc_lookup.set_attributes(sql_query=query_str,
column_mappings=column_mappings)
trash = pipeline_builder.add_stage('Trash')
dev_raw_data_source >> jdbc_lookup >> trash
pipeline = pipeline_builder.build(title='JDBC Lookup').configure_for_environment(database)
sdc_executor.add_pipeline(pipeline)
LOOKUP_EXPECTED_DATA = copy.deepcopy(ROWS_IN_DATABASE)
for record in LOOKUP_EXPECTED_DATA:
record.pop('id')
record['FirstName'] = record.pop('name')
try:
snapshot = sdc_executor.capture_snapshot(pipeline=pipeline,
start_pipeline=True).snapshot
sdc_executor.stop_pipeline(pipeline)
rows_from_snapshot = [{list(record.field.keys())[1]: list(record.field.values())[1].value}
for record in snapshot[jdbc_lookup].output]
assert rows_from_snapshot == LOOKUP_EXPECTED_DATA
finally:
logger.info('Dropping table %s in %s database...', table_name, database.type)
table.drop(database.engine)
@database
def test_jdbc_tee_processor(sdc_builder, sdc_executor, database):
"""Simple JDBC Tee processor test.
    The pipeline inserts records into the database and then copies the database-generated 'id' column back into the record fields.
The pipeline looks like:
dev_raw_data_source >> jdbc_tee >> trash
"""
if isinstance(database, OracleDatabase):
pytest.skip('JDBC Tee Processor does not support Oracle')
elif type(database) == SQLServerDatabase:
pytest.skip('JDBC Tee Processor does not support SQL Server')
table_name = get_random_string(string.ascii_lowercase, 20)
table = _create_table(table_name, database)
pipeline_builder = sdc_builder.get_pipeline_builder()
dev_raw_data_source = pipeline_builder.add_stage('Dev Raw Data Source')
dev_raw_data_source.set_attributes(data_format='DELIMITED',
header_line='WITH_HEADER',
raw_data='\n'.join(RAW_DATA))
jdbc_tee = pipeline_builder.add_stage('JDBC Tee')
# Note that here ids are not inserted. Database generates them automatically.
field_to_column_mapping = [dict(columnName='name',
dataType='USE_COLUMN_TYPE',
field='/name',
paramValue='?')]
generated_column_mappings = [dict(columnName='id',
dataType='USE_COLUMN_TYPE',
field='/id')]
jdbc_tee.set_attributes(default_operation='INSERT',
field_to_column_mapping=field_to_column_mapping,
generated_column_mappings=generated_column_mappings,
table_name=table_name)
trash = pipeline_builder.add_stage('Trash')
dev_raw_data_source >> jdbc_tee >> trash
pipeline = pipeline_builder.build(title='JDBC Tee').configure_for_environment(database)
sdc_executor.add_pipeline(pipeline)
try:
snapshot = sdc_executor.capture_snapshot(pipeline=pipeline,
start_pipeline=True).snapshot
sdc_executor.stop_pipeline(pipeline)
        # Verify that the JDBC Tee processor has got the new ids generated by the database.
rows_from_snapshot = [{list(item.field.keys())[0]: list(item.field.values())[0].value,
list(item.field.keys())[1]: int(list(item.field.values())[1].value)}
for item in snapshot[jdbc_tee].output]
assert rows_from_snapshot == ROWS_IN_DATABASE
finally:
logger.info('Dropping table %s in %s database ...', table_name, database.type)
table.drop(database.engine)
@database
@pytest.mark.parametrize('use_multi_row', [True, False])
@sdc_min_version('3.0.0.0') # stop_after_first_batch
def test_jdbc_tee_processor_multi_ops(sdc_builder, sdc_executor, database, use_multi_row):
"""JDBC Tee processor with multiple operations
    The pipeline deletes/updates/inserts records into the database within one batch and then updates the 'id'
    field for inserted records. The 'operation' field is used for the record header sdc.operation.type,
    which defines the CRUD operation (1: Insert, 2: Delete, 3: Update). The pipeline looks like:
dev_raw_data_source >> expression evaluator >> jdbc_tee >> trash
"""
if isinstance(database, OracleDatabase):
pytest.skip('JDBC Tee Processor does not support Oracle')
elif type(database) == SQLServerDatabase:
pytest.skip('JDBC Tee Processor does not support SQL Server')
table_name = get_random_string(string.ascii_lowercase, 20)
pipeline_builder = sdc_builder.get_pipeline_builder()
DATA = [
{'operation': 2, 'name': 'Jarcec', 'id': 2}, # delete
{'operation': 3, 'name': 'Hari', 'id': 3}, # update
{'operation': 1, 'name': 'Eddie'} # insert, id will be added by JDBC Tee
]
dev_raw_data_source = pipeline_builder.add_stage('Dev Raw Data Source')
dev_raw_data_source.set_attributes(data_format='JSON',
raw_data='\n'.join(json.dumps(rec) for rec in DATA),
stop_after_first_batch=True)
HEADER_EXPRESSIONS = [dict(attributeToSet='sdc.operation.type',
headerAttributeExpression="${record:value('/operation')}")]
expression_evaluator = pipeline_builder.add_stage('Expression Evaluator')
expression_evaluator.header_attribute_expressions = HEADER_EXPRESSIONS
FIELD_TO_COLUMN = [dict(columnName='name', field='/name', paramValue='?')]
jdbc_tee = pipeline_builder.add_stage('JDBC Tee')
jdbc_tee.set_attributes(default_operation='INSERT',
field_to_column_mapping=FIELD_TO_COLUMN,
generated_column_mappings=[dict(columnName='id', field='/id')],
table_name=table_name,
use_multi_row_operation=use_multi_row)
trash = pipeline_builder.add_stage('Trash')
dev_raw_data_source >> expression_evaluator >> jdbc_tee >> trash
pipeline_title = 'JDBC Tee MultiOps MultiRow' if use_multi_row else 'JDBC Tee MultiOps SingleRow'
pipeline = pipeline_builder.build(title=pipeline_title).configure_for_environment(database)
sdc_executor.add_pipeline(pipeline)
table = _create_table(table_name, database)
try:
logger.info('Adding %s rows into %s database ...', len(ROWS_IN_DATABASE), database.type)
connection = database.engine.connect()
        # Pass only names so that the database generates the correct sequence numbers (esp. for PostgreSQL)
if type(database) == SQLServerDatabase:
connection.execute(table.insert(), [{'id': row['id'], 'name': row['name']} for row in ROWS_IN_DATABASE])
else:
connection.execute(table.insert(), [{'name': row['name']} for row in ROWS_IN_DATABASE])
snapshot = sdc_executor.capture_snapshot(pipeline=pipeline,
start_pipeline=True).snapshot
sequence_id = len(ROWS_IN_DATABASE)
# Verify the database is updated.
result = database.engine.execute(table.select())
data_from_database = sorted(result.fetchall(), key=lambda row: row[1]) # order by id
result.close()
expected_data = [(row['name'], row['id']) for row in ROWS_IN_DATABASE]
for record in DATA:
if record['operation'] == 1: # insert
sequence_id += 1
expected_data.append((record['name'], sequence_id))
elif record['operation'] == 2: # delete
expected_data = [row for row in expected_data if row[1] != record['id']]
elif record['operation'] == 3: # update
expected_data = [row if row[1] != record['id'] else (record['name'], row[1]) for row in expected_data]
assert data_from_database == expected_data
        # Verify that the JDBC Tee processor has the new ids generated by the database.
jdbc_tee_output = snapshot[jdbc_tee].output
name_id_from_output = [(record.field['name'], record.field['id']) for record in jdbc_tee_output]
assert name_id_from_output == [('Jarcec', 2), ('Hari', 3), ('Eddie', sequence_id)]
finally:
logger.info('Dropping table %s in %s database ...', table_name, database.type)
table.drop(database.engine)
@database
def test_jdbc_query_executor(sdc_builder, sdc_executor, database):
"""Simple JDBC Query Executor test.
    The pipeline inserts records into the database, and sqlalchemy is then used to verify
    that the correct data was inserted.
    This is achieved by using a deduplicator, which ensures that there is only one ingest into the database.
The pipeline looks like:
dev_raw_data_source >> record_deduplicator >> jdbc_query_executor
record_deduplicator >> trash
"""
table_name = get_random_string(string.ascii_lowercase, 20)
table = _create_table(table_name, database)
DATA = ['id,name'] + [','.join(str(item) for item in rec.values()) for rec in ROWS_IN_DATABASE]
pipeline_builder = sdc_builder.get_pipeline_builder()
dev_raw_data_source = pipeline_builder.add_stage('Dev Raw Data Source')
dev_raw_data_source.set_attributes(data_format='DELIMITED',
header_line='WITH_HEADER',
raw_data='\n'.join(DATA))
record_deduplicator = pipeline_builder.add_stage('Record Deduplicator')
jdbc_query_executor = pipeline_builder.add_stage('JDBC Query', type='executor')
query_str = f"INSERT INTO {table_name} (name, id) VALUES ('${{record:value('/name')}}', '${{record:value('/id')}}')"
if Version(sdc_builder.version) < Version('3.14.0'):
jdbc_query_executor.set_attributes(sql_query=query_str)
else:
jdbc_query_executor.set_attributes(sql_queries=[query_str])
trash = pipeline_builder.add_stage('Trash')
dev_raw_data_source >> record_deduplicator >> jdbc_query_executor
record_deduplicator >> trash
pipeline = pipeline_builder.build(title='JDBC Query Executor').configure_for_environment(database)
sdc_executor.add_pipeline(pipeline)
try:
sdc_executor.start_pipeline(pipeline).wait_for_pipeline_output_records_count(len(RAW_DATA) - 1)
sdc_executor.stop_pipeline(pipeline)
result = database.engine.execute(table.select())
data_from_database = sorted(result.fetchall(), key=lambda row: row[1]) # order by id
result.close()
assert data_from_database == [(record['name'], record['id']) for record in ROWS_IN_DATABASE]
finally:
logger.info('Dropping table %s in %s database ...', table_name, database.type)
table.drop(database.engine)
@database
@sdc_min_version('3.14.0') # multiple queries execution
def test_jdbc_query_executor_multiple_queries(sdc_builder, sdc_executor, database):
"""Simple JDBC Query Executor test.
    The pipeline inserts records into the database, and sqlalchemy is then used to verify
    that the correct data was inserted.
    This is achieved by using a deduplicator, which ensures that there is only one ingest into the database.
The pipeline looks like:
dev_raw_data_source >> record_deduplicator >> jdbc_query_executor
record_deduplicator >> trash
"""
table_name = f'stf_{get_random_string(string.ascii_lowercase, 20)}'
table = _create_table(table_name, database)
ROWS_IN_DATABASE_UPDATED = [
{'id': 1, 'name': 'Alex'},
{'id': 2, 'name': 'Alex'},
{'id': 3, 'name': 'Alex'}
]
DATA = ['id,name'] + [','.join(str(item) for item in rec.values()) for rec in ROWS_IN_DATABASE]
pipeline_builder = sdc_builder.get_pipeline_builder()
dev_raw_data_source = pipeline_builder.add_stage('Dev Raw Data Source')
dev_raw_data_source.set_attributes(data_format='DELIMITED',
header_line='WITH_HEADER',
raw_data='\n'.join(DATA))
record_deduplicator = pipeline_builder.add_stage('Record Deduplicator')
jdbc_query_executor = pipeline_builder.add_stage('JDBC Query', type='executor')
query_str1 = f"INSERT INTO {table_name} (name, id) VALUES ('${{record:value('/name')}}', '${{record:value('/id')}}')"
query_str2 = f"UPDATE {table_name} SET name = 'Alex' WHERE name = '${{record:value('/name')}}'"
jdbc_query_executor.set_attributes(sql_queries=[query_str1, query_str2])
trash = pipeline_builder.add_stage('Trash')
dev_raw_data_source >> record_deduplicator >> jdbc_query_executor
record_deduplicator >> trash
pipeline = pipeline_builder.build(title='JDBC Query Executor').configure_for_environment(database)
sdc_executor.add_pipeline(pipeline)
try:
sdc_executor.start_pipeline(pipeline).wait_for_pipeline_output_records_count(len(RAW_DATA) - 1)
sdc_executor.stop_pipeline(pipeline)
result = database.engine.execute(table.select())
data_from_database = sorted(result.fetchall(), key=lambda row: row[1]) # order by id
result.close()
assert data_from_database == [(record['name'], record['id']) for record in ROWS_IN_DATABASE_UPDATED]
finally:
logger.info(f'Dropping table {table_name} in {database.type} database ...')
table.drop(database.engine)
@database
@sdc_min_version('3.11.0')
def test_jdbc_query_executor_successful_query_event(sdc_builder, sdc_executor, database):
"""Simple JDBC Query Executor test for successful-query event type.
    The pipeline inserts records into the database, and sqlalchemy is then used to verify
    that the correct data was inserted. Event records are verified for the successful-query event type.
    This is achieved by using a deduplicator, which ensures that there is only one ingest into the database.
The pipeline looks like:
dev_raw_data_source >> record_deduplicator >> jdbc_query_executor >= trash1
record_deduplicator >> trash2
"""
table_name = get_random_string(string.ascii_lowercase, 20)
table = _create_table(table_name, database)
DATA = ['id,name'] + [','.join(str(item) for item in rec.values()) for rec in ROWS_IN_DATABASE]
pipeline_builder = sdc_builder.get_pipeline_builder()
dev_raw_data_source = pipeline_builder.add_stage('Dev Raw Data Source')
dev_raw_data_source.set_attributes(data_format='DELIMITED',
header_line='WITH_HEADER',
raw_data='\n'.join(DATA))
query_str = f"INSERT INTO {table_name} (name, id) VALUES ('${{record:value('/name')}}', '${{record:value('/id')}}')"
jdbc_query_executor = pipeline_builder.add_stage('JDBC Query', type='executor')
if Version(sdc_builder.version) < Version('3.14.0'):
jdbc_query_executor.set_attributes(sql_query=query_str)
else:
jdbc_query_executor.set_attributes(sql_queries=[query_str])
record_deduplicator = pipeline_builder.add_stage('Record Deduplicator')
trash1 = pipeline_builder.add_stage('Trash')
trash2 = pipeline_builder.add_stage('Trash')
dev_raw_data_source >> record_deduplicator >> jdbc_query_executor >= trash1
record_deduplicator >> trash2
pipeline = pipeline_builder.build(title='JDBC Query Executor').configure_for_environment(database)
sdc_executor.add_pipeline(pipeline)
try:
snapshot = sdc_executor.capture_snapshot(pipeline, start_pipeline=True).snapshot
sdc_executor.stop_pipeline(pipeline)
event_records = snapshot[jdbc_query_executor.instance_name].event_records
assert len(event_records) == 3
assert 'successful-query' == event_records[0].header['values']['sdc.event.type']
assert 'successful-query' == event_records[1].header['values']['sdc.event.type']
assert 'successful-query' == event_records[2].header['values']['sdc.event.type']
result = database.engine.execute(table.select())
data_from_database = sorted(result.fetchall(), key=lambda row: row[1]) # order by id
result.close()
assert data_from_database == [(record['name'], record['id']) for record in ROWS_IN_DATABASE]
finally:
logger.info('Dropping table %s in %s database ...', table_name, database.type)
table.drop(database.engine)
@database
@sdc_min_version('3.11.0')
def test_jdbc_query_executor_insert_query_result_count(sdc_builder, sdc_executor, database):
"""Simple JDBC Query Executor test for successful-query event type and query result count enabled.
    The pipeline inserts records into the database, and sqlalchemy is then used to verify
    that the correct data was inserted. Event records are verified for the successful-query event type
    and for the query-result field of the insert query.
    This is achieved by using a deduplicator, which ensures that there is only one ingest into the database.
The pipeline looks like:
dev_raw_data_source >> record_deduplicator >> jdbc_query_executor >= trash1
record_deduplicator >> trash2
"""
table_name = get_random_string(string.ascii_lowercase, 20)
table = _create_table(table_name, database)
DATA = ['id,name'] + [','.join(str(item) for item in rec.values()) for rec in ROWS_IN_DATABASE]
pipeline_builder = sdc_builder.get_pipeline_builder()
dev_raw_data_source = pipeline_builder.add_stage('Dev Raw Data Source')
dev_raw_data_source.set_attributes(data_format='DELIMITED',
header_line='WITH_HEADER',
raw_data='\n'.join(DATA))
query_str = f"INSERT INTO {table_name} (name, id) VALUES ('${{record:value('/name')}}', '${{record:value('/id')}}')"
jdbc_query_executor = pipeline_builder.add_stage('JDBC Query', type='executor')
jdbc_query_executor.set_attributes(include_query_result_count_in_events=True)
if Version(sdc_builder.version) < Version('3.14.0'):
jdbc_query_executor.set_attributes(sql_query=query_str)
else:
jdbc_query_executor.set_attributes(sql_queries=[query_str])
record_deduplicator = pipeline_builder.add_stage('Record Deduplicator')
trash1 = pipeline_builder.add_stage('Trash')
trash2 = pipeline_builder.add_stage('Trash')
dev_raw_data_source >> record_deduplicator >> jdbc_query_executor >= trash1
record_deduplicator >> trash2
pipeline = pipeline_builder.build(title='JDBC Query Executor').configure_for_environment(database)
sdc_executor.add_pipeline(pipeline)
try:
snapshot = sdc_executor.capture_snapshot(pipeline, start_pipeline=True).snapshot
sdc_executor.stop_pipeline(pipeline)
event_records = snapshot[jdbc_query_executor.instance_name].event_records
assert len(event_records) == 3
assert 'successful-query' == event_records[0].header['values']['sdc.event.type']
assert 'successful-query' == event_records[1].header['values']['sdc.event.type']
assert 'successful-query' == event_records[2].header['values']['sdc.event.type']
assert '1 row(s) affected' == event_records[0].value['value']['query-result']['value']
assert '1 row(s) affected' == event_records[1].value['value']['query-result']['value']
assert '1 row(s) affected' == event_records[2].value['value']['query-result']['value']
result = database.engine.execute(table.select())
data_from_database = sorted(result.fetchall(), key=lambda row: row[1]) # order by id
result.close()
assert data_from_database == [(record['name'], record['id']) for record in ROWS_IN_DATABASE]
finally:
logger.info('Dropping table %s in %s database ...', table_name, database.type)
table.drop(database.engine)
@database
@sdc_min_version('3.0.0.0')
def test_jdbc_query_executor_lifecycle_events(sdc_builder, sdc_executor, database):
"""Verify that the JDBC Query Executor will work properly when used inside pipeline lifecycle stages."""
if isinstance(database, OracleDatabase):
pytest.skip('This test does not support Oracle')
elif type(database) == SQLServerDatabase:
pytest.skip('This test does not support SQL Server')
table_name = get_random_string(string.ascii_lowercase, 20)
metadata = sqlalchemy.MetaData()
table = sqlalchemy.Table(table_name,
metadata,
sqlalchemy.Column('user', sqlalchemy.String(50)),
sqlalchemy.Column('event', sqlalchemy.String(50)))
logger.info('Creating table %s in %s database ...', table_name, database.type)
table.create(database.engine)
query = f"INSERT INTO {table_name} VALUES ('${{record:value('/user')}}', '${{record:attribute('sdc.event.type')}}')"
builder = sdc_builder.get_pipeline_builder()
source = builder.add_stage('Dev Raw Data Source')
source.stop_after_first_batch = True
source.data_format = 'TEXT'
source.raw_data='SOMETHING'
trash = builder.add_stage('Trash')
start_stage = builder.add_start_event_stage('JDBC Query')
if Version(sdc_builder.version) < Version('3.14.0'):
start_stage.set_attributes(sql_query=query)
else:
start_stage.set_attributes(sql_queries=[query])
stop_stage = builder.add_stop_event_stage('JDBC Query')
if Version(sdc_builder.version) < Version('3.14.0'):
stop_stage.set_attributes(sql_query=query)
else:
stop_stage.set_attributes(sql_queries=[query])
source >> trash
pipeline = builder.build().configure_for_environment(database)
sdc_executor.add_pipeline(pipeline)
try:
sdc_executor.start_pipeline(pipeline).wait_for_finished()
result = database.engine.execute(table.select())
db = sorted(result.fetchall(), key=lambda row: row[1])
result.close()
assert db[0][0] == 'admin'
assert db[0][1] == 'pipeline-start'
assert db[1][0] == ''
assert db[1][1] == 'pipeline-stop'
finally:
logger.info('Dropping table %s in %s database ...', table_name, database.type)
table.drop(database.engine)
@database
def test_jdbc_query_executor_failure_state(sdc_builder, sdc_executor, database):
"""Verify that the executor is properly called with the proper state on pipeline initialization failure."""
table_name = get_random_string(string.ascii_lowercase, 20)
metadata = sqlalchemy.MetaData()
table = sqlalchemy.Table(table_name,
metadata,
sqlalchemy.Column('reason', sqlalchemy.String(50)))
logger.info('Creating table %s in %s database ...', table_name, database.type)
table.create(database.engine)
query = f"INSERT INTO {table_name} VALUES ('${{record:value('/reason')}}')"
builder = sdc_builder.get_pipeline_builder()
source = builder.add_stage('JDBC Multitable Consumer')
source.table_configs=[{"tablePattern": 'this_table_do_not_exists'}]
trash = builder.add_stage('Trash')
stop_stage = builder.add_stop_event_stage('JDBC Query')
if Version(sdc_builder.version) < Version('3.14.0'):
stop_stage.set_attributes(sql_query=query)
else:
stop_stage.set_attributes(sql_queries=[query])
source >> trash
pipeline = builder.build().configure_for_environment(database)
# Injecting failure - this URL doesn't exist, so the pipeline won't be able to start properly
source.jdbc_connection_string = "jdbc:mysql://this-do-not-exists:3306/awesome-db"
sdc_executor.add_pipeline(pipeline)
try:
sdc_executor.start_pipeline(pipeline, wait=False).wait_for_status('START_ERROR', ignore_errors=True)
result = database.engine.execute(table.select())
db = result.fetchall()
result.close()
assert db[0][0] == 'FAILURE'
finally:
logger.info('Dropping table %s in %s database ...', table_name, database.type)
table.drop(database.engine)
@database
@sdc_min_version('3.11.0')
def test_jdbc_query_executor_select_query_result_count(sdc_builder, sdc_executor, database):
"""Simple JDBC Query Executor test for successful-query event type and query result count enabled.
The pipeline inserts records into the database; sqlalchemy is then used to verify that the correct data
was inserted, and the same data is then queried. Event records are
verified for the successful-query event type and the query-result field for the select query.
A deduplicator ensures there is only one ingest to the database.
The pipeline looks like:
dev_raw_data_source >> record_deduplicator >> jdbc_query_executor1 >= jdbc_query_executor2 >= trash1
record_deduplicator >> trash2
"""
table_name = get_random_string(string.ascii_lowercase, 20)
table = _create_table(table_name, database)
DATA = ['id,name'] + [','.join(str(item) for item in rec.values()) for rec in ROWS_IN_DATABASE]
pipeline_builder = sdc_builder.get_pipeline_builder()
dev_raw_data_source = pipeline_builder.add_stage('Dev Raw Data Source')
dev_raw_data_source.set_attributes(data_format='DELIMITED',
header_line='WITH_HEADER',
raw_data='\n'.join(DATA))
query_str1 = f"INSERT INTO {table_name} (name, id) VALUES ('${{record:value('/name')}}', '${{record:value('/id')}}')"
query_str2 = f"SELECT * FROM {table_name}"
jdbc_query_executor1 = pipeline_builder.add_stage('JDBC Query', type='executor')
if Version(sdc_builder.version) < Version('3.14.0'):
jdbc_query_executor1.set_attributes(sql_query=query_str1)
else:
jdbc_query_executor1.set_attributes(sql_queries=[query_str1])
jdbc_query_executor2 = pipeline_builder.add_stage('JDBC Query', type='executor')
jdbc_query_executor2.set_attributes(include_query_result_count_in_events=True)
if Version(sdc_builder.version) < Version('3.14.0'):
jdbc_query_executor2.set_attributes(sql_query=query_str2)
else:
jdbc_query_executor2.set_attributes(sql_queries=[query_str2])
record_deduplicator = pipeline_builder.add_stage('Record Deduplicator')
trash1 = pipeline_builder.add_stage('Trash')
trash2 = pipeline_builder.add_stage('Trash')
dev_raw_data_source >> record_deduplicator >> jdbc_query_executor1 >= jdbc_query_executor2 >= trash1
record_deduplicator >> trash2
pipeline = pipeline_builder.build(title='JDBC Query Executor').configure_for_environment(database)
sdc_executor.add_pipeline(pipeline)
try:
snapshot = sdc_executor.capture_snapshot(pipeline, start_pipeline=True).snapshot
sdc_executor.stop_pipeline(pipeline)
event_records = snapshot[jdbc_query_executor2.instance_name].event_records
assert len(event_records) == 3
assert 'successful-query' == event_records[0].header['values']['sdc.event.type']
assert 'successful-query' == event_records[1].header['values']['sdc.event.type']
assert 'successful-query' == event_records[2].header['values']['sdc.event.type']
assert '3 row(s) returned' == event_records[0].value['value']['query-result']['value']
assert '3 row(s) returned' == event_records[1].value['value']['query-result']['value']
assert '3 row(s) returned' == event_records[2].value['value']['query-result']['value']
result = database.engine.execute(table.select())
result.close()
finally:
logger.info('Dropping table %s in %s database ...', table_name, database.type)
table.drop(database.engine)
@database
@sdc_min_version('3.11.0')
def test_jdbc_query_executor_failed_query_event(sdc_builder, sdc_executor, database):
"""Simple JDBC Query Executor test for failed-query event type.
The pipeline tries to insert records into a non-existent table, so the query fails.
Event records are verified for the failed-query event type.
A deduplicator ensures there is only one ingest to the database.
The pipeline looks like:
dev_raw_data_source >> record_deduplicator >> jdbc_query_executor >= trash1
record_deduplicator >> trash2
"""
table_name = get_random_string(string.ascii_lowercase, 20)
table = _create_table(table_name, database)
DATA = ['id,name'] + [','.join(str(item) for item in rec.values()) for rec in ROWS_IN_DATABASE]
pipeline_builder = sdc_builder.get_pipeline_builder()
dev_raw_data_source = pipeline_builder.add_stage('Dev Raw Data Source')
dev_raw_data_source.set_attributes(data_format='DELIMITED',
header_line='WITH_HEADER',
raw_data='\n'.join(DATA))
invalid_table = "INVALID_TABLE"
query_str = f"INSERT INTO {invalid_table} (name, id) VALUES ('${{record:value('/name')}}', '${{record:value('/id')}}')"
jdbc_query_executor = pipeline_builder.add_stage('JDBC Query', type='executor')
if Version(sdc_builder.version) < Version('3.14.0'):
jdbc_query_executor.set_attributes(sql_query=query_str)
else:
jdbc_query_executor.set_attributes(sql_queries=[query_str])
record_deduplicator = pipeline_builder.add_stage('Record Deduplicator')
trash1 = pipeline_builder.add_stage('Trash')
trash2 = pipeline_builder.add_stage('Trash')
dev_raw_data_source >> record_deduplicator >> jdbc_query_executor >= trash1
record_deduplicator >> trash2
pipeline = pipeline_builder.build(title='JDBC Query Executor').configure_for_environment(database)
sdc_executor.add_pipeline(pipeline)
try:
snapshot = sdc_executor.capture_snapshot(pipeline, start_pipeline=True).snapshot
sdc_executor.stop_pipeline(pipeline)
event_records = snapshot[jdbc_query_executor.instance_name].event_records
assert len(event_records) == 3
assert 'failed-query' == event_records[0].header['values']['sdc.event.type']
assert 'failed-query' == event_records[1].header['values']['sdc.event.type']
assert 'failed-query' == event_records[2].header['values']['sdc.event.type']
result = database.engine.execute(table.select())
data_from_database = sorted(result.fetchall(), key=lambda row: row[1]) # order by id
result.close()
assert data_from_database == []
finally:
logger.info('Dropping table %s in %s database ...', table_name, database.type)
table.drop(database.engine)
@database
@sdc_min_version('3.10.0')
@pytest.mark.parametrize('enable_parallel_execution', [True, False])
def test_jdbc_query_executor_parallel_query_execution(sdc_builder, sdc_executor, database, enable_parallel_execution):
"""Test JDBC Query Executor's parallel query execution mode.
The pipeline inserts records into the database and then updates them.
Using sqlalchemy, we verify that the correct data was inserted (and updated) in the database.
Pipeline configuration:
dev_raw_data_source >> jdbc_query_executor
"""
table_name = get_random_string(string.ascii_uppercase, 20)
table = _create_table(table_name, database)
# Make sure that we properly escape the table name. Ideally we would escape it for all databases, but since we
# know that all except PostgreSQL are passing, we only escape it for PostgreSQL for now.
enclosed_table = f'"{table_name}"' if type(database) == PostgreSqlDatabase else table_name
# first, the inserts - they will run in parallel,
# then all the updates will run sequentially
# net result is all records should get updated to the (last) new value.
# otherwise we've failed.
statements = []
for rec in ROWS_IN_DATABASE:
statements.extend([f"INSERT INTO {enclosed_table} (name, id) VALUES ('{rec['name']}', {rec['id']})",
f"UPDATE {enclosed_table} SET name = 'bob' WHERE id = {rec['id']}",
f"UPDATE {enclosed_table} SET name = 'MERRICK' WHERE id = {rec['id']}"])
# convert to string - Dev Raw Data Source Data Format tab does not seem
# to "unroll" the array into newline-terminated records.
statements = "\n".join(statements)
pipeline_builder = sdc_builder.get_pipeline_builder()
dev_raw_data_source = pipeline_builder.add_stage('Dev Raw Data Source')
dev_raw_data_source.set_attributes(data_format='TEXT', raw_data=statements)
jdbc_query_executor = pipeline_builder.add_stage('JDBC Query', type='executor')
query_str = "${record:value('/text')}"
jdbc_query_executor.set_attributes(enable_parallel_queries=enable_parallel_execution,
maximum_pool_size=2,
minimum_idle_connections=2)
if Version(sdc_builder.version) < Version('3.14.0'):
jdbc_query_executor.set_attributes(sql_query=query_str)
else:
jdbc_query_executor.set_attributes(sql_queries=[query_str])
dev_raw_data_source >> jdbc_query_executor
pipeline = pipeline_builder.build().configure_for_environment(database)
sdc_executor.add_pipeline(pipeline)
try:
sdc_executor.start_pipeline(pipeline).wait_for_pipeline_output_records_count(len(ROWS_IN_DATABASE)*3)
sdc_executor.stop_pipeline(pipeline)
result = database.engine.execute(table.select())
data_from_database = sorted(result.fetchall(), key=lambda row: row[1]) # order by id
result.close()
assert data_from_database == [('MERRICK', record['id']) for record in ROWS_IN_DATABASE]
finally:
logger.info('Dropping table %s in %s database ...', table_name, database.type)
table.drop(database.engine)
def _create_jdbc_producer_pipeline(pipeline_builder, pipeline_title, raw_data, table_name, operation):
"""Helper function to create and return a pipeline with JDBC Producer
The Deduplicator assures there is only one ingest to the database. The pipeline looks like:
dev_raw_data_source >> record_deduplicator >> jdbc_producer
record_deduplicator >> trash
"""
dev_raw_data_source = pipeline_builder.add_stage('Dev Raw Data Source')
dev_raw_data_source.set_attributes(data_format='JSON', raw_data=raw_data)
record_deduplicator = pipeline_builder.add_stage('Record Deduplicator')
FIELD_MAPPINGS = [dict(field='/id', columnName='id'),
dict(field='/name', columnName='name')]
jdbc_producer = pipeline_builder.add_stage('JDBC Producer')
jdbc_producer.set_attributes(default_operation=operation,
table_name=table_name,
field_to_column_mapping=FIELD_MAPPINGS,
stage_on_record_error='STOP_PIPELINE')
trash = pipeline_builder.add_stage('Trash')
dev_raw_data_source >> record_deduplicator >> jdbc_producer
record_deduplicator >> trash
return pipeline_builder.build(title=pipeline_title)
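# A minimal usage sketch of the helper above (mirroring the INSERT test that follows); the title, raw
# data and table name are illustrative placeholders:
#
#   builder = sdc_builder.get_pipeline_builder()
#   pipeline = _create_jdbc_producer_pipeline(builder, 'Example Insert', '{"id": 1, "name": "Dima"}',
#                                             'example_table', 'INSERT')
#   sdc_executor.add_pipeline(pipeline.configure_for_environment(database))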
@database
def test_jdbc_producer_insert(sdc_builder, sdc_executor, database):
"""Simple JDBC Producer test with INSERT operation.
The pipeline inserts records into the database and verifies that the correct data is in the database.
"""
table_name = get_random_string(string.ascii_lowercase, 20)
table = _create_table(table_name, database)
DATA = '\n'.join(json.dumps(rec) for rec in ROWS_IN_DATABASE)
pipeline_builder = sdc_builder.get_pipeline_builder()
pipeline = _create_jdbc_producer_pipeline(pipeline_builder, 'JDBC Producer Insert', DATA, table_name, 'INSERT')
sdc_executor.add_pipeline(pipeline.configure_for_environment(database))
try:
sdc_executor.start_pipeline(pipeline).wait_for_pipeline_output_records_count(len(ROWS_IN_DATABASE))
sdc_executor.stop_pipeline(pipeline)
result = database.engine.execute(table.select())
data_from_database = sorted(result.fetchall(), key=lambda row: row[1]) # order by id
result.close()
assert data_from_database == [(record['name'], record['id']) for record in ROWS_IN_DATABASE]
finally:
logger.info('Dropping table %s in %s database ...', table_name, database.type)
table.drop(database.engine)
@database('mysql', 'postgresql')
def test_jdbc_producer_insert_type_err(sdc_builder, sdc_executor, database):
"""This test covers invalid type coersion - writing string into int column. As different databases works differently,
we can't assert this across all supported databases. MySQL and PostgreSQL behaves the same way and we can properly
catch and generate JDBC_23. Other databases report coercion issues much later in the query cycle, sometimes even
in a way where we can't understand what and why has happened.
"""
ROWS_IN_DATABASE = [
{'id': 1, 'name': 'Dima'},
{'id': 'X', 'name': 'Jarcec'},
{'id': 3, 'name': 'Arvind'}
]
table_name = get_random_string(string.ascii_lowercase, 20)
table = _create_table(table_name, database)
DATA = '\n'.join(json.dumps(rec) for rec in ROWS_IN_DATABASE)
pipeline_builder = sdc_builder.get_pipeline_builder()
dev_raw_data_source = pipeline_builder.add_stage('Dev Raw Data Source')
dev_raw_data_source.set_attributes(data_format='JSON', raw_data=DATA, stop_after_first_batch=True)
FIELD_MAPPINGS = [dict(field='/id', columnName='id', dataType='INTEGER'),
dict(field='/name', columnName='name', dataType='STRING')]
jdbc_producer = pipeline_builder.add_stage('JDBC Producer')
jdbc_producer.set_attributes(default_operation='INSERT',
table_name=table_name,
field_to_column_mapping=FIELD_MAPPINGS,
stage_on_record_error='TO_ERROR')
dev_raw_data_source >> jdbc_producer
pipeline = pipeline_builder.build(title="JDBC producer with error")
sdc_executor.add_pipeline(pipeline.configure_for_environment(database))
try:
snapshot = sdc_executor.capture_snapshot(pipeline, start_pipeline=True).snapshot
sdc_executor.get_pipeline_status(pipeline).wait_for_status('FINISHED')
result = database.engine.execute(table.select())
data_from_database = sorted(result.fetchall(), key=lambda row: row[1]) # order by id
result.close()
assert data_from_database == [(record['name'], record['id']) for record in ROWS_IN_DATABASE
if record['id'] != 'X']
stage = snapshot[jdbc_producer.instance_name]
assert 'JDBC_23' == stage.error_records[0].header['errorCode']
finally:
logger.info('Dropping table %s in %s database ...', table_name, database.type)
table.drop(database.engine)
@database
def test_jdbc_producer_insert_multiple_types(sdc_builder, sdc_executor, database):
"""Simple JDBC Producer test with INSERT operation.
The pipeline inserts one large batch (batch_size = 10,000) of records of multiple types.
The pipeline should look like:
dev_data_generator >> jdbc_producer
"""
pipeline_builder = sdc_builder.get_pipeline_builder()
dev_data_generator = pipeline_builder.add_stage('Dev Data Generator')
dev_data_generator.fields_to_generate = [
{'field': 'field1', 'type': 'STRING'},
{'field': 'field2', 'type': 'DATETIME'},
{'field': 'field3', 'type': 'INTEGER'},
{'field': 'field4', 'precision': 10, 'scale': 2, 'type': 'DECIMAL'},
{'field': 'field5', 'type': 'DOUBLE'}
]
batch_size = 10000
dev_data_generator.set_attributes(delay_between_batches=0, batch_size=batch_size)
table_name = get_random_string(string.ascii_lowercase, 20)
metadata = sqlalchemy.MetaData()
table = sqlalchemy.Table(table_name,
metadata,
sqlalchemy.Column('field1', sqlalchemy.String(50)),
sqlalchemy.Column('field2', sqlalchemy.DateTime),
sqlalchemy.Column('field3', sqlalchemy.Integer),
sqlalchemy.Column('field4', sqlalchemy.DECIMAL(10, 2)),
sqlalchemy.Column('field5', sqlalchemy.Float),
schema=None)
logger.info('Creating table %s in %s database ...', table_name, database.type)
table.create(database.engine)
FIELD_MAPPINGS = [dict(field='/field1', columnName='field1', dataType='STRING'),
dict(field='/field2', columnName='field2', dataType='DATETIME'),
dict(field='/field3', columnName='field3', dataType='INTEGER'),
dict(field='/field4', columnName='field4', dataType='DECIMAL'),
dict(field='/field5', columnName='field5', dataType='FLOAT')]
jdbc_producer = pipeline_builder.add_stage('JDBC Producer')
jdbc_producer.set_attributes(default_operation='INSERT',
table_name=table_name,
field_to_column_mapping=FIELD_MAPPINGS,
stage_on_record_error='TO_ERROR')
dev_data_generator >> jdbc_producer
pipeline = pipeline_builder.build(title="JDBC producer multiple types")
sdc_executor.add_pipeline(pipeline.configure_for_environment(database))
try:
sdc_executor.start_pipeline(pipeline).wait_for_pipeline_output_records_count(batch_size, timeout_sec=3600)
snapshot = sdc_executor.capture_snapshot(pipeline).snapshot
sdc_executor.stop_pipeline(pipeline).wait_for_stopped()
result = database.engine.execute(table.select())
data_from_database = sorted(result.fetchall(), key=lambda row: row[1]) # order by field2
result.close()
assert len(data_from_database) > batch_size
stage = snapshot[jdbc_producer.instance_name]
assert len(stage.error_records) == 0
finally:
logger.info('Dropping table %s in %s database ...', table_name, database.type)
table.drop(database.engine)
# SDC-10786: This test intends to cover the case of really precise decimals being inserted into a Float column in MSSQL
@database('sqlserver')
def test_mssql_producer_bigdecimal(sdc_builder, sdc_executor, database):
"""
Insert a Decimal value with up to 38 decimals into a Float column in MSSQL.
This will look like:
dev_data_generator >> jdbc_producer
"""
table_name = get_random_string(string.ascii_lowercase, 20)
table = sqlalchemy.Table(
table_name,
sqlalchemy.MetaData(),
sqlalchemy.Column('a_value', sqlalchemy.Float()),
sqlalchemy.Column('b_value', sqlalchemy.Float()),
sqlalchemy.Column('c_value', sqlalchemy.Float()),
sqlalchemy.Column('id', sqlalchemy.Integer, primary_key=True, autoincrement=False)
)
table.create(database.engine)
pipeline_builder = sdc_builder.get_pipeline_builder()
dev_data_generator = pipeline_builder.add_stage('Dev Data Generator')
dev_data_generator.fields_to_generate = [{'field': 'id', 'type': 'INTEGER'},
{'field': 'a_value', 'precision': 50, 'scale': 40, 'type': 'DECIMAL'},
{'field': 'b_value', 'precision': 5, 'scale': 2, 'type': 'DECIMAL'},
{'field': 'c_value', 'type': 'DECIMAL'}]
dev_data_generator.batch_size = 1
FIELD_MAPPINGS = [dict(field='/id', columnName='id'),
dict(field='/a_value', columnName='a_value'),
dict(field='/b_value', columnName='b_value'),
dict(field='/c_value', columnName='c_value')]
jdbc_producer = pipeline_builder.add_stage('JDBC Producer')
jdbc_producer.set_attributes(default_operation='INSERT',
table_name=table_name,
field_to_column_mapping=FIELD_MAPPINGS,
stage_on_record_error='STOP_PIPELINE')
dev_data_generator >> jdbc_producer
pipeline = pipeline_builder.build('MSSQL BigDecimal')
sdc_executor.add_pipeline(pipeline.configure_for_environment(database))
try:
snapshot = sdc_executor.capture_snapshot(pipeline, start_pipeline=True, wait=True).snapshot
sdc_executor.stop_pipeline(pipeline)
records = [record.field for record in snapshot[dev_data_generator.instance_name].output]
result = database.engine.execute(table.select())
data_from_database = sorted(result.fetchall(), key=lambda row: row[0]) # order by a_value
result.close()
assert len(data_from_database) == 1
assert math.isclose(float(str(records[0]['a_value'])), data_from_database[0][0], rel_tol=0.02)
assert math.isclose(float(str(records[0]['b_value'])), data_from_database[0][1], rel_tol=0.02)
assert math.isclose(float(str(records[0]['c_value'])), data_from_database[0][2], rel_tol=0.02)
assert math.isclose(float(str(records[0]['id'])), data_from_database[0][3], rel_tol=0.02)
finally:
logger.info('Dropping table %s in %s database ...', table_name, database.type)
table.drop(database.engine)
@database
def test_jdbc_producer_coerced_insert(sdc_builder, sdc_executor, database):
"""Extension of the Simple JDBC Producer test with INSERT operation.
The pipeline inserts records into the database.
The records represent the id as type String, while the column is of type Integer.
This should be passed to the database to coerce.
Verify that correct data is in the database.
Please note the use of local COERCE_ROWS_IN_DATABASE to insert
and global ROWS_IN_DATABASE to verify.
COERCE_ has id (integer) set to string.
"""
table_name = get_random_string(string.ascii_lowercase, 20)
table = _create_table(table_name, database)
COERCE_ROWS_IN_DATABASE = [
{'id': '1', 'name': 'Dima'},
{'id': '2', 'name': 'Jarcec'},
{'id': '3', 'name': 'Arvind'}
]
DATA = '\n'.join(json.dumps(rec) for rec in COERCE_ROWS_IN_DATABASE)
pipeline_builder = sdc_builder.get_pipeline_builder()
pipeline = _create_jdbc_producer_pipeline(pipeline_builder, 'JDBC Producer Insert', DATA, table_name, 'INSERT')
sdc_executor.add_pipeline(pipeline.configure_for_environment(database))
try:
sdc_executor.start_pipeline(pipeline).wait_for_pipeline_output_records_count(len(ROWS_IN_DATABASE))
sdc_executor.stop_pipeline(pipeline)
result = database.engine.execute(table.select())
data_from_database = sorted(result.fetchall(), key=lambda row: row[1]) # order by id
result.close()
assert data_from_database == [(record['name'], record['id']) for record in ROWS_IN_DATABASE]
finally:
logger.info('Dropping table %s in %s database ...', table_name, database.type)
table.drop(database.engine)
@database
def test_jdbc_producer_delete(sdc_builder, sdc_executor, database):
"""Simple JDBC Producer test with DELETE operation.
The pipeline deletes records from the database and verifies that the correct data is in the database.
Records are deleted if the primary key is matched irrespective of other column values.
"""
table_name = get_random_string(string.ascii_lowercase, 20)
table = _create_table(table_name, database)
logger.info('Adding %s rows into %s database ...', len(ROWS_IN_DATABASE), database.type)
connection = database.engine.connect()
connection.execute(table.insert(), ROWS_IN_DATABASE)
DATA = '\n'.join(json.dumps(rec) for rec in ROWS_TO_UPDATE)
pipeline_builder = sdc_builder.get_pipeline_builder()
pipeline = _create_jdbc_producer_pipeline(pipeline_builder, 'JDBC Producer Delete', DATA, table_name, 'DELETE')
sdc_executor.add_pipeline(pipeline.configure_for_environment(database))
try:
sdc_executor.start_pipeline(pipeline).wait_for_pipeline_output_records_count(len(ROWS_TO_UPDATE))
sdc_executor.stop_pipeline(pipeline)
result = database.engine.execute(table.select())
data_from_database = result.fetchall()
result.close()
removed_ids = [record['id'] for record in ROWS_TO_UPDATE]
assert data_from_database == [(record['name'], record['id']) for record in ROWS_IN_DATABASE if
record['id'] not in removed_ids]
finally:
logger.info('Dropping table %s in %s database ...', table_name, database.type)
table.drop(database.engine)
@database
def test_jdbc_producer_update(sdc_builder, sdc_executor, database):
"""Simple JDBC Producer test with UPDATE operation.
The pipeline updates records in the database and verifies that the correct data is in the database.
Records with a matching primary key are updated; unmatched records are left untouched.
"""
table_name = get_random_string(string.ascii_lowercase, 20)
table = _create_table(table_name, database)
logger.info('Adding %s rows into %s database ...', len(ROWS_IN_DATABASE), database.type)
connection = database.engine.connect()
connection.execute(table.insert(), ROWS_IN_DATABASE)
DATA = '\n'.join(json.dumps(rec) for rec in ROWS_TO_UPDATE)
pipeline_builder = sdc_builder.get_pipeline_builder()
pipeline = _create_jdbc_producer_pipeline(pipeline_builder, 'JDBC Producer Update', DATA, table_name, 'UPDATE')
sdc_executor.add_pipeline(pipeline.configure_for_environment(database))
try:
sdc_executor.start_pipeline(pipeline).wait_for_pipeline_output_records_count(len(ROWS_TO_UPDATE))
sdc_executor.stop_pipeline(pipeline)
result = database.engine.execute(table.select())
data_from_database = sorted(result.fetchall(), key=lambda row: row[1]) # order by id
result.close()
updated_names = {record['id']: record['name'] for record in ROWS_IN_DATABASE}
updated_names.update({record['id']: record['name'] for record in ROWS_TO_UPDATE})
assert data_from_database == [(updated_names[record['id']], record['id']) for record in ROWS_IN_DATABASE]
finally:
logger.info('Dropping table %s in %s database ...', table_name, database.type)
table.drop(database.engine)
# SDC-10987: JDBC Multitable Consumer multiple offset columns with initial offset
@database
def test_jdbc_multitable_consumer_initial_offset_at_the_end(sdc_builder, sdc_executor, database):
"""
Set initial offset at the end of the table and verify that no records were read.
"""
table_name = get_random_string(string.ascii_lowercase, 10)
builder = sdc_builder.get_pipeline_builder()
jdbc_multitable_consumer = builder.add_stage('JDBC Multitable Consumer')
jdbc_multitable_consumer.table_configs = [{
"tablePattern": table_name,
"overrideDefaultOffsetColumns": True,
"offsetColumns": ["id"],
"offsetColumnToInitialOffsetValue": [{
"key": "id",
"value": "5"
}]
}]
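# With the initial offset for 'id' set to 5 and ROWS_IN_DATABASE only containing smaller ids, the origin
# starts past every existing row, which is why the assertions below expect zero input and output records.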
trash = builder.add_stage('Trash')
jdbc_multitable_consumer >> trash
pipeline = builder.build().configure_for_environment(database)
metadata = sqlalchemy.MetaData()
table = sqlalchemy.Table(
table_name,
metadata,
sqlalchemy.Column('id', sqlalchemy.Integer, primary_key=True, quote=True),
sqlalchemy.Column('name', sqlalchemy.String(32), quote=True)
)
try:
logger.info('Creating table %s in %s database ...', table_name, database.type)
table.create(database.engine)
logger.info('Adding three rows into %s database ...', database.type)
connection = database.engine.connect()
connection.execute(table.insert(), ROWS_IN_DATABASE)
sdc_executor.add_pipeline(pipeline)
sdc_executor.start_pipeline(pipeline)
# Since the pipeline is not meant to read anything, we 'simply' wait
time.sleep(5)
sdc_executor.stop_pipeline(pipeline)
# There must be no records read
history = sdc_executor.get_pipeline_history(pipeline)
assert history.latest.metrics.counter('pipeline.batchInputRecords.counter').count == 0
assert history.latest.metrics.counter('pipeline.batchOutputRecords.counter').count == 0
finally:
logger.info('Dropping table %s in %s database...', table_name, database.type)
table.drop(database.engine)
# SDC-10562: Row-level stage errors not being caught at pipeline
@sdc_min_version('3.0.0.0')
@database
def test_jdbc_producer_multirow_with_duplicates(sdc_builder, sdc_executor, database):
"""
Make sure that when using Multi Row insert, data-related errors are sent to the error stream.
"""
if type(database) == SQLServerDatabase:
pytest.skip('This test is trying to insert explicit value to identity column which is not supported on SQL Server')
table_name = get_random_string(string.ascii_lowercase, 15)
builder = sdc_builder.get_pipeline_builder()
# Generate batch that will repeat the same primary key in the middle of the batch (on third row)
source = builder.add_stage('Dev Raw Data Source')
source.stop_after_first_batch = True
source.data_format = 'JSON'
source.raw_data = """{"id" : 1}\n{"id" : 2}\n{"id" : 1}\n{"id" : 3}"""
producer = builder.add_stage('JDBC Producer')
producer.table_name = table_name
producer.field_to_column_mapping = []
producer.default_operation = 'INSERT'
producer.use_multi_row_operation = True
if database.type == 'Oracle':
producer.enclose_object_names = True
source >> producer
pipeline = builder.build().configure_for_environment(database)
metadata = sqlalchemy.MetaData()
table = sqlalchemy.Table(
table_name,
metadata,
sqlalchemy.Column('id', sqlalchemy.Integer, primary_key=True, quote=True)
)
try:
logger.info('Creating table %s in %s database ...', table_name, database.type)
table.create(database.engine)
sdc_executor.add_pipeline(pipeline)
sdc_executor.start_pipeline(pipeline).wait_for_finished()
# Since we are inserting duplicate primary key, the batch should fail
history = sdc_executor.get_pipeline_history(pipeline)
assert history.latest.metrics.counter('pipeline.batchInputRecords.counter').count == 4
assert history.latest.metrics.counter('pipeline.batchErrorRecords.counter').count == 4
assert history.latest.metrics.counter('pipeline.batchOutputRecords.counter').count == 0
# And similarly the database side should be empty as well
result = database.engine.execute(table.select())
data_from_database = result.fetchall()
result.close()
assert len(data_from_database) == 0
finally:
logger.info('Dropping table %s in %s database...', table_name, database.type)
table.drop(database.engine)
@database
def test_jdbc_producer_multitable(sdc_builder, sdc_executor, database):
"""Test for JDBC Producer with multiple destination table. We create 3 tables in the default schema and use an EL
expression to insert records according to the /table record field.
Pipeline:
dev_raw_data_source >> record_deduplicator >> jdbc_producer
record_deduplicator >> trash
"""
table1_name = _get_random_name(database, prefix='stf_table_')
table2_name = _get_random_name(database, prefix='stf_table_')
table3_name = _get_random_name(database, prefix='stf_table_')
table1 = _create_table(table1_name, database)
table2 = _create_table(table2_name, database)
table3 = _create_table(table3_name, database)
ROWS = [{'table': table1_name, 'id': 1, 'name': 'Roger Federer'},
{'table': table2_name, 'id': 2, 'name': 'Rafael Nadal'},
{'table': table3_name, 'id': 3, 'name': 'Dominic Thiem'}]
INPUT_DATA = '\n'.join(json.dumps(rec) for rec in ROWS)
pipeline_builder = sdc_builder.get_pipeline_builder()
pipeline = _create_jdbc_producer_pipeline(pipeline_builder, 'JDBC Producer Multitable Insert', INPUT_DATA,
"${record:value('/table')}", 'INSERT')
# JDBC Producer's "Table Name" property is converted to uppercase through the configure_for_environment() method
# when database is Oracle. However EL function names are case-sensitive; we overwrite it afterwards to avoid an EL
# error.
pipeline.configure_for_environment(database)
pipeline[2].set_attributes(table_name="${record:value('/table')}")
# For Oracle, the default value of JDBC Producer's "Schema Name" property in the database environment is the
# database name, but it should be the username instead.
if isinstance(database, OracleDatabase):
pipeline[2].set_attributes(schema_name=database.username.upper())
sdc_executor.add_pipeline(pipeline)
try:
sdc_executor.start_pipeline(pipeline).wait_for_pipeline_output_records_count(len(ROWS))
sdc_executor.stop_pipeline(pipeline)
result1 = database.engine.execute(table1.select())
result2 = database.engine.execute(table2.select())
result3 = database.engine.execute(table3.select())
data1 = result1.fetchall()
data2 = result2.fetchall()
data3 = result3.fetchall()
assert data1 == [(ROWS[0]['name'], ROWS[0]['id'])]
assert data2 == [(ROWS[1]['name'], ROWS[1]['id'])]
assert data3 == [(ROWS[2]['name'], ROWS[2]['id'])]
result1.close()
result2.close()
result3.close()
finally:
logger.info('Dropping tables %s, %s, %s in %s database...', table1_name, table2_name, table3_name,
database.type)
table1.drop(database.engine)
table2.drop(database.engine)
table3.drop(database.engine)
# Test SDC-10719
@database
@sdc_min_version('3.8.0')
def test_jdbc_producer_multischema(sdc_builder, sdc_executor, database):
"""Test for JDBC Producer in a multischema scenario with a single destination table for each schema. We create 3
schemas with one table for each, with the same name. Then we use an EL expression to insert records according to
the /schema record field.
Pipeline:
dev_raw_data_source >> record_deduplicator >> jdbc_producer
record_deduplicator >> trash
"""
schema1_name = _get_random_name(database, prefix='stf_schema_')
schema2_name = _get_random_name(database, prefix='stf_schema_')
schema3_name = _get_random_name(database, prefix='stf_schema_')
table_name = _get_random_name(database, prefix='stf_table_')
_create_schema(schema1_name, database)
_create_schema(schema2_name, database)
_create_schema(schema3_name, database)
table1 = _create_table(table_name, database, schema_name=schema1_name)
table2 = _create_table(table_name, database, schema_name=schema2_name)
table3 = _create_table(table_name, database, schema_name=schema3_name)
ROWS = [{'schema': schema1_name, 'id': 1, 'name': 'Roger Federer'},
{'schema': schema2_name, 'id': 2, 'name': 'Rafael Nadal'},
{'schema': schema3_name, 'id': 3, 'name': 'Dominic Thiem'}]
INPUT_DATA = '\n'.join(json.dumps(rec) for rec in ROWS)
pipeline_builder = sdc_builder.get_pipeline_builder()
pipeline = _create_jdbc_producer_pipeline(pipeline_builder, 'JDBC Producer Multischema Insert', INPUT_DATA,
table_name, 'INSERT')
# JDBC Producer's "Schema Name" property is set through the `database` environment under some circumstances
# (e.g. Sql Server database). We overwrite it afterwards for the test.
pipeline.configure_for_environment(database)
pipeline[2].set_attributes(schema_name="${record:value('/schema')}")
sdc_executor.add_pipeline(pipeline)
try:
sdc_executor.start_pipeline(pipeline).wait_for_pipeline_output_records_count(len(ROWS))
sdc_executor.stop_pipeline(pipeline)
result1 = database.engine.execute(table1.select())
result2 = database.engine.execute(table2.select())
result3 = database.engine.execute(table3.select())
data1 = result1.fetchall()
data2 = result2.fetchall()
data3 = result3.fetchall()
assert data1 == [(ROWS[0]['name'], ROWS[0]['id'])]
assert data2 == [(ROWS[1]['name'], ROWS[1]['id'])]
assert data3 == [(ROWS[2]['name'], ROWS[2]['id'])]
result1.close()
result2.close()
result3.close()
finally:
logger.info('Dropping table %s in schemas...', table_name)
table1.drop(database.engine)
table2.drop(database.engine)
table3.drop(database.engine)
logger.info('Dropping schemas %s, %s, %s...', schema1_name, schema2_name, schema3_name)
_drop_schema(schema1_name, database)
_drop_schema(schema2_name, database)
_drop_schema(schema3_name, database)
# Test SDC-10719
@database
@sdc_min_version('3.8.0')
def test_jdbc_producer_multischema_multitable(sdc_builder, sdc_executor, database):
"""Test a JDBC Producer in a multischema scenario with different destination tables for each schema. We create 3
schemas with one table each, with different names. Then we use EL expressions to insert records according to
the /schema and /table record fields.
There was a limitation in previous versions that affected MySQL and MemSQL. These RDBMSs do not differentiate
between schema and database. SDC used the database configured in the JDBC connection string and looked for database
metadata filtering by database+schema. If the schema was different from the database in the connection string, metadata
could not be retrieved. This was a problem in multischema scenarios, where several schemas are employed.
Pipeline:
dev_raw_data_source >> record_deduplicator >> jdbc_producer
record_deduplicator >> trash
"""
schema1_name = _get_random_name(database, prefix='stf_schema_')
schema2_name = _get_random_name(database, prefix='stf_schema_')
schema3_name = _get_random_name(database, prefix='stf_schema_')
table1_name = _get_random_name(database, prefix='stf_table_')
table2_name = _get_random_name(database, prefix='stf_table_')
table3_name = _get_random_name(database, prefix='stf_table_')
_create_schema(schema1_name, database)
_create_schema(schema2_name, database)
_create_schema(schema3_name, database)
table1 = _create_table(table1_name, database, schema_name=schema1_name)
table2 = _create_table(table2_name, database, schema_name=schema2_name)
table3 = _create_table(table3_name, database, schema_name=schema3_name)
ROWS = [{'schema': schema1_name, 'table': table1_name, 'id': 1, 'name': 'Roger Federer'},
{'schema': schema2_name, 'table': table2_name, 'id': 2, 'name': 'Rafael Nadal'},
{'schema': schema3_name, 'table': table3_name, 'id': 3, 'name': 'Dominic Thiem'}]
INPUT_DATA = '\n'.join(json.dumps(rec) for rec in ROWS)
pipeline_builder = sdc_builder.get_pipeline_builder()
pipeline = _create_jdbc_producer_pipeline(pipeline_builder, 'JDBC Producer Multischema and Multitable Insert',
INPUT_DATA, "${record:value('/table')}", 'INSERT')
# JDBC Producer's "Schema Name" property is set through the `database` environment under some circumstances
# (e.g. Sql Server database). We overwrite it afterwards for the test.
pipeline.configure_for_environment(database)
pipeline[2].set_attributes(schema_name="${record:value('/schema')}")
# JDBC Producer's "Table Name" property is converted to uppercase through the configure_for_environment() method
# when database is Oracle. However EL function names are case-sensitive; we overwrite it afterwards to avoid an EL
# error.
pipeline[2].set_attributes(table_name="${record:value('/table')}")
sdc_executor.add_pipeline(pipeline)
try:
sdc_executor.start_pipeline(pipeline).wait_for_pipeline_output_records_count(len(ROWS))
sdc_executor.stop_pipeline(pipeline)
result1 = database.engine.execute(table1.select())
result2 = database.engine.execute(table2.select())
result3 = database.engine.execute(table3.select())
data1 = result1.fetchall()
data2 = result2.fetchall()
data3 = result3.fetchall()
assert data1 == [(ROWS[0]['name'], ROWS[0]['id'])]
assert data2 == [(ROWS[1]['name'], ROWS[1]['id'])]
assert data3 == [(ROWS[2]['name'], ROWS[2]['id'])]
result1.close()
result2.close()
result3.close()
finally:
logger.info('Dropping tables %s, %s, %s...', table1_name, table2_name, table3_name)
table1.drop(database.engine)
table2.drop(database.engine)
table3.drop(database.engine)
logger.info('Dropping schemas %s, %s, %s...', schema1_name, schema2_name, schema3_name)
_drop_schema(schema1_name, database)
_drop_schema(schema2_name, database)
_drop_schema(schema3_name, database)
# SDC-11063: Do not reorder update statements in JDBC destination
@sdc_min_version('3.0.0.0')
@pytest.mark.parametrize('multi_row', [True, False])
@database
def test_jdbc_producer_ordering(sdc_builder, sdc_executor, multi_row, database):
"""Ensure that variously intertwined operations won't be executed out of order in harmful way."""
table_name = get_random_string(string.ascii_lowercase, 20)
metadata = sqlalchemy.MetaData()
table = sqlalchemy.Table(
table_name,
metadata,
sqlalchemy.Column('id', sqlalchemy.Integer, primary_key=True, quote=True, autoincrement=False),
sqlalchemy.Column('a', sqlalchemy.Integer, quote=True),
sqlalchemy.Column('b', sqlalchemy.Integer, quote=True)
)
RAW_DATA = [
# Update id=5
{"op": 3, "id": 5, "a": 2, "b": 2},
# Insert id=4
{"op": 1, "id": 4, "a": 1, "b": 1},
# Update id=4
{"op": 3, "id": 4, "a": 2, "b": 2},
# Delete id=5
{"op": 2, "id": 5},
# Insert id=1
{"op": 1, "id": 1, "a": 1, "b": 1},
# Update id=1
{"op": 3, "id": 1, "a": 2},
# Insert id=2
{"op": 1, "id": 2, "a": 1, "b": 1},
# Delete id=2
{"op": 2, "id": 2},
# Update id=1
{"op": 3, "id": 1, "a": 2, "b": 2},
# Insert id=3
{"op": 1, "id": 3, "a": 1, "b": 1},
# Update id=1
{"op": 3, "id": 1, "a": 3},
# Update id=3
{"op": 3, "id": 3, "a": 5},
# Delete id=3
{"op": 2, "id": 3}
]
builder = sdc_builder.get_pipeline_builder()
source = builder.add_stage('Dev Raw Data Source')
source.stop_after_first_batch = True
source.data_format = 'JSON'
source.raw_data = '\n'.join(json.dumps(rec) for rec in RAW_DATA)
expression = builder.add_stage('Expression Evaluator')
expression.header_attribute_expressions = [
{'attributeToSet': 'sdc.operation.type', 'headerAttributeExpression': '${record:value("/op")}'}
]
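# As annotated in RAW_DATA above, the /op values map to the CRUD codes expected in the
# sdc.operation.type header: 1 = INSERT, 2 = DELETE, 3 = UPDATE.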
remover = builder.add_stage('Field Remover')
remover.set_attributes(fields=['/op'], action='REMOVE')
producer = builder.add_stage('JDBC Producer')
producer.field_to_column_mapping = []
producer.default_operation = 'UPDATE'
producer.table_name = table_name
producer.use_multi_row_operation = multi_row
if database.type == 'Oracle':
producer.enclose_object_names = True
source >> expression >> remover >> producer
pipeline = builder.build().configure_for_environment(database)
sdc_executor.add_pipeline(pipeline)
try:
logger.info('Creating table %s in %s database ...', table_name, database.type)
table.create(database.engine)
# The table will start with a single row (id=5)
logger.info('Inserting rows into %s in %s database', table_name, database.type)
connection = database.engine.connect()
connection.execute(table.insert(), {'id': 5, 'a': 1, 'b': 1})
# Finally run the pipeline and verify it's outcome
sdc_executor.start_pipeline(pipeline).wait_for_finished()
result = database.engine.execute(table.select())
db = sorted(result.fetchall(), key=lambda row: row[0]) # order by id
result.close()
assert len(db) == 2
# id=1
assert 1 == db[0][0]
assert 3 == db[0][1]
assert 2 == db[0][2]
# id=4
assert 4 == db[1][0]
assert 2 == db[1][1]
assert 2 == db[1][2]
finally:
logger.info('Dropping table %s in %s database ...', table_name, database.type)
table.drop(database.engine)
@sdc_min_version('3.0.0.0')
@database
def test_jdbc_multitable_events(sdc_builder, sdc_executor, database):
"""
Validate that we properly generate events
"""
if database.type == 'Oracle':
pytest.skip("This test depends on auto-created ID that doesn't work properly on Oracle")
table_prefix = get_random_string(string.ascii_lowercase, 20)
table_a = '{}_a'.format(table_prefix)
table_b = '{}_b'.format(table_prefix)
table_events = '{}_events'.format(table_prefix)
builder = sdc_builder.get_pipeline_builder()
source = builder.add_stage('JDBC Multitable Consumer')
source.transaction_isolation = 'TRANSACTION_READ_COMMITTED'
source.table_configs = [{
'tablePattern': f'{table_prefix}%',
"enableNonIncremental": True,
'tableExclusionPattern': table_events
}]
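# The exclusion pattern keeps the origin from reading the events table that the JDBC Producer below
# writes into, so the pipeline does not consume its own event output.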
trash = builder.add_stage('Trash')
expression = builder.add_stage('Expression Evaluator')
expression.field_expressions = [{
'fieldToSet': '/tbl',
'expression': '${record:value("/table")}${record:value("/tables[0]")}'
}, {
'fieldToSet': '/tbls',
'expression': '${record:value("/tables[0]")},${record:value("/tables[1]")}'
}, {
'fieldToSet': '/event',
'expression': '${record:eventType()}'
}
]
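# A note on the expressions above: table-finished events carry a /table field, while schema-finished
# events carry a /tables list, so concatenating record:value('/table') and record:value('/tables[0]')
# should leave /tbl holding whichever of the two is present for a given event (see the asserts below,
# which check /tbl for table-finished events and /tbls for the schema-finished event).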
producer = builder.add_stage('JDBC Producer')
producer.table_name = table_events
producer.default_operation = 'INSERT'
producer.field_to_column_mapping = [
dict(field='/event', columnName='event'),
dict(field='/tbl', columnName='tbl'),
dict(field='/tbls', columnName='tbls')
]
source >> trash
source >= expression
expression >> producer
pipeline = builder.build().configure_for_environment(database)
sdc_executor.add_pipeline(pipeline)
# We need three tables for this test
metadata = sqlalchemy.MetaData()
a = sqlalchemy.Table(
table_a,
metadata,
sqlalchemy.Column('id', sqlalchemy.Integer, primary_key=True)
)
b = sqlalchemy.Table(
table_b,
metadata,
sqlalchemy.Column('id', sqlalchemy.Integer, primary_key=False)
)
events = sqlalchemy.Table(
table_events,
metadata,
sqlalchemy.Column('id', sqlalchemy.Integer, primary_key=True),
sqlalchemy.Column('event', sqlalchemy.String(50)),
sqlalchemy.Column('tbl', sqlalchemy.String(150)),
sqlalchemy.Column('tbls', sqlalchemy.String(150))
)
try:
logger.info('Creating tables %s, %s and %s in %s database ...', table_a, table_b, table_events, database.type)
a.create(database.engine)
b.create(database.engine)
events.create(database.engine)
logger.info('Inserting rows into %s and %s', table_a, table_b)
connection = database.engine.connect()
connection.execute(a.insert(), {'id': 1})
connection.execute(b.insert(), {'id': 1})
# Start the pipeline
status = sdc_executor.start_pipeline(pipeline)
# Read two records; they generate 4 events, giving 6 output records in total
status.wait_for_pipeline_output_records_count(6)
result = database.engine.execute(events.select())
db = sorted(result.fetchall(), key=lambda row: row[0]) # order by id
result.close()
assert len(db) == 4
tbls = set()
assert 'table-finished' == db[0][1]
tbls.add(db[0][2])
assert 'table-finished' == db[1][1]
tbls.add(db[1][2])
assert table_a in tbls
assert table_b in tbls
assert 'schema-finished' == db[2][1]
tbls = set(db[2][3].split(","))
assert table_a in tbls
assert table_b in tbls
assert 'no-more-data' == db[3][1]
# Portable truncate
events.drop(database.engine)
events.create(database.engine)
# Second iteration - insert one new row
logger.info('Inserting rows into %s', table_a)
connection = database.engine.connect()
connection.execute(a.insert(), {'id': 2})
# 1 record, 3 events more
status.wait_for_pipeline_output_records_count(10)
result = database.engine.execute(events.select())
db = sorted(result.fetchall(), key=lambda row: row[0]) # order by id
result.close()
assert len(db) == 3
assert 'table-finished' == db[0][1]
assert table_a == db[0][2]
assert 'schema-finished' == db[1][1]
tbls = set(db[1][3].split(","))
assert table_a in tbls
assert table_b in tbls
assert 'no-more-data' == db[2][1]
# Now let's stop the pipeline and start it again
# SDC-10022: Multitable JDBC Origin with non-incremental table does not properly trigger 'no-more-data' event
sdc_executor.stop_pipeline(pipeline)
# Portable truncate
events.drop(database.engine)
events.create(database.engine)
# Start the pipeline and wait for it to read three records (3 events)
sdc_executor.start_pipeline(pipeline).wait_for_pipeline_output_records_count(3)
# Re-read the events table so the asserts below check the events from this run rather than stale data
result = database.engine.execute(events.select())
db = sorted(result.fetchall(), key=lambda row: row[0]) # order by id
result.close()
assert len(db) == 3
assert 'table-finished' == db[0][1]
assert table_a == db[0][2]
assert 'schema-finished' == db[1][1]
tbls = set(db[1][3].split(","))
assert table_a in tbls
assert table_b in tbls
assert 'no-more-data' == db[2][1]
finally:
sdc_executor.stop_pipeline(pipeline)
logger.info('Dropping tables %s, %s and %s in %s database...', table_a, table_b, table_events, database.type)
a.drop(database.engine)
b.drop(database.engine)
events.drop(database.engine)
# SDC-11092: Improve the ability of JDBC Destination to cover non-standard Data related SQL Error codes
@sdc_min_version('3.0.0.0')
@pytest.mark.parametrize('multi_row', [True, False])
@database('oracle')
def test_jdbc_producer_oracle_data_errors(sdc_builder, sdc_executor, multi_row, database):
"""Ensure that data related error in Oracle will be sent to eror stream rather then shutting the pipeline down."""
table_name = get_random_string(string.ascii_lowercase, 20)
metadata = sqlalchemy.MetaData()
table = sqlalchemy.Table(
table_name,
metadata,
sqlalchemy.Column('ID', sqlalchemy.Integer, primary_key=True),
sqlalchemy.Column('STR', sqlalchemy.String(2)),
)
builder = sdc_builder.get_pipeline_builder()
source = builder.add_stage('Dev Raw Data Source')
source.stop_after_first_batch = True
source.data_format = 'JSON'
source.raw_data = '{"ID" : 1, "STR": "Longer then 2 characters"}'
producer = builder.add_stage('JDBC Producer')
producer.field_to_column_mapping = []
producer.default_operation = 'INSERT'
producer.table_name = table_name
producer.use_multi_row_operation = multi_row
source >> producer
pipeline = builder.build().configure_for_environment(database)
sdc_executor.add_pipeline(pipeline)
try:
logger.info('Creating table %s in %s database ...', table_name, database.type)
table.create(database.engine)
sdc_executor.start_pipeline(pipeline).wait_for_finished()
# The table in database needs to be empty
result = database.engine.execute(table.select())
db = sorted(result.fetchall(), key=lambda row: row[0]) # order by id
result.close()
assert len(db) == 0
history = sdc_executor.get_pipeline_history(pipeline)
assert history.latest.metrics.counter('pipeline.batchInputRecords.counter').count == 1
assert history.latest.metrics.counter('pipeline.batchErrorRecords.counter').count == 1
assert history.latest.metrics.counter('pipeline.batchOutputRecords.counter').count == 0
finally:
logger.info('Dropping table %s in %s database ...', table_name, database.type)
table.drop(database.engine)
# SDC-11082: Extend support for TIMESTAMP WITH TIMEZONE Datatypes
@sdc_min_version('3.0.0.0')
@database('oracle')
# https://docs.oracle.com/cd/B28359_01/server.111/b28318/datatype.htm#CNCPT1821
# We don't support UriType (requires difficult workaround in JDBC)
@pytest.mark.parametrize('sql_type,insert_fragment,expected_type,expected_value', [
('number', '1', 'DECIMAL', '1'),
('char(2)', "'AB'", 'STRING', 'AB'),
('varchar(4)', "'ABCD'", 'STRING', 'ABCD'),
('varchar2(4)', "'NVAR'", 'STRING', 'NVAR'),
('nchar(3)', "'NCH'", 'STRING', 'NCH'),
('nvarchar2(4)', "'NVAR'", 'STRING', 'NVAR'),
('binary_float', '1.0', 'FLOAT', '1.0'),
('binary_double', '2.0', 'DOUBLE', '2.0'),
('date', "TO_DATE('1998-1-1 6:22:33', 'YYYY-MM-DD HH24:MI:SS')", 'DATETIME', 883635753000),
('timestamp', "TIMESTAMP'1998-1-2 6:00:00'", 'DATETIME', 883720800000),
('timestamp with time zone', "TIMESTAMP'1998-1-3 6:00:00-5:00'", 'ZONED_DATETIME', '1998-01-03T06:00:00-05:00'),
('timestamp with local time zone', "TIMESTAMP'1998-1-4 6:00:00-5:00'", 'ZONED_DATETIME', '1998-01-04T11:00:00Z'),
('long', "'LONG'", 'STRING', 'LONG'),
('blob', "utl_raw.cast_to_raw('BLOB')", 'BYTE_ARRAY', 'QkxPQg=='),
('clob', "'CLOB'", 'STRING', 'CLOB'),
('nclob', "'NCLOB'", 'STRING', 'NCLOB'),
('XMLType', "xmltype('<a></a>')", 'STRING', '<a></a>')
])
@pytest.mark.parametrize('use_table_origin', [True, False])
def test_jdbc_multitable_oracle_types(sdc_builder, sdc_executor, database, use_table_origin, sql_type, insert_fragment,
expected_type, expected_value):
"""Test all feasible Oracle types."""
table_name = get_random_string(string.ascii_lowercase, 20)
connection = database.engine.connect()
try:
# Create table
connection.execute(f"""
CREATE TABLE {table_name}(
id number primary key,
data_column {sql_type} NULL
)
""")
# And insert a row with actual value
connection.execute(f"INSERT INTO {table_name} VALUES(1, {insert_fragment})")
# And a null
connection.execute(f"INSERT INTO {table_name} VALUES(2, NULL)")
builder = sdc_builder.get_pipeline_builder()
if use_table_origin:
origin = builder.add_stage('JDBC Multitable Consumer')
origin.table_configs = [{"tablePattern": f'%{table_name}%'}]
origin.on_unknown_type = 'CONVERT_TO_STRING'
else:
origin = builder.add_stage('JDBC Query Consumer')
origin.sql_query = 'SELECT * FROM {0}'.format(table_name)
origin.incremental_mode = False
origin.on_unknown_type = 'CONVERT_TO_STRING'
trash = builder.add_stage('Trash')
origin >> trash
pipeline = builder.build().configure_for_environment(database)
sdc_executor.add_pipeline(pipeline)
snapshot = sdc_executor.capture_snapshot(pipeline=pipeline, start_pipeline=True).snapshot
sdc_executor.stop_pipeline(pipeline)
assert len(snapshot[origin].output) == 2
record = snapshot[origin].output[0]
null_record = snapshot[origin].output[1]
# Since we are controlling types, we want to check explicit values inside the record rather than the Python
# wrappers.
# TLKT-177: Add ability for field to return raw value
assert record.field['DATA_COLUMN'].type == expected_type
assert null_record.field['DATA_COLUMN'].type == expected_type
assert record.field['DATA_COLUMN']._data['value'] == expected_value
assert null_record.field['DATA_COLUMN'] == None
finally:
logger.info('Dropping table %s in %s database ...', table_name, database.type)
connection.execute(f"DROP TABLE {table_name}")
# SDC-11324: JDBC MultiTable origin can create duplicate offsets
@database('mysql')
def test_jdbc_multitable_duplicate_offsets(sdc_builder, sdc_executor, database):
"""Validate that we will not create duplicate offsets. """
table_name = get_random_string(string.ascii_lowercase, 10)
pipeline_builder = sdc_builder.get_pipeline_builder()
origin = pipeline_builder.add_stage('JDBC Multitable Consumer')
origin.table_configs = [{"tablePattern": table_name}]
origin.max_batch_size_in_records = 1
trash = pipeline_builder.add_stage('Trash')
origin >> trash
pipeline = pipeline_builder.build().configure_for_environment(database)
metadata = sqlalchemy.MetaData()
table = sqlalchemy.Table(
table_name,
metadata,
sqlalchemy.Column('id', sqlalchemy.Integer, primary_key=True),
sqlalchemy.Column('name', sqlalchemy.String(32))
)
try:
logger.info('Creating table %s in %s database ...', table_name, database.type)
table.create(database.engine)
logger.info('Adding three rows into %s database ...', database.type)
connection = database.engine.connect()
connection.execute(table.insert(), ROWS_IN_DATABASE)
sdc_executor.add_pipeline(pipeline)
sdc_executor.start_pipeline(pipeline).wait_for_pipeline_output_records_count(len(ROWS_IN_DATABASE))
sdc_executor.stop_pipeline(pipeline)
# We should have read all the records
history = sdc_executor.get_pipeline_history(pipeline)
assert history.latest.metrics.counter('pipeline.batchInputRecords.counter').count == len(ROWS_IN_DATABASE)
assert history.latest.metrics.counter('pipeline.batchOutputRecords.counter').count == len(ROWS_IN_DATABASE)
# And most importantly, validate offset
offset = sdc_executor.api_client.get_pipeline_committed_offsets(pipeline.id).response.json()
assert offset is not None
assert offset['offsets'] is not None
expected_offset = {
f"tableName={table_name};;;partitioned=false;;;partitionSequence=-1;;;partitionStartOffsets=;;;partitionMaxOffsets=;;;usingNonIncrementalLoad=false": "id=3",
"$com.streamsets.pipeline.stage.origin.jdbc.table.TableJdbcSource.offset.version$": "2"
}
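        # Note: the composite offset key above encodes the table name plus partitioning metadata separated by ';;;',
        # and the value 'id=3' means the origin has committed everything up to and including the row with id 3.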
assert offset['offsets'] == expected_offset
finally:
logger.info('Dropping table %s in %s database...', table_name, database.type)
table.drop(database.engine)
# SDC-11326: JDBC MultiTable origin forgets offset of non-incremental table on consecutive execution
@database('mysql')
@sdc_min_version('3.0.0.0')
def test_jdbc_multitable_lost_nonincremental_offset(sdc_builder, sdc_executor, database):
"""Validate the origin does not loose non-incremental offset on various runs."""
table_name = get_random_string(string.ascii_lowercase, 10)
pipeline_builder = sdc_builder.get_pipeline_builder()
origin = pipeline_builder.add_stage('JDBC Multitable Consumer')
origin.table_configs = [{"tablePattern": table_name, "enableNonIncremental": True}]
origin.max_batch_size_in_records = 1
trash = pipeline_builder.add_stage('Trash')
origin >> trash
pipeline = pipeline_builder.build().configure_for_environment(database)
metadata = sqlalchemy.MetaData()
table = sqlalchemy.Table(
table_name,
metadata,
sqlalchemy.Column('id', sqlalchemy.Integer, primary_key=False),
sqlalchemy.Column('name', sqlalchemy.String(32))
)
try:
logger.info('Creating table %s in %s database ...', table_name, database.type)
table.create(database.engine)
logger.info('Adding three rows into %s database ...', database.type)
connection = database.engine.connect()
connection.execute(table.insert(), ROWS_IN_DATABASE)
sdc_executor.add_pipeline(pipeline)
sdc_executor.start_pipeline(pipeline).wait_for_pipeline_output_records_count(len(ROWS_IN_DATABASE))
sdc_executor.stop_pipeline(pipeline)
# We should have read all the records
history = sdc_executor.get_pipeline_history(pipeline)
assert history.latest.metrics.counter('pipeline.batchInputRecords.counter').count == len(ROWS_IN_DATABASE)
assert history.latest.metrics.counter('pipeline.batchOutputRecords.counter').count == len(ROWS_IN_DATABASE)
# And most importantly, validate offset
offset = sdc_executor.api_client.get_pipeline_committed_offsets(pipeline.id).response.json()
assert offset is not None
assert offset['offsets'] is not None
expected_offset = {
f"tableName={table_name};;;partitioned=false;;;partitionSequence=-1;;;partitionStartOffsets=;;;partitionMaxOffsets=;;;usingNonIncrementalLoad=true": "completed=true",
"$com.streamsets.pipeline.stage.origin.jdbc.table.TableJdbcSource.offset.version$": "2"
}
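        # For a non-incremental table the offset value is simply 'completed=true'; the rest of this test verifies
        # that this marker survives repeated pipeline restarts (SDC-11326).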
assert offset['offsets'] == expected_offset
for _ in range(5):
sdc_executor.start_pipeline(pipeline)
            # Since the pipeline won't read anything, give it a few seconds to "idle"
time.sleep(2)
sdc_executor.stop_pipeline(pipeline)
# And it really should not have read anything!
history = sdc_executor.get_pipeline_history(pipeline)
assert history.latest.metrics.counter('pipeline.batchInputRecords.counter').count == 0
assert history.latest.metrics.counter('pipeline.batchOutputRecords.counter').count == 0
# And offset should not have changed
offset = sdc_executor.api_client.get_pipeline_committed_offsets(pipeline.id).response.json()
assert offset is not None
assert offset['offsets'] is not None
assert offset['offsets'] == expected_offset
finally:
logger.info('Dropping table %s in %s database...', table_name, database.type)
table.drop(database.engine)
@sdc_min_version('3.9.0')
@database('oracle')
def test_jdbc_multitable_oracle_split_by_timestamp_with_timezone(sdc_builder, sdc_executor, database):
"""Make sure that we can properly partition TIMESTAMP WITH TIMEZONE type."""
table_name = get_random_string(string.ascii_uppercase, 20)
table_name_dest = get_random_string(string.ascii_uppercase, 20)
connection = database.engine.connect()
comparing_query = f"""(
select * from {table_name}
minus
select * from {table_name_dest}
) union (
select * from {table_name_dest}
minus
select * from {table_name}
)"""
try:
# Create table
connection.execute(f"""
CREATE TABLE {table_name}(
ID number primary key,
TZ timestamp(6) with time zone
)
""")
# Create destination table
connection.execute(f"""CREATE TABLE {table_name_dest} AS SELECT * FROM {table_name} WHERE 1=0""")
# Insert a few rows
for m in range(0, 5):
for s in range(0, 59):
connection.execute(f"INSERT INTO {table_name} VALUES({m*100+s}, TIMESTAMP'2019-01-01 10:{m}:{s}-5:00')")
connection.execute("commit")
builder = sdc_builder.get_pipeline_builder()
origin = builder.add_stage('JDBC Multitable Consumer')
origin.table_configs = [{
"tablePattern": f'%{table_name}%',
"overrideDefaultOffsetColumns": True,
"offsetColumns": ["TZ"],
"enableNonIncremental": False,
"partitioningMode": "REQUIRED",
"partitionSize": "30",
"maxNumActivePartitions": -1
}]
origin.number_of_threads = 2
origin.maximum_pool_size = 2
origin.max_batch_size_in_records = 30
finisher = builder.add_stage('Pipeline Finisher Executor')
finisher.stage_record_preconditions = ['${record:eventType() == "no-more-data"}']
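        # The finisher stops the pipeline once the origin emits the 'no-more-data' event, i.e. when every partition
        # of the source table has been fully read.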
FIELD_MAPPINGS = [dict(field='/ID', columnName='ID'),
dict(field='/TZ', columnName='TZ')]
destination = builder.add_stage('JDBC Producer')
destination.set_attributes(default_operation='INSERT',
table_name=table_name_dest,
field_to_column_mapping=FIELD_MAPPINGS,
stage_on_record_error='STOP_PIPELINE')
origin >> destination
origin >= finisher
pipeline = builder.build().configure_for_environment(database)
sdc_executor.add_pipeline(pipeline)
sdc_executor.start_pipeline(pipeline).wait_for_finished()
result = [row.items() for row in connection.execute(comparing_query)]
assert len(result) == 0
        # Insert a few more rows and validate the outcome again
for m in range(6, 8):
for s in range(0, 59):
connection.execute(f"INSERT INTO {table_name} VALUES({m*100+s}, TIMESTAMP'2019-01-01 10:{m}:{s}-5:00')")
connection.execute("commit")
sdc_executor.start_pipeline(pipeline).wait_for_finished()
result = [row.items() for row in connection.execute(comparing_query)]
assert len(result) == 0
finally:
logger.info('Dropping table %s and %s in %s database ...', table_name, table_name_dest, database.type)
connection.execute(f"DROP TABLE {table_name}")
connection.execute(f"DROP TABLE {table_name_dest}")
def _get_date_from_days(d):
return datetime.date(1970, 1, 1) + datetime.timedelta(days=d)
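# For reference: _get_date_from_days(0) == datetime.date(1970, 1, 1) and _get_date_from_days(31) == datetime.date(1970, 2, 1),
# so the DATE values generated below are simple day counts from the Unix epoch.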
@database('oracle')
def test_jdbc_multitable_oracle_split_by_date(sdc_builder, sdc_executor, database):
"""Make sure that we can properly partition DATE type.
More precisely, we want to run this pipeline:
multitable >> jdbc
multitable >= finisher
    With more than one thread and using a DATE column as an offset column.
    This did not work before version 3.11.0; the issue was detected and
    solved in ESC-513.
"""
table_name = get_random_string(string.ascii_uppercase, 20)
table_name_dest = get_random_string(string.ascii_uppercase, 20)
connection = database.engine.connect()
comparing_query = f"""(
select * from {table_name}
minus
select * from {table_name_dest}
) union (
select * from {table_name_dest}
minus
select * from {table_name}
)"""
try:
# Create table
connection.execute(f"""
CREATE TABLE {table_name}(
ID number primary key,
DT date
)
""")
# Create destination table
connection.execute(f"""CREATE TABLE {table_name_dest} AS SELECT * FROM {table_name} WHERE 1=0""")
# Insert a few rows
for m in range(0, 5):
for s in range(0, 59):
identifier = 100 * m + s
connection.execute(
f"INSERT INTO {table_name} VALUES({identifier}, DATE'{_get_date_from_days(identifier)}')"
)
connection.execute("commit")
builder = sdc_builder.get_pipeline_builder()
origin = builder.add_stage('JDBC Multitable Consumer')
# Partition size is set to 259200000 which corresponds to 30 days in ms,
# since dates are translated to timestamps
origin.table_configs = [{
"tablePattern": f'%{table_name}%',
"overrideDefaultOffsetColumns": True,
"offsetColumns": ["DT"], # Should cause SDC < 3.11.0 to throw an UnsupportedOperationException
"enableNonIncremental": False,
"partitioningMode": "REQUIRED",
"partitionSize": "259200000", # 30 days = 30*24*60*60*1000 (259200000)ms
"maxNumActivePartitions": 2
}]
origin.number_of_threads = 2
origin.maximum_pool_size = 2
finisher = builder.add_stage('Pipeline Finisher Executor')
finisher.stage_record_preconditions = ['${record:eventType() == "no-more-data"}']
FIELD_MAPPINGS = [dict(field='/ID', columnName='ID'),
dict(field='/DT', columnName='DT')]
destination = builder.add_stage('JDBC Producer')
destination.set_attributes(default_operation='INSERT',
table_name=table_name_dest,
field_to_column_mapping=FIELD_MAPPINGS,
stage_on_record_error='STOP_PIPELINE')
origin >> destination
origin >= finisher
pipeline = builder.build().configure_for_environment(database)
sdc_executor.add_pipeline(pipeline)
sdc_executor.start_pipeline(pipeline).wait_for_finished()
result = [row.items() for row in connection.execute(comparing_query)]
assert len(result) == 0
        # Insert a few more rows and validate the outcome again
for m in range(6, 8):
for s in range(0, 59):
identifier = 100 * m + s
connection.execute(
f"INSERT INTO {table_name} VALUES({identifier}, DATE'{_get_date_from_days(identifier)}')"
)
connection.execute("commit")
sdc_executor.start_pipeline(pipeline).wait_for_finished()
result = [row.items() for row in connection.execute(comparing_query)]
assert len(result) == 0
finally:
logger.info('Dropping table %s and %s in %s database ...', table_name, table_name_dest, database.type)
connection.execute(f"DROP TABLE {table_name}")
connection.execute(f"DROP TABLE {table_name_dest}")
@sdc_min_version('3.9.0')
@database('mysql')
def test_jdbc_multitable_consumer_origin_high_resolution_timestamp_offset(sdc_builder, sdc_executor, database):
"""
    Check that the JDBC Multitable origin can retrieve records from a table when a high-resolution timestamp
    (sub-millisecond precision) is used as the offset column. The test verifies that the records read have a
    timestamp greater than the timestamp used as the initial offset.
Pipeline looks like:
jdbc_multitable_consumer >> trash
"""
src_table_prefix = get_random_string(string.ascii_lowercase, 6)
table_name = f'{src_table_prefix}_{get_random_string(string.ascii_lowercase, 20)}'
pipeline_builder = sdc_builder.get_pipeline_builder()
jdbc_multitable_consumer = pipeline_builder.add_stage('JDBC Multitable Consumer')
jdbc_multitable_consumer.set_attributes(table_configs=[{'tablePattern': f'%{src_table_prefix}%',
'overrideDefaultOffsetColumns': True,
'offsetColumns': ['added'],
'offsetColumnToInitialOffsetValue': [{
'key': 'added',
'value': '${time:extractNanosecondsFromString(' +
'"1996-12-02 00:00:00.020111000")}'
}]
}])
trash = pipeline_builder.add_stage('Trash')
jdbc_multitable_consumer >> trash
pipeline = pipeline_builder.build().configure_for_environment(database)
connection = database.engine.connect()
# Create table
logger.info('Creating table %s in %s database ...', table_name, database.type)
connection.execute(f"""
CREATE TABLE {table_name}(
id INT UNSIGNED NOT NULL AUTO_INCREMENT PRIMARY KEY,
name varchar(100) NOT NULL,
age INT UNSIGNED NOT NULL,
added TIMESTAMP(6) NOT NULL
)
""")
# Insert rows
logger.info('Adding four rows into %s database ...', database.type)
connection.execute(f'INSERT INTO {table_name} VALUES(1, "Charly", 14, "2005-02-08 14:00:00.100105002")')
connection.execute(f'INSERT INTO {table_name} VALUES(2, "Paco", 28, "1992-05-25 11:00:00.000201010")')
connection.execute(f'INSERT INTO {table_name} VALUES(3, "Eugenio", 21, "1996-12-01 23:00:00.020111")')
connection.execute(f'INSERT INTO {table_name} VALUES(4, "Romualdo", 19, "2000-06-15 18:30:00.10523121")')
try:
sdc_executor.add_pipeline(pipeline)
snapshot = sdc_executor.capture_snapshot(pipeline=pipeline, start_pipeline=True).snapshot
sdc_executor.stop_pipeline(pipeline)
name_id_from_output = [(record.field['name'], record.field['id'])
for record in snapshot[jdbc_multitable_consumer].output]
assert len(name_id_from_output) == 2
assert name_id_from_output == [('Romualdo', 4), ('Charly', 1)]
finally:
logger.info('Dropping table %s in %s database...', table_name, database.type)
connection.execute(f'DROP TABLE {table_name}')
@database
@sdc_min_version('3.0.0.0')
def test_jdbc_multitable_consumer_partitioned_large_offset_gaps(sdc_builder, sdc_executor, database):
"""
    Ensure that the multi-table JDBC origin can handle large gaps in offset column values in partitioned mode.
    The destination is trash, and there is a finisher waiting for the no-more-data event.
    The pipeline will be started, and we will capture a snapshot of two batches (to ensure all expected rows are
    covered), then assert that the captured rows match the expected data.
    This is a test for SDC-10053.
"""
if database.type == 'Oracle':
pytest.skip("This test depends on proper case for column names that Oracle auto-uppers.")
src_table_prefix = get_random_string(string.ascii_lowercase, 6)
table_name = '{}_{}'.format(src_table_prefix, get_random_string(string.ascii_lowercase, 20))
pipeline_builder = sdc_builder.get_pipeline_builder()
jdbc_multitable_consumer = pipeline_builder.add_stage('JDBC Multitable Consumer')
jdbc_multitable_consumer.set_attributes(table_configs=[{
"tablePattern": f'{table_name}',
"enableNonIncremental": False,
"partitioningMode": "REQUIRED",
"partitionSize": "1000000",
"maxNumActivePartitions": -1
}])
trash = pipeline_builder.add_stage('Trash')
jdbc_multitable_consumer >> trash
finisher = pipeline_builder.add_stage("Pipeline Finisher Executor")
finisher.stage_record_preconditions = ['${record:eventType() == "no-more-data"}']
jdbc_multitable_consumer >= finisher
pipeline = pipeline_builder.build().configure_for_environment(database)
metadata = sqlalchemy.MetaData()
table = sqlalchemy.Table(
table_name,
metadata,
sqlalchemy.Column('id', sqlalchemy.Integer, primary_key=True),
sqlalchemy.Column('name', sqlalchemy.String(32))
)
try:
logger.info('Creating table %s in %s database ...', table_name, database.type)
table.create(database.engine)
logger.info('Adding four rows into %s table, with a large gap in the primary keys ...', table_name)
connection = database.engine.connect()
rows_with_gap = ROWS_IN_DATABASE + [{'id': 5000000, 'name': 'Evil Jeff'}]
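        # The jump from id 3 to id 5,000,000 spans several of the 1,000,000-wide partitions configured above,
        # which is exactly the large-offset-gap scenario from SDC-10053.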
connection.execute(table.insert(), rows_with_gap)
connection.close()
sdc_executor.add_pipeline(pipeline)
# need to capture two batches, one for row IDs 1-3, and one for the last row after the large gap
snapshot = sdc_executor.capture_snapshot(pipeline=pipeline, batches=2, start_pipeline=True).snapshot
rows_from_snapshot = [(record.get_field_data('/name').value, record.get_field_data('/id').value)
for batch in snapshot.snapshot_batches
for record in batch.stage_outputs[jdbc_multitable_consumer.instance_name].output]
expected_data = [(row['name'], row['id']) for row in rows_with_gap]
logger.info('Actual %s expected %s', rows_from_snapshot, expected_data)
assert rows_from_snapshot == expected_data
finally:
logger.info('Dropping table %s in %s database...', table_name, database.type)
table.drop(database.engine)
@sdc_min_version('3.0.0.0')
@database('mysql')
# https://dev.mysql.com/doc/refman/8.0/en/data-types.html
# We don't support BIT generally (the driver is doing funky 'random' mappings on certain versions)
@pytest.mark.parametrize('sql_type,insert_fragment,expected_type,expected_value', [
('TINYINT', '-128', 'SHORT', -128),
('TINYINT UNSIGNED', '255', 'SHORT', 255),
('SMALLINT', '-32768', 'SHORT', -32768),
('SMALLINT UNSIGNED', '65535', 'SHORT', -1), # Support for unsigned isn't entirely correct!
('MEDIUMINT', '-8388608', 'INTEGER', '-8388608'),
('MEDIUMINT UNSIGNED', '16777215', 'INTEGER', '16777215'),
('INT', '-2147483648', 'INTEGER', '-2147483648'),
('INT UNSIGNED', '4294967295', 'INTEGER', '-1'), # Support for unsigned isn't entirely correct!
('BIGINT', '-9223372036854775807', 'LONG', '-9223372036854775807'),
('BIGINT UNSIGNED', '18446744073709551615', 'LONG', '-1'), # Support for unsigned isn't entirely correct!
('DECIMAL(5, 2)', '5.20', 'DECIMAL', '5.20'),
('NUMERIC(5, 2)', '5.20', 'DECIMAL', '5.20'),
('FLOAT', '5.2', 'FLOAT', '5.2'),
('DOUBLE', '5.2', 'DOUBLE', '5.2'),
# ('BIT(8)',"b'01010101'", 'BYTE_ARRAY', 'VQ=='),
('DATE', "'2019-01-01'", 'DATE', 1546300800000),
('DATETIME', "'2019-01-01 5:00:00'", 'DATETIME', 1546318800000),
('TIMESTAMP', "'2019-01-01 5:00:00'", 'DATETIME', 1546318800000),
('TIME', "'5:00:00'", 'TIME', 18000000),
('YEAR', "'2019'", 'DATE', 1546300800000),
('CHAR(5)', "'Hello'", 'STRING', 'Hello'),
('VARCHAR(5)', "'Hello'", 'STRING', 'Hello'),
('BINARY(5)', "'Hello'", 'BYTE_ARRAY', 'SGVsbG8='),
('VARBINARY(5)', "'Hello'", 'BYTE_ARRAY', 'SGVsbG8='),
('BLOB', "'Hello'", 'BYTE_ARRAY', 'SGVsbG8='),
('TEXT', "'Hello'", 'STRING', 'Hello'),
("ENUM('a', 'b')", "'a'", 'STRING', 'a'),
("set('a', 'b')", "'a,b'", 'STRING', 'a,b'),
("POINT", "POINT(1, 1)", 'BYTE_ARRAY', 'AAAAAAEBAAAAAAAAAAAA8D8AAAAAAADwPw=='),
("LINESTRING", "LineString(Point(0,0), Point(10,10), Point(20,25), Point(50,60))", 'BYTE_ARRAY',
'AAAAAAECAAAABAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAkQAAAAAAAACRAAAAAAAAANEAAAAAAAAA5QAAAAAAAAElAAAAAAAAATkA='),
("POLYGON",
"Polygon(LineString(Point(0,0),Point(10,0),Point(10,10),Point(0,10),Point(0,0)),LineString(Point(5,5),Point(7,5),Point(7,7),Point(5,7),Point(5,5)))",
'BYTE_ARRAY',
'AAAAAAEDAAAAAgAAAAUAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAJEAAAAAAAAAAAAAAAAAAACRAAAAAAAAAJEAAAAAAAAAAAAAAAAAAACRAAAAAAAAAAAAAAAAAAAAAAAUAAAAAAAAAAAAUQAAAAAAAABRAAAAAAAAAHEAAAAAAAAAUQAAAAAAAABxAAAAAAAAAHEAAAAAAAAAUQAAAAAAAABxAAAAAAAAAFEAAAAAAAAAUQA=='),
("JSON", "'{\"a\":\"b\"}'", 'STRING', '{\"a\": \"b\"}'),
])
@pytest.mark.parametrize('use_table_origin', [True, False])
def test_jdbc_multitable_mysql_types(sdc_builder, sdc_executor, database, use_table_origin, sql_type, insert_fragment,
expected_type, expected_value):
"""Test all feasible Mysql types."""
table_name = get_random_string(string.ascii_lowercase, 20)
connection = database.engine.connect()
try:
# Create table
connection.execute(f"""
CREATE TABLE {table_name}(
id int primary key,
data_column {sql_type} NULL
)
""")
# And insert a row with actual value
connection.execute(f"INSERT INTO {table_name} VALUES(1, {insert_fragment})")
# And a null
connection.execute(f"INSERT INTO {table_name} VALUES(2, NULL)")
builder = sdc_builder.get_pipeline_builder()
if use_table_origin:
origin = builder.add_stage('JDBC Multitable Consumer')
origin.table_configs = [{"tablePattern": f'%{table_name}%'}]
origin.on_unknown_type = 'CONVERT_TO_STRING'
else:
origin = builder.add_stage('JDBC Query Consumer')
origin.sql_query = 'SELECT * FROM {0}'.format(table_name)
origin.incremental_mode = False
origin.on_unknown_type = 'CONVERT_TO_STRING'
trash = builder.add_stage('Trash')
origin >> trash
pipeline = builder.build(f"MySQL Type {sql_type} with value {insert_fragment}").configure_for_environment(
database)
sdc_executor.add_pipeline(pipeline)
snapshot = sdc_executor.capture_snapshot(pipeline=pipeline, start_pipeline=True).snapshot
sdc_executor.stop_pipeline(pipeline)
assert len(snapshot[origin].output) == 2
record = snapshot[origin].output[0]
null_record = snapshot[origin].output[1]
        # Since we are controlling types, we want to check explicit values inside the record rather than the Python
        # wrappers.
# TLKT-177: Add ability for field to return raw value
assert record.field['data_column'].type == expected_type
assert null_record.field['data_column'].type == expected_type
assert record.field['data_column']._data['value'] == expected_value
assert null_record.field['data_column'] == None
finally:
logger.info('Dropping table %s in %s database ...', table_name, database.type)
connection.execute(f"DROP TABLE {table_name}")
@sdc_min_version('3.0.0.0')
@database('postgresql')
# https://www.postgresql.org/docs/11/datatype.html
# Not testing 'serial' family explicitly as that is just an alias
# Not supporting tsvector/tsquery as they don't seem to fit our use case
# bit(n) is not supported
# xml is not supported
# domain types (as a category) are not supported
# pg_lsn not supported
@pytest.mark.parametrize('sql_type,insert_fragment,expected_type,expected_value', [
('smallint', '-32768', 'SHORT', -32768),
('integer', '2147483647', 'INTEGER', '2147483647'),
('bigint', '-9223372036854775808', 'LONG', '-9223372036854775808'),
('decimal(5,2)', '5.20', 'DECIMAL', '5.20'),
('numeric(5,2)', '5.20', 'DECIMAL', '5.20'),
('real', '5.20', 'FLOAT', '5.2'),
('double precision', '5.20', 'DOUBLE', '5.2'),
('money', '12.34', 'DOUBLE', '12.34'),
('char(5)', "'Hello'", 'STRING', 'Hello'),
('varchar(5)', "'Hello'", 'STRING', 'Hello'),
('text', "'Hello'", 'STRING', 'Hello'),
('bytea', "'\\xDEADBEEF'", 'BYTE_ARRAY', '3q2+7w=='),
('timestamp', "'2003-04-12 04:05:06'", 'DATETIME', 1050120306000),
('timestamp with time zone', "'2003-04-12 04:05:06 America/New_York'", 'DATETIME', 1050134706000),
# For PostgreSQL, we don't create ZONED_DATETIME
('date', "'2019-01-01'", 'DATE', 1546300800000),
('time', "'5:00:00'", 'TIME', 18000000),
('time with time zone', "'04:05:06-08:00'", 'TIME', 43506000),
('interval', "INTERVAL '1' YEAR", 'STRING', '1 years 0 mons 0 days 0 hours 0 mins 0.00 secs'),
('boolean', "true", 'BOOLEAN', True),
('ai', "'sad'", 'STRING', 'sad'),
('point', "'(1, 1)'", 'STRING', '(1.0,1.0)'),
('line', "'{1, 1, 1}'", 'STRING', '{1.0,1.0,1.0}'),
('lseg', "'((1,1)(2,2))'", 'STRING', '[(1.0,1.0),(2.0,2.0)]'),
('box', "'(1,1)(2,2)'", 'STRING', '(2.0,2.0),(1.0,1.0)'),
('path', "'((1,1),(2,2))'", 'STRING', '((1.0,1.0),(2.0,2.0))'),
('polygon', "'((1,1),(2,2))'", 'STRING', '((1.0,1.0),(2.0,2.0))'),
('circle', "'<(1,1),5>'", 'STRING', '<(1.0,1.0),5.0>'),
('inet', "'127.0.0.1/16'", 'STRING', '127.0.0.1/16'),
('cidr', "'127.0.0.0/16'", 'STRING', '127.0.0.0/16'),
('macaddr', "'08:00:2b:01:02:03'", 'STRING', '08:00:2b:01:02:03'),
# ('macaddr8', "'08:00:2b:01:02:03'", 'STRING', '08:00:2b:ff:fe:01:02:03'),
# ('bit(8)', "b'10101010'", 'BYTE_ARRAY', '08:00:2b:ff:fe:01:02:03'), # Doesn't work at all today
('bit varying(3)', "b'101'", 'STRING', '101'),
('uuid', "'a0eebc99-9c0b-4ef8-bb6d-6bb9bd380a11'", 'STRING', 'a0eebc99-9c0b-4ef8-bb6d-6bb9bd380a11'),
# ('xml', "'<foo>bar</foo>'", 'STRING', ''), # Doesn't work properly today
("json", "'{\"a\":\"b\"}'", 'STRING', '{"a":"b"}'),
("jsonb", "'{\"a\":\"b\"}'", 'STRING', '{"a": "b"}'),
("integer[3][3]", "'{{1,2,3},{4,5,6},{7,8,9}}'", 'STRING', '{{1,2,3},{4,5,6},{7,8,9}}'),
("ct", "ROW(1, 2)", 'STRING', '(1,2)'),
("int4range", "'[1,2)'", 'STRING', '[1,2)'),
("int8range", "'[1,2)'", 'STRING', '[1,2)'),
("numrange", "'[1,2)'", 'STRING', '[1,2)'),
("tsrange", "'[2010-01-01 14:30, 2010-01-01 15:30)'", 'STRING', '["2010-01-01 14:30:00","2010-01-01 15:30:00")'),
("tstzrange", "'[2010-01-01 14:30 America/New_York, 2010-01-01 15:30 America/New_York)'", 'STRING',
'["2010-01-01 19:30:00+00","2010-01-01 20:30:00+00")'),
("daterange", "'[2010-01-01, 2010-01-02)'", 'STRING', '[2010-01-01,2010-01-02)'),
])
@pytest.mark.parametrize('use_table_origin', [True, False])
def test_jdbc_postgresql_types(sdc_builder, sdc_executor, database, use_table_origin, sql_type, insert_fragment,
expected_type, expected_value):
"""Test all feasible PostgreSQL types."""
table_name = get_random_string(string.ascii_lowercase, 20)
connection = database.engine.connect()
try:
# Create enum type conditionally
connection.execute(f"""
DO
$$
BEGIN
IF NOT EXISTS (SELECT * FROM pg_type typ
INNER JOIN pg_namespace nsp ON nsp.oid = typ.typnamespace
WHERE nsp.nspname = current_schema() AND typ.typname = 'ai') THEN
CREATE TYPE ai AS ENUM ('sad', 'ok', 'happy');
END IF;
END;
$$
LANGUAGE plpgsql;
""")
# Create enum complex type conditionally
connection.execute(f"""
DO
$$
BEGIN
IF NOT EXISTS (SELECT * FROM pg_type typ
INNER JOIN pg_namespace nsp ON nsp.oid = typ.typnamespace
WHERE nsp.nspname = current_schema() AND typ.typname = 'ct') THEN
CREATE TYPE ct AS (a int, b int);
END IF;
END;
$$
LANGUAGE plpgsql;
""")
# Create table
connection.execute(f"""
CREATE TABLE {table_name}(
id int primary key,
data_column {sql_type} NULL
)
""")
# And insert a row with actual value
connection.execute(f"INSERT INTO {table_name} VALUES(1, {insert_fragment})")
# And a null
connection.execute(f"INSERT INTO {table_name} VALUES(2, NULL)")
builder = sdc_builder.get_pipeline_builder()
if use_table_origin:
origin = builder.add_stage('JDBC Multitable Consumer')
origin.table_configs = [{"tablePattern": f'%{table_name}%'}]
origin.on_unknown_type = 'CONVERT_TO_STRING'
else:
origin = builder.add_stage('JDBC Query Consumer')
origin.sql_query = 'SELECT * FROM {0}'.format(table_name)
origin.incremental_mode = False
origin.on_unknown_type = 'CONVERT_TO_STRING'
trash = builder.add_stage('Trash')
origin >> trash
pipeline = builder.build().configure_for_environment(database)
sdc_executor.add_pipeline(pipeline)
snapshot = sdc_executor.capture_snapshot(pipeline=pipeline, start_pipeline=True).snapshot
sdc_executor.stop_pipeline(pipeline)
assert len(snapshot[origin].output) == 2
record = snapshot[origin].output[0]
null_record = snapshot[origin].output[1]
        # Since we are controlling types, we want to check explicit values inside the record rather than the Python
        # wrappers.
# TLKT-177: Add ability for field to return raw value
assert record.field['data_column'].type == expected_type
assert null_record.field['data_column'].type == expected_type
assert record.field['data_column']._data['value'] == expected_value
assert null_record.field['data_column'] == None
finally:
logger.info('Dropping table %s in %s database ...', table_name, database.type)
connection.execute(f"DROP TABLE {table_name}")
@sdc_min_version('3.0.0.0')
@database('sqlserver')
# https://docs.microsoft.com/en-us/sql/t-sql/data-types/data-types-transact-sql?view=sql-server-2017
# hierarchyid types not supported
# Geometry and geography not supported
@pytest.mark.parametrize('sql_type,insert_fragment,expected_type,expected_value', [
('DATE', "'2019-01-01'", 'DATE', 1546300800000),
('DATETIME', "'2004-05-23T14:25:10'", 'DATETIME', 1085322310000),
('DATETIME2', "'2004-05-23T14:25:10'", 'DATETIME', 1085322310000),
('DATETIMEOFFSET', "'2004-05-23 14:25:10.3456 -08:00'", 'DEPENDS_ON_VERSION', 'depends_on_version'),
('SMALLDATETIME', "'2004-05-23T14:25:10'", 'DATETIME', 1085322300000),
('TIME', "'14:25:10'", 'TIME', 51910000),
('BIT', "1", 'BOOLEAN', True),
('DECIMAL(5,2)', '5.20', 'DECIMAL', '5.20'),
('NUMERIC(5,2)', '5.20', 'DECIMAL', '5.20'),
('REAL', '5.20', 'FLOAT', '5.2'),
('FLOAT', '5.20', 'DOUBLE', '5.2'),
('TINYINT', '255', 'SHORT', 255),
('SMALLINT', '-32768', 'SHORT', -32768),
('INT', '-2147483648', 'INTEGER', '-2147483648'),
('BIGINT', '-9223372036854775807', 'LONG', '-9223372036854775807'),
('MONEY', '255.60', 'DECIMAL', '255.6000'),
('SMALLMONEY', '255.60', 'DECIMAL', '255.6000'),
('BINARY(5)', "CAST('Hello' AS BINARY(5))", 'BYTE_ARRAY', 'SGVsbG8='),
('VARBINARY(5)', "CAST('Hello' AS VARBINARY(5))", 'BYTE_ARRAY', 'SGVsbG8='),
('CHAR(5)', "'Hello'", 'STRING', 'Hello'),
('VARCHAR(5)', "'Hello'", 'STRING', 'Hello'),
('NCHAR(5)', "'Hello'", 'STRING', 'Hello'),
('NVARCHAR(5)', "'Hello'", 'STRING', 'Hello'),
('TEXT', "'Hello'", 'STRING', 'Hello'),
('NTEXT', "'Hello'", 'STRING', 'Hello'),
('IMAGE', "CAST('Hello' AS IMAGE)", 'BYTE_ARRAY', 'SGVsbG8='),
# ('GEOGRAPHY',"geography::STGeomFromText('LINESTRING(-122.360 47.656, -122.343 47.656 )', 4326)", 'BYTE_ARRAY', '5hAAAAEUhxbZzvfTR0DXo3A9CpdewIcW2c7300dAy6FFtvOVXsA='),
# ('GEOMETRY',"geometry::STGeomFromText('LINESTRING (100 100, 20 180, 180 180)', 0)", 'BYTE_ARRAY', 'AAAAAAEEAwAAAAAAAAAAAFlAAAAAAAAAWUAAAAAAAAA0QAAAAAAAgGZAAAAAAACAZkAAAAAAAIBmQAEAAAABAAAAAAEAAAD/////AAAAAAI='),
('XML', "'<a></a>'", 'STRING', '<a/>')
])
@pytest.mark.parametrize('use_table_origin', [True, False])
def test_jdbc_sqlserver_types(sdc_builder, sdc_executor, database, use_table_origin, sql_type, insert_fragment,
expected_type, expected_value):
"""Test all feasible SQL Server types."""
table_name = get_random_string(string.ascii_lowercase, 20)
connection = database.engine.connect()
try:
# Create table
connection.execute(f"""
CREATE TABLE {table_name}(
id int primary key,
data_column {sql_type} NULL
)
""")
# And insert a row with actual value
connection.execute(f"INSERT INTO {table_name} VALUES(1, {insert_fragment})")
# And a null
connection.execute(f"INSERT INTO {table_name} VALUES(2, NULL)")
builder = sdc_builder.get_pipeline_builder()
if use_table_origin:
origin = builder.add_stage('JDBC Multitable Consumer')
origin.table_configs = [{"tablePattern": f'%{table_name}%'}]
else:
origin = builder.add_stage('JDBC Query Consumer')
origin.sql_query = 'SELECT * FROM {0}'.format(table_name)
origin.incremental_mode = False
trash = builder.add_stage('Trash')
# As a part of SDC-10125, DATETIMEOFFSET is natively supported in SDC, and is converted into ZONED_DATETIME
if sql_type == 'DATETIMEOFFSET':
if Version(sdc_builder.version) >= Version('3.14.0'):
expected_type = 'ZONED_DATETIME'
expected_value = '2004-05-23T14:25:10.3456-08:00'
else:
expected_type = 'STRING'
expected_value = '2004-05-23 14:25:10.3456 -08:00'
# This unknown_type_action setting is required, otherwise DATETIMEOFFSET tests for SDC < 3.14 will fail.
origin.on_unknown_type = 'CONVERT_TO_STRING'
origin >> trash
pipeline = builder.build().configure_for_environment(database)
sdc_executor.add_pipeline(pipeline)
snapshot = sdc_executor.capture_snapshot(pipeline=pipeline, start_pipeline=True).snapshot
sdc_executor.stop_pipeline(pipeline)
assert len(snapshot[origin].output) == 2
record = snapshot[origin].output[0]
null_record = snapshot[origin].output[1]
        # Since we are controlling types, we want to check explicit values inside the record rather than the Python
        # wrappers.
# TLKT-177: Add ability for field to return raw value
assert record.field['data_column'].type == expected_type
assert null_record.field['data_column'].type == expected_type
assert record.field['data_column']._data['value'] == expected_value
assert null_record.field['data_column'] == None
finally:
logger.info('Dropping table %s in %s database ...', table_name, database.type)
connection.execute(f"DROP TABLE {table_name}")
@sdc_min_version('3.12.0')
@database('sqlserver')
@pytest.mark.parametrize('on_unknown_type_action', ['CONVERT_TO_STRING', 'STOP_PIPELINE'])
def test_jdbc_sqlserver_on_unknown_type_action(sdc_builder, sdc_executor, database, on_unknown_type_action):
"""Test JDBC Multitable Consumer with MS-SQL server for the on_unknown_type action.
This is to verify SDC-12764.
    When the 'On Unknown Type' action is set to STOP_PIPELINE, the pipeline should stop with a StageException since it
    cannot convert the DATETIMEOFFSET field.
    When the 'On Unknown Type' action is set to CONVERT_TO_STRING, the pipeline should convert the unknown type to a
    string and process the next record.
The pipeline will look like:
JDBC_Multitable_Consumer >> trash
"""
if Version(sdc_builder.version) >= Version('3.14.0'):
pytest.skip("Skipping SQLServer Unknown Type action check, since DATETIMEOFFSET field is now natively supported from SDC Version 3.14.0")
column_type = 'DATETIMEOFFSET'
INPUT_DATE = "'2004-05-23T14:25:10'"
EXPECTED_OUTCOME = OrderedDict(id=1, date_offset='2004-05-23 14:25:10 +00:00')
table_name = get_random_string(string.ascii_lowercase, 20)
connection = database.engine.connect()
pipeline_builder = sdc_builder.get_pipeline_builder()
# Setup Origin with specified unknown type action
jdbc_multitable_consumer = pipeline_builder.add_stage('JDBC Multitable Consumer')
jdbc_multitable_consumer.set_attributes(table_configs=[{"tablePattern": f'%{table_name}%'}],
on_unknown_type=on_unknown_type_action)
# Setup destination
    trash = pipeline_builder.add_stage('Trash')
# Connect the pipeline stages
jdbc_multitable_consumer >> trash
pipeline = pipeline_builder.build().configure_for_environment(database)
sdc_executor.add_pipeline(pipeline)
# Create table and add a row
connection.execute(f"""
CREATE TABLE {table_name}(
id int primary key,
date_offset {column_type} NOT NULL
)
""")
connection.execute(f"INSERT INTO {table_name} VALUES(1, {INPUT_DATE})")
try:
if on_unknown_type_action == 'STOP_PIPELINE':
# Pipeline should stop with StageException
with pytest.raises(Exception):
sdc_executor.start_pipeline(pipeline)
sdc_executor.stop_pipeline(pipeline)
status = sdc_executor.get_pipeline_status(pipeline).response.json().get('status')
assert 'RUN_ERROR' == status
else:
snapshot = sdc_executor.capture_snapshot(pipeline=pipeline, start_pipeline=True).snapshot
output_records = snapshot[jdbc_multitable_consumer].output
assert len(output_records) == 1
assert output_records[0].field == EXPECTED_OUTCOME
finally:
status = sdc_executor.get_pipeline_status(pipeline).response.json().get('status')
if status == 'RUNNING':
sdc_executor.stop_pipeline(pipeline)
logger.info('Dropping table %s in %s database ...', table_name, database.type)
connection.execute(f"DROP TABLE {table_name}")
@sdc_min_version('3.14.0')
@database('sqlserver')
def test_jdbc_sqlserver_datetimeoffset_as_primary_key(sdc_builder, sdc_executor, database):
"""Test JDBC Multitable Consumer with SQLServer table configured with DATETIMEOFFSET column as primary key.
The pipeline will look like:
JDBC_Multitable_Consumer >> trash
"""
INPUT_COLUMN_TYPE, INPUT_DATE = 'DATETIMEOFFSET', "'2004-05-23 14:25:10.3456 -08:00'"
EXPECTED_TYPE, EXPECTED_VALUE = 'ZONED_DATETIME', '2004-05-23T14:25:10.3456-08:00'
table_name = get_random_string(string.ascii_lowercase, 20)
connection = database.engine.connect()
pipeline_builder = sdc_builder.get_pipeline_builder()
jdbc_multitable_consumer = pipeline_builder.add_stage('JDBC Multitable Consumer')
jdbc_multitable_consumer.set_attributes(table_configs=[{"tablePattern": f'%{table_name}%'}])
    trash = pipeline_builder.add_stage('Trash')
jdbc_multitable_consumer >> trash
pipeline = pipeline_builder.build().configure_for_environment(database)
sdc_executor.add_pipeline(pipeline)
connection.execute(f"""
CREATE TABLE {table_name}(
dto {INPUT_COLUMN_TYPE} NOT NULL PRIMARY KEY
)
""")
connection.execute(f"INSERT INTO {table_name} VALUES({INPUT_DATE})")
try:
snapshot = sdc_executor.capture_snapshot(pipeline=pipeline, start_pipeline=True).snapshot
sdc_executor.stop_pipeline(pipeline)
assert len(snapshot[jdbc_multitable_consumer].output) == 1
record = snapshot[jdbc_multitable_consumer].output[0]
assert record.field['dto'].type == EXPECTED_TYPE
assert record.field['dto'].value == EXPECTED_VALUE
finally:
logger.info('Dropping table %s in %s database ...', table_name, database.type)
connection.execute(f"DROP TABLE {table_name}")
# Test for SDC-13288
@database('db2')
def test_jdbc_producer_db2_long_record(sdc_builder, sdc_executor, database):
"""Test that JDBC Producer correctly sends record when setting Custom Data SQLSTATE for db2 database instead of
throwing StageException. The pipelines reads a file with 5 records 1 by 1 having the last record being biggest
than the db2 table column size. That throws an error with an specific SQL Code (22001). Having that code in Custom
Data SQLSTATE sends the last record to error.
The pipeline looks like:
directory_origin >> jdbc_producer
    In order to create the file read by the directory origin, another pipeline is used that looks like:
dev_raw_data_source >> local_fs
"""
# Insert data into file.
tmp_directory = os.path.join(tempfile.gettempdir(), get_random_string(string.ascii_letters, 10))
csv_records = ['1,hello', '2,hello', '3,hello', '4,hello', '5,hellolargerword']
_setup_delimited_file(sdc_executor, tmp_directory, csv_records)
# Create directory origin.
pipeline_builder = sdc_builder.get_pipeline_builder()
directory = pipeline_builder.add_stage('Directory', type='origin')
directory.set_attributes(data_format='DELIMITED',
file_name_pattern='sdc*', file_name_pattern_mode='GLOB',
file_post_processing='DELETE', files_directory=tmp_directory,
batch_size_in_recs=1)
# Create jdbc producer destination.
    # Create table. DB2 internally stores table names in uppercase, so the name uses ASCII uppercase directly.
table_name = get_random_string(string.ascii_uppercase, 20)
database.engine.execute(f'CREATE TABLE {table_name} (id VARCHAR(20) NOT NULL PRIMARY KEY, a VARCHAR(10));')
field_to_column_mapping = [dict(columnName='ID',
dataType='USE_COLUMN_TYPE',
field='/0',
paramValue='?'),
dict(columnName='A',
dataType='USE_COLUMN_TYPE',
field='/1',
paramValue='?')]
jdbc_producer = pipeline_builder.add_stage('JDBC Producer')
jdbc_producer.set_attributes(default_operation="INSERT",
schema_name=DEFAULT_DB2_SCHEMA,
table_name=table_name,
field_to_column_mapping=field_to_column_mapping,
stage_on_record_error='TO_ERROR',
data_sqlstate_codes=["22001"])
directory >> jdbc_producer
directory_jdbc_producer_pipeline = pipeline_builder.build(
title='Directory - JDBC Producer. Test DB2 sql code error').configure_for_environment(database)
sdc_executor.add_pipeline(directory_jdbc_producer_pipeline)
try:
snapshot = sdc_executor.capture_snapshot(directory_jdbc_producer_pipeline, start_pipeline=True, batch_size=1,
batches=5).snapshot
sdc_executor.stop_pipeline(directory_jdbc_producer_pipeline)
assert 5 == len(snapshot.snapshot_batches)
result = database.engine.execute(f'SELECT ID,A FROM {table_name};')
data_from_database = sorted(result.fetchall(), key=lambda row: row[1]) # Order by id.
result.close()
        # Assert that the records in the database cover id=1 through id=4, excluding id=5. Columns => record[0] = id, record[1] = a.
assert data_from_database == [(record[0], record[1]) for record in
[unified_record.split(',') for unified_record in csv_records[:-1]]]
stage = snapshot.snapshot_batches[4][jdbc_producer.instance_name]
assert 1 == len(stage.error_records)
error_record = stage.error_records[0]
assert 'hellolargerword' == error_record.field['1']
assert 'JDBC_14' == error_record.header['errorCode']
assert 'SQLSTATE=22001' in error_record.header['errorMessage']
finally:
logger.info('Dropping table %s in %s database ...', table_name, database.type)
database.engine.execute(f'DROP TABLE {table_name}')
def _setup_delimited_file(sdc_executor, tmp_directory, csv_records):
"""Setup csv records and save in local system. The pipelines looks like:
dev_raw_data_source >> local_fs
"""
raw_data = "\n".join(csv_records)
pipeline_builder = sdc_executor.get_pipeline_builder()
dev_raw_data_source = pipeline_builder.add_stage('Dev Raw Data Source')
dev_raw_data_source.set_attributes(data_format='TEXT', raw_data=raw_data, stop_after_first_batch=True)
local_fs = pipeline_builder.add_stage('Local FS', type='destination')
local_fs.set_attributes(data_format='TEXT',
directory_template=tmp_directory,
files_prefix='sdc-${sdc:id()}', files_suffix='csv')
dev_raw_data_source >> local_fs
files_pipeline = pipeline_builder.build('Generate files pipeline')
sdc_executor.add_pipeline(files_pipeline)
# Generate some batches/files.
sdc_executor.start_pipeline(files_pipeline).wait_for_finished(timeout_sec=5)
return csv_records
# SDC-13556: Do not spin JDBC Destination and Tee Processor machinery for empty batches
@sdc_min_version('3.14.0')
@database('mysql')
@pytest.mark.parametrize('use_multi_row', [True, False])
def test_jdbc_tee_commits_on_empty_batches(use_multi_row, sdc_builder, sdc_executor, database):
"""Ensure that the JDBC Tee processor won't generate commits on empty batches. Since it's generally difficult
    to create empty batches in SDC, we use a scripting origin to generate them and then check the commit timer (which
    also carries a count) to ensure that we don't generate excessive commits on the database."""
builder = sdc_builder.get_pipeline_builder()
table_name = get_random_string(string.ascii_lowercase, 20)
script = """
// First batch contains exactly one record
var batch = sdc.createBatch();
var record = sdc.createRecord('generated data');
record.value = {'name': 'A'};
batch.add(record);
batch.process("batch", "non-empty");
// Send 1000 batches that will be empty
var step;
for (step = 0; step < 1000; step++) {
batch = sdc.createBatch();
batch.process("whatever", "batch-" + step);
}
"""
origin = builder.add_stage('JavaScript Scripting')
    origin.record_type = 'NATIVE_OBJECTS'
    origin.user_script = script
tee = builder.add_stage('JDBC Tee')
tee.default_operation = 'INSERT'
tee.field_to_column_mapping = [dict(columnName='name', field='/name', paramValue='?')]
tee.generated_column_mappings = [dict(columnName='id', field='/id')]
tee.table_name = table_name
tee.use_multi_row_operation = use_multi_row
trash = builder.add_stage('Trash')
origin >> tee >> trash
pipeline = builder.build().configure_for_environment(database)
sdc_executor.add_pipeline(pipeline)
table = _create_table(table_name, database)
try:
sdc_executor.start_pipeline(pipeline).wait_for_finished()
        # First of all, verify that the table has exactly one record with the expected values
result = database.engine.execute(table.select())
db = sorted(result.fetchall(), key=lambda row: row[1]) # order by id
result.close()
assert len(db) == 1
assert db[0][0] == 'A'
assert db[0][1] == 1
# Second of all, we should see exactly 1001 batches generated by our scripting origin
history = sdc_executor.get_pipeline_history(pipeline)
assert history.latest.metrics.counter('pipeline.batchCount.counter').count == 1001
# Then let's explore how many commits have we generated to ensure that we don't have 1001 commits
expected_commits = 1 if use_multi_row else 2
assert history.latest.metrics.timer('custom.JDBCTee_01.Commit Timer.0.timer').count == expected_commits
finally:
logger.info('Dropping table %s in %s database ...', table_name, database.type)
table.drop(database.engine)
@database
@sdc_min_version('3.15.0')
def test_multitable_quote_column_names(sdc_builder, sdc_executor, database):
"""
Ensure that we properly quote all table and column names when querying the database.
"""
table_name = "table_" + get_random_string(string.ascii_letters, 10)
offset_name = "column_" + get_random_string(string.ascii_letters, 10)
builder = sdc_builder.get_pipeline_builder()
origin = builder.add_stage('JDBC Multitable Consumer')
    origin.table_configs = [{"tablePattern": f'%{table_name}%'}]
origin.max_batch_size_in_records = 10
trash = builder.add_stage('Trash')
origin >> trash
pipeline = builder.build().configure_for_environment(database)
    # Work around the STF behavior of upper-casing the table name configuration
origin.table_configs[0]["tablePattern"] = f'%{table_name}%'
metadata = sqlalchemy.MetaData()
table = sqlalchemy.Table(
table_name,
metadata,
sqlalchemy.Column(offset_name, sqlalchemy.Integer, primary_key=True, quote=True),
        quote=True
)
try:
logger.info('Creating table %s in %s database ...', table_name, database.type)
table.create(database.engine)
        logger.info('Adding a single row into %s database ...', database.type)
connection = database.engine.connect()
connection.execute(table.insert(), [{offset_name: 1}])
sdc_executor.add_pipeline(pipeline)
snapshot = sdc_executor.capture_snapshot(pipeline=pipeline, start_pipeline=True).snapshot
# We want to run for a few seconds to see if any errors show up (like that did in previous versions)
time.sleep(10)
sdc_executor.stop_pipeline(pipeline)
# There should be no errors reported
history = sdc_executor.get_pipeline_history(pipeline)
assert history.latest.metrics.counter('stage.JDBCMultitableConsumer_01.errorRecords.counter').count == 0
assert history.latest.metrics.counter('stage.JDBCMultitableConsumer_01.stageErrors.counter').count == 0
# And verify that we properly read that one record
assert len(snapshot[origin].output) == 1
assert snapshot[origin].output[0].get_field_data('/' + offset_name) == 1
finally:
logger.info('Dropping table %s in %s database...', table_name, database.type)
table.drop(database.engine)
@database
@sdc_min_version('3.0.0.0')
def test_jdbc_multitable_consumer_duplicates_read_when_initial_offset_configured(sdc_builder, sdc_executor, database):
"""
SDC-13625 Integration test for SDC-13624 - MT Consumer ingests duplicates when initial offset is specified
Setup origin as follows:
partitioning enabled + num_threads and num partitions > 1 + override offset column set
+ initial value specified for offset
    Verify that the origin does not ingest records more than once (no duplicates) when an initial offset value is set.
Pipeline:
JDBC MT Consumer >> Trash
>= Pipeline Finisher (no-more-data)
"""
if database.type == 'Oracle':
pytest.skip("This test depends on proper case for column names that Oracle auto-uppers.")
src_table_prefix = get_random_string(string.ascii_lowercase, 6)
table_name = '{}_{}'.format(src_table_prefix, get_random_string(string.ascii_lowercase, 20))
pipeline_builder = sdc_builder.get_pipeline_builder()
jdbc_multitable_consumer = pipeline_builder.add_stage('JDBC Multitable Consumer')
jdbc_multitable_consumer.set_attributes(table_configs=[{
"tablePattern": f'{table_name}',
"enableNonIncremental": False,
"partitioningMode": "REQUIRED",
"partitionSize": "100000",
"maxNumActivePartitions": 5,
'overrideDefaultOffsetColumns': True,
'offsetColumns': ['created'],
'offsetColumnToInitialOffsetValue': [{
'key': 'created',
'value': '0'
}]
}])
jdbc_multitable_consumer.number_of_threads = 2
jdbc_multitable_consumer.maximum_pool_size = 2
trash = pipeline_builder.add_stage('Trash')
jdbc_multitable_consumer >> trash
finisher = pipeline_builder.add_stage("Pipeline Finisher Executor")
finisher.stage_record_preconditions = ['${record:eventType() == "no-more-data"}']
jdbc_multitable_consumer >= finisher
pipeline = pipeline_builder.build().configure_for_environment(database)
ONE_MILLION = 1000000
rows_in_table = [{'id': i, 'name': get_random_string(string.ascii_lowercase, 5), 'created': i + ONE_MILLION}
for i in range(1, 21)]
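    # Every 'created' value is at least 1,000,001, so the configured initial offset of 0 is below all of them and
    # each of the 20 rows must be ingested exactly once.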
metadata = sqlalchemy.MetaData()
table = sqlalchemy.Table(
table_name,
metadata,
sqlalchemy.Column('id', sqlalchemy.Integer, primary_key=True),
sqlalchemy.Column('name', sqlalchemy.String(5)),
sqlalchemy.Column('created', sqlalchemy.Integer)
)
try:
logger.info('Creating table %s in %s database ...', table_name, database.type)
table.create(database.engine)
logger.info('Adding 20 rows into %s table', table_name)
connection = database.engine.connect()
connection.execute(table.insert(), rows_in_table)
connection.close()
sdc_executor.add_pipeline(pipeline)
snapshot = sdc_executor.capture_snapshot(pipeline=pipeline, batches=2, start_pipeline=True).snapshot
rows_from_snapshot = [(record.get_field_data('/name').value,
record.get_field_data('/id').value,
record.get_field_data('/created').value)
for batch in snapshot.snapshot_batches
for record in batch.stage_outputs[jdbc_multitable_consumer.instance_name].output]
expected_data = [(row['name'], row['id'], row['created']) for row in rows_in_table]
assert rows_from_snapshot == expected_data
finally:
logger.info('Dropping table %s in %s database...', table_name, database.type)
table.drop(database.engine)
|
import json
from django.test import TestCase
from django.test import Client
class TestHealthCheck(TestCase):
def setUp(self):
self.c = Client()
def test_is_service_online_route(self):
resp = self.c.get("/health/")
        self.assertEqual(json.loads(resp.content), {"status": "online"})
|
# -*- coding: utf-8 -*-
#
# Copyright 2010 British Broadcasting Corporation and Kamaelia Contributors(1)
#
# (1) Kamaelia Contributors are listed in the AUTHORS file and at
# http://www.kamaelia.org/AUTHORS - please extend this file,
# not this notice.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -------------------------------------------------------------------------
class Queue(list):
def shift(self):
if self == []:
return None
result = self[0]
del self[0]
return result
def push(self, value):
self.append(value)
class MessageQueue(Queue):
def read(Queue):
raise NotImplementedError
def write(Queue):
raise NotImplementedError
class InboundQueue(MessageQueue):
def read(Queue):
"Should be defined in this class"
class OutboundQueue(MessageQueue):
def write(Queue, *Messages):
"Should be defined in this class"
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A set of functions that are used for visualization.
These functions often receive an image and perform some visualization on it.
The functions do not return a value; instead, they modify the image in place.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import collections
# Set headless-friendly backend.
#import matplotlib; matplotlib.use('Agg') # pylint: disable=multiple-statements
import matplotlib.pyplot as plt # pylint: disable=g-import-not-at-top
import numpy as np
import PIL.Image as Image
import PIL.ImageColor as ImageColor
import PIL.ImageDraw as ImageDraw
import PIL.ImageFont as ImageFont
import six
from six.moves import range
from six.moves import zip
import tensorflow.compat.v1 as tf
from object_detection.core import keypoint_ops
from object_detection.core import standard_fields as fields
from object_detection.utils import shape_utils
_TITLE_LEFT_MARGIN = 10
_TITLE_TOP_MARGIN = 10
STANDARD_COLORS = [
'AliceBlue', 'Chartreuse', 'Aqua', 'Aquamarine', 'Azure', 'Beige', 'Bisque',
'BlanchedAlmond', 'BlueViolet', 'BurlyWood', 'CadetBlue', 'AntiqueWhite',
'Chocolate', 'Coral', 'CornflowerBlue', 'Cornsilk', 'Crimson', 'Cyan',
'DarkCyan', 'DarkGoldenRod', 'DarkGrey', 'DarkKhaki', 'DarkOrange',
'DarkOrchid', 'DarkSalmon', 'DarkSeaGreen', 'DarkTurquoise', 'DarkViolet',
'DeepPink', 'DeepSkyBlue', 'DodgerBlue', 'FireBrick', 'FloralWhite',
'ForestGreen', 'Fuchsia', 'Gainsboro', 'GhostWhite', 'Gold', 'GoldenRod',
'Salmon', 'Tan', 'HoneyDew', 'HotPink', 'IndianRed', 'Ivory', 'Khaki',
'Lavender', 'LavenderBlush', 'LawnGreen', 'LemonChiffon', 'LightBlue',
'LightCoral', 'LightCyan', 'LightGoldenRodYellow', 'LightGray', 'LightGrey',
'LightGreen', 'LightPink', 'LightSalmon', 'LightSeaGreen', 'LightSkyBlue',
'LightSlateGray', 'LightSlateGrey', 'LightSteelBlue', 'LightYellow', 'Lime',
'LimeGreen', 'Linen', 'Magenta', 'MediumAquaMarine', 'MediumOrchid',
'MediumPurple', 'MediumSeaGreen', 'MediumSlateBlue', 'MediumSpringGreen',
'MediumTurquoise', 'MediumVioletRed', 'MintCream', 'MistyRose', 'Moccasin',
'NavajoWhite', 'OldLace', 'Olive', 'OliveDrab', 'Orange', 'OrangeRed',
'Orchid', 'PaleGoldenRod', 'PaleGreen', 'PaleTurquoise', 'PaleVioletRed',
'PapayaWhip', 'PeachPuff', 'Peru', 'Pink', 'Plum', 'PowderBlue', 'Purple',
'Red', 'RosyBrown', 'RoyalBlue', 'SaddleBrown', 'Green', 'SandyBrown',
'SeaGreen', 'SeaShell', 'Sienna', 'Silver', 'SkyBlue', 'SlateBlue',
'SlateGray', 'SlateGrey', 'Snow', 'SpringGreen', 'SteelBlue', 'GreenYellow',
'Teal', 'Thistle', 'Tomato', 'Turquoise', 'Violet', 'Wheat', 'White',
'WhiteSmoke', 'Yellow', 'YellowGreen'
]
def _get_multiplier_for_color_randomness():
"""Returns a multiplier to get semi-random colors from successive indices.
This function computes a prime number, p, in the range [2, 17] that:
- is closest to len(STANDARD_COLORS) / 10
- does not divide len(STANDARD_COLORS)
If no prime numbers in that range satisfy the constraints, p is returned as 1.
Once p is established, it can be used as a multiplier to select
non-consecutive colors from STANDARD_COLORS:
colors = [(p * i) % len(STANDARD_COLORS) for i in range(20)]
"""
num_colors = len(STANDARD_COLORS)
prime_candidates = [5, 7, 11, 13, 17]
# Remove all prime candidates that divide the number of colors.
prime_candidates = [p for p in prime_candidates if num_colors % p]
if not prime_candidates:
return 1
# Return the closest prime number to num_colors / 10.
abs_distance = [np.abs(num_colors / 10. - p) for p in prime_candidates]
num_candidates = len(abs_distance)
inds = [i for _, i in sorted(zip(abs_distance, range(num_candidates)))]
return prime_candidates[inds[0]]
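# Example: with 126 colors, 7 is dropped (it divides 126) and 13 is returned as the prime closest to 126 / 10 = 12.6.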
def save_image_array_as_png(image, output_path):
"""Saves an image (represented as a numpy array) to PNG.
Args:
image: a numpy array with shape [height, width, 3].
output_path: path to which image should be written.
"""
image_pil = Image.fromarray(np.uint8(image)).convert('RGB')
with tf.gfile.Open(output_path, 'w') as fid:
image_pil.save(fid, 'PNG')
def encode_image_array_as_png_str(image):
"""Encodes a numpy array into a PNG string.
Args:
image: a numpy array with shape [height, width, 3].
Returns:
PNG encoded image string.
"""
image_pil = Image.fromarray(np.uint8(image))
output = six.BytesIO()
image_pil.save(output, format='PNG')
png_string = output.getvalue()
output.close()
return png_string
def draw_bounding_box_on_image_array(image,
ymin,
xmin,
ymax,
xmax,
color='red',
thickness=4,
display_str_list=(),
use_normalized_coordinates=True):
"""Adds a bounding box to an image (numpy array).
Bounding box coordinates can be specified in either absolute (pixel) or
normalized coordinates by setting the use_normalized_coordinates argument.
Args:
image: a numpy array with shape [height, width, 3].
ymin: ymin of bounding box.
xmin: xmin of bounding box.
ymax: ymax of bounding box.
xmax: xmax of bounding box.
color: color to draw bounding box. Default is red.
thickness: line thickness. Default value is 4.
display_str_list: list of strings to display in box
(each to be shown on its own line).
use_normalized_coordinates: If True (default), treat coordinates
ymin, xmin, ymax, xmax as relative to the image. Otherwise treat
coordinates as absolute.
"""
image_pil = Image.fromarray(np.uint8(image)).convert('RGB')
draw_bounding_box_on_image(image_pil, ymin, xmin, ymax, xmax, color,
thickness, display_str_list,
use_normalized_coordinates)
np.copyto(image, np.array(image_pil))
def draw_bounding_box_on_image(image,
ymin,
xmin,
ymax,
xmax,
color='red',
thickness=4,
display_str_list=(),
use_normalized_coordinates=True):
"""Adds a bounding box to an image.
Bounding box coordinates can be specified in either absolute (pixel) or
normalized coordinates by setting the use_normalized_coordinates argument.
Each string in display_str_list is displayed on a separate line above the
bounding box in black text on a rectangle filled with the input 'color'.
If the top of the bounding box extends to the edge of the image, the strings
are displayed below the bounding box.
Args:
image: a PIL.Image object.
ymin: ymin of bounding box.
xmin: xmin of bounding box.
ymax: ymax of bounding box.
xmax: xmax of bounding box.
color: color to draw bounding box. Default is red.
thickness: line thickness. Default value is 4.
display_str_list: list of strings to display in box
(each to be shown on its own line).
use_normalized_coordinates: If True (default), treat coordinates
ymin, xmin, ymax, xmax as relative to the image. Otherwise treat
coordinates as absolute.
"""
draw = ImageDraw.Draw(image)
im_width, im_height = image.size
if use_normalized_coordinates:
(left, right, top, bottom) = (xmin * im_width, xmax * im_width,
ymin * im_height, ymax * im_height)
else:
(left, right, top, bottom) = (xmin, xmax, ymin, ymax)
if thickness > 0:
draw.line([(left, top), (left, bottom), (right, bottom), (right, top),
(left, top)],
width=thickness,
fill=color)
try:
font = ImageFont.truetype('arial.ttf', 24)
except IOError:
font = ImageFont.load_default()
# If the total height of the display strings added to the top of the bounding
# box exceeds the top of the image, stack the strings below the bounding box
# instead of above.
display_str_heights = [font.getsize(ds)[1] for ds in display_str_list]
# Each display_str has a top and bottom margin of 0.05x.
total_display_str_height = (1 + 2 * 0.05) * sum(display_str_heights)
if top > total_display_str_height:
text_bottom = top
else:
text_bottom = bottom + total_display_str_height
# Reverse list and print from bottom to top.
for display_str in display_str_list[::-1]:
text_width, text_height = font.getsize(display_str)
margin = np.ceil(0.05 * text_height)
draw.rectangle(
[(left, text_bottom - text_height - 2 * margin), (left + text_width,
text_bottom)],
fill=color)
draw.text(
(left + margin, text_bottom - text_height - margin),
display_str,
fill='black',
font=font)
    text_bottom -= text_height + 2 * margin  # step up by the full label height plus both margins
def draw_bounding_boxes_on_image_array(image,
boxes,
color='red',
thickness=4,
display_str_list_list=()):
"""Draws bounding boxes on image (numpy array).
Args:
image: a numpy array object.
boxes: a 2 dimensional numpy array of [N, 4]: (ymin, xmin, ymax, xmax).
The coordinates are in normalized format between [0, 1].
color: color to draw bounding box. Default is red.
thickness: line thickness. Default value is 4.
display_str_list_list: list of list of strings.
a list of strings for each bounding box.
The reason to pass a list of strings for a
bounding box is that it might contain
multiple labels.
Raises:
ValueError: if boxes is not a [N, 4] array
"""
image_pil = Image.fromarray(image)
draw_bounding_boxes_on_image(image_pil, boxes, color, thickness,
display_str_list_list)
np.copyto(image, np.array(image_pil))
def draw_bounding_boxes_on_image(image,
boxes,
color='red',
thickness=4,
display_str_list_list=()):
"""Draws bounding boxes on image.
Args:
image: a PIL.Image object.
boxes: a 2 dimensional numpy array of [N, 4]: (ymin, xmin, ymax, xmax).
The coordinates are in normalized format between [0, 1].
color: color to draw bounding box. Default is red.
thickness: line thickness. Default value is 4.
display_str_list_list: list of list of strings.
a list of strings for each bounding box.
The reason to pass a list of strings for a
bounding box is that it might contain
multiple labels.
Raises:
ValueError: if boxes is not a [N, 4] array
"""
boxes_shape = boxes.shape
if not boxes_shape:
return
if len(boxes_shape) != 2 or boxes_shape[1] != 4:
raise ValueError('Input must be of size [N, 4]')
for i in range(boxes_shape[0]):
display_str_list = ()
if display_str_list_list:
display_str_list = display_str_list_list[i]
draw_bounding_box_on_image(image, boxes[i, 0], boxes[i, 1], boxes[i, 2],
boxes[i, 3], color, thickness, display_str_list)
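# Minimal sketch (illustrative addition): drawing several normalized boxes in
# one call with draw_bounding_boxes_on_image_array. The box values and labels
# are made up for demonstration.
def _example_draw_multiple_boxes():
  frame = np.zeros((300, 300, 3), dtype=np.uint8)
  boxes = np.array([[0.1, 0.1, 0.4, 0.4],
                    [0.5, 0.5, 0.9, 0.9]], dtype=np.float32)
  draw_bounding_boxes_on_image_array(
      frame, boxes,
      color='Cyan',
      thickness=2,
      display_str_list_list=[['cat'], ['dog']])
  return frame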
def create_visualization_fn(category_index,
include_masks=False,
include_keypoints=False,
include_keypoint_scores=False,
include_track_ids=False,
**kwargs):
"""Constructs a visualization function that can be wrapped in a py_func.
py_funcs only accept positional arguments. This function returns a suitable
function with the correct positional argument mapping. The positional
arguments in order are:
0: image
1: boxes
2: classes
3: scores
[4]: masks (optional)
[4-5]: keypoints (optional)
[4-6]: keypoint_scores (optional)
[4-7]: track_ids (optional)
-- Example 1 --
vis_only_masks_fn = create_visualization_fn(category_index,
include_masks=True, include_keypoints=False, include_track_ids=False,
**kwargs)
image = tf.py_func(vis_only_masks_fn,
inp=[image, boxes, classes, scores, masks],
Tout=tf.uint8)
-- Example 2 --
vis_masks_and_track_ids_fn = create_visualization_fn(category_index,
include_masks=True, include_keypoints=False, include_track_ids=True,
**kwargs)
image = tf.py_func(vis_masks_and_track_ids_fn,
inp=[image, boxes, classes, scores, masks, track_ids],
Tout=tf.uint8)
Args:
category_index: a dict that maps integer ids to category dicts. e.g.
{1: {1: 'dog'}, 2: {2: 'cat'}, ...}
include_masks: Whether masks should be expected as a positional argument in
the returned function.
include_keypoints: Whether keypoints should be expected as a positional
argument in the returned function.
include_keypoint_scores: Whether keypoint scores should be expected as a
positional argument in the returned function.
include_track_ids: Whether track ids should be expected as a positional
argument in the returned function.
**kwargs: Additional kwargs that will be passed to
visualize_boxes_and_labels_on_image_array.
Returns:
Returns a function that only takes tensors as positional arguments.
"""
def visualization_py_func_fn(*args):
"""Visualization function that can be wrapped in a tf.py_func.
Args:
*args: First 4 positional arguments must be:
image - uint8 numpy array with shape (img_height, img_width, 3).
boxes - a numpy array of shape [N, 4].
classes - a numpy array of shape [N].
scores - a numpy array of shape [N] or None.
-- Optional positional arguments --
instance_masks - a numpy array of shape [N, image_height, image_width].
keypoints - a numpy array of shape [N, num_keypoints, 2].
keypoint_scores - a numpy array of shape [N, num_keypoints].
track_ids - a numpy array of shape [N] with unique track ids.
Returns:
uint8 numpy array with shape (img_height, img_width, 3) with overlaid
boxes.
"""
image = args[0]
boxes = args[1]
classes = args[2]
scores = args[3]
masks = keypoints = keypoint_scores = track_ids = None
pos_arg_ptr = 4 # Positional argument for first optional tensor (masks).
if include_masks:
masks = args[pos_arg_ptr]
pos_arg_ptr += 1
if include_keypoints:
keypoints = args[pos_arg_ptr]
pos_arg_ptr += 1
if include_keypoint_scores:
keypoint_scores = args[pos_arg_ptr]
pos_arg_ptr += 1
if include_track_ids:
track_ids = args[pos_arg_ptr]
return visualize_boxes_and_labels_on_image_array(
image,
boxes,
classes,
scores,
category_index=category_index,
instance_masks=masks,
keypoints=keypoints,
keypoint_scores=keypoint_scores,
track_ids=track_ids,
**kwargs)
return visualization_py_func_fn
def draw_heatmaps_on_image(image, heatmaps):
"""Draws heatmaps on an image.
The heatmaps are handled channel by channel and different colors are used to
paint different heatmap channels.
Args:
image: a PIL.Image object.
heatmaps: a numpy array with shape [image_height, image_width, channel].
Note that the image_height and image_width should match the size of input
image.
"""
draw = ImageDraw.Draw(image)
channel = heatmaps.shape[2]
for c in range(channel):
heatmap = heatmaps[:, :, c] * 255
heatmap = heatmap.astype('uint8')
bitmap = Image.fromarray(heatmap, 'L')
    bitmap = bitmap.convert('1')  # convert() returns a new image; keep it so the mask is binarized
draw.bitmap(
xy=[(0, 0)],
bitmap=bitmap,
fill=STANDARD_COLORS[c])
def draw_heatmaps_on_image_array(image, heatmaps):
"""Overlays heatmaps to an image (numpy array).
  The function overlays the heatmaps on top of the image. The heatmap values
  will be painted with different colors depending on the channels. Similar to
  the "draw_heatmaps_on_image" function except the inputs are numpy arrays.
Args:
image: a numpy array with shape [height, width, 3].
heatmaps: a numpy array with shape [height, width, channel].
Returns:
An uint8 numpy array representing the input image painted with heatmap
colors.
"""
if not isinstance(image, np.ndarray):
image = image.numpy()
if not isinstance(heatmaps, np.ndarray):
heatmaps = heatmaps.numpy()
image_pil = Image.fromarray(np.uint8(image)).convert('RGB')
draw_heatmaps_on_image(image_pil, heatmaps)
return np.array(image_pil)
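# Illustrative sketch (an addition, with made-up shapes): overlaying a
# two-channel heatmap on an RGB image via draw_heatmaps_on_image_array.
# Heatmap values are expected in [0, 1]; each channel is painted with a
# different STANDARD_COLORS entry.
def _example_overlay_heatmaps():
  image = np.zeros((64, 64, 3), dtype=np.uint8)
  heatmaps = np.zeros((64, 64, 2), dtype=np.float32)
  heatmaps[16:32, 16:32, 0] = 1.0  # first channel active in one square region
  heatmaps[40:56, 40:56, 1] = 0.8  # second channel active in another region
  return draw_heatmaps_on_image_array(image, heatmaps)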
def draw_heatmaps_on_image_tensors(images,
heatmaps,
apply_sigmoid=False):
"""Draws heatmaps on batch of image tensors.
Args:
images: A 4D uint8 image tensor of shape [N, H, W, C]. If C > 3, additional
channels will be ignored. If C = 1, then we convert the images to RGB
images.
heatmaps: [N, h, w, channel] float32 tensor of heatmaps. Note that the
heatmaps will be resized to match the input image size before overlaying
      the heatmaps with the input images. Ideally the heatmap height and width
      should have the same aspect ratio as the input image to avoid potential
      misalignment introduced by the resize.
apply_sigmoid: Whether to apply a sigmoid layer on top of the heatmaps. If
the heatmaps come directly from the prediction logits, then we should
apply the sigmoid layer to make sure the values are in between [0.0, 1.0].
Returns:
4D image tensor of type uint8, with heatmaps overlaid on top.
"""
# Additional channels are being ignored.
if images.shape[3] > 3:
images = images[:, :, :, 0:3]
elif images.shape[3] == 1:
images = tf.image.grayscale_to_rgb(images)
_, height, width, _ = shape_utils.combined_static_and_dynamic_shape(images)
if apply_sigmoid:
heatmaps = tf.math.sigmoid(heatmaps)
resized_heatmaps = tf.image.resize(heatmaps, size=[height, width])
elems = [images, resized_heatmaps]
def draw_heatmaps(image_and_heatmaps):
"""Draws heatmaps on image."""
image_with_heatmaps = tf.py_function(
draw_heatmaps_on_image_array,
image_and_heatmaps,
tf.uint8)
return image_with_heatmaps
images = tf.map_fn(draw_heatmaps, elems, dtype=tf.uint8, back_prop=False)
return images
def _resize_original_image(image, image_shape):
image = tf.expand_dims(image, 0)
image = tf.image.resize_images(
image,
image_shape,
method=tf.image.ResizeMethod.NEAREST_NEIGHBOR,
align_corners=True)
return tf.cast(tf.squeeze(image, 0), tf.uint8)
def draw_bounding_boxes_on_image_tensors(images,
boxes,
classes,
scores,
category_index,
original_image_spatial_shape=None,
true_image_shape=None,
instance_masks=None,
keypoints=None,
keypoint_scores=None,
keypoint_edges=None,
track_ids=None,
max_boxes_to_draw=20,
min_score_thresh=0.2,
use_normalized_coordinates=True):
"""Draws bounding boxes, masks, and keypoints on batch of image tensors.
Args:
images: A 4D uint8 image tensor of shape [N, H, W, C]. If C > 3, additional
channels will be ignored. If C = 1, then we convert the images to RGB
images.
boxes: [N, max_detections, 4] float32 tensor of detection boxes.
classes: [N, max_detections] int tensor of detection classes. Note that
classes are 1-indexed.
scores: [N, max_detections] float32 tensor of detection scores.
category_index: a dict that maps integer ids to category dicts. e.g.
{1: {1: 'dog'}, 2: {2: 'cat'}, ...}
original_image_spatial_shape: [N, 2] tensor containing the spatial size of
the original image.
true_image_shape: [N, 3] tensor containing the spatial size of unpadded
original_image.
instance_masks: A 4D uint8 tensor of shape [N, max_detection, H, W] with
instance masks.
keypoints: A 4D float32 tensor of shape [N, max_detection, num_keypoints, 2]
with keypoints.
keypoint_scores: A 3D float32 tensor of shape [N, max_detection,
num_keypoints] with keypoint scores.
keypoint_edges: A list of tuples with keypoint indices that specify which
keypoints should be connected by an edge, e.g. [(0, 1), (2, 4)] draws
edges from keypoint 0 to 1 and from keypoint 2 to 4.
    track_ids: [N, max_detections] int32 tensor of unique track ids (i.e.
instance ids for each object). If provided, the color-coding of boxes is
dictated by these ids, and not classes.
max_boxes_to_draw: Maximum number of boxes to draw on an image. Default 20.
min_score_thresh: Minimum score threshold for visualization. Default 0.2.
    use_normalized_coordinates: Whether to assume boxes and keypoints are in
      normalized coordinates (as opposed to absolute coordinates).
Default is True.
Returns:
4D image tensor of type uint8, with boxes drawn on top.
"""
# Additional channels are being ignored.
if images.shape[3] > 3:
images = images[:, :, :, 0:3]
elif images.shape[3] == 1:
images = tf.image.grayscale_to_rgb(images)
visualization_keyword_args = {
'use_normalized_coordinates': use_normalized_coordinates,
'max_boxes_to_draw': max_boxes_to_draw,
'min_score_thresh': min_score_thresh,
'agnostic_mode': False,
'line_thickness': 4,
'keypoint_edges': keypoint_edges
}
if true_image_shape is None:
true_shapes = tf.constant(-1, shape=[images.shape.as_list()[0], 3])
else:
true_shapes = true_image_shape
if original_image_spatial_shape is None:
original_shapes = tf.constant(-1, shape=[images.shape.as_list()[0], 2])
else:
original_shapes = original_image_spatial_shape
visualize_boxes_fn = create_visualization_fn(
category_index,
include_masks=instance_masks is not None,
include_keypoints=keypoints is not None,
include_keypoint_scores=keypoint_scores is not None,
include_track_ids=track_ids is not None,
**visualization_keyword_args)
elems = [true_shapes, original_shapes, images, boxes, classes, scores]
if instance_masks is not None:
elems.append(instance_masks)
if keypoints is not None:
elems.append(keypoints)
if keypoint_scores is not None:
elems.append(keypoint_scores)
if track_ids is not None:
elems.append(track_ids)
def draw_boxes(image_and_detections):
"""Draws boxes on image."""
true_shape = image_and_detections[0]
original_shape = image_and_detections[1]
if true_image_shape is not None:
image = shape_utils.pad_or_clip_nd(image_and_detections[2],
[true_shape[0], true_shape[1], 3])
if original_image_spatial_shape is not None:
image_and_detections[2] = _resize_original_image(image, original_shape)
image_with_boxes = tf.py_func(visualize_boxes_fn, image_and_detections[2:],
tf.uint8)
return image_with_boxes
images = tf.map_fn(draw_boxes, elems, dtype=tf.uint8, back_prop=False)
return images
def draw_side_by_side_evaluation_image(eval_dict,
category_index,
max_boxes_to_draw=20,
min_score_thresh=0.2,
use_normalized_coordinates=True,
keypoint_edges=None):
"""Creates a side-by-side image with detections and groundtruth.
Bounding boxes (and instance masks, if available) are visualized on both
subimages.
Args:
eval_dict: The evaluation dictionary returned by
eval_util.result_dict_for_batched_example() or
eval_util.result_dict_for_single_example().
category_index: A category index (dictionary) produced from a labelmap.
max_boxes_to_draw: The maximum number of boxes to draw for detections.
min_score_thresh: The minimum score threshold for showing detections.
use_normalized_coordinates: Whether to assume boxes and keypoints are in
normalized coordinates (as opposed to absolute coordinates).
Default is True.
keypoint_edges: A list of tuples with keypoint indices that specify which
keypoints should be connected by an edge, e.g. [(0, 1), (2, 4)] draws
edges from keypoint 0 to 1 and from keypoint 2 to 4.
Returns:
A list of [1, H, 2 * W, C] uint8 tensor. The subimage on the left
corresponds to detections, while the subimage on the right corresponds to
groundtruth.
"""
detection_fields = fields.DetectionResultFields()
input_data_fields = fields.InputDataFields()
images_with_detections_list = []
# Add the batch dimension if the eval_dict is for single example.
if len(eval_dict[detection_fields.detection_classes].shape) == 1:
for key in eval_dict:
if (key != input_data_fields.original_image and
key != input_data_fields.image_additional_channels):
eval_dict[key] = tf.expand_dims(eval_dict[key], 0)
for indx in range(eval_dict[input_data_fields.original_image].shape[0]):
instance_masks = None
if detection_fields.detection_masks in eval_dict:
instance_masks = tf.cast(
tf.expand_dims(
eval_dict[detection_fields.detection_masks][indx], axis=0),
tf.uint8)
keypoints = None
keypoint_scores = None
if detection_fields.detection_keypoints in eval_dict:
keypoints = tf.expand_dims(
eval_dict[detection_fields.detection_keypoints][indx], axis=0)
if detection_fields.detection_keypoint_scores in eval_dict:
keypoint_scores = tf.expand_dims(
eval_dict[detection_fields.detection_keypoint_scores][indx], axis=0)
else:
keypoint_scores = tf.cast(keypoint_ops.set_keypoint_visibilities(
keypoints), dtype=tf.float32)
groundtruth_instance_masks = None
if input_data_fields.groundtruth_instance_masks in eval_dict:
groundtruth_instance_masks = tf.cast(
tf.expand_dims(
eval_dict[input_data_fields.groundtruth_instance_masks][indx],
axis=0), tf.uint8)
groundtruth_keypoints = None
groundtruth_keypoint_scores = None
gt_kpt_vis_fld = input_data_fields.groundtruth_keypoint_visibilities
if input_data_fields.groundtruth_keypoints in eval_dict:
groundtruth_keypoints = tf.expand_dims(
eval_dict[input_data_fields.groundtruth_keypoints][indx], axis=0)
if gt_kpt_vis_fld in eval_dict:
groundtruth_keypoint_scores = tf.expand_dims(
tf.cast(eval_dict[gt_kpt_vis_fld][indx], dtype=tf.float32), axis=0)
else:
groundtruth_keypoint_scores = tf.cast(
keypoint_ops.set_keypoint_visibilities(
groundtruth_keypoints), dtype=tf.float32)
images_with_detections = draw_bounding_boxes_on_image_tensors(
tf.expand_dims(
eval_dict[input_data_fields.original_image][indx], axis=0),
tf.expand_dims(
eval_dict[detection_fields.detection_boxes][indx], axis=0),
tf.expand_dims(
eval_dict[detection_fields.detection_classes][indx], axis=0),
tf.expand_dims(
eval_dict[detection_fields.detection_scores][indx], axis=0),
category_index,
original_image_spatial_shape=tf.expand_dims(
eval_dict[input_data_fields.original_image_spatial_shape][indx],
axis=0),
true_image_shape=tf.expand_dims(
eval_dict[input_data_fields.true_image_shape][indx], axis=0),
instance_masks=instance_masks,
keypoints=keypoints,
keypoint_scores=keypoint_scores,
keypoint_edges=keypoint_edges,
max_boxes_to_draw=max_boxes_to_draw,
min_score_thresh=min_score_thresh,
use_normalized_coordinates=use_normalized_coordinates)
images_with_groundtruth = draw_bounding_boxes_on_image_tensors(
tf.expand_dims(
eval_dict[input_data_fields.original_image][indx], axis=0),
tf.expand_dims(
eval_dict[input_data_fields.groundtruth_boxes][indx], axis=0),
tf.expand_dims(
eval_dict[input_data_fields.groundtruth_classes][indx], axis=0),
tf.expand_dims(
tf.ones_like(
eval_dict[input_data_fields.groundtruth_classes][indx],
dtype=tf.float32),
axis=0),
category_index,
original_image_spatial_shape=tf.expand_dims(
eval_dict[input_data_fields.original_image_spatial_shape][indx],
axis=0),
true_image_shape=tf.expand_dims(
eval_dict[input_data_fields.true_image_shape][indx], axis=0),
instance_masks=groundtruth_instance_masks,
keypoints=groundtruth_keypoints,
keypoint_scores=groundtruth_keypoint_scores,
keypoint_edges=keypoint_edges,
max_boxes_to_draw=None,
min_score_thresh=0.0,
use_normalized_coordinates=use_normalized_coordinates)
images_to_visualize = tf.concat([images_with_detections,
images_with_groundtruth], axis=2)
if input_data_fields.image_additional_channels in eval_dict:
images_with_additional_channels_groundtruth = (
draw_bounding_boxes_on_image_tensors(
tf.expand_dims(
eval_dict[input_data_fields.image_additional_channels][indx],
axis=0),
tf.expand_dims(
eval_dict[input_data_fields.groundtruth_boxes][indx], axis=0),
tf.expand_dims(
eval_dict[input_data_fields.groundtruth_classes][indx],
axis=0),
tf.expand_dims(
tf.ones_like(
eval_dict[input_data_fields.groundtruth_classes][indx],
dtype=tf.float32),
axis=0),
category_index,
original_image_spatial_shape=tf.expand_dims(
eval_dict[input_data_fields.original_image_spatial_shape]
[indx],
axis=0),
true_image_shape=tf.expand_dims(
eval_dict[input_data_fields.true_image_shape][indx], axis=0),
instance_masks=groundtruth_instance_masks,
keypoints=None,
keypoint_edges=None,
max_boxes_to_draw=None,
min_score_thresh=0.0,
use_normalized_coordinates=use_normalized_coordinates))
images_to_visualize = tf.concat(
[images_to_visualize, images_with_additional_channels_groundtruth],
axis=2)
images_with_detections_list.append(images_to_visualize)
return images_with_detections_list
def draw_keypoints_on_image_array(image,
keypoints,
keypoint_scores=None,
min_score_thresh=0.5,
color='red',
radius=2,
use_normalized_coordinates=True,
keypoint_edges=None,
keypoint_edge_color='green',
keypoint_edge_width=2):
"""Draws keypoints on an image (numpy array).
Args:
image: a numpy array with shape [height, width, 3].
keypoints: a numpy array with shape [num_keypoints, 2].
keypoint_scores: a numpy array with shape [num_keypoints]. If provided, only
      those keypoints with a score above min_score_thresh will be visualized.
min_score_thresh: A scalar indicating the minimum keypoint score required
for a keypoint to be visualized. Note that keypoint_scores must be
provided for this threshold to take effect.
color: color to draw the keypoints with. Default is red.
radius: keypoint radius. Default value is 2.
use_normalized_coordinates: if True (default), treat keypoint values as
relative to the image. Otherwise treat them as absolute.
keypoint_edges: A list of tuples with keypoint indices that specify which
keypoints should be connected by an edge, e.g. [(0, 1), (2, 4)] draws
edges from keypoint 0 to 1 and from keypoint 2 to 4.
    keypoint_edge_color: color to draw the keypoint edges with. Default is green.
keypoint_edge_width: width of the edges drawn between keypoints. Default
value is 2.
"""
image_pil = Image.fromarray(np.uint8(image)).convert('RGB')
draw_keypoints_on_image(image_pil,
keypoints,
keypoint_scores=keypoint_scores,
min_score_thresh=min_score_thresh,
color=color,
radius=radius,
use_normalized_coordinates=use_normalized_coordinates,
keypoint_edges=keypoint_edges,
keypoint_edge_color=keypoint_edge_color,
keypoint_edge_width=keypoint_edge_width)
np.copyto(image, np.array(image_pil))
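# Illustrative sketch (not part of the original module): drawing three
# normalized keypoints and connecting two of them with an edge. Coordinates,
# scores, and the edge list are example assumptions.
def _example_draw_keypoints():
  frame = np.zeros((240, 320, 3), dtype=np.uint8)
  keypoints = np.array([[0.2, 0.5], [0.5, 0.4], [0.5, 0.6]])  # (y, x) pairs
  scores = np.array([0.9, 0.8, 0.3])
  draw_keypoints_on_image_array(
      frame, keypoints,
      keypoint_scores=scores,
      min_score_thresh=0.5,  # the third keypoint falls below the threshold
      color='Red',
      radius=3,
      use_normalized_coordinates=True,
      keypoint_edges=[(0, 1)],  # connect keypoint 0 to keypoint 1
      keypoint_edge_color='Yellow')
  return frame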
def draw_keypoints_on_image(image,
keypoints,
keypoint_scores=None,
min_score_thresh=0.5,
color='red',
radius=2,
use_normalized_coordinates=True,
keypoint_edges=None,
keypoint_edge_color='green',
keypoint_edge_width=2):
"""Draws keypoints on an image.
Args:
image: a PIL.Image object.
keypoints: a numpy array with shape [num_keypoints, 2].
keypoint_scores: a numpy array with shape [num_keypoints].
min_score_thresh: a score threshold for visualizing keypoints. Only used if
keypoint_scores is provided.
color: color to draw the keypoints with. Default is red.
radius: keypoint radius. Default value is 2.
use_normalized_coordinates: if True (default), treat keypoint values as
relative to the image. Otherwise treat them as absolute.
keypoint_edges: A list of tuples with keypoint indices that specify which
keypoints should be connected by an edge, e.g. [(0, 1), (2, 4)] draws
edges from keypoint 0 to 1 and from keypoint 2 to 4.
    keypoint_edge_color: color to draw the keypoint edges with. Default is green.
keypoint_edge_width: width of the edges drawn between keypoints. Default
value is 2.
"""
draw = ImageDraw.Draw(image)
im_width, im_height = image.size
keypoints = np.array(keypoints)
keypoints_x = [k[1] for k in keypoints]
keypoints_y = [k[0] for k in keypoints]
if use_normalized_coordinates:
keypoints_x = tuple([im_width * x for x in keypoints_x])
keypoints_y = tuple([im_height * y for y in keypoints_y])
if keypoint_scores is not None:
keypoint_scores = np.array(keypoint_scores)
valid_kpt = np.greater(keypoint_scores, min_score_thresh)
else:
valid_kpt = np.where(np.any(np.isnan(keypoints), axis=1),
np.zeros_like(keypoints[:, 0]),
np.ones_like(keypoints[:, 0]))
valid_kpt = [v for v in valid_kpt]
for keypoint_x, keypoint_y, valid in zip(keypoints_x, keypoints_y, valid_kpt):
if valid:
draw.ellipse([(keypoint_x - radius, keypoint_y - radius),
(keypoint_x + radius, keypoint_y + radius)],
outline=color, fill=color)
if keypoint_edges is not None:
for keypoint_start, keypoint_end in keypoint_edges:
if (keypoint_start < 0 or keypoint_start >= len(keypoints) or
keypoint_end < 0 or keypoint_end >= len(keypoints)):
continue
if not (valid_kpt[keypoint_start] and valid_kpt[keypoint_end]):
continue
edge_coordinates = [
keypoints_x[keypoint_start], keypoints_y[keypoint_start],
keypoints_x[keypoint_end], keypoints_y[keypoint_end]
]
draw.line(
edge_coordinates, fill=keypoint_edge_color, width=keypoint_edge_width)
def draw_mask_on_image_array(image, mask, color='red', alpha=0.4):
"""Draws mask on an image.
Args:
    image: uint8 numpy array with shape (img_height, img_width, 3)
    mask: a uint8 numpy array of shape (img_height, img_width) with
      values of either 0 or 1.
color: color to draw the keypoints with. Default is red.
alpha: transparency value between 0 and 1. (default: 0.4)
Raises:
ValueError: On incorrect data type for image or masks.
"""
if image.dtype != np.uint8:
raise ValueError('`image` not of type np.uint8')
if mask.dtype != np.uint8:
raise ValueError('`mask` not of type np.uint8')
if np.any(np.logical_and(mask != 1, mask != 0)):
raise ValueError('`mask` elements should be in [0, 1]')
if image.shape[:2] != mask.shape:
raise ValueError('The image has spatial dimensions %s but the mask has '
'dimensions %s' % (image.shape[:2], mask.shape))
rgb = ImageColor.getrgb(color)
pil_image = Image.fromarray(image)
solid_color = np.expand_dims(
np.ones_like(mask), axis=2) * np.reshape(list(rgb), [1, 1, 3])
pil_solid_color = Image.fromarray(np.uint8(solid_color)).convert('RGBA')
pil_mask = Image.fromarray(np.uint8(255.0*alpha*mask)).convert('L')
pil_image = Image.composite(pil_solid_color, pil_image, pil_mask)
np.copyto(image, np.array(pil_image.convert('RGB')))
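# Minimal sketch (illustrative addition): blending a binary instance mask onto
# an image with draw_mask_on_image_array. Shapes and the masked region are
# assumptions for the example.
def _example_draw_mask():
  image = np.zeros((100, 100, 3), dtype=np.uint8)
  mask = np.zeros((100, 100), dtype=np.uint8)
  mask[25:75, 25:75] = 1  # mark a square region
  draw_mask_on_image_array(image, mask, color='Blue', alpha=0.4)
  return image  # the masked region is now tinted blue, in place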
def visualize_boxes_and_labels_on_image_array(
image,
boxes,
classes,
scores,
category_index,
instance_masks=None,
instance_boundaries=None,
keypoints=None,
keypoint_scores=None,
keypoint_edges=None,
track_ids=None,
use_normalized_coordinates=False,
max_boxes_to_draw=20,
min_score_thresh=.5,
agnostic_mode=False,
line_thickness=4,
groundtruth_box_visualization_color='black',
skip_boxes=False,
skip_scores=False,
skip_labels=False,
skip_track_ids=False):
"""Overlay labeled boxes on an image with formatted scores and label names.
This function groups boxes that correspond to the same location
and creates a display string for each detection and overlays these
on the image. Note that this function modifies the image in place, and returns
that same image.
Args:
image: uint8 numpy array with shape (img_height, img_width, 3)
boxes: a numpy array of shape [N, 4]
classes: a numpy array of shape [N]. Note that class indices are 1-based,
and match the keys in the label map.
scores: a numpy array of shape [N] or None. If scores=None, then
this function assumes that the boxes to be plotted are groundtruth
boxes and plot all boxes as black with no classes or scores.
category_index: a dict containing category dictionaries (each holding
category index `id` and category name `name`) keyed by category indices.
instance_masks: a numpy array of shape [N, image_height, image_width] with
values ranging between 0 and 1, can be None.
instance_boundaries: a numpy array of shape [N, image_height, image_width]
with values ranging between 0 and 1, can be None.
keypoints: a numpy array of shape [N, num_keypoints, 2], can
be None.
keypoint_scores: a numpy array of shape [N, num_keypoints], can be None.
keypoint_edges: A list of tuples with keypoint indices that specify which
keypoints should be connected by an edge, e.g. [(0, 1), (2, 4)] draws
edges from keypoint 0 to 1 and from keypoint 2 to 4.
track_ids: a numpy array of shape [N] with unique track ids. If provided,
color-coding of boxes will be determined by these ids, and not the class
indices.
use_normalized_coordinates: whether boxes is to be interpreted as
normalized coordinates or not.
max_boxes_to_draw: maximum number of boxes to visualize. If None, draw
all boxes.
min_score_thresh: minimum score threshold for a box or keypoint to be
visualized.
agnostic_mode: boolean (default: False) controlling whether to evaluate in
class-agnostic mode or not. This mode will display scores but ignore
classes.
line_thickness: integer (default: 4) controlling line width of the boxes.
groundtruth_box_visualization_color: box color for visualizing groundtruth
boxes
skip_boxes: whether to skip the drawing of bounding boxes.
skip_scores: whether to skip score when drawing a single detection
skip_labels: whether to skip label when drawing a single detection
skip_track_ids: whether to skip track id when drawing a single detection
Returns:
uint8 numpy array with shape (img_height, img_width, 3) with overlaid boxes.
"""
# Create a display string (and color) for every box location, group any boxes
# that correspond to the same location.
box_to_display_str_map = collections.defaultdict(list)
box_to_color_map = collections.defaultdict(str)
box_to_instance_masks_map = {}
box_to_instance_boundaries_map = {}
box_to_keypoints_map = collections.defaultdict(list)
box_to_keypoint_scores_map = collections.defaultdict(list)
box_to_track_ids_map = {}
if not max_boxes_to_draw:
max_boxes_to_draw = boxes.shape[0]
for i in range(boxes.shape[0]):
if max_boxes_to_draw == len(box_to_color_map):
break
if scores is None or scores[i] > min_score_thresh:
box = tuple(boxes[i].tolist())
if instance_masks is not None:
box_to_instance_masks_map[box] = instance_masks[i]
if instance_boundaries is not None:
box_to_instance_boundaries_map[box] = instance_boundaries[i]
if keypoints is not None:
box_to_keypoints_map[box].extend(keypoints[i])
if keypoint_scores is not None:
box_to_keypoint_scores_map[box].extend(keypoint_scores[i])
if track_ids is not None:
box_to_track_ids_map[box] = track_ids[i]
if scores is None:
box_to_color_map[box] = groundtruth_box_visualization_color
else:
display_str = ''
if not skip_labels:
if not agnostic_mode:
if classes[i] in six.viewkeys(category_index):
class_name = category_index[classes[i]]['name']
else:
class_name = 'N/A'
display_str = str(class_name)
if not skip_scores:
if not display_str:
display_str = '{}%'.format(round(100*scores[i]))
else:
display_str = '{}: {}%'.format(display_str, round(100*scores[i]))
if not skip_track_ids and track_ids is not None:
if not display_str:
display_str = 'ID {}'.format(track_ids[i])
else:
display_str = '{}: ID {}'.format(display_str, track_ids[i])
box_to_display_str_map[box].append(display_str)
if agnostic_mode:
box_to_color_map[box] = 'DarkOrange'
elif track_ids is not None:
          prime_multiplier = _get_multiplier_for_color_randomness()
          box_to_color_map[box] = STANDARD_COLORS[
              (prime_multiplier * track_ids[i]) % len(STANDARD_COLORS)]
else:
box_to_color_map[box] = STANDARD_COLORS[
classes[i] % len(STANDARD_COLORS)]
# Draw all boxes onto image.
for box, color in box_to_color_map.items():
ymin, xmin, ymax, xmax = box
if instance_masks is not None:
draw_mask_on_image_array(
image,
box_to_instance_masks_map[box],
color=color
)
if instance_boundaries is not None:
draw_mask_on_image_array(
image,
box_to_instance_boundaries_map[box],
color='red',
alpha=1.0
)
draw_bounding_box_on_image_array(
image,
ymin,
xmin,
ymax,
xmax,
color=color,
thickness=0 if skip_boxes else line_thickness,
display_str_list=box_to_display_str_map[box],
use_normalized_coordinates=use_normalized_coordinates)
if keypoints is not None:
keypoint_scores_for_box = None
if box_to_keypoint_scores_map:
keypoint_scores_for_box = box_to_keypoint_scores_map[box]
draw_keypoints_on_image_array(
image,
box_to_keypoints_map[box],
keypoint_scores_for_box,
min_score_thresh=min_score_thresh,
color=color,
radius=line_thickness / 2,
use_normalized_coordinates=use_normalized_coordinates,
keypoint_edges=keypoint_edges,
keypoint_edge_color=color,
keypoint_edge_width=line_thickness // 2)
return image
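# Illustrative sketch (an addition, with made-up detections and a toy category
# index): typical use of visualize_boxes_and_labels_on_image_array on detector
# output. All values below are assumptions for demonstration.
def _example_visualize_detections():
  image = np.zeros((480, 640, 3), dtype=np.uint8)
  boxes = np.array([[0.1, 0.1, 0.5, 0.5]], dtype=np.float32)
  classes = np.array([1], dtype=np.int32)
  scores = np.array([0.87], dtype=np.float32)
  category_index = {1: {'id': 1, 'name': 'dog'}}
  visualize_boxes_and_labels_on_image_array(
      image, boxes, classes, scores, category_index,
      use_normalized_coordinates=True,
      max_boxes_to_draw=10,
      min_score_thresh=0.5,
      line_thickness=2)
  return image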
def add_cdf_image_summary(values, name):
"""Adds a tf.summary.image for a CDF plot of the values.
Normalizes `values` such that they sum to 1, plots the cumulative distribution
function and creates a tf image summary.
Args:
values: a 1-D float32 tensor containing the values.
name: name for the image summary.
"""
def cdf_plot(values):
"""Numpy function to plot CDF."""
normalized_values = values / np.sum(values)
sorted_values = np.sort(normalized_values)
cumulative_values = np.cumsum(sorted_values)
fraction_of_examples = (np.arange(cumulative_values.size, dtype=np.float32)
/ cumulative_values.size)
fig = plt.figure(frameon=False)
ax = fig.add_subplot('111')
ax.plot(fraction_of_examples, cumulative_values)
ax.set_ylabel('cumulative normalized values')
ax.set_xlabel('fraction of examples')
fig.canvas.draw()
width, height = fig.get_size_inches() * fig.get_dpi()
image = np.fromstring(fig.canvas.tostring_rgb(), dtype='uint8').reshape(
1, int(height), int(width), 3)
return image
cdf_plot = tf.py_func(cdf_plot, [values], tf.uint8)
tf.summary.image(name, cdf_plot)
def add_hist_image_summary(values, bins, name):
"""Adds a tf.summary.image for a histogram plot of the values.
Plots the histogram of values and creates a tf image summary.
Args:
values: a 1-D float32 tensor containing the values.
bins: bin edges which will be directly passed to np.histogram.
name: name for the image summary.
"""
def hist_plot(values, bins):
"""Numpy function to plot hist."""
fig = plt.figure(frameon=False)
ax = fig.add_subplot('111')
y, x = np.histogram(values, bins=bins)
ax.plot(x[:-1], y)
ax.set_ylabel('count')
ax.set_xlabel('value')
fig.canvas.draw()
width, height = fig.get_size_inches() * fig.get_dpi()
image = np.fromstring(
fig.canvas.tostring_rgb(), dtype='uint8').reshape(
1, int(height), int(width), 3)
return image
hist_plot = tf.py_func(hist_plot, [values, bins], tf.uint8)
tf.summary.image(name, hist_plot)
class EvalMetricOpsVisualization(six.with_metaclass(abc.ABCMeta, object)):
"""Abstract base class responsible for visualizations during evaluation.
Currently, summary images are not run during evaluation. One way to produce
evaluation images in Tensorboard is to provide tf.summary.image strings as
`value_ops` in tf.estimator.EstimatorSpec's `eval_metric_ops`. This class is
responsible for accruing images (with overlaid detections and groundtruth)
and returning a dictionary that can be passed to `eval_metric_ops`.
"""
def __init__(self,
category_index,
max_examples_to_draw=5,
max_boxes_to_draw=20,
min_score_thresh=0.2,
use_normalized_coordinates=True,
summary_name_prefix='evaluation_image',
keypoint_edges=None):
"""Creates an EvalMetricOpsVisualization.
Args:
category_index: A category index (dictionary) produced from a labelmap.
max_examples_to_draw: The maximum number of example summaries to produce.
max_boxes_to_draw: The maximum number of boxes to draw for detections.
min_score_thresh: The minimum score threshold for showing detections.
use_normalized_coordinates: Whether to assume boxes and keypoints are in
normalized coordinates (as opposed to absolute coordinates).
Default is True.
summary_name_prefix: A string prefix for each image summary.
keypoint_edges: A list of tuples with keypoint indices that specify which
keypoints should be connected by an edge, e.g. [(0, 1), (2, 4)] draws
edges from keypoint 0 to 1 and from keypoint 2 to 4.
"""
self._category_index = category_index
self._max_examples_to_draw = max_examples_to_draw
self._max_boxes_to_draw = max_boxes_to_draw
self._min_score_thresh = min_score_thresh
self._use_normalized_coordinates = use_normalized_coordinates
self._summary_name_prefix = summary_name_prefix
self._keypoint_edges = keypoint_edges
self._images = []
def clear(self):
self._images = []
def add_images(self, images):
"""Store a list of images, each with shape [1, H, W, C]."""
if len(self._images) >= self._max_examples_to_draw:
return
# Store images and clip list if necessary.
self._images.extend(images)
if len(self._images) > self._max_examples_to_draw:
self._images[self._max_examples_to_draw:] = []
def get_estimator_eval_metric_ops(self, eval_dict):
"""Returns metric ops for use in tf.estimator.EstimatorSpec.
Args:
eval_dict: A dictionary that holds an image, groundtruth, and detections
        for a batched example. Note that we use only the first example for
visualization. See eval_util.result_dict_for_batched_example() for a
convenient method for constructing such a dictionary. The dictionary
contains
fields.InputDataFields.original_image: [batch_size, H, W, 3] image.
fields.InputDataFields.original_image_spatial_shape: [batch_size, 2]
tensor containing the size of the original image.
fields.InputDataFields.true_image_shape: [batch_size, 3]
          tensor containing the spatial size of the unpadded original image.
fields.InputDataFields.groundtruth_boxes - [batch_size, num_boxes, 4]
float32 tensor with groundtruth boxes in range [0.0, 1.0].
fields.InputDataFields.groundtruth_classes - [batch_size, num_boxes]
int64 tensor with 1-indexed groundtruth classes.
fields.InputDataFields.groundtruth_instance_masks - (optional)
[batch_size, num_boxes, H, W] int64 tensor with instance masks.
fields.InputDataFields.groundtruth_keypoints - (optional)
[batch_size, num_boxes, num_keypoints, 2] float32 tensor with
keypoint coordinates in format [y, x].
fields.InputDataFields.groundtruth_keypoint_visibilities - (optional)
[batch_size, num_boxes, num_keypoints] bool tensor with
keypoint visibilities.
fields.DetectionResultFields.detection_boxes - [batch_size,
max_num_boxes, 4] float32 tensor with detection boxes in range [0.0,
1.0].
fields.DetectionResultFields.detection_classes - [batch_size,
max_num_boxes] int64 tensor with 1-indexed detection classes.
fields.DetectionResultFields.detection_scores - [batch_size,
max_num_boxes] float32 tensor with detection scores.
fields.DetectionResultFields.detection_masks - (optional) [batch_size,
max_num_boxes, H, W] float32 tensor of binarized masks.
fields.DetectionResultFields.detection_keypoints - (optional)
[batch_size, max_num_boxes, num_keypoints, 2] float32 tensor with
keypoints.
fields.DetectionResultFields.detection_keypoint_scores - (optional)
[batch_size, max_num_boxes, num_keypoints] float32 tensor with
          keypoint scores.
Returns:
A dictionary of image summary names to tuple of (value_op, update_op). The
`update_op` is the same for all items in the dictionary, and is
responsible for saving a single side-by-side image with detections and
groundtruth. Each `value_op` holds the tf.summary.image string for a given
image.
"""
if self._max_examples_to_draw == 0:
return {}
images = self.images_from_evaluation_dict(eval_dict)
def get_images():
"""Returns a list of images, padded to self._max_images_to_draw."""
images = self._images
while len(images) < self._max_examples_to_draw:
images.append(np.array(0, dtype=np.uint8))
self.clear()
return images
def image_summary_or_default_string(summary_name, image):
"""Returns image summaries for non-padded elements."""
return tf.cond(
tf.equal(tf.size(tf.shape(image)), 4),
lambda: tf.summary.image(summary_name, image),
lambda: tf.constant(''))
if tf.executing_eagerly():
update_op = self.add_images([[images[0]]])
image_tensors = get_images()
else:
update_op = tf.py_func(self.add_images, [[images[0]]], [])
image_tensors = tf.py_func(
get_images, [], [tf.uint8] * self._max_examples_to_draw)
eval_metric_ops = {}
for i, image in enumerate(image_tensors):
summary_name = self._summary_name_prefix + '/' + str(i)
value_op = image_summary_or_default_string(summary_name, image)
eval_metric_ops[summary_name] = (value_op, update_op)
return eval_metric_ops
@abc.abstractmethod
def images_from_evaluation_dict(self, eval_dict):
"""Converts evaluation dictionary into a list of image tensors.
To be overridden by implementations.
Args:
eval_dict: A dictionary with all the necessary information for producing
visualizations.
Returns:
A list of [1, H, W, C] uint8 tensors.
"""
raise NotImplementedError
class VisualizeSingleFrameDetections(EvalMetricOpsVisualization):
"""Class responsible for single-frame object detection visualizations."""
def __init__(self,
category_index,
max_examples_to_draw=5,
max_boxes_to_draw=20,
min_score_thresh=0.2,
use_normalized_coordinates=True,
summary_name_prefix='Detections_Left_Groundtruth_Right',
keypoint_edges=None):
super(VisualizeSingleFrameDetections, self).__init__(
category_index=category_index,
max_examples_to_draw=max_examples_to_draw,
max_boxes_to_draw=max_boxes_to_draw,
min_score_thresh=min_score_thresh,
use_normalized_coordinates=use_normalized_coordinates,
summary_name_prefix=summary_name_prefix,
keypoint_edges=keypoint_edges)
def images_from_evaluation_dict(self, eval_dict):
return draw_side_by_side_evaluation_image(eval_dict, self._category_index,
self._max_boxes_to_draw,
self._min_score_thresh,
self._use_normalized_coordinates,
self._keypoint_edges)
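# Illustrative sketch (an addition, not from the original file): constructing
# the single-frame visualization hook so its summary ops can be merged into an
# Estimator's eval_metric_ops. The category index is a toy assumption.
def _example_build_eval_visualizer():
  category_index = {1: {'id': 1, 'name': 'dog'}, 2: {'id': 2, 'name': 'cat'}}
  visualizer = VisualizeSingleFrameDetections(
      category_index,
      max_examples_to_draw=4,
      max_boxes_to_draw=20,
      min_score_thresh=0.3)
  # eval_metric_ops = visualizer.get_estimator_eval_metric_ops(eval_dict),
  # where eval_dict comes from eval_util.result_dict_for_batched_example().
  return visualizer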
|
import torch
import torch.distributions as td
import torch.nn as nn
import torch.nn.functional as tf
from rlpyt.utils.collections import namedarraytuple
from rlpyt.utils.buffer import buffer_method
from dreamer.utils.module import FreezeParameters
RSSMState = namedarraytuple('RSSMState', ['mean', 'std', 'stoch', 'deter'])
def stack_states(rssm_states: list, dim):
return RSSMState(
torch.stack([state.mean for state in rssm_states], dim=dim),
torch.stack([state.std for state in rssm_states], dim=dim),
torch.stack([state.stoch for state in rssm_states], dim=dim),
torch.stack([state.deter for state in rssm_states], dim=dim),
)
def get_feat(rssm_state: RSSMState):
return torch.cat((rssm_state.stoch, rssm_state.deter), dim=-1)
def get_dist(rssm_state: RSSMState):
return td.independent.Independent(td.Normal(rssm_state.mean, rssm_state.std), 1)
class TransitionBase(nn.Module):
def __init__(self):
super().__init__()
def forward(self, prev_action, prev_state):
""":return: next state"""
raise NotImplementedError
class RepresentationBase(nn.Module):
def __init__(self):
super().__init__()
def forward(self, obs_embed, prev_action, prev_state):
""":return: next state"""
raise NotImplementedError
class RollOutModule(nn.Module):
def __init__(self):
super().__init__()
def forward(self, steps, obs_embed, prev_action, prev_state):
raise NotImplementedError
class RSSMTransition(TransitionBase):
def __init__(self, action_size, stochastic_size=30, deterministic_size=200, hidden_size=200, activation=nn.ELU,
distribution=td.Normal):
super().__init__()
self._action_size = action_size
self._stoch_size = stochastic_size
self._deter_size = deterministic_size
self._hidden_size = hidden_size
self._activation = activation
self._cell = nn.GRUCell(hidden_size, deterministic_size)
self._rnn_input_model = self._build_rnn_input_model()
self._stochastic_prior_model = self._build_stochastic_model()
self._dist = distribution
def _build_rnn_input_model(self):
rnn_input_model = [
nn.Linear(self._action_size + self._stoch_size, self._hidden_size)]
rnn_input_model += [self._activation()]
return nn.Sequential(*rnn_input_model)
def _build_stochastic_model(self):
stochastic_model = [nn.Linear(self._hidden_size, self._hidden_size)]
stochastic_model += [self._activation()]
stochastic_model += [nn.Linear(self._hidden_size,
2 * self._stoch_size)]
return nn.Sequential(*stochastic_model)
def initial_state(self, batch_size, **kwargs):
return RSSMState(
torch.zeros(batch_size, self._stoch_size, **kwargs),
torch.zeros(batch_size, self._stoch_size, **kwargs),
torch.zeros(batch_size, self._stoch_size, **kwargs),
torch.zeros(batch_size, self._deter_size, **kwargs),
)
def forward(self, prev_action: torch.Tensor, prev_state: RSSMState):
if len(prev_action.shape) != len(prev_state.stoch.shape):
prev_state = RSSMState(
mean=prev_state.mean.unsqueeze(dim=0),
std=prev_state.std.unsqueeze(dim=0),
stoch=prev_state.stoch.unsqueeze(dim=0),
deter=prev_state.deter.unsqueeze(dim=0))
rnn_input = self._rnn_input_model(
torch.cat([prev_action, prev_state.stoch], dim=-1))
deter_state = self._cell(rnn_input, prev_state.deter)
mean, std = torch.chunk(
self._stochastic_prior_model(deter_state), 2, dim=-1)
std = tf.softplus(std) + 0.1
dist = self._dist(mean, std)
stoch_state = dist.rsample()
return RSSMState(mean, std, stoch_state, deter_state)
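# Minimal usage sketch (an illustrative addition): one prior (imagination) step
# with RSSMTransition. The batch size and action size below are assumptions.
def _example_transition_step():
    transition = RSSMTransition(action_size=4)
    prev_state = transition.initial_state(batch_size=8)
    prev_action = torch.zeros(8, 4)
    next_state = transition(prev_action, prev_state)  # RSSMState with new stoch/deter
    return get_feat(next_state)  # (8, stoch_size + deter_size) = (8, 230)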
class RSSMRepresentation(RepresentationBase):
def __init__(self, transition_model: RSSMTransition, obs_embed_size, action_size, stochastic_size=30,
deterministic_size=200, hidden_size=200, activation=nn.ELU, distribution=td.Normal):
super().__init__()
self._transition_model = transition_model
self._obs_embed_size = obs_embed_size
self._action_size = action_size
self._stoch_size = stochastic_size
self._deter_size = deterministic_size
self._hidden_size = hidden_size
self._activation = activation
self._dist = distribution
self._stochastic_posterior_model = self._build_stochastic_model()
def _build_stochastic_model(self):
stochastic_model = [
nn.Linear(self._deter_size + self._obs_embed_size, self._hidden_size)]
stochastic_model += [self._activation()]
stochastic_model += [nn.Linear(self._hidden_size,
2 * self._stoch_size)]
return nn.Sequential(*stochastic_model)
def initial_state(self, batch_size, **kwargs):
return RSSMState(
torch.zeros(batch_size, self._stoch_size, **kwargs),
torch.zeros(batch_size, self._stoch_size, **kwargs),
torch.zeros(batch_size, self._stoch_size, **kwargs),
torch.zeros(batch_size, self._deter_size, **kwargs),
)
def forward(self, obs_embed: torch.Tensor, prev_action: torch.Tensor, prev_state: RSSMState):
prior_state = self._transition_model(prev_action, prev_state)
x = torch.cat([prior_state.deter, obs_embed], -1)
mean, std = torch.chunk(self._stochastic_posterior_model(x), 2, dim=-1)
std = tf.softplus(std) + 0.1
dist = self._dist(mean, std)
stoch_state = dist.rsample()
posterior_state = RSSMState(mean, std, stoch_state, prior_state.deter)
return prior_state, posterior_state
class RSSMRollout(RollOutModule):
def __init__(self, representation_model: RSSMRepresentation, transition_model: RSSMTransition):
super().__init__()
self.representation_model = representation_model
self.transition_model = transition_model
def forward(self, steps: int, obs_embed: torch.Tensor, action: torch.Tensor, prev_state: RSSMState):
return self.rollout_representation(steps, obs_embed, action, prev_state)
def rollout_representation(self, steps: int, obs_embed: torch.Tensor, action: torch.Tensor,
prev_state: RSSMState):
"""
Roll out the model with actions and observations from data.
:param steps: number of steps to roll out
:param obs_embed: size(time_steps, batch_size, embedding_size)
:param action: size(time_steps, batch_size, action_size)
:param prev_state: RSSM state, size(batch_size, state_size)
:return: prior, posterior states. size(time_steps, batch_size, state_size)
"""
priors = []
posteriors = []
for t in range(steps):
prior_state, posterior_state = self.representation_model(
obs_embed[t], action[t], prev_state)
priors.append(prior_state)
posteriors.append(posterior_state)
prev_state = posterior_state
prior = stack_states(priors, dim=0)
post = stack_states(posteriors, dim=0)
return prior, post
def rollout_transition(self, steps: int, action: torch.Tensor, prev_state: RSSMState):
"""
Roll out the model with actions from data.
:param steps: number of steps to roll out
:param action: size(time_steps, batch_size, action_size)
:param prev_state: RSSM state, size(batch_size, state_size)
:return: prior states. size(time_steps, batch_size, state_size)
"""
priors = []
state = prev_state
for t in range(steps):
state = self.transition_model(action[t], state)
priors.append(state)
return stack_states(priors, dim=0)
def rollout_policy(self, steps: int, policy, prev_state: RSSMState):
"""
Roll out the model with a policy function.
:param steps: number of steps to roll out
:param policy: RSSMState -> action
:param prev_state: RSSM state, size(batch_size, state_size)
:return: next states size(time_steps, batch_size, state_size),
actions size(time_steps, batch_size, action_size)
"""
state = prev_state
next_states = []
actions = []
state = buffer_method(state, 'detach')
for t in range(steps):
action, _ = policy(buffer_method(state, 'detach'))
state = self.transition_model(action, state)
next_states.append(state)
actions.append(action)
next_states = stack_states(next_states, dim=0)
actions = torch.stack(actions, dim=0)
return next_states, actions
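# Illustrative sketch (an addition): rolling the full RSSM over a short
# sequence of embedded observations and actions. All sizes are assumptions.
def _example_rollout():
    action_size, embed_size, batch, steps = 4, 64, 8, 5
    transition = RSSMTransition(action_size)
    representation = RSSMRepresentation(transition, embed_size, action_size)
    rollout = RSSMRollout(representation, transition)
    obs_embed = torch.zeros(steps, batch, embed_size)
    actions = torch.zeros(steps, batch, action_size)
    prev_state = representation.initial_state(batch)
    prior, posterior = rollout(steps, obs_embed, actions, prev_state)
    # each RSSMState field now carries a leading time dimension: (steps, batch, ...)
    return get_dist(posterior)  # factorized Normal over the stochastic state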
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
from msrest.exceptions import HttpOperationError
class CloudError(Model):
"""CloudError.
"""
_attribute_map = {
}
class Display(Model):
"""Detailed HANA operation information.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar provider: The localized friendly form of the resource provider name.
This form is also expected to include the publisher/company responsible.
Use Title Casing. Begin with "Microsoft" for 1st party services.
:vartype provider: str
:ivar resource: The localized friendly form of the resource type related
to this action/operation. This form should match the public documentation
for the resource provider. Use Title Casing. For examples, refer to the
“name” section.
:vartype resource: str
:ivar operation: The localized friendly name for the operation as shown to
the user. This name should be concise (to fit in drop downs), but clear
(self-documenting). Use Title Casing and include the entity/resource to
which it applies.
:vartype operation: str
:ivar description: The localized friendly description for the operation as
shown to the user. This description should be thorough, yet concise. It
will be used in tool-tips and detailed views.
:vartype description: str
:ivar origin: The intended executor of the operation; governs the display
of the operation in the RBAC UX and the audit logs UX. Default value is
'user,system'
:vartype origin: str
"""
_validation = {
'provider': {'readonly': True},
'resource': {'readonly': True},
'operation': {'readonly': True},
'description': {'readonly': True},
'origin': {'readonly': True},
}
_attribute_map = {
'provider': {'key': 'provider', 'type': 'str'},
'resource': {'key': 'resource', 'type': 'str'},
'operation': {'key': 'operation', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
'origin': {'key': 'origin', 'type': 'str'},
}
def __init__(self, **kwargs):
super(Display, self).__init__(**kwargs)
self.provider = None
self.resource = None
self.operation = None
self.description = None
self.origin = None
class ErrorResponse(Model):
"""Describes the format of Error response.
:param code: Error code
:type code: str
:param message: Error message indicating why the operation failed.
:type message: str
"""
_attribute_map = {
'code': {'key': 'code', 'type': 'str'},
'message': {'key': 'message', 'type': 'str'},
}
def __init__(self, **kwargs):
super(ErrorResponse, self).__init__(**kwargs)
self.code = kwargs.get('code', None)
self.message = kwargs.get('message', None)
class ErrorResponseException(HttpOperationError):
"""Server responsed with exception of type: 'ErrorResponse'.
:param deserialize: A deserializer
:param response: Server response to be deserialized.
"""
def __init__(self, deserialize, response, *args):
super(ErrorResponseException, self).__init__(deserialize, response, 'ErrorResponse', *args)
class Operation(Model):
"""HANA operation information.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar name: The name of the operation being performed on this particular
object. This name should match the action name that appears in RBAC / the
event service.
:vartype name: str
:param display: Displayed HANA operation information
:type display: ~azure.mgmt.hanaonazure.models.Display
"""
_validation = {
'name': {'readonly': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'display': {'key': 'display', 'type': 'Display'},
}
def __init__(self, **kwargs):
super(Operation, self).__init__(**kwargs)
self.name = None
self.display = kwargs.get('display', None)
class Resource(Model):
"""The core properties of ARM resources.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar id: Fully qualified resource Id for the resource. Ex -
/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/trafficManagerProfiles/{resourceName}
:vartype id: str
:ivar name: The name of the resource
:vartype name: str
:ivar type: The type of the resource. Ex-
Microsoft.Network/trafficManagerProfiles.
:vartype type: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
}
def __init__(self, **kwargs):
super(Resource, self).__init__(**kwargs)
self.id = None
self.name = None
self.type = None
class ProxyResource(Resource):
"""The resource model definition for a ARM proxy resource. It will have
everything other than required location and tags.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar id: Fully qualified resource Id for the resource. Ex -
/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/trafficManagerProfiles/{resourceName}
:vartype id: str
:ivar name: The name of the resource
:vartype name: str
:ivar type: The type of the resource. Ex-
Microsoft.Network/trafficManagerProfiles.
:vartype type: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
}
def __init__(self, **kwargs):
super(ProxyResource, self).__init__(**kwargs)
class ProviderInstance(ProxyResource):
"""A provider instance associated with a SAP monitor.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar id: Fully qualified resource Id for the resource. Ex -
/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/trafficManagerProfiles/{resourceName}
:vartype id: str
:ivar name: The name of the resource
:vartype name: str
:ivar type: The type of the resource. Ex-
Microsoft.Network/trafficManagerProfiles.
:vartype type: str
:param provider_instance_type: The type of provider instance.
:type provider_instance_type: str
:param properties: A JSON string containing the properties of the provider
instance.
:type properties: str
:param metadata: A JSON string containing metadata of the provider
instance.
:type metadata: str
:ivar provisioning_state: State of provisioning of the provider instance.
Possible values include: 'Accepted', 'Creating', 'Updating', 'Failed',
'Succeeded', 'Deleting', 'Migrating'
:vartype provisioning_state: str or
~azure.mgmt.hanaonazure.models.HanaProvisioningStatesEnum
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'provisioning_state': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'provider_instance_type': {'key': 'properties.type', 'type': 'str'},
'properties': {'key': 'properties.properties', 'type': 'str'},
'metadata': {'key': 'properties.metadata', 'type': 'str'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
}
def __init__(self, **kwargs):
super(ProviderInstance, self).__init__(**kwargs)
self.provider_instance_type = kwargs.get('provider_instance_type', None)
self.properties = kwargs.get('properties', None)
self.metadata = kwargs.get('metadata', None)
self.provisioning_state = None
class TrackedResource(Resource):
"""The resource model definition for a ARM tracked top level resource.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar id: Fully qualified resource Id for the resource. Ex -
/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/trafficManagerProfiles/{resourceName}
:vartype id: str
:ivar name: The name of the resource
:vartype name: str
:ivar type: The type of the resource. Ex-
Microsoft.Network/trafficManagerProfiles.
:vartype type: str
:param tags: Resource tags.
:type tags: dict[str, str]
:param location: The Azure Region where the resource lives
:type location: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'location': {'key': 'location', 'type': 'str'},
}
def __init__(self, **kwargs):
super(TrackedResource, self).__init__(**kwargs)
self.tags = kwargs.get('tags', None)
self.location = kwargs.get('location', None)
class SapMonitor(TrackedResource):
"""SAP monitor info on Azure (ARM properties and SAP monitor properties).
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar id: Fully qualified resource Id for the resource. Ex -
/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/trafficManagerProfiles/{resourceName}
:vartype id: str
:ivar name: The name of the resource
:vartype name: str
:ivar type: The type of the resource. Ex-
Microsoft.Network/trafficManagerProfiles.
:vartype type: str
:param tags: Resource tags.
:type tags: dict[str, str]
:param location: The Azure Region where the resource lives
:type location: str
:ivar provisioning_state: State of provisioning of the HanaInstance.
Possible values include: 'Accepted', 'Creating', 'Updating', 'Failed',
'Succeeded', 'Deleting', 'Migrating'
:vartype provisioning_state: str or
~azure.mgmt.hanaonazure.models.HanaProvisioningStatesEnum
:ivar managed_resource_group_name: The name of the resource group the SAP
Monitor resources get deployed into.
:vartype managed_resource_group_name: str
:param log_analytics_workspace_arm_id: The ARM ID of the Log Analytics
Workspace that is used for monitoring
:type log_analytics_workspace_arm_id: str
:param enable_customer_analytics: The value indicating whether to send
analytics to Microsoft
:type enable_customer_analytics: bool
:param log_analytics_workspace_id: The workspace ID of the log analytics
workspace to be used for monitoring
:type log_analytics_workspace_id: str
:param log_analytics_workspace_shared_key: The shared key of the log
analytics workspace that is used for monitoring
:type log_analytics_workspace_shared_key: str
:ivar sap_monitor_collector_version: The version of the payload running in
the Collector VM
:vartype sap_monitor_collector_version: str
:param monitor_subnet: The subnet which the SAP monitor will be deployed
in
:type monitor_subnet: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'provisioning_state': {'readonly': True},
'managed_resource_group_name': {'readonly': True},
'sap_monitor_collector_version': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'location': {'key': 'location', 'type': 'str'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'managed_resource_group_name': {'key': 'properties.managedResourceGroupName', 'type': 'str'},
'log_analytics_workspace_arm_id': {'key': 'properties.logAnalyticsWorkspaceArmId', 'type': 'str'},
'enable_customer_analytics': {'key': 'properties.enableCustomerAnalytics', 'type': 'bool'},
'log_analytics_workspace_id': {'key': 'properties.logAnalyticsWorkspaceId', 'type': 'str'},
'log_analytics_workspace_shared_key': {'key': 'properties.logAnalyticsWorkspaceSharedKey', 'type': 'str'},
'sap_monitor_collector_version': {'key': 'properties.sapMonitorCollectorVersion', 'type': 'str'},
'monitor_subnet': {'key': 'properties.monitorSubnet', 'type': 'str'},
}
def __init__(self, **kwargs):
super(SapMonitor, self).__init__(**kwargs)
self.provisioning_state = None
self.managed_resource_group_name = None
self.log_analytics_workspace_arm_id = kwargs.get('log_analytics_workspace_arm_id', None)
self.enable_customer_analytics = kwargs.get('enable_customer_analytics', None)
self.log_analytics_workspace_id = kwargs.get('log_analytics_workspace_id', None)
self.log_analytics_workspace_shared_key = kwargs.get('log_analytics_workspace_shared_key', None)
self.sap_monitor_collector_version = None
self.monitor_subnet = kwargs.get('monitor_subnet', None)
class Tags(Model):
"""Tags field of the resource.
:param tags: Tags field of the resource.
:type tags: dict[str, str]
"""
_attribute_map = {
'tags': {'key': 'tags', 'type': '{str}'},
}
def __init__(self, **kwargs):
super(Tags, self).__init__(**kwargs)
self.tags = kwargs.get('tags', None)
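# A minimal usage sketch (illustrative only): it assumes the msrest-style ``Resource``/``Model``
# base classes defined earlier in this module, and the workspace/subnet values below are
# hypothetical. Client-settable fields are passed as keyword arguments, while read-only fields
# such as ``provisioning_state`` stay ``None`` until the service populates them.
if __name__ == "__main__":
    monitor = SapMonitor(
        location="westeurope",
        tags={"env": "dev"},
        log_analytics_workspace_id="00000000-0000-0000-0000-000000000000",  # hypothetical value
        monitor_subnet="/subscriptions/.../subnets/monitoring",  # hypothetical, deliberately elided path
    )
    # Read-only attributes are never taken from kwargs; they remain None on the client side.
    print(monitor.location, monitor.tags, monitor.provisioning_state)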
|
class JournalEntry(DependencyObject,ISerializable):
""" Represents an entry in either back or forward navigation history. """
@staticmethod
def GetKeepAlive(dependencyObject):
"""
GetKeepAlive(dependencyObject: DependencyObject) -> bool
        Returns the System.Windows.Navigation.JournalEntry.KeepAlive attached property
of the journal entry for the specified element.
dependencyObject: The element from which to get the attached property value.
        Returns: The value of the System.Windows.Navigation.JournalEntry.KeepAlive attached
property of the journal entry for the specified element.
"""
pass
@staticmethod
def GetName(dependencyObject):
"""
GetName(dependencyObject: DependencyObject) -> str
        Gets the System.Windows.Navigation.JournalEntry.Name attached property of the
journal entry for the specified element.
dependencyObject: The element from which to get the attached property value.
        Returns: The System.Windows.Navigation.JournalEntry.Name attached property of the
journal entry for the specified element.
"""
pass
def GetObjectData(self,info,context):
"""
GetObjectData(self: JournalEntry,info: SerializationInfo,context: StreamingContext)
Called when this object is serialized.
info: The data that is required to serialize the target object.
context: The streaming context.
"""
pass
def OnPropertyChanged(self,*args):
"""
OnPropertyChanged(self: DependencyObject,e: DependencyPropertyChangedEventArgs)
Invoked whenever the effective value of any dependency property on this
System.Windows.DependencyObject has been updated. The specific dependency
property that changed is reported in the event data.
e: Event data that will contain the dependency property identifier of interest,
the property metadata for the type,and old and new values.
OnPropertyChanged(self: Window_16$17,e: DependencyPropertyChangedEventArgs)OnPropertyChanged(self: Label_17$18,e: DependencyPropertyChangedEventArgs)OnPropertyChanged(self: TextBox_18$19,e: DependencyPropertyChangedEventArgs)OnPropertyChanged(self: Button_19$20,e: DependencyPropertyChangedEventArgs)OnPropertyChanged(self: CheckBox_20$21,e: DependencyPropertyChangedEventArgs)OnPropertyChanged(self: ComboBox_21$22,e: DependencyPropertyChangedEventArgs)OnPropertyChanged(self: Separator_22$23,e: DependencyPropertyChangedEventArgs)
"""
pass
@staticmethod
def SetKeepAlive(dependencyObject,keepAlive):
"""
SetKeepAlive(dependencyObject: DependencyObject,keepAlive: bool)
        Sets the System.Windows.Navigation.JournalEntry.KeepAlive attached property of
the specified element.
dependencyObject: The element on which to set the attached property value.
keepAlive: true to keep the journal entry in memory; otherwise,false.
"""
pass
@staticmethod
def SetName(dependencyObject,name):
"""
SetName(dependencyObject: DependencyObject,name: str)
        Sets the System.Windows.Navigation.JournalEntry.Name attached property of the
specified element.
dependencyObject: The element on which to set the attached property value.
name: The name to be assigned to the attached property.
"""
pass
def ShouldSerializeProperty(self,*args):
"""
ShouldSerializeProperty(self: DependencyObject,dp: DependencyProperty) -> bool
Returns a value that indicates whether serialization processes should serialize
the value for the provided dependency property.
dp: The identifier for the dependency property that should be serialized.
Returns: true if the dependency property that is supplied should be value-serialized;
otherwise,false.
ShouldSerializeProperty(self: Window_16$17,dp: DependencyProperty) -> bool
ShouldSerializeProperty(self: Label_17$18,dp: DependencyProperty) -> bool
ShouldSerializeProperty(self: TextBox_18$19,dp: DependencyProperty) -> bool
ShouldSerializeProperty(self: Button_19$20,dp: DependencyProperty) -> bool
ShouldSerializeProperty(self: CheckBox_20$21,dp: DependencyProperty) -> bool
ShouldSerializeProperty(self: ComboBox_21$22,dp: DependencyProperty) -> bool
ShouldSerializeProperty(self: Separator_22$23,dp: DependencyProperty) -> bool
"""
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
@staticmethod
def __new__(self,*args): #cannot find CLR constructor
""" __new__(cls: type,info: SerializationInfo,context: StreamingContext) """
pass
def __reduce_ex__(self,*args):
pass
CustomContentState=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets or sets the System.Windows.Navigation.CustomContentState object that is associated with this journal entry.
Get: CustomContentState(self: JournalEntry) -> CustomContentState
"""
Name=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets or sets the name of the journal entry.
Get: Name(self: JournalEntry) -> str
Set: Name(self: JournalEntry)=value
"""
Source=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets or sets the URI of the content that was navigated to.
Get: Source(self: JournalEntry) -> Uri
Set: Source(self: JournalEntry)=value
"""
KeepAliveProperty=None
NameProperty=None
|
#!/usr/bin/python
import participantCollection
import string
import re
import datetime
import pyperclip
#TODO: need to figure out how to get total days in current month...
# Remember, this is during signup, so current month is not July, it's June.
currentMonthTotalDays = 30
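# A commented sketch (kept inert so the hand-pinned value above is untouched): the signup
# month's day count could be derived with the standard library instead of hard-coding it,
# e.g. for the month this script is run in:
#   import calendar
#   today = datetime.date.today()
#   currentMonthTotalDays = calendar.monthrange(today.year, today.month)[1]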
#TODO: fill in the current month url...
currentMonthURL = "https://www.reddit.com/r/pornfree/comments/381nyz/stay_clean_june_this_thread_updated_daily_check/"
currentMonthIndex = datetime.date.today().month
currentMonthPenultimateDayIndex = currentMonthTotalDays - 1
currentMonthName = {1:'January', 2:'February', 3:'March', 4:'April', 5:'May', 6:'June', 7:'July', 8:'August', 9:'September', 10:'October', 11:'November', 12:'December'}[currentMonthIndex]
nextMonthIndex = currentMonthIndex % 12 + 1
nextMonthName = {1:'January', 2:'February', 3:'March', 4:'April', 5:'May', 6:'June', 7:'July', 8:'August', 9:'September', 10:'October', 11:'November', 12:'December'}[nextMonthIndex]
uppercaseMonth = string.upper(nextMonthName)
currentDayOfMonthIndex = datetime.date.today().day
currentDayOfMonthName = {1:'first', 2:'second', 3:'third', 4:'fourth', 5:'fifth', 6:'sixth', 7:'seventh', 8:'eighth', 9:'ninth', 10:'tenth', 11:'eleventh', 12:'twelfth', 13:'thirteenth', 14:'fourteenth', 15:'fifteenth', 16:'sixteenth', 17:'seventeenth', 18:'eighteenth', 19:'nineteenth', 20:'twentieth', 21:'twenty-first', 22:'twenty-second', 23:'twenty-third', 24:'twenty-fourth', 25:'twenty-fifth', 26:'twenty-sixth', 27:'twenty-seventh', 28:'twenty-eighth', 29:'twenty-ninth', 30:'thirtieth', 31:'thirty-first'}[currentDayOfMonthIndex]
currentDayOfWeekName = {0:'Monday', 1:'Tuesday', 2:'Wednesday', 3:'Thursday', 4:'Friday', 5:'Saturday', 6:'Sunday'}[datetime.date.today().weekday()]
#TODO: testing
#currentDayOfMonthIndex = 28
participantCollection = participantCollection.ParticipantCollection()
initialNumber = participantCollection.size()
def templateForParticipants():
answer = ""
answer += "Here are the **INITIAL_NUMBER participants** who have already signed up:\n\n"
for participant in participantCollection.participants:
answer += "/u/" + participant.name
answer += "\n\n"
return answer
def templateForTooEarly():
answer = ""
answer += "(Too early. Come back on CURRENT_MONTH_NAME " + str(currentMonthTotalDays - 6) + ")\n"
return answer
def templateForFirstSignupDay():
answer = ""
answer += "STAY CLEAN: UPPERCASE_MONTH! Sign up here! (CURRENT_MONTH_NAME CURRENT_DAY_OF_MONTH_INDEX)\n"
answer += "Hey everybody, we had a great turnout for [Stay Clean: CURRENT_MONTH_NAME](CURRENT_MONTH_URL) - let's see if we can knock it out of the park for NEXT_MONTH_NAME. Have you been clean for the month of CURRENT_MONTH_NAME? Great! Join us here, and let's keep our streak going. Did you slip in CURRENT_MONTH_NAME? Then NEXT_MONTH_NAME is your month to shine, and we will gladly fight the good fight along with you. Did you miss out on the CURRENT_MONTH_NAME challenge? Well then here is your opportunity to join us.\n"
answer += "\n"
answer += "If you would like to be included in this challenge, please post a brief comment to this thread, and I will include you. After midnight, NEXT_MONTH_NAME 1, the sign up window will close, and the challenge will begin."
return answer
def templateForMiddleSignupDays():
answer = ""
answer += "STAY CLEAN: UPPERCASE_MONTH! Sign up here! (CURRENT_MONTH_NAME CURRENT_DAY_OF_MONTH_INDEX)\n"
answer += "Hey everybody, so far **INITIAL_NUMBER participants** have signed up. Have you been clean for **[the month of CURRENT_MONTH_NAME](CURRENT_MONTH_URL)**? Great! Join us here, and let's keep our streak going. Did you slip in CURRENT_MONTH_NAME? Then NEXT_MONTH_NAME is your month to shine, and we will gladly fight the good fight along with you. Did you miss out on the CURRENT_MONTH_NAME challenge? Well then here is your opportunity to join us.\n"
answer += "\n"
answer += "If you would like to be included in this challenge, please post a brief comment to this thread (if you haven't already done so on an earlier signup thread), and I will include you. After midnight, NEXT_MONTH_NAME 1, the sign up window will close, and the challenge will begin.\n"
answer += "\n"
answer += templateForParticipants()
return answer
def templateForLastSignupDay():
answer = ""
answer += "LAST CHANCE TO SIGN UP FOR STAY CLEAN: UPPERCASE_MONTH! Sign up here!\n"
answer += "The Stay Clean: NEXT_MONTH_NAME challenge **begins tomorrow**! So far, we have **INITIAL_NUMBER participants** signed up. If you would like to be included in the challenge, please post a brief comment to this thread (if you haven't already done so on an earlier signup thread), and we will include you. After midnight tonight, we will not be accepting any more participants. I will create the official update post tomorrow.\n"
answer += "\n"
answer += templateForParticipants()
return answer
def templateToUse():
if currentDayOfMonthIndex <= (currentMonthTotalDays - 7):
return templateForTooEarly()
elif currentDayOfMonthIndex == (currentMonthTotalDays - 6):
return templateForFirstSignupDay()
elif ( (currentMonthTotalDays - 5) <= currentDayOfMonthIndex <= (currentMonthTotalDays - 1) ):
return templateForMiddleSignupDays()
elif ( currentMonthTotalDays == currentDayOfMonthIndex ):
return templateForLastSignupDay()
def stringToPrint():
answer = templateToUse()
answer = re.sub( 'INITIAL_NUMBER', str(initialNumber), answer )
answer = re.sub( 'CURRENT_MONTH_INDEX', str(currentMonthIndex), answer )
answer = re.sub( 'CURRENT_MONTH_TOTAL_DAYS', str(currentMonthTotalDays), answer )
answer = re.sub( 'CURRENT_MONTH_PENULTIMATE_DAY_INDEX', str(currentMonthPenultimateDayIndex), answer )
answer = re.sub( 'CURRENT_MONTH_NAME', currentMonthName, answer )
answer = re.sub( 'CURRENT_MONTH_URL', currentMonthURL, answer )
answer = re.sub( 'NEXT_MONTH_INDEX', str(nextMonthIndex), answer )
answer = re.sub( 'NEXT_MONTH_NAME', nextMonthName, answer )
answer = re.sub( 'CURRENT_DAY_OF_MONTH_INDEX', str(currentDayOfMonthIndex), answer )
answer = re.sub( 'CURRENT_DAY_OF_MONTH_NAME', currentDayOfMonthName, answer )
answer = re.sub( 'CURRENT_DAY_OF_WEEK_NAME', currentDayOfWeekName, answer )
answer = re.sub( 'UPPERCASE_MONTH', uppercaseMonth, answer )
return answer
outputString = stringToPrint()
print "============================================================="
print outputString
print "============================================================="
pyperclip.copy(outputString)
|
"""Tests for C-implemented GenericAlias."""
import unittest
import pickle
import copy
from collections import (
defaultdict, deque, OrderedDict, Counter, UserDict, UserList
)
from collections.abc import *
from concurrent.futures import Future
from concurrent.futures.thread import _WorkItem
from contextlib import AbstractContextManager, AbstractAsyncContextManager
from contextvars import ContextVar, Token
from dataclasses import Field
from functools import partial, partialmethod, cached_property
from mailbox import Mailbox, _PartialFile
try:
import ctypes
except ImportError:
ctypes = None
from difflib import SequenceMatcher
from filecmp import dircmp
from fileinput import FileInput
from itertools import chain
from http.cookies import Morsel
from multiprocessing.managers import ValueProxy
from multiprocessing.pool import ApplyResult
try:
from multiprocessing.shared_memory import ShareableList
except ImportError:
# multiprocessing.shared_memory is not available on e.g. Android
ShareableList = None
from multiprocessing.queues import SimpleQueue as MPSimpleQueue
from os import DirEntry
from re import Pattern, Match
from types import GenericAlias, MappingProxyType, AsyncGeneratorType
from tempfile import TemporaryDirectory, SpooledTemporaryFile
from urllib.parse import SplitResult, ParseResult
from unittest.case import _AssertRaisesContext
from queue import Queue, SimpleQueue
from weakref import WeakSet, ReferenceType, ref
import typing
from typing import TypeVar
T = TypeVar('T')
K = TypeVar('K')
V = TypeVar('V')
class BaseTest(unittest.TestCase):
"""Test basics."""
generic_types = [type, tuple, list, dict, set, frozenset, enumerate,
defaultdict, deque,
SequenceMatcher,
dircmp,
FileInput,
OrderedDict, Counter, UserDict, UserList,
Pattern, Match,
partial, partialmethod, cached_property,
AbstractContextManager, AbstractAsyncContextManager,
Awaitable, Coroutine,
AsyncIterable, AsyncIterator,
AsyncGenerator, Generator,
Iterable, Iterator,
Reversible,
Container, Collection,
Mailbox, _PartialFile,
ContextVar, Token,
Field,
Set, MutableSet,
Mapping, MutableMapping, MappingView,
KeysView, ItemsView, ValuesView,
Sequence, MutableSequence,
MappingProxyType, AsyncGeneratorType,
DirEntry,
chain,
TemporaryDirectory, SpooledTemporaryFile,
Queue, SimpleQueue,
_AssertRaisesContext,
SplitResult, ParseResult,
ValueProxy, ApplyResult,
WeakSet, ReferenceType, ref,
ShareableList, MPSimpleQueue,
Future, _WorkItem,
Morsel]
if ctypes is not None:
generic_types.extend((ctypes.Array, ctypes.LibraryLoader))
def test_subscriptable(self):
for t in self.generic_types:
if t is None:
continue
tname = t.__name__
with self.subTest(f"Testing {tname}"):
alias = t[int]
self.assertIs(alias.__origin__, t)
self.assertEqual(alias.__args__, (int,))
self.assertEqual(alias.__parameters__, ())
def test_unsubscriptable(self):
for t in int, str, float, Sized, Hashable:
tname = t.__name__
with self.subTest(f"Testing {tname}"):
with self.assertRaises(TypeError):
t[int]
def test_instantiate(self):
for t in tuple, list, dict, set, frozenset, defaultdict, deque:
tname = t.__name__
with self.subTest(f"Testing {tname}"):
alias = t[int]
self.assertEqual(alias(), t())
if t is dict:
self.assertEqual(alias(iter([('a', 1), ('b', 2)])), dict(a=1, b=2))
self.assertEqual(alias(a=1, b=2), dict(a=1, b=2))
elif t is defaultdict:
def default():
return 'value'
a = alias(default)
d = defaultdict(default)
self.assertEqual(a['test'], d['test'])
else:
self.assertEqual(alias(iter((1, 2, 3))), t((1, 2, 3)))
def test_unbound_methods(self):
t = list[int]
a = t()
t.append(a, 'foo')
self.assertEqual(a, ['foo'])
x = t.__getitem__(a, 0)
self.assertEqual(x, 'foo')
self.assertEqual(t.__len__(a), 1)
def test_subclassing(self):
class C(list[int]):
pass
self.assertEqual(C.__bases__, (list,))
self.assertEqual(C.__class__, type)
def test_class_methods(self):
t = dict[int, None]
self.assertEqual(dict.fromkeys(range(2)), {0: None, 1: None}) # This works
self.assertEqual(t.fromkeys(range(2)), {0: None, 1: None}) # Should be equivalent
def test_no_chaining(self):
t = list[int]
with self.assertRaises(TypeError):
t[int]
def test_generic_subclass(self):
class MyList(list):
pass
t = MyList[int]
self.assertIs(t.__origin__, MyList)
self.assertEqual(t.__args__, (int,))
self.assertEqual(t.__parameters__, ())
def test_repr(self):
class MyList(list):
pass
self.assertEqual(repr(list[str]), 'list[str]')
self.assertEqual(repr(list[()]), 'list[()]')
self.assertEqual(repr(tuple[int, ...]), 'tuple[int, ...]')
self.assertTrue(repr(MyList[int]).endswith('.BaseTest.test_repr.<locals>.MyList[int]'))
self.assertEqual(repr(list[str]()), '[]') # instances should keep their normal repr
def test_exposed_type(self):
import types
a = types.GenericAlias(list, int)
self.assertEqual(str(a), 'list[int]')
self.assertIs(a.__origin__, list)
self.assertEqual(a.__args__, (int,))
self.assertEqual(a.__parameters__, ())
def test_parameters(self):
from typing import List, Dict, Callable
D0 = dict[str, int]
self.assertEqual(D0.__args__, (str, int))
self.assertEqual(D0.__parameters__, ())
D1a = dict[str, V]
self.assertEqual(D1a.__args__, (str, V))
self.assertEqual(D1a.__parameters__, (V,))
D1b = dict[K, int]
self.assertEqual(D1b.__args__, (K, int))
self.assertEqual(D1b.__parameters__, (K,))
D2a = dict[K, V]
self.assertEqual(D2a.__args__, (K, V))
self.assertEqual(D2a.__parameters__, (K, V))
D2b = dict[T, T]
self.assertEqual(D2b.__args__, (T, T))
self.assertEqual(D2b.__parameters__, (T,))
L0 = list[str]
self.assertEqual(L0.__args__, (str,))
self.assertEqual(L0.__parameters__, ())
L1 = list[T]
self.assertEqual(L1.__args__, (T,))
self.assertEqual(L1.__parameters__, (T,))
L2 = list[list[T]]
self.assertEqual(L2.__args__, (list[T],))
self.assertEqual(L2.__parameters__, (T,))
L3 = list[List[T]]
self.assertEqual(L3.__args__, (List[T],))
self.assertEqual(L3.__parameters__, (T,))
L4a = list[Dict[K, V]]
self.assertEqual(L4a.__args__, (Dict[K, V],))
self.assertEqual(L4a.__parameters__, (K, V))
L4b = list[Dict[T, int]]
self.assertEqual(L4b.__args__, (Dict[T, int],))
self.assertEqual(L4b.__parameters__, (T,))
L5 = list[Callable[[K, V], K]]
self.assertEqual(L5.__args__, (Callable[[K, V], K],))
self.assertEqual(L5.__parameters__, (K, V))
def test_parameter_chaining(self):
from typing import List, Dict, Union, Callable
self.assertEqual(list[T][int], list[int])
self.assertEqual(dict[str, T][int], dict[str, int])
self.assertEqual(dict[T, int][str], dict[str, int])
self.assertEqual(dict[K, V][str, int], dict[str, int])
self.assertEqual(dict[T, T][int], dict[int, int])
self.assertEqual(list[list[T]][int], list[list[int]])
self.assertEqual(list[dict[T, int]][str], list[dict[str, int]])
self.assertEqual(list[dict[str, T]][int], list[dict[str, int]])
self.assertEqual(list[dict[K, V]][str, int], list[dict[str, int]])
self.assertEqual(dict[T, list[int]][str], dict[str, list[int]])
self.assertEqual(list[List[T]][int], list[List[int]])
self.assertEqual(list[Dict[K, V]][str, int], list[Dict[str, int]])
self.assertEqual(list[Union[K, V]][str, int], list[Union[str, int]])
self.assertEqual(list[Callable[[K, V], K]][str, int],
list[Callable[[str, int], str]])
self.assertEqual(dict[T, List[int]][str], dict[str, List[int]])
        with self.assertRaises(TypeError):
            list[int][int]
        with self.assertRaises(TypeError):
            dict[T, int][str, int]
        with self.assertRaises(TypeError):
            dict[str, T][str, int]
        with self.assertRaises(TypeError):
            dict[T, T][str, int]
def test_equality(self):
self.assertEqual(list[int], list[int])
self.assertEqual(dict[str, int], dict[str, int])
self.assertNotEqual(dict[str, int], dict[str, str])
self.assertNotEqual(list, list[int])
self.assertNotEqual(list[int], list)
def test_isinstance(self):
self.assertTrue(isinstance([], list))
with self.assertRaises(TypeError):
isinstance([], list[str])
def test_issubclass(self):
class L(list): ...
self.assertTrue(issubclass(L, list))
with self.assertRaises(TypeError):
issubclass(L, list[str])
def test_type_generic(self):
t = type[int]
Test = t('Test', (), {})
self.assertTrue(isinstance(Test, type))
test = Test()
self.assertEqual(t(test), Test)
self.assertEqual(t(0), int)
def test_type_subclass_generic(self):
class MyType(type):
pass
with self.assertRaises(TypeError):
MyType[int]
def test_pickle(self):
alias = GenericAlias(list, T)
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
s = pickle.dumps(alias, proto)
loaded = pickle.loads(s)
self.assertEqual(loaded.__origin__, alias.__origin__)
self.assertEqual(loaded.__args__, alias.__args__)
self.assertEqual(loaded.__parameters__, alias.__parameters__)
def test_copy(self):
class X(list):
def __copy__(self):
return self
def __deepcopy__(self, memo):
return self
for origin in list, deque, X:
alias = GenericAlias(origin, T)
copied = copy.copy(alias)
self.assertEqual(copied.__origin__, alias.__origin__)
self.assertEqual(copied.__args__, alias.__args__)
self.assertEqual(copied.__parameters__, alias.__parameters__)
copied = copy.deepcopy(alias)
self.assertEqual(copied.__origin__, alias.__origin__)
self.assertEqual(copied.__args__, alias.__args__)
self.assertEqual(copied.__parameters__, alias.__parameters__)
def test_union(self):
a = typing.Union[list[int], list[str]]
self.assertEqual(a.__args__, (list[int], list[str]))
self.assertEqual(a.__parameters__, ())
def test_union_generic(self):
a = typing.Union[list[T], tuple[T, ...]]
self.assertEqual(a.__args__, (list[T], tuple[T, ...]))
self.assertEqual(a.__parameters__, (T,))
def test_dir(self):
dir_of_gen_alias = set(dir(list[int]))
self.assertTrue(dir_of_gen_alias.issuperset(dir(list)))
for generic_alias_property in ("__origin__", "__args__", "__parameters__"):
self.assertIn(generic_alias_property, dir_of_gen_alias)
def test_weakref(self):
for t in self.generic_types:
if t is None:
continue
tname = t.__name__
with self.subTest(f"Testing {tname}"):
alias = t[int]
self.assertEqual(ref(alias)(), alias)
def test_no_kwargs(self):
# bpo-42576
with self.assertRaises(TypeError):
GenericAlias(bad=float)
def test_subclassing_types_genericalias(self):
class SubClass(GenericAlias): ...
alias = SubClass(list, int)
class Bad(GenericAlias):
def __new__(cls, *args, **kwargs):
super().__new__(cls, *args, **kwargs)
self.assertEqual(alias, list[int])
with self.assertRaises(TypeError):
Bad(list, int, bad=int)
def test_abc_callable(self):
# A separate test is needed for Callable since it uses a subclass of
# GenericAlias.
alias = Callable[[int, str], float]
with self.subTest("Testing subscription"):
self.assertIs(alias.__origin__, Callable)
self.assertEqual(alias.__args__, (int, str, float))
self.assertEqual(alias.__parameters__, ())
with self.subTest("Testing instance checks"):
self.assertIsInstance(alias, GenericAlias)
with self.subTest("Testing weakref"):
self.assertEqual(ref(alias)(), alias)
with self.subTest("Testing pickling"):
s = pickle.dumps(alias)
loaded = pickle.loads(s)
self.assertEqual(alias.__origin__, loaded.__origin__)
self.assertEqual(alias.__args__, loaded.__args__)
self.assertEqual(alias.__parameters__, loaded.__parameters__)
with self.subTest("Testing TypeVar substitution"):
C1 = Callable[[int, T], T]
C2 = Callable[[K, T], V]
C3 = Callable[..., T]
self.assertEqual(C1[str], Callable[[int, str], str])
self.assertEqual(C2[int, float, str], Callable[[int, float], str])
self.assertEqual(C3[int], Callable[..., int])
# multi chaining
C4 = C2[int, V, str]
self.assertEqual(repr(C4).split(".")[-1], "Callable[[int, ~V], str]")
self.assertEqual(repr(C4[dict]).split(".")[-1], "Callable[[int, dict], str]")
self.assertEqual(C4[dict], Callable[[int, dict], str])
with self.subTest("Testing type erasure"):
class C1(Callable):
def __call__(self):
return None
a = C1[[int], T]
self.assertIs(a().__class__, C1)
self.assertEqual(a().__orig_class__, C1[[int], T])
# bpo-42195
with self.subTest("Testing collections.abc.Callable's consistency "
"with typing.Callable"):
c1 = typing.Callable[[int, str], dict]
c2 = Callable[[int, str], dict]
self.assertEqual(c1.__args__, c2.__args__)
self.assertEqual(hash(c1.__args__), hash(c2.__args__))
if __name__ == "__main__":
unittest.main()
|
from os import listdir
from os.path import isfile, join
from re import compile
from typing import Dict
from PyQt5.QtCore import QRegularExpression
from PyQt5.QtGui import QRegularExpressionValidator
from PyQt5.QtWidgets import QDialog, QMessageBox
from dbus import DBusException
from snapper.SnapperConnection import SnapperConnection
from widgets.message_boxes.BtrfsEnvironmentErrorMessageBox import BtrfsEnvironmentErrorMessageBox
from widgets.message_boxes.DBusErrorMessageBox import DBusErrorMessageBox
from widgets.windows.CreateConfigWindow.Ui_CreateConfigWindow import Ui_CreateConfigWindow
FileSystemPath = str
FileSystemType = str
class CreateConfigWindow(QDialog):
__lvm_regexp = compile("^lvm\\(([_a-z0-9]+)\\)$")
__config_name_validator = QRegularExpressionValidator(QRegularExpression("[^, \t]+"))
def __init__(self, connection: SnapperConnection):
super().__init__()
self.__ui = Ui_CreateConfigWindow()
self.__ui.setupUi(self)
self.__conn = connection
self.__filesystems = self.__get_filesystems()
self.__setup_ui()
self.__setup_listeners()
self.__ui.configSettingsWidget.update_sizes()
def exec(self) -> int:
if len(self.__filesystems) == 0:
message_box = QMessageBox(text="No available volumes to create config.")
message_box.setWindowTitle("Create config")
return message_box.exec()
return super().exec()
def __setup_ui(self) -> None:
self.__ui.volumeComboBox.addItems(self.__filesystems.keys())
self.__ui.configSettingsWidget.fill_from_dict(dict())
self.__ui.configNameLineEdit.setValidator(CreateConfigWindow.__config_name_validator)
self.__setup_templates()
def __setup_listeners(self) -> None:
self.__ui.volumeComboBox.currentTextChanged.connect(self.__on_change_mount_point)
self.__ui.createPushButton.clicked.connect(self.__on_click_create_push_button)
def __setup_templates(self) -> None:
self.__ui.templateComboBox.addItems([file_name for file_name in listdir("/etc/snapper/config-templates/") if
isfile(join("/etc/snapper/config-templates/", file_name))])
def __on_change_mount_point(self, path: str) -> None:
self.__ui.configSettingsWidget.is_enable_btrfs = self.__filesystems[path] == "btrfs"
if self.__ui.configSettingsWidget.is_enable_btrfs:
try:
self.__ui.configSettingsWidget.setup_qgroups(path)
except EnvironmentError as e:
BtrfsEnvironmentErrorMessageBox(e).exec()
    def __get_filesystems(self) -> Dict[FileSystemPath, FileSystemType]:
result = dict()
try:
with open("/etc/mtab", "r") as file:
for line in file.readlines():
mount_item = line.split()
fs_type = mount_item[2]
if fs_type == "ext4dev":
fs_type = "ext4"
if fs_type == "btrfs" or fs_type == "ext4" or CreateConfigWindow.__lvm_regexp.match(fs_type):
result[mount_item[1]] = fs_type
for config in self.__conn.list_configs():
if config.path in result:
result.pop(config.path)
except IOError:
message = QMessageBox(text="Error while getting mount point list")
message.setWindowTitle("Error")
message.exec()
return result
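    # Illustrative /etc/mtab line and how __get_filesystems reads it (values are made up):
    #   "/dev/sda2 /home btrfs rw,relatime 0 0".split() yields mount_item[1] == "/home" and
    #   mount_item[2] == "btrfs", so the mapping becomes result["/home"] = "btrfs"; mount points
    #   that already have a snapper config are then dropped from the result.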
def __on_click_create_push_button(self) -> None:
try:
sub_volume = self.__ui.volumeComboBox.currentText()
template_name = self.__ui.templateComboBox.currentText() if self.__ui.templateGroupBox.isChecked() else ""
config_name = self.__ui.configNameLineEdit.text()
self.__conn.create_config(config_name, sub_volume, self.__filesystems[sub_volume], template_name)
if len(template_name) == 0:
self.__conn.set_config(config_name, self.__ui.configSettingsWidget.get_data())
self.close()
except DBusException as e:
DBusErrorMessageBox(e).exec()
|
#!/usr/bin/python3
from init_env import init_env
init_env()
from pymavlink import mavutil
connection = mavutil.mavlink_connection('udpin:localhost:11000')
mav = connection
while (True):
msg = mav.recv_match(blocking=True)
print("Message from %d: %s" % (msg.get_srcSystem(), msg))
|
# Generated by Django 2.2.2 on 2019-09-07 14:38
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('ReclamaCaicoApp', '0010_comentario_user'),
]
operations = [
migrations.RemoveField(
model_name='comentario',
name='user',
),
migrations.AddField(
model_name='comentario',
name='nome',
field=models.CharField(default=1, max_length=200),
preserve_default=False,
),
]
|
from typing import List, Tuple, Optional
from pydantic import Field, HttpUrl, BaseModel
class Intents(BaseModel):
guilds: bool = True
guild_members: bool = True
guild_messages: bool = False
guild_message_reactions: bool = True
direct_message: bool = False
message_audit: bool = False
forum_event: bool = False
audio_action: bool = False
at_messages: bool = True
def to_int(self):
return (
self.guilds << 0
| self.guild_members << 1
| self.guild_messages << 9
| self.guild_message_reactions << 10
| self.direct_message << 12
| self.message_audit << 27
| self.forum_event << 28
| self.audio_action << 29
| self.at_messages << 30
)
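# A quick illustration of the bit layout above (not used by the class itself): with the default
# flags, Intents().to_int() evaluates to (1 << 0) | (1 << 1) | (1 << 10) | (1 << 30), i.e.
# 0x40000403, enabling guilds, guild_members, guild_message_reactions and at_messages.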
class BotInfo(BaseModel):
id: str = Field(alias="id")
token: str = Field(alias="token")
secret: str = Field(alias="secret")
shard: Optional[Tuple[int, int]] = None
intent: Intents = Field(default_factory=Intents)
class Config(BaseModel):
qqguild_is_sandbox: bool = False
qqguild_api_base: HttpUrl = Field("https://api.sgroup.qq.com/")
qqguild_sandbox_api_base: HttpUrl = Field("https://sandbox.api.sgroup.qq.com")
qqguild_bots: List[BotInfo] = Field(default_factory=list)
class Config:
extra = "ignore"
|
"""
This file offers the methods to automatically retrieve the graph Pseudoalteromonas tunicata.
The graph is automatically retrieved from the STRING repository.
Report
---------------------
At the time of rendering these methods (please see datetime below), the graph
had the following characteristics:
Datetime: 2021-02-02 18:59:41.922410
The undirected graph Pseudoalteromonas tunicata has 4449 nodes and 452740
weighted edges, of which none are self-loops. The graph is dense as it
has a density of 0.04576 and has 32 connected components, where the component
with most nodes has 4376 nodes and the component with the least nodes has
2 nodes. The graph median node degree is 190, the mean node degree is 203.52,
and the node degree mode is 2. The top 5 most central nodes are 87626.PTD2_18840
(degree 1398), 87626.PTD2_06569 (degree 1364), 87626.PTD2_12984 (degree
1139), 87626.PTD2_18285 (degree 1124) and 87626.PTD2_12989 (degree 956).
References
---------------------
Please cite the following if you use the data:
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
Usage example
----------------------
The usage of this graph is relatively straightforward:
.. code:: python
# First import the function to retrieve the graph from the datasets
from ensmallen_graph.datasets.string import PseudoalteromonasTunicata
# Then load the graph
graph = PseudoalteromonasTunicata()
# Finally, you can do anything with it, for instance, compute its report:
print(graph)
# If you need to run a link prediction task with validation,
# you can split the graph using a connected holdout as follows:
train_graph, validation_graph = graph.connected_holdout(
# You can use an 80/20 split the holdout, for example.
train_size=0.8,
# The random state is used to reproduce the holdout.
random_state=42,
        # Whether to show a loading bar.
verbose=True
)
# Remember that, if you need, you can enable the memory-time trade-offs:
train_graph.enable(
vector_sources=True,
vector_destinations=True,
vector_outbounds=True
)
# Consider using the methods made available in the Embiggen package
# to run graph embedding or link prediction tasks.
"""
from typing import Dict
from ..automatic_graph_retrieval import AutomaticallyRetrievedGraph
from ...ensmallen_graph import EnsmallenGraph # pylint: disable=import-error
def PseudoalteromonasTunicata(
directed: bool = False,
verbose: int = 2,
cache_path: str = "graphs/string",
**additional_graph_kwargs: Dict
) -> EnsmallenGraph:
"""Return new instance of the Pseudoalteromonas tunicata graph.
The graph is automatically retrieved from the STRING repository.
Parameters
-------------------
directed: bool = False,
        Whether to load the graph as directed or undirected.
By default false.
verbose: int = 2,
        Whether to show loading bars during the retrieval and building
of the graph.
cache_path: str = "graphs",
Where to store the downloaded graphs.
additional_graph_kwargs: Dict,
Additional graph kwargs.
Returns
-----------------------
    Instance of Pseudoalteromonas tunicata graph.
Report
---------------------
At the time of rendering these methods (please see datetime below), the graph
had the following characteristics:
Datetime: 2021-02-02 18:59:41.922410
The undirected graph Pseudoalteromonas tunicata has 4449 nodes and 452740
weighted edges, of which none are self-loops. The graph is dense as it
has a density of 0.04576 and has 32 connected components, where the component
with most nodes has 4376 nodes and the component with the least nodes has
2 nodes. The graph median node degree is 190, the mean node degree is 203.52,
and the node degree mode is 2. The top 5 most central nodes are 87626.PTD2_18840
(degree 1398), 87626.PTD2_06569 (degree 1364), 87626.PTD2_12984 (degree
1139), 87626.PTD2_18285 (degree 1124) and 87626.PTD2_12989 (degree 956).
References
---------------------
Please cite the following if you use the data:
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
Usage example
----------------------
The usage of this graph is relatively straightforward:
.. code:: python
# First import the function to retrieve the graph from the datasets
from ensmallen_graph.datasets.string import PseudoalteromonasTunicata
# Then load the graph
graph = PseudoalteromonasTunicata()
# Finally, you can do anything with it, for instance, compute its report:
print(graph)
# If you need to run a link prediction task with validation,
# you can split the graph using a connected holdout as follows:
train_graph, validation_graph = graph.connected_holdout(
# You can use an 80/20 split the holdout, for example.
train_size=0.8,
# The random state is used to reproduce the holdout.
random_state=42,
            # Whether to show a loading bar.
verbose=True
)
# Remember that, if you need, you can enable the memory-time trade-offs:
train_graph.enable(
vector_sources=True,
vector_destinations=True,
vector_outbounds=True
)
# Consider using the methods made available in the Embiggen package
# to run graph embedding or link prediction tasks.
"""
return AutomaticallyRetrievedGraph(
graph_name="PseudoalteromonasTunicata",
dataset="string",
directed=directed,
verbose=verbose,
cache_path=cache_path,
additional_graph_kwargs=additional_graph_kwargs
)()
|
import os
import pathlib
import shutil
import typing
import alembic.command
import alembic.config
from mlrun import mlconf
class AlembicUtil(object):
def __init__(self, alembic_config_path: pathlib.Path):
self._alembic_config_path = str(alembic_config_path)
self._alembic_config = alembic.config.Config(self._alembic_config_path)
self._alembic_output = ""
def init_alembic(self, from_scratch: bool = False):
revision_history = self._get_revision_history_list()
latest_revision = revision_history[0]
initial_alembic_revision = revision_history[-1]
db_file_path = self._get_db_file_path()
db_path_exists = os.path.isfile(db_file_path)
# this command for some reason creates a dummy db file so it has to be after db_path_exists
current_revision = self._get_current_revision()
if not from_scratch and db_path_exists and not current_revision:
# if database file exists but no alembic version exists, stamp the existing
# database with the initial alembic version, so we can upgrade it later
alembic.command.stamp(self._alembic_config, initial_alembic_revision)
elif (
db_path_exists
and current_revision
and current_revision not in revision_history
):
self._downgrade_to_revision(db_file_path, current_revision, latest_revision)
# get current revision again if it changed during the last commands
current_revision = self._get_current_revision()
if current_revision:
self._backup_revision(db_file_path, current_revision)
alembic.command.upgrade(self._alembic_config, "head")
@staticmethod
def _get_db_file_path() -> str:
"""
Get the db file path from the dsn.
Converts the dsn to the file path. e.g.:
sqlite:////mlrun/db/mlrun.db?check_same_thread=false -> /mlrun/db/mlrun.db
"""
return mlconf.httpdb.dsn.split("?")[0].split("sqlite:///")[-1]
def _get_current_revision(self) -> typing.Optional[str]:
# create separate config in order to catch the stdout
catch_stdout_config = alembic.config.Config(self._alembic_config_path)
catch_stdout_config.print_stdout = self._save_output
self._flush_output()
try:
alembic.command.current(catch_stdout_config)
return self._alembic_output.strip().replace(" (head)", "")
except Exception as exc:
if "Can't locate revision identified by" in exc.args[0]:
# DB has a revision that isn't known to us, extracting it from the exception.
return exc.args[0].split("'")[2]
return None
def _get_revision_history_list(self) -> typing.List[str]:
"""
Returns a list of the revision history sorted from latest to oldest.
"""
# create separate config in order to catch the stdout
catch_stdout_config = alembic.config.Config(self._alembic_config_path)
catch_stdout_config.print_stdout = self._save_output
self._flush_output()
alembic.command.history(catch_stdout_config)
return self._parse_revision_history(self._alembic_output)
@staticmethod
def _parse_revision_history(output: str) -> typing.List[str]:
return [line.split(" ")[2].replace(",", "") for line in output.splitlines()]
@staticmethod
def _backup_revision(db_file_path: str, current_version: str):
if db_file_path == ":memory:":
return
db_dir_path = pathlib.Path(os.path.dirname(db_file_path))
backup_path = db_dir_path / f"{current_version}.db"
shutil.copy2(db_file_path, backup_path)
@staticmethod
def _downgrade_to_revision(
db_file_path: str, current_revision: str, fallback_version: str
):
db_dir_path = pathlib.Path(os.path.dirname(db_file_path))
backup_path = db_dir_path / f"{fallback_version}.db"
if not os.path.isfile(backup_path):
raise RuntimeError(
f"Cannot fall back to revision {fallback_version}, "
f"no back up exists. Current revision: {current_revision}"
)
# backup the current DB
current_backup_path = db_dir_path / f"{current_revision}.db"
shutil.copy2(db_file_path, current_backup_path)
shutil.copy2(backup_path, db_file_path)
def _save_output(self, text: str, *_):
self._alembic_output += f"{text}\n"
def _flush_output(self):
self._alembic_output = ""
|
# -*- coding: utf8 - *-
"""
Utility and helper methods for cihai.
"""
from __future__ import absolute_import, print_function, unicode_literals
import sys
from . import exc
from ._compat import collections_abc, reraise
def merge_dict(base, additional):
"""
Combine two dictionary-like objects.
Notes
-----
Code from https://github.com/pypa/warehouse
Copyright 2013 Donald Stufft
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
if base is None:
return additional
if additional is None:
return base
if not (
isinstance(base, collections_abc.Mapping)
and isinstance(additional, collections_abc.Mapping)
):
return additional
merged = base
for key, value in additional.items():
if isinstance(value, collections_abc.Mapping):
merged[key] = merge_dict(merged.get(key), value)
else:
merged[key] = value
return merged
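# Illustrative call (note that the base mapping is updated in place and also returned):
#   merge_dict({"a": {"x": 1}}, {"a": {"y": 2}, "b": 3})
#   -> {"a": {"x": 1, "y": 2}, "b": 3}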
def supports_wide():
"""Return affirmative if python interpreter supports wide characters.
Returns
-------
bool :
True if python supports wide character sets
"""
return sys.maxunicode > 0xFFFF
def import_string(import_name, silent=False): # NOQA: C901
"""
Imports an object based on a string.
This is useful if you want to use import paths as endpoints or
something similar. An import path can be specified either in dotted
notation (``xml.sax.saxutils.escape``) or with a colon as object
delimiter (``xml.sax.saxutils:escape``).
If `silent` is True the return value will be `None` if the import fails.
Parameters
----------
import_name : string
the dotted name for the object to import.
silent : bool
if set to `True` import errors are ignored and `None` is returned instead.
Returns
-------
imported object
Raises
------
cihai.exc.ImportStringError (ImportError, cihai.exc.CihaiException)
Notes
-----
This is from werkzeug.utils c769200 on May 23, LICENSE BSD.
https://github.com/pallets/werkzeug
Changes:
- Exception raised is cihai.exc.ImportStringError
- Add NOQA C901 to avoid complexity lint
- Format with black
"""
# force the import name to automatically convert to strings
# __import__ is not able to handle unicode strings in the fromlist
# if the module is a package
import_name = str(import_name).replace(':', '.')
try:
try:
__import__(import_name)
except ImportError:
if '.' not in import_name:
raise
else:
return sys.modules[import_name]
module_name, obj_name = import_name.rsplit('.', 1)
try:
module = __import__(module_name, None, None, [obj_name])
except ImportError:
# support importing modules not yet set up by the parent module
# (or package for that matter)
module = import_string(module_name)
try:
return getattr(module, obj_name)
except AttributeError as e:
raise ImportError(e)
except ImportError as e:
if not silent:
reraise(
exc.ImportStringError,
exc.ImportStringError(import_name, e),
sys.exc_info()[2],
)
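# A small, runnable sanity check (illustrative only; uses just the standard library): both the
# dotted and the colon notation resolve to the same object, and silent=True turns a failed
# import into a None return value instead of raising ImportStringError.
if __name__ == "__main__":
    assert import_string("collections:OrderedDict") is import_string("collections.OrderedDict")
    assert import_string("no_such_module.nothing", silent=True) is None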
|
from pydub import AudioSegment
import json
import re
timing_src = [
"./inputs/timing/04-JHN-01-timing.txt",
"./inputs/timing/04-JHN-02-timing.txt",
"./inputs/timing/04-JHN-03-timing.txt",
"./inputs/timing/04-JHN-04-timing.txt",
"./inputs/timing/04-JHN-05-timing.txt",
"./inputs/timing/04-JHN-06-timing.txt",
"./inputs/timing/04-JHN-07-timing.txt",
"./inputs/timing/04-JHN-08-timing.txt",
"./inputs/timing/04-JHN-09-timing.txt",
"./inputs/timing/04-JHN-10-timing.txt",
"./inputs/timing/04-JHN-11-timing.txt",
"./inputs/timing/04-JHN-12-timing.txt",
"./inputs/timing/04-JHN-13-timing.txt",
"./inputs/timing/04-JHN-14-timing.txt",
"./inputs/timing/04-JHN-15-timing.txt",
"./inputs/timing/04-JHN-16-timing.txt",
"./inputs/timing/04-JHN-17-timing.txt",
"./inputs/timing/04-JHN-18-timing.txt",
"./inputs/timing/04-JHN-19-timing.txt",
"./inputs/timing/04-JHN-20-timing.txt",
"./inputs/timing/04-JHN-21-timing.txt"
]
audio_src = [
"./inputs/mp3/44-JHNgul-01.mp3",
"./inputs/mp3/44-JHNgul-02.mp3",
"./inputs/mp3/44-JHNgul-03.mp3",
"./inputs/mp3/44-JHNgul-04.mp3",
"./inputs/mp3/44-JHNgul-05.mp3",
"./inputs/mp3/44-JHNgul-06.mp3",
"./inputs/mp3/44-JHNgul-07.mp3",
"./inputs/mp3/44-JHNgul-08.mp3",
"./inputs/mp3/44-JHNgul-09.mp3",
"./inputs/mp3/44-JHNgul-10.mp3",
"./inputs/mp3/44-JHNgul-11.mp3",
"./inputs/mp3/44-JHNgul-12.mp3",
"./inputs/mp3/44-JHNgul-13.mp3",
"./inputs/mp3/44-JHNgul-14.mp3",
"./inputs/mp3/44-JHNgul-15.mp3",
"./inputs/mp3/44-JHNgul-16.mp3",
"./inputs/mp3/44-JHNgul-17.mp3",
"./inputs/mp3/44-JHNgul-18.mp3",
"./inputs/mp3/44-JHNgul-19.mp3",
"./inputs/mp3/44-JHNgul-20.mp3",
"./inputs/mp3/44-JHNgul-21.mp3"
]
def get_audio(i):
if not audio[i]:
audio[i] = AudioSegment.from_mp3(audio_src[i])
return audio[i]
pages_src = "./inputs/story_data.json"
timing_raw = []
pages_raw = ""
audio = [None for _ in audio_src]
for t in timing_src:
with open(t) as file:
timing_raw.append(file.read())
#for a in audio_src:
# audio.append(AudioSegment.from_mp3(a))
with open(pages_src) as file:
pages_raw = file.read()
segments = [None] * len(timing_raw)
def get_segment(i):
if not segments[i]:
segments[i] = []
raw = timing_raw[i]
raw = raw.replace('\ufeff','')
raw = raw.strip()
timings = raw.split("\n")
timings = [x.split("\t") for x in timings]
timings = [(float(x[0]), float(x[1]), x[2]) for x in timings]
timings = [(int(x[0] * 1000), int(x[1] * 1000), x[2]) for x in timings]
timings = [x for x in timings if x[2][0].isdigit()]
#print(timings)
timings2 = []
curr_verse = 0
curr_start = 0
curr_end = 0
for x in timings:
verse = int(re.match(r"[0-9]+",x[2]).group(0))
if verse != curr_verse:
timings2.append((curr_start,curr_end,curr_verse))
curr_verse = verse
curr_start = x[0]
curr_end = x[1]
timings2.append((curr_start,curr_end,curr_verse))
timings = timings2[1:]
for t in timings:
#print(t)
start = t[0]
end = t[1]
seg = get_audio(i)[start:end]
segments[i].append(seg)
return segments[i]
# assumes that start and end are in the same book
def get_seg(ref_start,ref_end):
seg = AudioSegment.empty()
m_start = re.match(r"([0-9]+):([0-9]+)",ref_start)
m_end = re.match(r"([0-9]+):([0-9]+)",ref_end)
book = int(m_start.group(1))
verse_start = int(m_start.group(2))
verse_end = int(m_end.group(2))
#print(book,verse_start,verse_end)
for verse in range(verse_start,verse_end+1):
seg += get_segment(book-1)[verse-1]
return seg
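# Illustrative call (chapter:verse references; values are made up): get_seg("3:16", "3:18")
# looks up chapter 3 of the timing/audio lists and concatenates the segments for verses 16-18
# into a single AudioSegment.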
def format_book_title(t):
return re.sub(r"[ -]",'_',t)
# produce audio files for a story
def segment_story(story):
durations = [] # in millis
filenames = []
for p in story["pages"]:
seg = get_seg(p["ref_start"],p["ref_end"])
filename = "./outputs/{0}_{1:02d}.mp3".format(format_book_title(story["title"]),p["page"])
file = open(filename,"w+")
file.write(' ')
file.close()
seg.export(filename, format="mp3")
print(filename)
durations.append(len(seg))
filenames.append(filename)
return filenames, durations
if __name__ == "__main__":
stories = json.loads(pages_raw)
for story in stories["storyCollection"]:
segment_story(story["story"])
# print by verse
'''
for i in range(len(segments)):
for j in range(len(segments[i])):
#print(i,j)
filename = "./outputs/{0:02d}_{1:02d}.mp3".format(i+1,j+1)
file = open(filename,"w+")
file.write(' ')
file.close()
segments[i][j].export(filename, format="mp3")
print(filename)
'''
|
import info
class subinfo(info.infoclass):
def setDependencies(self):
self.runtimeDependencies["virtual/base"] = None
self.runtimeDependencies["libs/qt5/qtbase"] = None
self.runtimeDependencies["libs/openssl"] = None
self.runtimeDependencies["libs/cyrus-sasl"] = None
def setTargets(self):
self.description = "Qt Cryptographic Architecture (QCA)"
self.svnTargets["master"] = "https://anongit.kde.org/qca.git"
# latest stable version
self.defaultTarget = "2.3.3"
self.targets[self.defaultTarget] = f"https://download.kde.org/stable/qca/{self.defaultTarget}/qca-{self.defaultTarget}.tar.xz"
self.targetDigestUrls[self.defaultTarget] = f"https://download.kde.org/stable/qca/{self.defaultTarget}/qca-{self.defaultTarget}.tar.xz.sha256"
self.targetInstSrc[self.defaultTarget] = f"qca-{self.defaultTarget}"
self.patchToApply[self.defaultTarget] = [("msvc.diff", 1)]
self.patchLevel[self.defaultTarget] = 1
from Package.CMakePackageBase import *
class Package(CMakePackageBase):
def __init__(self, **args):
CMakePackageBase.__init__(self)
# the cmake config is not relocatable
self.subinfo.options.package.disableBinaryCache = True
# tests fail to build with missing openssl header
self.subinfo.options.configure.args = "-DBUILD_TESTS=OFF "
|
"""
WSGI config for dataflair project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'dataflair.settings')
application = get_wsgi_application()
|
from tictactoe.board import BoardAPI, XOBoard
from tictactoe.main import PlayerFcty, recap_game_stats
from tictactoe.player import PlayerAPI, HumanUI
__all__ = ["PlayerAPI", "PlayerFcty", "HumanUI", "BoardAPI", "XOBoard", "recap_game_stats"]
|
import io
import re
import textwrap
from typing import List
import pytest
from pytest_console_scripts import ScriptRunner
SCRIPT_NAME = "zkeys"
SCRIPT_USAGE = f"usage: {SCRIPT_NAME} [-h] [--version]"
def test_prints_help(script_runner: ScriptRunner) -> None:
result = script_runner.run(SCRIPT_NAME, "-h")
assert result.success
assert result.stdout.startswith(SCRIPT_USAGE)
def test_prints_help_for_invalid_option(script_runner: ScriptRunner) -> None:
result = script_runner.run(SCRIPT_NAME, "-!")
assert not result.success
assert result.stderr.startswith(SCRIPT_USAGE)
def test_prints_version(script_runner: ScriptRunner) -> None:
result = script_runner.run(SCRIPT_NAME, "--version")
assert result.success
assert re.match(rf"{SCRIPT_NAME} \d+\.\d", result.stdout)
def test_prints_keybindings_from_zsh(script_runner: ScriptRunner) -> None:
result = script_runner.run(SCRIPT_NAME)
assert result.success
for line in result.stdout.splitlines():
assert re.match(r"(?#string)[M^]\S+(?#space) +(?#widget)[-_a-z]+", line)
SCRIPT_INPUT = r"""
bindkey "^@" set-mark-command
bindkey "^L" clear-screen
bindkey "^Q" push-line
bindkey "^X^U" undo
bindkey "^X?" _complete_debug
bindkey "^Xu" undo
bindkey "^[^L" clear-screen
bindkey "^[\"" quote-region
bindkey "^['" quote-line
bindkey "^[Q" push-line
bindkey "^[[A" up-line-or-history
bindkey "^[[B" down-line-or-history
bindkey "^[q" push-line
bindkey "^_" undo
bindkey "\M-Q" push-line
bindkey "\M-q" push-line
"""
@pytest.mark.parametrize(
"options,expected_output",
[
pytest.param(
[],
"""
^X? _complete_debug
^L clear-screen
^[^L clear-screen
^[[B down-line-or-history
^Q push-line
^[Q push-line
^[q push-line
M-Q push-line
M-q push-line
^[' quote-line
^[" quote-region
^@ set-mark-command
^_ undo
^Xu undo
^X^U undo
^[[A up-line-or-history
""",
id="sorted_by_widget",
),
pytest.param(
["-i"],
"""
^@ set-mark-command
^L clear-screen
^Q push-line
^_ undo
^[" quote-region
^[' quote-line
^[Q push-line
^[q push-line
^[^L clear-screen
M-Q push-line
M-q push-line
^X? _complete_debug
^Xu undo
^X^U undo
^[[A up-line-or-history
^[[B down-line-or-history
""",
id="sorted_by_string",
),
pytest.param(
["-w"],
"""
_complete_debug ^X?
clear-screen ^L ^[^L
down-line-or-history ^[[B
push-line ^Q ^[Q ^[q M-Q M-q
quote-line ^['
quote-region ^["
set-mark-command ^@
undo ^_ ^Xu ^X^U
up-line-or-history ^[[A
""",
id="grouped_by_widget",
),
pytest.param(
["-p"],
"""
^ @ L Q _
^[ " ' Q q
^[^ L
M- Q q
^X ? u
^X^ U
^[[ A B
""",
id="grouped_by_prefix",
),
],
)
def test_prints_keybindings_from_stdin(
options: List[str],
expected_output: str,
script_runner: ScriptRunner,
) -> None:
stdin = io.StringIO(textwrap.dedent(SCRIPT_INPUT))
result = script_runner.run(SCRIPT_NAME, *options, "-", stdin=stdin)
assert result.success
assert result.stdout.strip() == textwrap.dedent(expected_output).strip()
|
"""Module for parsing montly school collections data."""
from typing import ClassVar
import pandas as pd
import pdfplumber
from ...utils.misc import rename_tax_rows
from ...utils.pdf import extract_words, words_to_table
from .core import COLLECTION_TYPES, MonthlyCollectionsReport, get_column_names
class SchoolTaxCollections(MonthlyCollectionsReport): # type: ignore
"""
Monthly School District Collections Report.
Parameters
----------
month :
the calendar month number (starting at 1)
year :
the calendar year
"""
report_type: ClassVar[COLLECTION_TYPES] = "school"
@property
def legacy(self) -> bool:
"""Whether the format is the legacy or current version."""
return self.num_pages > 1
def extract(self) -> pd.DataFrame:
"""Internal function to parse the contents of a legacy PDF page."""
# Open the PDF document
with pdfplumber.open(self.path) as pdf:
# Loop over each page
out: list[pd.DataFrame] = []
for pg in pdf.pages:
# Extract the words
words = extract_words(
pg, keep_blank_chars=False, x_tolerance=1, y_tolerance=1
)
# Group the words into a table
data = words_to_table(
words,
text_tolerance_y=5,
text_tolerance_x=5,
column_tolerance=20,
min_col_sep=24,
row_header_tolerance=10,
)
# Skip the header (first five rows)
data = data.iloc[6:]
assert "REAL ESTATE" in data.iloc[0][0]
# # Remove first row of header if we need to
# for phrase in ["prelim", "final", "budget"]:
# sel = data[0].str.lower().str.startswith(phrase)
# data = data.loc[~sel]
# # Remove empty columns
# data = remove_empty_columns(data, use_nan=False)
# Check number of columns
if len(out):
if len(data.columns) != len(out[-1].columns):
raise ValueError("Column mismatch when parsing multiple pages")
# Save it
out.append(data)
# Return concatenation
return pd.concat(out, axis=0, ignore_index=True)
def transform(self, data: pd.DataFrame) -> pd.DataFrame:
"""Transform the raw parsing data into a clean data frame."""
# Call base transform
data = super().transform(data)
# Determine columns for the report
columns = get_column_names(self.month, self.year)
ncols = len(data.columns)
assert ncols in [11, 12, 14]
if ncols == 14:
data = data.drop(labels=[7, 8, 9, 10], axis=1)
else:
data = data.drop(labels=[data.columns[-6]], axis=1)
# Set the columns
columns = ["name"] + columns[-7:]
data = data[[0] + list(data.columns[-7:])]
assert len(columns) == len(data.columns)
assert len(data) in [14, 15]
# Do current/prior/total
if len(data) == 14:
index = rename_tax_rows(
data,
0,
["real_estate", "school_income", "use_and_occupancy", "liquor"],
)
else:
index = rename_tax_rows(
data,
0,
["real_estate"], # , "school_income", "use_and_occupancy", "liquor"],
)
if "PAYMENT" in data.loc[index, 0]:
data.loc[index, 0] = "pilots_total"
index += 1
index = rename_tax_rows(
data,
index,
["school_income", "use_and_occupancy", "liquor"],
)
if "PAYMENT" in data.loc[index, 0]:
data.loc[index, 0] = "pilots_total"
index += 1
# Other non-tax
data.loc[index, 0] = "other_nontax_total"
index += 1
# Total
data.loc[index, 0] = "total_revenue_total"
index += 1
# Set the columns
data.columns = columns
# Split out current/prior/total into its own column
data["kind"] = data["name"].apply(lambda x: x.split("_")[-1])
data["name"] = data["name"].apply(lambda x: "_".join(x.split("_")[:-1]))
return data
def validate(self, data: pd.DataFrame) -> bool:
"""Validate the input data."""
# Sum up
t = data.query("kind == 'total' and name != 'total_revenue'")
t = t.filter(regex=f"^{self.month_name}", axis=1)
# Compare to total
for col in t.columns:
total_revenue = data.query("name == 'total_revenue'")[col].squeeze()
diff = t[col].sum() - total_revenue
assert diff < 5
return True
def load(self, data: pd.DataFrame) -> None:
"""Load the data."""
# Get the path
dirname = self.get_data_directory("processed")
path = dirname / f"{self.year}-{self.month:02d}-tax.csv"
# Load
super()._load_csv_data(data, path)
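# A minimal usage sketch of the ETL flow above (the month/year values are
# illustrative; the constructor parameters come from MonthlyCollectionsReport):
#
#   report = SchoolTaxCollections(month=1, year=2021)
#   raw = report.extract()
#   clean = report.transform(raw)
#   assert report.validate(clean)
#   report.load(clean)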
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
time math calculation.
"""
from datetime import date, datetime, timedelta
try:
from .tz import utc, local
except ImportError:  # pragma: no cover
from rolex.tz import utc, local
def to_ordinal(a_date):
"""
    Return the proleptic Gregorian ordinal of the date, where 0001-01-01 is day 1.
"""
return a_date.toordinal()
def from_ordinal(days):
"""
    Create a date object from its proleptic Gregorian ordinal ``days``, where 0001-01-01 is day 1.
"""
return date.fromordinal(days)
def to_utctimestamp(a_datetime):
"""
    Calculate the number of seconds since UTC 1970-01-01 00:00:00.
    - If the datetime has no tzinfo: it is assumed to be a UTC time.
    - If the datetime has tzinfo: that tzinfo is used.
    WARNING: if your datetime object doesn't have ``tzinfo``, make sure
    it is a UTC time, **NOT a LOCAL TIME**.
"""
if a_datetime.tzinfo is None:
delta = a_datetime - datetime(1970, 1, 1)
else:
delta = a_datetime - datetime(1970, 1, 1, tzinfo=utc)
return delta.total_seconds()
def from_utctimestamp(timestamp):
"""
    Create a **datetime** object that is ``timestamp`` seconds after
    UTC 1970-01-01 00:00:00. If you want local time, use
    :meth:`from_timestamp`.
    This method supports negative timestamps.
    :returns: a timezone-naive datetime representing UTC time.
"""
return datetime(1970, 1, 1) + timedelta(seconds=timestamp)
def to_utc(a_datetime, keep_utc_tzinfo=False):
"""
    Convert a timezone-aware datetime to a UTC datetime.
    :param a_datetime: a timezone-aware datetime (a naive datetime is returned unchanged).
    :param keep_utc_tzinfo: whether to retain the UTC timezone information on the result.
"""
if a_datetime.tzinfo:
utc_datetime = a_datetime.astimezone(utc) # convert to utc time
if keep_utc_tzinfo is False:
utc_datetime = utc_datetime.replace(tzinfo=None)
return utc_datetime
else:
return a_datetime
def utc_to_tz(utc_datetime, tzinfo, keep_tzinfo=False):
"""
    Convert a UTC datetime to a timezone-aware datetime in the given timezone.
:param utc_datetime:
:param tzinfo:
:param keep_tzinfo:
"""
tz_awared_datetime = utc_datetime.replace(tzinfo=utc).astimezone(tzinfo)
if keep_tzinfo is False:
tz_awared_datetime = tz_awared_datetime.replace(tzinfo=None)
return tz_awared_datetime
def utc_to_local(utc_datetime, keep_tzinfo=False):
"""
    Convert a UTC datetime to a datetime in the current machine's local timezone.
:param utc_datetime:
:param keep_tzinfo:
"""
return utc_to_tz(utc_datetime, local, keep_tzinfo)
def is_weekend(d_or_dt):
"""Check if a datetime is weekend.
"""
return d_or_dt.isoweekday() in [6, 7]
def is_weekday(d_or_dt):
"""Check if a datetime is weekday.
"""
return d_or_dt.isoweekday() not in [6, 7]
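# A minimal usage sketch of the conversion helpers above (values are illustrative):
#
#   dt = datetime(2020, 1, 1, 12, 0, 0)
#   ts = to_utctimestamp(dt)            # 1577880000.0, the naive dt is treated as UTC
#   assert from_utctimestamp(ts) == dt  # round-trips to the same naive UTC time
#   local_dt = utc_to_local(dt)         # shifted into this machine's local timezone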
|
#!/usr/bin/env python2.4
"""
"""
def thousands_with_commas(i):
s = ''
while i > 999:
r = i % 1000
i = i // 1000
s = (",%03d" % r) + s
if i:
s = str(i) + s
print s
return s
if __name__ == '__main__':
assert thousands_with_commas(1001) == '1,001'
assert thousands_with_commas(1234) == '1,234'
assert thousands_with_commas(123456789) == '123,456,789'
assert thousands_with_commas(12) == '12'
assert thousands_with_commas(123) == '123'
assert thousands_with_commas(999) == '999'
assert thousands_with_commas(1000) == '1,000'
|
from django.contrib.auth import get_user_model
from django.urls import reverse
from django.test import TestCase
from rest_framework import status
from rest_framework.test import APIClient
from core.models import Ingredient, Recipe
from recipe.serializers import IngredientSerializer
INGREDIENTS_URL = reverse('recipe:ingredient-list')
class PublicIngredientAPITest(TestCase):
# Test the publicly available ingredient api
def setUp(self):
self.client = APIClient()
def test_login_required(self):
        # Test that login is always required
res = self.client.get(INGREDIENTS_URL)
self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)
class PrivateIngredientAPITest(TestCase):
# Test the private ingredients api
def setUp(self):
self.client = APIClient()
self.user = get_user_model().objects.create_user(
'root@root.com',
'Welcome1234'
)
self.client.force_authenticate(self.user)
def test_retrieve_ingredient_list(self):
# Test retrieving ingredients list
Ingredient.objects.create(user=self.user, name='Kale')
Ingredient.objects.create(user=self.user, name='Salt')
res = self.client.get(INGREDIENTS_URL)
ingredients = Ingredient.objects.all().order_by('-name')
serializer = IngredientSerializer(ingredients, many=True)
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertEqual(res.data, serializer.data)
def test_ingredient_limited_to_user(self):
        # Test that only ingredients for the authenticated user are returned
user2 = get_user_model().objects.create_user(
            'zane@darya.com',
'Welcome2431'
)
Ingredient.objects.create(user=user2, name='Vineger')
ingredient = Ingredient.objects.create(user=self.user, name='Tumeric')
res = self.client.get(INGREDIENTS_URL)
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertEqual(len(res.data), 1)
self.assertEqual(res.data[0]['name'], ingredient.name)
def test_create_ingredient_successfully(self):
# Test creating a new ingredient
payload = {'name': 'Test ingredient'}
self.client.post(INGREDIENTS_URL, payload)
exists = Ingredient.objects.filter(
user=self.user,
name=payload['name']
).exists()
self.assertTrue(exists)
def test_create_ingredient_invalid(self):
        # Test creating an ingredient with an invalid payload
payload = {'name': ''}
res = self.client.post(INGREDIENTS_URL, payload)
self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
def test_ingredient_assigned_to_recipes(self):
# Test filtering ingredients assigned to recipes
ingredient1 = Ingredient.objects.create(
user=self.user, name='Apple'
)
ingredient2 = Ingredient.objects.create(
user=self.user, name='Turkey'
)
recipe = Recipe.objects.create(
title='Apple crumble',
time_minutes=5,
price=10.00,
user=self.user
)
recipe.ingredients.add(ingredient1)
res = self.client.get(INGREDIENTS_URL, {'assigned_only': 1})
serializer1 = IngredientSerializer(ingredient1)
serializer2 = IngredientSerializer(ingredient2)
self.assertIn(serializer1.data, res.data)
self.assertNotIn(serializer2.data, res.data)
def test_retrieve_ingredients_assigned_unique(self):
""" Test filtering ingredients by assigned returns unique """
ingredient = Ingredient.objects.create(user=self.user, name='Eggs')
Ingredient.objects.create(user=self.user, name='Cheese')
recipe1 = Recipe.objects.create(
title='Eggs benedict',
time_minutes=30,
price=12.00,
user=self.user
)
recipe1.ingredients.add(ingredient)
recipe2 = Recipe.objects.create(
title='Coriander eggs on toast',
time_minutes=20,
price=5.00,
user=self.user
)
recipe2.ingredients.add(ingredient)
res = self.client.get(INGREDIENTS_URL, {'assigned_only': 1})
self.assertEqual(len(res.data), 1)
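# These tests are run with Django's test runner, e.g. `python manage.py test`;
# the exact app/module path depends on where this file lives in the project.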
|
#
# Solution to Project Euler problem 231
# Copyright (c) Project Nayuki. All rights reserved.
#
# https://www.nayuki.io/page/project-euler-solutions
# https://github.com/nayuki/Project-Euler-solutions
#
import eulerlib
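# The answer is the sum, with multiplicity, of the prime factors of the
# binomial coefficient C(20000000, 15000000). Writing f(n) for the sum of
# prime factors (with multiplicity) of n!, the identity
#   C(N, K) = N! / (K! * (N - K)!)
# gives the required sum as f(N) - f(K) - f(N - K), which is what compute()
# evaluates below using a smallest-prime-factor sieve.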
def compute():
N = 20000000
K = 15000000
smallestprimefactor = eulerlib.list_smallest_prime_factors(N)
def factorial_prime_factor_sum(n):
result = 0
for i in range(n + 1):
j = i
while j > 1:
p = smallestprimefactor[j]
result += p
j //= p
return result
ans = factorial_prime_factor_sum(N) - factorial_prime_factor_sum(K) - factorial_prime_factor_sum(N - K)
return str(ans)
if __name__ == "__main__":
print(compute())
|
from typing import List
class Solution:
    def removeInterval(self, intervals: List[List[int]], toBeRemoved: List[int]) -> List[List[int]]:
        res = []
        x, y = toBeRemoved
        for l, h in intervals:
            # No overlap with the removed range: keep the interval as-is.
            if h <= x or l >= y:
                res.append([l, h])
                continue
            # Keep any non-empty piece to the left of the removed range.
            if l < x:
                res.append([l, x])
            # Keep any non-empty piece to the right of the removed range.
            if h > y:
                res.append([y, h])
        return res
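# Example (hypothetical call, for illustration only):
#   Solution().removeInterval([[0, 2], [3, 4], [5, 7]], [1, 6]) -> [[0, 1], [6, 7]]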
|
VGG_MEAN = [103.939, 116.779, 123.68]
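# These are the per-channel means of the ImageNet training images in BGR
# order; VGG-style preprocessing subtracts them to zero-center the input.
# A minimal sketch (assumes a BGR image array `img` of shape [H, W, 3]):
#
#   import numpy as np
#   img_centered = img.astype(np.float32) - np.array(VGG_MEAN, dtype=np.float32)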
|
import json
import boto3
from datetime import datetime
import ast
class S3DAO:
USERS_DISHES_S3_KEY = 'users-dishes'
PAGE_CONTENT_S3_KEY = 'page-content'
BRINE_DATA_BUCKET_NAME = 'brine-data'
def __init__(self):
self.s3_client = None
self.users_dishes_last_modified = None
self.page_content_last_modified = None
self.users_dishes = None
self.pages_content = None
def __getS3Client(self):
if self.s3_client is None:
self.s3_client = boto3.client('s3')
return self.s3_client
# GETTERS AND SETTERS FOR USERS DISHES
def __getUsersDishesFromS3(self):
if self.users_dishes is None:
s3 = self.__getS3Client()
s3Object = s3.get_object(Bucket = self.BRINE_DATA_BUCKET_NAME, Key = self.USERS_DISHES_S3_KEY)
users_dishes_str = s3Object['Body'].read().decode("UTF-8")
self.users_dishes = ast.literal_eval(users_dishes_str)
self.users_dishes_last_modified = self.__getTimeStampOfLastUpdateFromS3(self.USERS_DISHES_S3_KEY,s3Object)
return self.users_dishes
def __getTimeStampOfLastUpdateFromS3(self,bucketKeyName: str, s3Object = None):
if s3Object is None:
s3 = self.__getS3Client()
s3Object = s3.get_object(Bucket = self.BRINE_DATA_BUCKET_NAME, Key = bucketKeyName)
timestamp = self.__getTimeStampFromS3Object(s3Object)
return timestamp
def __saveUsersDishesToS3(self,data: dict):
s3 = self.__getS3Client()
data_as_json = json.dumps(data)
s3.put_object(Bucket = self.BRINE_DATA_BUCKET_NAME, Key = self.USERS_DISHES_S3_KEY, Body = data_as_json)
def __getTimeStampFromS3Object(self,s3Object):
timeAsString = s3Object['ResponseMetadata']['HTTPHeaders']['last-modified']
timeAsDt = datetime.strptime(timeAsString[:-4],"%a, %d %b %Y %H:%M:%S")
return timeAsDt
def __isUserInDb(self,userId:str):
usersDishes = self.__getUsersDishesFromS3()
for id in usersDishes:
if id == userId:
return True
return False
def __createNewUserInUsersDishes(self,userId:str):
self.users_dishes[userId] = []
def addDishToUsersDishes(self,userId: str, dishId:str):
usersDishes = self.__getUsersDishesFromS3()
isUserInDB = self.__isUserInDb(userId)
if not isUserInDB:
self.__createNewUserInUsersDishes(userId)
usersDishes[userId].append(dishId)
def deleteDishFromUsersDishes(self,userId: str, dishId: str):
isDishNew = self.isDishNew(userId,dishId)
if isDishNew:
return
self.users_dishes[userId].remove(dishId)
def isDishNew(self,userId: str, dishId: str):
userDishes = self.__getUsersDishesFromS3()[userId]
if dishId in userDishes:
return False
return True
def saveUsersDishesToS3(self, userId:str):
latest_update_timestamp = self.__getTimeStampOfLastUpdateFromS3(self.USERS_DISHES_S3_KEY)
usersDishesToSave = dict(self.users_dishes)
if latest_update_timestamp < self.users_dishes_last_modified:
usersDishesToSave = self.__getUsersDishesFromS3()
usersDishesToSave[userId] = self.users_dishes[userId]
print('users dishes to save')
print(usersDishesToSave)
self.__saveUsersDishesToS3(usersDishesToSave)
# GETTERS AND SETTERS FOR PAGE CONTENT
def getPageContentFromS3(self):
return
def parsePath(path:str):
firstSlash = path.find("/",1)
secondSlash = path.find("/",firstSlash+1)
userId = path[1:firstSlash]
dishId = path[firstSlash+1:secondSlash]
dataType = path[secondSlash+1:]
if secondSlash == -1:
dishId = path[firstSlash+1:]
dataType = None
return ParsedPath(userId,dishId,dataType)
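# Example (hypothetical path values, for illustration only):
#   parsePath("/user123/dish456/title") -> ParsedPath("user123", "dish456", "title")
#   parsePath("/user123/dish456")       -> ParsedPath("user123", "dish456", None)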
def parseFunctions(functions:dict):
out = ""
ingredientEmoji = "🌶️"
beverageEmoji = "🍹"
dessertEmoji = "🍩"
proteinEmoji = "🍖"
starchEmoji = "🥖"
vegetableEmoji = "🥦"
dipEmoji = "🥣"
dressingEmoji = "🥗"
commaSpace = ", "
if functions["beverage"]:
out += (beverageEmoji + " Beverage" + commaSpace)
if functions["dessert"]:
out += (dessertEmoji + " Dessert" + commaSpace)
if functions["dip"]:
out += (dipEmoji + " Dip" + commaSpace)
if functions["dressing"]:
out += (dressingEmoji + " Dressing" + commaSpace)
if functions["ingredient"]:
out += (ingredientEmoji + " Ingredient" + commaSpace)
if functions["protein"]:
out += (proteinEmoji + " Protein" + commaSpace)
if functions["starch"]:
out += (starchEmoji + " Starch" + commaSpace)
if functions["veg"]:
out += (vegetableEmoji + " Vegetable" + commaSpace)
return out[:-2]
class ParsedPath:
    def __init__(self, userId: str, dishId: str, dataType: str):
self.userId = userId
self.dishId = dishId
self.dataType = dataType
class PageContent:
    def __init__(self, userId: str, dishId: str, title: str, description="", images=None, content=""):
        self.userId = userId
        self.dishId = dishId
        self.title = title
        self.description = description
        # Use None as the default to avoid sharing one mutable list across instances.
        self.images = images if images is not None else []
self.content = content
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import datetime
import json
import sys
import traceback
from getopt import getopt, GetoptError
from multiprocessing import Process
from wsgiref.simple_server import make_server
from JobBrowserBFF.jsonrpcbase import JSONRPCService, InvalidParamsError, KeywordError, \
JSONRPCError, InvalidRequestError
from JobBrowserBFF.jsonrpcbase import ServerError as JSONServerError
from biokbase import log
from JobBrowserBFF.authclient import KBaseAuth as _KBaseAuth
from JobBrowserBFF.Config import Config
from JobBrowserBFF.JobBrowserBFFImpl import JobBrowserBFF # noqa @IgnorePep8
from JobBrowserBFF.constants import AUTH
config = Config()
impl_JobBrowserBFF = JobBrowserBFF(config.get_config())
class JSONObjectEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, set):
return list(obj)
if isinstance(obj, frozenset):
return list(obj)
if hasattr(obj, 'toJSONable'):
return obj.toJSONable()
return json.JSONEncoder.default(self, obj)
class JSONRPCServiceCustom(JSONRPCService):
def call(self, ctx, jsondata):
"""
Calls jsonrpc service's method and returns its return value in a JSON
string or None if there is none.
Arguments:
jsondata -- remote method call in jsonrpc format
"""
result = self.call_py(ctx, jsondata)
if result is not None:
return json.dumps(result, cls=JSONObjectEncoder)
return None
def _call_method(self, ctx, request):
"""Calls given method with given params and returns it value."""
method = self.method_data[request['method']]['method']
params = request.get('params', None)
result = None
try:
if isinstance(params, list):
# Does it have enough arguments?
if len(params) < self._man_args(method) - 1:
raise InvalidParamsError(data={'issue': 'not enough arguments'})
# Does it have too many arguments?
if(not self._vargs(method) and len(params) >
self._max_args(method) - 1):
raise InvalidParamsError(data={'issue': 'too many arguments'})
result = method(ctx, *params)
elif isinstance(params, dict):
# Do not accept keyword arguments if the jsonrpc version is
# not >=2.0
if request['jsonrpc'] < 20:
raise KeywordError
result = method(ctx, params)
else: # No params
result = method(ctx)
except JSONRPCError:
raise
except Exception as e:
# log.exception('method %s threw an exception' % request['method'])
# Exception was raised inside the method.
newerr = JSONServerError()
newerr.trace = traceback.format_exc()
if len(e.args) == 1:
newerr.data = repr(e.args[0])
else:
newerr.data = repr(e.args)
raise newerr
return result
def call_py(self, ctx, jsondata):
"""
Calls jsonrpc service's method and returns its return value in python
object format or None if there is none.
        This method is the same as call() except the return value is a Python
        object instead of a JSON string. It is mainly useful for
        debugging purposes.
"""
rdata = jsondata
# we already deserialize the json string earlier in the server code, no
# need to do it again
# try:
# rdata = json.loads(jsondata)
# except ValueError:
# raise ParseError
# set some default values for error handling
request = self._get_default_vals()
if isinstance(rdata, dict) and rdata:
# It's a single request.
self._fill_request(request, rdata)
respond = self._handle_request(ctx, request)
# Don't respond to notifications
if respond is None:
return None
return respond
elif isinstance(rdata, list) and rdata:
# It's a batch.
requests = []
responds = []
for rdata_ in rdata:
# set some default values for error handling
request_ = self._get_default_vals()
self._fill_request(request_, rdata_)
requests.append(request_)
for request_ in requests:
respond = self._handle_request(ctx, request_)
# Don't respond to notifications
if respond is not None:
responds.append(respond)
if responds:
return responds
# Nothing to respond.
return None
else:
# empty dict, list or wrong type
raise InvalidRequestError
def _handle_request(self, ctx, request):
"""Handles given request and returns its response."""
if 'types' in self.method_data[request['method']]:
self._validate_params_types(request['method'], request['params'])
result = self._call_method(ctx, request)
# Do not respond to notifications.
if request['id'] is None:
return None
respond = {}
self._fill_ver(request['jsonrpc'], respond)
respond['result'] = result
respond['id'] = request['id']
return respond
class MethodContext(dict):
def __init__(self, logger):
self['client_ip'] = None
self['user_id'] = None
self['authenticated'] = None
self['token'] = None
self['module'] = None
self['method'] = None
self['call_id'] = None
self['rpc_context'] = None
self._debug_levels = set([7, 8, 9, 'DEBUG', 'DEBUG2', 'DEBUG3'])
self._logger = logger
def log_err(self, message):
self._log(log.ERR, message)
def log_info(self, message):
self._log(log.INFO, message)
def log_debug(self, message, level=1):
if level in self._debug_levels:
pass
else:
level = int(level)
if level < 1 or level > 3:
raise ValueError("Illegal log level: " + str(level))
level = level + 6
self._log(level, message)
def set_log_level(self, level):
self._logger.set_log_level(level)
def get_log_level(self):
return self._logger.get_log_level()
def clear_log_level(self):
self._logger.clear_user_log_level()
def _log(self, level, message):
self._logger.log_message(level, message, self['client_ip'],
self['user_id'], self['module'],
self['method'], self['call_id'])
class ServerError(Exception):
'''
The call returned an error. Fields:
name - the name of the error.
code - the error code.
message - a human readable error message.
data - the server side stacktrace.
'''
def __init__(self, name, code, message, data=None, error=None):
super(Exception, self).__init__(message)
self.name = name
self.code = code
self.message = message if message else ''
if data:
self.data = data
elif error:
self.data = {
'error': error
}
def __str__(self):
return self.name + ': ' + str(self.code) + '. ' + self.message + \
'\n' + self.data
def getIPAddress(environ):
xFF = environ.get('HTTP_X_FORWARDED_FOR')
realIP = environ.get('HTTP_X_REAL_IP')
    trustXHeaders = config is None or \
        config.get('dont_trust_x_ip_headers') != 'true'
if (trustXHeaders):
if (xFF):
return xFF.split(',')[0].strip()
if (realIP):
return realIP.strip()
return environ.get('REMOTE_ADDR')
class Application(object):
# Wrap the wsgi handler in a class definition so that we can
# do some initialization and avoid regenerating stuff over
# and over
def logcallback(self):
self.serverlog.set_log_file(self.userlog.get_log_file())
def log(self, level, context, message):
self.serverlog.log_message(level, message, context['client_ip'],
context['user_id'], context['module'],
context['method'], context['call_id'])
def __init__(self):
submod = config.get_service_name() or 'JobBrowserBFF'
self.userlog = log.log(
submod, ip_address=True, authuser=True, module=True, method=True,
call_id=True, changecallback=self.logcallback,
config=config.get_config_file())
self.serverlog = log.log(
submod, ip_address=True, authuser=True, module=True, method=True,
call_id=True, logfile=self.userlog.get_log_file())
self.serverlog.set_log_level(6)
self.rpc_service = JSONRPCServiceCustom()
self.method_authentication = dict()
self.rpc_service.add(impl_JobBrowserBFF.get_jobs,
name='JobBrowserBFF.get_jobs')
self.method_authentication['JobBrowserBFF.get_jobs'] = 'required' # noqa
self.rpc_service.add(impl_JobBrowserBFF.query_jobs,
name='JobBrowserBFF.query_jobs')
self.method_authentication['JobBrowserBFF.query_jobs'] = 'required' # noqa
self.rpc_service.add(impl_JobBrowserBFF.get_job_log,
name='JobBrowserBFF.get_job_log')
self.method_authentication['JobBrowserBFF.get_job_log'] = 'required' # noqa
self.rpc_service.add(impl_JobBrowserBFF.cancel_job,
name='JobBrowserBFF.cancel_job')
self.method_authentication['JobBrowserBFF.cancel_job'] = 'required' # noqa
self.rpc_service.add(impl_JobBrowserBFF.get_job_types,
name='JobBrowserBFF.get_job_types')
self.method_authentication['JobBrowserBFF.get_job_types'] = 'required' # noqa
self.rpc_service.add(impl_JobBrowserBFF.get_job_states,
name='JobBrowserBFF.get_job_states')
self.method_authentication['JobBrowserBFF.get_job_states'] = 'required' # noqa
self.rpc_service.add(impl_JobBrowserBFF.get_client_groups,
name='JobBrowserBFF.get_client_groups')
self.method_authentication['JobBrowserBFF.get_client_groups'] = 'required' # noqa
self.rpc_service.add(impl_JobBrowserBFF.get_searchable_job_fields,
name='JobBrowserBFF.get_searchable_job_fields')
self.method_authentication['JobBrowserBFF.get_searchable_job_fields'] = 'required' # noqa
self.rpc_service.add(impl_JobBrowserBFF.get_sort_specs,
name='JobBrowserBFF.get_sort_specs')
self.method_authentication['JobBrowserBFF.get_sort_specs'] = 'required' # noqa
self.rpc_service.add(impl_JobBrowserBFF.get_log_levels,
name='JobBrowserBFF.get_log_levels')
self.method_authentication['JobBrowserBFF.get_log_levels'] = 'required' # noqa
self.rpc_service.add(impl_JobBrowserBFF.is_admin,
name='JobBrowserBFF.is_admin')
self.method_authentication['JobBrowserBFF.is_admin'] = 'required' # noqa
self.rpc_service.add(impl_JobBrowserBFF.status,
name='JobBrowserBFF.status')
authurl = config.get(AUTH) if config else None
self.auth_client = _KBaseAuth(authurl)
def handle_call(self, environ, ctx):
try:
body_size = int(environ.get('CONTENT_LENGTH', 0))
except ValueError:
body_size = 0
if environ['REQUEST_METHOD'] == 'OPTIONS':
return '', None
if body_size == 0:
return None, self.process_error({
'error': {
'code': -32700,
'message': "Parse error - no request data",
}
}, ctx)
try:
request_body = environ['wsgi.input'].read(body_size)
except Exception as e:
return None, self.process_error({
'error': {
'code': -32603,
'message': 'Internal error',
'data': {
'exception': str(e)
}
}
}, ctx)
try:
request = json.loads(request_body)
except ValueError as ve:
return None, self.process_error({
'error': {
'code': -32700,
'message': "Parse error - parsing the request as json",
'data': {
'exception': str(ve)
}
}
}, ctx)
ctx['module'], ctx['method'] = request['method'].split('.')
ctx['call_id'] = request['id']
ctx['rpc_context'] = {
'call_stack': [{
'time': self.now_in_utc(),
'method': request['method']
}]
}
# Provenance not used in jsonrpc calls as far as I can tell.
# Perhaps propagated to any local methods called? But I don't think any
# pure sdk dynamic services use local methods, so...
# TODO: ensure that prov_action cannot be used, before removing entirely.
# prov_action = {
# 'service': ctx['module'],
# 'method': ctx['method'],
# 'method_params': request['params']
# }
try:
token = environ.get('HTTP_AUTHORIZATION')
# Enforce authentication requirement.
method_name = request['method']
auth_req = self.method_authentication.get(method_name, 'none')
if auth_req != 'none':
if token is None and auth_req == 'required':
# TODO: replace with actual error return
err = JSONServerError()
err.data = (
'Authentication required for ' +
'JobBrowserBFF ' +
'but no authentication header was passed')
raise err
elif token is None and auth_req == 'optional':
pass
else:
try:
user = self.auth_client.get_user(token)
ctx['user_id'] = user
ctx['authenticated'] = 1
ctx['token'] = token
except Exception as e:
if auth_req == 'required':
err = JSONServerError()
err.data = \
"Token validation failed: %s" % e
raise err
if (environ.get('HTTP_X_FORWARDED_FOR')):
self.log(log.INFO, ctx, 'X-Forwarded-For: ' + environ.get('HTTP_X_FORWARDED_FOR'))
self.log(log.INFO, ctx, 'start method')
rpc_result = self.rpc_service.call(ctx, request)
self.log(log.INFO, ctx, 'end method')
if not rpc_result:
return None, self.process_error({
'error': {
'code': -32001,
'message': 'Empty response from method'
}
}, ctx, request)
# TADA!
return rpc_result, None
except JSONRPCError as jre:
response = {
'error': {
'code': jre.code,
'message': jre.message,
'data': jre.data
}
}
trace = jre.trace if hasattr(jre, 'trace') else None
return None, self.process_error(response, ctx, request, trace)
except Exception as ex:
trace = ex.trace if hasattr(ex, 'trace') else None
response = {
'error': {
'code': -32000,
'message': 'Unexpected Server Error'
}
}
return None, self.process_error(response, ctx, request, trace)
def __call__(self, environ, start_response):
ctx = MethodContext(self.userlog)
ctx['client_ip'] = getIPAddress(environ)
result, error = self.handle_call(environ, ctx)
if result is not None:
response_body = result
status = '200 OK'
else:
response_body = error
status = '200 OK'
# TODO: CORS should not be handled here! That should be an administrative
# decision at the proxy level.
response_headers = [
('Access-Control-Allow-Origin', '*'),
('Access-Control-Allow-Headers', environ.get(
'HTTP_ACCESS_CONTROL_REQUEST_HEADERS', 'authorization')),
('content-type', 'application/json'),
('content-length', str(len(response_body)))]
start_response(status, response_headers)
return [response_body.encode('utf8')]
def process_error(self, response, context, request=None, trace=None):
response['jsonrpc'] = '2.0'
# If trace provided, log it.
if trace:
trace = trace.split('\n')[0:-1]
self.log(log.ERR, context, trace)
# TODO: trace down how 'data' is getting corrupted as a string...
if 'data' not in response['error']:
response['error']['data'] = {}
elif not isinstance(response['error']['data'], dict):
response['error']['data'] = {
'message': response['error']['data']
}
response['error']['data']['trace'] = trace
# Errors early in the processing phase before a valid request is
# available will not have a jsonrpc request available.
if request is None:
response['id'] = None
else:
response['id'] = request['id']
return json.dumps(response)
def now_in_utc(self):
# noqa Taken from http://stackoverflow.com/questions/3401428/how-to-get-an-isoformat-datetime-string-including-the-default-timezone @IgnorePep8
dtnow = datetime.datetime.now()
dtutcnow = datetime.datetime.utcnow()
delta = dtnow - dtutcnow
hh, mm = divmod((delta.days * 24 * 60 * 60 + delta.seconds + 30) // 60,
60)
return "%s%+02d:%02d" % (dtnow.isoformat(), hh, mm)
application = Application()
# This is the uwsgi application dictionary. On startup uwsgi will look
# for this dict and pull its configuration from here.
# This simply lists where to "mount" the application in the URL path
#
# This uwsgi module "magically" appears when running the app within
# uwsgi and is not available otherwise, so wrap an exception handler
# around it
#
# To run this server in uwsgi with 4 workers listening on port 9999 use:
# uwsgi -M -p 4 --http :9999 --wsgi-file _this_file_
# To run a using the single threaded python BaseHTTP service
# listening on port 9999 by default execute this file
#
try:
import uwsgi
# Before we do anything with the application, see if the
# configs specify patching all std routines to be asynch
# *ONLY* use this if you are going to wrap the service in
# a wsgi container that has enabled gevent, such as
# uwsgi with the --gevent option
if config is not None and config.get('gevent_monkeypatch_all', False):
print("Monkeypatching std libraries for async")
from gevent import monkey
monkey.patch_all()
uwsgi.applications = {'': application}
except ImportError:
# Not available outside of wsgi, ignore
pass
_proc = None
def start_server(host='localhost', port=0, newprocess=False):
'''
By default, will start the server on localhost on a system assigned port
in the main thread. Execution of the main thread will stay in the server
main loop until interrupted. To run the server in a separate process, and
thus allow the stop_server method to be called, set newprocess = True. This
will also allow returning of the port number.'''
global _proc
if _proc:
raise RuntimeError('server is already running')
httpd = make_server(host, port, application)
port = httpd.server_address[1]
print("Listening on port %s" % port)
if newprocess:
_proc = Process(target=httpd.serve_forever)
_proc.daemon = True
_proc.start()
else:
httpd.serve_forever()
return port
def stop_server():
global _proc
_proc.terminate()
_proc = None
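# A minimal usage sketch for exercising the service locally (the token value
# and the `requests` dependency are assumptions, not part of this module):
#
#   port = start_server(newprocess=True)
#   import requests
#   body = {"jsonrpc": "2.0", "id": "1",
#           "method": "JobBrowserBFF.status", "params": []}
#   resp = requests.post("http://localhost:%d" % port, json=body,
#                        headers={"Authorization": "<kbase-token>"})
#   print(resp.json())
#   stop_server()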
if __name__ == "__main__":
try:
opts, args = getopt(sys.argv[1:], "", ["port=", "host="])
except GetoptError as err:
# print help information and exit:
print(str(err)) # will print something like "option -a not recognized"
sys.exit(2)
port = 9999
host = 'localhost'
for o, a in opts:
if o == '--port':
port = int(a)
elif o == '--host':
host = a
print("Host set to %s" % host)
else:
assert False, "unhandled option"
start_server(host=host, port=port)
|
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import json
import unittest
from copy import deepcopy
from parameterized import parameterized
from airflow import AirflowException
from airflow.gcp.hooks.cloud_storage_transfer_service import (
DESCRIPTION, FILTER_JOB_NAMES, FILTER_PROJECT_ID, METADATA, OPERATIONS, PROJECT_ID, STATUS,
TIME_TO_SLEEP_IN_SECONDS, TRANSFER_JOB, TRANSFER_JOB_FIELD_MASK, TRANSFER_JOBS, GcpTransferJobsStatus,
GcpTransferOperationStatus, GCPTransferServiceHook,
)
from tests.compat import PropertyMock, mock
from tests.gcp.utils.base_gcp_mock import (
GCP_PROJECT_ID_HOOK_UNIT_TEST, mock_base_gcp_hook_default_project_id,
mock_base_gcp_hook_no_default_project_id,
)
NAME = "name"
TEST_PROJECT_ID = 'project-id'
TEST_BODY = {DESCRIPTION: 'AAA', PROJECT_ID: TEST_PROJECT_ID}
TEST_TRANSFER_JOB_NAME = "transfer-job"
TEST_TRANSFER_OPERATION_NAME = "transfer-operation"
TEST_TRANSFER_JOB = {NAME: TEST_TRANSFER_JOB_NAME}
TEST_TRANSFER_OPERATION = {NAME: TEST_TRANSFER_OPERATION_NAME}
TEST_TRANSFER_JOB_FILTER = {FILTER_PROJECT_ID: TEST_PROJECT_ID, FILTER_JOB_NAMES: [TEST_TRANSFER_JOB_NAME]}
TEST_TRANSFER_OPERATION_FILTER = {
FILTER_PROJECT_ID: TEST_PROJECT_ID,
FILTER_JOB_NAMES: [TEST_TRANSFER_JOB_NAME],
}
TEST_UPDATE_TRANSFER_JOB_BODY = {
TRANSFER_JOB: {DESCRIPTION: 'description-1'},
PROJECT_ID: TEST_PROJECT_ID,
TRANSFER_JOB_FIELD_MASK: 'description',
}
def _without_key(body, key):
obj = deepcopy(body)
del obj[key]
return obj
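# Example: _without_key(TEST_BODY, PROJECT_ID) returns {DESCRIPTION: 'AAA'};
# the deepcopy means TEST_BODY itself is left untouched.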
class TestGCPTransferServiceHookWithPassedProjectId(unittest.TestCase):
def setUp(self):
with mock.patch(
'airflow.gcp.hooks.base.GoogleCloudBaseHook.__init__',
new=mock_base_gcp_hook_no_default_project_id,
):
self.gct_hook = GCPTransferServiceHook(gcp_conn_id='test')
@mock.patch("airflow.gcp.hooks.cloud_storage_transfer_service.GCPTransferServiceHook._authorize")
@mock.patch("airflow.gcp.hooks.cloud_storage_transfer_service.build")
def test_gct_client_creation(self, mock_build, mock_authorize):
result = self.gct_hook.get_conn()
mock_build.assert_called_once_with(
'storagetransfer', 'v1', http=mock_authorize.return_value, cache_discovery=False
)
self.assertEqual(mock_build.return_value, result)
self.assertEqual(self.gct_hook._conn, result)
@mock.patch(
'airflow.gcp.hooks.base.GoogleCloudBaseHook.project_id',
new_callable=PropertyMock,
return_value=None
)
@mock.patch('airflow.gcp.hooks.cloud_storage_transfer_service.GCPTransferServiceHook.get_conn')
def test_create_transfer_job(self, get_conn, mock_project_id):
create_method = get_conn.return_value.transferJobs.return_value.create
execute_method = create_method.return_value.execute
execute_method.return_value = TEST_TRANSFER_JOB
res = self.gct_hook.create_transfer_job(body=TEST_BODY)
self.assertEqual(res, TEST_TRANSFER_JOB)
create_method.assert_called_once_with(body=TEST_BODY)
execute_method.assert_called_once_with(num_retries=5)
@mock.patch('airflow.gcp.hooks.cloud_storage_transfer_service.GCPTransferServiceHook.get_conn')
def test_get_transfer_job(self, get_conn):
get_method = get_conn.return_value.transferJobs.return_value.get
execute_method = get_method.return_value.execute
execute_method.return_value = TEST_TRANSFER_JOB
res = self.gct_hook.get_transfer_job(job_name=TEST_TRANSFER_JOB_NAME, project_id=TEST_PROJECT_ID)
self.assertIsNotNone(res)
self.assertEqual(TEST_TRANSFER_JOB_NAME, res[NAME])
get_method.assert_called_once_with(jobName=TEST_TRANSFER_JOB_NAME, projectId=TEST_PROJECT_ID)
execute_method.assert_called_once_with(num_retries=5)
@mock.patch(
'airflow.gcp.hooks.base.GoogleCloudBaseHook.project_id',
new_callable=PropertyMock,
return_value=None
)
@mock.patch('airflow.gcp.hooks.cloud_storage_transfer_service.GCPTransferServiceHook.get_conn')
def test_list_transfer_job(self, get_conn, mock_project_id):
list_method = get_conn.return_value.transferJobs.return_value.list
list_execute_method = list_method.return_value.execute
list_execute_method.return_value = {TRANSFER_JOBS: [TEST_TRANSFER_JOB]}
list_next = get_conn.return_value.transferJobs.return_value.list_next
list_next.return_value = None
res = self.gct_hook.list_transfer_job(request_filter=TEST_TRANSFER_JOB_FILTER)
self.assertIsNotNone(res)
self.assertEqual(res, [TEST_TRANSFER_JOB])
list_method.assert_called_once_with(filter=mock.ANY)
args, kwargs = list_method.call_args_list[0]
self.assertEqual(
json.loads(kwargs['filter']),
{FILTER_PROJECT_ID: TEST_PROJECT_ID, FILTER_JOB_NAMES: [TEST_TRANSFER_JOB_NAME]},
)
list_execute_method.assert_called_once_with(num_retries=5)
@mock.patch(
'airflow.gcp.hooks.base.GoogleCloudBaseHook.project_id',
new_callable=PropertyMock,
return_value=None
)
@mock.patch('airflow.gcp.hooks.cloud_storage_transfer_service.GCPTransferServiceHook.get_conn')
def test_update_transfer_job(self, get_conn, mock_project_id):
update_method = get_conn.return_value.transferJobs.return_value.patch
execute_method = update_method.return_value.execute
execute_method.return_value = TEST_TRANSFER_JOB
res = self.gct_hook.update_transfer_job(
job_name=TEST_TRANSFER_JOB_NAME, body=TEST_UPDATE_TRANSFER_JOB_BODY
)
self.assertIsNotNone(res)
update_method.assert_called_once_with(
jobName=TEST_TRANSFER_JOB_NAME, body=TEST_UPDATE_TRANSFER_JOB_BODY
)
execute_method.assert_called_once_with(num_retries=5)
@mock.patch('airflow.gcp.hooks.cloud_storage_transfer_service.GCPTransferServiceHook.get_conn')
def test_delete_transfer_job(self, get_conn):
update_method = get_conn.return_value.transferJobs.return_value.patch
execute_method = update_method.return_value.execute
self.gct_hook.delete_transfer_job(job_name=TEST_TRANSFER_JOB_NAME, project_id=TEST_PROJECT_ID)
update_method.assert_called_once_with(
jobName=TEST_TRANSFER_JOB_NAME,
body={
PROJECT_ID: TEST_PROJECT_ID,
TRANSFER_JOB: {STATUS: GcpTransferJobsStatus.DELETED},
TRANSFER_JOB_FIELD_MASK: STATUS,
},
)
execute_method.assert_called_once_with(num_retries=5)
@mock.patch('airflow.gcp.hooks.cloud_storage_transfer_service.GCPTransferServiceHook.get_conn')
def test_cancel_transfer_operation(self, get_conn):
cancel_method = get_conn.return_value.transferOperations.return_value.cancel
execute_method = cancel_method.return_value.execute
self.gct_hook.cancel_transfer_operation(operation_name=TEST_TRANSFER_OPERATION_NAME)
cancel_method.assert_called_once_with(name=TEST_TRANSFER_OPERATION_NAME)
execute_method.assert_called_once_with(num_retries=5)
@mock.patch('airflow.gcp.hooks.cloud_storage_transfer_service.GCPTransferServiceHook.get_conn')
def test_get_transfer_operation(self, get_conn):
get_method = get_conn.return_value.transferOperations.return_value.get
execute_method = get_method.return_value.execute
execute_method.return_value = TEST_TRANSFER_OPERATION
res = self.gct_hook.get_transfer_operation(operation_name=TEST_TRANSFER_OPERATION_NAME)
self.assertEqual(res, TEST_TRANSFER_OPERATION)
get_method.assert_called_once_with(name=TEST_TRANSFER_OPERATION_NAME)
execute_method.assert_called_once_with(num_retries=5)
@mock.patch(
'airflow.gcp.hooks.base.GoogleCloudBaseHook.project_id',
new_callable=PropertyMock,
return_value=None
)
@mock.patch('airflow.gcp.hooks.cloud_storage_transfer_service.GCPTransferServiceHook.get_conn')
def test_list_transfer_operation(self, get_conn, mock_project_id):
list_method = get_conn.return_value.transferOperations.return_value.list
list_execute_method = list_method.return_value.execute
list_execute_method.return_value = {OPERATIONS: [TEST_TRANSFER_OPERATION]}
list_next = get_conn.return_value.transferOperations.return_value.list_next
list_next.return_value = None
res = self.gct_hook.list_transfer_operations(request_filter=TEST_TRANSFER_OPERATION_FILTER)
self.assertIsNotNone(res)
self.assertEqual(res, [TEST_TRANSFER_OPERATION])
list_method.assert_called_once_with(filter=mock.ANY, name='transferOperations')
args, kwargs = list_method.call_args_list[0]
self.assertEqual(
json.loads(kwargs['filter']),
{FILTER_PROJECT_ID: TEST_PROJECT_ID, FILTER_JOB_NAMES: [TEST_TRANSFER_JOB_NAME]},
)
list_execute_method.assert_called_once_with(num_retries=5)
@mock.patch('airflow.gcp.hooks.cloud_storage_transfer_service.GCPTransferServiceHook.get_conn')
def test_pause_transfer_operation(self, get_conn):
pause_method = get_conn.return_value.transferOperations.return_value.pause
execute_method = pause_method.return_value.execute
self.gct_hook.pause_transfer_operation(operation_name=TEST_TRANSFER_OPERATION_NAME)
pause_method.assert_called_once_with(name=TEST_TRANSFER_OPERATION_NAME)
execute_method.assert_called_once_with(num_retries=5)
@mock.patch('airflow.gcp.hooks.cloud_storage_transfer_service.GCPTransferServiceHook.get_conn')
def test_resume_transfer_operation(self, get_conn):
resume_method = get_conn.return_value.transferOperations.return_value.resume
execute_method = resume_method.return_value.execute
self.gct_hook.resume_transfer_operation(operation_name=TEST_TRANSFER_OPERATION_NAME)
resume_method.assert_called_once_with(name=TEST_TRANSFER_OPERATION_NAME)
execute_method.assert_called_once_with(num_retries=5)
@mock.patch(
'airflow.gcp.hooks.base.GoogleCloudBaseHook.project_id',
new_callable=PropertyMock,
return_value=None
)
@mock.patch('airflow.gcp.hooks.cloud_storage_transfer_service.time.sleep')
@mock.patch('airflow.gcp.hooks.cloud_storage_transfer_service.'
'GCPTransferServiceHook.list_transfer_operations')
def test_wait_for_transfer_job(self, mock_list, mock_sleep, mock_project_id):
mock_list.side_effect = [
[{METADATA: {STATUS: GcpTransferOperationStatus.IN_PROGRESS}}],
[{METADATA: {STATUS: GcpTransferOperationStatus.SUCCESS}}],
]
job_name = 'transferJobs/test-job'
self.gct_hook.wait_for_transfer_job({PROJECT_ID: TEST_PROJECT_ID, 'name': job_name})
calls = [
mock.call(request_filter={FILTER_PROJECT_ID: TEST_PROJECT_ID, FILTER_JOB_NAMES: [job_name]}),
mock.call(request_filter={FILTER_PROJECT_ID: TEST_PROJECT_ID, FILTER_JOB_NAMES: [job_name]})
]
mock_list.assert_has_calls(calls, any_order=True)
mock_sleep.assert_called_once_with(TIME_TO_SLEEP_IN_SECONDS)
@mock.patch(
'airflow.gcp.hooks.base.GoogleCloudBaseHook.project_id',
new_callable=PropertyMock,
return_value=None
)
@mock.patch('airflow.gcp.hooks.cloud_storage_transfer_service.time.sleep')
@mock.patch('airflow.gcp.hooks.cloud_storage_transfer_service.GCPTransferServiceHook.get_conn')
def test_wait_for_transfer_job_failed(
self, mock_get_conn, mock_sleep, mock_project_id
):
list_method = mock_get_conn.return_value.transferOperations.return_value.list
list_execute_method = list_method.return_value.execute
list_execute_method.return_value = {
OPERATIONS: [
{NAME: TEST_TRANSFER_OPERATION_NAME, METADATA: {STATUS: GcpTransferOperationStatus.FAILED}}
]
}
mock_get_conn.return_value.transferOperations.return_value.list_next.return_value = None
with self.assertRaises(AirflowException):
self.gct_hook.wait_for_transfer_job({PROJECT_ID: TEST_PROJECT_ID, NAME: 'transferJobs/test-job'})
self.assertTrue(list_method.called)
@mock.patch(
'airflow.gcp.hooks.base.GoogleCloudBaseHook.project_id',
new_callable=PropertyMock,
return_value=None
)
@mock.patch('airflow.gcp.hooks.cloud_storage_transfer_service.time.sleep')
@mock.patch('airflow.gcp.hooks.cloud_storage_transfer_service.GCPTransferServiceHook.get_conn')
def test_wait_for_transfer_job_expect_failed(
self, get_conn, mock_sleep, mock_project_id
): # pylint: disable=unused-argument
list_method = get_conn.return_value.transferOperations.return_value.list
list_execute_method = list_method.return_value.execute
list_execute_method.return_value = {
OPERATIONS: [
{NAME: TEST_TRANSFER_OPERATION_NAME, METADATA: {STATUS: GcpTransferOperationStatus.FAILED}}
]
}
get_conn.return_value.transferOperations.return_value.list_next.return_value = None
with self.assertRaisesRegex(
AirflowException, "An unexpected operation status was encountered. Expected: SUCCESS"
):
self.gct_hook.wait_for_transfer_job(
job={PROJECT_ID: 'test-project', NAME: 'transferJobs/test-job'},
expected_statuses=GcpTransferOperationStatus.SUCCESS,
)
@parameterized.expand(
[
([GcpTransferOperationStatus.ABORTED], (GcpTransferOperationStatus.IN_PROGRESS,)),
(
[GcpTransferOperationStatus.SUCCESS, GcpTransferOperationStatus.ABORTED],
(GcpTransferOperationStatus.IN_PROGRESS,),
),
(
[GcpTransferOperationStatus.PAUSED, GcpTransferOperationStatus.ABORTED],
(GcpTransferOperationStatus.IN_PROGRESS,),
),
([GcpTransferOperationStatus.ABORTED], (GcpTransferOperationStatus.IN_PROGRESS,)),
(
[GcpTransferOperationStatus.SUCCESS, GcpTransferOperationStatus.ABORTED],
(GcpTransferOperationStatus.IN_PROGRESS,),
),
(
[GcpTransferOperationStatus.PAUSED, GcpTransferOperationStatus.ABORTED],
(GcpTransferOperationStatus.IN_PROGRESS,),
),
]
)
def test_operations_contain_expected_statuses_red_path(self, statuses, expected_statuses):
operations = [{NAME: TEST_TRANSFER_OPERATION_NAME, METADATA: {STATUS: status}} for status in statuses]
with self.assertRaisesRegex(
AirflowException,
"An unexpected operation status was encountered. Expected: {}".format(
", ".join(expected_statuses)
),
):
GCPTransferServiceHook.operations_contain_expected_statuses(
operations, GcpTransferOperationStatus.IN_PROGRESS
)
@parameterized.expand(
[
([GcpTransferOperationStatus.ABORTED], GcpTransferOperationStatus.ABORTED),
(
[GcpTransferOperationStatus.SUCCESS, GcpTransferOperationStatus.ABORTED],
GcpTransferOperationStatus.ABORTED,
),
(
[GcpTransferOperationStatus.PAUSED, GcpTransferOperationStatus.ABORTED],
GcpTransferOperationStatus.ABORTED,
),
([GcpTransferOperationStatus.ABORTED], (GcpTransferOperationStatus.ABORTED,)),
(
[GcpTransferOperationStatus.SUCCESS, GcpTransferOperationStatus.ABORTED],
(GcpTransferOperationStatus.ABORTED,),
),
(
[GcpTransferOperationStatus.PAUSED, GcpTransferOperationStatus.ABORTED],
(GcpTransferOperationStatus.ABORTED,),
),
]
)
def test_operations_contain_expected_statuses_green_path(self, statuses, expected_statuses):
operations = [{NAME: TEST_TRANSFER_OPERATION_NAME, METADATA: {STATUS: status}} for status in statuses]
result = GCPTransferServiceHook.operations_contain_expected_statuses(operations, expected_statuses)
self.assertTrue(result)
class TestGCPTransferServiceHookWithProjectIdFromConnection(unittest.TestCase):
def setUp(self):
with mock.patch(
'airflow.gcp.hooks.base.GoogleCloudBaseHook.__init__',
new=mock_base_gcp_hook_default_project_id,
):
self.gct_hook = GCPTransferServiceHook(gcp_conn_id='test')
@mock.patch("airflow.gcp.hooks.cloud_storage_transfer_service.GCPTransferServiceHook._authorize")
@mock.patch("airflow.gcp.hooks.cloud_storage_transfer_service.build")
def test_gct_client_creation(self, mock_build, mock_authorize):
result = self.gct_hook.get_conn()
mock_build.assert_called_once_with(
'storagetransfer', 'v1', http=mock_authorize.return_value, cache_discovery=False
)
self.assertEqual(mock_build.return_value, result)
self.assertEqual(self.gct_hook._conn, result)
@mock.patch(
'airflow.gcp.hooks.base.GoogleCloudBaseHook.project_id',
new_callable=PropertyMock,
return_value=GCP_PROJECT_ID_HOOK_UNIT_TEST
)
@mock.patch('airflow.gcp.hooks.cloud_storage_transfer_service.GCPTransferServiceHook.get_conn')
def test_create_transfer_job(self, get_conn, mock_project_id):
create_method = get_conn.return_value.transferJobs.return_value.create
execute_method = create_method.return_value.execute
execute_method.return_value = deepcopy(TEST_TRANSFER_JOB)
res = self.gct_hook.create_transfer_job(body=self._without_project_id(TEST_BODY))
self.assertEqual(res, TEST_TRANSFER_JOB)
create_method.assert_called_once_with(body=self._with_project_id(TEST_BODY, 'example-project'))
execute_method.assert_called_once_with(num_retries=5)
@mock.patch(
'airflow.gcp.hooks.base.GoogleCloudBaseHook.project_id',
new_callable=PropertyMock,
return_value=GCP_PROJECT_ID_HOOK_UNIT_TEST
)
@mock.patch('airflow.gcp.hooks.cloud_storage_transfer_service.GCPTransferServiceHook.get_conn')
def test_get_transfer_job(self, get_conn, mock_project_id):
get_method = get_conn.return_value.transferJobs.return_value.get
execute_method = get_method.return_value.execute
execute_method.return_value = TEST_TRANSFER_JOB
res = self.gct_hook.get_transfer_job(job_name=TEST_TRANSFER_JOB_NAME)
self.assertIsNotNone(res)
self.assertEqual(TEST_TRANSFER_JOB_NAME, res[NAME])
get_method.assert_called_once_with(jobName=TEST_TRANSFER_JOB_NAME, projectId='example-project')
execute_method.assert_called_once_with(num_retries=5)
@mock.patch(
'airflow.gcp.hooks.base.GoogleCloudBaseHook.project_id',
new_callable=PropertyMock,
return_value=GCP_PROJECT_ID_HOOK_UNIT_TEST
)
@mock.patch('airflow.gcp.hooks.cloud_storage_transfer_service.GCPTransferServiceHook.get_conn')
def test_list_transfer_job(self, get_conn, mock_project_id):
list_method = get_conn.return_value.transferJobs.return_value.list
list_execute_method = list_method.return_value.execute
list_execute_method.return_value = {TRANSFER_JOBS: [TEST_TRANSFER_JOB]}
list_next = get_conn.return_value.transferJobs.return_value.list_next
list_next.return_value = None
res = self.gct_hook.list_transfer_job(
request_filter=_without_key(TEST_TRANSFER_JOB_FILTER, FILTER_PROJECT_ID)
)
self.assertIsNotNone(res)
self.assertEqual(res, [TEST_TRANSFER_JOB])
list_method.assert_called_once_with(filter=mock.ANY)
args, kwargs = list_method.call_args_list[0]
self.assertEqual(
json.loads(kwargs['filter']),
{FILTER_PROJECT_ID: 'example-project', FILTER_JOB_NAMES: [TEST_TRANSFER_JOB_NAME]},
)
list_execute_method.assert_called_once_with(num_retries=5)
@mock.patch(
'airflow.gcp.hooks.base.GoogleCloudBaseHook.project_id',
new_callable=PropertyMock,
return_value=GCP_PROJECT_ID_HOOK_UNIT_TEST
)
@mock.patch('airflow.gcp.hooks.cloud_storage_transfer_service.GCPTransferServiceHook.get_conn')
def test_update_transfer_job(self, get_conn, mock_project_id):
update_method = get_conn.return_value.transferJobs.return_value.patch
execute_method = update_method.return_value.execute
execute_method.return_value = TEST_TRANSFER_JOB
res = self.gct_hook.update_transfer_job(
job_name=TEST_TRANSFER_JOB_NAME, body=self._without_project_id(TEST_UPDATE_TRANSFER_JOB_BODY)
)
self.assertIsNotNone(res)
update_method.assert_called_once_with(
jobName=TEST_TRANSFER_JOB_NAME,
body=self._with_project_id(TEST_UPDATE_TRANSFER_JOB_BODY, 'example-project'),
)
execute_method.assert_called_once_with(num_retries=5)
@mock.patch('airflow.gcp.hooks.cloud_storage_transfer_service.GCPTransferServiceHook.get_conn')
def test_delete_transfer_job(self, get_conn):
update_method = get_conn.return_value.transferJobs.return_value.patch
execute_method = update_method.return_value.execute
self.gct_hook.delete_transfer_job(job_name=TEST_TRANSFER_JOB_NAME, project_id=TEST_PROJECT_ID)
update_method.assert_called_once_with(
jobName=TEST_TRANSFER_JOB_NAME,
body={
PROJECT_ID: TEST_PROJECT_ID,
TRANSFER_JOB: {STATUS: 'DELETED'},
TRANSFER_JOB_FIELD_MASK: STATUS,
},
)
execute_method.assert_called_once_with(num_retries=5)
@mock.patch('airflow.gcp.hooks.cloud_storage_transfer_service.GCPTransferServiceHook.get_conn')
def test_cancel_transfer_operation(self, get_conn):
cancel_method = get_conn.return_value.transferOperations.return_value.cancel
execute_method = cancel_method.return_value.execute
self.gct_hook.cancel_transfer_operation(operation_name=TEST_TRANSFER_OPERATION_NAME)
cancel_method.assert_called_once_with(name=TEST_TRANSFER_OPERATION_NAME)
execute_method.assert_called_once_with(num_retries=5)
@mock.patch('airflow.gcp.hooks.cloud_storage_transfer_service.GCPTransferServiceHook.get_conn')
def test_get_transfer_operation(self, get_conn):
get_method = get_conn.return_value.transferOperations.return_value.get
execute_method = get_method.return_value.execute
execute_method.return_value = TEST_TRANSFER_OPERATION
res = self.gct_hook.get_transfer_operation(operation_name=TEST_TRANSFER_OPERATION_NAME)
self.assertEqual(res, TEST_TRANSFER_OPERATION)
get_method.assert_called_once_with(name=TEST_TRANSFER_OPERATION_NAME)
execute_method.assert_called_once_with(num_retries=5)
@mock.patch(
'airflow.gcp.hooks.base.GoogleCloudBaseHook.project_id',
new_callable=PropertyMock,
return_value=GCP_PROJECT_ID_HOOK_UNIT_TEST
)
@mock.patch('airflow.gcp.hooks.cloud_storage_transfer_service.GCPTransferServiceHook.get_conn')
def test_list_transfer_operation(self, get_conn, mock_project_id):
list_method = get_conn.return_value.transferOperations.return_value.list
list_execute_method = list_method.return_value.execute
list_execute_method.return_value = {OPERATIONS: [TEST_TRANSFER_OPERATION]}
list_next = get_conn.return_value.transferOperations.return_value.list_next
list_next.return_value = None
res = self.gct_hook.list_transfer_operations(
request_filter=_without_key(TEST_TRANSFER_OPERATION_FILTER, FILTER_PROJECT_ID)
)
self.assertIsNotNone(res)
self.assertEqual(res, [TEST_TRANSFER_OPERATION])
list_method.assert_called_once_with(filter=mock.ANY, name='transferOperations')
args, kwargs = list_method.call_args_list[0]
self.assertEqual(
json.loads(kwargs['filter']),
{FILTER_PROJECT_ID: 'example-project', FILTER_JOB_NAMES: [TEST_TRANSFER_JOB_NAME]},
)
list_execute_method.assert_called_once_with(num_retries=5)
@staticmethod
def _without_project_id(body):
body = deepcopy(body)
del body[PROJECT_ID]
return body
@staticmethod
def _with_project_id(body, project_id):
body = deepcopy(body)
del body[PROJECT_ID]
body[PROJECT_ID] = project_id
return body
class TestGCPTransferServiceHookWithoutProjectId(unittest.TestCase):
def setUp(self):
with mock.patch(
'airflow.gcp.hooks.base.GoogleCloudBaseHook.__init__',
new=mock_base_gcp_hook_no_default_project_id,
):
self.gct_hook = GCPTransferServiceHook(gcp_conn_id='test')
@mock.patch("airflow.gcp.hooks.cloud_storage_transfer_service.GCPTransferServiceHook._authorize")
@mock.patch("airflow.gcp.hooks.cloud_storage_transfer_service.build")
def test_gct_client_creation(self, mock_build, mock_authorize):
result = self.gct_hook.get_conn()
mock_build.assert_called_once_with(
'storagetransfer', 'v1', http=mock_authorize.return_value, cache_discovery=False
)
self.assertEqual(mock_build.return_value, result)
self.assertEqual(self.gct_hook._conn, result)
@mock.patch(
'airflow.gcp.hooks.base.GoogleCloudBaseHook.project_id',
new_callable=PropertyMock,
return_value=None
)
@mock.patch('airflow.gcp.hooks.cloud_storage_transfer_service.GCPTransferServiceHook.get_conn')
def test_create_transfer_job(self, get_conn, mock_project_id):
create_method = get_conn.return_value.transferJobs.return_value.create
execute_method = create_method.return_value.execute
execute_method.return_value = deepcopy(TEST_TRANSFER_JOB)
with self.assertRaises(AirflowException) as e:
self.gct_hook.create_transfer_job(body=_without_key(TEST_BODY, PROJECT_ID))
self.assertEqual(
'The project id must be passed either as `projectId` key in `body` parameter or as project_id '
'extra in GCP connection definition. Both are not set!',
str(e.exception),
)
@mock.patch(
'airflow.gcp.hooks.base.GoogleCloudBaseHook.project_id',
new_callable=PropertyMock,
return_value=None
)
@mock.patch('airflow.gcp.hooks.cloud_storage_transfer_service.GCPTransferServiceHook.get_conn')
def test_get_transfer_job(self, get_conn, mock_project_id):
get_method = get_conn.return_value.transferJobs.return_value.get
execute_method = get_method.return_value.execute
execute_method.return_value = TEST_TRANSFER_JOB
with self.assertRaises(AirflowException) as e:
self.gct_hook.get_transfer_job(job_name=TEST_TRANSFER_JOB_NAME)
self.assertEqual(
'The project id must be passed either as keyword project_id '
'parameter or as project_id extra in GCP connection definition. '
'Both are not set!',
str(e.exception),
)
@mock.patch(
'airflow.gcp.hooks.base.GoogleCloudBaseHook.project_id',
new_callable=PropertyMock,
return_value=None
)
@mock.patch('airflow.gcp.hooks.cloud_storage_transfer_service.GCPTransferServiceHook.get_conn')
def test_list_transfer_job(self, get_conn, mock_project_id):
list_method = get_conn.return_value.transferJobs.return_value.list
list_execute_method = list_method.return_value.execute
list_execute_method.return_value = {"transferJobs": [TEST_TRANSFER_JOB]}
list_next = get_conn.return_value.transferJobs.return_value.list_next
list_next.return_value = None
with self.assertRaises(AirflowException) as e:
self.gct_hook.list_transfer_job(
request_filter=_without_key(TEST_TRANSFER_JOB_FILTER, FILTER_PROJECT_ID))
self.assertEqual(
'The project id must be passed either as `project_id` key in `filter` parameter or as '
'project_id extra in GCP connection definition. Both are not set!',
str(e.exception),
)
@mock.patch(
'airflow.gcp.hooks.base.GoogleCloudBaseHook.project_id',
new_callable=PropertyMock,
return_value=None
)
@mock.patch('airflow.gcp.hooks.cloud_storage_transfer_service.GCPTransferServiceHook.get_conn')
def test_list_transfer_operation_multiple_page(self, get_conn, mock_project_id):
pages_requests = [
mock.Mock(**{'execute.return_value': {"operations": [TEST_TRANSFER_OPERATION]}}) for _ in range(4)
]
        transfer_operation_mock = mock.Mock(
            **{'list.return_value': pages_requests[0], 'list_next.side_effect': pages_requests[1:] + [None]}
        )
get_conn.return_value.transferOperations.return_value = transfer_operation_mock
res = self.gct_hook.list_transfer_operations(request_filter=TEST_TRANSFER_OPERATION_FILTER)
self.assertEqual(res, [TEST_TRANSFER_OPERATION] * 4)
@mock.patch(
'airflow.gcp.hooks.base.GoogleCloudBaseHook.project_id',
new_callable=PropertyMock,
return_value=None
)
@mock.patch('airflow.gcp.hooks.cloud_storage_transfer_service.GCPTransferServiceHook.get_conn')
def test_update_transfer_job(self, get_conn, mock_project_id):
update_method = get_conn.return_value.transferJobs.return_value.patch
execute_method = update_method.return_value.execute
execute_method.return_value = TEST_TRANSFER_JOB
with self.assertRaises(AirflowException) as e:
self.gct_hook.update_transfer_job(
job_name=TEST_TRANSFER_JOB_NAME, body=_without_key(TEST_UPDATE_TRANSFER_JOB_BODY, PROJECT_ID)
)
self.assertEqual(
'The project id must be passed either as `projectId` key in `body` parameter or as project_id '
'extra in GCP connection definition. Both are not set!',
str(e.exception),
)
@mock.patch(
'airflow.gcp.hooks.base.GoogleCloudBaseHook.project_id',
new_callable=PropertyMock,
return_value=None
)
@mock.patch('airflow.gcp.hooks.cloud_storage_transfer_service.GCPTransferServiceHook.get_conn')
def test_delete_transfer_job(self, get_conn, mock_project_id): # pylint: disable=unused-argument
with self.assertRaises(AirflowException) as e:
self.gct_hook.delete_transfer_job( # pylint: disable=no-value-for-parameter
job_name=TEST_TRANSFER_JOB_NAME)
self.assertEqual(
'The project id must be passed either as keyword project_id parameter or as project_id extra in '
'GCP connection definition. Both are not set!',
str(e.exception),
)
@mock.patch(
'airflow.gcp.hooks.base.GoogleCloudBaseHook.project_id',
new_callable=PropertyMock,
return_value=None
)
@mock.patch('airflow.gcp.hooks.cloud_storage_transfer_service.GCPTransferServiceHook.get_conn')
def test_list_transfer_operation(self, get_conn, mock_project_id):
list_method = get_conn.return_value.transferOperations.return_value.list
list_execute_method = list_method.return_value.execute
list_execute_method.return_value = {"operations": [TEST_TRANSFER_OPERATION]}
list_next = get_conn.return_value.transferOperations.return_value.list_next
list_next.return_value = None
with self.assertRaises(AirflowException) as e:
self.gct_hook.list_transfer_operations(
request_filter=_without_key(TEST_TRANSFER_OPERATION_FILTER, FILTER_PROJECT_ID)
)
self.assertEqual(
'The project id must be passed either as `project_id` key in `filter` parameter or as project_id '
'extra in GCP connection definition. Both are not set!',
str(e.exception),
)
|
__doc__ = """
Manipulate audio with a simple and easy high level interface.
See the README file for details, usage info, and a list of gotchas.
"""
from setuptools import setup
setup(
name='pydub',
version='0.11.1',
author='James Robert',
author_email='jiaaro@gmail.com',
    description='Manipulate audio with a simple and easy high level interface',
license='MIT',
keywords='audio sound high-level',
url='http://pydub.com',
packages=['pydub'],
long_description=__doc__,
classifiers=[
'Development Status :: 5 - Production/Stable',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Intended Audience :: Developers',
'Operating System :: OS Independent',
"Topic :: Multimedia :: Sound/Audio",
"Topic :: Multimedia :: Sound/Audio :: Analysis",
"Topic :: Multimedia :: Sound/Audio :: Conversion",
"Topic :: Multimedia :: Sound/Audio :: Editors",
"Topic :: Multimedia :: Sound/Audio :: Mixers",
"Topic :: Software Development :: Libraries",
'Topic :: Utilities',
]
)
|
for num in range(1, 51):
if num % 3 == 0 and num % 5 == 0:
print("fizzbuzz")
    elif num % 3 == 0:
        print("fizz")
    elif num % 5 == 0:
        print("buzz")
else:
print(num)
|
from unittest import TestCase
import pytest
from django.core.exceptions import ValidationError
from va_explorer.tests.factories import UserFactory
from va_explorer.users.models import UserPasswordHistory
from va_explorer.users.validators import PasswordComplexityValidator, PasswordHistoryValidator
pytestmark = pytest.mark.django_db
class TestPasswordComplexityValidator(TestCase):
def setUp(self):
self.user = UserFactory.create()
self.validator = PasswordComplexityValidator()
def test_rejects_no_number(self):
with self.assertRaisesRegex(ValidationError, "number"):
self.validator.validate("Password!", self.user)
def test_rejects_no_lower(self):
with self.assertRaisesRegex(ValidationError, "lowercase"):
self.validator.validate("PASSWORD!", self.user)
def test_rejects_no_upper(self):
with self.assertRaisesRegex(ValidationError, "uppercase"):
self.validator.validate("password!", self.user)
def test_rejects_no_special(self):
with self.assertRaisesRegex(ValidationError, "nonalphanumeric"):
self.validator.validate("Password", self.user)
def test_rejects_multiple(self):
# Expect no_number, no_upper, and no_special in that order
with self.assertRaisesRegex(ValidationError, "(number).*(uppercase).*(nonalphanumeric)"):
self.validator.validate("pass", self.user)
def test_accepts_complex_password(self):
try:
self.validator.validate('Password1!', self.user)
except ValidationError:
self.fail("PasswordComplexityValidator raised ValidationError unexpectedly")
class TestPasswordHistoryValidator(TestCase):
def setUp(self):
self.user = UserFactory.create()
self.validator = PasswordHistoryValidator()
def test_accepts_new_password(self):
try:
self.validator.validate('test1', self.user)
except ValidationError:
self.fail("PasswordHistoryValidator raised ValidationError unexpectedly")
def test_rejects_repeated_password(self):
for i in range(0, 13):
self.user.set_password(f"test{i}")
self.user.save()
with self.assertRaises(ValidationError):
self.validator.validate("test7", self.user)
def test_keeps_limited_history(self):
for i in range(0, 13):
self.user.set_password(f"test{i}")
self.user.save()
self.validator.validate("new_password", self.user)
password_history = UserPasswordHistory.objects.filter(user_id=self.user)
self.assertEqual(password_history.count(), 12)
|
#!/usr/bin/env python
# Copyright 2017 The Forseti Security Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test forwarding rules data."""
from google.cloud.security.common.util import parser
FAKE_FORWARDING_RULE1 = {
"kind": "compute#forwardingRule",
"description": "",
"IPAddress": "10.10.10.1",
"region": "https://www.googleapis.com/compute/v1/projects/project1/regions/us-central1",
"loadBalancingScheme": "EXTERNAL",
"target": "https://www.googleapis.com/compute/v1/projects/project1/regions/us-central1/targetPools/project1-pool",
"portRange": "80-80",
"IPProtocol": "TCP",
"creationTimestamp": "2017-05-05T12:00:01.000-07:00",
"id": "111111111111",
"selfLink": "https://www.googleapis.com/compute/v1/projects/project1/regions/us-central1/forwardingRules/project1-rule",
"name": "project1-rule"
}
FAKE_FORWARDING_RULE2 = {
"kind": "compute#forwardingRule",
"description": "",
"IPAddress": "10.10.10.2",
"region": "https://www.googleapis.com/compute/v1/projects/project2/regions/us-central1",
"loadBalancingScheme": "EXTERNAL",
"target": "https://www.googleapis.com/compute/v1/projects/project2/regions/us-central1/targetPools/project2-pool",
"portRange": "80-80",
"IPProtocol": "TCP",
"creationTimestamp": "2017-05-05T12:00:01.000-07:00",
"id": "222222222222",
"selfLink": "https://www.googleapis.com/compute/v1/projects/project2/regions/us-central1/forwardingRules/project2-rule",
"name": "project2-rule"
}
FAKE_API_RESPONSE1 = [FAKE_FORWARDING_RULE1]
FAKE_API_RESPONSE2 = [FAKE_FORWARDING_RULE2]
FAKE_PROJECT_FWD_RULES_MAP = {
'project1': [FAKE_FORWARDING_RULE1],
'project2': [FAKE_FORWARDING_RULE2],
}
EXPECTED_LOADABLE_FWD_RULES = [
{'project_id': 'project1',
'description': '',
'ip_address': '10.10.10.1',
'region': 'https://www.googleapis.com/compute/v1/projects/project1/regions/us-central1',
'backend_service': None,
'load_balancing_scheme': 'EXTERNAL',
'target': 'https://www.googleapis.com/compute/v1/projects/project1/regions/us-central1/targetPools/project1-pool',
'port_range': '80-80',
'ports': '[]',
'ip_protocol': 'TCP',
'creation_timestamp': '2017-05-05 12:00:01',
'id': '111111111111',
'name': 'project1-rule',
'network': None,
'subnetwork': None,
'raw_forwarding_rule': parser.json_stringify(FAKE_FORWARDING_RULE1),
},
{'project_id': 'project2',
'description': '',
'ip_address': '10.10.10.2',
'region': 'https://www.googleapis.com/compute/v1/projects/project2/regions/us-central1',
'backend_service': None,
'load_balancing_scheme': 'EXTERNAL',
'target': 'https://www.googleapis.com/compute/v1/projects/project2/regions/us-central1/targetPools/project2-pool',
'port_range': '80-80',
'ports': '[]',
'ip_protocol': 'TCP',
'creation_timestamp': '2017-05-05 12:00:01',
'id': '222222222222',
'name': 'project2-rule',
'network': None,
'subnetwork': None,
'raw_forwarding_rule': parser.json_stringify(FAKE_FORWARDING_RULE2),
},
]
|
# -*- coding: utf-8 -*-
# Copyright (c) 2014, Vispy Development Team.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
""" vispy.app.backends
The backend modules are dynamically imported when needed. This module
defines a small description of each supported backend, so that for
instance we can test whether the GUI toolkit for a backend is already
imported. This stuff is mostly used in the Application.use method.
"""
# Define backends: name, vispy.app.backends.xxx module, native module name.
# This is the order in which they are attempted to be imported.
CORE_BACKENDS = [
('PyQt4', '_pyqt4', 'PyQt4'),
('PyQt5', '_pyqt5', 'PyQt5'),
('PySide', '_pyside', 'PySide'),
('Pyglet', '_pyglet', 'pyglet'),
('Glfw', '_glfw', 'vispy.ext.glfw'),
('SDL2', '_sdl2', 'sdl2'),
('wx', '_wx', 'wx'),
('EGL', '_egl', 'vispy.ext.egl'),
('Glut', '_glut', 'OpenGL.GLUT'),
]
# Whereas core backends really represents libraries that can create a
# canvas, the pseudo backends act more like a proxy.
PSEUDO_BACKENDS = [
# ('ipynb_vnc', '_ipynb_vnc', None),
# ('ipynb_static', '_ipynb_static', None),
('ipynb_webgl', '_ipynb_webgl', None),
('_test', '_test', 'vispy.app.backends._test'), # add one that will fail
]
# Combine
BACKENDS = CORE_BACKENDS + PSEUDO_BACKENDS
# Get list of backend names
BACKEND_NAMES = [b[0].lower() for b in BACKENDS]
# Map of the lowercase backend names to the backend descriptions above
# so that we can look up its properties if we only have a name.
BACKENDMAP = dict([(be[0].lower(), be) for be in BACKENDS])
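# Illustrative lookup (derived from the definitions above, not part of the original module):
# a lowercase name maps back to the full backend description, e.g.
#   BACKENDMAP['pyqt5'] == ('PyQt5', '_pyqt5', 'PyQt5')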
# List of attempted backends. For logging.
TRIED_BACKENDS = []
# Flag for _pyside, _pyqt4 and _qt modules to communicate.
qt_lib = None
|
# -*- coding: utf-8 -*-
import dgl
import networkx as nx
import numpy as np
import torch
from dataset.codesearchnet import MAX_SUB_TOKEN_LEN
def build_graph(tree_dict, dictionary, tree_leaf_subtoken=1, DGLGraph_PAD_WORD=-1) -> dgl.DGLGraph:
    # Leaf nodes store the split sub-tokens; if a token cannot be split, it stays as a single token.
    # In the .pt training data a leaf-node token is stored as ["a_hu", ["a", "hu"]].
    # (1) When tree_leaf_subtoken is 1, this function converts only the sub-tokens to word ids,
    #     i.e. it stores [id of "a", id of "hu"], e.g. [23, 179]. For a token that cannot be split,
    #     the .pt data looks like ["imq", ["imq", PAD_WORD]], which becomes [id of "imq", id of
    #     codesum.PAD_WORD], e.g. [258, 0]. The padded length is determined by the maximum number
    #     of sub-tokens per token across the whole train/val/test dataset.
    # (2) When tree_leaf_subtoken is 0, this function uses the original (unsplit) token to get the
    #     word id, e.g. the id of "a_hu" itself.
nx_graph = nx.DiGraph()
def _build(nid, idx, tree):
# non-leaf node, 'children': ["sub_token", ["sub", "token", <PAD>, <PAD>, <PAD>]]
if not isinstance(tree[idx]['children'][1], list):
child_ids = tree[idx]['children']
if nid is None:
nx_graph.add_node(0, x=[DGLGraph_PAD_WORD] * MAX_SUB_TOKEN_LEN, y=int(idx), mask=0)
# print('node={}, x={}, y={}, mask={}'.format(0, [DGLGraph_PAD_WORD] * MAX_SUB_TOKEN_LEN, int(idx), 0))
nid = 0
for idx in child_ids:
cid = nx_graph.number_of_nodes()
y_value = int(idx)
if not isinstance(tree[str(idx)]['children'][1], list): # non-leaf node
nx_graph.add_node(cid, x=[DGLGraph_PAD_WORD] * MAX_SUB_TOKEN_LEN, y=y_value, mask=0)
# print(
# 'node={}, x={}, y={}, mask={}'.format(cid, [DGLGraph_PAD_WORD] * MAX_SUB_TOKEN_LEN, y_value, 0))
_build(cid, str(idx), tree)
else: # leaf node
if tree_leaf_subtoken:
word_index = [dictionary.index(subtoken) for subtoken in tree[str(idx)]['children'][1]]
else:
word_index = [dictionary.index(tree[idx]['children'][0])]
nx_graph.add_node(cid, x=word_index, y=y_value, mask=1)
# print('node={}, x={}, y={}, mask={}'.format(cid, word_index, y_value, 1))
                nx_graph.add_edge(cid, nid)  # With a DiGraph, the edge added here points from cid to nid: nid is toward the root, cid toward the leaves
# print('edge={}->{}'.format(cid, nid))
else: # leaf node
if tree_leaf_subtoken:
word_index = [dictionary.index(subtoken) for subtoken in tree[idx]['children'][-1]]
else:
word_index = [dictionary.index(tree[idx]['children'][0])]
if nid is None:
cid = 0
else:
cid = nx_graph.number_of_nodes()
nx_graph.add_node(cid, x=word_index, y=int(idx), mask=1)
# print('node={}, x={}, y={}, mask={}'.format(cid, word_index, int(idx), 1))
if nid is not None:
                nx_graph.add_edge(cid, nid)  # With a DiGraph, the edge added here points from cid to nid: nid is toward the root, cid toward the leaves
# print('edge={}->{}'.format(cid, nid))
_build(None, '0', tree_dict)
dgl_graph = dgl.DGLGraph()
dgl_graph.from_networkx(nx_graph, node_attrs=['x', 'y', 'mask'])
assert len(tree_dict) == dgl_graph.number_of_nodes(), Exception('build dgl tree error')
return dgl_graph
def tree2dgl(tree_dict, dictionary, DGLGraph_PAD_WORD=-1):
"""
if _subtoken == True, it means that we tokenize leaf node info into sub-tokens
e.g. ["sub_token", ["sub", "token", <PAD>, <PAD>, <PAD>]]
else, no tokenization. e.g. ["sub_token"]
"""
_subtoken = False
for node in tree_dict.values():
if isinstance(node['children'][1], list):
_subtoken = True
break
def nonleaf_node_info():
if _subtoken:
return [DGLGraph_PAD_WORD] * MAX_SUB_TOKEN_LEN
else:
return [DGLGraph_PAD_WORD]
def token2idx(node_info):
"""
node info => indices
if _subtoken == True, ["sub_token", ["sub", "token", <PAD>, <PAD>, <PAD>]] => index(["sub", "token", <PAD>, <PAD>, <PAD>])
else, ["sub_token"] => index(["sub_token"])
"""
if _subtoken:
return [dictionary.index(subtoken) for subtoken in node_info[-1]]
else:
return [dictionary.index(node_info[0])]
"""
how to build DGL graph?
node:
x: node info (if it's non-leaf nodes, padded with [-1, ...]),
y: current node idx
mask: if leaf node, mask=1; else, mask=0
* if current node is the root node,
edge: child => parent
"""
dgl_graph = dgl.DGLGraph()
ids = sorted(tree_dict.keys(), key=int)
dgl_graph.add_nodes(
len(tree_dict),
data={
'x': torch.LongTensor([
token2idx(tree_dict[idx]['children']) if isinstance(tree_dict[idx]['children'][1], list) \
else nonleaf_node_info()
for idx in ids
]),
'y': torch.LongTensor(range(len(tree_dict))),
'mask': torch.LongTensor([isinstance(tree_dict[idx]['children'][1], list) for idx in ids]),
}
)
for idx in ids:
node = tree_dict[idx]
if node['parent'] is not None:
dgl_graph.add_edges(int(idx), int(node['parent']))
# print('edge={}->{}'.format(int(idx), int(node['parent'])))
return dgl_graph
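# Illustrative usage sketch (not a definitive API example): the `bin_ast` and `dict` objects in
# the __main__ block below show the expected inputs -- a dict of node records with
# 'parent'/'children' fields plus a dictionary object exposing an `index()` method.
#   graph = tree2dgl(bin_ast, dict)
#   print(graph.number_of_nodes(), graph.number_of_edges())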
def tree2nx2dgl(tree_dict, dictionary, DGLGraph_PAD_WORD=-1):
"""
if _subtoken == True, it means that we tokenize leaf node info into sub-tokens
e.g. ["sub_token", ["sub", "token", <PAD>, <PAD>, <PAD>]]
else, no tokenization. e.g. ["sub_token"]
"""
_subtoken = False
for node in tree_dict.values():
if isinstance(node['children'][1], list):
_subtoken = True
break
def nonleaf_node_info():
if _subtoken:
return [DGLGraph_PAD_WORD] * MAX_SUB_TOKEN_LEN
else:
return [DGLGraph_PAD_WORD]
def token2idx(node_info):
"""
node info => indices
if _subtoken == True, ["sub_token", ["sub", "token", <PAD>, <PAD>, <PAD>]] => index(["sub", "token", <PAD>, <PAD>, <PAD>])
else, ["sub_token"] => index(["sub_token"])
"""
if _subtoken:
return [dictionary.index(subtoken) for subtoken in node_info[-1]]
else:
return [dictionary.index(node_info[0])]
"""
how to build DGL graph?
node:
x: node info (if it's non-leaf nodes, padded with [-1, ...]),
y: current node idx
mask: if leaf node, mask=1; else, mask=0
* if current node is the root node,
edge: child => parent
"""
nx_graph = nx.DiGraph()
ids = sorted(tree_dict.keys(), key=int)
for idx in ids:
node = tree_dict[idx]
nx_graph.add_node(
int(idx),
x=token2idx(tree_dict[idx]['children']) if isinstance(tree_dict[idx]['children'][1], list) \
else nonleaf_node_info(),
y=int(idx),
mask=int(isinstance(tree_dict[idx]['children'][1], list))
)
# print('node={}, x={}, y={}, mask={}'.format(
# idx, token2idx(tree_dict[idx]['children']) if isinstance(tree_dict[idx]['children'][1], list) \
# else nonleaf_node_info(), int(idx), int(isinstance(tree_dict[idx]['children'][1], list))))
if node['parent'] is not None:
nx_graph.add_edge(int(idx), int(node['parent']))
# print('edge={}->{}'.format(int(idx), int(node['parent'])))
dgl_graph = dgl.DGLGraph()
dgl_graph.from_networkx(nx_graph, node_attrs=['x', 'y', 'mask'])
assert len(tree_dict) == dgl_graph.number_of_nodes(), Exception('build dgl tree error')
return dgl_graph
def pack_graph(graphs):
def get_root_node_info(dgl_trees):
root_indices, node_nums = [None] * len(dgl_trees), [None] * len(dgl_trees)
for ind, tree in enumerate(dgl_trees):
topological_nodes = dgl.topological_nodes_generator(tree)
root_ind_tree_dgldigraph = topological_nodes[-1].item()
root_indices[ind] = root_ind_tree_dgldigraph
all_num_node_tree_dgldigraph = tree.number_of_nodes()
node_nums[ind] = all_num_node_tree_dgldigraph
root_indices = np.array(root_indices)
num_nodes = np.array(node_nums)
return root_indices, num_nodes,
# merge many dgl graphs into a huge one
root_indices, node_nums, = get_root_node_info(graphs)
packed_graph = dgl.batch(graphs)
return packed_graph, root_indices, node_nums,
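# Illustrative usage sketch (variable names are hypothetical): given a list of per-sample trees
# built by tree2dgl/tree2nx2dgl, pack_graph batches them into a single graph and also returns
# each tree's root-node index and node count.
#   trees = [tree2dgl(ast, dictionary) for ast in asts]
#   batched, root_indices, node_nums = pack_graph(trees)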
if __name__ == '__main__':
from ncc.tasks.summarization import SummarizationTask
dict = SummarizationTask.load_dictionary(
filename='/home/yang/.ncc/multi/summarization/data-mmap/ruby/binary_ast.dict.json'
)
bin_ast = {
"0": {"type": "method", "parent": None, "children": [1, 2]},
"1": {"type": "def_keyword", "parent": 0, "children": ["def", ["def", "<pad>", "<pad>", "<pad>", "<pad>"]]},
"2": {"type": "TMP", "parent": 0, "children": [3, 4]},
"3": {"type": "identifier", "parent": 2, "children": ["set", ["set", "<pad>", "<pad>", "<pad>", "<pad>"]]},
"4": {"type": "TMP", "parent": 2, "children": [5, 10]},
"5": {"type": "method_parameters", "parent": 4, "children": [6, 7]},
"6": {"type": "LeftParenOp", "parent": 5, "children": ["(", ["(", "<pad>", "<pad>", "<pad>", "<pad>"]]},
"7": {"type": "TMP", "parent": 5, "children": [8, 9]}, "8": {"type": "identifier", "parent": 7,
"children": ["set_attributes",
["set", "attributes", "<pad>",
"<pad>", "<pad>"]]},
"9": {"type": "LeftParenOp", "parent": 7, "children": [")", [")", "<pad>", "<pad>", "<pad>", "<pad>"]]},
"10": {"type": "TMP", "parent": 4, "children": [11, 26]},
"11": {"type": "assignment", "parent": 10, "children": [12, 13]},
"12": {"type": "identifier", "parent": 11,
"children": ["old_attributes", ["old", "attributes", "<pad>", "<pad>", "<pad>"]]},
"13": {"type": "TMP", "parent": 11, "children": [14, 15]},
"14": {"type": "AsgnOp", "parent": 13, "children": ["=", ["=", "<pad>", "<pad>", "<pad>", "<pad>"]]},
"15": {"type": "method_call", "parent": 13, "children": [16, 17]},
"16": {"type": "identifier", "parent": 15,
"children": ["compute_attributes", ["compute", "attributes", "<pad>", "<pad>", "<pad>"]]},
"17": {"type": "argument_list", "parent": 15, "children": [18, 19]},
"18": {"type": "LeftParenOp", "parent": 17,
"children": ["(", ["(", "<pad>", "<pad>", "<pad>", "<pad>"]]},
"19": {"type": "TMP", "parent": 17, "children": [20, 25]},
"20": {"type": "call", "parent": 19, "children": [21, 22]},
"21": {"type": "identifier", "parent": 20,
"children": ["set_attributes", ["set", "attributes", "<pad>", "<pad>", "<pad>"]]},
"22": {"type": "TMP", "parent": 20, "children": [23, 24]},
"23": {"type": "DotOp", "parent": 22, "children": [".", [".", "<pad>", "<pad>", "<pad>", "<pad>"]]},
"24": {"type": "identifier", "parent": 22,
"children": ["keys", ["keys", "<pad>", "<pad>", "<pad>", "<pad>"]]},
"25": {"type": "LeftParenOp", "parent": 19,
"children": [")", [")", "<pad>", "<pad>", "<pad>", "<pad>"]]},
"26": {"type": "TMP", "parent": 10, "children": [27, 34]},
"27": {"type": "method_call", "parent": 26, "children": [28, 29]},
"28": {"type": "identifier", "parent": 27,
"children": ["assign_attributes", ["assign", "attributes", "<pad>", "<pad>", "<pad>"]]},
"29": {"type": "argument_list", "parent": 27, "children": [30, 31]},
"30": {"type": "LeftParenOp", "parent": 29,
"children": ["(", ["(", "<pad>", "<pad>", "<pad>", "<pad>"]]},
"31": {"type": "TMP", "parent": 29, "children": [32, 33]},
"32": {"type": "identifier", "parent": 31,
"children": ["set_attributes", ["set", "attributes", "<pad>", "<pad>", "<pad>"]]},
"33": {"type": "LeftParenOp", "parent": 31,
"children": [")", [")", "<pad>", "<pad>", "<pad>", "<pad>"]]},
"34": {"type": "TMP", "parent": 26, "children": [35, 36]},
"35": {"type": "yield_keyword", "parent": 34,
"children": ["yield", ["yield", "<pad>", "<pad>", "<pad>", "<pad>"]]},
"36": {"type": "TMP", "parent": 34, "children": [37, 46]},
"37": {"type": "ensure", "parent": 36, "children": [38, 39]},
"38": {"type": "ensure_keyword", "parent": 37,
"children": ["ensure", ["ensure", "<pad>", "<pad>", "<pad>", "<pad>"]]},
"39": {"type": "method_call", "parent": 37, "children": [40, 41]},
"40": {"type": "identifier", "parent": 39,
"children": ["assign_attributes", ["assign", "attributes", "<pad>", "<pad>", "<pad>"]]},
"41": {"type": "argument_list", "parent": 39, "children": [42, 43]},
"42": {"type": "LeftParenOp", "parent": 41,
"children": ["(", ["(", "<pad>", "<pad>", "<pad>", "<pad>"]]},
"43": {"type": "TMP", "parent": 41, "children": [44, 45]}, "44": {"type": "identifier", "parent": 43,
"children": ["old_attributes",
["old", "attributes",
"<pad>", "<pad>",
"<pad>"]]},
"45": {"type": "LeftParenOp", "parent": 43,
"children": [")", [")", "<pad>", "<pad>", "<pad>", "<pad>"]]},
"46": {"type": "end_keyword", "parent": 36,
"children": ["end", ["end", "<pad>", "<pad>", "<pad>", "<pad>"]]}}
nx2dgl_graph = build_graph(bin_ast, dict)
dgl_graph = tree2dgl(bin_ast, dict)
dgl_graph
|
from ._SpeechDataset import SpeechDataset, SpeechDatasetQuerySet
|
#!/usr/bin/env python
# coding=utf8
from flask import Blueprint
running = Blueprint('running', __name__)
from snapshot import *
from log import *
|
from rest_framework import serializers
from ..models import Tweet
from accounts.api.serializers import UserModelSerializer
from django.utils.timesince import timesince
class ParentTweetModelSerializer(serializers.ModelSerializer):
user = UserModelSerializer(read_only=True)
date_display = serializers.SerializerMethodField()
timesince = serializers.SerializerMethodField()
likes = serializers.SerializerMethodField()
didlike = serializers.SerializerMethodField()
class Meta:
model = Tweet
fields = [
'id',
'user',
'content',
'timestamp',
'date_display',
'timesince',
'likes',
'didlike',
]
    def get_likes(self, obj):
        return obj.liked.all().count()
    def get_didlike(self, obj):
        try:
            user = self.context.get('request').user
            if user in obj.liked.all():
                return True
        except:
            pass
        return False
    def get_date_display(self, obj):
        return obj.timestamp.strftime("%b %d, %Y | at %I:%M %p")
    def get_timesince(self, obj):
        return timesince(obj.timestamp) + " ago"
class TweetModelSerializer(serializers.ModelSerializer):
parent_id = serializers.CharField(required=False)
user = UserModelSerializer(read_only=True)
date_display = serializers.SerializerMethodField()
timesince = serializers.SerializerMethodField()
is_retweet = serializers.SerializerMethodField()
parent = ParentTweetModelSerializer(read_only=True)
likes = serializers.SerializerMethodField()
didlike = serializers.SerializerMethodField()
class Meta:
model = Tweet
fields = [
'parent_id',
'id',
'user',
'content',
'timestamp',
'date_display',
'timesince',
'is_retweet',
'parent',
'likes',
'didlike',
'reply',
]
#read_only_fields =['reply']
    def get_likes(self, obj):
        return obj.liked.all().count()
    def get_didlike(self, obj):
try:
user = self.context.get('request').user
if user in obj.liked.all():
return True
except:
pass
return False
    def get_date_display(self, obj):
        return obj.timestamp.strftime("%b %d, %Y | at %I:%M %p")
    def get_timesince(self, obj):
        return timesince(obj.timestamp) + " ago"
    def get_is_retweet(self, obj):
if obj.parent:
return True
return False
|
#!/usr/bin/env python3
# SPDX-License-Identifier: Apache-2.0
# Copyright (c) 2020 Intel Corporation
""" ETCD Access Management Tool """
import argparse
import logging
import os
import sys
import eis_integ
def parse_arguments(_cli_args):
""" Parse argument passed to function """
parser = argparse.ArgumentParser(description="ETCD Access Management Tool"
" - add and remove a new EIS application user to the"
" ETCD database and grant it proper privileges")
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument("-a", "--auth", help="Enable authentication for etcd."
" Create root user with root role."
" Enter the new password for the root user.")
group.add_argument("-c", "--create", help="Create etcd user and role for given application."
" Enter the application name.")
group.add_argument("-r", "--remove", help="Remove the given user from the etcd.")
return parser.parse_args()
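# Example invocations derived from the argument definitions above (values are placeholders):
#   python3 <this_script>.py --auth <root_password>   # enable etcd auth, create root user/role
#   python3 <this_script>.py --create <app_name>      # create etcd user and role for the app
#   python3 <this_script>.py --remove <app_name>      # remove the app's role and user from etcd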
def main(args):
""" Create, remove user or enable authentication for etcd - depending on the input argument """
eis_integ.init_logger()
os.environ["ETCDCTL_ENDPOINTS"] = "https://" + eis_integ.extract_etcd_endpoint()
eis_integ.check_path_variable("ETCDCTL_CACERT", os.environ.get("ETCDCTL_CACERT"))
eis_integ.check_path_variable("ETCDCTL_CERT", os.environ.get("ETCDCTL_CERT"))
eis_integ.check_path_variable("ETCDCTL_KEY", os.environ.get("ETCDCTL_KEY"))
if args.auth:
print("Enable authentication for etcd. Create root user with root role.")
eis_integ.enable_etcd_auth(args.auth)
elif args.create:
print("Create etcd {} user and role for application.".format(args.create))
eis_integ.create_etcd_users(args.create)
elif args.remove:
print("Remove the {} role and user from the etcd.".format(args.remove))
eis_integ.remove_user_privilege(args.remove)
eis_integ.remove_user(args.remove)
return eis_integ.CODES.NO_ERROR
if __name__ == '__main__':
try:
sys.exit(main(parse_arguments(sys.argv[1:])).value)
except eis_integ.EisIntegError as exception:
logging.error("Error while doing operations on ETCD users: %s", exception)
sys.exit(exception.code.value)
|
from pytest import fixture
from novel_tools.common import NovelData, Type
from novel_tools.processors.transformers.pattern_transformer import PatternTransformer
@fixture
def pattern_transformer():
return PatternTransformer({
'units': [
{
'filter': {
'type': 'chapter_content'
},
'subs': [
{
'pattern': '\\*{3,}',
'new': '---'
},
{
'pattern': '“(.+)”',
'new': '"{0}"'
}
]
},
{
'filter': {
'type': 'volume_intro'
},
'subs': [
{
'pattern': '◇',
'new': '*'
}
]
}
]
})
def test_pattern(pattern_transformer: PatternTransformer):
before = NovelData('******', Type.CHAPTER_CONTENT)
after = pattern_transformer.process(before)
assert after == NovelData('---', Type.CHAPTER_CONTENT)
before = NovelData('“We want universal quotation marks.”', Type.CHAPTER_CONTENT)
after = pattern_transformer.process(before)
assert after == NovelData('"We want universal quotation marks."', Type.CHAPTER_CONTENT)
before = NovelData('◇This needs to be highlighted◇', Type.VOLUME_INTRO)
after = pattern_transformer.process(before)
assert after == NovelData('*This needs to be highlighted*', Type.VOLUME_INTRO)
|
import json
import logging
import pathlib
import shutil
import tempfile
from typing import List
import marshmallow_dataclass
from defusedxml.ElementTree import parse
from osgeo import gdal
from osgeo import ogr
from osgeo import osr
from .util_numba import _accumulate_dicts
logger = logging.getLogger(__name__)
@marshmallow_dataclass.dataclass
class ImageInfo:
x_size: int
y_size: int
x_block_size: int
y_block_size: int
pixel_width: float
pixel_height: float
def get_n_blocks(self):
return (
len([*range(0, self.x_size, self.x_block_size)]) *
len([*range(0, self.y_size, self.y_block_size)])
)
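# Illustrative check of get_n_blocks (hypothetical numbers): a 512x256 image with 256x256 blocks
# gives len(range(0, 512, 256)) * len(range(0, 256, 256)) = 2 * 1 = 2 blocks.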
def get_image_info(path: pathlib.Path):
ds = gdal.Open(str(path))
gt = ds.GetGeoTransform()
band = ds.GetRasterBand(1)
block_sizes = band.GetBlockSize()
return ImageInfo(
band.XSize, band.YSize, block_sizes[0], block_sizes[1], gt[1], gt[5]
)
def setup_output_image(
in_file: pathlib.Path, out_file: pathlib.Path, n_bands: int,
image_info: ImageInfo
):
# Need two output bands for each four year period, plus one for the JRC
# layer
# Setup output file for max drought and population counts
driver = gdal.GetDriverByName("GTiff")
dst_ds = driver.Create(
str(out_file),
image_info.x_size,
image_info.y_size,
n_bands,
gdal.GDT_Int16,
options=['COMPRESS=LZW']
)
src_ds = gdal.Open(str(in_file))
src_gt = src_ds.GetGeoTransform()
dst_ds.SetGeoTransform(src_gt)
dst_srs = osr.SpatialReference()
dst_srs.ImportFromWkt(src_ds.GetProjectionRef())
dst_ds.SetProjection(dst_srs.ExportToWkt())
return dst_ds
def get_sourcefiles_in_vrt(vrt):
vrt_tree = parse(vrt)
vrt_root = vrt_tree.getroot()
filenames = []
for band in vrt_root.findall('VRTRasterBand'):
sources = band.findall('SimpleSource')
for source in sources:
filenames.append(source.find('SourceFilename').text)
return list(set(filenames))
def combine_all_bands_into_vrt(
in_files: List[pathlib.Path],
out_file: pathlib.Path,
is_relative=True,
aws_access_key_id=None,
aws_secret_access_key=None
):
'''creates a vrt file combining all bands of in_files
All bands must have the same extent, resolution, and crs
'''
if aws_access_key_id is not None:
gdal.SetConfigOption("AWS_ACCESS_KEY_ID", aws_access_key_id)
if aws_secret_access_key is not None:
gdal.SetConfigOption("AWS_SECRET_ACCESS_KEY", aws_secret_access_key)
simple_source_raw = ''' <SimpleSource>
<SourceFilename relativeToVRT="{is_relative}">{source_path}</SourceFilename>
<SourceBand>{source_band_num}</SourceBand>
<SrcRect xOff="0" yOff="0" xSize="{out_Xsize}" ySize="{out_Ysize}"/>
<DstRect xOff="0" yOff="0" xSize="{out_Xsize}" ySize="{out_Ysize}"/>
</SimpleSource>'''
for file_num, in_file in enumerate(in_files):
in_ds = gdal.Open(str(in_file))
this_gt = in_ds.GetGeoTransform()
this_proj = in_ds.GetProjectionRef()
if file_num == 0:
out_gt = this_gt
out_proj = this_proj
else:
assert [round(x, 8) for x in out_gt] == [
round(x, 8) for x in this_gt
], (
f"base file ({in_files[0]}) geotransform ({out_gt}) doesn't match "
f"geotransform in {in_file} ({this_gt})"
)
assert out_proj == this_proj
for band_num in range(1, in_ds.RasterCount + 1):
this_dt = in_ds.GetRasterBand(band_num).DataType
this_band = in_ds.GetRasterBand(band_num)
this_Xsize = this_band.XSize
this_Ysize = this_band.YSize
if band_num == 1:
out_Xsize = this_Xsize
out_Ysize = this_Ysize
if file_num == 0:
# If this is the first band of the first file, need to
# create the output VRT file
driver = gdal.GetDriverByName("VRT")
out_ds = driver.Create(
str(out_file),
out_Xsize,
out_Ysize,
0,
)
out_ds.SetGeoTransform(out_gt)
out_srs = osr.SpatialReference()
out_srs.ImportFromWkt(out_proj)
out_ds.SetProjection(out_srs.ExportToWkt())
if file_num > 1:
assert this_Xsize == out_Xsize
assert this_Ysize == out_Ysize
out_ds.AddBand(this_dt)
# The new band will always be last band in out_ds
band = out_ds.GetRasterBand(out_ds.RasterCount)
md = {}
md['source_0'] = simple_source_raw.format(
is_relative=1 if is_relative else 0,
source_path=in_file,
source_band_num=band_num,
out_Xsize=out_Xsize,
out_Ysize=out_Ysize
)
band.SetMetadata(md, 'vrt_sources')
out_ds = None
    # Strip the parent directory from the source path of each band
    # (it has to be included when setting the metadata or else GDAL throws an error)
fh, new_file = tempfile.mkstemp()
new_file = pathlib.Path(new_file)
with new_file.open('w') as fh_new:
with out_file.open() as fh_old:
for line in fh_old:
fh_new.write(line.replace(str(out_file.parents[0]) + '/', ''))
out_file.unlink()
shutil.copy(str(new_file), str(out_file))
return True
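# Illustrative usage sketch (paths are placeholders, not from the original module):
#   combine_all_bands_into_vrt(
#       [pathlib.Path('indicator_a.tif'), pathlib.Path('indicator_b.tif')],
#       pathlib.Path('combined.vrt'),
#   )
# Every band of every input becomes one band of the output VRT, in file order; the inputs must
# share the same geotransform and projection (enforced by the asserts above).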
def save_vrt(source_path: pathlib.Path, source_band_index: int) -> str:
temporary_file = tempfile.NamedTemporaryFile(suffix=".vrt", delete=False)
temporary_file.close()
gdal.BuildVRT(
temporary_file.name, str(source_path), bandList=[source_band_index]
)
return temporary_file.name
def wkt_geom_to_geojson_file_string(wkt):
out_file = tempfile.NamedTemporaryFile(suffix='.geojson').name
out_ds = ogr.GetDriverByName('GeoJSON').CreateDataSource(out_file)
out_layer = out_ds.CreateLayer('wkt_geom', geom_type=ogr.wkbPolygon)
feature_def = out_layer.GetLayerDefn()
out_feat = ogr.Feature(feature_def)
out_feat.SetGeometry(ogr.CreateGeometryFromWkt(wkt))
out_layer.CreateFeature(out_feat)
out_layer = None
out_ds = None
with open(out_file, 'r') as f:
return json.load(f)
def accumulate_dicts(z):
    # Skip items that may be None or empty (comes up with lc_trans_prod_bizonal,
    # for example, when the initial year of cover is not available to match with
    # productivity data)
    z = [item for item in z if item is not None and item != {}]
if len(z) == 0:
return {}
elif len(z) == 1:
return z[0]
else:
return _accumulate_dicts(z)
def log_progress(fraction, message=None, data=None):
logger.info('%s - %.2f%%', message, 100 * fraction)
|
#!python
# -*- coding: utf-8 -*-
"""Example of qdarkstyle use for Python and Qt applications.
This module a main window with every item that could be created with
Qt Design (common ones) in the basic states (enabled/disabled), and
(checked/unchecked) for those who has this attribute.
Requirements:
- Python 2 or Python 3
- PyQt4 or PyQt5 or PySide or PySide2
- QtPy or PyQtGraph (if chosen)
To run this example using PyQt4, simply do
.. code-block:: python
python example.py
or
.. code-block:: python
python example.py --qt_from=pyqt
Other options for qt_from are: pyqt5, pyside, qtpy and pyqtgraph.
You can also run the example without the dark theme (no_dark), to check for
problems.
.. code-block:: python
python example.py --qt_from=pyqt --no_dark
.. note:: qdarkstyle does not have to be installed to run the example.
"""
# Standard library imports
import argparse
import logging
import os
import sys
# make the example runnable without the need to install
from os.path import abspath, dirname
sys.path.insert(0, abspath(dirname(abspath(__file__)) + '/..'))
# must be in this place, after setting path, to not need to install
import qdarkstyle
from qdarkstyle import QT_BINDING, QT_ABSTRACTION
# Constants
EXAMPLE_PATH = os.path.abspath(os.path.dirname(__file__))
REPO_PATH = os.path.dirname(EXAMPLE_PATH)
SCREENSHOTS_PATH = os.path.join(REPO_PATH, 'screenshots')
def main():
"""Execute QDarkStyle example."""
parser = argparse.ArgumentParser(description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('--qt_from', default='qtpy',
                        choices=['pyqt', 'pyqt5', 'pyside', 'pyside2', 'qtpy', 'pyqtgraph'],
help="Choose which wrapper/framework is to be used to run the example.", type=str)
parser.add_argument('--no_dark', action='store_true',
help="Exihibts the original window (without qdarkstyle).")
parser.add_argument('--test', action='store_true',
help="Auto close window after 2s.")
parser.add_argument('--reset', action='store_true',
help="Reset GUI settings (position, size).")
parser.add_argument('--screenshots', action='store_true',
help="Generate screenshots.")
# parsing arguments from command line
args = parser.parse_args()
# set log for debug
logging.basicConfig(level=logging.DEBUG)
# to avoid problems when testing without screen
if args.test:
        os.environ['QT_QPA_PLATFORM'] = 'offscreen'
if args.qt_from == 'pyside':
# using PySide wrapper
from PySide.QtGui import QApplication, QMainWindow, QDockWidget, QStatusBar, QLabel, QPushButton
from PySide.QtCore import QTimer, Qt, QSettings, QByteArray, QPoint, QSize
# import examples UI according to wrapper
from ui.mw_menus_pyside_ui import Ui_MainWindow as ui_main
from ui.dw_buttons_pyside_ui import Ui_DockWidget as ui_buttons
from ui.dw_displays_pyside_ui import Ui_DockWidget as ui_displays
from ui.dw_inputs_fields_pyside_ui import Ui_DockWidget as ui_inputs_fields
from ui.dw_inputs_no_fields_pyside_ui import Ui_DockWidget as ui_inputs_no_fields
from ui.dw_widgets_pyside_ui import Ui_DockWidget as ui_widgets
from ui.dw_views_pyside_ui import Ui_DockWidget as ui_views
from ui.dw_containers_tabs_pyside_ui import Ui_DockWidget as ui_containers_tabs
from ui.dw_containers_no_tabs_pyside_ui import Ui_DockWidget as ui_containers_no_tabs
# Getting style
style_method = qdarkstyle.load_stylesheet_pyside
elif args.qt_from == 'pyqt':
# using PyQt4 wrapper
from PyQt4.QtGui import QApplication, QMainWindow, QDockWidget, QStatusBar, QLabel, QPushButton
from PyQt4.QtCore import QTimer, Qt, QSettings, QByteArray, QPoint, QSize
# import examples UI according to wrapper
from ui.mw_menus_pyqt_ui import Ui_MainWindow as ui_main
from ui.dw_buttons_pyqt_ui import Ui_DockWidget as ui_buttons
from ui.dw_displays_pyqt_ui import Ui_DockWidget as ui_displays
from ui.dw_inputs_fields_pyqt_ui import Ui_DockWidget as ui_inputs_fields
from ui.dw_inputs_no_fields_pyqt_ui import Ui_DockWidget as ui_inputs_no_fields
from ui.dw_widgets_pyqt_ui import Ui_DockWidget as ui_widgets
from ui.dw_views_pyqt_ui import Ui_DockWidget as ui_views
from ui.dw_containers_tabs_pyqt_ui import Ui_DockWidget as ui_containers_tabs
from ui.dw_containers_no_tabs_pyqt_ui import Ui_DockWidget as ui_containers_no_tabs
# Getting style
style_method = qdarkstyle.load_stylesheet_pyqt
elif args.qt_from == 'pyqt5':
# using PyQt5 wrapper
from PyQt5.QtWidgets import QApplication, QMainWindow, QDockWidget, QStatusBar, QLabel, QPushButton
from PyQt5.QtCore import QTimer, Qt, QSettings, QByteArray, QPoint, QSize
# import examples UI according to wrapper
from ui.mw_menus_pyqt5_ui import Ui_MainWindow as ui_main
from ui.dw_buttons_pyqt5_ui import Ui_DockWidget as ui_buttons
from ui.dw_displays_pyqt5_ui import Ui_DockWidget as ui_displays
from ui.dw_inputs_fields_pyqt5_ui import Ui_DockWidget as ui_inputs_fields
from ui.dw_inputs_no_fields_pyqt5_ui import Ui_DockWidget as ui_inputs_no_fields
from ui.dw_widgets_pyqt5_ui import Ui_DockWidget as ui_widgets
from ui.dw_views_pyqt5_ui import Ui_DockWidget as ui_views
from ui.dw_containers_tabs_pyqt5_ui import Ui_DockWidget as ui_containers_tabs
from ui.dw_containers_no_tabs_pyqt5_ui import Ui_DockWidget as ui_containers_no_tabs
# Getting style
style_method = qdarkstyle.load_stylesheet_pyqt5
elif args.qt_from == 'pyside2':
        # using PySide2 wrapper
from PySide2.QtWidgets import QApplication, QMainWindow, QDockWidget, QStatusBar, QLabel, QPushButton
from PySide2.QtCore import QTimer, Qt, QSettings, QByteArray, QPoint, QSize
# import examples UI according to wrapper
from ui.mw_menus_pyside2_ui import Ui_MainWindow as ui_main
from ui.dw_buttons_pyside2_ui import Ui_DockWidget as ui_buttons
from ui.dw_displays_pyside2_ui import Ui_DockWidget as ui_displays
from ui.dw_inputs_fields_pyside2_ui import Ui_DockWidget as ui_inputs_fields
from ui.dw_inputs_no_fields_pyside2_ui import Ui_DockWidget as ui_inputs_no_fields
from ui.dw_widgets_pyside2_ui import Ui_DockWidget as ui_widgets
from ui.dw_views_pyside2_ui import Ui_DockWidget as ui_views
from ui.dw_containers_tabs_pyside2_ui import Ui_DockWidget as ui_containers_tabs
from ui.dw_containers_no_tabs_pyside2_ui import Ui_DockWidget as ui_containers_no_tabs
# Getting style
style_method = qdarkstyle.load_stylesheet_pyside2
elif args.qt_from == 'qtpy':
# using QtPy API
from qtpy.QtWidgets import QApplication, QMainWindow, QDockWidget, QStatusBar, QLabel, QPushButton
from qtpy.QtCore import QTimer, Qt, QSettings, QByteArray, QPoint, QSize
# import examples UI according to wrapper
from ui.mw_menus_qtpy_ui import Ui_MainWindow as ui_main
from ui.dw_buttons_qtpy_ui import Ui_DockWidget as ui_buttons
from ui.dw_displays_qtpy_ui import Ui_DockWidget as ui_displays
from ui.dw_inputs_fields_qtpy_ui import Ui_DockWidget as ui_inputs_fields
from ui.dw_inputs_no_fields_qtpy_ui import Ui_DockWidget as ui_inputs_no_fields
from ui.dw_widgets_qtpy_ui import Ui_DockWidget as ui_widgets
from ui.dw_views_qtpy_ui import Ui_DockWidget as ui_views
from ui.dw_containers_tabs_qtpy_ui import Ui_DockWidget as ui_containers_tabs
from ui.dw_containers_no_tabs_qtpy_ui import Ui_DockWidget as ui_containers_no_tabs
# Getting style
style_method = qdarkstyle.load_stylesheet_from_environment
elif args.qt_from == 'pyqtgraph':
# using PyQtGraph API
from pyqtgraph.Qt.QtGui import QApplication, QMainWindow, QDockWidget, QStatusBar, QLabel, QPushButton
from pyqtgraph.Qt.QtCore import QTimer, Qt, QSettings, QByteArray, QPoint, QSize
#from pyqtgraph.Qt import QtGui, QtCore
# import examples UI according to wrapper
from ui.mw_menus_pyqtgraph_ui import Ui_MainWindow as ui_main
from ui.dw_buttons_pyqtgraph_ui import Ui_DockWidget as ui_buttons
from ui.dw_displays_pyqtgraph_ui import Ui_DockWidget as ui_displays
from ui.dw_inputs_fields_pyqtgraph_ui import Ui_DockWidget as ui_inputs_fields
from ui.dw_inputs_no_fields_pyqtgraph_ui import Ui_DockWidget as ui_inputs_no_fields
from ui.dw_widgets_pyqtgraph_ui import Ui_DockWidget as ui_widgets
from ui.dw_views_pyqtgraph_ui import Ui_DockWidget as ui_views
from ui.dw_containers_tabs_pyqtgraph_ui import Ui_DockWidget as ui_containers_tabs
from ui.dw_containers_no_tabs_pyqtgraph_ui import Ui_DockWidget as ui_containers_no_tabs
# Getting style
style_method = lambda: qdarkstyle.load_stylesheet_from_environment(is_pyqtgraph=True)
if args.no_dark:
style = ''
def write_settings(window):
"""Get window settings and write it into a file."""
settings = QSettings('QDarkStyle', 'QDarkStyle Example')
settings.setValue('pos', window.pos())
settings.setValue('size', window.size())
settings.setValue('state', window.saveState())
def read_settings(window, reset=False):
"""Read and set window settings from a file."""
settings = QSettings('QDarkStyle', 'QDarkStyle Example')
if args.qt_from == 'pyside' or args.qt_from == 'pyside2':
pos = settings.value('pos', window.pos())
size = settings.value('size', window.size())
state = settings.value('state', window.saveState())
else:
pos = settings.value('pos', window.pos(), type='QPoint')
size = settings.value('size', window.size(), type='QSize')
state = settings.value('state', window.saveState(), type='QByteArray')
if not reset:
window.restoreState(state)
window.resize(size)
window.move(pos)
# create the application
app = QApplication(sys.argv)
app.setOrganizationName('QDarkStyle')
app.setApplicationName('QDarkStyle Example')
    # setup stylesheet (kept empty when --no_dark is passed)
    if not args.no_dark:
        style = style_method()
    app.setStyleSheet(style)
# create main window
window = QMainWindow()
window.setObjectName('mainwindow')
ui = ui_main()
ui.setupUi(window)
window.setWindowTitle("QDarkStyle v." + qdarkstyle.__version__)
# create docks for buttons
dw_buttons = QDockWidget()
dw_buttons.setObjectName('buttons')
ui_buttons = ui_buttons()
ui_buttons.setupUi(dw_buttons)
window.addDockWidget(Qt.RightDockWidgetArea, dw_buttons)
# create docks for buttons
dw_displays = QDockWidget()
dw_displays.setObjectName('displays')
ui_displays = ui_displays()
ui_displays.setupUi(dw_displays)
window.addDockWidget(Qt.RightDockWidgetArea, dw_displays)
# create docks for inputs - no fields
dw_inputs_no_fields = QDockWidget()
dw_inputs_no_fields.setObjectName('inputs_no_fields')
ui_inputs_no_fields = ui_inputs_no_fields()
ui_inputs_no_fields.setupUi(dw_inputs_no_fields)
window.addDockWidget(Qt.RightDockWidgetArea, dw_inputs_no_fields)
# create docks for inputs - fields
dw_inputs_fields = QDockWidget()
dw_inputs_fields.setObjectName('_fields')
ui_inputs_fields = ui_inputs_fields()
ui_inputs_fields.setupUi(dw_inputs_fields)
window.addDockWidget(Qt.RightDockWidgetArea, dw_inputs_fields)
# create docks for widgets
dw_widgets = QDockWidget()
dw_widgets.setObjectName('widgets')
ui_widgets = ui_widgets()
ui_widgets.setupUi(dw_widgets)
window.addDockWidget(Qt.LeftDockWidgetArea, dw_widgets)
# create docks for views
dw_views = QDockWidget()
dw_views.setObjectName('views')
ui_views = ui_views()
ui_views.setupUi(dw_views)
window.addDockWidget(Qt.LeftDockWidgetArea, dw_views)
# create docks for containers - no tabs
dw_containers_no_tabs = QDockWidget()
dw_containers_no_tabs.setObjectName('containers_no_tabs')
ui_containers_no_tabs = ui_containers_no_tabs()
ui_containers_no_tabs.setupUi(dw_containers_no_tabs)
window.addDockWidget(Qt.LeftDockWidgetArea, dw_containers_no_tabs)
    # create docks for containers - tabs
dw_containers_tabs = QDockWidget()
dw_containers_tabs.setObjectName('containers')
ui_containers_tabs = ui_containers_tabs()
ui_containers_tabs.setupUi(dw_containers_tabs)
window.addDockWidget(Qt.LeftDockWidgetArea, dw_containers_tabs)
# tabify right docks
window.tabifyDockWidget(dw_buttons, dw_displays)
window.tabifyDockWidget(dw_displays, dw_inputs_fields)
window.tabifyDockWidget(dw_inputs_fields, dw_inputs_no_fields)
    # tabify left docks
window.tabifyDockWidget(dw_containers_no_tabs, dw_containers_tabs)
window.tabifyDockWidget(dw_containers_tabs, dw_widgets)
window.tabifyDockWidget(dw_widgets, dw_views)
# issues #9120, #9121 on Spyder
qstatusbar = QStatusBar()
qstatusbar.addWidget(QLabel('Issue Spyder #9120, #9121 - background not matching.'))
qstatusbar.addWidget(QPushButton('OK'))
window.setStatusBar(qstatusbar)
# auto quit after 2s when testing on travis-ci
if args.test:
QTimer.singleShot(2000, app.exit)
# run
read_settings(window, args.reset)
window.showMaximized()
# Save screenshots for differents displays and quit
if args.screenshots:
window.showFullScreen()
QTimer.singleShot(1000, lambda: create_screenshots(app, window, not args.no_dark))
app.exec_()
write_settings(window)
def create_screenshots(app, window, is_darkstyle):
"""Save screenshots for different application views and quit."""
from qtpy.QtCore import QCoreApplication
from qtpy.QtGui import QGuiApplication
from qtpy.QtWidgets import QDockWidget, QTabWidget
theme = 'dark' if is_darkstyle else 'normal'
print('\nCreating {} screenshots'.format(theme))
docks = window.findChildren(QDockWidget)
tabs = window.findChildren(QTabWidget)
widget_data = {
'containers_buttons.png': [
'Containers - No Tabs',
'Buttons',
],
'containers_tabs_displays.png': [
'Containers - Tabs',
'Displays',
],
'views_inputs_no_fields.png': [
'Views',
'Inputs - No Fields',
],
'widgets_inputs_fields.png': [
'Widgets',
'Inputs - Fields',
],
}
prefix = 'qdarkstyle_' if is_darkstyle else 'no_dark_'
screen = QGuiApplication.primaryScreen()
QCoreApplication.processEvents()
tab = [tab for tab in tabs if tab.count() >= 12][0]
tab.setCurrentIndex(11)
QCoreApplication.processEvents()
for fname_suffix, widgets in widget_data.items():
QCoreApplication.processEvents()
png_path = os.path.join(SCREENSHOTS_PATH, prefix + fname_suffix)
print('\t' + png_path)
for dockwidget_name in widgets:
dockwidget = [dw for dw in docks if dw.windowTitle() == dockwidget_name]
if dockwidget:
dockwidget = dockwidget[0]
dockwidget.show()
dockwidget.raise_()
QCoreApplication.processEvents()
dockwidget = None
QCoreApplication.processEvents()
pixmap = screen.grabWindow(window.winId())
img = pixmap.toImage()
img.save(png_path)
QCoreApplication.processEvents()
window.showNormal()
QCoreApplication.processEvents()
print('\n')
app.exit()
if __name__ == "__main__":
sys.exit(main())
|
"""
A state module designed to enforce load-balancing configurations for F5 Big-IP entities.
:maturity: develop
:platform: f5_bigip_11.6
"""
import salt.utils.json
# set up virtual function
def __virtual__():
"""
Only load if the bigip exec module is available in __salt__
"""
if "bigip.list_transaction" in __salt__:
return "bigip"
return (False, "bigip module could not be loaded")
def _load_result(response, ret):
"""
format the results of listing functions
"""
# were we able to connect?
if response["code"] is None:
ret["comment"] = response["content"]
# forbidden?
elif response["code"] == 401:
ret["comment"] = "401 Forbidden: Authentication required!"
# Not found?
elif response["code"] == 404:
ret["comment"] = response["content"]["message"]
# 200?
elif response["code"] == 200:
ret["result"] = True
ret["comment"] = (
"Listing Current Configuration Only. "
"Not action or changes occurred during the execution of this state."
)
ret["changes"] = response["content"]
# something bad
else:
ret["comment"] = response["content"]["message"]
return ret
def _strip_key(dictionary, keyword):
"""
    look for a certain key within a dictionary and nullify its contents, checking within nested
dictionaries and lists as well. Certain attributes such as "generation" will change even
when there were no changes made to the entity.
"""
for key, value in dictionary.items():
if key == keyword:
dictionary[key] = None
elif isinstance(value, dict):
_strip_key(value, keyword)
elif isinstance(value, list):
for item in value:
if isinstance(item, dict):
_strip_key(item, keyword)
return dictionary
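# Illustrative example (hypothetical data): stripping the volatile "generation" attribute so
# that otherwise-identical configurations compare equal.
#   _strip_key({'name': 'node1', 'generation': 42, 'members': [{'generation': 7}]}, 'generation')
#   # -> {'name': 'node1', 'generation': None, 'members': [{'generation': None}]}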
def _check_for_changes(entity_type, ret, existing, modified):
"""
take an existing entity and a modified entity and check for changes.
"""
ret["result"] = True
# were there any changes? generation always changes, remove it.
if isinstance(existing, dict) and isinstance(modified, dict):
if "generation" in modified["content"].keys():
del modified["content"]["generation"]
if "generation" in existing["content"].keys():
del existing["content"]["generation"]
if modified["content"] == existing["content"]:
ret[
"comment"
] = "{entity_type} is currently enforced to the desired state. No changes made.".format(
entity_type=entity_type
)
else:
ret["comment"] = (
"{entity_type} was enforced to the desired state. Note: Only parameters specified "
"were enforced. See changes for details.".format(
entity_type=entity_type
)
)
ret["changes"]["old"] = existing["content"]
ret["changes"]["new"] = modified["content"]
else:
if modified == existing:
ret[
"comment"
] = "{entity_type} is currently enforced to the desired state. No changes made.".format(
entity_type=entity_type
)
else:
ret["comment"] = (
"{entity_type} was enforced to the desired state. Note: Only parameters specified "
"were enforced. See changes for details.".format(
entity_type=entity_type
)
)
ret["changes"]["old"] = existing
ret["changes"]["new"] = modified
return ret
def _test_output(ret, action, params):
"""
For testing just output what the state will attempt to do without actually doing it.
"""
if action == "list":
ret[
"comment"
] += "The list action will just list an entity and will make no changes.\n"
elif action == "create" or action == "add":
ret[
"comment"
] += "The create action will attempt to create an entity if it does not already exist.\n"
elif action == "delete":
ret[
"comment"
] += "The delete action will attempt to delete an existing entity if it exists.\n"
elif action == "manage":
ret["comment"] += (
"The manage action will create a new entity if it does not exist. If it does exist, it will be enforced"
"to the desired state.\n"
)
elif action == "modify":
ret[
"comment"
] += "The modify action will attempt to modify an existing entity only if it exists.\n"
ret["comment"] += "An iControl REST Request will be made using the parameters:\n"
ret["comment"] += salt.utils.json.dumps(params, indent=4)
ret["changes"] = {}
# Return ``None`` when running with ``test=true``.
ret["result"] = None
return ret
def list_node(hostname, username, password, name):
"""
A function to connect to a bigip device and list a specific node.
hostname
The host/address of the bigip device
username
The iControl REST username
password
The iControl REST password
name
The name of the node to list.
"""
ret = {"name": name, "changes": {}, "result": False, "comment": ""}
if __opts__["test"]:
return _test_output(
ret,
"list",
params={
"hostname": hostname,
"username": username,
"password": password,
"name": name,
},
)
response = __salt__["bigip.list_node"](hostname, username, password, name)
return _load_result(response, ret)
def create_node(hostname, username, password, name, address):
"""
Create a new node if it does not already exist.
hostname
The host/address of the bigip device
username
The iControl REST username
password
The iControl REST password
name
The name of the node to create
address
The address of the node
"""
ret = {"name": name, "changes": {}, "result": False, "comment": ""}
if __opts__["test"]:
return _test_output(
ret,
"create",
params={
"hostname": hostname,
"username": username,
"password": password,
"name": name,
"address": address,
},
)
# is this node currently configured?
existing = __salt__["bigip.list_node"](hostname, username, password, name)
# if it exists
if existing["code"] == 200:
ret["result"] = True
ret["comment"] = "A node by this name currently exists. No change made."
# if it doesn't exist
elif existing["code"] == 404:
response = __salt__["bigip.create_node"](
hostname, username, password, name, address
)
ret["result"] = True
ret["changes"]["old"] = {}
ret["changes"]["new"] = response["content"]
ret["comment"] = "Node was successfully created."
# else something else was returned
else:
ret = _load_result(existing, ret)
return ret
def manage_node(
hostname,
username,
password,
name,
address,
connection_limit=None,
description=None,
dynamic_ratio=None,
logging=None,
monitor=None,
rate_limit=None,
ratio=None,
session=None,
node_state=None,
):
"""
Manages a node of a given bigip device. If the node does not exist it will be created, otherwise,
only the properties which are different than the existing will be updated.
hostname
The host/address of the bigip device
username
The iControl REST username
password
The iControl REST password
name
The name of the node to manage.
address
The address of the node
connection_limit
[integer]
description
[string]
    dynamic_ratio
        [integer]
logging
[enabled | disabled]
monitor
[[name] | none | default]
rate_limit
[integer]
ratio
[integer]
session
[user-enabled | user-disabled]
node_state (state)
[user-down | user-up ]
"""
ret = {"name": name, "changes": {}, "result": False, "comment": ""}
if __opts__["test"]:
return _test_output(
ret,
"manage",
params={
"hostname": hostname,
"username": username,
"password": password,
"name": name,
"address": address,
"connection_limit": connection_limit,
"description": description,
"dynamic_ratio": dynamic_ratio,
"logging": logging,
"monitor": monitor,
"rate_limit": rate_limit,
"ratio": ratio,
"session": session,
"state:": node_state,
},
)
# is this node currently configured?
existing = __salt__["bigip.list_node"](hostname, username, password, name)
# if it exists by name
if existing["code"] == 200:
# ensure the address is the same, we don't want to modify a different node than what
# we think we are managing
if existing["content"]["address"] != address:
ret["result"] = False
ret[
"comment"
] = "A node with this name exists but the address does not match."
modified = __salt__["bigip.modify_node"](
hostname=hostname,
username=username,
password=password,
name=name,
connection_limit=connection_limit,
description=description,
dynamic_ratio=dynamic_ratio,
logging=logging,
monitor=monitor,
rate_limit=rate_limit,
ratio=ratio,
session=session,
state=node_state,
)
# was the modification successful?
if modified["code"] == 200:
ret = _check_for_changes("Node", ret, existing, modified)
else:
ret = _load_result(modified, ret)
# not found, attempt to create it
elif existing["code"] == 404:
new = __salt__["bigip.create_node"](hostname, username, password, name, address)
# were we able to create it?
if new["code"] == 200:
# try modification
modified = __salt__["bigip.modify_node"](
hostname=hostname,
username=username,
password=password,
name=name,
connection_limit=connection_limit,
description=description,
dynamic_ratio=dynamic_ratio,
logging=logging,
monitor=monitor,
rate_limit=rate_limit,
ratio=ratio,
session=session,
state=node_state,
)
# was the modification successful?
if modified["code"] == 200:
ret["result"] = True
ret["comment"] = (
"Node was created and enforced to the desired state. Note: Only parameters specified "
"were enforced. See changes for details."
)
ret["changes"]["old"] = {}
ret["changes"]["new"] = modified["content"]
# roll it back
else:
deleted = __salt__["bigip.delete_node"](
hostname, username, password, name
)
# did we get rid of it?
if deleted["code"] == 200:
ret["comment"] = (
"Node was successfully created but an error occurred during modification. "
"The creation of the node has been rolled back. Message is as follows:\n"
"{message}".format(message=modified["content"]["message"])
)
# something bad happened
else:
ret["comment"] = (
"Node was successfully created but an error occurred during modification. "
"The creation of the node was not able to be rolled back. Message is as follows:"
"\n {message}\n{message_two}".format(
message=modified["content"]["message"],
message_two=deleted["content"]["message"],
)
)
# unable to create it
else:
ret = _load_result(new, ret)
# an error occurred
else:
ret = _load_result(existing, ret)
return ret
def modify_node(
hostname,
username,
password,
name,
connection_limit=None,
description=None,
dynamic_ratio=None,
logging=None,
monitor=None,
rate_limit=None,
ratio=None,
session=None,
node_state=None,
):
"""
Modify an existing node. Only a node which already exists will be modified and
only the parameters specified will be enforced.
hostname
The host/address of the bigip device
username
The iControl REST username
password
The iControl REST password
name
The name of the node to modify
connection_limit
[integer]
description
[string]
dynamic_ratio
[integer]
logging
[enabled | disabled]
monitor
[[name] | none | default]
rate_limit
[integer]
ratio
[integer]
session
[user-enabled | user-disabled]
node_state (state)
[user-down | user-up ]
"""
ret = {"name": name, "changes": {}, "result": False, "comment": ""}
if __opts__["test"]:
return _test_output(
ret,
"modify",
params={
"hostname": hostname,
"username": username,
"password": password,
"name": name,
"connection_limit": connection_limit,
"description": description,
"dynamic_ratio": dynamic_ratio,
"logging": logging,
"monitor": monitor,
"rate_limit": rate_limit,
"ratio": ratio,
"session": session,
"state:": node_state,
},
)
# is this node currently configured?
existing = __salt__["bigip.list_node"](hostname, username, password, name)
# if it exists by name
if existing["code"] == 200:
modified = __salt__["bigip.modify_node"](
hostname=hostname,
username=username,
password=password,
name=name,
connection_limit=connection_limit,
description=description,
dynamic_ratio=dynamic_ratio,
logging=logging,
monitor=monitor,
rate_limit=rate_limit,
ratio=ratio,
session=session,
state=node_state,
)
# was the modification successful?
if modified["code"] == 200:
ret = _check_for_changes("Node", ret, existing, modified)
else:
ret = _load_result(modified, ret)
# not found, attempt to create it
elif existing["code"] == 404:
ret["comment"] = "A node with this name was not found."
# an error occurred
else:
ret = _load_result(existing, ret)
return ret
def delete_node(hostname, username, password, name):
"""
Delete an existing node.
hostname
The host/address of the bigip device
username
The iControl REST username
password
The iControl REST password
name
The name of the node which will be deleted.
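    Example state (illustrative only; all values shown are placeholders)::

        remove_web_node:
          bigip.delete_node:
            - hostname: 10.0.0.1
            - username: admin
            - password: secret
            - name: web-node-01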
"""
ret = {"name": name, "changes": {}, "result": False, "comment": ""}
if __opts__["test"]:
return _test_output(
ret,
"delete",
params={
"hostname": hostname,
"username": username,
"password": password,
"name": name,
},
)
# is this node currently configured?
existing = __salt__["bigip.list_node"](hostname, username, password, name)
# if it exists by name
if existing["code"] == 200:
deleted = __salt__["bigip.delete_node"](hostname, username, password, name)
# did we get rid of it?
if deleted["code"] == 200:
ret["result"] = True
ret["comment"] = "Node was successfully deleted."
ret["changes"]["old"] = existing["content"]
ret["changes"]["new"] = {}
# something bad happened
else:
            ret = _load_result(deleted, ret)
# not found
elif existing["code"] == 404:
ret["result"] = True
ret["comment"] = "This node already does not exist. No changes made."
ret["changes"]["old"] = {}
ret["changes"]["new"] = {}
else:
ret = _load_result(existing, ret)
return ret
def list_pool(hostname, username, password, name):
"""
A function to connect to a bigip device and list a specific pool.
hostname
The host/address of the bigip device
username
The iControl REST username
password
The iControl REST password
name
The name of the pool to list.
"""
ret = {"name": name, "changes": {}, "result": False, "comment": ""}
if __opts__["test"]:
return _test_output(
ret,
"list",
params={
"hostname": hostname,
"username": username,
"password": password,
"name": name,
},
)
response = __salt__["bigip.list_pool"](hostname, username, password, name)
return _load_result(response, ret)
def create_pool(
hostname,
username,
password,
name,
members=None,
allow_nat=None,
allow_snat=None,
description=None,
gateway_failsafe_device=None,
ignore_persisted_weight=None,
ip_tos_to_client=None,
ip_tos_to_server=None,
link_qos_to_client=None,
link_qos_to_server=None,
load_balancing_mode=None,
min_active_members=None,
min_up_members=None,
min_up_members_action=None,
min_up_members_checking=None,
monitor=None,
profiles=None,
queue_depth_limit=None,
queue_on_connection_limit=None,
queue_time_limit=None,
reselect_tries=None,
service_down_action=None,
slow_ramp_time=None,
):
"""
    Create a new pool if it does not already exist.
hostname
The host/address of the bigip device
username
The iControl REST username
password
The iControl REST password
name
The name of the pool to create
members
List of members to be added to the pool
allow_nat
[yes | no]
allow_snat
[yes | no]
description
[string]
gateway_failsafe_device
[string]
ignore_persisted_weight
[enabled | disabled]
ip_tos_to_client
[pass-through | [integer]]
ip_tos_to_server
[pass-through | [integer]]
link_qos_to_client
[pass-through | [integer]]
link_qos_to_server
[pass-through | [integer]]
load_balancing_mode
[dynamic-ratio-member | dynamic-ratio-node |
fastest-app-response | fastest-node |
least-connections-members |
least-connections-node |
least-sessions |
observed-member | observed-node |
predictive-member | predictive-node |
ratio-least-connections-member |
ratio-least-connections-node |
ratio-member | ratio-node | ratio-session |
round-robin | weighted-least-connections-member |
weighted-least-connections-node]
min_active_members
[integer]
min_up_members
[integer]
min_up_members_action
[failover | reboot | restart-all]
min_up_members_checking
[enabled | disabled]
monitor
[name]
profiles
[none | profile_name]
queue_depth_limit
[integer]
queue_on_connection_limit
[enabled | disabled]
queue_time_limit
[integer]
reselect_tries
[integer]
service_down_action
[drop | none | reselect | reset]
slow_ramp_time
[integer]
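    Example state (a minimal sketch; every value shown is a placeholder and only
    a subset of the available parameters is used)::

        create_web_pool:
          bigip.create_pool:
            - hostname: 10.0.0.1
            - username: admin
            - password: secret
            - name: web-pool
            - load_balancing_mode: round-robin
            - monitor: http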
"""
ret = {"name": name, "changes": {}, "result": False, "comment": ""}
if __opts__["test"]:
return _test_output(
ret,
"create",
params={
"hostname": hostname,
"username": username,
"password": password,
"name": name,
"members": members,
"allow_nat": allow_nat,
"allow_snat": allow_snat,
"description": description,
"gateway_failsafe_device": gateway_failsafe_device,
"ignore_persisted_weight": ignore_persisted_weight,
"ip_tos_client:": ip_tos_to_client,
"ip_tos_server": ip_tos_to_server,
"link_qos_to_client": link_qos_to_client,
"link_qos_to_server": link_qos_to_server,
"load_balancing_mode": load_balancing_mode,
"min_active_members": min_active_members,
"min_up_members": min_up_members,
"min_up_members_checking": min_up_members_checking,
"monitor": monitor,
"profiles": profiles,
"queue_depth_limit": queue_depth_limit,
"queue_on_connection_limit": queue_on_connection_limit,
"queue_time_limit": queue_time_limit,
"reselect_tries": reselect_tries,
"service_down_action": service_down_action,
"slow_ramp_time": slow_ramp_time,
},
)
# is this pool currently configured?
existing = __salt__["bigip.list_pool"](hostname, username, password, name)
# if it exists
if existing["code"] == 200:
ret["result"] = True
ret["comment"] = "A pool by this name currently exists. No change made."
# if it doesn't exist
elif existing["code"] == 404:
response = __salt__["bigip.create_pool"](
hostname=hostname,
username=username,
password=password,
name=name,
members=members,
allow_nat=allow_nat,
allow_snat=allow_snat,
description=description,
gateway_failsafe_device=gateway_failsafe_device,
ignore_persisted_weight=ignore_persisted_weight,
ip_tos_to_client=ip_tos_to_client,
ip_tos_to_server=ip_tos_to_server,
link_qos_to_client=link_qos_to_client,
link_qos_to_server=link_qos_to_server,
load_balancing_mode=load_balancing_mode,
min_active_members=min_active_members,
min_up_members=min_up_members,
min_up_members_action=min_up_members_action,
min_up_members_checking=min_up_members_checking,
monitor=monitor,
profiles=profiles,
queue_depth_limit=queue_depth_limit,
queue_on_connection_limit=queue_on_connection_limit,
queue_time_limit=queue_time_limit,
reselect_tries=reselect_tries,
service_down_action=service_down_action,
slow_ramp_time=slow_ramp_time,
)
if response["code"] == 200:
ret["result"] = True
ret["changes"]["old"] = {}
ret["changes"]["new"] = response["content"]
ret["comment"] = "Pool was successfully created."
else:
            ret = _load_result(response, ret)
# else something else was returned
else:
ret = _load_result(existing, ret)
return ret
def manage_pool(
hostname,
username,
password,
name,
allow_nat=None,
allow_snat=None,
description=None,
gateway_failsafe_device=None,
ignore_persisted_weight=None,
ip_tos_to_client=None,
ip_tos_to_server=None,
link_qos_to_client=None,
link_qos_to_server=None,
load_balancing_mode=None,
min_active_members=None,
min_up_members=None,
min_up_members_action=None,
min_up_members_checking=None,
monitor=None,
profiles=None,
queue_depth_limit=None,
queue_on_connection_limit=None,
queue_time_limit=None,
reselect_tries=None,
service_down_action=None,
slow_ramp_time=None,
):
"""
Create a new pool if it does not already exist. Pool members are managed separately. Only the
parameters specified are enforced.
hostname
The host/address of the bigip device
username
The iControl REST username
password
The iControl REST password
name
The name of the pool to create
allow_nat
[yes | no]
allow_snat
[yes | no]
description
[string]
gateway_failsafe_device
[string]
ignore_persisted_weight
[enabled | disabled]
ip_tos_to_client
[pass-through | [integer]]
ip_tos_to_server
[pass-through | [integer]]
link_qos_to_client
[pass-through | [integer]]
link_qos_to_server
[pass-through | [integer]]
load_balancing_mode
[dynamic-ratio-member | dynamic-ratio-node |
fastest-app-response | fastest-node |
least-connections-members |
least-connections-node |
least-sessions |
observed-member | observed-node |
predictive-member | predictive-node |
ratio-least-connections-member |
ratio-least-connections-node |
ratio-member | ratio-node | ratio-session |
round-robin | weighted-least-connections-member |
weighted-least-connections-node]
min_active_members
[integer]
min_up_members
[integer]
min_up_members_action
[failover | reboot | restart-all]
min_up_members_checking
[enabled | disabled]
monitor
[name]
profiles
[none | profile_name]
queue_depth_limit
[integer]
queue_on_connection_limit
[enabled | disabled]
queue_time_limit
[integer]
reselect_tries
[integer]
service_down_action
[drop | none | reselect | reset]
slow_ramp_time
[integer]
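    Example state (placeholder values; an existing pool is modified in place,
    otherwise one is created, and only the listed parameters are enforced)::

        manage_web_pool:
          bigip.manage_pool:
            - hostname: 10.0.0.1
            - username: admin
            - password: secret
            - name: web-pool
            - allow_snat: 'yes'
            - load_balancing_mode: least-connections-member
            - service_down_action: reselect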
"""
ret = {"name": name, "changes": {}, "result": False, "comment": ""}
if __opts__["test"]:
return _test_output(
ret,
"manage",
params={
"hostname": hostname,
"username": username,
"password": password,
"name": name,
"allow_nat": allow_nat,
"allow_snat": allow_snat,
"description": description,
"gateway_failsafe_device": gateway_failsafe_device,
"ignore_persisted_weight": ignore_persisted_weight,
"ip_tos_client:": ip_tos_to_client,
"ip_tos_server": ip_tos_to_server,
"link_qos_to_client": link_qos_to_client,
"link_qos_to_server": link_qos_to_server,
"load_balancing_mode": load_balancing_mode,
"min_active_members": min_active_members,
"min_up_members": min_up_members,
"min_up_members_checking": min_up_members_checking,
"monitor": monitor,
"profiles": profiles,
"queue_depth_limit": queue_depth_limit,
"queue_on_connection_limit": queue_on_connection_limit,
"queue_time_limit": queue_time_limit,
"reselect_tries": reselect_tries,
"service_down_action": service_down_action,
"slow_ramp_time": slow_ramp_time,
},
)
# is this pool currently configured?
existing = __salt__["bigip.list_pool"](hostname, username, password, name)
# if it exists
if existing["code"] == 200:
modified = __salt__["bigip.modify_pool"](
hostname=hostname,
username=username,
password=password,
name=name,
allow_nat=allow_nat,
allow_snat=allow_snat,
description=description,
gateway_failsafe_device=gateway_failsafe_device,
ignore_persisted_weight=ignore_persisted_weight,
ip_tos_to_client=ip_tos_to_client,
ip_tos_to_server=ip_tos_to_server,
link_qos_to_client=link_qos_to_client,
link_qos_to_server=link_qos_to_server,
load_balancing_mode=load_balancing_mode,
min_active_members=min_active_members,
min_up_members=min_up_members,
min_up_members_action=min_up_members_action,
min_up_members_checking=min_up_members_checking,
monitor=monitor,
profiles=profiles,
queue_depth_limit=queue_depth_limit,
queue_on_connection_limit=queue_on_connection_limit,
queue_time_limit=queue_time_limit,
reselect_tries=reselect_tries,
service_down_action=service_down_action,
slow_ramp_time=slow_ramp_time,
)
# was the modification successful?
if modified["code"] == 200:
# remove member listings and self-links
del existing["content"]["membersReference"]
del modified["content"]["membersReference"]
del existing["content"]["selfLink"]
del modified["content"]["selfLink"]
ret = _check_for_changes("Pool", ret, existing, modified)
else:
ret = _load_result(modified, ret)
# if it doesn't exist
elif existing["code"] == 404:
new = __salt__["bigip.create_pool"](
hostname=hostname,
username=username,
password=password,
name=name,
allow_nat=allow_nat,
allow_snat=allow_snat,
description=description,
gateway_failsafe_device=gateway_failsafe_device,
ignore_persisted_weight=ignore_persisted_weight,
ip_tos_to_client=ip_tos_to_client,
ip_tos_to_server=ip_tos_to_server,
link_qos_to_client=link_qos_to_client,
link_qos_to_server=link_qos_to_server,
load_balancing_mode=load_balancing_mode,
min_active_members=min_active_members,
min_up_members=min_up_members,
min_up_members_action=min_up_members_action,
min_up_members_checking=min_up_members_checking,
monitor=monitor,
profiles=profiles,
queue_depth_limit=queue_depth_limit,
queue_on_connection_limit=queue_on_connection_limit,
queue_time_limit=queue_time_limit,
reselect_tries=reselect_tries,
service_down_action=service_down_action,
slow_ramp_time=slow_ramp_time,
)
# were we able to create it?
if new["code"] == 200:
ret["result"] = True
ret["comment"] = (
"Pool was created and enforced to the desired state. Note: Only parameters specified "
"were enforced. See changes for details."
)
ret["changes"]["old"] = {}
ret["changes"]["new"] = new["content"]
# unable to create it
else:
ret = _load_result(new, ret)
# else something else was returned
else:
ret = _load_result(existing, ret)
return ret
def modify_pool(
hostname,
username,
password,
name,
allow_nat=None,
allow_snat=None,
description=None,
gateway_failsafe_device=None,
ignore_persisted_weight=None,
ip_tos_to_client=None,
ip_tos_to_server=None,
link_qos_to_client=None,
link_qos_to_server=None,
load_balancing_mode=None,
min_active_members=None,
min_up_members=None,
min_up_members_action=None,
min_up_members_checking=None,
monitor=None,
profiles=None,
queue_depth_limit=None,
queue_on_connection_limit=None,
queue_time_limit=None,
reselect_tries=None,
service_down_action=None,
slow_ramp_time=None,
):
"""
Modify an existing pool. Pool members are managed separately. Only the
parameters specified are enforced.
hostname
The host/address of the bigip device
username
The iControl REST username
password
The iControl REST password
name
The name of the pool to create
allow_nat
[yes | no]
allow_snat
[yes | no]
description
[string]
gateway_failsafe_device
[string]
ignore_persisted_weight
[enabled | disabled]
ip_tos_to_client
[pass-through | [integer]]
ip_tos_to_server
[pass-through | [integer]]
link_qos_to_client
[pass-through | [integer]]
link_qos_to_server
[pass-through | [integer]]
load_balancing_mode
[dynamic-ratio-member | dynamic-ratio-node |
fastest-app-response | fastest-node |
least-connections-members |
least-connections-node |
least-sessions |
observed-member | observed-node |
predictive-member | predictive-node |
ratio-least-connections-member |
ratio-least-connections-node |
ratio-member | ratio-node | ratio-session |
round-robin | weighted-least-connections-member |
weighted-least-connections-node]
min_active_members
[integer]
min_up_members
[integer]
min_up_members_action
[failover | reboot | restart-all]
min_up_members_checking
[enabled | disabled]
monitor
[name]
profiles
[none | profile_name]
queue_depth_limit
[integer]
queue_on_connection_limit
[enabled | disabled]
queue_time_limit
[integer]
reselect_tries
[integer]
service_down_action
[drop | none | reselect | reset]
slow_ramp_time
[integer]
"""
ret = {"name": name, "changes": {}, "result": False, "comment": ""}
if __opts__["test"]:
return _test_output(
ret,
"modify",
params={
"hostname": hostname,
"username": username,
"password": password,
"name": name,
"allow_nat": allow_nat,
"allow_snat": allow_snat,
"description": description,
"gateway_failsafe_device": gateway_failsafe_device,
"ignore_persisted_weight": ignore_persisted_weight,
"ip_tos_client:": ip_tos_to_client,
"ip_tos_server": ip_tos_to_server,
"link_qos_to_client": link_qos_to_client,
"link_qos_to_server": link_qos_to_server,
"load_balancing_mode": load_balancing_mode,
"min_active_members": min_active_members,
"min_up_members": min_up_members,
"min_up_members_checking": min_up_members_checking,
"monitor": monitor,
"profiles": profiles,
"queue_depth_limit": queue_depth_limit,
"queue_on_connection_limit": queue_on_connection_limit,
"queue_time_limit": queue_time_limit,
"reselect_tries": reselect_tries,
"service_down_action": service_down_action,
"slow_ramp_time": slow_ramp_time,
},
)
# is this pool currently configured?
existing = __salt__["bigip.list_pool"](hostname, username, password, name)
# if it exists
if existing["code"] == 200:
modified = __salt__["bigip.modify_pool"](
hostname=hostname,
username=username,
password=password,
name=name,
allow_nat=allow_nat,
allow_snat=allow_snat,
description=description,
gateway_failsafe_device=gateway_failsafe_device,
ignore_persisted_weight=ignore_persisted_weight,
ip_tos_to_client=ip_tos_to_client,
ip_tos_to_server=ip_tos_to_server,
link_qos_to_client=link_qos_to_client,
link_qos_to_server=link_qos_to_server,
load_balancing_mode=load_balancing_mode,
min_active_members=min_active_members,
min_up_members=min_up_members,
min_up_members_action=min_up_members_action,
min_up_members_checking=min_up_members_checking,
monitor=monitor,
profiles=profiles,
queue_depth_limit=queue_depth_limit,
queue_on_connection_limit=queue_on_connection_limit,
queue_time_limit=queue_time_limit,
reselect_tries=reselect_tries,
service_down_action=service_down_action,
slow_ramp_time=slow_ramp_time,
)
# was the modification successful?
if modified["code"] == 200:
# remove member listings and self-links
del existing["content"]["membersReference"]
del modified["content"]["membersReference"]
del existing["content"]["selfLink"]
del modified["content"]["selfLink"]
ret = _check_for_changes("Pool", ret, existing, modified)
else:
ret = _load_result(modified, ret)
# if it doesn't exist
elif existing["code"] == 404:
ret["comment"] = "A pool with this name was not found."
# else something else was returned
else:
ret = _load_result(existing, ret)
return ret
def delete_pool(hostname, username, password, name):
"""
Delete an existing pool.
hostname
The host/address of the bigip device
username
The iControl REST username
password
The iControl REST password
name
The name of the pool which will be deleted
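    Example state (hostname, credentials, and the pool name are placeholders)::

        remove_web_pool:
          bigip.delete_pool:
            - hostname: 10.0.0.1
            - username: admin
            - password: secret
            - name: web-pool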
"""
ret = {"name": name, "changes": {}, "result": False, "comment": ""}
if __opts__["test"]:
return _test_output(
ret,
"delete",
params={
"hostname": hostname,
"username": username,
"password": password,
"name": name,
},
)
# is this pool currently configured?
existing = __salt__["bigip.list_pool"](hostname, username, password, name)
# if it exists by name
if existing["code"] == 200:
deleted = __salt__["bigip.delete_pool"](hostname, username, password, name)
# did we get rid of it?
if deleted["code"] == 200:
ret["result"] = True
ret["comment"] = "Pool was successfully deleted."
ret["changes"]["old"] = existing["content"]
ret["changes"]["new"] = {}
# something bad happened
else:
ret = _load_result(deleted, ret)
# not found
elif existing["code"] == 404:
ret["result"] = True
ret["comment"] = "This pool already does not exist. No changes made."
ret["changes"]["old"] = {}
ret["changes"]["new"] = {}
else:
ret = _load_result(existing, ret)
return ret
def manage_pool_members(hostname, username, password, name, members):
"""
Manage the members of an existing pool. This function replaces all current pool members.
Only the parameters specified are enforced.
hostname
The host/address of the bigip device
username
The iControl REST username
password
The iControl REST password
name
The name of the pool to modify
members
list of pool members to manage.
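    Example state (a sketch only; all values are placeholders, and the
    ``name``/``address`` keys with ``address:port`` member names are an
    assumption about what the underlying ``bigip.replace_pool_members``
    execution function expects)::

        manage_web_pool_members:
          bigip.manage_pool_members:
            - hostname: 10.0.0.1
            - username: admin
            - password: secret
            - name: web-pool
            - members:
              - name: '10.1.1.10:80'
                address: 10.1.1.10
              - name: '10.1.1.11:80'
                address: 10.1.1.11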
"""
ret = {"name": name, "changes": {}, "result": False, "comment": ""}
if __opts__["test"]:
return _test_output(
ret,
"manage",
params={
"hostname": hostname,
"username": username,
"password": password,
"name": name,
"members": members,
},
)
# is this pool currently configured?
existing = __salt__["bigip.list_pool"](hostname, username, password, name)
# if it exists
if existing["code"] == 200:
# what are the current members?
current_members = existing["content"]["membersReference"]["items"]
modified = __salt__["bigip.replace_pool_members"](
hostname, username, password, name, members
)
# was the modification successful?
if modified["code"] == 200:
# re-list the pool with new membership
new_listing = __salt__["bigip.list_pool"](
hostname, username, password, name
)
# just in case something happened...
if new_listing["code"] != 200:
ret = _load_result(new_listing, ret)
ret["comment"] = (
"modification of the pool was successful but an error occurred upon retrieving new"
" listing."
)
return ret
new_members = new_listing["content"]["membersReference"]["items"]
            # remove the generation keys before comparing membership
for current_member in current_members:
del current_member["generation"]
for new_member in new_members:
del new_member["generation"]
# anything changed?
ret = _check_for_changes(
"Pool Membership", ret, current_members, new_members
)
else:
ret = _load_result(modified, ret)
    # pool does not exist
elif existing["code"] == 404:
ret["comment"] = "A pool with this name was not found."
else:
ret = _load_result(existing, ret)
return ret
def add_pool_member(hostname, username, password, name, member):
"""
A function to connect to a bigip device and add a new member to an existing pool.
hostname
The host/address of the bigip device
username
The iControl REST username
password
The iControl REST password
name
The name of the pool to modify
member
The member to add to the pool
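    Example state (a minimal sketch; the values and the ``name``/``address``
    member keys are illustrative placeholders)::

        add_web01_to_pool:
          bigip.add_pool_member:
            - hostname: 10.0.0.1
            - username: admin
            - password: secret
            - name: web-pool
            - member:
                name: '10.1.1.12:80'
                address: 10.1.1.12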
"""
ret = {"name": name, "changes": {}, "result": False, "comment": ""}
if __opts__["test"]:
return _test_output(
ret,
"add",
params={
"hostname": hostname,
"username": username,
"password": password,
"name": name,
"members": member,
},
)
# is this pool member currently configured?
existing_pool = __salt__["bigip.list_pool"](hostname, username, password, name)
if existing_pool["code"] == 200:
# for some reason iControl REST doesn't support listing a single pool member.
        # the response from GET for listing a member will return 200 even if it doesn't exist.
# because of this we have to do some rather "unnecessary" searching within a pool.
# what are the current members?
current_members = existing_pool["content"]["membersReference"]["items"]
# loop through them
exists = False
for current_member in current_members:
if current_member["name"] == member["name"]:
exists = True
break
if exists:
ret["result"] = True
ret[
"comment"
] = "Member: {name} already exists within this pool. No changes made.".format(
name=member["name"]
)
ret["changes"]["old"] = {}
ret["changes"]["new"] = {}
else:
new_member = __salt__["bigip.add_pool_member"](
hostname, username, password, name, member
)
if new_member["code"] == 200:
ret["result"] = True
ret[
"comment"
] = "Member: {name} has been successfully added to the pool.".format(
name=member["name"]
)
ret["changes"]["old"] = {}
# look up the member again...
pool_listing = __salt__["bigip.list_pool"](
hostname, username, password, name
)
if pool_listing["code"] != 200:
ret = _load_result(new_member, ret)
return ret
members = pool_listing["content"]["membersReference"]["items"]
                # loop through the pool's members to find the one we just added
                added_member = {}
                for current_member in members:
                    if current_member["name"] == member["name"]:
                        added_member = current_member
                        break
ret["changes"]["new"] = added_member
# member wasn't added
else:
ret = _load_result(new_member, ret)
    # pool does not exist
elif existing_pool["code"] == 404:
ret["comment"] = "A pool with this name was not found."
else:
ret = _load_result(existing_pool, ret)
return ret
def modify_pool_member(
hostname,
username,
password,
name,
member,
connection_limit=None,
description=None,
dynamic_ratio=None,
inherit_profile=None,
logging=None,
monitor=None,
priority_group=None,
profiles=None,
rate_limit=None,
ratio=None,
session=None,
member_state=None,
):
"""
A function to connect to a bigip device and modify a member of an existing pool.
hostname
The host/address of the bigip device
username
The iControl REST username
password
The iControl REST password
name
The name of the pool to modify
member
        The member to modify
connection_limit
[integer]
description
[string]
dynamic_ratio
[integer]
inherit_profile
[enabled | disabled]
logging
[enabled | disabled]
monitor
[name]
priority_group
[integer]
profiles
[none | profile_name]
rate_limit
[integer]
ratio
[integer]
session
[user-enabled | user-disabled]
member_state (state)
[ user-up | user-down ]
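    Example state (placeholder values; ``member`` is the existing member name,
    typically in ``address:port`` form)::

        drain_web01:
          bigip.modify_pool_member:
            - hostname: 10.0.0.1
            - username: admin
            - password: secret
            - name: web-pool
            - member: '10.1.1.12:80'
            - session: user-disabled
            - member_state: user-down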
"""
ret = {"name": name, "changes": {}, "result": False, "comment": ""}
if __opts__["test"]:
return _test_output(
ret,
"modify",
params={
"hostname": hostname,
"username": username,
"password": password,
"name": name,
"members": member,
},
)
# is this pool member currently configured?
existing_pool = __salt__["bigip.list_pool"](hostname, username, password, name)
if existing_pool["code"] == 200:
# for some reason iControl REST doesn't support listing a single pool member.
        # the response from GET for listing a member will return 200 even if it doesn't exist.
# because of this we have to do some rather "unnecessary" searching within a pool.
# what are the current members?
current_members = existing_pool["content"]["membersReference"]["items"]
# loop through them
exists = False
for current_member in current_members:
if current_member["name"] == member:
exists = True
existing_member = current_member
break
if exists:
# modify the pool member
modified = __salt__["bigip.modify_pool_member"](
hostname=hostname,
username=username,
password=password,
name=name,
member=member,
connection_limit=connection_limit,
description=description,
dynamic_ratio=dynamic_ratio,
inherit_profile=inherit_profile,
logging=logging,
monitor=monitor,
priority_group=priority_group,
profiles=profiles,
rate_limit=rate_limit,
ratio=ratio,
session=session,
state=member_state,
)
# re-list the pool
new_pool = __salt__["bigip.list_pool"](hostname, username, password, name)
if modified["code"] == 200 and modified["code"] == 200:
# what are the new members?
new_members = new_pool["content"]["membersReference"]["items"]
                # loop through them to find the member that was modified
                modified_member = existing_member
                for new_member in new_members:
                    if new_member["name"] == member:
                        modified_member = new_member
                        break
# check for changes
old = {"content": existing_member}
new = {"content": modified_member}
ret = _check_for_changes(
"Pool Member: {member}".format(member=member), ret, old, new
)
else:
ret = _load_result(modified, ret)
        else:
            ret[
                "comment"
            ] = "Member: {name} does not exist within this pool. No changes made.".format(
                name=member
            )
    # pool does not exist
elif existing_pool["code"] == 404:
ret["comment"] = "A pool with this name was not found."
else:
ret = _load_result(existing_pool, ret)
return ret
def delete_pool_member(hostname, username, password, name, member):
"""
Delete an existing pool member.
hostname
The host/address of the bigip device
username
The iControl REST username
password
The iControl REST password
name
The name of the pool to be modified
member
The name of the member to delete from the pool
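    Example state (all values shown are placeholders)::

        remove_web01_from_pool:
          bigip.delete_pool_member:
            - hostname: 10.0.0.1
            - username: admin
            - password: secret
            - name: web-pool
            - member: '10.1.1.12:80'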
"""
ret = {"name": name, "changes": {}, "result": False, "comment": ""}
if __opts__["test"]:
return _test_output(
ret,
"delete",
params={
"hostname": hostname,
"username": username,
"password": password,
"name": name,
"members": member,
},
)
# is this pool currently configured?
existing = __salt__["bigip.list_pool"](hostname, username, password, name)
# if it exists by name
if existing["code"] == 200:
# what are the current members?
current_members = existing["content"]["membersReference"]["items"]
# loop through them
exists = False
for current_member in current_members:
if current_member["name"] == member:
exists = True
existing_member = current_member
break
if exists:
deleted = __salt__["bigip.delete_pool_member"](
hostname, username, password, name, member
)
# did we get rid of it?
if deleted["code"] == 200:
ret["result"] = True
ret[
"comment"
] = "Pool Member: {member} was successfully deleted.".format(
member=member
)
ret["changes"]["old"] = existing_member
ret["changes"]["new"] = {}
            # something bad happened
            else:
                ret = _load_result(deleted, ret)
        # the member was not found in the pool
        else:
            ret["result"] = True
            ret["comment"] = "This pool member already does not exist. No changes made."
            ret["changes"]["old"] = {}
            ret["changes"]["new"] = {}
    # pool was not found or an error occurred
    else:
        ret = _load_result(existing, ret)
return ret
def list_virtual(hostname, username, password, name):
"""
A function to list a specific virtual.
hostname
The host/address of the bigip device
username
The iControl REST username
password
The iControl REST password
name
The name of the virtual to list
"""
ret = {"name": name, "changes": {}, "result": False, "comment": ""}
if __opts__["test"]:
return _test_output(
ret,
"list",
params={
"hostname": hostname,
"username": username,
"password": password,
"name": name,
},
)
response = __salt__["bigip.list_virtual"](hostname, username, password, name)
return _load_result(response, ret)
def create_virtual(
hostname,
username,
password,
name,
destination,
pool=None,
address_status=None,
auto_lasthop=None,
bwc_policy=None,
cmp_enabled=None,
connection_limit=None,
dhcp_relay=None,
description=None,
fallback_persistence=None,
flow_eviction_policy=None,
gtm_score=None,
ip_forward=None,
ip_protocol=None,
internal=None,
twelve_forward=None,
last_hop_pool=None,
mask=None,
mirror=None,
nat64=None,
persist=None,
profiles=None,
policies=None,
rate_class=None,
rate_limit=None,
rate_limit_mode=None,
rate_limit_dst=None,
rate_limit_src=None,
rules=None,
related_rules=None,
reject=None,
source=None,
source_address_translation=None,
source_port=None,
virtual_state=None,
traffic_classes=None,
translate_address=None,
translate_port=None,
vlans=None,
):
"""
    A function to connect to a bigip device and create a virtual server if it does not already exist.
hostname
The host/address of the bigip device
username
The iControl REST username
password
The iControl REST password
name
The name of the virtual to create
destination
[ [virtual_address_name:port] | [ipv4:port] | [ipv6.port] ]
pool
[ [pool_name] | none]
address_status
[yes | no]
auto_lasthop
[default | enabled | disabled ]
bwc_policy
        [none | string]
    cmp_enabled
        [yes | no]
    dhcp_relay
        [yes | no]
connection_limit
[integer]
description
[string]
state
[disabled | enabled]
fallback_persistence
[none | [profile name] ]
flow_eviction_policy
[none | [eviction policy name] ]
gtm_score
[integer]
ip_forward
[yes | no]
ip_protocol
[any | protocol]
internal
[yes | no]
twelve_forward(12-forward)
[yes | no]
    last_hop_pool
[ [pool_name] | none]
mask
{ [ipv4] | [ipv6] }
mirror
{ [disabled | enabled | none] }
nat64
[enabled | disabled]
persist
[list]
profiles
[none | default | list ]
policies
[none | default | list ]
rate_class
[name]
rate_limit
[integer]
    rate_limit_mode
        [destination | object | object-destination |
        object-source | object-source-destination |
        source | source-destination]
    rate_limit_dst
        [integer]
    rate_limit_src
[integer]
rules
[none | list ]
related_rules
[none | list ]
reject
[yes | no]
source
{ [ipv4[/prefixlen]] | [ipv6[/prefixlen]] }
source_address_translation
[none | snat:pool_name | lsn | automap | dictionary ]
source_port
[change | preserve | preserve-strict]
state
[enabled | disabled]
traffic_classes
[none | default | list ]
translate_address
[enabled | disabled]
translate_port
[enabled | disabled]
vlans
[none | default | dictionary]
vlan_ids
[ list]
enabled
[ true | false ]
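    Example state (a minimal sketch; the destination, pool, and credentials are
    placeholder values and only a small subset of parameters is shown)::

        create_web_virtual:
          bigip.create_virtual:
            - hostname: 10.0.0.1
            - username: admin
            - password: secret
            - name: web-vs
            - destination: '10.2.2.2:80'
            - pool: web-pool
            - ip_protocol: tcp
            - source_address_translation: automap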
"""
ret = {"name": name, "changes": {}, "result": False, "comment": ""}
if __opts__["test"]:
return _test_output(
ret,
"create",
params={
"hostname": hostname,
"username": username,
"password": password,
"name": name,
"destination": destination,
"pool": pool,
"address_status": address_status,
"auto_lasthop": auto_lasthop,
"bwc_policy": bwc_policy,
"cmp_enabled": cmp_enabled,
"connection_limit": connection_limit,
"dhcp_relay": dhcp_relay,
"description": description,
"fallback_persistence": fallback_persistence,
"flow_eviction_policy": flow_eviction_policy,
"gtm_score": gtm_score,
"ip_forward": ip_forward,
"ip_protocol": ip_protocol,
"internal": internal,
"twelve_forward": twelve_forward,
"last_hop_pool": last_hop_pool,
"mask": mask,
"mirror": mirror,
"nat64": nat64,
"persist": persist,
"profiles": profiles,
"policies": policies,
"rate_class": rate_class,
"rate_limit": rate_limit,
"rate_limit_mode": rate_limit_mode,
"rate_limit_dst": rate_limit_dst,
"rate_limit_src": rate_limit_src,
"rules": rules,
"related_rules": related_rules,
"reject": reject,
"source": source,
"source_address_translation": source_address_translation,
"source_port": source_port,
"virtual_state": virtual_state,
"traffic_classes": traffic_classes,
"translate_address": translate_address,
"translate_port": translate_port,
"vlans": vlans,
},
)
existing = __salt__["bigip.list_virtual"](hostname, username, password, name)
# does this virtual exist?
if existing["code"] == 200:
ret["result"] = True
ret["comment"] = "A virtual by this name currently exists. No change made."
elif existing["code"] == 404:
# create it
virtual = __salt__["bigip.create_virtual"](
hostname=hostname,
username=username,
password=password,
name=name,
destination=destination,
description=description,
pool=pool,
address_status=address_status,
auto_lasthop=auto_lasthop,
bwc_policy=bwc_policy,
cmp_enabled=cmp_enabled,
connection_limit=connection_limit,
dhcp_relay=dhcp_relay,
fallback_persistence=fallback_persistence,
flow_eviction_policy=flow_eviction_policy,
gtm_score=gtm_score,
ip_forward=ip_forward,
ip_protocol=ip_protocol,
internal=internal,
twelve_forward=twelve_forward,
last_hop_pool=last_hop_pool,
mask=mask,
mirror=mirror,
nat64=nat64,
persist=persist,
profiles=profiles,
policies=policies,
rate_class=rate_class,
rate_limit=rate_limit,
rate_limit_mode=rate_limit_mode,
rate_limit_dst=rate_limit_dst,
rate_limit_src=rate_limit_src,
rules=rules,
related_rules=related_rules,
reject=reject,
source=source,
source_address_translation=source_address_translation,
source_port=source_port,
state=virtual_state,
traffic_classes=traffic_classes,
translate_address=translate_address,
translate_port=translate_port,
vlans=vlans,
)
if virtual["code"] == 200:
ret["result"] = True
ret["changes"]["old"] = {}
ret["changes"]["new"] = virtual["content"]
ret["comment"] = "Virtual was successfully created."
else:
            ret = _load_result(virtual, ret)
# else something else was returned
else:
ret = _load_result(existing, ret)
return ret
def manage_virtual(
hostname,
username,
password,
name,
destination,
pool=None,
address_status=None,
auto_lasthop=None,
bwc_policy=None,
cmp_enabled=None,
connection_limit=None,
dhcp_relay=None,
description=None,
fallback_persistence=None,
flow_eviction_policy=None,
gtm_score=None,
ip_forward=None,
ip_protocol=None,
internal=None,
twelve_forward=None,
last_hop_pool=None,
mask=None,
mirror=None,
nat64=None,
persist=None,
profiles=None,
policies=None,
rate_class=None,
rate_limit=None,
rate_limit_mode=None,
rate_limit_dst=None,
rate_limit_src=None,
rules=None,
related_rules=None,
reject=None,
source=None,
source_address_translation=None,
source_port=None,
virtual_state=None,
traffic_classes=None,
translate_address=None,
translate_port=None,
vlans=None,
):
"""
    Manage a virtual server. If the virtual does not exist it will be created, otherwise only the
parameters specified will be enforced.
hostname
The host/address of the bigip device
username
The iControl REST username
password
The iControl REST password
name
The name of the virtual to create
destination
[ [virtual_address_name:port] | [ipv4:port] | [ipv6.port] ]
pool
[ [pool_name] | none]
address_status
[yes | no]
auto_lasthop
[default | enabled | disabled ]
bwc_policy
        [none | string]
    cmp_enabled
        [yes | no]
    dhcp_relay
        [yes | no]
connection_limit
[integer]
description
[string]
state
[disabled | enabled]
fallback_persistence
[none | [profile name] ]
flow_eviction_policy
[none | [eviction policy name] ]
gtm_score
[integer]
ip_forward
[yes | no]
ip_protocol
[any | protocol]
internal
[yes | no]
twelve_forward(12-forward)
[yes | no]
    last_hop_pool
[ [pool_name] | none]
mask
{ [ipv4] | [ipv6] }
mirror
{ [disabled | enabled | none] }
nat64
[enabled | disabled]
persist
[list]
profiles
[none | default | list ]
policies
[none | default | list ]
rate_class
[name]
rate_limit
[integer]
    rate_limit_mode
        [destination | object | object-destination |
        object-source | object-source-destination |
        source | source-destination]
    rate_limit_dst
        [integer]
    rate_limit_src
[integer]
rules
[none | list ]
related_rules
[none | list ]
reject
[yes | no]
source
{ [ipv4[/prefixlen]] | [ipv6[/prefixlen]] }
source_address_translation
[none | snat:pool_name | lsn | automap | dictionary ]
source_port
[change | preserve | preserve-strict]
state
[enabled | disabled]
traffic_classes
[none | default | list ]
translate_address
[enabled | disabled]
translate_port
[enabled | disabled]
vlans
[none | default | dictionary]
vlan_ids
[ list]
enabled
[ true | false ]
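    Example state (illustrative only; an existing virtual is modified in place,
    otherwise one is created with these placeholder values)::

        manage_web_virtual:
          bigip.manage_virtual:
            - hostname: 10.0.0.1
            - username: admin
            - password: secret
            - name: web-vs
            - destination: '10.2.2.2:443'
            - pool: web-pool
            - source_address_translation: automap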
"""
ret = {"name": name, "changes": {}, "result": False, "comment": ""}
if __opts__["test"]:
return _test_output(
ret,
"manage",
params={
"hostname": hostname,
"username": username,
"password": password,
"name": name,
"destination": destination,
"pool": pool,
"address_status": address_status,
"auto_lasthop": auto_lasthop,
"bwc_policy": bwc_policy,
"cmp_enabled": cmp_enabled,
"connection_limit": connection_limit,
"dhcp_relay": dhcp_relay,
"description": description,
"fallback_persistence": fallback_persistence,
"flow_eviction_policy": flow_eviction_policy,
"gtm_score": gtm_score,
"ip_forward": ip_forward,
"ip_protocol": ip_protocol,
"internal": internal,
"twelve_forward": twelve_forward,
"last_hop_pool": last_hop_pool,
"mask": mask,
"mirror": mirror,
"nat64": nat64,
"persist": persist,
"profiles": profiles,
"policies": policies,
"rate_class": rate_class,
"rate_limit": rate_limit,
"rate_limit_mode": rate_limit_mode,
"rate_limit_dst": rate_limit_dst,
"rate_limit_src": rate_limit_src,
"rules": rules,
"related_rules": related_rules,
"reject": reject,
"source": source,
"source_address_translation": source_address_translation,
"source_port": source_port,
"virtual_state": virtual_state,
"traffic_classes": traffic_classes,
"translate_address": translate_address,
"translate_port": translate_port,
"vlans": vlans,
},
)
existing = __salt__["bigip.list_virtual"](hostname, username, password, name)
# does this virtual exist?
if existing["code"] == 200:
# modify
modified = __salt__["bigip.modify_virtual"](
hostname=hostname,
username=username,
password=password,
name=name,
destination=destination,
description=description,
pool=pool,
address_status=address_status,
auto_lasthop=auto_lasthop,
bwc_policy=bwc_policy,
cmp_enabled=cmp_enabled,
connection_limit=connection_limit,
dhcp_relay=dhcp_relay,
fallback_persistence=fallback_persistence,
flow_eviction_policy=flow_eviction_policy,
gtm_score=gtm_score,
ip_forward=ip_forward,
ip_protocol=ip_protocol,
internal=internal,
twelve_forward=twelve_forward,
last_hop_pool=last_hop_pool,
mask=mask,
mirror=mirror,
nat64=nat64,
persist=persist,
profiles=profiles,
policies=policies,
rate_class=rate_class,
rate_limit=rate_limit,
rate_limit_mode=rate_limit_mode,
rate_limit_dst=rate_limit_dst,
rate_limit_src=rate_limit_src,
rules=rules,
related_rules=related_rules,
reject=reject,
source=source,
source_address_translation=source_address_translation,
source_port=source_port,
state=virtual_state,
traffic_classes=traffic_classes,
translate_address=translate_address,
translate_port=translate_port,
vlans=vlans,
)
# was the modification successful?
if modified["code"] == 200:
# relist it to compare
relisting = __salt__["bigip.list_virtual"](
hostname, username, password, name
)
if relisting["code"] == 200:
relisting = _strip_key(relisting, "generation")
existing = _strip_key(existing, "generation")
ret = _check_for_changes("Virtual", ret, existing, relisting)
else:
ret = _load_result(relisting, ret)
else:
ret = _load_result(modified, ret)
elif existing["code"] == 404:
# create it
virtual = __salt__["bigip.create_virtual"](
hostname=hostname,
username=username,
password=password,
name=name,
destination=destination,
description=description,
pool=pool,
address_status=address_status,
auto_lasthop=auto_lasthop,
bwc_policy=bwc_policy,
cmp_enabled=cmp_enabled,
connection_limit=connection_limit,
dhcp_relay=dhcp_relay,
fallback_persistence=fallback_persistence,
flow_eviction_policy=flow_eviction_policy,
gtm_score=gtm_score,
ip_forward=ip_forward,
ip_protocol=ip_protocol,
internal=internal,
twelve_forward=twelve_forward,
last_hop_pool=last_hop_pool,
mask=mask,
mirror=mirror,
nat64=nat64,
persist=persist,
profiles=profiles,
policies=policies,
rate_class=rate_class,
rate_limit=rate_limit,
rate_limit_mode=rate_limit_mode,
rate_limit_dst=rate_limit_dst,
rate_limit_src=rate_limit_src,
rules=rules,
related_rules=related_rules,
reject=reject,
source=source,
source_address_translation=source_address_translation,
source_port=source_port,
state=virtual_state,
traffic_classes=traffic_classes,
translate_address=translate_address,
translate_port=translate_port,
vlans=vlans,
)
# were we able to create it?
if virtual["code"] == 200:
ret["result"] = True
ret["changes"]["old"] = {}
ret["changes"]["new"] = virtual["content"]
ret[
"comment"
] = "Virtual was successfully created and enforced to the desired state."
else:
ret = _load_result(virtual, ret)
else:
ret = _load_result(existing, ret)
return ret
def modify_virtual(
hostname,
username,
password,
name,
destination,
pool=None,
address_status=None,
auto_lasthop=None,
bwc_policy=None,
cmp_enabled=None,
connection_limit=None,
dhcp_relay=None,
description=None,
fallback_persistence=None,
flow_eviction_policy=None,
gtm_score=None,
ip_forward=None,
ip_protocol=None,
internal=None,
twelve_forward=None,
last_hop_pool=None,
mask=None,
mirror=None,
nat64=None,
persist=None,
profiles=None,
policies=None,
rate_class=None,
rate_limit=None,
rate_limit_mode=None,
rate_limit_dst=None,
rate_limit_src=None,
rules=None,
related_rules=None,
reject=None,
source=None,
source_address_translation=None,
source_port=None,
virtual_state=None,
traffic_classes=None,
translate_address=None,
translate_port=None,
vlans=None,
):
"""
    Modify an existing virtual server. Only the parameters specified will be enforced.
hostname
The host/address of the bigip device
username
The iControl REST username
password
The iControl REST password
name
The name of the virtual to create
destination
[ [virtual_address_name:port] | [ipv4:port] | [ipv6.port] ]
pool
[ [pool_name] | none]
address_status
[yes | no]
auto_lasthop
[default | enabled | disabled ]
bwc_policy
        [none | string]
    cmp_enabled
        [yes | no]
    dhcp_relay
        [yes | no]
connection_limit
[integer]
description
[string]
state
[disabled | enabled]
fallback_persistence
[none | [profile name] ]
flow_eviction_policy
[none | [eviction policy name] ]
gtm_score
[integer]
ip_forward
[yes | no]
ip_protocol
[any | protocol]
internal
[yes | no]
twelve_forward(12-forward)
[yes | no]
    last_hop_pool
[ [pool_name] | none]
mask
{ [ipv4] | [ipv6] }
mirror
{ [disabled | enabled | none] }
nat64
[enabled | disabled]
persist
[list]
profiles
[none | default | list ]
policies
[none | default | list ]
rate_class
[name]
rate_limit
[integer]
    rate_limit_mode
[destination | object | object-destination |
object-source | object-source-destination |
source | source-destination]
rate_limit_dst
[integer]
rate_limit_src
[integer]
rules
[none | list ]
related_rules
[none | list ]
reject
[yes | no]
source
{ [ipv4[/prefixlen]] | [ipv6[/prefixlen]] }
source_address_translation
[none | snat:pool_name | lsn | automap | dictionary ]
source_port
[change | preserve | preserve-strict]
state
[enabled | disabled]
traffic_classes
[none | default | list ]
translate_address
[enabled | disabled]
translate_port
[enabled | disabled]
vlans
[none | default | dictionary ]
vlan_ids
[ list]
enabled
[ true | false ]
"""
ret = {"name": name, "changes": {}, "result": False, "comment": ""}
if __opts__["test"]:
return _test_output(
ret,
"modify",
params={
"hostname": hostname,
"username": username,
"password": password,
"name": name,
"destination": destination,
"pool": pool,
"address_status": address_status,
"auto_lasthop": auto_lasthop,
"bwc_policy": bwc_policy,
"cmp_enabled": cmp_enabled,
"connection_limit": connection_limit,
"dhcp_relay": dhcp_relay,
"description": description,
"fallback_persistence": fallback_persistence,
"flow_eviction_policy": flow_eviction_policy,
"gtm_score": gtm_score,
"ip_forward": ip_forward,
"ip_protocol": ip_protocol,
"internal": internal,
"twelve_forward": twelve_forward,
"last_hop_pool": last_hop_pool,
"mask": mask,
"mirror": mirror,
"nat64": nat64,
"persist": persist,
"profiles": profiles,
"policies": policies,
"rate_class": rate_class,
"rate_limit": rate_limit,
"rate_limit_mode": rate_limit_mode,
"rate_limit_dst": rate_limit_dst,
"rate_limit_src": rate_limit_src,
"rules": rules,
"related_rules": related_rules,
"reject": reject,
"source": source,
"source_address_translation": source_address_translation,
"source_port": source_port,
"virtual_state": virtual_state,
"traffic_classes": traffic_classes,
"translate_address": translate_address,
"translate_port": translate_port,
"vlans": vlans,
},
)
existing = __salt__["bigip.list_virtual"](hostname, username, password, name)
# does this virtual exist?
if existing["code"] == 200:
# modify
modified = __salt__["bigip.modify_virtual"](
hostname=hostname,
username=username,
password=password,
name=name,
destination=destination,
description=description,
pool=pool,
address_status=address_status,
auto_lasthop=auto_lasthop,
bwc_policy=bwc_policy,
cmp_enabled=cmp_enabled,
connection_limit=connection_limit,
dhcp_relay=dhcp_relay,
fallback_persistence=fallback_persistence,
flow_eviction_policy=flow_eviction_policy,
gtm_score=gtm_score,
ip_forward=ip_forward,
ip_protocol=ip_protocol,
internal=internal,
twelve_forward=twelve_forward,
last_hop_pool=last_hop_pool,
mask=mask,
mirror=mirror,
nat64=nat64,
persist=persist,
profiles=profiles,
policies=policies,
rate_class=rate_class,
rate_limit=rate_limit,
rate_limit_mode=rate_limit_mode,
rate_limit_dst=rate_limit_dst,
rate_limit_src=rate_limit_src,
rules=rules,
related_rules=related_rules,
reject=reject,
source=source,
source_address_translation=source_address_translation,
source_port=source_port,
state=virtual_state,
traffic_classes=traffic_classes,
translate_address=translate_address,
translate_port=translate_port,
vlans=vlans,
)
# was the modification successful?
if modified["code"] == 200:
# relist it to compare
relisting = __salt__["bigip.list_virtual"](
hostname, username, password, name
)
if relisting["code"] == 200:
relisting = _strip_key(relisting, "generation")
existing = _strip_key(existing, "generation")
ret = _check_for_changes("Virtual", ret, existing, relisting)
else:
ret = _load_result(relisting, ret)
else:
ret = _load_result(modified, ret)
elif existing["code"] == 404:
ret["comment"] = "A Virtual with this name was not found."
# else something else was returned
else:
ret = _load_result(existing, ret)
return ret
def delete_virtual(hostname, username, password, name):
"""
Delete an existing virtual.
hostname
The host/address of the bigip device
username
The iControl REST username
password
The iControl REST password
name
The name of the virtual which will be deleted
"""
ret = {"name": name, "changes": {}, "result": False, "comment": ""}
if __opts__["test"]:
return _test_output(
ret,
"delete",
params={
"hostname": hostname,
"username": username,
"password": password,
"name": name,
},
)
# is this virtual currently configured?
existing = __salt__["bigip.list_virtual"](hostname, username, password, name)
# if it exists by name
if existing["code"] == 200:
deleted = __salt__["bigip.delete_virtual"](hostname, username, password, name)
# did we get rid of it?
if deleted["code"] == 200:
ret["result"] = True
ret["comment"] = "Virtual was successfully deleted."
ret["changes"]["old"] = existing["content"]
ret["changes"]["new"] = {}
# something bad happened
else:
ret = _load_result(deleted, ret)
# not found
elif existing["code"] == 404:
ret["result"] = True
ret["comment"] = "This virtual already does not exist. No changes made."
ret["changes"]["old"] = {}
ret["changes"]["new"] = {}
else:
ret = _load_result(existing, ret)
return ret
def list_monitor(hostname, username, password, monitor_type, name):
"""
A function to list an existing monitor.
hostname
The host/address of the bigip device
username
The iControl REST username
password
The iControl REST password
monitor_type
The type of monitor to list
name
The name of the monitor to list
"""
ret = {"name": name, "changes": {}, "result": False, "comment": ""}
if __opts__["test"]:
return _test_output(
ret,
"list",
params={
"hostname": hostname,
"username": username,
"password": password,
"monitor_type": monitor_type,
"name": name,
},
)
response = __salt__["bigip.list_monitor"](
hostname, username, password, monitor_type, name
)
return _load_result(response, ret)
def create_monitor(hostname, username, password, monitor_type, name, **kwargs):
"""
A function to connect to a bigip device and create a monitor.
hostname
The host/address of the bigip device
username
The iControl REST username
password
The iControl REST password
monitor_type
The type of monitor to create
name
The name of the monitor to create
kwargs
[ arg=val ] ...
Consult F5 BIGIP user guide for specific options for each monitor type.
Typically, tmsh arg names are used.
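    Example state (a sketch; the monitor type and the tmsh-style options shown
    are placeholders to adapt to the monitor being created)::

        create_web_http_monitor:
          bigip.create_monitor:
            - hostname: 10.0.0.1
            - username: admin
            - password: secret
            - monitor_type: http
            - name: web-http-monitor
            - interval: 10
            - timeout: 31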
"""
ret = {"name": name, "changes": {}, "result": False, "comment": ""}
if __opts__["test"]:
params = {
"hostname": hostname,
"username": username,
"password": password,
"monitor_type": monitor_type,
"name": name,
}
for key, value in kwargs.items():
params[key] = value
return _test_output(ret, "create", params)
# is this monitor currently configured?
existing = __salt__["bigip.list_monitor"](
hostname, username, password, monitor_type, name
)
# if it exists
if existing["code"] == 200:
ret["result"] = True
ret["comment"] = "A monitor by this name currently exists. No change made."
# if it doesn't exist
elif existing["code"] == 404:
response = __salt__["bigip.create_monitor"](
hostname, username, password, monitor_type, name, **kwargs
)
if response["code"] == 200:
ret["result"] = True
ret["changes"]["old"] = {}
ret["changes"]["new"] = response["content"]
ret["comment"] = "Monitor was successfully created."
else:
ret = _load_result(response, ret)
# else something else was returned
else:
ret = _load_result(existing, ret)
return ret
def manage_monitor(hostname, username, password, monitor_type, name, **kwargs):
"""
    Create a new monitor if a monitor of this type and name does not already exist. If it does exist, only
the parameters specified will be enforced.
hostname
The host/address of the bigip device
username
The iControl REST username
password
The iControl REST password
monitor_type
The type of monitor to create
name
The name of the monitor to create
kwargs
[ arg=val ] ...
Consult F5 BIGIP user guide for specific options for each monitor type.
Typically, tmsh arg names are used.
"""
ret = {"name": name, "changes": {}, "result": False, "comment": ""}
if __opts__["test"]:
params = {
"hostname": hostname,
"username": username,
"password": password,
"monitor_type": monitor_type,
"name": name,
}
for key, value in kwargs.items():
params[key] = value
return _test_output(ret, "manage", params)
# is this monitor currently configured?
existing = __salt__["bigip.list_monitor"](
hostname, username, password, monitor_type, name
)
# if it exists
if existing["code"] == 200:
# modify the monitor
modified = __salt__["bigip.modify_monitor"](
hostname, username, password, monitor_type, name, **kwargs
)
# was the modification successful?
if modified["code"] == 200:
del existing["content"]["selfLink"]
del modified["content"]["selfLink"]
ret = _check_for_changes("Monitor", ret, existing, modified)
else:
ret = _load_result(modified, ret)
# if it doesn't exist
elif existing["code"] == 404:
response = __salt__["bigip.create_monitor"](
hostname, username, password, monitor_type, name, **kwargs
)
if response["code"] == 200:
ret["result"] = True
ret["changes"]["old"] = {}
ret["changes"]["new"] = response["content"]
ret["comment"] = "Monitor was successfully created."
else:
ret = _load_result(response, ret)
# else something else was returned
else:
ret = _load_result(existing, ret)
return ret
def modify_monitor(hostname, username, password, monitor_type, name, **kwargs):
"""
    Modify an existing monitor. If it exists, only
the parameters specified will be enforced.
hostname
The host/address of the bigip device
username
The iControl REST username
password
The iControl REST password
monitor_type
The type of monitor to create
name
The name of the monitor to create
kwargs
[ arg=val ] ...
Consult F5 BIGIP user guide for specific options for each monitor type.
Typically, tmsh arg names are used.
"""
ret = {"name": name, "changes": {}, "result": False, "comment": ""}
if __opts__["test"]:
params = {
"hostname": hostname,
"username": username,
"password": password,
"monitor_type": monitor_type,
"name": name,
}
for key, value in kwargs.items():
params[key] = value
return _test_output(ret, "modify", params)
# is this monitor currently configured?
existing = __salt__["bigip.list_monitor"](
hostname, username, password, monitor_type, name
)
# if it exists
if existing["code"] == 200:
# modify the monitor
modified = __salt__["bigip.modify_monitor"](
hostname, username, password, monitor_type, name, **kwargs
)
# was the modification successful?
if modified["code"] == 200:
del existing["content"]["selfLink"]
del modified["content"]["selfLink"]
ret = _check_for_changes("Monitor", ret, existing, modified)
else:
ret = _load_result(modified, ret)
# if it doesn't exist
elif existing["code"] == 404:
ret["comment"] = "A Monitor with this name was not found."
# else something else was returned
else:
ret = _load_result(existing, ret)
return ret
def delete_monitor(hostname, username, password, monitor_type, name):
"""
    Delete an existing monitor.
    hostname
        The host/address of the bigip device
    username
        The iControl REST username
    password
        The iControl REST password
    monitor_type
        The type of monitor to delete
    name
        The name of the monitor to delete
"""
ret = {"name": name, "changes": {}, "result": False, "comment": ""}
if __opts__["test"]:
return _test_output(
ret,
"delete",
params={
"hostname": hostname,
"username": username,
"password": password,
"monitor_type": monitor_type,
"name": name,
},
)
# is this profile currently configured?
existing = __salt__["bigip.list_monitor"](
hostname, username, password, monitor_type, name
)
# if it exists by name
if existing["code"] == 200:
deleted = __salt__["bigip.delete_monitor"](
hostname, username, password, monitor_type, name
)
# did we get rid of it?
if deleted["code"] == 200:
ret["result"] = True
ret["comment"] = "Monitor was successfully deleted."
ret["changes"]["old"] = existing["content"]
ret["changes"]["new"] = {}
# something bad happened
else:
ret = _load_result(deleted, ret)
# not found
elif existing["code"] == 404:
ret["result"] = True
ret["comment"] = "This Monitor already does not exist. No changes made."
ret["changes"]["old"] = {}
ret["changes"]["new"] = {}
else:
ret = _load_result(existing, ret)
return ret
def list_profile(hostname, username, password, profile_type, name):
"""
A function to list an existing profile.
hostname
The host/address of the bigip device
username
The iControl REST username
password
The iControl REST password
profile_type
The type of profile to list
name
The name of the profile to list
"""
ret = {"name": name, "changes": {}, "result": False, "comment": ""}
if __opts__["test"]:
return _test_output(
ret,
"list",
params={
"hostname": hostname,
"username": username,
"password": password,
"profile_type": profile_type,
"name": name,
},
)
response = __salt__["bigip.list_profile"](
hostname, username, password, profile_type, name
)
return _load_result(response, ret)
def create_profile(hostname, username, password, profile_type, name, **kwargs):
r"""
A function to connect to a bigip device and create a profile.
hostname
The host/address of the bigip device
username
The iControl REST username
password
The iControl REST password
profile_type
The type of profile to create
name
The name of the profile to create
kwargs
[ arg=val ] ...
Consult F5 BIGIP user guide for specific options for each profile type.
Typically, tmsh arg names are used.
Special Characters ``|``, ``,`` and ``:`` must be escaped using ``\`` when
used within strings.
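    Example state (a sketch; only placeholder values are shown, with any
    profile-specific tmsh options appended as additional ``key: value`` items)::

        create_web_tcp_profile:
          bigip.create_profile:
            - hostname: 10.0.0.1
            - username: admin
            - password: secret
            - profile_type: tcp
            - name: web-tcp-profile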
"""
ret = {"name": name, "changes": {}, "result": False, "comment": ""}
if __opts__["test"]:
return _test_output(
ret,
"create",
params={
"hostname": hostname,
"username": username,
"password": password,
"profile_type": profile_type,
"name": name,
},
)
# is this profile currently configured?
existing = __salt__["bigip.list_profile"](
hostname, username, password, profile_type, name
)
# if it exists
if existing["code"] == 200:
ret["result"] = True
ret["comment"] = "A profile by this name currently exists. No change made."
# if it doesn't exist
elif existing["code"] == 404:
response = __salt__["bigip.create_profile"](
hostname, username, password, profile_type, name, **kwargs
)
if response["code"] == 200:
ret["result"] = True
ret["changes"]["old"] = {}
ret["changes"]["new"] = response["content"]
ret["comment"] = "Profile was successfully created."
else:
ret = _load_result(response, ret)
# else something else was returned
else:
ret = _load_result(existing, ret)
return ret
def manage_profile(hostname, username, password, profile_type, name, **kwargs):
"""
    Create a new profile if a profile of this type and name does not already
    exist. If it does exist, only the parameters specified will be enforced.
hostname
The host/address of the bigip device
username
The iControl REST username
password
The iControl REST password
profile_type
The type of profile to create
name
The name of the profile to create
kwargs
[ arg=val ] ...
Consult F5 BIGIP user guide for specific options for each profile type.
Typically, tmsh arg names are used.
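
    Example state declaration (an illustrative sketch only; the device
    address, credentials, profile name, and the ``defaultsFrom`` option are
    hypothetical placeholders; consult the F5 documentation for the options
    valid for your profile type):

    .. code-block:: yaml

        manage-my-http-profile:
          bigip.manage_profile:
            - hostname: 10.1.1.1
            - username: admin
            - password: mysecret
            - profile_type: http
            - name: my-http-profile
            - defaultsFrom: /Common/http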
"""
ret = {"name": name, "changes": {}, "result": False, "comment": ""}
if __opts__["test"]:
params = {
"hostname": hostname,
"username": username,
"password": password,
"profile_type": profile_type,
"name": name,
}
for key, value in kwargs.items():
params[key] = value
return _test_output(ret, "manage", params)
# is this profile currently configured?
existing = __salt__["bigip.list_profile"](
hostname, username, password, profile_type, name
)
# if it exists
if existing["code"] == 200:
# modify the profile
modified = __salt__["bigip.modify_profile"](
hostname, username, password, profile_type, name, **kwargs
)
# was the modification successful?
if modified["code"] == 200:
del existing["content"]["selfLink"]
del modified["content"]["selfLink"]
ret = _check_for_changes("Profile", ret, existing, modified)
else:
ret = _load_result(modified, ret)
# if it doesn't exist
elif existing["code"] == 404:
response = __salt__["bigip.create_profile"](
hostname, username, password, profile_type, name, **kwargs
)
if response["code"] == 200:
ret["result"] = True
ret["changes"]["old"] = {}
ret["changes"]["new"] = response["content"]
ret["comment"] = "Profile was successfully created."
else:
            ret = _load_result(response, ret)
# else something else was returned
else:
ret = _load_result(existing, ret)
return ret
def modify_profile(hostname, username, password, profile_type, name, **kwargs):
"""
    Modify an existing profile. If it exists, only the parameters specified
    will be enforced.
hostname
The host/address of the bigip device
username
The iControl REST username
password
The iControl REST password
profile_type
        The type of profile to modify
name
        The name of the profile to modify
kwargs
[ arg=val ] ...
        Consult F5 BIGIP user guide for specific options for each profile type.
Typically, tmsh arg names are used.
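
    Example state declaration (an illustrative sketch only; the device
    address, credentials, profile name, and the ``idleTimeout`` option are
    hypothetical placeholders; consult the F5 documentation for the options
    valid for your profile type):

    .. code-block:: yaml

        modify-my-tcp-profile:
          bigip.modify_profile:
            - hostname: 10.1.1.1
            - username: admin
            - password: mysecret
            - profile_type: tcp
            - name: my-tcp-profile
            - idleTimeout: 300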
"""
ret = {"name": name, "changes": {}, "result": False, "comment": ""}
if __opts__["test"]:
params = {
"hostname": hostname,
"username": username,
"password": password,
"profile_type": profile_type,
"name": name,
}
for key, value in kwargs.items():
params[key] = value
return _test_output(ret, "modify", params)
# is this profile currently configured?
existing = __salt__["bigip.list_profile"](
hostname, username, password, profile_type, name
)
# if it exists
if existing["code"] == 200:
# modify the profile
modified = __salt__["bigip.modify_profile"](
hostname, username, password, profile_type, name, **kwargs
)
# was the modification successful?
if modified["code"] == 200:
del existing["content"]["selfLink"]
del modified["content"]["selfLink"]
ret = _check_for_changes("Profile", ret, existing, modified)
else:
ret = _load_result(modified, ret)
# if it doesn't exist
elif existing["code"] == 404:
ret["comment"] = "A Profile with this name was not found."
# else something else was returned
else:
ret = _load_result(existing, ret)
return ret
def delete_profile(hostname, username, password, profile_type, name):
"""
    Delete an existing profile.
hostname
The host/address of the bigip device
username
The iControl REST username
password
The iControl REST password
    profile_type
        The type of profile to delete
    name
        The name of the profile to delete
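
    Example state declaration (an illustrative sketch only; the device
    address, credentials, and profile name below are hypothetical
    placeholders):

    .. code-block:: yaml

        delete-my-http-profile:
          bigip.delete_profile:
            - hostname: 10.1.1.1
            - username: admin
            - password: mysecret
            - profile_type: http
            - name: my-http-profile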
"""
ret = {"name": name, "changes": {}, "result": False, "comment": ""}
if __opts__["test"]:
return _test_output(
ret,
"delete",
params={
"hostname": hostname,
"username": username,
"password": password,
"profile_type": profile_type,
"name": name,
},
)
# is this profile currently configured?
existing = __salt__["bigip.list_profile"](
hostname, username, password, profile_type, name
)
# if it exists by name
if existing["code"] == 200:
deleted = __salt__["bigip.delete_profile"](
hostname, username, password, profile_type, name
)
# did we get rid of it?
if deleted["code"] == 200:
ret["result"] = True
ret["comment"] = "Profile was successfully deleted."
ret["changes"]["old"] = existing["content"]
ret["changes"]["new"] = {}
# something bad happened
else:
ret = _load_result(deleted, ret)
# not found
elif existing["code"] == 404:
ret["result"] = True
ret["comment"] = "This Profile already does not exist. No changes made."
ret["changes"]["old"] = {}
ret["changes"]["new"] = {}
else:
ret = _load_result(existing, ret)
return ret