#!/usr/bin/python
from __future__ import print_function
import struct
import sys
import usb.core
import usb.util
from intelhex import IntelHex
scrambleCode = (0x29, 0x52, 0x8C, 0x70)
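# Candidate byte values to try first when brute-forcing each flash byte below;
# presumably ordered by how common each value is in typical 8051 firmware
# (0xFF for erased flash, then frequent opcodes/operands such as LJMP and MOV).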
stats = [0xff, 0x02, 0x00, 0xf5, 0xe5, 0x75, 0x03, 0x04,
0x80, 0x05, 0xd2, 0x01, 0xe4, 0xef, 0x82, 0x83,
0x08, 0x24, 0xc2, 0x60, 0xe0, 0x12, 0x7f, 0x34,
0x10, 0x07, 0x22, 0x40, 0x54, 0x94, 0x30, 0x70,
0xc0, 0xf0, 0xaf, 0xd0, 0x44, 0xa3, 0x36, 0x74,
0x15, 0xc3, 0x09, 0x93, 0x53, 0xec, 0x48, 0x06,
0x0a, 0x14, 0x20, 0x25, 0x50, 0x64, 0xd4, 0x16,
0x43, 0x47, 0xd6, 0xe7, 0xea, 0x0c, 0x32, 0x3f,
0x46, 0x90, 0xc8, 0xdf, 0x38, 0x45, 0xb4, 0xd3,
0xfa, 0xa1, 0xc5, 0xca, 0xcc, 0xde, 0xfc, 0x0b,
0x23, 0x37, 0x42, 0xed, 0xfb, 0x2f, 0x95, 0x55,
0x85, 0xdc, 0x18, 0x26, 0x33, 0x7d, 0x89, 0xac,
0xae, 0xfe, 0x0f, 0x17, 0x1b, 0x27, 0x35, 0x39,
0x3e, 0x57, 0x78, 0x8f, 0xa9, 0xaa, 0xc1, 0xd9,
0xdd, 0xe3, 0xf3, 0xf8, 0x0d, 0x21, 0x3b, 0x3c,
0x73, 0x81, 0x87, 0x88, 0x8a, 0x99, 0xbf, 0xdb,
0xf2, 0xfd, 0x1a, 0x1f, 0x31, 0x5f, 0x6c, 0x7a,
0x7e, 0x8e, 0xbc, 0xd5, 0xd8, 0xda, 0xe9, 0xeb,
0xee, 0xf6, 0x11, 0x1c, 0x29, 0x2d, 0x56, 0x58,
0x7c, 0x8d, 0x91, 0x98, 0xb3, 0xb9, 0xd7, 0xe1,
0xe6, 0xe8, 0xf9, 0x13, 0x1e, 0x28, 0x2e, 0x41,
0x4e, 0x69, 0x79, 0x7b, 0x9e, 0x9f, 0xa0, 0xab,
0xad, 0xcf, 0xe2, 0x0e, 0x19, 0x1d, 0x2a, 0x4b,
0x52, 0x5b, 0x63, 0x84, 0x86, 0x8c, 0x9d, 0xa2,
0xb1, 0xb2, 0xc4, 0x2b, 0x49, 0x4a, 0x4c, 0x4d,
0x59, 0x61, 0x67, 0x68, 0x6b, 0x6d, 0x6e, 0x6f,
0x77, 0x92, 0x96, 0x9a, 0xa6, 0xa8, 0xb0, 0xb5,
0xbb, 0xc6, 0xc7, 0xc9, 0xcd, 0xd1, 0xf4, 0x2c,
0x3a, 0x3d, 0x4f, 0x51, 0x5a, 0x5c, 0x5d, 0x5e,
0x62, 0x65, 0x66, 0x6a, 0x71, 0x72, 0x76, 0x8b,
0x97, 0x9b, 0x9c, 0xa4, 0xa5, 0xa7, 0xb6, 0xb7,
0xb8, 0xba, 0xbd, 0xbe, 0xcb, 0xce, 0xf1, 0xf7]
def scramble(l):
return [v ^ scrambleCode[i%4] for i, v in enumerate(l)]
def binStrOfList(l):
return ''.join(chr(x) for x in l)
class WCHISP:
def __init__(self):
# find our device
dev = usb.core.find(idVendor=0x4348, idProduct=0x55e0)
if dev is None:
raise ValueError('Device not found')
dev.set_configuration()
cfg = dev.get_active_configuration()
intf = cfg[(0, 0)]
self.epout = usb.util.find_descriptor(intf, custom_match = lambda e: usb.util.endpoint_direction(e.bEndpointAddress) == usb.util.ENDPOINT_OUT)
self.epin = usb.util.find_descriptor(intf, custom_match = lambda e: usb.util.endpoint_direction(e.bEndpointAddress) == usb.util.ENDPOINT_IN)
def cmd(self, msg, length=64):
self.writeb(msg)
b = self.readb(length)
if len(b) == 2:
return struct.unpack('<H', b)[0]
return b
def xcmd(self, msg, exp):
#xmsg = map(lambda x: hex(ord(x))[2:], msg)
#print ' '.join(xmsg)
#return 0
ret = self.cmd(msg)
if ret != exp:
xmsg = map(lambda x: hex(ord(x)), msg[0:4])
raise Exception('cmd[%s] return %d != %d' % (','.join(xmsg), ret, exp))
def info(self):
v = self.cmd('\xa2\x13USB DBG CH559 & ISP' + '\0')
self.cmd('\xbb\x00')
return v
def readb(self, size):
return self.epin.read(size)
def writeb(self, b):
self.epout.write(b)
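    # The bootloader offers no read-back command, but its verify command (0xa7)
    # returns 0 only when the supplied 16-byte block matches flash contents.
    # dump() below uses this as an oracle: it first locates a block of sixteen
    # 0xFF bytes, then slides the window back one byte at a time, guessing the
    # unknown leading byte (most likely values first, per the table above)
    # until verification succeeds.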
def dump(self):
# send the key
b = '\xa6\x04' + struct.pack('BBBB', *scrambleCode)
self.xcmd(b, 0)
# find block of 16 0xFF at the end of the device memory
block = [0xff] * 16
found = False
for address in range(0x3ff0, -1, -1):
print('\rLooking for address 0x{:04X}'.format(address), end='')
            r = self.cmd('\xa7\x10' + struct.pack('<H', address) +
binStrOfList(scramble(block)))
if r == 0:
print('\nFound 0xFF block at address 0x{:04X}'.format(address))
found = True
break
if not found:
print('\nUnable to find 0xFF block')
return
memdump = IntelHex()
memdump.puts(address, binStrOfList(block))
print('Starting flash dumping')
base = [0xa7, 16, 0, 0]
nTry = 0
nBytes = 0
for address in range(address - 1, - 1, -1):
block[1:] = block[:-1] # shift
base[2:4] = address & 0xFF, address >> 8
found = False
for i in range(256):
i = stats[i]
block[0] = i
nTry += 1
r = self.cmd(binStrOfList(base + scramble(block)), 4)
if r == 0: # verification ok, we found the correct byte
print('{:02X} '.format(i), end='')
sys.stdout.flush()
found = True
nBytes += 1
memdump[address] = i
break
if not found:
raise ValueError('Unable to find correct '
'byte for address 0x{:04X}'.format(address))
output_bin = 'out.bin'
output_hex = 'out.hex'
print('\nDone, writing output files {} and {}'. format(output_bin,
output_hex))
        print('nTry = {}, {:.2f} tries/byte'.format(nTry, float(nTry) / nBytes))
memdump.tobinfile(output_bin)
memdump.tofile(output_hex, format='hex')
isp = WCHISP()
# check chip ID and bootloader presence
if isp.info() != 0x52:
raise IOError("not a CH552T device")
# dump flash
isp.dump()
|
###
# Copyright (c) 2004-2005, Jeremiah Fincher
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions, and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author of this software nor the name of
# contributors to this software may be used to endorse or promote products
# derived from this software without specific prior written consent.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
###
from supybot.test import *
class LimiterTestCase(ChannelPluginTestCase):
plugins = ('Limiter',)
config = {'supybot.plugins.Limiter.enable': True}
def testEnforceLimit(self):
origMin = conf.supybot.plugins.Limiter.minimumExcess()
origMax = conf.supybot.plugins.Limiter.maximumExcess()
try:
conf.supybot.plugins.Limiter.minimumExcess.setValue(5)
conf.supybot.plugins.Limiter.maximumExcess.setValue(10)
self.irc.feedMsg(ircmsgs.join('#foo', prefix='foo!root@host'))
m = self.irc.takeMsg()
self.assertEqual(m, ircmsgs.limit('#foo', 1+10))
self.irc.feedMsg(ircmsgs.join('#foo', prefix='bar!root@host'))
m = self.irc.takeMsg()
self.failIf(m is not None)
conf.supybot.plugins.Limiter.maximumExcess.setValue(7)
self.irc.feedMsg(ircmsgs.part('#foo', prefix='bar!root@host'))
m = self.irc.takeMsg()
self.assertEqual(m, ircmsgs.limit('#foo', 1+5))
finally:
conf.supybot.plugins.Limiter.minimumExcess.setValue(origMin)
conf.supybot.plugins.Limiter.maximumExcess.setValue(origMax)
# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79:
|
import textwrap
import aiohttp
from aiostripe import error
def new_default_http_client(*args, **kwargs):
return AsyncioClient(*args, **kwargs)
class HTTPClient(object):
def __init__(self, verify_ssl_certs=True):
self._verify_ssl_certs = verify_ssl_certs
def request(self, method, url, headers, post_data=None):
raise NotImplementedError('HTTPClient subclasses must implement `request`')
class AsyncioClient(HTTPClient):
name = 'aiohttp'
async def request(self, method, url, headers, post_data=None):
if isinstance(post_data, str):
post_data = post_data.encode('utf8')
        async with aiohttp.ClientSession(headers=headers,
skip_auto_headers=('User-Agent', 'Content-Type', 'Authorization')) as client:
try:
async with client.request(method.upper(), url, data=post_data) as res:
rbody = await res.read()
rstatus = res.status
rheaders = {k.lower(): v for k, v in res.headers.items()}
except Exception as e:
self._handle_request_error(e)
assert False, 'unreachable'
return rbody, rstatus, rheaders
@staticmethod
def _handle_request_error(e):
msg = 'Unexpected error communicating with Stripe. If this problem persists, let me know at ' \
'<alex@downtownapp.co>.'
msg = textwrap.fill(msg) + '\n\n(Network error: %r)' % e
raise error.APIConnectionError(msg) from e
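# Illustrative usage sketch (the URL and surrounding asyncio setup are assumptions,
# not part of this module):
#   client = new_default_http_client()
#   body, status, headers = await client.request('get', 'https://api.example.com/ping', {})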
|
"""
Base settings to build other settings files upon.
"""
import environ
ROOT_DIR = environ.Path(__file__) - 3 # (algerian_library/config/settings/base.py - 3 = algerian_library/)
APPS_DIR = ROOT_DIR.path('algerian_library')
env = environ.Env()
READ_DOT_ENV_FILE = env.bool('DJANGO_READ_DOT_ENV_FILE', default=False)
if READ_DOT_ENV_FILE:
# OS environment variables take precedence over variables from .env
env.read_env(str(ROOT_DIR.path('.env')))
# GENERAL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#debug
DEBUG = env.bool('DJANGO_DEBUG', False)
# Local time zone. Choices are
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# though not all of them may be available with every OS.
# In Windows, this must be set to your system time zone.
TIME_ZONE = 'UTC'
# https://docs.djangoproject.com/en/dev/ref/settings/#language-code
LANGUAGE_CODE = 'en-us'
# https://docs.djangoproject.com/en/dev/ref/settings/#site-id
SITE_ID = 1
# https://docs.djangoproject.com/en/dev/ref/settings/#use-i18n
USE_I18N = True
# https://docs.djangoproject.com/en/dev/ref/settings/#use-l10n
USE_L10N = True
# https://docs.djangoproject.com/en/dev/ref/settings/#use-tz
USE_TZ = True
# DATABASES
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#databases
DATABASES = {
'default': env.db('DATABASE_URL', default='postgres:///algerian_library'),
}
DATABASES['default']['ATOMIC_REQUESTS'] = True
# URLS
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#root-urlconf
ROOT_URLCONF = 'config.urls'
# https://docs.djangoproject.com/en/dev/ref/settings/#wsgi-application
WSGI_APPLICATION = 'config.wsgi.application'
# APPS
# ------------------------------------------------------------------------------
DJANGO_APPS = [
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# 'django.contrib.humanize', # Handy template tags
'django.contrib.admin',
]
THIRD_PARTY_APPS = [
'crispy_forms',
'allauth',
'allauth.account',
'allauth.socialaccount',
'rest_framework',
]
LOCAL_APPS = [
'algerian_library.users.apps.UsersAppConfig',
'catalog.apps.CatalogConfig',
]
# https://docs.djangoproject.com/en/dev/ref/settings/#installed-apps
INSTALLED_APPS = DJANGO_APPS + THIRD_PARTY_APPS + LOCAL_APPS
# MIGRATIONS
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#migration-modules
MIGRATION_MODULES = {
'sites': 'algerian_library.contrib.sites.migrations'
}
# AUTHENTICATION
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#authentication-backends
AUTHENTICATION_BACKENDS = [
'django.contrib.auth.backends.ModelBackend',
'allauth.account.auth_backends.AuthenticationBackend',
]
# https://docs.djangoproject.com/en/dev/ref/settings/#auth-user-model
AUTH_USER_MODEL = 'users.User'
# https://docs.djangoproject.com/en/dev/ref/settings/#login-redirect-url
LOGIN_REDIRECT_URL = 'users:redirect'
# https://docs.djangoproject.com/en/dev/ref/settings/#login-url
LOGIN_URL = 'account_login'
# PASSWORDS
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#password-hashers
PASSWORD_HASHERS = [
# https://docs.djangoproject.com/en/dev/topics/auth/passwords/#using-argon2-with-django
'django.contrib.auth.hashers.Argon2PasswordHasher',
'django.contrib.auth.hashers.PBKDF2PasswordHasher',
'django.contrib.auth.hashers.PBKDF2SHA1PasswordHasher',
'django.contrib.auth.hashers.BCryptSHA256PasswordHasher',
'django.contrib.auth.hashers.BCryptPasswordHasher',
]
# https://docs.djangoproject.com/en/dev/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# MIDDLEWARE
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#middleware
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
# STATIC
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#static-root
STATIC_ROOT = str(ROOT_DIR('staticfiles'))
# https://docs.djangoproject.com/en/dev/ref/settings/#static-url
STATIC_URL = '/static/'
# https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#std:setting-STATICFILES_DIRS
STATICFILES_DIRS = [
str(APPS_DIR.path('static')),
]
# https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#staticfiles-finders
STATICFILES_FINDERS = [
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
]
# MEDIA
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#media-root
MEDIA_ROOT = str(APPS_DIR('media'))
# https://docs.djangoproject.com/en/dev/ref/settings/#media-url
MEDIA_URL = '/media/'
# TEMPLATES
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#templates
TEMPLATES = [
{
# https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-TEMPLATES-BACKEND
'BACKEND': 'django.template.backends.django.DjangoTemplates',
# https://docs.djangoproject.com/en/dev/ref/settings/#template-dirs
'DIRS': [
str(APPS_DIR.path('templates')),
],
'OPTIONS': {
# https://docs.djangoproject.com/en/dev/ref/settings/#template-debug
'debug': DEBUG,
# https://docs.djangoproject.com/en/dev/ref/settings/#template-loaders
# https://docs.djangoproject.com/en/dev/ref/templates/api/#loader-types
'loaders': [
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
],
# https://docs.djangoproject.com/en/dev/ref/settings/#template-context-processors
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.i18n',
'django.template.context_processors.media',
'django.template.context_processors.static',
'django.template.context_processors.tz',
'django.contrib.messages.context_processors.messages',
],
},
},
]
# http://django-crispy-forms.readthedocs.io/en/latest/install.html#template-packs
CRISPY_TEMPLATE_PACK = 'bootstrap4'
# FIXTURES
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#fixture-dirs
FIXTURE_DIRS = (
str(APPS_DIR.path('fixtures')),
)
# EMAIL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#email-backend
EMAIL_BACKEND = env('DJANGO_EMAIL_BACKEND', default='django.core.mail.backends.smtp.EmailBackend')
# ADMIN
# ------------------------------------------------------------------------------
# Django Admin URL.
ADMIN_URL = 'admin/'
# https://docs.djangoproject.com/en/dev/ref/settings/#admins
ADMINS = [
("""Achour Ait Hamiche""", 'axchouraithamiche40@gmail.com'),
]
# https://docs.djangoproject.com/en/dev/ref/settings/#managers
MANAGERS = ADMINS
# django-allauth
# ------------------------------------------------------------------------------
ACCOUNT_ALLOW_REGISTRATION = env.bool('DJANGO_ACCOUNT_ALLOW_REGISTRATION', True)
# https://django-allauth.readthedocs.io/en/latest/configuration.html
ACCOUNT_AUTHENTICATION_METHOD = 'username'
# https://django-allauth.readthedocs.io/en/latest/configuration.html
ACCOUNT_EMAIL_REQUIRED = True
# https://django-allauth.readthedocs.io/en/latest/configuration.html
ACCOUNT_EMAIL_VERIFICATION = 'mandatory'
# https://django-allauth.readthedocs.io/en/latest/configuration.html
ACCOUNT_ADAPTER = 'algerian_library.users.adapters.AccountAdapter'
# https://django-allauth.readthedocs.io/en/latest/configuration.html
SOCIALACCOUNT_ADAPTER = 'algerian_library.users.adapters.SocialAccountAdapter'
# Your stuff...
# ------------------------------------------------------------------------------
|
# Given a set of intervals [(a[1], b[1]), ..., (a[n], b[n])], each contained in [0, 1], describe an
# algorithm that checks whether intervals can be chosen so that the whole interval [0, 1] is covered
# by the selected segments. The cover should consist of as few segments as possible.
def minimum_intervals(T):
T.sort(key=lambda x: x[0])
i = 0
end = 0
result = []
while i < len(T) and end != 1:
actual_start = T[i][0]
actual_end = T[i][1]
flag = True
while i != len(T) and T[i][0] <= end:
if actual_end < T[i][1]:
actual_start = T[i][0]
actual_end = T[i][1]
i += 1
flag = False
if flag:
i += 1
result.append((actual_start, actual_end))
end = actual_end
return result
T = [[0, 0.4], [0, 0.35], [0.2, 0.6], [0.4, 0.6], [0.5, 0.6], [0.1, 0.9], [0.85, 1], [0.9, 1],
[0.3, 0.4], [0.35, 0.4], [0.2, 0.75], [0.4, 1], [0.55, 1], [0.6, 1], [0.9, 1]]
print(minimum_intervals(T))
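# With the sample data above this prints a minimal cover of [0, 1], e.g. [(0, 0.4), (0.4, 1)].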
|
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from proboscis.asserts import assert_equal
from proboscis import test
from fuelweb_test.helpers import common
from fuelweb_test.helpers import os_actions
from fuelweb_test import settings
from fuelweb_test import logger
from fuelweb_test.tests import base_test_case
@test(groups=["thread_non_func_1"])
class DeployHAOneControllerMasterNodeFail(base_test_case.TestBasic):
@test(depends_on=[base_test_case.SetupEnvironment.prepare_slaves_3],
groups=["non_functional",
"deploy_ha_one_controller_flat_master_node_fail"])
def deploy_ha_one_controller_flat_master_node_fail(self):
"""Deploy HA cluster with nova-network and check it without master node
Scenario:
1. Create cluster in ha mode with 1 controller
2. Add 1 node with controller role
3. Add 1 node with compute role
4. Deploy the cluster
5. Validate cluster was set up correctly, there are no dead
services, there are no errors in logs
6. Verify networks
7. Verify network configuration on controller
8. Run OSTF
9. Shut down master node
10. Run openstack verification
Duration 1000m
"""
self.env.revert_snapshot("ready_with_3_slaves")
cluster_id = self.fuel_web.create_cluster(
name=self.__class__.__name__,
mode=settings.DEPLOYMENT_MODE
)
self.fuel_web.update_nodes(
cluster_id,
{
'slave-01': ['controller'],
'slave-02': ['compute']
}
)
self.fuel_web.deploy_cluster_wait(cluster_id)
controller_ip = self.fuel_web.get_public_vip(cluster_id)
os_conn = os_actions.OpenStackActions(controller_ip)
self.fuel_web.assert_cluster_ready(
os_conn, smiles_count=6, networks_count=1, timeout=300)
self.fuel_web.verify_network(cluster_id)
logger.info('PASS DEPLOYMENT')
self.fuel_web.run_ostf(
cluster_id=cluster_id)
logger.info('PASS OSTF')
logger.info('Destroy admin node...')
self.env.nodes().admin.destroy()
logger.info('Admin node destroyed')
common_func = common.Common(
controller_ip,
settings.SERVTEST_USERNAME,
settings.SERVTEST_PASSWORD,
settings.SERVTEST_TENANT)
# create instance
server = common_func.create_instance()
# get_instance details
details = common_func.get_instance_detail(server)
assert_equal(details.name, 'test_instance')
        # Check if instance is active
common_func.verify_instance_status(server, 'ACTIVE')
# delete instance
common_func.delete_instance(server)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This python program saves test XMLs from denon receiver to current directory.
Usage: python denon_receiver_xml.py --host 192.168.0.250 --prefix AVR-X4100W
:copyright: (c) 2017 by Oliver Goetz.
:license: MIT, see LICENSE for more details.
"""
import argparse
from io import BytesIO
import requests
import xml.etree.ElementTree as ET
from collections import namedtuple
XML = namedtuple("XML", ["port", "type", "path", "tags", "filename"])
SAVED_XML = [XML("80", "post", "/goform/AppCommand.xml",
["GetFriendlyName"],
"AppCommand-setup"),
XML("80", "post", "/goform/AppCommand.xml",
["GetAllZonePowerStatus", "GetAllZoneSource",
"GetRenameSource", "GetDeletedSource",
"GetSurroundModeStatus", "GetToneControl",
"GetAllZoneVolume", "GetAllZoneMuteStatus"],
"AppCommand-update"),
XML("80", "get", "/goform/Deviceinfo.xml", [], "Deviceinfo.xml"),
XML("80", "get", "/goform/formMainZone_MainZoneXmlStatus.xml",
[], "formMainZone_MainZoneXmlStatus"),
XML("80", "get", "/goform/formMainZone_MainZoneXml.xml",
[], "formMainZone_MainZoneXml"),
XML("80", "get", "/goform/formNetAudio_StatusXml.xml",
[], "formNetAudio_StatusXml"),
XML("80", "get", "/goform/formTuner_TunerXml.xml",
[], "formTuner_TunerXml"),
XML("80", "get", "/goform/formTuner_HdXml.xml",
[], "formTuner_HdXml"),
XML("80", "get", "/goform/formZone2_Zone2XmlStatus.xml",
[], "formZone2_Zone2XmlStatus"),
XML("80", "get", "/goform/formZone3_Zone3XmlStatus.xml",
[], "formZone3_Zone3XmlStatus"),
XML("8080", "post", "/goform/AppCommand.xml",
["GetFriendlyName"],
"AppCommand-setup"),
XML("8080", "post", "/goform/AppCommand.xml",
["GetAllZonePowerStatus", "GetAllZoneSource",
"GetRenameSource", "GetDeletedSource",
"GetSurroundModeStatus", "GetToneControl",
"GetAllZoneVolume", "GetAllZoneMuteStatus"],
"AppCommand-update"),
XML("8080", "get", "/goform/Deviceinfo.xml", [],
"Deviceinfo.xml"),
XML("8080", "get", "/goform/formMainZone_MainZoneXmlStatus.xml",
[], "formMainZone_MainZoneXmlStatus"),
XML("8080", "get", "/goform/formMainZone_MainZoneXml.xml",
[], "formMainZone_MainZoneXml"),
XML("8080", "get", "/goform/formNetAudio_StatusXml.xml",
[], "formNetAudio_StatusXml"),
XML("8080", "get", "/goform/formTuner_TunerXml.xml",
[], "formTuner_TunerXml"),
XML("8080", "get", "/goform/formTuner_HdXml.xml",
[], "formTuner_HdXml"),
XML("8080", "get", "/goform/formZone2_Zone2XmlStatus.xml",
[], "formZone2_Zone2XmlStatus"),
XML("8080", "get", "/goform/formZone3_Zone3XmlStatus.xml",
[], "formZone3_Zone3XmlStatus")]
def create_post_body(attribute_list):
# Buffer XML body as binary IO
body = BytesIO()
chunks = [attribute_list[i:i+5] for i in range(
0, len(attribute_list), 5)]
for i, chunk in enumerate(chunks):
# Prepare POST XML body for AppCommand.xml
post_root = ET.Element("tx")
for attribute in chunk:
# Append tags for each attribute
item = ET.Element("cmd")
item.set("id", "1")
item.text = attribute
post_root.append(item)
post_tree = ET.ElementTree(post_root)
post_tree.write(body, encoding="utf-8", xml_declaration=bool(i == 0))
body_bytes = body.getvalue()
body.close()
return body_bytes
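# For example, create_post_body(["GetFriendlyName"]) produces roughly:
#   <?xml version='1.0' encoding='utf-8'?>
#   <tx><cmd id="1">GetFriendlyName</cmd></tx>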
def http_post(host, port, path, tags, filename):
filename = filename + "-" + str(port)
data = create_post_body(tags)
try:
r = requests.post(
"http://{host}:{port}/{path}".format(
host=host, port=port, path=path), data=data)
except requests.exceptions.ConnectionError:
print("ConnectionError retrieving data from host {} port {} \
path {}".format(host, port, path))
filename = filename + "-ConnectionError.xml"
with open("./{}".format(filename), "wb") as file:
file.write("".encode())
except requests.exceptions.Timeout:
print("Timeout retrieving data from host {} port {} path {}".format(
host, port, path))
filename = filename + "-Timeout.xml"
with open("./{}".format(filename), "wb") as file:
file.write("".encode())
else:
print("HTTP Status Code of {}: {}".format(path, r.status_code))
filename = filename + "-" + str(r.status_code) + ".xml"
with open("./{}".format(filename), "wb") as file:
file.write(r.content)
def http_get(host, port, path, filename):
filename = filename + "-" + str(port)
try:
r = requests.get(
"http://{host}:{port}/{path}".format(
host=host, port=port, path=path))
except requests.exceptions.ConnectionError:
print("ConnectionError retrieving data from host {} path {}".format(
host, path))
filename = filename + "-ConnectionError.xml"
with open("./{}".format(filename), "wb") as file:
file.write("".encode())
except requests.exceptions.Timeout:
print("Timeout retrieving data from host {} path {}".format(
host, path))
filename = filename + "-Timeout.xml"
with open("./{}".format(filename), "wb") as file:
file.write("".encode())
else:
print("HTTP Status Code of {}: {}".format(path, r.status_code))
filename = filename + "-" + str(r.status_code) + ".xml"
with open("./{}".format(filename), "wb") as file:
file.write(r.content)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--host', type=str,
default='192.168.0.250',
help='host of Denon AVR receiver')
parser.add_argument('--prefix', type=str,
default='AVR',
help='prefix of filenames to be saved')
args = parser.parse_args()
for entry in SAVED_XML:
if entry.type == "post":
http_post(args.host, entry.port, entry.path, entry.tags,
"{}-{}".format(args.prefix, entry.filename))
elif entry.type == "get":
http_get(args.host, entry.port, entry.path, "{}-{}".format(
args.prefix, entry.filename))
else:
print("wrong type, only \"get\" and \"post\" are allowed")
|
import logging
from sqlalchemy import Column, INT
from sqlalchemy_utc import UtcDateTime
from pajbot import utils
from pajbot.managers.db import Base
log = logging.getLogger(__name__)
class Roulette(Base):
__tablename__ = "roulette"
id = Column(INT, primary_key=True)
user_id = Column(INT, index=True, nullable=False)
created_at = Column(UtcDateTime(), nullable=False)
points = Column(INT, nullable=False)
def __init__(self, user_id, points):
self.user_id = user_id
self.created_at = utils.now()
self.points = points
|
# python3
import sys
def fib_slow(n):
'''Dumb (slow) example solution.
'''
if (n <= 1):
return n
return fib_slow(n - 1) + fib_slow(n - 2)
def fib_countup(n):
'''Less-dumb 'count up as you go' solution.
'''
if (n <= 1):
return n
x, y = 0, 1
for i in range(n):
x, y = y, x + y
return x
def fib_memoize(n, saved={0: 0, 1: 1}):
'''Use memoization to speed things up.
'''
if (n <= 1):
return n
if n not in saved:
saved[n] = fib_memoize(n-1, saved) + fib_memoize(n-2, saved)
return saved[n]
def fib_matrix(n):
'''Use matrix multiplication to solve it.
Ref: https://en.wikipedia.org/wiki/Fibonacci_number#Matrix_form
'''
if (n <= 1):
return n
v1, v2, v3 = 1, 1, 0 # Initialise a matrix [[1,1],[1,0]]
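    # v1, v2, v3 hold F(k+1), F(k), F(k-1) for the current power k; squaring the
    # matrix doubles k, and the extra multiply below advances k by one.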
for rec in bin(n)[3:]: # Raise it to the nth power
calc = v2 * v2
v1, v2, v3 = v1 * v1 + calc, (v1 + v3) * v2, calc + v3 * v3
if rec == '1':
v1, v2, v3 = v1 + v2, v1, v2
return v2
def fib_fast_double(n):
'''Use fast doubling method.
Ref: https://www.nayuki.io/page/fast-fibonacci-algorithms
'''
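    # Fast doubling identities: F(2k) = F(k) * (2*F(k+1) - F(k)) and
    # F(2k+1) = F(k)^2 + F(k+1)^2; each call returns the pair (F(n), F(n+1)).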
if n == 0:
return (0, 1)
else:
a, b = fib_fast_double(n // 2)
c = a * (b * 2 - a)
d = a * a + b * b
if n % 2 == 0:
return (c, d)
else:
return (d, c + d)
def fibonacci(n):
"""Returns F(n)
"""
if n < 0:
raise ValueError
return fib_fast_double(n)[0]
if __name__ == '__main__':
n = int(sys.stdin.read())
print(fibonacci(n))
|
'''
Copyright 2017-present, Airbnb Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import json
import os
from collections import OrderedDict
class ConfigError(Exception):
pass
def load_config(conf_dir='conf/'):
"""Load the configuration for StreamAlert.
All configuration files live in the `conf` directory in JSON format.
    `sources` defines a collection of AWS services (S3, Kinesis) supported as
inputs to StreamAlert, specific entities (S3 buckets, Kinesis streams),
and log types emitted from them.
`logs` declare the schema for the listed log types in `sources`. Each
key denotes the name of the log type, and includes 'keys' used to match
rules to log fields.
"""
conf_files = {
'sources': 'sources.json',
'logs': 'logs.json'
}
config = {}
for desc, filename in conf_files.iteritems():
with open(os.path.join(conf_dir, filename)) as data:
try:
config[desc] = json.load(data, object_pairs_hook=OrderedDict)
except ValueError:
raise ConfigError('Invalid JSON format for {}.json'.format(desc))
if validate_config(config):
return config
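# A minimal illustration (hypothetical names and values) of the layout load_config expects:
#   conf/logs.json:    {"example_log": {"schema": {"name": "string"}, "parser": "json"}}
#   conf/sources.json: {"kinesis": {"example_stream": {"logs": ["example_log"]}}}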
def validate_config(config):
"""Validate the StreamAlert configuration contains a valid structure.
Checks for `logs.json`:
- each log has a schema and parser declared
Checks for `sources.json`
    - the sources config contains only `kinesis`, `s3`, or `sns` keys
    - each source has a list of logs declared
    """
for config_key, settings in config.iteritems():
# check log declarations
if config_key == 'logs':
for log, attrs in settings.iteritems():
if not {'schema', 'parser'}.issubset(set(attrs.keys())):
raise ConfigError('Schema or parser missing for {}'.format(log))
# check sources attributes
elif config_key == 'sources':
if not set(settings.keys()).issubset({'kinesis', 's3', 'sns'}):
raise ConfigError('Sources missing \'kinesis\', \'s3\', or \'sns\' keys')
for log, attrs in settings.iteritems():
for entity, entity_attrs in attrs.iteritems():
if 'logs' not in set(entity_attrs.keys()):
raise ConfigError('Logs are not declared for {}'.format(entity))
if len(entity_attrs['logs']) == 0:
raise ConfigError('Log list is empty for {}'.format(entity))
return True
def load_env(context):
"""Get the current environment for the running Lambda function.
Parses the invoked_function_arn from the given context object to get
the name of the currently running alias (either production or staging)
and the name of the function.
Example:
arn:aws:lambda:aws-region:acct-id:function:stream_alert:production
Args:
context: The AWS Lambda context object.
Returns:
{'lambda_region': 'region_name',
'account_id': <ACCOUNT_ID>,
'lambda_function_name': 'function_name',
'lambda_alias': 'qualifier'}
"""
env = {}
if context:
arn = context.invoked_function_arn.split(':')
env['lambda_region'] = arn[3]
env['account_id'] = arn[4]
env['lambda_function_name'] = arn[6]
env['lambda_alias'] = arn[7]
else:
env['lambda_region'] = 'us-east-1'
env['account_id'] = '123456789012'
env['lambda_function_name'] = 'test_streamalert_rule_processor'
env['lambda_alias'] = 'development'
return env
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import os
import sys
import time
import traceback
import uuid
import testtools
import xvfbwrapper
from openstack_dashboard.test.integration_tests import config
from openstack_dashboard.test.integration_tests.pages import loginpage
from openstack_dashboard.test.integration_tests import webdriver
ROOT_PATH = os.path.dirname(os.path.abspath(__file__))
if ROOT_PATH not in sys.path:
sys.path.append(ROOT_PATH)
def gen_random_resource_name(resource="", timestamp=True):
"""Generate random resource name using uuid and timestamp.
    Input fields are usually limited to 255 or 80 characters, hence they
    provide enough space for quite long resource names. If the maximum
    field length is more restricted, consider using a shorter resource
    argument or skipping the timestamp by setting the timestamp argument
    to False.
    """
fields = ["horizon"]
if resource:
fields.append(resource)
if timestamp:
tstamp = time.strftime("%d-%m-%H-%M-%S")
fields.append(tstamp)
fields.append(str(uuid.uuid4()).replace("-", ""))
return "_".join(fields)
class BaseTestCase(testtools.TestCase):
CONFIG = config.get_config()
def setUp(self):
if os.environ.get('INTEGRATION_TESTS', False):
# Start a virtual display server for running the tests headless.
if os.environ.get('SELENIUM_HEADLESS', False):
self.vdisplay = xvfbwrapper.Xvfb(width=1280, height=720)
args = []
# workaround for memory leak in Xvfb taken from:
# http://blog.jeffterrace.com/2012/07/xvfb-memory-leak-workaround.html
args.append("-noreset")
# disables X access control
args.append("-ac")
if hasattr(self.vdisplay, 'extra_xvfb_args'):
# xvfbwrapper 0.2.8 or newer
self.vdisplay.extra_xvfb_args.extend(args)
else:
self.vdisplay.xvfb_cmd.extend(args)
self.vdisplay.start()
# Start the Selenium webdriver and setup configuration.
self.driver = webdriver.WebDriverWrapper()
self.driver.maximize_window()
self.driver.implicitly_wait(self.CONFIG.selenium.implicit_wait)
self.driver.set_page_load_timeout(
self.CONFIG.selenium.page_timeout)
self.addOnException(self._dump_page_html_source)
self.addOnException(self._save_screenshot)
else:
msg = "The INTEGRATION_TESTS env variable is not set."
raise self.skipException(msg)
super(BaseTestCase, self).setUp()
def _dump_page_html_source(self, exc_info):
content = None
try:
pg_source = self._get_page_html_source()
content = testtools.content.Content(
testtools.content_type.ContentType('text', 'html'),
lambda: pg_source)
except Exception:
exc_traceback = traceback.format_exc()
content = testtools.content.text_content(exc_traceback)
finally:
self.addDetail("PageHTMLSource.html", content)
def _save_screenshot(self, exc_info):
screenshot_dir = os.path.join(
ROOT_PATH,
self.CONFIG.selenium.screenshots_directory)
if not os.path.exists(screenshot_dir):
os.makedirs(screenshot_dir)
date_string = datetime.datetime.now().strftime(
'%Y.%m.%d-%H%M%S')
test_name = self._testMethodName
name = '%s_%s.png' % (test_name, date_string)
filename = os.path.join(screenshot_dir, name)
self.driver.get_screenshot_as_file(filename)
content = testtools.content.text_content(filename)
self.addDetail("Screenshot", content)
def _get_page_html_source(self):
"""Gets html page source.
self.driver.page_source is not used on purpose because it does not
display html code generated/changed by javascript.
"""
html_elem = self.driver.find_element_by_tag_name("html")
return html_elem.get_attribute("innerHTML").encode("UTF-8")
def tearDown(self):
if os.environ.get('INTEGRATION_TESTS', False):
self.driver.quit()
if hasattr(self, 'vdisplay'):
self.vdisplay.stop()
super(BaseTestCase, self).tearDown()
class TestCase(BaseTestCase):
TEST_USER_NAME = BaseTestCase.CONFIG.identity.username
TEST_PASSWORD = BaseTestCase.CONFIG.identity.password
def setUp(self):
super(TestCase, self).setUp()
self.login_pg = loginpage.LoginPage(self.driver, self.CONFIG)
self.login_pg.go_to_login_page()
self.home_pg = self.login_pg.login(self.TEST_USER_NAME,
self.TEST_PASSWORD)
def tearDown(self):
try:
if self.home_pg.is_logged_in:
self.home_pg.go_to_home_page()
self.home_pg.log_out()
finally:
super(TestCase, self).tearDown()
class AdminTestCase(TestCase):
TEST_USER_NAME = TestCase.CONFIG.identity.admin_username
TEST_PASSWORD = TestCase.CONFIG.identity.admin_password
|
# Generated by Django 2.2.12 on 2020-05-01 03:52
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('issues', '0005_auto_20200501_0350'),
]
operations = [
migrations.AlterField(
model_name='issue',
name='assignee',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='assigned_to', to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='issue',
name='linked_to',
field=models.ManyToManyField(blank=True, related_name='_issue_linked_to_+', to='issues.Issue'),
),
]
|
# -*- coding: utf-8 -*-
import xml.etree.ElementTree as ET
import unittest
from context import xml_bibs as xb
class TestGetSubjectFields(unittest.TestCase):
"""Tests parsing of subjects from marcxml"""
def setUp(self):
tree = ET.parse("sample_marcxml.xml")
self.data1 = tree.getroot()
tree = ET.parse("missing_tags_sample_marcxml.xml")
self.data2 = tree.getroot()
def test_none(self):
self.assertEqual(xb.get_subject_fields(None), {})
def test_missing_tag(self):
self.assertEqual(xb.get_subject_fields(self.data2), {})
def test_present_600_tag(self):
self.assertEqual(xb.get_subject_fields(self.data1), {"600": "Elizabeth II"})
class TestGetTag082(unittest.TestCase):
"""Tests parsing 082 tag subfield a"""
def setUp(self):
tree = ET.parse("sample_marcxml.xml")
self.data1 = tree.getroot()
tree = ET.parse("missing_tags_sample_marcxml.xml")
self.data2 = tree.getroot()
def test_none(self):
self.assertIsNone(xb.get_tag_082(None))
def test_missing_tag(self):
self.assertIsNone(xb.get_tag_082(self.data2))
def test_found_tag(self):
self.assertEqual(xb.get_tag_082(self.data1), "973.9/092/2")
if __name__ == "__main__":
unittest.main()
|
import pickle
import argparse
import pandas as pd
import numpy as np
import math
from tqdm import tqdm
from sklearn import decomposition
CENTER_X = int(960 / 3 / 2)
CENTER_Y = int(540 / 3 / 2)
# CENTER_X = 0
# CENTER_Y = 0
def load_data(path, data_size=None):
with open(path, 'rb') as f:
data = pickle.load(f)
if data_size != -1:
dataset = data[:data_size]
else:
dataset = data[:]
return dataset
def save_data(path, data):
with open(path, 'wb') as f:
pickle.dump(data, f)
'''
Fill empty coordinates,
relocate landmark positions,
and filter out landmarks whose pupil coordinates are abnormal.
'''
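# As a small worked example of the filling/filtering step (values assumed for
# illustration): a coordinate column [10, 0, 12, 14] becomes [10, 10, 12, 14]
# after the zero -> NaN replacement and forward fill, and the 3-frame rolling
# mean then yields [NaN, NaN, 10.67, 12.0].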
def run_fill_filter(eye_dataset):
for ed in tqdm(eye_dataset):
# preprocessing landmarks
# print('[INFO] Current video: {}'.format(ed['vid']))
for clip_info in ed['clip_info']:
landmarks = clip_info['landmarks']
filled_landmarks = []
for landmark in landmarks:
ci_df = pd.DataFrame(np.array(landmark))
ci_df = ci_df.replace(0, np.nan)
ci_df = ci_df.fillna(method='ffill') # fill NaN values in dataset
ci_df = ci_df.rolling(3).mean() # moving average filtering
temp_lm = []
for landmark in ci_df.values.tolist():
filled = [int(lm) for lm in landmark if not(np.isnan(lm))]
if len(filled) == 50:
# centering
diff_x = CENTER_X - filled[48]
diff_y = CENTER_Y - filled[49]
for f_i in range(0, len(filled), 2):
filled[f_i] += diff_x
filled[f_i+1] += diff_y
# check right pupil is outside of eye region
condition1 = filled[0] > filled[4] and filled[0] < filled[10]
condition2 = filled[1] > filled[7] and filled[1] > filled[9]
condition3 = filled[1] < filled[13] and filled[1] < filled[14]
if condition1 and condition2 and condition3:
temp_lm.append(filled)
filled_landmarks.append(temp_lm)
clip_info['landmarks'] = filled_landmarks
return eye_dataset
'''
Normalize the eye expression motion scale over the whole dataset.
To avoid pupil dislocation, we apply the same vector to the right and left pupils.
'''
def run_normalization(eye_dataset):
eb_standard_len = 100
def get_dist(x1, y1, x2, y2):
return np.sqrt((x1-x2) ** 2 + (y1- y2) ** 2)
def get_theta(var_x, var_y, fix_x, fix_y):
return math.atan2(var_y - fix_y, var_x - fix_x)
def get_new_coor(theta, dist, point):
return dist * np.array([math.cos(theta),
math.sin(theta)]) + np.array([point[0], point[1]])
def run_len_norm(var_x, var_y, fix_x, fix_y, expected_len):
angle = get_theta(var_x, var_y, fix_x, fix_y)
new_coor = get_new_coor(angle, expected_len, [fix_x, fix_y])
return new_coor
for ed in tqdm(eye_dataset):
# preprocessing landmarks
# print('[INFO] Current video: {}'.format(ed['vid']))
for clip_info in ed['clip_info']:
tmp_landmarks = []
for landmark in clip_info['landmarks']:
tmp_landmark = []
for lm in landmark:
# calculate different ratio with standard length
right_len_ratio = eb_standard_len / get_dist(lm[46], lm[47], lm[48], lm[49])
left_len_ratio = eb_standard_len / get_dist(lm[28], lm[29], lm[48], lm[49])
len_ratio = (right_len_ratio + left_len_ratio) / 2
fix_x, fix_y = lm[48], lm[49]
new_coor_list = []
for lm_i in range(0, len(lm[:48]), 2):
new_coor = run_len_norm(lm[lm_i], lm[lm_i+1], fix_x, fix_y,
get_dist(lm[lm_i], lm[lm_i+1], fix_x, fix_y) * len_ratio)
new_coor_list += [int(new_coor[0]), int(new_coor[1])]
# pupil preprocessing
right_theta = get_theta(lm[0], lm[1], lm[6], lm[7])
right_dist = get_dist(lm[0], lm[1], lm[6], lm[7])
left_new_pulpil = get_new_coor(right_theta, right_dist, [lm[18], lm[19]])
lm[2] = int(left_new_pulpil[0])
lm[3] = int(left_new_pulpil[1])
new_coor_list += [fix_x, fix_y]
tmp_landmark.append(new_coor_list)
tmp_landmarks.append(tmp_landmark)
clip_info['landmarks'] = tmp_landmarks
return eye_dataset
'''
Run PCA.
We use 7 components for the PCA.
'''
def run_estimator(eye_dataset, opt):
landmark_list = []
for ed in eye_dataset:
for clip_info in ed['clip_info']:
for clip_landmarks in clip_info['landmarks']:
for landmarks in clip_landmarks:
landmark_list.append(landmarks)
landmark_array = np.array(landmark_list)
n_samples, n_features = landmark_array.shape
print('[INFO] n_samples:{}, n_features:{}'.format(n_samples, n_features))
print('[INFO] Estimated running time: {:0.2f} hrs with {} fps'.format(n_samples/opt.fps/60/60, opt.fps))
data = landmark_array[:, :-2]
estimator = decomposition.PCA(opt.n_components, svd_solver='randomized', whiten=True)
estimator.fit(data)
var_ratio = estimator.explained_variance_ratio_
print('[INFO] {} number of components explain {:0.2f} of original dataset.'.format(opt.n_components, np.sum(var_ratio)))
    print('[INFO] Without the first and second axes, the remaining hyperplanes explain {:0.2f} of the original dataset.'.format(np.sum(var_ratio[3:])))
return estimator
'''
Based on the learned PCA eigenvectors (7 hyperplanes that can explain the original dataset),
we transform the 50-dimensional landmarks into 7 dimensions to represent eye expression.
Because the first and second eigenvectors represent rotating motion in our PCA space,
we set those values to zero.
'''
def run_transform(eye_dataset, estimator, opt):
for ed in tqdm(eye_dataset):
for clip_info in ed['clip_info']:
landmarks = clip_info['landmarks']
transformed_landmarks = []
for landmark in landmarks:
tmp_trans = []
for lm in landmark:
transformed_array = estimator.transform(np.array([lm[:-2]]))
transformed_list = transformed_array.tolist()[0]
if opt.is_rotation_killed: # we killed pca hyperplanes which have a rotation
# transformed_list[0] = int(transformed_list[0]/3)
# transformed_list[1] = int(transformed_list[1]/3)
transformed_list[0] = 0
transformed_list[1] = 0
tmp_trans.append(transformed_list)
transformed_landmarks.append(tmp_trans)
clip_info['landmarks'] = transformed_landmarks
return eye_dataset
def main():
parser = argparse.ArgumentParser()
parser.add_argument('-dataset_path', default='./dataset')
parser.add_argument('-data_size', type=int, default=-1) # -1 means whole dataset
parser.add_argument('-fps', type=int, default=10)
parser.add_argument('-n_components', type=int, default=7)
parser.add_argument('-is_rotation_killed', type=bool, default=True)
opt = parser.parse_args()
eye_dataset = load_data('{}/eye_motion_dataset.pickle'.format(opt.dataset_path), opt.data_size)
print('[INFO] Dataset length: {}'.format(len(eye_dataset)))
print('[INFO] Filling, filtering and centering is now processing.')
eye_dataset = run_fill_filter(eye_dataset)
print('[INFO] Normalization is now processing.')
eye_dataset = run_normalization(eye_dataset)
print('[INFO] Estimator is now running.')
estimator = run_estimator(eye_dataset, opt)
print('[INFO] Landmarks are now transforming.')
eye_dataset = run_transform(eye_dataset, estimator, opt)
# save processed dataset
processed_dataset = {'eye_dataset': eye_dataset,
'estimator': estimator,
}
save_path = '{}/processed_eye_motion_dataset_pca_{}.pickle'.format(opt.dataset_path, estimator.n_components)
print('[INFO] Save preprocessed dataset at {}'.format(save_path))
save_data(save_path, processed_dataset)
if __name__ == '__main__':
main()
|
#!/usr/bin/env python3
## How many clusters have more than one organism as their members?
import sys
import pandas as pd
import logging
def main():
clstr_table = sys.argv[1]
output = sys.argv[2]
clstr_df = pd.read_table(clstr_table, header=0)
clstr_df["organism"] = clstr_df["id"].apply(lambda x: x.split(":")[2].split("_")[0])
summ_df = clstr_df.groupby("clstr").agg(
num_organisms=("organism", pd.Series.nunique), organism_list=("organism", set)
)
close_strains = set()
for row in summ_df.query("num_organisms > 1").itertuples(index=False):
close_strains.update(row.organism_list)
logging.info(
f"There are {len(close_strains)} strains in the community for which another strain exists with an identical V3-V4 region"
)
summ_df["organism_list"] = summ_df["organism_list"].apply(
lambda x: "; ".join(set(x))
)
summ_df = summ_df.sort_values("num_organisms", ascending=False)
summ_df.to_csv(output)
if __name__ == "__main__":
logging.basicConfig(
level=logging.INFO, format="%(asctime)s\t[%(levelname)s]:\t%(message)s",
)
main()
|
from http import HTTPStatus
from rest_framework import views, viewsets
from rest_framework.exceptions import ParseError
from rest_framework.request import Request
from rest_framework.response import Response
from scraper import models, serializers, tasks
from scraper.utils import get_random_working_proxy
class WebsiteViewSet(viewsets.ModelViewSet):
queryset = models.Website.objects.all()
serializer_class = serializers.WebsiteSerializer
filterset_fields = ("is_active",)
search_field = ("name", "code", "url")
ordering_fields = ("name", "code", "id")
class PageViewSet(viewsets.ModelViewSet):
queryset = models.Page.objects.all()
serializer_class = serializers.PageSerializer
filterset_fields = ("is_active", "has_js")
search_field = ("site__name", "site__code", "path")
ordering_fields = ("site", "id")
class ProxyViewSet(viewsets.ModelViewSet):
queryset = models.Proxy.objects.all()
serializer_class = serializers.ProxySerializer
filterset_fields = ("is_active", "is_dead", "anonymity", "protocol")
search_field = ("ip", "port", "country")
ordering_fields = ("id", "ip", "port", "country")
class ScrapeSitesAPI(views.APIView):
def post(self, request):
task = tasks.scrape_sites.apply_async()
return Response({"task_id": task.id, "status": task.status})
class CheckProxiesAPI(views.APIView):
def post(self, request):
task = tasks.check_proxies.apply_async()
return Response({"task_id": task.id, "status": task.status})
class GetProxyAPI(views.APIView):
def get_proxy(
self, output: str = "dict", test_urls: list or tuple = None
) -> Response:
        result = get_random_working_proxy(output=output, test_urls=test_urls)
if result:
return Response(
{"result": result, "status": "SUCCESS"}, status=HTTPStatus.OK
) # 200 OK
else:
return Response(
{"status": "NO PROXY FOUND"}, status=HTTPStatus.NO_CONTENT
) # 204 No content
def get(self, request: Request):
return self.get_proxy()
def post(self, request: Request):
test_urls = request.POST.get("test_urls", None)
if not test_urls:
raise ParseError("Must provide test_urls for proxy check")
if isinstance(test_urls, str):
test_urls = (test_urls,)
return self.get_proxy(test_urls=test_urls)
|
from ..apibits import *
from ..endpoints import GeneratorsEndpoint
from ..endpoints import GeneratorRowsEndpoint
class Generator(ApiResource):
@classmethod
def all(cls, params={}, headers={}):
res = cls.default_client().generators().all(params, headers)
return res
@classmethod
def retrieve(cls, generator_id, params={}, headers={}):
res = cls.default_client().generators().retrieve(generator_id, params, headers)
return res
@classmethod
def update(cls, generator_id, params={}, headers={}):
res = cls.default_client().generators().update(generator_id, params, headers)
return res
@classmethod
def create(cls, params={}, headers={}):
res = cls.default_client().generators().create(params, headers)
return res
def refresh(self, params={}, headers={}):
res = self.get_client().generators().retrieve(self.id, params, headers)
return self.refresh_from(res.json, res.api_method, res.client)
def delete(self, params={}, headers={}):
res = self.get_client().generators().delete(self.id, params, headers)
return res
def rows(self):
from ..endpoints import GeneratorRowsEndpoint
return GeneratorRowsEndpoint(self.client, self)
# Everything below here is used behind the scenes.
def __init__(self, *args, **kwargs):
super(Generator, self).__init__(*args, **kwargs)
ApiResource.register_api_subclass(self, "generator")
_api_attributes = {
"columns" : {},
"created_at" : {},
"data" : {},
"description" : {},
"generator_type" : {},
"id" : {},
"name" : {},
"row_count" : {},
}
|
###
# Copyright (c) 2005, Jeremiah Fincher
# Copyright (c) 2010-2021, The Limnoria Contributors
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions, and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author of this software nor the name of
# contributors to this software may be used to endorse or promote products
# derived from this software without specific prior written consent.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
###
"""
Handles relaying between networks.
"""
import supybot
import supybot.world as world
# Use this for the version of this plugin. You may wish to put a CVS keyword
# in here if you're keeping the plugin in CVS or some similar system.
__version__ = "%%VERSION%%"
__author__ = supybot.authors.jemfinch
__maintainer__ = supybot.authors.limnoria_core
# This is a dictionary mapping supybot.Author instances to lists of
# contributions.
__contributors__ = {}
from . import config
from . import plugin
from importlib import reload
reload(plugin) # In case we're being reloaded.
# Add more reloads here if you add third-party modules and want them to be
# reloaded when this plugin is reloaded. Don't forget to import them as well!
if world.testing:
from . import test
Class = plugin.Class
configure = config.configure
# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79:
|
__copyright__ = "Copyright (c) 2020 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
# do not change this line manually
# this is managed by git tag and updated on every release
__version__ = '0.6.8'
# do not change this line manually
# this is managed by proto/build-proto.sh and updated on every execution
__proto_version__ = '0.0.65'
import platform
import sys
# do some os-wise patches
if sys.version_info < (3, 7, 0):
raise OSError('Jina requires Python 3.7 and above, but yours is %s' % sys.version_info)
if sys.version_info >= (3, 8, 0) and platform.system() == 'Darwin':
# temporary fix for python 3.8 on macos where the default start is set to "spawn"
# https://docs.python.org/3/library/multiprocessing.html#contexts-and-start-methods
from multiprocessing import set_start_method
set_start_method('fork')
from datetime import datetime
from types import SimpleNamespace
import os
# attempt to work around the fork error on macOS; it seems to have no effect here, so the variable may still need to be exported manually before starting Jina
os.environ['OBJC_DISABLE_INITIALIZE_FORK_SAFETY'] = 'YES'
__uptime__ = datetime.now().strftime('%Y%m%d%H%M%S')
# update on MacOS
# 1. clean this tuple,
# 2. grep -ohE "\'JINA_.*?\'" **/*.py | sort -u | sed "s/$/,/g"
# 3. copy all lines EXCEPT the first (which is the grep command in the last line)
__jina_env__ = ('JINA_ARRAY_QUANT',
'JINA_BINARY_DELIMITER',
'JINA_CONTRIB_MODULE',
'JINA_CONTRIB_MODULE_IS_LOADING',
'JINA_CONTROL_PORT',
'JINA_DB_COLLECTION',
'JINA_DB_HOSTNAME',
'JINA_DB_NAME',
'JINA_DB_PASSWORD',
'JINA_DB_USERNAME',
'JINA_DEFAULT_HOST',
'JINA_DISABLE_UVLOOP',
'JINA_EXECUTOR_WORKDIR',
'JINA_FULL_CLI',
'JINA_IPC_SOCK_TMP',
'JINA_LOG_CONFIG',
'JINA_LOG_NO_COLOR',
'JINA_POD_NAME',
'JINA_PROFILING',
'JINA_RANDOM_PORTS',
'JINA_SOCKET_HWM',
'JINA_TEST_GPU',
'JINA_TEST_PRETRAINED',
'JINA_VCS_VERSION',
'JINA_WARN_UNNAMED')
__default_host__ = os.environ.get('JINA_DEFAULT_HOST', '0.0.0.0')
__ready_msg__ = 'ready and listening'
__stop_msg__ = 'terminated'
__unable_to_load_pretrained_model_msg__ = 'Executor depending on pretrained model file could not find the pretrained model'
__binary_delimiter__ = os.environ.get('JINA_BINARY_DELIMITER', '460841a0a8a430ae25d9ad7c1f048c57').encode()
JINA_GLOBAL = SimpleNamespace()
JINA_GLOBAL.imported = SimpleNamespace()
JINA_GLOBAL.imported.executors = False
JINA_GLOBAL.imported.drivers = False
JINA_GLOBAL.imported.hub = False
JINA_GLOBAL.logserver = SimpleNamespace()
def import_classes(namespace: str, targets=None,
show_import_table: bool = False, import_once: bool = False):
"""
Import all or selected executors into the runtime. This is called when Jina is first imported for registering the YAML
    constructor beforehand. It can also be used to import third-party or external executors.
:param namespace: the namespace to import
:param targets: the list of executor names to import
:param show_import_table: show the import result as a table
:param import_once: import everything only once, to avoid repeated import
"""
import os, re
from .logging import default_logger
if namespace == 'jina.executors':
import_type = 'ExecutorType'
if import_once and JINA_GLOBAL.imported.executors:
return
elif namespace == 'jina.drivers':
import_type = 'DriverType'
if import_once and JINA_GLOBAL.imported.drivers:
return
elif namespace == 'jina.hub':
import_type = 'ExecutorType'
if import_once and JINA_GLOBAL.imported.hub:
return
else:
raise TypeError(f'namespace: {namespace} is unrecognized')
from setuptools import find_packages
import pkgutil
from pkgutil import iter_modules
try:
path = os.path.dirname(pkgutil.get_loader(namespace).path)
except AttributeError:
if namespace == 'jina.hub':
default_logger.debug(f'hub submodule is not initialized. Please try "git submodule update --init"')
return {}
modules = set()
for info in iter_modules([path]):
if (namespace != 'jina.hub' and not info.ispkg) or (namespace == 'jina.hub' and info.ispkg):
modules.add('.'.join([namespace, info.name]))
for pkg in find_packages(path):
modules.add('.'.join([namespace, pkg]))
pkgpath = path + '/' + pkg.replace('.', '/')
for info in iter_modules([pkgpath]):
if (namespace != 'jina.hub' and not info.ispkg) or (namespace == 'jina.hub' and info.ispkg):
modules.add('.'.join([namespace, pkg, info.name]))
# filter
ignored_module_pattern = r'\.tests|\.api|\.bump_version'
modules = {m for m in modules if not re.findall(ignored_module_pattern, m)}
from collections import defaultdict
load_stat = defaultdict(list)
bad_imports = []
if isinstance(targets, str):
targets = {targets}
elif isinstance(targets, list):
targets = set(targets)
elif targets is None:
targets = set()  # keep the type consistent with the other branches
else:
raise TypeError(f'target must be a set, but received {targets!r}')
depend_tree = {}
import importlib
from .helper import colored
for m in modules:
try:
mod = importlib.import_module(m)
for k in dir(mod):
# import the class
if (getattr(mod, k).__class__.__name__ == import_type) and (not targets or k in targets):
try:
_c = getattr(mod, k)
load_stat[m].append(
(k, True, colored('▸', 'green').join(f'{vvv.__name__}' for vvv in _c.mro()[:-1][::-1])))
d = depend_tree
for vvv in _c.mro()[:-1][::-1]:
if vvv.__name__ not in d:
d[vvv.__name__] = {}
d = d[vvv.__name__]
d['module'] = m
if k in targets:
targets.remove(k)
if not targets:
return # target execs are all found and loaded, return
try:
# load the default request for this executor if possible
from .executors.requests import get_default_reqs
get_default_reqs(type.mro(getattr(mod, k)))
except ValueError:
pass
except Exception as ex:
load_stat[m].append((k, False, ex))
bad_imports.append('.'.join([m, k]))
if k in targets:
raise ex  # target class was found but failed to load; re-raise
except Exception as ex:
load_stat[m].append(('', False, ex))
bad_imports.append(m)
if targets:
raise ImportError(f'{targets} can not be found in jina')
if show_import_table:
from .helper import print_load_table, print_dep_tree_rst
print_load_table(load_stat)
else:
if bad_imports:
if namespace != 'jina.hub':
default_logger.error(
f'these modules or classes can not be imported: {bad_imports}. '
f'You can use `jina check` to list all executors and drivers')
else:
default_logger.warning(
f'due to missing dependencies or bad implementations, {bad_imports} can not be imported. '
f'If you are using these executors/drivers, they will not work. '
f'You can use `jina check` to list all executors and drivers')
if namespace == 'jina.executors':
JINA_GLOBAL.imported.executors = True
elif namespace == 'jina.drivers':
JINA_GLOBAL.imported.drivers = True
elif namespace == 'jina.hub':
JINA_GLOBAL.imported.hub = True
return depend_tree
# driver first, as executor may contain driver
import_classes('jina.drivers', show_import_table=False, import_once=True)
import_classes('jina.executors', show_import_table=False, import_once=True)
import_classes('jina.hub', show_import_table=False, import_once=True)
# manually install the default signal handler
import signal
signal.signal(signal.SIGINT, signal.default_int_handler)
def set_nofile(nofile_atleast=4096):
"""
Raise the nofile soft limit to at least 4096. This is useful when running matplotlib/seaborn with
many plot generators in parallel, where the Ubuntu default of `ulimit -n 1024` or the OS X
El Capitan default of 256 is too low. The setting is temporary and expires with the Python session.
"""
try:
import resource as res
except ImportError: # Windows
res = None
from .logging import default_logger
if res is None:
return (None,) * 2
soft, ohard = res.getrlimit(res.RLIMIT_NOFILE)
hard = ohard
if soft < nofile_atleast:
soft = nofile_atleast
if hard < soft:
hard = soft
default_logger.debug(f'setting soft & hard ulimit -n {soft} {hard}')
try:
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except (ValueError, res.error):
try:
hard = soft
default_logger.warning(f'trouble with max limit, retrying with soft,hard {soft},{hard}')
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except Exception:
default_logger.warning('failed to set ulimit, giving up')
soft, hard = res.getrlimit(res.RLIMIT_NOFILE)
default_logger.debug(f'ulimit -n soft,hard: {soft} {hard}')
return soft, hard
set_nofile()
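# Usage sketch (illustrative only, not executed at import time): import_classes()
# can also be called from user code, e.g. to re-scan the executor namespace and
# print the per-module load table, or to load a single executor by class name.
# 'MyEncoder' below is a hypothetical class name used purely for illustration.
def _import_classes_example():
    # full scan of the executor namespace with a load table printed to stdout
    import_classes('jina.executors', show_import_table=True)
    # targeted import: returns as soon as the named class has been registered
    import_classes('jina.executors', targets='MyEncoder')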
|
###########################################################################
#
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
###########################################################################
contentCategory_Schema = [{
'description': '',
'name': 'accountId',
'type': 'INT64',
'mode': 'NULLABLE'
}, {
'description': '',
'name': 'id',
'type': 'INT64',
'mode': 'NULLABLE'
}, {
'description': '',
'name': 'kind',
'type': 'STRING',
'mode': 'NULLABLE'
}, {
'description': '',
'name': 'name',
'type': 'STRING',
'mode': 'NULLABLE'
}]
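# Sketch (assumption: the schema above is consumed with the google-cloud-bigquery
# client; it can equally be embedded verbatim in a load-job configuration). The
# helper below is illustrative and not part of the generated schema module.
def content_category_schema_fields():
    from google.cloud import bigquery  # assumed dependency, not imported above
    return [
        bigquery.SchemaField(
            name=field['name'],
            field_type=field['type'],
            mode=field['mode'],
            description=field['description'],
        )
        for field in contentCategory_Schema
    ]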
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import time
import numpy as np
from arch.api.utils import log_utils
from federatedml.evaluation import Evaluation
from federatedml.ftl.data_util.common_data_util import overlapping_samples_converter, load_model_parameters, \
save_model_parameters, create_table, convert_instance_table_to_dict, convert_instance_table_to_array, \
add_random_mask_for_list_of_values, remove_random_mask_from_list_of_values
from federatedml.ftl.data_util.log_util import create_shape_msg
from federatedml.ftl.eggroll_computation.helper import decrypt_matrix
from federatedml.ftl.encrypted_ftl import EncryptedFTLHostModel
from federatedml.ftl.encryption.encryption import generate_encryption_key_pair, decrypt_scalar, decrypt_array
from federatedml.ftl.faster_encrypted_ftl import FasterEncryptedFTLHostModel
from federatedml.ftl.hetero_ftl.hetero_ftl_base import HeteroFTLParty
from federatedml.ftl.plain_ftl import PlainFTLHostModel
from federatedml.param.param import FTLModelParam
from federatedml.util import consts
from federatedml.util.transfer_variable import HeteroFTLTransferVariable
LOGGER = log_utils.getLogger()
class HeteroFTLHost(HeteroFTLParty):
def __init__(self, host: PlainFTLHostModel, model_param: FTLModelParam, transfer_variable: HeteroFTLTransferVariable):
super(HeteroFTLHost, self).__init__()
self.host_model = host
self.model_param = model_param
self.transfer_variable = transfer_variable
self.max_iter = model_param.max_iter
self.n_iter_ = 0
def prepare_data(self, host_data):
LOGGER.info("@ start host prepare data")
host_features_dict, _, host_sample_indexes = convert_instance_table_to_dict(host_data)
host_sample_indexes = np.array(host_sample_indexes)
self._do_remote(host_sample_indexes,
name=self.transfer_variable.host_sample_indexes.name,
tag=self.transfer_variable.generate_transferid(self.transfer_variable.host_sample_indexes),
role=consts.GUEST,
idx=-1)
guest_sample_indexes = self._do_get(name=self.transfer_variable.guest_sample_indexes.name,
tag=self.transfer_variable.generate_transferid(self.transfer_variable.guest_sample_indexes),
idx=-1)[0]
host_features, overlap_indexes, _ = overlapping_samples_converter(host_features_dict, host_sample_indexes,
guest_sample_indexes)
return host_features, overlap_indexes
def classified(self, prob_table, threshold):
"""
convert a probability table into a predicted class table.
"""
predict_table = prob_table.mapValues(lambda x: 1 if x > threshold else 0)
return predict_table
def evaluate(self, labels, pred_prob, pred_labels, evaluate_param):
LOGGER.info("@ start host evaluate")
predict_res = None
if evaluate_param.classi_type == consts.BINARY:
predict_res = pred_prob
elif evaluate_param.classi_type == consts.MULTY:
predict_res = pred_labels
else:
LOGGER.warning("unknown classification type, return None as evaluation results")
eva = Evaluation(evaluate_param.classi_type)
eva_report = eva.report(labels, predict_res, evaluate_param.metrics, evaluate_param.thresholds,
evaluate_param.pos_label)
LOGGER.info("@ evaluation report:" + str(eva_report))
return eva_report
def predict(self, host_data, predict_param):
LOGGER.info("@ start host predict")
features, labels, instances_indexes = convert_instance_table_to_array(host_data)
host_x = np.squeeze(features)
LOGGER.debug("host_x: " + str(host_x.shape))
host_prob = self.host_model.predict(host_x)
self._do_remote(host_prob,
name=self.transfer_variable.host_prob.name,
tag=self.transfer_variable.generate_transferid(
self.transfer_variable.host_prob),
role=consts.GUEST, idx=-1)
pred_prob = self._do_get(name=self.transfer_variable.pred_prob.name,
tag=self.transfer_variable.generate_transferid(self.transfer_variable.pred_prob),
idx=-1)[0]
pred_prob = np.squeeze(pred_prob)
LOGGER.debug("pred_prob: " + str(pred_prob.shape))
pred_prob_table = create_table(pred_prob, instances_indexes)
actual_label_table = create_table(labels, instances_indexes)
pred_label_table = self.classified(pred_prob_table, predict_param.threshold)
if predict_param.with_proba:
predict_result = actual_label_table.join(pred_prob_table, lambda label, prob: (label if label > 0 else 0, prob))
predict_result = predict_result.join(pred_label_table, lambda x, y: (x[0], x[1], y))
else:
predict_result = actual_label_table.join(pred_label_table, lambda a_label, p_label: (a_label, None, p_label))
return predict_result
def load_model(self, model_table_name, model_namespace):
LOGGER.info("@ load host model from name/ns" + ", " + str(model_table_name) + ", " + str(model_namespace))
model_parameters = load_model_parameters(model_table_name, model_namespace)
self.host_model.restore_model(model_parameters)
def save_model(self, model_table_name, model_namespace):
LOGGER.info("@ save host model to name/ns" + ", " + str(model_table_name) + ", " + str(model_namespace))
_ = save_model_parameters(self.host_model.get_model_parameters(), model_table_name, model_namespace)
class HeteroPlainFTLHost(HeteroFTLHost):
def __init__(self, host: PlainFTLHostModel, model_param: FTLModelParam, transfer_variable: HeteroFTLTransferVariable):
super(HeteroPlainFTLHost, self).__init__(host, model_param, transfer_variable)
def fit(self, host_data):
LOGGER.info("@ start host fit")
host_x, overlap_indexes = self.prepare_data(host_data)
LOGGER.debug("host_x: " + str(host_x.shape))
LOGGER.debug("overlap_indexes: " + str(len(overlap_indexes)))
self.host_model.set_batch(host_x, overlap_indexes)
while self.n_iter_ < self.max_iter:
host_comp = self.host_model.send_components()
self._do_remote(host_comp, name=self.transfer_variable.host_component_list.name,
tag=self.transfer_variable.generate_transferid(self.transfer_variable.host_component_list, self.n_iter_),
role=consts.GUEST,
idx=-1)
guest_comp = self._do_get(name=self.transfer_variable.guest_component_list.name,
tag=self.transfer_variable.generate_transferid(self.transfer_variable.guest_component_list, self.n_iter_),
idx=-1)[0]
self.host_model.receive_components(guest_comp)
is_stop = self._do_get(name=self.transfer_variable.is_stopped.name,
tag=self.transfer_variable.generate_transferid(self.transfer_variable.is_stopped, self.n_iter_),
idx=-1)[0]
LOGGER.info("@ time: " + str(time.time()) + ", ep: " + str(self.n_iter_) + ", converged: " + str(is_stop))
self.n_iter_ += 1
if is_stop:
break
"""
Centralized encryption scheme with an arbiter in the loop for decryption.
"""
class HeteroEncryptFTLHost(HeteroFTLHost):
def __init__(self, host, model_param: FTLModelParam, transfer_variable: HeteroFTLTransferVariable):
super(HeteroEncryptFTLHost, self).__init__(host, model_param, transfer_variable)
self.host_model: EncryptedFTLHostModel = host
def _precompute(self):
pass
def fit(self, host_data):
LOGGER.info("@ start host fit")
# get public key from arbiter
public_key = self._do_get(name=self.transfer_variable.paillier_pubkey.name,
tag=self.transfer_variable.generate_transferid(self.transfer_variable.paillier_pubkey),
idx=-1)[0]
host_x, overlap_indexes = self.prepare_data(host_data)
LOGGER.debug("host_x: " + str(host_x.shape))
LOGGER.debug("overlap_indexes: " + str(len(overlap_indexes)))
self.host_model.set_batch(host_x, overlap_indexes)
self.host_model.set_public_key(public_key)
start_time = time.time()
while self.n_iter_ < self.max_iter:
host_comp = self.host_model.send_components()
self._do_remote(host_comp, name=self.transfer_variable.host_component_list.name,
tag=self.transfer_variable.generate_transferid(self.transfer_variable.host_component_list, self.n_iter_),
role=consts.GUEST,
idx=-1)
guest_comp = self._do_get(name=self.transfer_variable.guest_component_list.name,
tag=self.transfer_variable.generate_transferid(
self.transfer_variable.guest_component_list, self.n_iter_),
idx=-1)[0]
self.host_model.receive_components(guest_comp)
self._precompute()
encrypt_host_gradients = self.host_model.send_gradients()
self._do_remote(encrypt_host_gradients, name=self.transfer_variable.encrypt_host_gradient.name,
tag=self.transfer_variable.generate_transferid(self.transfer_variable.encrypt_host_gradient, self.n_iter_),
role=consts.ARBITER,
idx=-1)
decrypt_host_gradients = self._do_get(name=self.transfer_variable.decrypt_host_gradient.name,
tag=self.transfer_variable.generate_transferid(
self.transfer_variable.decrypt_host_gradient, self.n_iter_),
idx=-1)[0]
self.host_model.receive_gradients(decrypt_host_gradients)
is_stop = self._do_get(name=self.transfer_variable.is_encrypted_ftl_stopped.name,
tag=self.transfer_variable.generate_transferid(self.transfer_variable.is_encrypted_ftl_stopped, self.n_iter_),
idx=-1)[0]
LOGGER.info("@ time: " + str(time.time()) + ", ep: " + str(self.n_iter_) + ", converged: " + str(is_stop))
self.n_iter_ += 1
if is_stop:
break
end_time = time.time()
LOGGER.info("@ running time: " + str(end_time - start_time))
class FasterHeteroEncryptFTLHost(HeteroEncryptFTLHost):
def __init__(self, host, model_param: FTLModelParam, transfer_variable: HeteroFTLTransferVariable):
super(FasterHeteroEncryptFTLHost, self).__init__(host, model_param, transfer_variable)
self.host_model: FasterEncryptedFTLHostModel = host
def _precompute(self):
LOGGER.info("@ start host precompute")
host_precomputed_comp = self.host_model.send_precomputed_components()
self._do_remote(host_precomputed_comp, name=self.transfer_variable.host_precomputed_comp_list.name,
tag=self.transfer_variable.generate_transferid(self.transfer_variable.host_precomputed_comp_list,
self.n_iter_),
role=consts.GUEST,
idx=-1)
guest_precomputed_comp = self._do_get(name=self.transfer_variable.guest_precomputed_comp_list.name,
tag=self.transfer_variable.generate_transferid(
self.transfer_variable.guest_precomputed_comp_list, self.n_iter_),
idx=-1)[0]
self.host_model.receive_precomputed_components(guest_precomputed_comp)
"""
Decentralized encryption scheme without arbiter in the loop.
"""
class HeteroDecentralizedEncryptFTLHost(HeteroFTLHost):
def __init__(self, host, model_param: FTLModelParam, transfer_variable: HeteroFTLTransferVariable):
super(HeteroDecentralizedEncryptFTLHost, self).__init__(host, model_param, transfer_variable)
self.host_model: EncryptedFTLHostModel = host
self.public_key = None
self.private_key = None
self.guest_public_key = None
def _precompute(self):
pass
def prepare_encryption_key_pair(self):
LOGGER.info("@ start host prepare encryption key pair")
self.public_key, self.private_key = generate_encryption_key_pair()
# exchange public_key with guest
self._do_remote(self.public_key, name=self.transfer_variable.host_public_key.name,
tag=self.transfer_variable.generate_transferid(self.transfer_variable.host_public_key,
self.n_iter_),
role=consts.GUEST,
idx=-1)
self.guest_public_key = self._do_get(name=self.transfer_variable.guest_public_key.name,
tag=self.transfer_variable.generate_transferid(
self.transfer_variable.guest_public_key, self.n_iter_),
idx=-1)[0]
def fit(self, host_data):
LOGGER.info("@ start host fit")
self.prepare_encryption_key_pair()
host_x, overlap_indexes = self.prepare_data(host_data)
LOGGER.debug("host_x: " + str(host_x.shape))
LOGGER.debug("overlap_indexes: " + str(len(overlap_indexes)))
self.host_model.set_batch(host_x, overlap_indexes)
self.host_model.set_public_key(self.public_key)
self.host_model.set_guest_public_key(self.guest_public_key)
self.host_model.set_private_key(self.private_key)
start_time = time.time()
while self.n_iter_ < self.max_iter:
# Stage 1: compute and encrypt components (using host public key) required by guest to
# calculate gradients and loss.
LOGGER.debug("@ Stage 1: ")
host_comp = self.host_model.send_components()
LOGGER.debug("send enc host_comp: " + create_shape_msg(host_comp))
self._do_remote(host_comp, name=self.transfer_variable.host_component_list.name,
tag=self.transfer_variable.generate_transferid(self.transfer_variable.host_component_list, self.n_iter_),
role=consts.GUEST,
idx=-1)
# Stage 2: receive guest components in encrypted form (encrypted by guest public key),
# and calculate host gradients in encrypted form (encrypted by guest public key),
# and send them to guest for decryption
LOGGER.debug("@ Stage 2: ")
guest_comp = self._do_get(name=self.transfer_variable.guest_component_list.name,
tag=self.transfer_variable.generate_transferid(
self.transfer_variable.guest_component_list, self.n_iter_),
idx=-1)[0]
LOGGER.debug("receive enc guest_comp: " + create_shape_msg(guest_comp))
self.host_model.receive_components(guest_comp)
self._precompute()
# calculate host gradients in encrypted form (encrypted by guest public key)
encrypt_host_gradients = self.host_model.send_gradients()
LOGGER.debug("send encrypt_guest_gradients: " + create_shape_msg(encrypt_host_gradients))
# add random mask to encrypt_host_gradients and send them to guest for decryption
masked_enc_host_gradients, gradients_masks = add_random_mask_for_list_of_values(encrypt_host_gradients)
LOGGER.debug("send masked_enc_host_gradients: " + create_shape_msg(masked_enc_host_gradients))
self._do_remote(masked_enc_host_gradients, name=self.transfer_variable.masked_enc_host_gradients.name,
tag=self.transfer_variable.generate_transferid(self.transfer_variable.masked_enc_host_gradients, self.n_iter_),
role=consts.GUEST,
idx=-1)
# Stage 3: receive and then decrypt masked encrypted guest gradients and masked encrypted guest loss,
# and send them to guest
LOGGER.debug("@ Stage 3: ")
masked_enc_guest_gradients = self._do_get(name=self.transfer_variable.masked_enc_guest_gradients.name,
tag=self.transfer_variable.generate_transferid(self.transfer_variable.masked_enc_guest_gradients, self.n_iter_),
idx=-1)[0]
masked_enc_guest_loss = self._do_get(name=self.transfer_variable.masked_enc_loss.name,
tag=self.transfer_variable.generate_transferid(self.transfer_variable.masked_enc_loss, self.n_iter_),
idx=-1)[0]
masked_dec_guest_gradients = self.__decrypt_gradients(masked_enc_guest_gradients)
masked_dec_guest_loss = self.__decrypt_loss(masked_enc_guest_loss)
LOGGER.debug("send masked_dec_guest_gradients: " + create_shape_msg(masked_dec_guest_gradients))
self._do_remote(masked_dec_guest_gradients, name=self.transfer_variable.masked_dec_guest_gradients.name,
tag=self.transfer_variable.generate_transferid(self.transfer_variable.masked_dec_guest_gradients, self.n_iter_),
role=consts.GUEST,
idx=-1)
LOGGER.debug("send masked_dec_guest_loss: " + str(masked_dec_guest_loss))
self._do_remote(masked_dec_guest_loss, name=self.transfer_variable.masked_dec_loss.name,
tag=self.transfer_variable.generate_transferid(self.transfer_variable.masked_dec_loss, self.n_iter_),
role=consts.GUEST,
idx=-1)
# Stage 4: receive masked but decrypted host gradients from guest and remove mask,
# and update host model parameters using these gradients.
LOGGER.debug("@ Stage 4: ")
masked_dec_host_gradients = self._do_get(name=self.transfer_variable.masked_dec_host_gradients.name,
tag=self.transfer_variable.generate_transferid(
self.transfer_variable.masked_dec_host_gradients, self.n_iter_),
idx=-1)[0]
LOGGER.debug("receive masked_dec_host_gradients: " + create_shape_msg(masked_dec_host_gradients))
cleared_dec_host_gradients = remove_random_mask_from_list_of_values(masked_dec_host_gradients, gradients_masks)
# update host model parameters using these gradients.
self.host_model.receive_gradients(cleared_dec_host_gradients)
# Stage 5: determine whether training is terminated.
LOGGER.debug("@ Stage 5: ")
is_stop = self._do_get(name=self.transfer_variable.is_decentralized_enc_ftl_stopped.name,
tag=self.transfer_variable.generate_transferid(self.transfer_variable.is_decentralized_enc_ftl_stopped, self.n_iter_),
idx=-1)[0]
LOGGER.info("@ time: " + str(time.time()) + ", ep: " + str(self.n_iter_) + ", converged: " + str(is_stop))
self.n_iter_ += 1
if is_stop:
break
end_time = time.time()
LOGGER.info("@ running time: " + str(end_time - start_time))
def __decrypt_gradients(self, encrypt_gradients):
return decrypt_matrix(self.private_key, encrypt_gradients[0]), decrypt_array(self.private_key, encrypt_gradients[1])
def __decrypt_loss(self, encrypt_loss):
return decrypt_scalar(self.private_key, encrypt_loss)
class FasterHeteroDecentralizedEncryptFTLHost(HeteroDecentralizedEncryptFTLHost):
def __init__(self, host, model_param: FTLModelParam, transfer_variable: HeteroFTLTransferVariable):
super(FasterHeteroDecentralizedEncryptFTLHost, self).__init__(host, model_param, transfer_variable)
self.host_model: FasterEncryptedFTLHostModel = host
def _precompute(self):
LOGGER.debug("@ start precompute")
host_precomputed_comp = self.host_model.send_precomputed_components()
self._do_remote(host_precomputed_comp, name=self.transfer_variable.host_precomputed_comp_list.name,
tag=self.transfer_variable.generate_transferid(self.transfer_variable.host_precomputed_comp_list,
self.n_iter_),
role=consts.GUEST,
idx=-1)
guest_precomputed_comp = self._do_get(name=self.transfer_variable.guest_precomputed_comp_list.name,
tag=self.transfer_variable.generate_transferid(
self.transfer_variable.guest_precomputed_comp_list, self.n_iter_),
idx=-1)[0]
self.host_model.receive_precomputed_components(guest_precomputed_comp)
class HostFactory(object):
@classmethod
def create(cls, ftl_model_param: FTLModelParam, transfer_variable: HeteroFTLTransferVariable, ftl_local_model):
if ftl_model_param.is_encrypt:
if ftl_model_param.enc_ftl == "dct_enc_ftl":
# decentralized encrypted ftl host
LOGGER.debug("@ create decentralized encrypted ftl_host")
host_model = EncryptedFTLHostModel(local_model=ftl_local_model, model_param=ftl_model_param)
host = HeteroDecentralizedEncryptFTLHost(host_model, ftl_model_param, transfer_variable)
elif ftl_model_param.enc_ftl == "dct_enc_ftl2":
# decentralized encrypted faster ftl host
LOGGER.debug("@ create decentralized encrypted faster ftl_host")
host_model = FasterEncryptedFTLHostModel(local_model=ftl_local_model, model_param=ftl_model_param)
host = FasterHeteroDecentralizedEncryptFTLHost(host_model, ftl_model_param, transfer_variable)
elif ftl_model_param.enc_ftl == "enc_ftl2":
# encrypted faster ftl host
LOGGER.debug("@ create encrypted faster ftl_host")
host_model = FasterEncryptedFTLHostModel(local_model=ftl_local_model, model_param=ftl_model_param)
host = FasterHeteroEncryptFTLHost(host_model, ftl_model_param, transfer_variable)
else:
# encrypted ftl host
LOGGER.debug("@ create encrypted ftl_host")
host_model = EncryptedFTLHostModel(local_model=ftl_local_model, model_param=ftl_model_param)
host = HeteroEncryptFTLHost(host_model, ftl_model_param, transfer_variable)
else:
# plain ftl host
LOGGER.debug("@ create plain ftl_host")
host_model = PlainFTLHostModel(local_model=ftl_local_model, model_param=ftl_model_param)
host = HeteroPlainFTLHost(host_model, ftl_model_param, transfer_variable)
return host
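# Sketch of the blinding pattern used in Stages 2-4 of
# HeteroDecentralizedEncryptFTLHost.fit above: the host adds random masks to its
# encrypted gradients before handing them to the guest for decryption, so the
# guest only ever sees masked values, and the host strips the masks afterwards.
# Plain numpy stands in for Paillier ciphertexts here -- purely illustrative.
def _masking_round_trip_example():
    gradients = [np.array([0.1, -0.2, 0.3]), np.array([0.05])]
    # host side: mask each value with a random offset before it leaves the party
    masks = [np.random.uniform(-1, 1, g.shape) for g in gradients]
    masked = [g + m for g, m in zip(gradients, masks)]
    # guest side: "decrypts" and returns the still-masked values unchanged
    returned = [m.copy() for m in masked]
    # host side: remove the masks to recover the true gradients
    recovered = [r - m for r, m in zip(returned, masks)]
    assert all(np.allclose(g, r) for g, r in zip(gradients, recovered))
    return recovered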
|
#!/usr/bin/env python
#*-----------------------------------------------------------------------*
#| |
#| Copyright (c) 2013 by Paul Scherrer Institute (http://www.psi.ch) |
#| |
#| Author Thierry Zamofing (thierry.zamofing@psi.ch) |
#*-----------------------------------------------------------------------*
'''
implements an image view to show a colored image of an HDF5 dataset.
'''
if __name__ == '__main__':
# ensure at least wxPython 2.8 is used
import wxversion
wxversion.ensureMinimal('2.8')
import wx
import matplotlib as mpl
if __name__ == '__main__':
mpl.use('WXAgg')
#or mpl.use('WX')
#matplotlib.get_backend()
import os,h5py
import numpy as np
import utilities as ut
from matplotlib.backends.backend_wxagg import FigureCanvasWxAgg as FigureCanvas
from GLCanvasImg import *
import pyFAI
from hdfImageGL import HdfImageGLFrame
from glumpy.image.texture import Texture
from scipy import ndimage as ndi
def FindCenter(arr):
m=ndi.median_filter(arr, 5)
sx=m.sum(1)
sy=m.sum(0)
shape=arr.shape
xx=np.arange(shape[0])
yy=np.arange(shape[1])
x=(xx*sx).sum()/sx.sum()
y=(yy*sy).sum()/sy.sum()
#print x,y
#import pylab as plt #used for the colormaps
#plt.figure()
#plt.subplot(211)
#plt.plot(sx)
#plt.subplot(212)
#plt.plot(sy)
#plt.show(block=False)
return (x,y)
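# Self-contained sketch of what FindCenter computes: the intensity-weighted centroid
# of a median-filtered image, returned in (row, column) order. The pyFAI call mirrors
# what InitChild below does with that center (legacy ai.xrpd API, as used in this module).
def _find_center_demo():
    yy, xx = np.mgrid[0:128, 0:128]
    img = np.exp(-((xx - 80.0) ** 2 + (yy - 40.0) ** 2) / 200.0)  # blob near row 40, col 80
    ctrX, ctrY = FindCenter(img)  # approximately (40, 80)
    ai = pyFAI.AzimuthalIntegrator(1.e3, ctrX, ctrY, 0.0, 0.0, 0.0, 1.e0, 1.e0)
    tth, intensity = ai.xrpd(img, 100)  # 1-D azimuthal integration around that center
    return ctrX, ctrY, tth, intensity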
class MPLCanvasPyFAI1D(FigureCanvas):
def __init__(self,parent,SetStatusCB=None):
if SetStatusCB:
self.SetStatusCB=SetStatusCB
fig = mpl.figure.Figure()
ax = fig.add_axes([0.075,0.1,0.75,0.85])
FigureCanvas.__init__(self,parent, -1, fig)
#self.mpl_connect('motion_notify_event', self.OnMotion)
#self.mpl_connect('button_press_event', self.OnBtnPress)
#self.mpl_connect('button_release_event', self.OnBtnRelease)
#self.mpl_connect('scroll_event', self.OnBtnScroll)
#self.mpl_connect('key_press_event',self.OnKeyPress)
self.fig=fig
self.ax=ax
def InitChild(self,data):
fig=self.fig
ax=self.ax
ctrX,ctrY=self.center=FindCenter(data)
self.ai = pyFAI.AzimuthalIntegrator(1.e3, ctrX, ctrY, 0.0, 0.0, 0.0, 1.e0, 1.e0)
#canvas=self.canvas
self.numPtTh=int(np.average(data.shape)/2.)
out=self.ai.xrpd(data,self.numPtTh)
self.hl=ax.plot(*out)
ax.set_yscale('log')
#canvas.data=imgPolar
#print imgPolar.shape
#out=ai.xrpd(imgData,1000)
#out=ai.xrpd_OpenCL(imgData,1000)
#import pylab
#pylab.plot(*out)
#pylab.yscale("log")
#pylab.show()
class HdfPyFAI1DFrame(wx.Frame):
def __init__(self, parent,lbl,hid):
wx.Frame.__init__(self, parent, title=lbl, size=wx.Size(850, 650))
imgDir=ut.Path.GetImage()
icon = wx.Icon(os.path.join(imgDir,'h5pyViewer.ico'), wx.BITMAP_TYPE_ICO)
self.SetIcon(icon)
t=type(hid)
if t==h5py.h5d.DatasetID:
data=h5py.Dataset(hid)
canvas = MPLCanvasPyFAI1D(self,self.SetStatusCB)
sizer = wx.BoxSizer(wx.VERTICAL)
sizer.Add(canvas, 1, wx.LEFT | wx.TOP | wx.GROW)
self.SetSizer(sizer)
toolbar=ut.AddToolbar(canvas,sizer)
wxAxCtrlLst=[]
l=len(data.shape)
idxXY=(l-2,l-1)
for idx,l in enumerate(data.shape):
if idx in idxXY:
continue
wxAxCtrl=ut.SliderGroup(self, label='Axis:%d'%idx,range=(0,l-1))
wxAxCtrl.idx=idx
wxAxCtrlLst.append(wxAxCtrl)
sizer.Add(wxAxCtrl.sizer, 0, wx.EXPAND | wx.ALIGN_CENTER | wx.ALL, border=5)
wxAxCtrl.SetCallback(HdfPyFAI1DFrame.OnSetView,wxAxCtrl)
sl=ut.GetSlice(idxXY,data.shape,wxAxCtrlLst)
canvas.InitChild(data[sl])
#self.Fit()
self.Centre()
self.BuildMenu()
self.canvas=canvas
self.sizer=sizer
self.toolbar=toolbar
self.data=data
self.idxXY=idxXY
self.wxAxCtrlLst=wxAxCtrlLst
def BuildMenu(self):
mnBar = wx.MenuBar()
#-------- Edit Menu --------
mn = wx.Menu()
#mnItem=mn.Append(wx.ID_ANY, 'Setup Colormap', 'Setup the color mapping ');self.Bind(wx.EVT_MENU, self.OnColmapSetup, mnItem)
#mnItem=mn.Append(wx.ID_ANY, 'Linear Mapping', 'Use a linear values to color mapping ');self.Bind(wx.EVT_MENU, self.OnMapLin, mnItem)
#mnItem=mn.Append(wx.ID_ANY, 'Log Mapping', 'Use a logarithmic values to color mapping ');self.Bind(wx.EVT_MENU, self.OnMapLog, mnItem)
#mnItem=mn.Append(wx.ID_ANY, 'Invert X-Axis', kind=wx.ITEM_CHECK);self.Bind(wx.EVT_MENU, self.OnInvertAxis, mnItem)
#self.mnIDxAxis=mnItem.GetId()
#mnItem=mn.Append(wx.ID_ANY, 'Invert Y-Axis', kind=wx.ITEM_CHECK);self.Bind(wx.EVT_MENU, self.OnInvertAxis, mnItem)
mnBar.Append(mn, '&Edit')
mn = wx.Menu()
#mnItem=mn.Append(wx.ID_ANY, 'Help', 'How to use the image viewer');self.Bind(wx.EVT_MENU, self.OnHelp, mnItem)
mnBar.Append(mn, '&Help')
self.SetMenuBar(mnBar)
self.CreateStatusBar()
def SetIdxXY(self,x,y):
self.idxXY=(x,y)
@staticmethod
def SetStatusCB(obj,mode,v):
if mode==0:
obj.SetStatusText( "x= %d y=%d val=%g"%v,0)
elif mode==1:
obj.SetStatusText( "Colormap Value %d (drag to scale)"%v,0)
else:
raise KeyError('wrong mode')
@staticmethod
def OnSetView(usrData,value,msg):
'called when a slice is selected with the slider controls'
imgFrm=usrData.slider.Parent
#imgFrm.img.set_array(imgFrm.data[usrData.value,...])
data=imgFrm.data
sl=ut.GetSlice(imgFrm.idxXY,data.shape,imgFrm.wxAxCtrlLst)
hl=imgFrm.canvas.hl
ai=imgFrm.canvas.ai
numPtTh=imgFrm.canvas.numPtTh
out=ai.xrpd(data[sl],numPtTh)
hl[0].set_ydata(out[1])
imgFrm.canvas.draw()
pass
###########################################
class HdfPyFAIFrame(HdfImageGLFrame):
def __init__(self, parent, title, hid):
HdfImageGLFrame.__init__(self, parent, title, hid)
#HdfPyFAI1DFrame(self, title, hid)
canvas=self.canvas
raw=canvas.data
ctrX,ctrY=FindCenter(raw)
self.ai = pyFAI.AzimuthalIntegrator(1.e3, ctrX, ctrY, 0.0, 0.0, 0.0, 1.e0, 1.e0)
self.numPtTh=int(np.average(raw.shape)/2.)
self.numPtCh=360
imgPolar,theta,chi=self.ai.xrpd2(raw,self.numPtTh,self.numPtCh)
canvas.data=imgPolar
print (imgPolar.shape)
def BuildMenu(self):
HdfImageGLFrame.BuildMenu(self)
mnBar=self.GetMenuBar()
mn=mnBar.GetMenu(0)
itemLst=mn.GetMenuItems()
it=itemLst[0]
it.GetItemLabel()
mnItem=mn.Append(wx.ID_ANY, 'Setup FAI', 'Setup fast azimuthal integration ');self.Bind(wx.EVT_MENU, self.OnFAISetup, mnItem)
@staticmethod
def OnSetView(usrData,value,msg):
'called when a slice is selected with the slider controls'
frm=usrData.slider.Parent
ds=frm.dataSet
canvas=frm.canvas
glImg=canvas.glImg
sl=ut.GetSlice(frm.idxXY,ds.shape,frm.wxAxCtrlLst)
imgPolar,theta,chi=frm.ai.xrpd2(ds[sl],frm.numPtTh,frm.numPtCh)
canvas.data[:]=imgPolar[:]
glImg.data[:]=canvas.GetTxrData()
glImg.update()
canvas.OnPaint(None)#force to repaint, Refresh and Update do not force !
#canvas.Refresh(False)
#canvas.Update()
pass
def OnFAISetup(self, event):
dlg=DlgSetupPyFAI(self)
if dlg.ShowModal()==wx.ID_OK:
pass
dlg.Destroy()
class DlgSetupPyFAI(wx.Dialog):
def __init__(self,parent):
wx.Dialog.__init__(self,parent,-1,'pyFAI Setup')
ai=parent.ai
#glColBar=parent.glColBar
#dataRange=parent.dataRange
txtCtrX=wx.StaticText(self,-1,'center X')
txtCtrY=wx.StaticText(self,-1,'center Y')
txtNumPtTh=wx.StaticText(self,-1,'number of pt in Theta')
txtNumPtCh=wx.StaticText(self,-1,'number of pt in Chi')
txtMethod=wx.StaticText(self,-1,'method')
self.edCtrX=edCtrX=wx.TextCtrl(self,-1,'%g'%ai.get_poni1(),style=wx.TE_PROCESS_ENTER)
self.edCtrY=edCtrY=wx.TextCtrl(self,-1,'%g'%ai.get_poni2(),style=wx.TE_PROCESS_ENTER)
self.edNumPtTh=edNumPtTh=wx.TextCtrl(self,-1,'%g'%parent.numPtTh,style=wx.TE_PROCESS_ENTER)
self.edNumPtCh=edNumPtCh=wx.TextCtrl(self,-1,'%g'%parent.numPtCh,style=wx.TE_PROCESS_ENTER)
self.cbMethod=cbMethod=wx.ComboBox(self, -1, choices=('default','numny'), style=wx.CB_READONLY)
#cbtxrFunc.SetSelection(parent.txrTrfFunc)
sizer=wx.BoxSizer(wx.VERTICAL)
fgs=wx.FlexGridSizer(5,2,5,5)
fgs.Add(txtCtrX,0,wx.ALIGN_RIGHT)
fgs.Add(edCtrX,0,wx.EXPAND)
fgs.Add(txtCtrY,0,wx.ALIGN_RIGHT)
fgs.Add(edCtrY,0,wx.EXPAND)
fgs.Add(txtNumPtTh,0,wx.ALIGN_RIGHT)
fgs.Add(edNumPtTh,0,wx.EXPAND)
fgs.Add(txtNumPtCh,0,wx.ALIGN_RIGHT)
fgs.Add(edNumPtCh,0,wx.EXPAND)
fgs.Add(txtMethod,0,wx.ALIGN_RIGHT)
fgs.Add(cbMethod,0,wx.EXPAND)
sizer.Add(fgs,0,wx.EXPAND|wx.ALL,5)
#edVMin.SetFocus()
btns = self.CreateButtonSizer(wx.OK|wx.CANCEL)
btnApply=wx.Button(self, -1, 'Apply')
btns.Add(btnApply, 0, wx.ALL, 5)
sizer.Add(btns,0,wx.EXPAND|wx.ALL,5)
self.Bind(wx.EVT_BUTTON, self.OnModify, id=wx.ID_OK)
self.Bind(wx.EVT_BUTTON, self.OnModify, btnApply)
#self.Bind(wx.EVT_TEXT, self.OnModify, edCtrX)
#self.Bind(wx.EVT_TEXT, self.OnModify, edCtrY)
#self.Bind(wx.EVT_TEXT, self.OnModify, edNumSector)
self.Bind(wx.EVT_COMBOBOX, self.OnModify, cbMethod)
self.SetSizer(sizer)
sizer.Fit(self)
def OnModify(self, event):
print ('OnModify')
frm=self.GetParent()
ds=frm.dataSet
canvas=frm.canvas
glImg=canvas.glImg
ai=frm.ai
ai.set_poni1(float(self.edCtrX.Value))
ai.set_poni2(float(self.edCtrY.Value))
frm.numPtTh=int(self.edNumPtTh.Value)
frm.numPtCh=int(self.edNumPtCh.Value)
sl=ut.GetSlice(frm.idxXY,ds.shape,frm.wxAxCtrlLst)
imgPolar,theta,chi=frm.ai.xrpd2(ds[sl],frm.numPtTh,frm.numPtCh)
if canvas.data.shape==imgPolar.shape:
canvas.data[:]=imgPolar[:]
glImg.data[:]=canvas.GetTxrData()
else:
canvas.data=imgPolar;
glImg._data=canvas.GetTxrData()
glImg._texture=Texture(glImg._data)
#self.glImg=glImg=glumpy.image.Image(txrData, colormap=colMap,vmin=txrRng[0], vmax=txrRng[1])
print (canvas.data.shape,glImg.data.shape)
glImg.update()
canvas.OnPaint(None)#force to repaint, Refresh and Update do not force !
frm.Refresh(False)
if event.GetId()==wx.ID_OK:
event.Skip()#do not consume (use the event to close the window and send the return code)
if __name__ == '__main__':
import os,sys,argparse #since python 2.7
def GetParser(required=True):
fnHDF='/scratch/detectorData/e14472_00033.hdf5'
#lbl='mcs'
lbl='pilatus_1'
#lbl='spec'
elem='/entry/data/'+lbl
exampleCmd='--hdfFile='+fnHDF+' --elem='+elem
parser = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter,
description=__doc__,
epilog='Example:\n'+os.path.basename(sys.argv[0])+' '+exampleCmd+'\n ')
parser.add_argument('--hdfFile', required=required, default=fnHDF, help='the hdf5 to show')
parser.add_argument('--elem', required=required, default=elem, help='the path to the element in the hdf5 file')
return parser
class App(wx.App):
def OnInit(self):
parser=GetParser()
#parser=GetParser(False) # debug with exampleCmd
args = parser.parse_args()
try:
self.fid=fid=h5py.h5f.open(args.hdfFile)
except IOError as e:
sys.stderr.write('Unable to open File: '+args.hdfFile+'\n')
parser.print_usage(sys.stderr)
return True
try:
hid = h5py.h5o.open(fid,args.elem)
except KeyError as e:
sys.stderr.write('Unable to open Object: '+args.elem+'\n')
parser.print_usage(sys.stderr)
return True
frame = HdfPyFAIFrame(None,args.elem,hid)
#frame = HdfPyFAI1DFrame(None,args.elem,hid)
frame.Show()
self.SetTopWindow(frame)
return True
def OnExit(self):
self.fid.close()
ut.StopWatch.Start()
app = App()
app.MainLoop()
|
import gym
from gym import logger
from core.states import StateSerializer
from core.runner import Runner
from agents.dqn_agent import DQNAgent
logger.set_level(logger.INFO)
env_name = 'CartPole-v0'
env = gym.make(env_name)
env._max_episode_steps = 500
serializer = StateSerializer(env.observation_space.shape)
agent = DQNAgent.from_h5(file_path=env_name+'.h5')
runner = Runner(env, serializer, agent,
epsilon_policy = lambda e: 0,
max_episode_steps = 500)
runner.render()
runner.demonstrate(num_episodes=100)
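# epsilon_policy maps an episode index to an exploration rate; the constant 0 above
# makes the demonstration run fully greedy. A training run would typically decay the
# rate over episodes instead -- an illustrative schedule (not part of this repo's API):
def decaying_epsilon(episode, start=1.0, end=0.05, decay_episodes=200):
    # linear decay from `start` to `end` over the first `decay_episodes` episodes
    frac = min(episode / float(decay_episodes), 1.0)
    return start + (end - start) * frac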
|
__author__ = 'luke.beer'
import subprocess
import threading
import logging
import socket
import time
import questions
import states
class Executor(threading.Thread):
def __init__(self, r, channel):
threading.Thread.__init__(self)
self.redis = r
self.channel = channel
self.pubsub = self.redis.pubsub()
self.pubsub.subscribe([channel])
self.state = states.State.INIT
self.name = socket.gethostname()
self.hosts = []
def communicate(self, msg='Sup?'):
msg = '%s :: %s :: %s' % (socket.gethostname(), self.state, msg)
logging.info(msg)
self.redis.publish(self.channel, msg)
def set_state(self, state):
state_msg = 'State changed from %s to %s' % (self.state, state)
logging.info(state_msg)
self.communicate(state_msg)
def archive(self):
self.set_state(states.State.ARCHIVE)
time.sleep(5)
def compress(self):
self.set_state(states.State.COMPRESS)
time.sleep(5)
def file_sync(self, host):
self.set_state(states.State.COLLECT)
self.communicate("%s: Valid config." % host.hostname)
try:
destination = "%s/%s" % (host.destination, host.hostname)
src = "%s@%s:%s/" % (host.username, host.address, host.source)
# no shell is involved, so the include pattern must not carry literal quote characters
result = subprocess.check_output(['/usr/bin/rsync', '-pvvctrz', '--include=%s' % host.match, src,
destination], stderr=subprocess.STDOUT)
self.communicate(result)
except Exception as e:
self.communicate(str(e))  # Exception.message does not exist on Python 3
def stop(self):
self.pubsub.unsubscribe()
self.communicate('Goodbye....')
def ready(self, item):
hostname, state, msg = item['data'].split(' :: ', 2)  # maxsplit=2 keeps any ' :: ' inside the message intact
if hostname == self.name:
return
if msg == questions.Questions.WHAT:
self.communicate("Hey friend, I'm %s" % self.state)
else:
if state == states.State.IDLE:
return True
if state in [states.State.COLLECT, states.State.ARCHIVE, states.State.COMPRESS]:
return False
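# Wiring sketch (assumes a Redis server on localhost and the redis-py client; the
# channel name and state transitions below are arbitrary, for illustration only):
if __name__ == '__main__':
    import redis
    logging.basicConfig(level=logging.INFO)
    client = redis.Redis(host='localhost', port=6379)
    worker = Executor(client, channel='logcollector')
    worker.communicate('online')  # announce ourselves on the shared channel
    worker.archive()              # example state transition (ARCHIVE, sleeps 5s)
    worker.stop()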
|
import re
import json
from color_print import *
# read rules from json file
with open("rules.json", "r", encoding="utf-8") as json_data:
rules = json.load(json_data)
# create new rule variants by replacing 'is' with 'was', 'has been', ...
def augment(sentence):
"""change 'is' in the sentence to was, has been,
should be ... to augment some new sentences
Args:
sentence: str, candidate sentence to be removed/replaced
Return:
augmented_strs: array of new candidates
"""
pad_sentence = " " + sentence + " "
augmented_str = [pad_sentence]
if 'is' in pad_sentence:
index = pad_sentence.find("is")
reps = ["was", "have been", "has been", "had been", "should be"]
for replace_candidate in reps:
new_str = pad_sentence[:index]
new_str += replace_candidate
new_str += pad_sentence[index+2:]
augmented_str.append(new_str)
return augmented_str
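# Example of what augment() produces: the sentence is padded with spaces and the first
# occurrence of "is" is swapped for each tense variant. Note the match is a plain
# substring search, so a word containing "is" (e.g. "this") would also be replaced.
def _augment_example():
    variants = augment("the weather is bad")
    # [' the weather is bad ', ' the weather was bad ', ' the weather have been bad ',
    #  ' the weather has been bad ', ' the weather had been bad ', ' the weather should be bad ']
    return variants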
def get_context(src_text, index):
'''Get the clause that contains the position index: returns istart (absolute index of the preceding punctuation) and iend (offset from index to the following punctuation).'''
stop_puncs = ['.', ',', '!', '?', ';', ':', '\n']
istart = max([src_text[:index].rfind(st) for st in stop_puncs])
iend = min([src_text[index:].find(st) for st in stop_puncs if src_text[index:].find(st)!=-1])
return istart, iend
# create suggestions for sentences to remove
def suggest_remove(file_path, to_remove, verbose=True):
with open(file_path, "r", encoding="utf-8") as fsrc:
src_text = fsrc.read().lower()
suggestions = []
for item in to_remove:
for s in augment(item):
if s not in src_text: continue
suggestions.append(color_red("remove: "+s))
if verbose:
indices = [m.start() for m in re.finditer(s, src_text)]
for index in indices:
istart, iend = get_context(src_text, index)
ctx = src_text[istart+1:index]+color_red(src_text[index:index+len(s)])
ctx += src_text[index+len(s):index+iend]
suggestions.append(ctx)
return suggestions
# create suggestions for sentences to replace
def suggest_replace(file_path, to_replace, verbose=True):
with open(file_path, "r", encoding="utf-8") as fsrc:
src_text = fsrc.read().lower()
suggestions = []
for key, value in to_replace.items():
for s in augment(key):
if s not in src_text: continue
suggestions.append(color_red("replace: "+s)+" ---> "+color_magenta(value))
if verbose:
indices = [m.start() for m in re.finditer(s, src_text)]
for index in indices:
istart, iend = get_context(src_text, index)
ctx = src_text[istart+1:index]+color_red(src_text[index:index+len(s)])
ctx += src_text[index+len(s):index+iend]
suggestions.append(ctx)
return suggestions
def report_wrong_words(fname, verbose=True):
'''report problematic words that are not simple, not precise, sexist, or needless'''
to_remove = rules["to_remove"]
to_replace = rules["to_replace"]
sug1 = suggest_remove(fname, to_remove, verbose)
sug2 = suggest_replace(fname, to_replace, verbose)
suggestions = sug1 + sug2
# if no suggestions, continue to process next file
if len(suggestions) == 0: return
# otherwise print suggestions to screen
print(color_blue("******************************************************"))
print("Words suggestions for ", color_cyan(fname.split('/')[-1]))
for suggestion in suggestions:
print(suggestion)
print("")
|
# -*- coding: utf-8 -*-
import pyqtgraph as pg
from pyqtgraph.Qt import QtGui, QtCore
import numpy as np
import csv, gzip, os
from pyqtgraph import Point
class GlassDB:
"""
Database of dispersion coefficients for Schott glasses
+ Corning 7980
"""
def __init__(self, fileName='schott_glasses.csv'):
path = os.path.dirname(__file__)
fh = gzip.open(os.path.join(path, 'schott_glasses.csv.gz'), 'rb')
r = csv.reader(map(str, fh.readlines()))
lines = [x for x in r]
self.data = {}
header = lines[0]
for l in lines[1:]:
info = {}
for i in range(1, len(l)):
info[header[i]] = l[i]
self.data[l[0]] = info
self.data['Corning7980'] = { ## Thorlabs UV fused silica--not in schott catalog.
'B1': 0.68374049400,
'B2': 0.42032361300,
'B3': 0.58502748000,
'C1': 0.00460352869,
'C2': 0.01339688560,
'C3': 64.49327320000,
'TAUI25/250': 0.95, ## transmission data is fabricated, but close.
'TAUI25/1400': 0.98,
}
for k in self.data:
self.data[k]['ior_cache'] = {}
def ior(self, glass, wl):
"""
Return the index of refraction for *glass* at wavelength *wl*.
The *glass* argument must be a key in self.data.
"""
info = self.data[glass]
cache = info['ior_cache']
if wl not in cache:
B = list(map(float, [info['B1'], info['B2'], info['B3']]))
C = list(map(float, [info['C1'], info['C2'], info['C3']]))
w2 = (wl/1000.)**2
n = np.sqrt(1.0 + (B[0]*w2 / (w2-C[0])) + (B[1]*w2 / (w2-C[1])) + (B[2]*w2 / (w2-C[2])))
cache[wl] = n
return cache[wl]
def transmissionCurve(self, glass):
data = self.data[glass]
keys = [int(x[7:]) for x in data.keys() if 'TAUI25' in x]
keys.sort()
curve = np.empty((2,len(keys)))
for i in range(len(keys)):
curve[0][i] = keys[i]
key = 'TAUI25/%d' % keys[i]
val = data[key]
if val == '':
val = 0
else:
val = float(val)
curve[1][i] = val
return curve
GLASSDB = GlassDB()
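# Quick usage sketch of the database above: look up the refractive index of N-BK7
# (the default glass used by Lens below) at two wavelengths in nm. The values come
# straight from the Sellmeier coefficients loaded from schott_glasses.csv.gz.
def _glassdb_example():
    n_blue = GLASSDB.ior('N-BK7', 450)  # shorter wavelength, higher index
    n_red = GLASSDB.ior('N-BK7', 650)   # normal dispersion: n_blue > n_red
    return n_blue, n_red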
def wlPen(wl):
"""Return a pen representing the given wavelength"""
l1 = 400
l2 = 700
hue = np.clip(((l2-l1) - (wl-l1)) * 0.8 / (l2-l1), 0, 0.8)
val = 1.0
if wl > 700:
val = 1.0 * (((700-wl)/700.) + 1)
elif wl < 400:
val = wl * 1.0/400.
#print hue, val
color = pg.hsvColor(hue, 1.0, val)
pen = pg.mkPen(color)
return pen
class ParamObj(object):
# Just a helper for tracking parameters and responding to changes
def __init__(self):
self.__params = {}
def __setitem__(self, item, val):
self.setParam(item, val)
def setParam(self, param, val):
self.setParams(**{param:val})
def setParams(self, **params):
"""Set parameters for this optic. This is a good function to override for subclasses."""
self.__params.update(params)
self.paramStateChanged()
def paramStateChanged(self):
pass
def __getitem__(self, item):
# bug in pyside 1.2.2 causes getitem to be called inside QGraphicsObject.parentItem:
return self.getParam(item) # PySide bug: https://bugreports.qt.io/browse/PYSIDE-671
def __len__(self):
# Workaround for PySide bug: https://bugreports.qt.io/browse/PYSIDE-671
return 0
def getParam(self, param):
return self.__params[param]
class Optic(pg.GraphicsObject, ParamObj):
sigStateChanged = QtCore.Signal()
def __init__(self, gitem, **params):
ParamObj.__init__(self)
pg.GraphicsObject.__init__(self) #, [0,0], [1,1])
self.gitem = gitem
self.surfaces = gitem.surfaces
gitem.setParentItem(self)
self.roi = pg.ROI([0,0], [1,1])
self.roi.addRotateHandle([1, 1], [0.5, 0.5])
self.roi.setParentItem(self)
defaults = {
'pos': Point(0,0),
'angle': 0,
}
defaults.update(params)
self._ior_cache = {}
self.roi.sigRegionChanged.connect(self.roiChanged)
self.setParams(**defaults)
def updateTransform(self):
self.setPos(0, 0)
tr = QtGui.QTransform()
self.setTransform(tr.translate(Point(self['pos'])).rotate(self['angle']))
def setParam(self, param, val):
ParamObj.setParam(self, param, val)
def paramStateChanged(self):
"""Some parameters of the optic have changed."""
# Move graphics item
self.gitem.setPos(Point(self['pos']))
self.gitem.resetTransform()
self.gitem.setRotation(self['angle'])
# Move ROI to match
try:
self.roi.sigRegionChanged.disconnect(self.roiChanged)
br = self.gitem.boundingRect()
o = self.gitem.mapToParent(br.topLeft())
self.roi.setAngle(self['angle'])
self.roi.setPos(o)
self.roi.setSize([br.width(), br.height()])
finally:
self.roi.sigRegionChanged.connect(self.roiChanged)
self.sigStateChanged.emit()
def roiChanged(self, *args):
pos = self.roi.pos()
# rotate gitem temporarily so we can decide where it will need to move
self.gitem.resetTransform()
self.gitem.setRotation(self.roi.angle())
br = self.gitem.boundingRect()
o1 = self.gitem.mapToParent(br.topLeft())
self.setParams(angle=self.roi.angle(), pos=pos + (self.gitem.pos() - o1))
def boundingRect(self):
return QtCore.QRectF()
def paint(self, p, *args):
pass
def ior(self, wavelength):
return GLASSDB.ior(self['glass'], wavelength)
class Lens(Optic):
def __init__(self, **params):
defaults = {
'dia': 25.4, ## diameter of lens
'r1': 50., ## positive means convex, use 0 for planar
'r2': 0, ## negative means convex
'd': 4.0,
'glass': 'N-BK7',
'reflect': False,
}
defaults.update(params)
d = defaults.pop('d')
defaults['x1'] = -d/2.
defaults['x2'] = d/2.
gitem = CircularSolid(brush=(100, 100, 130, 100), **defaults)
Optic.__init__(self, gitem, **defaults)
def propagateRay(self, ray):
"""Refract, reflect, absorb, and/or scatter ray. This function may create and return new rays"""
"""
NOTE:: We can probably use this to compute refractions faster: (from GLSL 120 docs)
For the incident vector I and surface normal N, and the
ratio of indices of refraction eta, return the refraction
vector. The result is computed by
k = 1.0 - eta * eta * (1.0 - dot(N, I) * dot(N, I))
if (k < 0.0)
return genType(0.0)
else
return eta * I - (eta * dot(N, I) + sqrt(k)) * N
The input parameters for the incident vector I and the
surface normal N must already be normalized to get the
desired results. eta == ratio of IORs
For reflection:
For the incident vector I and surface orientation N,
returns the reflection direction:
I - 2 * dot(N, I) * N
N must already be normalized in order to achieve the
desired result.
"""
iors = [self.ior(ray['wl']), 1.0]
for i in [0,1]:
surface = self.surfaces[i]
ior = iors[i]
p1, ai = surface.intersectRay(ray)
if p1 is None:
ray.setEnd(None)
break
p1 = surface.mapToItem(ray, p1)
rd = ray['dir']
a1 = np.arctan2(rd[1], rd[0])
ar = a1 - ai + np.arcsin((np.sin(ai) * ray['ior'] / ior))
ray.setEnd(p1)
dp = Point(np.cos(ar), np.sin(ar))
ray = Ray(parent=ray, ior=ior, dir=dp)
return [ray]
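# Standalone NumPy transcription of the GLSL refract() formula quoted in the note
# inside Lens.propagateRay above -- a reference sketch, not used by the classes here.
def refract_vector(I, N, eta):
    """I and N must be normalized; eta is the ratio of indices of refraction n1/n2."""
    I = np.asarray(I, dtype=float)
    N = np.asarray(N, dtype=float)
    k = 1.0 - eta * eta * (1.0 - np.dot(N, I) ** 2)
    if k < 0.0:
        return np.zeros_like(I)  # total internal reflection: GLSL returns the zero vector
    return eta * I - (eta * np.dot(N, I) + np.sqrt(k)) * N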
class Mirror(Optic):
def __init__(self, **params):
defaults = {
'r1': 0,
'r2': 0,
'd': 0.01,
}
defaults.update(params)
d = defaults.pop('d')
defaults['x1'] = -d/2.
defaults['x2'] = d/2.
gitem = CircularSolid(brush=(100,100,100,255), **defaults)
Optic.__init__(self, gitem, **defaults)
def propagateRay(self, ray):
"""Refract, reflect, absorb, and/or scatter ray. This function may create and return new rays"""
surface = self.surfaces[0]
p1, ai = surface.intersectRay(ray)
if p1 is not None:
p1 = surface.mapToItem(ray, p1)
rd = ray['dir']
a1 = np.arctan2(rd[1], rd[0])
ar = a1 + np.pi - 2*ai
ray.setEnd(p1)
dp = Point(np.cos(ar), np.sin(ar))
ray = Ray(parent=ray, dir=dp)
else:
ray.setEnd(None)
return [ray]
class CircularSolid(pg.GraphicsObject, ParamObj):
"""GraphicsObject with two circular or flat surfaces."""
def __init__(self, pen=None, brush=None, **opts):
"""
Arguments for each surface are:
x1,x2 - position of center of _physical surface_
r1,r2 - radius of curvature
d1,d2 - diameter of optic
"""
defaults = dict(x1=-2, r1=100, d1=25.4, x2=2, r2=100, d2=25.4)
defaults.update(opts)
ParamObj.__init__(self)
self.surfaces = [CircleSurface(defaults['r1'], defaults['d1']), CircleSurface(-defaults['r2'], defaults['d2'])]
pg.GraphicsObject.__init__(self)
for s in self.surfaces:
s.setParentItem(self)
if pen is None:
self.pen = pg.mkPen((220,220,255,200), width=1, cosmetic=True)
else:
self.pen = pg.mkPen(pen)
if brush is None:
self.brush = pg.mkBrush((230, 230, 255, 30))
else:
self.brush = pg.mkBrush(brush)
self.setParams(**defaults)
def paramStateChanged(self):
self.updateSurfaces()
def updateSurfaces(self):
self.surfaces[0].setParams(self['r1'], self['d1'])
self.surfaces[1].setParams(-self['r2'], self['d2'])
self.surfaces[0].setPos(self['x1'], 0)
self.surfaces[1].setPos(self['x2'], 0)
self.path = QtGui.QPainterPath()
self.path.connectPath(self.surfaces[0].path.translated(self.surfaces[0].pos()))
self.path.connectPath(self.surfaces[1].path.translated(self.surfaces[1].pos()).toReversed())
self.path.closeSubpath()
def boundingRect(self):
return self.path.boundingRect()
def shape(self):
return self.path
def paint(self, p, *args):
p.setRenderHints(p.renderHints() | p.Antialiasing)
p.setPen(self.pen)
p.fillPath(self.path, self.brush)
p.drawPath(self.path)
class CircleSurface(pg.GraphicsObject):
def __init__(self, radius=None, diameter=None):
"""center of physical surface is at 0,0
radius is the radius of the surface. If radius is None, the surface is flat.
diameter is of the optic's edge."""
pg.GraphicsObject.__init__(self)
self.r = radius
self.d = diameter
self.mkPath()
def setParams(self, r, d):
self.r = r
self.d = d
self.mkPath()
def mkPath(self):
self.prepareGeometryChange()
r = self.r
d = self.d
h2 = d/2.
self.path = QtGui.QPainterPath()
if r == 0: ## flat surface
self.path.moveTo(0, h2)
self.path.lineTo(0, -h2)
else:
## half-height of surface can't be larger than radius
h2 = min(h2, abs(r))
arc = QtCore.QRectF(0, -r, r*2, r*2)
a1 = np.arcsin(h2/r) * 180. / np.pi
a2 = -2*a1
a1 += 180.
self.path.arcMoveTo(arc, a1)
self.path.arcTo(arc, a1, a2)
self.h2 = h2
def boundingRect(self):
return self.path.boundingRect()
def paint(self, p, *args):
return ## usually we let the optic draw.
def intersectRay(self, ray):
## return the point of intersection and the angle of incidence
#print "intersect ray"
h = self.h2
r = self.r
p, dir = ray.currentState(relativeTo=self) # position and angle of ray in local coords.
#print " ray: ", p, dir
p = p - Point(r, 0) ## move position so center of circle is at 0,0
#print " adj: ", p, r
if r == 0:
#print " flat"
if dir[0] == 0:
y = 0
else:
y = p[1] - p[0] * dir[1]/dir[0]
if abs(y) > h:
return None, None
else:
return (Point(0, y), np.arctan2(dir[1], dir[0]))
else:
#print " curve"
## find intersection of circle and line (quadratic formula)
dx = dir[0]
dy = dir[1]
dr = (dx**2 + dy**2) ** 0.5
D = p[0] * (p[1]+dy) - (p[0]+dx) * p[1]
idr2 = 1.0 / dr**2
disc = r**2 * dr**2 - D**2
if disc < 0:
return None, None
disc2 = disc**0.5
if dy < 0:
sgn = -1
else:
sgn = 1
br = self.path.boundingRect()
x1 = (D*dy + sgn*dx*disc2) * idr2
y1 = (-D*dx + abs(dy)*disc2) * idr2
if br.contains(x1+r, y1):
pt = Point(x1, y1)
else:
x2 = (D*dy - sgn*dx*disc2) * idr2
y2 = (-D*dx - abs(dy)*disc2) * idr2
pt = Point(x2, y2)
if not br.contains(x2+r, y2):
return None, None
raise Exception("No intersection!")
norm = np.arctan2(pt[1], pt[0])
if r < 0:
norm += np.pi
#print " norm:", norm*180/3.1415
dp = p - pt
#print " dp:", dp
ang = np.arctan2(dp[1], dp[0])
#print " ang:", ang*180/3.1415
#print " ai:", (ang-norm)*180/3.1415
#print " intersection:", pt
return pt + Point(r, 0), ang-norm
class Ray(pg.GraphicsObject, ParamObj):
"""Represents a single straight segment of a ray"""
sigStateChanged = QtCore.Signal()
def __init__(self, **params):
ParamObj.__init__(self)
defaults = {
'ior': 1.0,
'wl': 500,
'end': None,
'dir': Point(1,0),
}
self.params = {}
pg.GraphicsObject.__init__(self)
self.children = []
parent = params.get('parent', None)
if parent is not None:
defaults['start'] = parent['end']
defaults['wl'] = parent['wl']
self['ior'] = parent['ior']
self['dir'] = parent['dir']
parent.addChild(self)
defaults.update(params)
defaults['dir'] = Point(defaults['dir'])
self.setParams(**defaults)
self.mkPath()
def clearChildren(self):
for c in self.children:
c.clearChildren()
c.setParentItem(None)
self.scene().removeItem(c)
self.children = []
def paramStateChanged(self):
pass
def addChild(self, ch):
self.children.append(ch)
ch.setParentItem(self)
def currentState(self, relativeTo=None):
pos = self['start']
dir = self['dir']
if relativeTo is None:
return pos, dir
else:
trans = self.itemTransform(relativeTo)[0]
p1 = trans.map(pos)
p2 = trans.map(pos + dir)
return Point(p1), Point(p2-p1)
def setEnd(self, end):
self['end'] = end
self.mkPath()
def boundingRect(self):
return self.path.boundingRect()
def paint(self, p, *args):
#p.setPen(pg.mkPen((255,0,0, 150)))
p.setRenderHints(p.renderHints() | p.Antialiasing)
p.setCompositionMode(p.CompositionMode_Plus)
p.setPen(wlPen(self['wl']))
p.drawPath(self.path)
def mkPath(self):
self.prepareGeometryChange()
self.path = QtGui.QPainterPath()
self.path.moveTo(self['start'])
if self['end'] is not None:
self.path.lineTo(self['end'])
else:
self.path.lineTo(self['start']+500*self['dir'])
def trace(rays, optics):
if len(optics) < 1 or len(rays) < 1:
return
for r in rays:
r.clearChildren()
o = optics[0]
r2 = o.propagateRay(r)
trace(r2, optics[1:])
class Tracer(QtCore.QObject):
"""
Simple ray tracer.
Initialize with a list of rays and optics;
calling trace() will cause rays to be extended by propagating them through
each optic in sequence.
"""
def __init__(self, rays, optics):
QtCore.QObject.__init__(self)
self.optics = optics
self.rays = rays
for o in self.optics:
o.sigStateChanged.connect(self.trace)
self.trace()
def trace(self):
trace(self.rays, self.optics)
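# Minimal usage sketch of the classes above (assumes a Qt environment, the Qt4/Qt5-era
# exec_() API, and the schott_glasses.csv.gz data file; layout values are arbitrary).
# Three parallel rays are launched at a single lens and re-traced whenever it is dragged.
if __name__ == '__main__':
    app = pg.mkQApp()
    view = pg.plot()  # simple plot window to host the graphics items
    view.setAspectLocked(True)
    lens = Lens(r1=50., r2=0, d=4.0, dia=25.4, glass='N-BK7', pos=Point(50, 0))
    rays = [Ray(start=Point(0, h), dir=(1, 0), wl=500) for h in (-5, 0, 5)]
    for item in [lens] + rays:
        view.addItem(item)
    tracer = Tracer(rays, [lens])  # keeps the rays in sync with the optic
    app.exec_()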
|
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/cloud/tasks_v2/proto/task.proto
import sys
_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode("latin1"))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.api import resource_pb2 as google_dot_api_dot_resource__pb2
from ..proto import (
target_pb2 as google_dot_cloud_dot_tasks__v2_dot_proto_dot_target__pb2,
)
from google.protobuf import duration_pb2 as google_dot_protobuf_dot_duration__pb2
from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2
from google.rpc import status_pb2 as google_dot_rpc_dot_status__pb2
from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name="google/cloud/tasks_v2/proto/task.proto",
package="google.cloud.tasks.v2",
syntax="proto3",
serialized_options=_b(
"\n\031com.google.cloud.tasks.v2B\tTaskProtoP\001Z:google.golang.org/genproto/googleapis/cloud/tasks/v2;tasks"
),
serialized_pb=_b(
'\n&google/cloud/tasks_v2/proto/task.proto\x12\x15google.cloud.tasks.v2\x1a\x19google/api/resource.proto\x1a(google/cloud/tasks_v2/proto/target.proto\x1a\x1egoogle/protobuf/duration.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x17google/rpc/status.proto\x1a\x1cgoogle/api/annotations.proto"\xb4\x05\n\x04Task\x12\x0c\n\x04name\x18\x01 \x01(\t\x12N\n\x17\x61pp_engine_http_request\x18\x02 \x01(\x0b\x32+.google.cloud.tasks.v2.AppEngineHttpRequestH\x00\x12:\n\x0chttp_request\x18\x03 \x01(\x0b\x32".google.cloud.tasks.v2.HttpRequestH\x00\x12\x31\n\rschedule_time\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12/\n\x0b\x63reate_time\x18\x05 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x34\n\x11\x64ispatch_deadline\x18\x06 \x01(\x0b\x32\x19.google.protobuf.Duration\x12\x16\n\x0e\x64ispatch_count\x18\x07 \x01(\x05\x12\x16\n\x0eresponse_count\x18\x08 \x01(\x05\x12\x35\n\rfirst_attempt\x18\t \x01(\x0b\x32\x1e.google.cloud.tasks.v2.Attempt\x12\x34\n\x0clast_attempt\x18\n \x01(\x0b\x32\x1e.google.cloud.tasks.v2.Attempt\x12.\n\x04view\x18\x0b \x01(\x0e\x32 .google.cloud.tasks.v2.Task.View"1\n\x04View\x12\x14\n\x10VIEW_UNSPECIFIED\x10\x00\x12\t\n\x05\x42\x41SIC\x10\x01\x12\x08\n\x04\x46ULL\x10\x02:h\xea\x41\x65\n\x1e\x63loudtasks.googleapis.com/Task\x12\x43projects/{project}/locations/{location}/queues/{queue}/tasks/{task}B\x0e\n\x0cmessage_type"\xcf\x01\n\x07\x41ttempt\x12\x31\n\rschedule_time\x18\x01 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x31\n\rdispatch_time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x31\n\rresponse_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12+\n\x0fresponse_status\x18\x04 \x01(\x0b\x32\x12.google.rpc.StatusBd\n\x19\x63om.google.cloud.tasks.v2B\tTaskProtoP\x01Z:google.golang.org/genproto/googleapis/cloud/tasks/v2;tasksb\x06proto3'
),
dependencies=[
google_dot_api_dot_resource__pb2.DESCRIPTOR,
google_dot_cloud_dot_tasks__v2_dot_proto_dot_target__pb2.DESCRIPTOR,
google_dot_protobuf_dot_duration__pb2.DESCRIPTOR,
google_dot_protobuf_dot_timestamp__pb2.DESCRIPTOR,
google_dot_rpc_dot_status__pb2.DESCRIPTOR,
google_dot_api_dot_annotations__pb2.DESCRIPTOR,
],
)
_TASK_VIEW = _descriptor.EnumDescriptor(
name="View",
full_name="google.cloud.tasks.v2.Task.View",
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name="VIEW_UNSPECIFIED",
index=0,
number=0,
serialized_options=None,
type=None,
),
_descriptor.EnumValueDescriptor(
name="BASIC", index=1, number=1, serialized_options=None, type=None
),
_descriptor.EnumValueDescriptor(
name="FULL", index=2, number=2, serialized_options=None, type=None
),
],
containing_type=None,
serialized_options=None,
serialized_start=776,
serialized_end=825,
)
_sym_db.RegisterEnumDescriptor(_TASK_VIEW)
_TASK = _descriptor.Descriptor(
name="Task",
full_name="google.cloud.tasks.v2.Task",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="name",
full_name="google.cloud.tasks.v2.Task.name",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="app_engine_http_request",
full_name="google.cloud.tasks.v2.Task.app_engine_http_request",
index=1,
number=2,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="http_request",
full_name="google.cloud.tasks.v2.Task.http_request",
index=2,
number=3,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="schedule_time",
full_name="google.cloud.tasks.v2.Task.schedule_time",
index=3,
number=4,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="create_time",
full_name="google.cloud.tasks.v2.Task.create_time",
index=4,
number=5,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="dispatch_deadline",
full_name="google.cloud.tasks.v2.Task.dispatch_deadline",
index=5,
number=6,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="dispatch_count",
full_name="google.cloud.tasks.v2.Task.dispatch_count",
index=6,
number=7,
type=5,
cpp_type=1,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="response_count",
full_name="google.cloud.tasks.v2.Task.response_count",
index=7,
number=8,
type=5,
cpp_type=1,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="first_attempt",
full_name="google.cloud.tasks.v2.Task.first_attempt",
index=8,
number=9,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="last_attempt",
full_name="google.cloud.tasks.v2.Task.last_attempt",
index=9,
number=10,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="view",
full_name="google.cloud.tasks.v2.Task.view",
index=10,
number=11,
type=14,
cpp_type=8,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[_TASK_VIEW],
serialized_options=_b(
"\352Ae\n\036cloudtasks.googleapis.com/Task\022Cprojects/{project}/locations/{location}/queues/{queue}/tasks/{task}"
),
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name="message_type",
full_name="google.cloud.tasks.v2.Task.message_type",
index=0,
containing_type=None,
fields=[],
)
],
serialized_start=255,
serialized_end=947,
)
_ATTEMPT = _descriptor.Descriptor(
name="Attempt",
full_name="google.cloud.tasks.v2.Attempt",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="schedule_time",
full_name="google.cloud.tasks.v2.Attempt.schedule_time",
index=0,
number=1,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="dispatch_time",
full_name="google.cloud.tasks.v2.Attempt.dispatch_time",
index=1,
number=2,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="response_time",
full_name="google.cloud.tasks.v2.Attempt.response_time",
index=2,
number=3,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="response_status",
full_name="google.cloud.tasks.v2.Attempt.response_status",
index=3,
number=4,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=950,
serialized_end=1157,
)
_TASK.fields_by_name[
"app_engine_http_request"
].message_type = (
google_dot_cloud_dot_tasks__v2_dot_proto_dot_target__pb2._APPENGINEHTTPREQUEST
)
_TASK.fields_by_name[
"http_request"
].message_type = google_dot_cloud_dot_tasks__v2_dot_proto_dot_target__pb2._HTTPREQUEST
_TASK.fields_by_name[
"schedule_time"
].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_TASK.fields_by_name[
"create_time"
].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_TASK.fields_by_name[
"dispatch_deadline"
].message_type = google_dot_protobuf_dot_duration__pb2._DURATION
_TASK.fields_by_name["first_attempt"].message_type = _ATTEMPT
_TASK.fields_by_name["last_attempt"].message_type = _ATTEMPT
_TASK.fields_by_name["view"].enum_type = _TASK_VIEW
_TASK_VIEW.containing_type = _TASK
_TASK.oneofs_by_name["message_type"].fields.append(
_TASK.fields_by_name["app_engine_http_request"]
)
_TASK.fields_by_name["app_engine_http_request"].containing_oneof = _TASK.oneofs_by_name[
"message_type"
]
_TASK.oneofs_by_name["message_type"].fields.append(_TASK.fields_by_name["http_request"])
_TASK.fields_by_name["http_request"].containing_oneof = _TASK.oneofs_by_name[
"message_type"
]
_ATTEMPT.fields_by_name[
"schedule_time"
].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_ATTEMPT.fields_by_name[
"dispatch_time"
].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_ATTEMPT.fields_by_name[
"response_time"
].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_ATTEMPT.fields_by_name[
"response_status"
].message_type = google_dot_rpc_dot_status__pb2._STATUS
DESCRIPTOR.message_types_by_name["Task"] = _TASK
DESCRIPTOR.message_types_by_name["Attempt"] = _ATTEMPT
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
Task = _reflection.GeneratedProtocolMessageType(
"Task",
(_message.Message,),
dict(
DESCRIPTOR=_TASK,
__module__="google.cloud.tasks_v2.proto.task_pb2",
__doc__="""A unit of scheduled work.
Attributes:
name:
Optionally caller-specified in
[CreateTask][google.cloud.tasks.v2.CloudTasks.CreateTask].
The task name. The task name must have the following format:
``projects/PROJECT_ID/locations/LOCATION_ID/queues/QUEUE_ID/ta
sks/TASK_ID`` - ``PROJECT_ID`` can contain letters
([A-Za-z]), numbers ([0-9]), hyphens (-), colons (:), or
periods (.). For more information, see `Identifying
projects <https://cloud.google.com/resource-
manager/docs/creating-managing-
projects#identifying_projects>`_ - ``LOCATION_ID`` is the
canonical ID for the task's location. The list of available
locations can be obtained by calling [ListLocations][google
.cloud.location.Locations.ListLocations]. For more
information, see https://cloud.google.com/about/locations/. -
``QUEUE_ID`` can contain letters ([A-Za-z]), numbers ([0-9]),
or hyphens (-). The maximum length is 100 characters. -
``TASK_ID`` can contain only letters ([A-Za-z]), numbers
([0-9]), hyphens (-), or underscores (\_). The maximum
length is 500 characters.
message_type:
Required. The message to send to the worker.
app_engine_http_request:
HTTP request that is sent to the App Engine app handler. An
App Engine task is a task that has [AppEngineHttpRequest][goog
le.cloud.tasks.v2.AppEngineHttpRequest] set.
http_request:
HTTP request that is sent to the worker. An HTTP task is a
task that has [HttpRequest][google.cloud.tasks.v2.HttpRequest]
set.
schedule_time:
The time when the task is scheduled to be attempted or
retried. ``schedule_time`` will be truncated to the nearest
microsecond.
create_time:
Output only. The time that the task was created.
``create_time`` will be truncated to the nearest second.
dispatch_deadline:
The deadline for requests sent to the worker. If the worker
does not respond by this deadline then the request is
cancelled and the attempt is marked as a ``DEADLINE_EXCEEDED``
failure. Cloud Tasks will retry the task according to the
[RetryConfig][google.cloud.tasks.v2.RetryConfig]. Note that
        when the request is cancelled, Cloud Tasks will stop listening
for the response, but whether the worker stops processing
depends on the worker. For example, if the worker is stuck, it
may not react to cancelled requests. The default and maximum
values depend on the type of request: - For [HTTP
tasks][google.cloud.tasks.v2.HttpRequest], the default is
10 minutes. The deadline must be in the interval [15 seconds,
30 minutes]. - For [App Engine
tasks][google.cloud.tasks.v2.AppEngineHttpRequest], 0
indicates that the request has the default deadline. The
default deadline depends on the `scaling type
<https://cloud.google.com/appengine/docs/standard/go/how-
instances-are-managed#instance_scaling>`_ of the service:
10 minutes for standard apps with automatic scaling, 24
hours for standard apps with manual and basic scaling, and 60
minutes for flex apps. If the request deadline is set, it must
be in the interval [15 seconds, 24 hours 15 seconds].
Regardless of the task's ``dispatch_deadline``, the app
        handler will not run for longer than the service's
timeout. We recommend setting the ``dispatch_deadline`` to
at most a few seconds more than the app handler's timeout.
For more information see `Timeouts
<https://cloud.google.com/tasks/docs/creating-appengine-
handlers#timeouts>`_. ``dispatch_deadline`` will be
truncated to the nearest millisecond. The deadline is an
approximate deadline.
dispatch_count:
Output only. The number of attempts dispatched. This count
includes attempts which have been dispatched but haven't
received a response.
response_count:
Output only. The number of attempts which have received a
response.
first_attempt:
Output only. The status of the task's first attempt. Only
[dispatch\_time][google.cloud.tasks.v2.Attempt.dispatch\_time]
will be set. The other
[Attempt][google.cloud.tasks.v2.Attempt] information is not
retained by Cloud Tasks.
last_attempt:
Output only. The status of the task's last attempt.
view:
Output only. The view specifies which subset of the
[Task][google.cloud.tasks.v2.Task] has been returned.
""",
# @@protoc_insertion_point(class_scope:google.cloud.tasks.v2.Task)
),
)
_sym_db.RegisterMessage(Task)
Attempt = _reflection.GeneratedProtocolMessageType(
"Attempt",
(_message.Message,),
dict(
DESCRIPTOR=_ATTEMPT,
__module__="google.cloud.tasks_v2.proto.task_pb2",
__doc__="""The status of a task attempt.
Attributes:
schedule_time:
Output only. The time that this attempt was scheduled.
``schedule_time`` will be truncated to the nearest
microsecond.
dispatch_time:
Output only. The time that this attempt was dispatched.
``dispatch_time`` will be truncated to the nearest
microsecond.
response_time:
Output only. The time that this attempt response was received.
``response_time`` will be truncated to the nearest
microsecond.
response_status:
Output only. The response from the worker for this attempt.
If ``response_time`` is unset, then the task has not been
attempted or is currently running and the ``response_status``
field is meaningless.
""",
# @@protoc_insertion_point(class_scope:google.cloud.tasks.v2.Attempt)
),
)
_sym_db.RegisterMessage(Attempt)
DESCRIPTOR._options = None
_TASK._options = None
# @@protoc_insertion_point(module_scope)
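# ---------------------------------------------------------------------------
# Hedged usage sketch (not emitted by protoc): a minimal example of building a
# Task message with the classes defined above. The resource names below are
# placeholders, and the example assumes a protobuf runtime compatible with
# this old-style generated module.
if __name__ == "__main__":
    from google.protobuf import timestamp_pb2

    example_task = Task(
        name="projects/my-project/locations/us-central1/queues/my-queue/tasks/task-1",
        schedule_time=timestamp_pb2.Timestamp(seconds=1600000000),
        view=Task.FULL,
    )
    print(example_task)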
|
# Copyright Tom SF Haines, Reinier de Blois, Aaron Snoswell
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from panda3d.core import VBase4
from panda3d.core import AmbientLight as PAmbientLight
class AmbLight:
"""Creates a simple ambient light"""
def __init__(self,manager,xml):
self.light = PAmbientLight('alight')
self.lightNode = render.attachNewNode(self.light)
self.reload(manager,xml)
def reload(self,manager,xml):
color = xml.find('color')
    if color is not None:
self.light.setColor(VBase4(float(color.get('r')), float(color.get('g')), float(color.get('b')), 1.0))
def start(self):
render.setLight(self.lightNode)
def stop(self):
render.clearLight(self.lightNode)
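# Hedged usage sketch: AmbLight expects an xml.etree.ElementTree element such
# as <ambientLight><color r="0.4" g="0.4" b="0.4"/></ambientLight>, plus a
# manager object from the host engine (not used beyond the constructor here).
# The global `render` node is injected by Panda3D's ShowBase at runtime, so
# the example is shown as comments only:
#
#   import xml.etree.ElementTree as ET
#   node = ET.fromstring('<ambientLight><color r="0.4" g="0.4" b="0.4"/></ambientLight>')
#   light = AmbLight(manager=None, xml=node)
#   light.start()   # attach the light to the scene graph
#   light.stop()    # detach it again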
|
#
# Run NN, multinomial logistic regression using simple gradient descent.
#
import config
import numpy as np
import tensorflow as tf
from tensorflow import (Variable, constant, global_variables_initializer,
truncated_normal, zeros)
from tf_training_helper import TrainingHelper
class TF_notMNIST_Training_Gradient_Descent:
def __init__(self, each_object_size_width=config.TRAIN_OBJECT_WIDTH, each_object_size_height=config.TRAIN_OBJECT_HEIGHT, train_batch=10000, train_steps=800, train_learning_rate=0.5):
"""
Constructor.
"""
self.each_object_size_width = each_object_size_width
self.each_object_size_height = each_object_size_height
self.train_batch = train_batch
self.train_steps = train_steps
self.train_learning_rate = train_learning_rate
helper = TrainingHelper()
self.__print_predications__ = helper.print_predications
self.__print_test_accuracy__ = helper.print_test_accuracy
self.__activation__ = helper.activation
self.__loss_optimizer__ = helper.loss_optimizer
def start_with(self, train_dataset, train_labels, valid_dataset, valid_labels, test_dataset, test_labels, count_classes, beta_for_regularizer=0.01):
"""
Start multinomial logistic regression using simple gradient descent.
"""
#
# Fixed values while training
#
tf_train_dataset = constant(train_dataset[:self.train_batch, :])
tf_train_labels = constant(train_labels[:self.train_batch])
tf_valid_dataset = constant(valid_dataset)
tf_test_dataset = constant(test_dataset)
#
# Variables should be trained.
# Classical weight and biases.
#
tf_weights = Variable(truncated_normal(
[self.each_object_size_width * self.each_object_size_height, count_classes]))
tf_biases = Variable(zeros([count_classes]))
logits = self.__activation__(tf_train_dataset, tf_weights, tf_biases)
loss, optimizer = self.__loss_optimizer__(
tf_train_labels, logits, self.train_learning_rate, beta_for_regularizer, [tf_weights])
#
        # Convert logits to predictions.
        # The classification problem is expressed as softmax probabilities.
#
predication_for_train = tf.nn.softmax(logits)
predication_for_valid = tf.nn.softmax(
self.__activation__(tf_valid_dataset, tf_weights, tf_biases))
predication_for_test = tf.nn.softmax(
self.__activation__(tf_test_dataset, tf_weights, tf_biases))
#
# Training
#
print("\n")
with tf.Session() as sess:
init = global_variables_initializer()
sess.run(init)
for step in range(self.train_steps):
_, ls, predications = sess.run(
[optimizer, loss, predication_for_train])
self.__print_predications__(
step, ls, predications, train_labels[:self.train_batch, :], predication_for_valid, valid_labels)
self.__print_test_accuracy__(predication_for_test, test_labels)
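# Hedged usage sketch: the datasets are assumed to be flattened notMNIST
# images of shape [n, width*height] with one-hot labels of shape
# [n, count_classes]; config and TrainingHelper come from this repository.
#
#   trainer = TF_notMNIST_Training_Gradient_Descent(train_batch=10000,
#                                                   train_steps=800,
#                                                   train_learning_rate=0.5)
#   trainer.start_with(train_dataset, train_labels,
#                      valid_dataset, valid_labels,
#                      test_dataset, test_labels,
#                      count_classes=10, beta_for_regularizer=0.01)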
|
import inspect
import math
import re
from functools import singledispatch, partial
from itertools import chain, cycle
from .api import (
always_break,
annotate,
concat,
contextual,
flat_choice,
fill,
group,
nest,
NIL,
LINE,
SOFTLINE,
HARDLINE
)
from .doc import (
Annotated,
Doc
)
from .layout import layout_smart
from .syntax import Token
from .utils import identity, intersperse
UNSET_SENTINEL = object()
COMMA = annotate(Token.PUNCTUATION, ',')
COLON = annotate(Token.PUNCTUATION, ':')
ELLIPSIS = annotate(Token.PUNCTUATION, '...')
LPAREN = annotate(Token.PUNCTUATION, '(')
RPAREN = annotate(Token.PUNCTUATION, ')')
LBRACKET = annotate(Token.PUNCTUATION, '[')
RBRACKET = annotate(Token.PUNCTUATION, ']')
LBRACE = annotate(Token.PUNCTUATION, '{')
RBRACE = annotate(Token.PUNCTUATION, '}')
NEG_OP = annotate(Token.OPERATOR, '-')
MUL_OP = annotate(Token.OPERATOR, '*')
ADD_OP = annotate(Token.OPERATOR, '+')
ASSIGN_OP = annotate(Token.OPERATOR, '=')
WHITESPACE_PATTERN_TEXT = re.compile(r'(\s+)')
WHITESPACE_PATTERN_BYTES = re.compile(rb'(\s+)')
NONWORD_PATTERN_TEXT = re.compile(r'(\W+)')
NONWORD_PATTERN_BYTES = re.compile(rb'(\W+)')
# For dict keys
"""
(
'aaaaaaaaaa'
'aaaaaa'
)
"""
MULTILINE_STATEGY_PARENS = 'MULTILINE_STATEGY_PARENS'
# For dict values
"""
'aaaaaaaaaa'
'aaaaa'
"""
MULTILINE_STATEGY_INDENTED = 'MULTILINE_STATEGY_INDENTED'
# For sequence elements
"""
'aaaaaaaaa'
'aaaaaa'
"""
MULTILINE_STATEGY_HANG = 'MULTILINE_STATEGY_HANG'
# For top level strs
"""
'aaaaaaaaa'
'aaaaaa'
"""
MULTILINE_STATEGY_PLAIN = 'MULTILINE_STATEGY_PLAIN'
IMPLICIT_MODULES = {
'__main__',
'builtins',
}
class CommentAnnotation:
def __init__(self, value):
assert isinstance(value, str)
self.value = value
def __repr__(self):
return f'ValueComment({repr(self.value)})'
class _CommentedValue:
def __init__(self, value, comment):
self.value = value
self.comment = comment
class _TrailingCommentedValue:
def __init__(self, value, comment):
self.value = value
self.comment = comment
def annotate_comment(comment, doc):
"""Annotate ``doc`` with ``comment`` text.
Peprint will make sure the parent (or top-level) handler
will render the comment in a proper way. E.g. if ``doc``
represents an element in a list, then the ``list`` pretty
printer will handle where to place the comment.
Differs from ``comment`` and ``trailing_comment`` by
operating only on Docs, not normal values.
"""
return annotate(CommentAnnotation(comment), doc)
def comment(comment_str, value):
"""Annotates a value with a comment str.
Allows you to insert comments into Peprint output
by annotating them on the values directly, instead
of first having to render them into a Doc and then
annotating the Doc with ``annotate_comment``.
Generally, you want to use this to annotate arguments
to ``prettycall``.
"""
return _CommentedValue(value, comment_str)
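# Illustrative sketch (hypothetical values): attach a comment to an argument
# before handing it to ``prettycall``; the layout algorithm then decides
# whether the comment fits on the same line or goes on a line of its own.
#
#   doc = prettycall(ctx, sorted, comment('raw scores', [7, 4, 5]), reverse=True)
#   # renders roughly as:
#   #   sorted(
#   #       [7, 4, 5],  # raw scores
#   #       reverse=True
#   #   )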
def trailing_comment(comment_str, value):
"""Annotates a value with a comment str, so that
the comment will be rendered "trailing", e.g. in place
of the last element in a list, set or tuple, or after
the last argument in a function.
This will force the rendering of `value` to be broken
    to multiple lines as Python does not have inline comments.
"""
return _TrailingCommentedValue(value, comment_str)
def unwrap_comments(value):
comment = None
trailing_comment = None
while isinstance(value, (_CommentedValue, _TrailingCommentedValue)):
if isinstance(value, _CommentedValue):
comment = value.comment
value = value.value
elif isinstance(value, _TrailingCommentedValue):
trailing_comment = value.comment
value = value.value
return (value, comment, trailing_comment)
def is_commented(value):
return (
isinstance(value, Annotated) and
isinstance(value.annotation, CommentAnnotation)
)
def builtin_identifier(s):
return annotate(Token.NAME_BUILTIN, s)
def identifier(s):
return annotate(Token.NAME_FUNCTION, s)
def general_identifier(s):
if callable(s):
module, qualname = s.__module__, s.__qualname__
if module in IMPLICIT_MODULES:
if module == 'builtins':
return builtin_identifier(qualname)
return identifier(qualname)
return identifier(f'{module}.{qualname}')
return identifier(s)
def classattr(cls, attrname):
return concat([
general_identifier(cls),
identifier(f'.{attrname}')
])
class PrettyContext:
__slots__ = (
'indent',
'depth_left',
'visited',
'multiline_strategy',
'user_ctx'
)
def __init__(
self,
indent,
depth_left,
visited=None,
multiline_strategy=MULTILINE_STATEGY_PLAIN,
user_ctx=None,
):
self.indent = indent
self.depth_left = depth_left
self.multiline_strategy = multiline_strategy
if visited is None:
visited = set()
self.visited = visited
if user_ctx is None:
user_ctx = {}
self.user_ctx = user_ctx
def _replace(self, **kwargs):
passed_keys = set(kwargs.keys())
fieldnames = type(self).__slots__
assert passed_keys.issubset(set(fieldnames))
return PrettyContext(
**{
k: (
kwargs[k]
if k in passed_keys
else getattr(self, k)
)
for k in fieldnames
}
)
def use_multiline_strategy(self, strategy):
return self._replace(multiline_strategy=strategy)
def set(self, key, value):
return self._replace(user_ctx={
**self.user_ctx,
key: value,
})
def get(self, key, default=None):
return self.user_ctx.get(key, default)
def nested_call(self):
return self._replace(depth_left=self.depth_left - 1)
def start_visit(self, value):
self.visited.add(id(value))
def end_visit(self, value):
self.visited.remove(id(value))
def is_visited(self, value):
return id(value) in self.visited
def _run_pretty(pretty_fn, value, ctx, trailing_comment=None):
if ctx.is_visited(value):
return _pretty_recursion(value)
ctx.start_visit(value)
if trailing_comment:
doc = pretty_fn(value, ctx, trailing_comment=trailing_comment)
else:
doc = pretty_fn(value, ctx)
if not (
isinstance(doc, str) or
isinstance(doc, Doc)
):
fnname = f'{pretty_fn.__module__}.{pretty_fn.__qualname__}'
raise ValueError(
'Functions decorated with register_pretty must return '
f'an instance of str or Doc. {fnname} returned '
f'{repr(doc)} instead.'
)
ctx.end_visit(value)
return doc
_PREDICATE_REGISTRY = []
def _repr_pretty(value, ctx):
for predicate, fn in _PREDICATE_REGISTRY:
if predicate(value):
return fn(value, ctx)
return repr(value)
pretty_dispatch = singledispatch(partial(_run_pretty, _repr_pretty))
def pretty_python_value(value, ctx):
comment = None
trailing_comment = None
value, comment, trailing_comment = unwrap_comments(value)
if trailing_comment:
doc = pretty_dispatch(
value,
ctx,
trailing_comment=trailing_comment
)
else:
doc = pretty_dispatch(
value,
ctx
)
if comment:
return annotate_comment(
comment,
doc
)
return doc
def register_pretty(type=None, predicate=None):
"""Returns a decorator that registers the decorated function
as the pretty printer for instances of ``type``.
"""
if type is None and predicate is None:
raise ValueError(
"You must provide either the 'type' or 'predicate' argument."
)
if type is not None and predicate is not None:
raise ValueError(
"You must provide either the 'type' or 'predicate' argument,"
"but not both"
)
if predicate is not None:
if not callable(predicate):
raise ValueError(
f"Expected a callable for 'predicate', got {repr(predicate)}"
)
def decorator(fn):
sig = inspect.signature(fn)
value = None
ctx = None
try:
sig.bind(value, ctx)
except TypeError:
fnname = f'{fn.__module__}.{fn.__qualname__}'
raise ValueError(
"Functions decorated with register_pretty must accept "
"exactly two positional parameters: 'value' and 'ctx'. "
f"The function signature for {fnname} was not compatible."
)
if type:
pretty_dispatch.register(type, partial(_run_pretty, fn))
else:
assert callable(predicate)
_PREDICATE_REGISTRY.append((predicate, fn))
return fn
return decorator
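# Hedged example of the decorator in use (Point is a hypothetical class, not
# part of this module): the registered function receives the value and a
# PrettyContext and must return a str or a Doc, typically built with
# ``prettycall``.
#
#   @register_pretty(Point)
#   def pretty_point(value, ctx):
#       return prettycall(ctx, Point, x=value.x, y=value.y)
#
#   # or register by predicate instead of by concrete type:
#   @register_pretty(predicate=lambda v: hasattr(v, '__dataclass_fields__'))
#   def pretty_dataclass(value, ctx):
#       return repr(value)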
def bracket(ctx, left, child, right):
return concat([
left,
nest(ctx.indent, concat([SOFTLINE, child])),
SOFTLINE,
right
])
def commentdoc(text):
"""Returns a Doc representing a comment `text`. `text` is
treated as words, and any whitespace may be used to break
the comment to multiple lines."""
if not text:
raise ValueError(
f'Expected non-empty comment str, got {repr(text)}'
)
commentlines = []
for line in text.splitlines():
alternating_words_ws = list(filter(None, WHITESPACE_PATTERN_TEXT.split(line)))
starts_with_whitespace = bool(
WHITESPACE_PATTERN_TEXT.match(alternating_words_ws[0])
)
if starts_with_whitespace:
prefix = alternating_words_ws[0]
alternating_words_ws = alternating_words_ws[1:]
else:
prefix = NIL
if len(alternating_words_ws) % 2 == 0:
# The last part must be whitespace.
alternating_words_ws = alternating_words_ws[:-1]
for idx, tup in enumerate(zip(alternating_words_ws, cycle([False, True]))):
part, is_ws = tup
if is_ws:
alternating_words_ws[idx] = flat_choice(
when_flat=part,
when_broken=always_break(
concat([
HARDLINE,
'# ',
])
)
)
commentlines.append(
concat([
'# ',
prefix,
fill(alternating_words_ws)
])
)
outer = identity
if len(commentlines) > 1:
outer = always_break
return annotate(
Token.COMMENT_SINGLE,
outer(concat(intersperse(HARDLINE, commentlines)))
)
def sequence_of_docs(ctx, left, docs, right, dangle=False):
docs = list(docs)
# Performance optimization:
# in case of really long sequences,
# the layout algorithm can be quite slow.
# No branching here is needed if the sequence
# is long enough that even with the shortest
# element output, it does not fit the ribbon width.
minimum_output_len = (
2 + # Assume left and right are one character each
len(', ') * (len(docs) - 1) +
len(docs) # each element must take at least one character
)
MAX_PRACTICAL_RIBBON_WIDTH = 150
will_break = minimum_output_len > MAX_PRACTICAL_RIBBON_WIDTH
has_comment = any(is_commented(doc) for doc in docs)
parts = []
for idx, doc in enumerate(docs):
last = idx == len(docs) - 1
if is_commented(doc):
comment_str = doc.annotation.value
# Try to fit the comment at the end of the same line.
flat_version = concat([
doc,
COMMA if not last else NIL,
' ',
commentdoc(comment_str),
HARDLINE if not last else NIL
])
            # If the value is broken to multiple lines, put
            # the comment on the line above.
broken_version = concat([
commentdoc(comment_str),
HARDLINE,
doc,
COMMA if not last else NIL,
HARDLINE if not last else NIL
])
parts.append(
group(
flat_choice(
when_flat=flat_version,
when_broken=broken_version,
)
)
)
else:
parts.append(doc)
if not last:
parts.append(
concat([COMMA, LINE])
)
if dangle:
parts.append(COMMA)
outer = (
always_break
if will_break or has_comment
else group
)
return outer(bracket(ctx, left, concat(parts), right))
def prettycall(ctx, fn, *args, **kwargs):
"""Returns a Doc that represents a function call to :keyword:`fn` with
the remaining positional and keyword arguments.
Given an arbitrary context ``ctx``,::
prettycall(ctx, sorted, [7, 4, 5], reverse=True)
Will result in output::
sorted([7, 4, 5], reverse=True)
The layout algorithm will automatically break the call to multiple
lines if needed::
sorted(
[7, 4, 5],
reverse=True
)
``prettycall`` automatically handles syntax highlighting.
:param ctx: a context value
:type ctx: peprint.peprint.PrettyContext
:param fn: a callable
:param args: positional arguments to render to the call
:param kwargs: keyword arguments to render to the call
:returns: :class:`~peprint.doc.Doc`
"""
fndoc = general_identifier(fn)
if ctx.depth_left <= 0:
return concat([fndoc, LPAREN, ELLIPSIS, RPAREN])
if not kwargs and len(args) == 1:
sole_arg = args[0]
unwrapped_sole_arg, _comment, _trailing_comment = unwrap_comments(args[0])
if type(unwrapped_sole_arg) in (list, dict, tuple):
return build_fncall(
ctx,
fndoc,
argdocs=[pretty_python_value(sole_arg, ctx)],
hug_sole_arg=True,
)
nested_ctx = (
ctx
.nested_call()
.use_multiline_strategy(MULTILINE_STATEGY_HANG)
)
return build_fncall(
ctx,
fndoc,
argdocs=(
pretty_python_value(arg, nested_ctx)
for arg in args
),
kwargdocs=(
(kwarg, pretty_python_value(v, nested_ctx))
for kwarg, v in kwargs.items()
),
)
def build_fncall(
ctx,
fndoc,
argdocs=(),
kwargdocs=(),
hug_sole_arg=False,
trailing_comment=None,
):
"""Builds a doc that looks like a function call,
from docs that represent the function, arguments
and keyword arguments.
If ``hug_sole_arg`` is True, and the represented
functional call is done with a single non-keyword
argument, the function call parentheses will hug
the sole argument doc without newlines and indentation
in break mode. This makes a difference in calls
like this::
> hug_sole_arg = False
frozenset(
[
1,
2,
3,
4,
5
]
)
> hug_sole_arg = True
frozenset([
1,
2,
3,
4,
5,
])
If ``trailing_comment`` is provided, the text is
rendered as a comment after the last argument and
before the closing parenthesis. This will force
the function call to be broken to multiple lines.
"""
if callable(fndoc):
fndoc = general_identifier(fndoc)
has_comment = bool(trailing_comment)
argdocs = list(argdocs)
kwargdocs = list(kwargdocs)
kwargdocs = [
# Propagate any comments to the kwarg doc.
(
annotate_comment(
doc.annotation.value,
concat([binding, ASSIGN_OP, doc.doc])
)
if is_commented(doc)
else concat([binding, ASSIGN_OP, doc])
)
for binding, doc in kwargdocs
]
if not (argdocs or kwargdocs):
return concat([
fndoc,
LPAREN,
RPAREN,
])
if (
hug_sole_arg and
not kwargdocs and
len(argdocs) == 1 and
not is_commented(argdocs[0])
):
return group(
concat([
fndoc,
LPAREN,
argdocs[0],
RPAREN
])
)
allarg_docs = [*argdocs, *kwargdocs]
if trailing_comment:
allarg_docs.append(commentdoc(trailing_comment))
parts = []
for idx, doc in enumerate(allarg_docs):
last = idx == len(allarg_docs) - 1
if is_commented(doc):
has_comment = True
comment_str = doc.annotation.value
doc = doc.doc
else:
comment_str = None
part = concat([doc, NIL if last else COMMA])
if comment_str:
part = group(
flat_choice(
when_flat=concat([
part,
' ',
commentdoc(comment_str)
]),
when_broken=concat([
commentdoc(comment_str),
HARDLINE,
part,
]),
)
)
if not last:
part = concat([part, HARDLINE if has_comment else LINE])
parts.append(part)
outer = (
always_break
if has_comment
else group
)
return outer(
concat([
fndoc,
LPAREN,
nest(
ctx.indent,
concat([
SOFTLINE,
concat(parts),
])
),
SOFTLINE,
RPAREN
])
)
@register_pretty(tuple)
@register_pretty(list)
@register_pretty(set)
def pretty_bracketable_iterable(value, ctx, trailing_comment=None):
dangle = False
if isinstance(value, list):
left, right = LBRACKET, RBRACKET
elif isinstance(value, tuple):
left, right = LPAREN, RPAREN
if len(value) == 1:
dangle = True
elif isinstance(value, set):
left, right = LBRACE, RBRACE
if not value:
if isinstance(value, (list, tuple)):
return concat([left, right])
else:
assert isinstance(value, set)
return prettycall(ctx, set)
if ctx.depth_left == 0:
return concat([left, ELLIPSIS, right])
if len(value) == 1:
sole_value = list(value)[0]
els = [
pretty_python_value(
sole_value,
ctx=(
ctx
.nested_call()
.use_multiline_strategy(MULTILINE_STATEGY_PLAIN)
)
)
]
else:
els = (
pretty_python_value(
el,
ctx=(
ctx
.nested_call()
.use_multiline_strategy(MULTILINE_STATEGY_HANG)
)
)
for el in value
)
if trailing_comment:
els = chain(els, [commentdoc(trailing_comment)])
dangle = False
return sequence_of_docs(ctx, left, els, right, dangle=dangle)
@register_pretty(frozenset)
def pretty_frozenset(value, ctx):
if value:
return prettycall(ctx, frozenset, list(value))
return prettycall(ctx, frozenset)
class _AlwaysSortable(object):
__slots__ = ('value', )
def __init__(self, value):
self.value = value
def sortable_value(self):
return (str(type(self)), id(self))
def __lt__(self, other):
try:
return self.value < other.value
except TypeError:
return self.sortable_value() < other.sortable_value()
@register_pretty(dict)
def pretty_dict(d, ctx):
if ctx.depth_left == 0:
return concat([LBRACE, ELLIPSIS, RBRACE])
has_comment = False
pairs = []
for k in sorted(d.keys(), key=_AlwaysSortable):
v = d[k]
if isinstance(k, (str, bytes)):
kdoc = pretty_str(
k,
# not a nested call on purpose
ctx=ctx.use_multiline_strategy(MULTILINE_STATEGY_PARENS),
)
else:
kdoc = pretty_python_value(
k,
ctx=ctx.nested_call()
)
vdoc = pretty_python_value(
v,
ctx=(
ctx
.nested_call()
.use_multiline_strategy(MULTILINE_STATEGY_INDENTED)
),
)
kcomment = None
if is_commented(kdoc):
has_comment = True
kcomment = kdoc.annotation.value
kdoc = kdoc.doc
vcomment = None
if is_commented(vdoc):
has_comment = True
vcomment = vdoc.annotation.value
vdoc = vdoc.doc
pairs.append((k, v, kdoc, vdoc, kcomment, vcomment))
parts = []
for idx, tup in enumerate(pairs):
last = idx == len(pairs) - 1
k, v, kdoc, vdoc, kcomment, vcomment = tup
if not (kcomment or vcomment):
parts.append(
concat([
kdoc,
concat([COLON, ' ']),
vdoc,
NIL if last else COMMA,
NIL if last else LINE,
]),
)
continue
if kcomment:
kcommented = concat([
commentdoc(kcomment),
HARDLINE,
kdoc,
])
else:
kcommented = kdoc
if vcomment:
vcommented = group(
flat_choice(
# Add comment at the end of the line
when_flat=concat([
vdoc,
NIL if last else COMMA,
' ',
commentdoc(vcomment),
NIL if last else HARDLINE,
]),
# Put comment above the value
# on its own line
when_broken=concat([
nest(
ctx.indent,
concat([
HARDLINE,
commentdoc(vcomment),
HARDLINE,
# Rerender vdoc with plain multiline strategy,
# since we already have an indentation.
pretty_python_value(
v,
ctx=(
ctx
.nested_call()
.use_multiline_strategy(MULTILINE_STATEGY_PLAIN)
),
),
COMMA if not last else NIL,
HARDLINE if not last else NIL
])
),
])
)
)
else:
vcommented = concat([
vdoc,
COMMA if not last else NIL,
LINE if not last else NIL
])
parts.append(
concat([
kcommented,
concat([COLON, ' ']),
vcommented
])
)
doc = bracket(
ctx,
LBRACE,
concat(parts),
RBRACE,
)
if len(pairs) > 2 or has_comment:
doc = always_break(doc)
else:
doc = group(doc)
return doc
INF_FLOAT = float('inf')
NEG_INF_FLOAT = float('-inf')
@register_pretty(float)
def pretty_float(value, ctx):
if ctx.depth_left == 0:
return prettycall(ctx, float, ...)
if value == INF_FLOAT:
return prettycall(ctx, float, 'inf')
elif value == NEG_INF_FLOAT:
return prettycall(ctx, float, '-inf')
elif math.isnan(value):
return prettycall(ctx, float, 'nan')
return annotate(Token.NUMBER_FLOAT, repr(value))
@register_pretty(int)
def pretty_int(value, ctx):
if ctx.depth_left == 0:
return prettycall(ctx, int, ...)
return annotate(Token.NUMBER_INT, repr(value))
@register_pretty(type(...))
def pretty_ellipsis(value, ctx):
return ELLIPSIS
@register_pretty(bool)
@register_pretty(type(None))
def pretty_singletons(value, ctx):
return annotate(Token.KEYWORD_CONSTANT, repr(value))
SINGLE_QUOTE_TEXT = "'"
SINGLE_QUOTE_BYTES = b"'"
DOUBLE_QUOTE_TEXT = '"'
DOUBLE_QUOTE_BYTES = b'"'
def determine_quote_strategy(s):
if isinstance(s, str):
single_quote = SINGLE_QUOTE_TEXT
double_quote = DOUBLE_QUOTE_TEXT
else:
single_quote = SINGLE_QUOTE_BYTES
double_quote = DOUBLE_QUOTE_BYTES
contains_single = single_quote in s
contains_double = double_quote in s
if not contains_single:
return SINGLE_QUOTE_TEXT
if not contains_double:
return DOUBLE_QUOTE_TEXT
assert contains_single and contains_double
single_count = s.count(single_quote)
double_count = s.count(double_quote)
if single_count <= double_count:
return SINGLE_QUOTE_TEXT
return DOUBLE_QUOTE_TEXT
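# Worked examples of the strategy above (illustrative inputs): prefer single
# quotes and fall back to double quotes only when that needs fewer escapes.
#
#   determine_quote_strategy("plain")          -> "'"
#   determine_quote_strategy("it's")           -> '"'
#   determine_quote_strategy('he said "hi"')   -> "'"
#   determine_quote_strategy('\'""')           -> "'"   (1 single vs 2 doubles)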
def escape_str_for_quote(use_quote, s):
escaped_with_quotes = repr(s)
repr_used_quote = escaped_with_quotes[-1]
# string may have a prefix
first_quote_at_index = escaped_with_quotes.find(repr_used_quote)
repr_escaped = escaped_with_quotes[first_quote_at_index + 1:-1]
if repr_used_quote == use_quote:
# repr produced the quotes we wanted -
# escaping is correct.
return repr_escaped
    # repr used the other quote character, so the wrong quotes
    # are escaped; swap the escaping around.
if use_quote == SINGLE_QUOTE_TEXT:
# repr used double quotes
return (
repr_escaped
.replace('\\"', DOUBLE_QUOTE_TEXT)
.replace(SINGLE_QUOTE_TEXT, "\\'")
)
else:
# repr used single quotes
return (
repr_escaped
.replace("\\'", SINGLE_QUOTE_TEXT)
.replace(DOUBLE_QUOTE_TEXT, '\\"')
)
STR_LITERAL_ESCAPES = re.compile(
r'''((?:\\[\\abfnrtv"'])|'''
r'(?:\\N\{.*?\})|'
r'(?:\\u[a-fA-F0-9]{4})|'
r'(?:\\U[a-fA-F0-9]{8})|'
r'(?:\\x[a-fA-F0-9]{2})|'
r'(?:\\[0-7]{1,3}))'
)
def highlight_escapes(s):
if not s:
return NIL
matches = STR_LITERAL_ESCAPES.split(s)
starts_with_match = bool(STR_LITERAL_ESCAPES.match(matches[0]))
docs = []
for part, is_escaped in zip(
matches,
cycle([starts_with_match, not starts_with_match])
):
if not part:
continue
docs.append(
annotate(
(
Token.STRING_ESCAPE
if is_escaped
else Token.LITERAL_STRING
),
part
)
)
return concat(docs)
def pretty_single_line_str(s, indent, use_quote=None):
prefix = (
annotate(Token.STRING_AFFIX, 'b')
if isinstance(s, bytes)
else ''
)
if use_quote is None:
use_quote = determine_quote_strategy(s)
escaped = escape_str_for_quote(use_quote, s)
escapes_highlighted = highlight_escapes(escaped)
return concat([
prefix,
annotate(
Token.LITERAL_STRING,
concat([
use_quote,
escapes_highlighted,
use_quote
])
)
])
def split_at(idx, sequence):
return (sequence[:idx], sequence[idx:])
def escaped_len(s, use_quote):
return len(escape_str_for_quote(use_quote, s))
def str_to_lines(max_len, use_quote, s):
if isinstance(s, str):
whitespace_pattern = WHITESPACE_PATTERN_TEXT
nonword_pattern = NONWORD_PATTERN_TEXT
empty = ''
else:
assert isinstance(s, bytes)
whitespace_pattern = WHITESPACE_PATTERN_BYTES
nonword_pattern = NONWORD_PATTERN_BYTES
empty = b''
alternating_words_ws = whitespace_pattern.split(s)
if len(alternating_words_ws) <= 1:
# no whitespace: try splitting with nonword pattern.
alternating_words_ws = nonword_pattern.split(s)
starts_with_whitespace = bool(nonword_pattern.match(alternating_words_ws[0]))
else:
starts_with_whitespace = bool(whitespace_pattern.match(alternating_words_ws[0]))
# List[Tuple[str, bool]]
# The boolean associated with each part indicates if it is a
    # whitespace/non-word part or not.
tagged_alternating = list(
zip(
alternating_words_ws,
cycle([starts_with_whitespace, not starts_with_whitespace])
)
)
remaining_stack = list(reversed(tagged_alternating))
curr_line_parts = []
curr_line_len = 0
while remaining_stack:
curr, is_whitespace = remaining_stack.pop()
curr_line_parts.append(curr)
curr_line_len += escaped_len(curr, use_quote)
if curr_line_len == max_len:
if not is_whitespace and len(curr_line_parts) > 2:
curr_line_parts.pop()
yield empty.join(curr_line_parts)
curr_line_parts = []
curr_line_len = 0
remaining_stack.append((curr, is_whitespace))
else:
yield empty.join(curr_line_parts)
curr_line_parts = []
curr_line_len = 0
continue
elif curr_line_len > max_len:
if not is_whitespace and len(curr_line_parts) > 1:
curr_line_parts.pop()
yield empty.join(curr_line_parts)
remaining_stack.append((curr, is_whitespace))
curr_line_parts = []
curr_line_len = 0
continue
curr_line_parts.pop()
remaining_len = max_len - (curr_line_len - escaped_len(curr, use_quote))
this_line_part, next_line_part = split_at(max(remaining_len, 0), curr)
curr_line_parts.append(this_line_part)
yield empty.join(curr_line_parts)
curr_line_parts = []
curr_line_len = 0
if next_line_part:
remaining_stack.append((next_line_part, is_whitespace))
if curr_line_parts:
yield empty.join(curr_line_parts)
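# Illustrative behaviour (hypothetical input): the string is split on
# whitespace runs and re-packed greedily so each yielded chunk stays within
# max_len once escaped; trailing whitespace sticks to the preceding word.
#
#   list(str_to_lines(max_len=10, use_quote="'", s='lorem ipsum dolor'))
#   # -> ['lorem ', 'ipsum ', 'dolor']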
@register_pretty(str)
@register_pretty(bytes)
def pretty_str(s, ctx):
# Subclasses of str/bytes
# will be printed as StrSubclass('the actual string')
constructor = type(s)
is_native_type = constructor in (str, bytes)
if ctx.depth_left == 0:
if isinstance(s, str):
return prettycall(ctx, constructor, ...)
else:
assert isinstance(s, bytes)
return prettycall(ctx, constructor, ...)
multiline_strategy = ctx.multiline_strategy
peprint_indent = ctx.indent
def evaluator(indent, column, page_width, ribbon_width):
nonlocal multiline_strategy
columns_left_in_line = page_width - column
columns_left_in_ribbon = indent + ribbon_width - column
available_width = min(columns_left_in_line, columns_left_in_ribbon)
singleline_str_chars = len(s) + len('""')
flat_version = pretty_single_line_str(s, peprint_indent)
if singleline_str_chars <= available_width:
if is_native_type:
return flat_version
return build_fncall(ctx, constructor, argdocs=[flat_version])
# multiline string
each_line_starts_on_col = indent + peprint_indent
each_line_ends_on_col = min(page_width, each_line_starts_on_col + ribbon_width)
each_line_max_str_len = each_line_ends_on_col - each_line_starts_on_col - 2
use_quote = determine_quote_strategy(s)
lines = str_to_lines(
max_len=each_line_max_str_len,
use_quote=use_quote,
s=s,
)
parts = intersperse(
HARDLINE,
(
pretty_single_line_str(
line,
indent=peprint_indent,
use_quote=use_quote,
)
for line in lines
)
)
if not is_native_type:
multiline_strategy = MULTILINE_STATEGY_PLAIN
if multiline_strategy == MULTILINE_STATEGY_PLAIN:
res = always_break(concat(parts))
if is_native_type:
return res
return build_fncall(ctx, constructor, argdocs=[res])
elif multiline_strategy == MULTILINE_STATEGY_HANG:
return always_break(
nest(
peprint_indent,
concat(parts)
)
)
else:
if multiline_strategy == MULTILINE_STATEGY_PARENS:
left_paren, right_paren = LPAREN, RPAREN
else:
assert multiline_strategy == MULTILINE_STATEGY_INDENTED
left_paren, right_paren = '', ''
return always_break(
concat([
left_paren,
nest(
peprint_indent,
concat([
HARDLINE,
*parts,
])
),
(
HARDLINE
if multiline_strategy == MULTILINE_STATEGY_PARENS
else NIL
),
right_paren
])
)
return contextual(evaluator)
def _pretty_recursion(value):
return f'<Recursion on {type(value).__name__} with id={id(value)}>'
def python_to_sdocs(value, indent, width, depth, ribbon_width=71):
if depth is None:
depth = float('inf')
doc = pretty_python_value(
value,
ctx=PrettyContext(indent=indent, depth_left=depth, visited=set())
)
if is_commented(doc):
doc = group(
flat_choice(
when_flat=concat([
doc,
' ',
commentdoc(doc.annotation.value),
]),
when_broken=concat([
commentdoc(doc.annotation.value),
HARDLINE,
doc
])
)
)
ribbon_frac = min(1.0, ribbon_width / width)
return layout_smart(doc, width=width, ribbon_frac=ribbon_frac)
|
#!/usr/bin/python2.7
#coding:utf-8
def getFunc(n):
return lambda x, y: x**n + y**n
f = getFunc(2)
print '1**2 + 2**2 = ', f(1,2)
print '2**2 + 3**2 = ', f(2,3)
f = getFunc(3)
print '1**3 + 2**3 = ', f(1,2)
print '2**3 + 3**3 = ', f(2,3)
|
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/71_callback.tensorboard.ipynb (unless otherwise specified).
__all__ = ['TensorBoardCallback']
# Cell
from ..basics import *
# Cell
import tensorboard
from torch.utils.tensorboard import SummaryWriter
from .fp16 import ModelToHalf
# Cell
class TensorBoardCallback(Callback):
"Saves model topology, losses & metrics"
def __init__(self, log_dir=None, trace_model=True, log_preds=True, n_preds=9):
store_attr(self, 'log_dir,trace_model,log_preds,n_preds')
def before_fit(self):
self.run = not hasattr(self.learn, 'lr_finder') and not hasattr(self, "gather_preds") and rank_distrib()==0
self.writer = SummaryWriter(log_dir=self.log_dir)
if self.trace_model:
if hasattr(self.learn, 'mixed_precision'):
raise Exception("Can't trace model in mixed precision, pass `trace_model=False` or don't use FP16.")
b = self.dls.one_batch()
self.learn._split(b)
self.writer.add_graph(self.model, *self.xb)
def after_batch(self):
self.writer.add_scalar('train_loss', self.smooth_loss, self.train_iter)
for i,h in enumerate(self.opt.hypers):
for k,v in h.items(): self.writer.add_scalar(f'{k}_{i}', v, self.train_iter)
def after_epoch(self):
for n,v in zip(self.recorder.metric_names[2:-1], self.recorder.log[2:-1]):
self.writer.add_scalar(n, v, self.train_iter)
if self.log_preds:
b = self.dls.valid.one_batch()
self.learn.one_batch(0, b)
preds = getattr(self.loss_func, 'activation', noop)(self.pred)
out = getattr(self.loss_func, 'decodes', noop)(preds)
x,y,its,outs = self.dls.valid.show_results(b, out, show=False, max_n=self.n_preds)
tensorboard_log(x, y, its, outs, self.writer, self.train_iter)
def after_fit(self): self.writer.close()
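# Hedged usage sketch (learner, data and paths are placeholders): attach the
# callback when fitting a fastai Learner and point TensorBoard at log_dir.
#
#   learn = cnn_learner(dls, resnet18, metrics=accuracy)
#   learn.fit_one_cycle(3, cbs=TensorBoardCallback(log_dir='runs/exp1',
#                                                  trace_model=True))
#   # then inspect with: tensorboard --logdir runs/exp1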
# Cell
from ..vision.data import *
# Cell
@typedispatch
def tensorboard_log(x:TensorImage, y: TensorCategory, samples, outs, writer, step):
fig,axs = get_grid(len(samples), add_vert=1, return_fig=True)
for i in range(2):
axs = [b.show(ctx=c) for b,c in zip(samples.itemgot(i),axs)]
axs = [r.show(ctx=c, color='green' if b==r else 'red')
for b,r,c in zip(samples.itemgot(1),outs.itemgot(0),axs)]
writer.add_figure('Sample results', fig, step)
# Cell
from ..vision.core import TensorPoint,TensorBBox
# Cell
@typedispatch
def tensorboard_log(x:TensorImage, y: (TensorImageBase, TensorPoint, TensorBBox), samples, outs, writer, step):
fig,axs = get_grid(len(samples), add_vert=1, return_fig=True, double=True)
for i in range(2):
axs[::2] = [b.show(ctx=c) for b,c in zip(samples.itemgot(i),axs[::2])]
for x in [samples,outs]:
axs[1::2] = [b.show(ctx=c) for b,c in zip(x.itemgot(0),axs[1::2])]
writer.add_figure('Sample results', fig, step)
|
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.plugins.action import ActionBase
try:
from ansible_collections.ansible.utils.plugins.module_utils.common.argspec_validate import (
AnsibleArgSpecValidator,
)
except ImportError:
ANSIBLE_UTILS_IS_INSTALLED = False
else:
ANSIBLE_UTILS_IS_INSTALLED = True
from ansible.errors import AnsibleActionFail
from ansible_collections.cisco.ise.plugins.module_utils.ise import (
ISESDK,
ise_argument_spec,
)
# Get common arguments specification
argument_spec = ise_argument_spec()
# Add arguments specific for this module
argument_spec.update(dict(
name=dict(type="str"),
id=dict(type="str"),
page=dict(type="int"),
size=dict(type="int"),
))
required_if = []
required_one_of = []
mutually_exclusive = []
required_together = []
class ActionModule(ActionBase):
def __init__(self, *args, **kwargs):
if not ANSIBLE_UTILS_IS_INSTALLED:
raise AnsibleActionFail("ansible.utils is not installed. Execute 'ansible-galaxy collection install ansible.utils'")
super(ActionModule, self).__init__(*args, **kwargs)
self._supports_async = True
self._result = None
# Checks the supplied parameters against the argument spec for this module
def _check_argspec(self):
aav = AnsibleArgSpecValidator(
data=self._task.args,
schema=dict(argument_spec=argument_spec),
schema_format="argspec",
schema_conditionals=dict(
required_if=required_if,
required_one_of=required_one_of,
mutually_exclusive=mutually_exclusive,
required_together=required_together,
),
name=self._task.action,
)
valid, errors, self._task.args = aav.validate()
if not valid:
raise AnsibleActionFail(errors)
def get_object(self, params):
new_object = dict(
name=params.get("name"),
id=params.get("id"),
page=params.get("page"),
size=params.get("size"),
)
return new_object
def run(self, tmp=None, task_vars=None):
self._task.diff = False
self._result = super(ActionModule, self).run(tmp, task_vars)
self._result["changed"] = False
self._check_argspec()
ise = ISESDK(params=self._task.args)
id = self._task.args.get("id")
name = self._task.args.get("name")
if id:
response = ise.exec(
family="psn_node_details_with_radius_service",
function='get_session_service_node_by_id',
params=self.get_object(self._task.args)
).response['SessionServiceNode']
self._result.update(dict(ise_response=response))
self._result.update(ise.exit_json())
return self._result
if name:
response = ise.exec(
family="psn_node_details_with_radius_service",
function='get_session_service_node_by_name',
params=self.get_object(self._task.args)
).response['SessionServiceNode']
self._result.update(dict(ise_response=response))
self._result.update(ise.exit_json())
return self._result
if not name and not id:
response = []
generator = ise.exec(
family="psn_node_details_with_radius_service",
function='get_session_service_node_generator',
params=self.get_object(self._task.args),
)
for item in generator:
tmp_response = item.response['SearchResult']['resources']
if isinstance(tmp_response, list):
response += tmp_response
else:
response.append(tmp_response)
self._result.update(dict(ise_response=response))
self._result.update(ise.exit_json())
return self._result
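# Hedged playbook sketch: this action plugin backs an *_info style lookup, so
# a task would look roughly like the YAML below. The module name shown is
# hypothetical; host names and values are placeholders.
#
#   - name: Get session service node by name
#     cisco.ise.session_service_node_info:    # hypothetical module name
#       name: psn-node-1
#     register: result
#
# With neither `name` nor `id` supplied, run() pages through the generator
# and returns the full list of nodes in `ise_response`.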
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft and contributors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class CloudPool(Model):
"""
A pool in the Azure Batch service.
:param id: A string that uniquely identifies the pool within the account.
The id can contain any combination of alphanumeric characters including
hyphens and underscores, and cannot contain more than 64 characters.
:type id: str
:param display_name: The display name for the pool.
:type display_name: str
:param url: The URL of the pool.
:type url: str
:param e_tag: The ETag of the pool.
:type e_tag: str
:param last_modified: The last modified time of the pool.
:type last_modified: datetime
:param creation_time: The creation time of the pool.
:type creation_time: datetime
:param state: The current state of the pool. Possible values include:
'active', 'deleting', 'upgrading'
:type state: str or :class:`PoolState <azure.batch.models.PoolState>`
:param state_transition_time: The time at which the pool entered its
current state.
:type state_transition_time: datetime
:param allocation_state: Whether the pool is resizing. Possible values
include: 'steady', 'resizing', 'stopping'
:type allocation_state: str or :class:`AllocationState
<azure.batch.models.AllocationState>`
:param allocation_state_transition_time: The time at which the pool
entered its current allocation state.
:type allocation_state_transition_time: datetime
:param vm_size: The size of virtual machines in the pool. All virtual
machines in a pool are the same size.
:type vm_size: str
:param cloud_service_configuration: The cloud service configuration for
the pool. This property and VirtualMachineConfiguration are mutually
exclusive and one of the properties must be specified.
:type cloud_service_configuration: :class:`CloudServiceConfiguration
<azure.batch.models.CloudServiceConfiguration>`
:param virtual_machine_configuration: The virtual machine configuration
for the pool. This property and CloudServiceConfiguration are mutually
exclusive and one of the properties must be specified.
:type virtual_machine_configuration: :class:`VirtualMachineConfiguration
<azure.batch.models.VirtualMachineConfiguration>`
:param resize_timeout: The timeout for allocation of compute nodes to the
pool. In a Get Pool operation, this is the timeout for the most recent
resize operation. The default value is 10 minutes.
:type resize_timeout: timedelta
:param resize_error: Details of any error encountered while performing
the last resize on the pool. This property is set only if an error
occurred during the last pool resize, and only when the pool
AllocationState is Steady.
:type resize_error: :class:`ResizeError <azure.batch.models.ResizeError>`
:param current_dedicated: The number of compute nodes currently in the
pool.
:type current_dedicated: int
:param target_dedicated: The desired number of compute nodes in the pool.
This property must have the default value if EnableAutoScale is true. It
is required if EnableAutoScale is false.
:type target_dedicated: int
:param enable_auto_scale: Whether the pool size should automatically
adjust over time. If true, the AutoScaleFormula property must be set. If
false, the TargetDedicated property must be set.
:type enable_auto_scale: bool
:param auto_scale_formula: A formula for the desired number of compute
nodes in the pool.
:type auto_scale_formula: str
:param auto_scale_evaluation_interval: A time interval for the desired
AutoScale evaluation period in the pool.
:type auto_scale_evaluation_interval: timedelta
:param auto_scale_run: The results and errors from the last execution of
the autoscale formula.
:type auto_scale_run: :class:`AutoScaleRun
<azure.batch.models.AutoScaleRun>`
:param enable_inter_node_communication: Whether the pool permits direct
communication between nodes.
:type enable_inter_node_communication: bool
:param start_task: A task specified to run on each compute node as it
joins the pool.
:type start_task: :class:`StartTask <azure.batch.models.StartTask>`
:param certificate_references: The list of certificates to be installed
on each compute node in the pool.
:type certificate_references: list of :class:`CertificateReference
<azure.batch.models.CertificateReference>`
:param application_package_references: The list of application packages
to be installed on each compute node in the pool.
:type application_package_references: list of
:class:`ApplicationPackageReference
<azure.batch.models.ApplicationPackageReference>`
:param max_tasks_per_node: The maximum number of tasks that can run
concurrently on a single compute node in the pool.
:type max_tasks_per_node: int
:param task_scheduling_policy: How the Batch service distributes tasks
between compute nodes in the pool.
:type task_scheduling_policy: :class:`TaskSchedulingPolicy
<azure.batch.models.TaskSchedulingPolicy>`
:param metadata: A list of name-value pairs associated with the pool as
metadata.
:type metadata: list of :class:`MetadataItem
<azure.batch.models.MetadataItem>`
:param stats: Utilization and resource usage statistics for the entire
lifetime of the pool.
:type stats: :class:`PoolStatistics <azure.batch.models.PoolStatistics>`
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'display_name': {'key': 'displayName', 'type': 'str'},
'url': {'key': 'url', 'type': 'str'},
'e_tag': {'key': 'eTag', 'type': 'str'},
'last_modified': {'key': 'lastModified', 'type': 'iso-8601'},
'creation_time': {'key': 'creationTime', 'type': 'iso-8601'},
'state': {'key': 'state', 'type': 'PoolState'},
'state_transition_time': {'key': 'stateTransitionTime', 'type': 'iso-8601'},
'allocation_state': {'key': 'allocationState', 'type': 'AllocationState'},
'allocation_state_transition_time': {'key': 'allocationStateTransitionTime', 'type': 'iso-8601'},
'vm_size': {'key': 'vmSize', 'type': 'str'},
'cloud_service_configuration': {'key': 'cloudServiceConfiguration', 'type': 'CloudServiceConfiguration'},
'virtual_machine_configuration': {'key': 'virtualMachineConfiguration', 'type': 'VirtualMachineConfiguration'},
'resize_timeout': {'key': 'resizeTimeout', 'type': 'duration'},
'resize_error': {'key': 'resizeError', 'type': 'ResizeError'},
'current_dedicated': {'key': 'currentDedicated', 'type': 'int'},
'target_dedicated': {'key': 'targetDedicated', 'type': 'int'},
'enable_auto_scale': {'key': 'enableAutoScale', 'type': 'bool'},
'auto_scale_formula': {'key': 'autoScaleFormula', 'type': 'str'},
'auto_scale_evaluation_interval': {'key': 'autoScaleEvaluationInterval', 'type': 'duration'},
'auto_scale_run': {'key': 'autoScaleRun', 'type': 'AutoScaleRun'},
'enable_inter_node_communication': {'key': 'enableInterNodeCommunication', 'type': 'bool'},
'start_task': {'key': 'startTask', 'type': 'StartTask'},
'certificate_references': {'key': 'certificateReferences', 'type': '[CertificateReference]'},
'application_package_references': {'key': 'applicationPackageReferences', 'type': '[ApplicationPackageReference]'},
'max_tasks_per_node': {'key': 'maxTasksPerNode', 'type': 'int'},
'task_scheduling_policy': {'key': 'taskSchedulingPolicy', 'type': 'TaskSchedulingPolicy'},
'metadata': {'key': 'metadata', 'type': '[MetadataItem]'},
'stats': {'key': 'stats', 'type': 'PoolStatistics'},
}
def __init__(self, id=None, display_name=None, url=None, e_tag=None, last_modified=None, creation_time=None, state=None, state_transition_time=None, allocation_state=None, allocation_state_transition_time=None, vm_size=None, cloud_service_configuration=None, virtual_machine_configuration=None, resize_timeout=None, resize_error=None, current_dedicated=None, target_dedicated=None, enable_auto_scale=None, auto_scale_formula=None, auto_scale_evaluation_interval=None, auto_scale_run=None, enable_inter_node_communication=None, start_task=None, certificate_references=None, application_package_references=None, max_tasks_per_node=None, task_scheduling_policy=None, metadata=None, stats=None):
self.id = id
self.display_name = display_name
self.url = url
self.e_tag = e_tag
self.last_modified = last_modified
self.creation_time = creation_time
self.state = state
self.state_transition_time = state_transition_time
self.allocation_state = allocation_state
self.allocation_state_transition_time = allocation_state_transition_time
self.vm_size = vm_size
self.cloud_service_configuration = cloud_service_configuration
self.virtual_machine_configuration = virtual_machine_configuration
self.resize_timeout = resize_timeout
self.resize_error = resize_error
self.current_dedicated = current_dedicated
self.target_dedicated = target_dedicated
self.enable_auto_scale = enable_auto_scale
self.auto_scale_formula = auto_scale_formula
self.auto_scale_evaluation_interval = auto_scale_evaluation_interval
self.auto_scale_run = auto_scale_run
self.enable_inter_node_communication = enable_inter_node_communication
self.start_task = start_task
self.certificate_references = certificate_references
self.application_package_references = application_package_references
self.max_tasks_per_node = max_tasks_per_node
self.task_scheduling_policy = task_scheduling_policy
self.metadata = metadata
self.stats = stats
|
import os
import yaml
import pandas as pd
import xml.etree.ElementTree as ET
from types import SimpleNamespace
from sklearn.model_selection import train_test_split
from utils.experiment_utils import create_linspace
from utils.preprocess import *
SOURCE_PATH = './source_data'
DATA_PATH = './data'
CONFIG_PATH = './conf'
DATASETS = ['ami', 'emoevent', 'haternet', 'hateval2019', 'mex-a3t', 'universal_joy', 'tass2019', 'detoxis']
class Colors:
BLACK = '\033[1;30m'
RED = '\033[1;31m'
GREEN = '\033[1;32m'
YELLOW = '\033[1;33m'
BLUE = '\033[1;34m'
PURPLE = '\033[1;35m'
CYAN = '\033[1;36m'
WHITE = '\033[1;37m'
ENDC = '\033[0m'
def colored(text, color):
return f'{color}{text}{Colors.ENDC}'
def write_split_files(dataset, trn, dev, tst):
trn.to_csv(os.path.join(DATA_PATH, dataset, 'train_es.tsv'), index=False, sep='\t', mode='w')
dev.to_csv(os.path.join(DATA_PATH, dataset, 'dev_es.tsv'), index=False, sep='\t', mode='w')
tst.to_csv(os.path.join(DATA_PATH, dataset, 'test_es.tsv'), index=False, sep='\t', mode='w')
def prepare_files():
seed = 100
test_ratio = 0.2
# EmoEvent and HaterNet
filename = 'original_es.tsv'
data = {'emoevent': pd.read_csv(os.path.join(SOURCE_PATH, 'emoevent', filename), sep='\t'),
'haternet': pd.read_csv(os.path.join(SOURCE_PATH, 'haternet', filename), sep=';\\|\\|;',
names=['id', 'text', 'hateful'],
header=None,
engine="python")}
labels = {'emoevent': 'offensive',
'haternet': 'hateful'}
for dataset in data:
data[dataset].text = basic_text_normalization(data[dataset].text)
y = data[dataset][labels[dataset]]
trn, tst = train_test_split(data[dataset], shuffle=True, test_size=test_ratio, stratify=y, random_state=seed)
y = trn[labels[dataset]]
trn, dev = train_test_split(trn, shuffle=True, test_size=test_ratio, stratify=y, random_state=seed)
write_split_files(dataset, trn, dev, tst)
print(f'Dataset: {dataset} --> N. Instances: {data[dataset].shape[0]} --> Train, Dev., Test: '
f'{trn.shape[0]}, {dev.shape[0]}, {tst.shape[0]}')
# HatEval 2019
dataset = 'hateval2019'
n_instances = {}
for phase in ['train', 'dev', 'test']:
data = pd.read_csv(os.path.join(SOURCE_PATH, dataset, f'original_{phase}_es.csv'), sep=',')
data.text = basic_text_normalization(data.text)
data.to_csv(os.path.join(DATA_PATH, dataset, f'{phase}_es.tsv'), index=False, sep='\t', mode='w')
n_instances[phase] = data.shape[0]
print(f'Dataset: {dataset} --> N. Instances: {sum(n_instances.values())} --> Train, Dev., Test: '
f'{n_instances["train"]}, {n_instances["dev"]}, {n_instances["test"]}')
# MEX-A3T
dataset = 'mex-a3t'
columns = ['text', 'aggressiveness']
trn = pd.read_csv(os.path.join(SOURCE_PATH, dataset, 'original_train.tsv'), sep='\t', names=columns)
tst = pd.read_csv(os.path.join(SOURCE_PATH, dataset, 'original_test.tsv'), sep='\t', names=columns)
trn, dev = train_test_split(trn, shuffle=True, test_size=test_ratio, stratify=trn.aggressiveness, random_state=seed)
for subset in [trn, dev, tst]:
subset.text = basic_text_normalization(subset.text)
write_split_files(dataset, trn, dev, tst)
print(f'Dataset: {dataset} --> N. Instances: {trn.shape[0] + dev.shape[0] + tst.shape[0]} --> Train, Dev., Test: '
f'{trn.shape[0]}, {dev.shape[0]}, {tst.shape[0]}')
# TASS 2019
dataset = 'tass2019'
n_instances = {}
for phase in ['train', 'dev', 'test']:
phase_data = pd.DataFrame()
for country in ['ES', 'CR', 'MX', 'PE', 'UY']:
root = ET.parse(os.path.join(SOURCE_PATH, dataset, f'TASS2019_country_{country}_{phase}.xml')).getroot()
tweets = []
for item in root.iter('tweet'):
tweet = {'country': country}
for tweet_field in item.iter():
if tweet_field.tag not in ['tweet', 'sentiment', 'polarity']:
tweet[tweet_field.tag] = tweet_field.text
tweets.append(tweet)
            phase_data = pd.concat([phase_data, pd.DataFrame(tweets)], ignore_index=True)
new_cols = {'tweetid': 'tweet_id', 'content': 'text', 'user': 'user_id', 'value': 'polarity'}
phase_data.rename(columns=new_cols, inplace=True)
phase_data = phase_data[['tweet_id', 'user_id', 'country', 'date', 'text', 'polarity']]
phase_data.text = basic_text_normalization(phase_data.text)
phase_data.to_csv(os.path.join(DATA_PATH, dataset, f'{phase}_es.tsv'), index=False, sep='\t', mode='w')
n_instances[phase] = phase_data.shape[0]
print(f'Dataset: {dataset} --> N. Instances: {sum(n_instances.values())} --> Train, Dev., Test: '
f'{n_instances["train"]}, {n_instances["dev"]}, {n_instances["test"]}')
# Universal Joy
dataset = 'universal_joy'
trn_data = {}
for filename in ['small', 'large', 'combi']:
trn_data[filename] = pd.read_csv(os.path.join(SOURCE_PATH, dataset, filename + '.csv'))
trn_data[filename] = trn_data[filename][trn_data[filename].language == 'es']
trn_data[filename].text = trn_data[filename].text.apply(universal_joy_cleaning)
    # Apparently, the Spanish comments in 'large' and 'combi' are the same and 'small' is created using a subset of those
trn = pd.concat(trn_data.values(), axis=0, ignore_index=True)
trn.drop_duplicates(inplace=True, subset='text')
# There is no overlapping between training, validation and test (also, they do not contain duplicates)
dev = pd.read_csv(os.path.join(SOURCE_PATH, dataset, 'val.csv'))
dev.drop_duplicates(inplace=True, subset='text')
tst = pd.read_csv(os.path.join(SOURCE_PATH, dataset, 'test.csv'))
tst.drop_duplicates(inplace=True, subset='text')
# The test set approximately represents 12.5% of the total data
# print(tst.shape[0]/(trn.shape[0] + dev.shape[0] + tst.shape[0]))
# DETOXIS
dataset = 'detoxis'
trn = pd.read_csv(os.path.join(SOURCE_PATH, dataset, f'train.csv'), sep=',')
tst = pd.read_csv(os.path.join(SOURCE_PATH, dataset, f'test.csv'), sep=',')
trn, dev = train_test_split(trn, shuffle=True, test_size=test_ratio, stratify=trn.toxicity_level, random_state=seed)
for subset in [trn, dev, tst]:
subset.rename(columns={'comment': 'text'}, inplace=True)
subset.text = basic_text_normalization(subset.text)
write_split_files(dataset, trn, dev, tst)
print(f'Dataset: {dataset} --> N. Instances: {trn.shape[0] + dev.shape[0] + tst.shape[0]} --> Train, Dev., Test: '
f'{trn.shape[0]}, {dev.shape[0]}, {tst.shape[0]}')
def read_datasets(datasets, tasks, lang='es'):
data = {}
for dataset in datasets:
if dataset not in DATASETS:
raise Exception(f'Dataset {dataset} is not in the list of available datasets!')
data[dataset] = {
'trn': pd.read_csv(os.path.join(DATA_PATH, dataset, f'train_{lang}.tsv'), sep='\t'),
'dev': pd.read_csv(os.path.join(DATA_PATH, dataset, f'dev_{lang}.tsv'), sep='\t'),
'tst': pd.read_csv(os.path.join(DATA_PATH, dataset, f'test_{lang}.tsv'), sep='\t')
}
for phase in data[dataset]:
data[dataset][phase] = data[dataset][phase][['text'] + tasks[dataset]]
return data
def create_namespace_from_dict(dic, name=None):
for k, v in dic.items():
if isinstance(v, dict):
dic[k] = create_namespace_from_dict(v, k)
ns = SimpleNamespace(**dic)
ns.__name__ = name
return ns
def process_config(dic, name=None):
for k, v in dic.items():
if k not in ['transfer_learning', 'optimization']:
if isinstance(v, dict):
dic[k] = process_config(v, k)
elif isinstance(v, list):
for vi in v:
if isinstance(vi, dict):
dic[k] += create_linspace(vi)
dic[k] = dic[k][1:]
else:
dic[k] = [v]
return dic
def load_config(config_file):
with open(os.path.join(CONFIG_PATH, config_file), 'r') as f:
config = yaml.load(f, Loader=yaml.FullLoader)
return process_config(config) # create_namespace_from_dict(config)
def log(string, indent=0):
start = '\t' * indent
print(f'{start}{string}')
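# --- Usage sketch (illustrative only) ---
# A minimal example of how these helpers are meant to fit together. The config
# file name and the per-dataset task mapping below are assumptions made for the
# sake of the example, not files or labels shipped with this script.
if __name__ == '__main__':
    prepare_files()  # build the train/dev/test TSV splits under ./data
    config = load_config('example.yaml')  # every hyperparameter ends up as a list of candidate values
    tasks = {'emoevent': ['offensive'], 'haternet': ['hateful']}  # assumed label columns
    data = read_datasets(['emoevent', 'haternet'], tasks)
    log(f'Loaded {len(data)} datasets', indent=1)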
|
from django import template
from django.contrib.admin.models import LogEntry
register = template.Library()
class AdminLogNode(template.Node):
def __init__(self, limit, varname, user):
self.limit, self.varname, self.user = limit, varname, user
def __repr__(self):
return "<GetAdminLog Node>"
def render(self, context):
if self.user is None:
context[self.varname] = LogEntry.objects.all().select_related('content_type', 'user')[:self.limit]
else:
user_id = self.user
if not user_id.isdigit():
user_id = context[self.user].pk
context[self.varname] = LogEntry.objects.filter(user__pk__exact=user_id).select_related('content_type', 'user')[:int(self.limit)]
return ''
@register.tag
def get_admin_log(parser, token):
"""
Populates a template variable with the admin log for the given criteria.
Usage::
{% get_admin_log [limit] as [varname] for_user [context_var_containing_user_obj] %}
Examples::
{% get_admin_log 10 as admin_log for_user 23 %}
{% get_admin_log 10 as admin_log for_user user %}
{% get_admin_log 10 as admin_log %}
Note that ``context_var_containing_user_obj`` can be a hard-coded integer
(user ID) or the name of a template context variable containing the user
object whose ID you want.
"""
tokens = token.contents.split()
if len(tokens) < 4:
raise template.TemplateSyntaxError(
"'get_admin_log' statements require two arguments")
if not tokens[1].isdigit():
raise template.TemplateSyntaxError(
"First argument to 'get_admin_log' must be an integer")
if tokens[2] != 'as':
raise template.TemplateSyntaxError(
"Second argument to 'get_admin_log' must be 'as'")
if len(tokens) > 4:
if tokens[4] != 'for_user':
raise template.TemplateSyntaxError(
"Fourth argument to 'get_admin_log' must be 'for_user'")
return AdminLogNode(limit=tokens[1], varname=tokens[3], user=(len(tokens) > 5 and tokens[5] or None))
|
# -*- coding: utf-8 -*-
"""
Created on Tue Aug 21 15:53:46 2018
@author: Eric S. Russell
Laboratory for Atmospheric Research
Dept. of Civil and Environmental Engineering
Washington State University
eric.s.russell@wsu.edu
Not all of these functions are used in the column rename script; these are potentially to be used with this processing
depending on other's thoughts. This is a trial run of dealing with code across sites.
"""
import numpy as np
import pandas as pd
import datetime
"""
QA/QC processing for flux data:
Inputs:
data: Full input data
grade: Maximum QA/QC grade as assigned by the flux calculation code
LE_B: Two number array with the highest (LE_B[1]) and lowest (LE_B[0]) hard limit LE value
H_B: Same as LE_B but for H
F_B: Same as LE-B but for Fc
cls:
gg:
Outputs:
data: Dataframe with the filtered data; does not track reason for removing data.
Conditional for door_is_open_Hst since not all sites will/do have enclosure door sensors installed
"""
# This function not implemented into the script; still thinking about how I want to format this and integrate so user doesn't have to do a lot to make work
def Grade_cs(df,info, Site, site=False):
if site == True:
grade = int(info['grade'][Site])
LE_B = [float(info['LEL'][Site]),float(info['LEU'][Site])]
H_B = [float(info['HL'][Site]),float(info['HU'][Site])]
F_B = [float(info['FCL'][Site]),float(info['FCU'][Site])]
T_B = [float(info['TL'][Site]),float(info['TU'][Site])]
elif site == False:
grade = int(info['Val_L']['grade'])
LE_B = [float(info['Val_L']['LE_B']),float(info['Val_U']['LE_B'])]
H_B = [float(info['Val_L']['H_B']),float(info['Val_U']['H_B'])]
F_B = [float(info['Val_L']['F_B']),float(info['Val_U']['F_B'])]
T_B = [float(info['Val_L']['T_B']),float(info['Val_U']['T_B'])]
gg = ['H_SSITC_TEST','LE_SSITC_TEST','FC_SSITC_TEST','TAU_SSITC_TEST']
cls =['H','LE','FC', 'TAU']
# var = ['H_Flags','LE_Flags','Fc_Flags'] Needs flagging system for QC
pd.options.mode.chained_assignment = None
if (grade >9) | (grade<1):
        print('Grade number must be between 1 and 9.')
return # 'exit' function and return error
Good = None
data = []; data=pd.DataFrame(data,index=df.index)
if cls[1] in df.columns:
HL = (df[cls[1]].astype(float) < LE_B[0]) | (df[cls[1]].astype(float)>LE_B[1]) | df[cls[1]].astype(float).isnull()
if gg[1] in df.columns:
Grade = (df[gg[1]].astype(float) <= grade) & (~HL)
else: Grade = ~HL
df[cls[1]][~Grade] = np.NaN
data[cls[1]+'_Flag'] = 0
data[cls[1]+'_Flag'][~Grade] = 1
if cls[0] in df.columns:
HL = (df[cls[0]].astype(float) < H_B[0]) | (df[cls[0]].astype(float)> H_B[1]) | df[cls[0]].astype(float).isnull()
if gg[0] in df.columns:
Grade = (df[gg[0]].astype(float) <= grade) & (~HL)
else: Grade = ~HL
df[cls[0]][~Grade] = np.NaN
data[cls[0]+'_Flag'] = 0
data[cls[0]+'_Flag'][~Grade] = 1
if cls[2] in df.columns:
HL = (df[cls[2]].astype(float) < F_B[0])|(df[cls[2]].astype(float) > F_B[1]) | df[cls[2]].astype(float).isnull()
if gg[2] in df.columns:
Grade = (df[gg[2]].astype(float) <= grade) & (~HL)
else: Grade = ~HL
df[cls[2]][~Grade] = np.NaN
data[cls[2]+'_Flag'] = 0
data[cls[2]+'_Flag'][~Grade] = 1
if cls[3] in df.columns:
HL = (df[cls[3]].astype(float) < T_B[0])|(df[cls[3]].astype(float) > T_B[1]) | df[cls[3]].astype(float).isnull()
if gg[3] in df.columns:
Grade = (df[gg[3]].astype(float) <= grade) & (~HL)
else: Grade = ~HL
data[cls[3]+'_Flag'] = 0
data[cls[3]+'_Flag'][~Grade] = 1
# Rain Mask
if 'P' in df.columns:
Precip = (df['P'].astype(float) == 0) | (df['P'].astype(float) == -9999)
precip = True
data['P_Flag'] = 0
data['P_Flag'][~Precip] = 1
else: precip = False
if 'CO2_sig_strgth_Min' in df.columns:
c_sig_strength = df['CO2_sig_strgth_Min'] > 0.7
data['CO2_Signal_Strength'] = 0
data['CO2_Signal_Strength'][~c_sig_strength] = 1
if 'H2O_sig_strgth_Min' in df.columns:
w_sig_strength = df['H2O_sig_strgth_Min'] > 0.7
data['H2O_Signal_Strength'] = 0
data['H2O_Signal_Strength'][~w_sig_strength] = 1
if 'CO2_samples_Tot' in df.columns:
Samp_Good_IRGA = df['CO2_samples_Tot'].astype(float)>14400
data['CO2_Samples_Flag'] = 0
data['CO2_Samples_Flag'][~Samp_Good_IRGA] = 1
irga = True
else: irga=False
if 'sonic_samples_Tot' in df.columns:
Samp_Good_Sonic = df['sonic_samples_Tot'].astype(float) > 14400
data['Sonic_Samples_Flag'] = 0
data['Sonic_Samples_Flag'][~Samp_Good_Sonic] = 1
sonic = True
else: sonic=False
if 'used_records' in df.columns:
Samp_Good_Sonic = df['used_records'].astype(float)>14400
sonic = True
else: sonic=False
if 'door_is_open_Hst' in df.columns:
Door_Closed = df['door_is_open_Hst'].astype(float) == 0
pc = True
else: pc = False
if precip&irga&sonic&pc:
Good = Door_Closed &Samp_Good_Sonic&Samp_Good_IRGA&Precip&w_sig_strength&c_sig_strength
elif precip&irga&sonic&~pc:
Good = Samp_Good_Sonic&Samp_Good_IRGA&Precip&w_sig_strength&c_sig_strength
elif precip&~irga&~sonic&~pc:
Good = Precip&w_sig_strength&c_sig_strength
elif precip&~irga&sonic&~pc:
Good = Samp_Good_Sonic&Precip&w_sig_strength&c_sig_strength
elif ~precip&~irga&sonic&~pc:
Good = Samp_Good_Sonic&w_sig_strength&c_sig_strength
elif ~precip&irga&sonic&pc:
Good = Samp_Good_Sonic&Samp_Good_IRGA&w_sig_strength&c_sig_strength
if Good is not None:
if cls[3] in df.columns:
df[cls[3]][~Good] = np.NaN
if cls[2] in df.columns:
df[cls[2]][~Good] = np.NaN
if cls[1] in df.columns:
df[cls[1]][~Good] = np.NaN
if cls[0] in df.columns:
df[cls[0]][~Good] = np.NaN
return df, data
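# --- Usage sketch for Grade_cs (illustrative only) ---
# Assumes 'flux' is a half-hourly dataframe with 'LE', 'H', 'FC' and 'TAU'
# columns (plus the optional *_SSITC_TEST grade columns) and 'info' is the
# site limits table read from a *.csv; the names and path are assumptions.
#   info = pd.read_csv('QC_limits.csv', index_col=0)
#   flux, flags = Grade_cs(flux, info, Site=None, site=False)
# 'flux' comes back with out-of-range or low-grade values set to NaN and
# 'flags' holds the corresponding 0/1 flag columns.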
# Fills in the blank spaces with NaNs so the time index is continuous
def indx_fill(df, time):
df.index = pd.to_datetime(df.index)
# Sort index in case it came in out of order, a possibility depending on filenames and naming scheme
df = df.sort_index()
# Remove any duplicate times, can occur if files from mixed sources and have overlapping endpoints
df = df[~df.index.duplicated(keep='first')]
    # Drop any rows whose timestamps failed to parse (NaT); dropping inside a
    # range-based loop shrinks the frame and can raise an IndexError
    df = df[df.index.notna()]
# Fill in missing times due to tower being down and pad dataframe to midnight of the first and last day
idx = pd.date_range(df.index[0].floor('D'),df.index[len(df.index)-1].ceil('D'),freq = time)
df = df.reindex(idx, fill_value=np.NaN)
return df
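# Example (illustrative): pad a 30-minute record so it starts and ends at
# midnight and has a continuous index; gaps in time become rows of NaN.
#   df = indx_fill(df, '30T')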
# Used to format EddyPro data by combining the date and time into a common index and dropping the filename column
def format_ep(df):
df.index = df['date']+' '+df['time']
df = df.drop(['filename'],1)
df.index = pd.to_datetime(df.index)
return df
# This function not used in main script; potential to be used with QC function
def ReadIn_Initial(info):
# Values pulled in from a separate *.csv file because easier and flexible
grade = int(info['Val_L']['grade'])
LE_B = [float(info['Val_L']['LE_B']),float(info['Val_U']['LE_B'])]
H_B = [float(info['Val_L']['H_B']),float(info['Val_U']['H_B'])]
F_B = [float(info['Val_L']['F_B']),float(info['Val_U']['F_B'])]
gg = [(info['Val_L']['gg']),(info['Val_U']['gg']),(info['Val_3']['gg'])]
cls = [(info['Val_L']['cls']),(info['Val_U']['cls']),(info['Val_3']['cls']), (info['Val_4']['cls'])]
return grade, LE_B,H_B,F_B,gg,cls
# Reads in a directory of files based on the format for either EddyPro or EasyFlux
def Fast_Read(filenames, time, form):
if len(filenames) == 0:
print('No Files in directory, check the path name.')
return # 'exit' function and return error
else:
#Initialize dataframe used within function
Final = [];Final = pd.DataFrame(Final)
if form == 'EF':
for k in range (0,len(filenames)):
df = pd.read_csv(filenames[k],index_col = 'TIMESTAMP',header= 1,skiprows=[2,3],low_memory=False)
Final = pd.concat([Final,df], sort = False)
elif form == 'EP':
for k in range (0,len(filenames)):
df = pd.read_csv(filenames[k],header= 1,skiprows=[2],sep=',',low_memory=False)
Final = pd.concat([Final,df])
Final.index = Final['date']+' '+Final['time'] # Eddypro outputs both time and date as separate columns
Final =Final.drop(['filename'],1) # not needed string-based column; gets in the way of converting to floating point
elif form == 'Biomet':
for k in range (0,len(filenames)):
df = pd.read_csv(filenames[k],header= 0,skiprows=[1],sep=',',low_memory=False)
Final = pd.concat([Final,df])
Final.index = Final['date']+' '+Final['time'] # Eddypro outputs both time and date as separate columns
else:
            print('Format must be EF, EP, or Biomet')
return
# Convert time index
Final = Final.sort_index()
Out = indx_fill(Final, time)
return Out # Return dataframe to main function.
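# Example call (illustrative): stack a directory of EasyFlux output files onto
# a continuous 30-minute index. The glob pattern is an assumption about the
# file layout, not something this module enforces.
#   import glob
#   files = sorted(glob.glob('/path/to/site/*Flux_CSFormat*.dat'))
#   flux = Fast_Read(files, '30T', 'EF')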
def Despike_7(s,ss,x,lab,delta_time, multi):
an,Tim = [],[]
while ss < x.index[-1]:
x_m = np.nanmean(x[ss:s])
x_s = np.nanstd(x[ss:s])
x_d = x[ss:s]
an.append((x_d > (x_m-(multi*x_s))) & (x_d < (x_m+(multi*x_s))))
ss+= datetime.timedelta(days=delta_time)
Tim.append((x_d.index))
s+= datetime.timedelta(days=delta_time)
qq = np.hstack(an)
an = pd.DataFrame(qq, columns = [lab])
an.index = np.hstack(Tim)
an = an[~an.index.duplicated(keep='first')]
# x[an[lab]==False] = np.NaN
return an
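# Example (illustrative): flag spikes in a series 'x' with a moving 7-day
# window and a 5-sigma band; 'ss' and 's' bracket the first window. The label
# 'LE_keep' is just an example column name.
#   ss = x.index[0]
#   s = ss + datetime.timedelta(days=7)
#   keep = Despike_7(s, ss, x, 'LE_keep', delta_time=7, multi=5)
#   x[~keep['LE_keep']] = np.NaN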
def Met_QAQC(**kwargs):
Q = None
if 'Tair' in kwargs.keys():
Tair = pd.DataFrame(kwargs['Tair'])
Q = Tair; Q = pd.DataFrame(Q);
Q['Tair_Hard_Limit'] = (Q[Tair.columns[0]].astype(float) <= 50) & (Q[Tair.columns[0]].astype(float) >= -40)
        Q['Tair_Change'] = ~(np.abs(Q[Tair.columns[0]].diff()) >= 25) & (np.abs(Q[Tair.columns[0]].diff()) != 0) # (~np.isnan(Q[Tair.columns[0]].diff())) &
        Q['Tair_Day_Change'] = (Tair[Tair.columns[0]].resample('D').mean().diff() != 0).reindex(Q.index, method='ffill')
Q['Tair_Filtered'] = Q[Tair.columns[0]][Q['Tair_Hard_Limit'] & Q['Tair_Change'] & Q['Tair_Day_Change']]
else:
print('**** Temperature not present ****')
if 'RH' in kwargs.keys():
RH = pd.DataFrame(kwargs['RH'])
if Q is None:
Q = RH; Q = pd.DataFrame(Q)
else: Q= Q.join(RH)
Q['RH_Hard_Limit'] = (Q[RH.columns[0]].astype(float) <= 100) & (Q[RH.columns[0]].astype(float) >= 0)
Q['RH_gt_100'] = (Q[RH.columns[0]].astype(float) >= 100) & (Q[RH.columns[0]].astype(float) <= 110)
        Q['RH_Change'] = (np.abs(Q[RH.columns[0]].astype(float).diff()) <= 50) & (np.abs(Q[RH.columns[0]].diff()) != 0) # & (~np.isnan(Q[RH.columns[0]].astype(float).diff()))
        Q['RH_Day_Change'] = (RH[RH.columns[0]].resample('D').mean().diff() != 0).reindex(Q.index, method='ffill')
Q['RH_Filtered'] = Q[RH.columns[0]][Q['RH_Hard_Limit']&Q['RH_Change']& Q['RH_Day_Change']]
Q['RH_Filtered'] = Q['RH_Filtered'].replace(to_replace=Q['RH_Filtered'][Q['RH_gt_100']], value = 100)
# Q['RH_Filtered'][Q['RH_gt_100']]=100
else:
print('**** RH not present ****')
if 'P' in kwargs.keys():
P = pd.DataFrame(kwargs['P']);
if Q is None:
Q = P; Q = pd.DataFrame(Q)
else: Q= Q.join(P)
Q['P_Hard_Limit'] = (Q[P.columns[0]].astype(float) <= 100) &(Q[P.columns[0]].astype(float) >= 70)
        Q['P_Change'] = (np.abs(Q[P.columns[0]].diff()) <= 3.1) & (np.abs(Q[P.columns[0]].diff()) != 0) # & (~np.isnan(Q[P.columns[0]].diff()))
Q['P_Filtered'] = Q[P.columns[0]][Q['P_Hard_Limit'] & Q['P_Change']]
if ('Tair' in kwargs.keys()) & ('z' in kwargs.keys()):
MSLP = [];
H = pd.DataFrame((8.314*(Tair[Tair.columns[0]]+273.15))/(0.029*9.81)/1000) # Scale height
x = pd.DataFrame(-kwargs['z']/H[H.columns[0]]);
MSLP = P[P.columns[0]]/np.exp(x[x.columns[0]]) # Mean Sea Level Pressure
MSLP = pd.DataFrame(MSLP);MSLP = MSLP.rename(columns={MSLP.columns[0]:"MSLP"})
Q= Q.join(MSLP)
Q['MSLP_Hard_Limit'] = (Q[MSLP.columns[0]].astype(float) <= 110) &(Q[MSLP.columns[0]].astype(float) >= 80)
            Q['MSLP_Change'] = (np.abs(Q[MSLP.columns[0]].diff()) <= 31) & (np.abs(Q[MSLP.columns[0]].diff()) != 0) #& (~np.isnan(Q[MSLP.columns[0]].diff()))
Q['MSLP_Filtered'] = Q[MSLP.columns[0]][Q['MSLP_Hard_Limit'] & Q['MSLP_Change']]
else:
print('**** Mean sea level pressure not present ****')
else:
print('**** Pressure not present ****')
if 'WS' in kwargs.keys():
WS = pd.DataFrame(kwargs['WS'])
if Q is None:
Q = WS; Q = pd.DataFrame(Q)
else: Q= Q.join(WS)
Q['WS_Hard_Limit'] = (Q[WS.columns[0]].astype(float) < 60) & (Q[WS.columns[0]].astype(float) >= 0)
        Q['WS_Change'] = (np.abs(Q[WS.columns[0]].diff()) <= 15) & (np.abs(Q[WS.columns[0]].diff()) != 0) #& (~np.isnan(Q[WS.columns[0]].diff()))
        Q['WS_Day_Change'] = (WS[WS.columns[0]].resample('D').mean().diff() != 0).reindex(Q.index, method='ffill')
Q['WS_Filtered'] = Q[WS.columns[0]][Q['WS_Hard_Limit']&Q['WS_Change']&Q['WS_Day_Change']]
else:
print('**** Wind Speed not present ****')
if 'WD' in kwargs.keys():
WD = pd.DataFrame(kwargs['WD'])
if Q is None:
Q = WD; Q = pd.DataFrame(Q)
else: Q= Q.join(WD)
Q['WD_Hard_Limit'] = (Q[WD.columns[0]].astype(float) < 360) & (Q[WD.columns[0]].astype(float) >= 0)
        Q['WD_Change'] = (np.abs(Q[WD.columns[0]].diff()) != 0) # (~np.isnan(Q[WD.columns[0]].diff())) &
Q['WD_Filtered'] = Q[WD.columns[0]][Q['WD_Hard_Limit']&Q['WD_Change']]
else:
print('**** Wind Direction not present ****')
if 'PAR' in kwargs.keys():
PAR = pd.DataFrame(kwargs['PAR']);
if Q is None:
Q = PAR; Q = pd.DataFrame(Q)
else: Q= Q.join(PAR)
Q['PAR_Hard_Limit'] = (Q[PAR.columns[0]].astype(float) >= 0) & (Q[PAR.columns[0]].astype(float) < 5000)
        Q['PAR_Change'] = (np.abs(Q[PAR.columns[0]].diff()) <= 1500)# & (~np.isnan(Q[PAR.columns[0]].diff()))
        Q['PAR_Day_Change'] = (PAR[PAR.columns[0]].resample('D').mean().diff() != 0).reindex(Q.index, method='ffill')
Q['PAR_Filtered'] = Q[PAR.columns[0]][Q['PAR_Hard_Limit']&Q['PAR_Change']&Q['PAR_Day_Change']]
else:
print('**** PAR not present ****')
if 'Rn' in kwargs.keys():
Rn = pd.DataFrame(kwargs['Rn'])
if Q is None:
Q = Rn; Q = pd.DataFrame(Q)
else: Q= Q.join(Rn)
Q['Rn_Hard_Limit'] = (Q[Rn.columns[0]].astype(float) >= -150) & (Q[Rn.columns[0]].astype(float) <= 1500)
        Q['Rn_Change'] = (np.abs(Q[Rn.columns[0]].astype(float).diff()) <= 500) & (np.abs(Q[Rn.columns[0]].diff()) != 0) #& (~np.isnan(Q[Rn.columns[0]].astype(float).diff()))
        Q['Rn_Day_Change'] = (Rn[Rn.columns[0]].resample('D').mean().diff() != 0).reindex(Q.index, method='ffill')
Q['Rn_Filtered'] = Q[Rn.columns[0]][Q['Rn_Hard_Limit']&Q['Rn_Change']&Q['Rn_Day_Change']]
else:
print('**** Net Radiations not present ****')
if 'Precip' in kwargs.keys():
Precip = pd.DataFrame(kwargs['Precip'])
if Q is None:
            Q = Precip; Q = pd.DataFrame(Q)
else: Q= Q.join(Precip)
Q['Precip_Hard_Limit'] = (Q[Precip.columns[0]].astype(float) < 100) & (Q[Precip.columns[0]].astype(float) >= 0)
Z_Precip = Q[Precip.columns[0]].astype(float) ==0
# if ('RH' in kwargs.keys()) & ('Tair' in kwargs.keys()):
# Q['Precip_RH_gt_90'] = (Q[Precip.columns[0]].astype(float) > 0) & (Q['RH_Filtered'].astype(float) >= 90)
# Q['Precip_Tair_lt_Zero'] = (Q[Precip.columns[0]].astype(float) > 0) & (Q['Tair_Filtered'] < 0)
# Q['Precip_Filtered'] = Q[Precip.columns[0]][Q['Precip_Hard_Limit']&Q['Precip_RH_gt_90']&~Q['Precip_Tair_lt_Zero']]
# Q['Precip_Filtered'] = Q['Precip_Filtered'].replace(to_replace=Q['Precip_Filtered'][Z_Precip], value = 0)
# elif ('RH' in kwargs.keys()) & ('Tair' not in kwargs.keys()):
# Q['Precip_RH_gt_90'] = (Q[Precip.columns[0]].astype(float) > 0) & (Q['RH_Filtered'].astype(float) >= 90)
# Q['Precip_Filtered'] = Q[Precip.columns[0]][Q['Precip_Hard_Limit']&Q['Precip_RH']]
# Q['Precip_Filtered'] = Q['Precip_Filtered'].replace(to_replace=Q['Precip_Filtered'][Z_Precip], value = 0)
if 'Tair' in kwargs.keys():
Q['Precip_Tair_lt_Zero'] = (Q[Precip.columns[0]].astype(float) > 0) & (Q['Tair_Filtered'] < 0)
Q['Precip_Filtered'] = Q[Precip.columns[0]][Q['Precip_Hard_Limit']& ~Q['Precip_Tair_lt_Zero']]
Q['Precip_Filtered'] = Q['Precip_Filtered'].replace(to_replace=Q['Precip_Filtered'][Z_Precip], value = 0)
else:
Q['Precip_Filtered'] = Q[Precip.columns[0]][Q['Precip_Hard_Limit']]
Q['Precip_Filtered'] = Q['Precip_Filtered'].replace(to_replace=Q['Precip_Filtered'][Z_Precip], value = 0)
else:
print('**** Precipitation not present ****')
if 'VPD' in kwargs.keys():
VPD = pd.DataFrame(kwargs['VPD'])
if Q is None:
Q = VPD; Q = pd.DataFrame(Q)
else: Q= Q.join(VPD)
Q['VPD_Hard_Limit'] = (Q[VPD.columns[0]].astype(float) < 50) & (Q[VPD.columns[0]].astype(float) >= 0)
        Q['VPD_Change'] = (np.abs(Q[VPD.columns[0]].astype(float).diff()) <= 10) & (np.abs(Q[VPD.columns[0]].diff()) != 0)
        Q['VPD_Day_Change'] = (VPD[VPD.columns[0]].resample('D').mean().diff() != 0).reindex(Q.index, method='ffill')
Q['VPD_Filtered'] = Q[VPD.columns[0]][Q['VPD_Hard_Limit']&Q['VPD_Change']&Q['VPD_Day_Change']]
if 'e' in kwargs.keys():
e = pd.DataFrame(kwargs['e'])
if Q is None:
Q = e; Q = pd.DataFrame(Q)
else: Q= Q.join(e)
Q['e_Hard_Limit'] = (Q[e.columns[0]].astype(float) < 50) & (Q[e.columns[0]].astype(float) >= 0)
        Q['e_Change'] = (np.abs(Q[e.columns[0]].astype(float).diff()) <= 10) & (np.abs(Q[e.columns[0]].diff()) != 0)
        Q['e_Day_Change'] = (e[e.columns[0]].resample('D').mean().diff() != 0).reindex(Q.index, method='ffill')
Q['e_Filtered'] = Q[e.columns[0]][Q['e_Hard_Limit']&Q['e_Change']&Q['e_Day_Change']]
if 'e_s' in kwargs.keys():
e_s = pd.DataFrame(kwargs['e_s'])
if Q is None:
Q = e_s; Q = pd.DataFrame(Q)
else: Q= Q.join(e_s)
Q['e_s_Hard_Limit'] = (Q[e_s.columns[0]].astype(float) < 50) & (Q[e_s.columns[0]].astype(float) >= 0)
        Q['e_s_Change'] = (np.abs(Q[e_s.columns[0]].astype(float).diff()) <= 10) & (np.abs(Q[e_s.columns[0]].diff()) != 0)
        Q['e_s_Day_Change'] = (e_s[e_s.columns[0]].resample('D').mean().diff() != 0).reindex(Q.index, method='ffill')
Q['e_s_Filtered'] = Q[e_s.columns[0]][Q['e_s_Hard_Limit']&Q['e_s_Change']&Q['e_s_Day_Change']]
return Q
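# --- Usage sketch for Met_QAQC (illustrative only) ---
# Each keyword is a single-column Series/DataFrame on the same datetime index;
# 'z' is the station elevation in km used for the sea-level pressure reduction.
# The column names below are assumptions about the input file, not requirements.
#   met = Met_QAQC(Tair=df['TA_1_1_1'], RH=df['RH_1_1_1'], P=df['PA_1_1_1'],
#                  WS=df['WS_1_1_1'], WD=df['WD_1_1_1'], Precip=df['P_RAIN_1_1_1'],
#                  z=0.52)
#   clean_tair = met['Tair_Filtered']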
|
#!/usr/bin/env python
ph = float(input('enter pH level: '))
if ph < 7.0:
print(ph, "is acidic")
|
import os
import pytest
def test_wrong_select_db_index(cli):
cli.sendline("select 1")
cli.expect(["OK", "127.0.0.1"])
cli.sendline("select 128")
cli.expect(["DB index is out of range", "127.0.0.1:6379[1]>"])
if int(os.environ["REDIS_VERSION"]) > 5:
text = "value is not an integer or out of range"
else:
text = "invalid DB index"
cli.sendline("select abc")
cli.expect([text, "127.0.0.1:6379[1]>"])
cli.sendline("select 15")
cli.expect("OK")
def test_set_command_with_shash(clean_redis, cli):
cli.sendline("set a \\hello\\") # legal redis command
cli.expect("OK")
cli.sendline("get a")
cli.expect(r"hello")
def test_enter_key_binding(clean_redis, cli):
cli.send("set")
cli.expect("set")
cli.send("\033[B") # down
cli.sendline() # enter
cli.sendline(" a 'hello'")
cli.expect("OK")
cli.sendline("get a")
cli.expect(r"hello")
@pytest.mark.skipif("int(os.environ['REDIS_VERSION']) < 6")
def test_auth_hidden_password_with_username(clean_redis, cli):
cli.send("auth default hello-world")
cli.expect("default")
cli.expect(r"\*{11}")
@pytest.mark.skipif("int(os.environ['REDIS_VERSION']) > 5")
def test_auth_hidden_password(clean_redis, cli):
cli.send("auth hello-world")
cli.expect("auth")
cli.expect(r"\*{11}")
def test_hello_command_is_not_supported(cli):
cli.sendline("hello 3")
cli.expect("IRedis currently not support RESP3")
def test_abort_reading_connection(cli):
cli.sendline("blpop mylist 30")
cli.send(chr(3))
cli.expect(
r"KeyboardInterrupt received! User canceled reading response!", timeout=10
)
cli.sendline("set foo bar")
cli.expect("OK")
cli.sendline("get foo")
cli.expect("bar")
|
import sys
from distutils.version import LooseVersion
if sys.version_info.major < 3:
print('[!] You are running an old version of Python. '
'This tutorial requires Python 3.')
sys.exit(1)
with open('requirements.txt') as f:
reqs = f.readlines()
reqs = [(pkg, ver) for (pkg, _, ver) in
(req.split() for req in reqs if req.strip())]
pkg_names = {
'scikit-image': 'skimage',
'scikit-learn': 'sklearn'
}
for (pkg, version_wanted) in reqs:
module_name = pkg_names.get(pkg, pkg)
try:
m = __import__(module_name)
status = '✓'
except ImportError as e:
m = None
if (pkg != 'numpy' and 'numpy' in str(e)):
status = '?'
version_installed = 'Needs NumPy'
else:
version_installed = 'Not installed'
status = 'X'
if m is not None:
version_installed = m.__version__
if LooseVersion(version_wanted) > LooseVersion(version_installed):
status = 'X'
print('[{}] {:<11} {}'.format(
status, pkg.ljust(13), version_installed)
)
|
from math import ceil
S, D = [int(i) for i in input().split()]
cont = [0 for i in range(S)]
for d in range(D):
t = [int(i) for i in input().split()]
for i in range(S):
cont[i] += t[i]
media = ceil(sum(cont) / D)
pref = cont.index(max(cont))
print(str(media))
print(str(pref + 1))
|
#!/scratch_net/nudel/esandstroem/venvs/tsdf_fusion_env/bin/python
import os
app_path = '/scratch_net/nudel/esandstroem/venvs/tsdf_fusion_env/bin'
os.environ["PATH"] = app_path + os.pathsep + os.environ["PATH"]
from TSDFHandle import *
import numpy as np
import cv2
from utils import extract_mesh_marching_cubes
from visualization import plot_mesh
import plyfile
from sys import argv
import pathlib
if (len(argv) < 3):
print('Usage: {0} <name of depth directory> <save mode>'.format(argv[0]))
exit(0)
CURRENT_DIR = str(pathlib.Path().absolute())
depth_path = CURRENT_DIR + '/' + argv[1]
campose_path = CURRENT_DIR + '/' + 'left_camera_matrix'
box = np.array([[-4, 4], [-4, 4], [-4, 4]])  # each row is the interval over which the shape is reconstructed, i.e.
# [[xmin, xmax], [ymin, ymax], [zmin, zmax]]
tsdf = TSDF(bbox=box, resolution=0.025, resolution_factor=1)
depth_dir = os.listdir(depth_path)
sortOrder_depth = [int(x[:-4]) for x in depth_dir]
depth_dir = [x for _, x in sorted(zip(sortOrder_depth, depth_dir))]
campose_dir = os.listdir(campose_path)
sortOrder_pose = [int(x[:-4]) for x in campose_dir]
campose_dir = [x for _, x in sorted(zip(sortOrder_pose, campose_dir))]
camera_intrinsics = np.array([[256, 0, 256], [0, 256, 256], [0, 0, 1]]).astype(np.float32)
# Apparently, the tsdf fusion code expects the camera coordinate system to have z pointing in the
# camera viewing direction, y down and x to the right. This is achieved by a series of rotations.
rot_180_around_y = np.array([[-1, 0, 0], [0, 1, 0], [0, 0, -1]]).astype(np.float32)
rot_180_around_z = np.array([[-1, 0, 0], [0, -1, 0], [0, 0, 1]]).astype(np.float32)
rotation = np.matmul(rot_180_around_z, rot_180_around_y)
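# The combined rotation works out to diag(1, -1, -1): a 180 degree rotation about the
# camera x-axis, which flips y (down) and z (along the viewing direction) while
# leaving x pointing to the right.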
for i in range(len(depth_dir)):
depth = cv2.imread(depth_path + '/' + depth_dir[i], -1)
depth = depth / 1000
weight_map = np.ones(depth.shape)
campose = np.linalg.inv(np.loadtxt(campose_path + '/' + campose_dir[i]).astype(np.float32))
campose = np.matmul(camera_intrinsics, np.matmul(rotation,campose[0:3, 0:4]))
tsdf.fuse(campose, depth.astype(np.float32), weight_map.astype(np.float32))
mesh = extract_mesh_marching_cubes(tsdf.get_volume()[:, :, :, 0])
if argv[2].lower() not in ('0', 'false', 'no'):  # second argument: save the mesh unless explicitly disabled
mesh.write('tsdf_fusion_' + argv[1] + '.ply')
plot_mesh(mesh)
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# Generated file, DO NOT EDIT
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------------------------
from msrest.serialization import Model
class AddProcessWorkItemTypeFieldRequest(Model):
"""AddProcessWorkItemTypeFieldRequest.
:param allow_groups: Allow setting field value to a group identity. Only applies to identity fields.
:type allow_groups: bool
:param default_value: The default value of the field.
:type default_value: object
:param read_only: If true the field cannot be edited.
:type read_only: bool
:param reference_name: Reference name of the field.
:type reference_name: str
:param required: If true the field cannot be empty.
:type required: bool
"""
_attribute_map = {
'allow_groups': {'key': 'allowGroups', 'type': 'bool'},
'default_value': {'key': 'defaultValue', 'type': 'object'},
'read_only': {'key': 'readOnly', 'type': 'bool'},
'reference_name': {'key': 'referenceName', 'type': 'str'},
'required': {'key': 'required', 'type': 'bool'}
}
def __init__(self, allow_groups=None, default_value=None, read_only=None, reference_name=None, required=None):
super(AddProcessWorkItemTypeFieldRequest, self).__init__()
self.allow_groups = allow_groups
self.default_value = default_value
self.read_only = read_only
self.reference_name = reference_name
self.required = required
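# Illustrative note (not part of the generated client): _attribute_map tells the
# msrest serializer which REST field name and wire type each Python attribute maps
# to, so a request built as
#   req = AddProcessWorkItemTypeFieldRequest(reference_name='Custom.Team', required=True)
# goes on the wire as {"referenceName": "Custom.Team", "required": true}.
# 'Custom.Team' is a made-up field reference name used only for this example.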
class Control(Model):
"""Control.
:param contribution: Contribution for the control.
:type contribution: :class:`WitContribution <azure.devops.v5_1.work_item_tracking.models.WitContribution>`
:param control_type: Type of the control.
:type control_type: str
:param height: Height of the control, for html controls.
:type height: int
:param id: The id for the layout node.
:type id: str
    :param inherited: A value indicating whether this layout node has been inherited from a parent layout. This is expected to only be set by the combiner.
:type inherited: bool
    :param is_contribution: A value indicating if the layout node is a contribution or not.
:type is_contribution: bool
:param label: Label for the field.
:type label: str
:param metadata: Inner text of the control.
:type metadata: str
:param order: Order in which the control should appear in its group.
:type order: int
    :param overridden: A value indicating whether this layout node has been overridden by a child layout.
:type overridden: bool
:param read_only: A value indicating if the control is readonly.
:type read_only: bool
:param visible: A value indicating if the control should be hidden or not.
:type visible: bool
:param watermark: Watermark text for the textbox.
:type watermark: str
"""
_attribute_map = {
'contribution': {'key': 'contribution', 'type': 'WitContribution'},
'control_type': {'key': 'controlType', 'type': 'str'},
'height': {'key': 'height', 'type': 'int'},
'id': {'key': 'id', 'type': 'str'},
'inherited': {'key': 'inherited', 'type': 'bool'},
'is_contribution': {'key': 'isContribution', 'type': 'bool'},
'label': {'key': 'label', 'type': 'str'},
'metadata': {'key': 'metadata', 'type': 'str'},
'order': {'key': 'order', 'type': 'int'},
'overridden': {'key': 'overridden', 'type': 'bool'},
'read_only': {'key': 'readOnly', 'type': 'bool'},
'visible': {'key': 'visible', 'type': 'bool'},
'watermark': {'key': 'watermark', 'type': 'str'}
}
def __init__(self, contribution=None, control_type=None, height=None, id=None, inherited=None, is_contribution=None, label=None, metadata=None, order=None, overridden=None, read_only=None, visible=None, watermark=None):
super(Control, self).__init__()
self.contribution = contribution
self.control_type = control_type
self.height = height
self.id = id
self.inherited = inherited
self.is_contribution = is_contribution
self.label = label
self.metadata = metadata
self.order = order
self.overridden = overridden
self.read_only = read_only
self.visible = visible
self.watermark = watermark
class CreateProcessModel(Model):
"""CreateProcessModel.
:param description: Description of the process
:type description: str
:param name: Name of the process
:type name: str
:param parent_process_type_id: The ID of the parent process
:type parent_process_type_id: str
:param reference_name: Reference name of process being created. If not specified, server will assign a unique reference name
:type reference_name: str
"""
_attribute_map = {
'description': {'key': 'description', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'parent_process_type_id': {'key': 'parentProcessTypeId', 'type': 'str'},
'reference_name': {'key': 'referenceName', 'type': 'str'}
}
def __init__(self, description=None, name=None, parent_process_type_id=None, reference_name=None):
super(CreateProcessModel, self).__init__()
self.description = description
self.name = name
self.parent_process_type_id = parent_process_type_id
self.reference_name = reference_name
class CreateProcessRuleRequest(Model):
"""CreateProcessRuleRequest.
:param actions: List of actions to take when the rule is triggered.
:type actions: list of :class:`RuleAction <azure.devops.v5_1.work_item_tracking.models.RuleAction>`
:param conditions: List of conditions when the rule should be triggered.
:type conditions: list of :class:`RuleCondition <azure.devops.v5_1.work_item_tracking.models.RuleCondition>`
:param is_disabled: Indicates if the rule is disabled.
:type is_disabled: bool
:param name: Name for the rule.
:type name: str
"""
_attribute_map = {
'actions': {'key': 'actions', 'type': '[RuleAction]'},
'conditions': {'key': 'conditions', 'type': '[RuleCondition]'},
'is_disabled': {'key': 'isDisabled', 'type': 'bool'},
'name': {'key': 'name', 'type': 'str'}
}
def __init__(self, actions=None, conditions=None, is_disabled=None, name=None):
super(CreateProcessRuleRequest, self).__init__()
self.actions = actions
self.conditions = conditions
self.is_disabled = is_disabled
self.name = name
class CreateProcessWorkItemTypeRequest(Model):
"""CreateProcessWorkItemTypeRequest.
:param color: Color hexadecimal code to represent the work item type
:type color: str
:param description: Description of the work item type
:type description: str
:param icon: Icon to represent the work item type
:type icon: str
:param inherits_from: Parent work item type for work item type
:type inherits_from: str
    :param is_disabled: True if the work item type needs to be disabled
:type is_disabled: bool
:param name: Name of work item type
:type name: str
"""
_attribute_map = {
'color': {'key': 'color', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
'icon': {'key': 'icon', 'type': 'str'},
'inherits_from': {'key': 'inheritsFrom', 'type': 'str'},
'is_disabled': {'key': 'isDisabled', 'type': 'bool'},
'name': {'key': 'name', 'type': 'str'}
}
def __init__(self, color=None, description=None, icon=None, inherits_from=None, is_disabled=None, name=None):
super(CreateProcessWorkItemTypeRequest, self).__init__()
self.color = color
self.description = description
self.icon = icon
self.inherits_from = inherits_from
self.is_disabled = is_disabled
self.name = name
class Extension(Model):
"""Extension.
:param id: Id of the extension
:type id: str
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'str'}
}
def __init__(self, id=None):
super(Extension, self).__init__()
self.id = id
class FieldModel(Model):
"""FieldModel.
:param description:
:type description: str
:param id:
:type id: str
:param is_identity:
:type is_identity: bool
:param name:
:type name: str
:param type:
:type type: object
:param url:
:type url: str
"""
_attribute_map = {
'description': {'key': 'description', 'type': 'str'},
'id': {'key': 'id', 'type': 'str'},
'is_identity': {'key': 'isIdentity', 'type': 'bool'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'object'},
'url': {'key': 'url', 'type': 'str'}
}
def __init__(self, description=None, id=None, is_identity=None, name=None, type=None, url=None):
super(FieldModel, self).__init__()
self.description = description
self.id = id
self.is_identity = is_identity
self.name = name
self.type = type
self.url = url
class FieldRuleModel(Model):
"""FieldRuleModel.
:param actions:
:type actions: list of :class:`RuleActionModel <azure.devops.v5_1.work_item_tracking.models.RuleActionModel>`
:param conditions:
:type conditions: list of :class:`RuleConditionModel <azure.devops.v5_1.work_item_tracking.models.RuleConditionModel>`
:param friendly_name:
:type friendly_name: str
:param id:
:type id: str
:param is_disabled:
:type is_disabled: bool
:param is_system:
:type is_system: bool
"""
_attribute_map = {
'actions': {'key': 'actions', 'type': '[RuleActionModel]'},
'conditions': {'key': 'conditions', 'type': '[RuleConditionModel]'},
'friendly_name': {'key': 'friendlyName', 'type': 'str'},
'id': {'key': 'id', 'type': 'str'},
'is_disabled': {'key': 'isDisabled', 'type': 'bool'},
'is_system': {'key': 'isSystem', 'type': 'bool'}
}
def __init__(self, actions=None, conditions=None, friendly_name=None, id=None, is_disabled=None, is_system=None):
super(FieldRuleModel, self).__init__()
self.actions = actions
self.conditions = conditions
self.friendly_name = friendly_name
self.id = id
self.is_disabled = is_disabled
self.is_system = is_system
class FormLayout(Model):
"""FormLayout.
:param extensions: Gets and sets extensions list.
:type extensions: list of :class:`Extension <azure.devops.v5_1.work_item_tracking.models.Extension>`
:param pages: Top level tabs of the layout.
:type pages: list of :class:`Page <azure.devops.v5_1.work_item_tracking.models.Page>`
:param system_controls: Headers controls of the layout.
:type system_controls: list of :class:`Control <azure.devops.v5_1.work_item_tracking.models.Control>`
"""
_attribute_map = {
'extensions': {'key': 'extensions', 'type': '[Extension]'},
'pages': {'key': 'pages', 'type': '[Page]'},
'system_controls': {'key': 'systemControls', 'type': '[Control]'}
}
def __init__(self, extensions=None, pages=None, system_controls=None):
super(FormLayout, self).__init__()
self.extensions = extensions
self.pages = pages
self.system_controls = system_controls
class Group(Model):
"""Group.
:param contribution: Contribution for the group.
:type contribution: :class:`WitContribution <azure.devops.v5_1.work_item_tracking.models.WitContribution>`
:param controls: Controls to be put in the group.
:type controls: list of :class:`Control <azure.devops.v5_1.work_item_tracking.models.Control>`
:param height: The height for the contribution.
:type height: int
:param id: The id for the layout node.
:type id: str
    :param inherited: A value indicating whether this layout node has been inherited from a parent layout. This is expected to only be set by the combiner.
:type inherited: bool
    :param is_contribution: A value indicating if the layout node is a contribution or not.
:type is_contribution: bool
:param label: Label for the group.
:type label: str
:param order: Order in which the group should appear in the section.
:type order: int
:param overridden: A value indicating whether this layout node has been overridden by a child layout.
:type overridden: bool
:param visible: A value indicating if the group should be hidden or not.
:type visible: bool
"""
_attribute_map = {
'contribution': {'key': 'contribution', 'type': 'WitContribution'},
'controls': {'key': 'controls', 'type': '[Control]'},
'height': {'key': 'height', 'type': 'int'},
'id': {'key': 'id', 'type': 'str'},
'inherited': {'key': 'inherited', 'type': 'bool'},
'is_contribution': {'key': 'isContribution', 'type': 'bool'},
'label': {'key': 'label', 'type': 'str'},
'order': {'key': 'order', 'type': 'int'},
'overridden': {'key': 'overridden', 'type': 'bool'},
'visible': {'key': 'visible', 'type': 'bool'}
}
def __init__(self, contribution=None, controls=None, height=None, id=None, inherited=None, is_contribution=None, label=None, order=None, overridden=None, visible=None):
super(Group, self).__init__()
self.contribution = contribution
self.controls = controls
self.height = height
self.id = id
self.inherited = inherited
self.is_contribution = is_contribution
self.label = label
self.order = order
self.overridden = overridden
self.visible = visible
class HideStateModel(Model):
"""HideStateModel.
:param hidden: Returns 'true', if workitem state is hidden, 'false' otherwise.
:type hidden: bool
"""
_attribute_map = {
'hidden': {'key': 'hidden', 'type': 'bool'}
}
def __init__(self, hidden=None):
super(HideStateModel, self).__init__()
self.hidden = hidden
class Page(Model):
"""Page.
:param contribution: Contribution for the page.
:type contribution: :class:`WitContribution <azure.devops.v5_1.work_item_tracking.models.WitContribution>`
:param id: The id for the layout node.
:type id: str
    :param inherited: A value indicating whether this layout node has been inherited from a parent layout. This is expected to only be set by the combiner.
:type inherited: bool
    :param is_contribution: A value indicating if the layout node is a contribution or not.
:type is_contribution: bool
:param label: The label for the page.
:type label: str
:param locked: A value indicating whether any user operations are permitted on this page and the contents of this page
:type locked: bool
:param order: Order in which the page should appear in the layout.
:type order: int
:param overridden: A value indicating whether this layout node has been overridden by a child layout.
:type overridden: bool
:param page_type: The icon for the page.
:type page_type: object
:param sections: The sections of the page.
:type sections: list of :class:`Section <azure.devops.v5_1.work_item_tracking.models.Section>`
:param visible: A value indicating if the page should be hidden or not.
:type visible: bool
"""
_attribute_map = {
'contribution': {'key': 'contribution', 'type': 'WitContribution'},
'id': {'key': 'id', 'type': 'str'},
'inherited': {'key': 'inherited', 'type': 'bool'},
'is_contribution': {'key': 'isContribution', 'type': 'bool'},
'label': {'key': 'label', 'type': 'str'},
'locked': {'key': 'locked', 'type': 'bool'},
'order': {'key': 'order', 'type': 'int'},
'overridden': {'key': 'overridden', 'type': 'bool'},
'page_type': {'key': 'pageType', 'type': 'object'},
'sections': {'key': 'sections', 'type': '[Section]'},
'visible': {'key': 'visible', 'type': 'bool'}
}
def __init__(self, contribution=None, id=None, inherited=None, is_contribution=None, label=None, locked=None, order=None, overridden=None, page_type=None, sections=None, visible=None):
super(Page, self).__init__()
self.contribution = contribution
self.id = id
self.inherited = inherited
self.is_contribution = is_contribution
self.label = label
self.locked = locked
self.order = order
self.overridden = overridden
self.page_type = page_type
self.sections = sections
self.visible = visible
class PickListMetadata(Model):
"""PickListMetadata.
:param id: ID of the picklist
:type id: str
:param is_suggested: Indicates whether items outside of suggested list are allowed
:type is_suggested: bool
:param name: Name of the picklist
:type name: str
:param type: DataType of picklist
:type type: str
:param url: Url of the picklist
:type url: str
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'is_suggested': {'key': 'isSuggested', 'type': 'bool'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'url': {'key': 'url', 'type': 'str'}
}
def __init__(self, id=None, is_suggested=None, name=None, type=None, url=None):
super(PickListMetadata, self).__init__()
self.id = id
self.is_suggested = is_suggested
self.name = name
self.type = type
self.url = url
class ProcessBehavior(Model):
"""ProcessBehavior.
:param color: Color.
:type color: str
    :param customization: Indicates the type of customization on this work item. System behaviors are inherited from the parent process but not modified. Inherited behaviors are modified behaviors that were inherited from the parent process. Custom behaviors are behaviors created by the user in the current process.
:type customization: object
    :param description: Description.
:type description: str
:param fields: Process Behavior Fields.
:type fields: list of :class:`ProcessBehaviorField <azure.devops.v5_1.work_item_tracking.models.ProcessBehaviorField>`
:param inherits: Parent behavior reference.
:type inherits: :class:`ProcessBehaviorReference <azure.devops.v5_1.work_item_tracking.models.ProcessBehaviorReference>`
:param name: Behavior Name.
:type name: str
:param rank: Rank of the behavior
:type rank: int
:param reference_name: Behavior Id
:type reference_name: str
:param url: Url of the behavior.
:type url: str
"""
_attribute_map = {
'color': {'key': 'color', 'type': 'str'},
'customization': {'key': 'customization', 'type': 'object'},
'description': {'key': 'description', 'type': 'str'},
'fields': {'key': 'fields', 'type': '[ProcessBehaviorField]'},
'inherits': {'key': 'inherits', 'type': 'ProcessBehaviorReference'},
'name': {'key': 'name', 'type': 'str'},
'rank': {'key': 'rank', 'type': 'int'},
'reference_name': {'key': 'referenceName', 'type': 'str'},
'url': {'key': 'url', 'type': 'str'}
}
def __init__(self, color=None, customization=None, description=None, fields=None, inherits=None, name=None, rank=None, reference_name=None, url=None):
super(ProcessBehavior, self).__init__()
self.color = color
self.customization = customization
self.description = description
self.fields = fields
self.inherits = inherits
self.name = name
self.rank = rank
self.reference_name = reference_name
self.url = url
class ProcessBehaviorCreateRequest(Model):
"""ProcessBehaviorCreateRequest.
:param color: Color.
:type color: str
:param inherits: Parent behavior id.
:type inherits: str
:param name: Name of the behavior.
:type name: str
    :param reference_name: ReferenceName is optional; if not specified, it will be auto-generated.
:type reference_name: str
"""
_attribute_map = {
'color': {'key': 'color', 'type': 'str'},
'inherits': {'key': 'inherits', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'reference_name': {'key': 'referenceName', 'type': 'str'}
}
def __init__(self, color=None, inherits=None, name=None, reference_name=None):
super(ProcessBehaviorCreateRequest, self).__init__()
self.color = color
self.inherits = inherits
self.name = name
self.reference_name = reference_name
class ProcessBehaviorField(Model):
"""ProcessBehaviorField.
:param name: Name of the field.
:type name: str
:param reference_name: Reference name of the field.
:type reference_name: str
:param url: Url to field.
:type url: str
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'reference_name': {'key': 'referenceName', 'type': 'str'},
'url': {'key': 'url', 'type': 'str'}
}
def __init__(self, name=None, reference_name=None, url=None):
super(ProcessBehaviorField, self).__init__()
self.name = name
self.reference_name = reference_name
self.url = url
class ProcessBehaviorReference(Model):
"""ProcessBehaviorReference.
:param behavior_ref_name: Id of a Behavior.
:type behavior_ref_name: str
:param url: Url to behavior.
:type url: str
"""
_attribute_map = {
'behavior_ref_name': {'key': 'behaviorRefName', 'type': 'str'},
'url': {'key': 'url', 'type': 'str'}
}
def __init__(self, behavior_ref_name=None, url=None):
super(ProcessBehaviorReference, self).__init__()
self.behavior_ref_name = behavior_ref_name
self.url = url
class ProcessBehaviorUpdateRequest(Model):
"""ProcessBehaviorUpdateRequest.
:param color: Color.
:type color: str
:param name: Behavior Name.
:type name: str
"""
_attribute_map = {
'color': {'key': 'color', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'}
}
def __init__(self, color=None, name=None):
super(ProcessBehaviorUpdateRequest, self).__init__()
self.color = color
self.name = name
class ProcessInfo(Model):
"""ProcessInfo.
    :param customization_type: Indicates the type of customization on this process. The System process is the default process. An Inherited process is a modified process that was originally a System process.
:type customization_type: object
:param description: Description of the process.
:type description: str
:param is_default: Is the process default.
:type is_default: bool
:param is_enabled: Is the process enabled.
:type is_enabled: bool
:param name: Name of the process.
:type name: str
:param parent_process_type_id: ID of the parent process.
:type parent_process_type_id: str
:param projects: Projects in this process to which the user is subscribed to.
:type projects: list of :class:`ProjectReference <azure.devops.v5_1.work_item_tracking.models.ProjectReference>`
:param reference_name: Reference name of the process.
:type reference_name: str
:param type_id: The ID of the process.
:type type_id: str
"""
_attribute_map = {
'customization_type': {'key': 'customizationType', 'type': 'object'},
'description': {'key': 'description', 'type': 'str'},
'is_default': {'key': 'isDefault', 'type': 'bool'},
'is_enabled': {'key': 'isEnabled', 'type': 'bool'},
'name': {'key': 'name', 'type': 'str'},
'parent_process_type_id': {'key': 'parentProcessTypeId', 'type': 'str'},
'projects': {'key': 'projects', 'type': '[ProjectReference]'},
'reference_name': {'key': 'referenceName', 'type': 'str'},
'type_id': {'key': 'typeId', 'type': 'str'}
}
def __init__(self, customization_type=None, description=None, is_default=None, is_enabled=None, name=None, parent_process_type_id=None, projects=None, reference_name=None, type_id=None):
super(ProcessInfo, self).__init__()
self.customization_type = customization_type
self.description = description
self.is_default = is_default
self.is_enabled = is_enabled
self.name = name
self.parent_process_type_id = parent_process_type_id
self.projects = projects
self.reference_name = reference_name
self.type_id = type_id
class ProcessModel(Model):
"""ProcessModel.
:param description: Description of the process
:type description: str
:param name: Name of the process
:type name: str
:param projects: Projects in this process
:type projects: list of :class:`ProjectReference <azure.devops.v5_1.work_item_tracking.models.ProjectReference>`
:param properties: Properties of the process
:type properties: :class:`ProcessProperties <azure.devops.v5_1.work_item_tracking.models.ProcessProperties>`
:param reference_name: Reference name of the process
:type reference_name: str
:param type_id: The ID of the process
:type type_id: str
"""
_attribute_map = {
'description': {'key': 'description', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'projects': {'key': 'projects', 'type': '[ProjectReference]'},
'properties': {'key': 'properties', 'type': 'ProcessProperties'},
'reference_name': {'key': 'referenceName', 'type': 'str'},
'type_id': {'key': 'typeId', 'type': 'str'}
}
def __init__(self, description=None, name=None, projects=None, properties=None, reference_name=None, type_id=None):
super(ProcessModel, self).__init__()
self.description = description
self.name = name
self.projects = projects
self.properties = properties
self.reference_name = reference_name
self.type_id = type_id
class ProcessProperties(Model):
"""ProcessProperties.
:param class_: Class of the process.
:type class_: object
:param is_default: Is the process default process.
:type is_default: bool
:param is_enabled: Is the process enabled.
:type is_enabled: bool
:param parent_process_type_id: ID of the parent process.
:type parent_process_type_id: str
:param version: Version of the process.
:type version: str
"""
_attribute_map = {
'class_': {'key': 'class', 'type': 'object'},
'is_default': {'key': 'isDefault', 'type': 'bool'},
'is_enabled': {'key': 'isEnabled', 'type': 'bool'},
'parent_process_type_id': {'key': 'parentProcessTypeId', 'type': 'str'},
'version': {'key': 'version', 'type': 'str'}
}
def __init__(self, class_=None, is_default=None, is_enabled=None, parent_process_type_id=None, version=None):
super(ProcessProperties, self).__init__()
self.class_ = class_
self.is_default = is_default
self.is_enabled = is_enabled
self.parent_process_type_id = parent_process_type_id
self.version = version
class ProcessRule(CreateProcessRuleRequest):
"""ProcessRule.
:param actions: List of actions to take when the rule is triggered.
:type actions: list of :class:`RuleAction <azure.devops.v5_1.work_item_tracking.models.RuleAction>`
:param conditions: List of conditions when the rule should be triggered.
:type conditions: list of :class:`RuleCondition <azure.devops.v5_1.work_item_tracking.models.RuleCondition>`
:param is_disabled: Indicates if the rule is disabled.
:type is_disabled: bool
:param name: Name for the rule.
:type name: str
:param customization_type: Indicates if the rule is system generated or created by user.
:type customization_type: object
:param id: Id to uniquely identify the rule.
:type id: str
:param url: Resource Url.
:type url: str
"""
_attribute_map = {
'actions': {'key': 'actions', 'type': '[RuleAction]'},
'conditions': {'key': 'conditions', 'type': '[RuleCondition]'},
'is_disabled': {'key': 'isDisabled', 'type': 'bool'},
'name': {'key': 'name', 'type': 'str'},
'customization_type': {'key': 'customizationType', 'type': 'object'},
'id': {'key': 'id', 'type': 'str'},
'url': {'key': 'url', 'type': 'str'}
}
def __init__(self, actions=None, conditions=None, is_disabled=None, name=None, customization_type=None, id=None, url=None):
super(ProcessRule, self).__init__(actions=actions, conditions=conditions, is_disabled=is_disabled, name=name)
self.customization_type = customization_type
self.id = id
self.url = url
class ProcessWorkItemType(Model):
"""ProcessWorkItemType.
:param behaviors:
:type behaviors: list of :class:`WorkItemTypeBehavior <azure.devops.v5_1.work_item_tracking.models.WorkItemTypeBehavior>`
:param color: Color hexadecimal code to represent the work item type
:type color: str
    :param customization: Indicates the type of customization on this work item. System work item types are inherited from the parent process but are not modified. Inherited work item types are work item types that were inherited from the parent process and then modified. Custom work item types are work item types that were created in the current process.
:type customization: object
:param description: Description of the work item type
:type description: str
    :param icon: Icon to represent the work item type
:type icon: str
:param inherits: Reference name of the parent work item type
:type inherits: str
:param is_disabled: Indicates if a work item type is disabled
:type is_disabled: bool
:param layout:
:type layout: :class:`FormLayout <azure.devops.v5_1.work_item_tracking.models.FormLayout>`
:param name: Name of the work item type
:type name: str
:param reference_name: Reference name of work item type
:type reference_name: str
:param states:
:type states: list of :class:`WorkItemStateResultModel <azure.devops.v5_1.work_item_tracking.models.WorkItemStateResultModel>`
:param url: Url of the work item type
:type url: str
"""
_attribute_map = {
'behaviors': {'key': 'behaviors', 'type': '[WorkItemTypeBehavior]'},
'color': {'key': 'color', 'type': 'str'},
'customization': {'key': 'customization', 'type': 'object'},
'description': {'key': 'description', 'type': 'str'},
'icon': {'key': 'icon', 'type': 'str'},
'inherits': {'key': 'inherits', 'type': 'str'},
'is_disabled': {'key': 'isDisabled', 'type': 'bool'},
'layout': {'key': 'layout', 'type': 'FormLayout'},
'name': {'key': 'name', 'type': 'str'},
'reference_name': {'key': 'referenceName', 'type': 'str'},
'states': {'key': 'states', 'type': '[WorkItemStateResultModel]'},
'url': {'key': 'url', 'type': 'str'}
}
def __init__(self, behaviors=None, color=None, customization=None, description=None, icon=None, inherits=None, is_disabled=None, layout=None, name=None, reference_name=None, states=None, url=None):
super(ProcessWorkItemType, self).__init__()
self.behaviors = behaviors
self.color = color
self.customization = customization
self.description = description
self.icon = icon
self.inherits = inherits
self.is_disabled = is_disabled
self.layout = layout
self.name = name
self.reference_name = reference_name
self.states = states
self.url = url
class ProcessWorkItemTypeField(Model):
"""ProcessWorkItemTypeField.
:param allow_groups: Allow setting field value to a group identity. Only applies to identity fields.
:type allow_groups: bool
:param customization: Indicates the type of customization on this work item.
:type customization: object
:param default_value: The default value of the field.
:type default_value: object
:param description: Description of the field.
:type description: str
:param name: Name of the field.
:type name: str
:param read_only: If true the field cannot be edited.
:type read_only: bool
:param reference_name: Reference name of the field.
:type reference_name: str
:param required: If true the field cannot be empty.
:type required: bool
:param type: Type of the field.
:type type: object
:param url: Resource URL of the field.
:type url: str
"""
_attribute_map = {
'allow_groups': {'key': 'allowGroups', 'type': 'bool'},
'customization': {'key': 'customization', 'type': 'object'},
'default_value': {'key': 'defaultValue', 'type': 'object'},
'description': {'key': 'description', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'read_only': {'key': 'readOnly', 'type': 'bool'},
'reference_name': {'key': 'referenceName', 'type': 'str'},
'required': {'key': 'required', 'type': 'bool'},
'type': {'key': 'type', 'type': 'object'},
'url': {'key': 'url', 'type': 'str'}
}
def __init__(self, allow_groups=None, customization=None, default_value=None, description=None, name=None, read_only=None, reference_name=None, required=None, type=None, url=None):
super(ProcessWorkItemTypeField, self).__init__()
self.allow_groups = allow_groups
self.customization = customization
self.default_value = default_value
self.description = description
self.name = name
self.read_only = read_only
self.reference_name = reference_name
self.required = required
self.type = type
self.url = url
class ProjectReference(Model):
"""ProjectReference.
:param description: Description of the project
:type description: str
:param id: The ID of the project
:type id: str
:param name: Name of the project
:type name: str
:param url: Url of the project
:type url: str
"""
_attribute_map = {
'description': {'key': 'description', 'type': 'str'},
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'url': {'key': 'url', 'type': 'str'}
}
def __init__(self, description=None, id=None, name=None, url=None):
super(ProjectReference, self).__init__()
self.description = description
self.id = id
self.name = name
self.url = url
class RuleAction(Model):
"""RuleAction.
:param action_type: Type of action to take when the rule is triggered.
:type action_type: object
:param target_field: Field on which the action should be taken.
:type target_field: str
:param value: Value to apply on target field, once the action is taken.
:type value: str
"""
_attribute_map = {
'action_type': {'key': 'actionType', 'type': 'object'},
'target_field': {'key': 'targetField', 'type': 'str'},
'value': {'key': 'value', 'type': 'str'}
}
def __init__(self, action_type=None, target_field=None, value=None):
super(RuleAction, self).__init__()
self.action_type = action_type
self.target_field = target_field
self.value = value
class RuleActionModel(Model):
"""RuleActionModel.
:param action_type:
:type action_type: str
:param target_field:
:type target_field: str
:param value:
:type value: str
"""
_attribute_map = {
'action_type': {'key': 'actionType', 'type': 'str'},
'target_field': {'key': 'targetField', 'type': 'str'},
'value': {'key': 'value', 'type': 'str'}
}
def __init__(self, action_type=None, target_field=None, value=None):
super(RuleActionModel, self).__init__()
self.action_type = action_type
self.target_field = target_field
self.value = value
class RuleCondition(Model):
"""RuleCondition.
    :param condition_type: Type of condition. $When. This condition limits the execution of its children to cases when another field has a particular value, i.e. when the Is value of the referenced field is equal to the given literal value. $WhenNot. This condition limits the execution of its children to cases when another field does not have a particular value, i.e. when the Is value of the referenced field is not equal to the given literal value. $WhenChanged. This condition limits the execution of its children to cases when another field has changed, i.e. when the Is value of the referenced field is not equal to the Was value of that field. $WhenNotChanged. This condition limits the execution of its children to cases when another field has not changed, i.e. when the Is value of the referenced field is equal to the Was value of that field.
:type condition_type: object
:param field: Field that defines condition.
:type field: str
:param value: Value of field to define the condition for rule.
:type value: str
"""
_attribute_map = {
'condition_type': {'key': 'conditionType', 'type': 'object'},
'field': {'key': 'field', 'type': 'str'},
'value': {'key': 'value', 'type': 'str'}
}
def __init__(self, condition_type=None, field=None, value=None):
super(RuleCondition, self).__init__()
self.condition_type = condition_type
self.field = field
self.value = value
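# Usage sketch: a process rule that reacts to a field value can be assembled from the
# models above (ProcessRule, RuleCondition, RuleAction). The condition_type and
# action_type strings below are illustrative placeholders, not verified enum values:
#
#     rule = ProcessRule(
#         name='Set triage owner',
#         conditions=[RuleCondition(condition_type='$when',
#                                   field='System.State', value='Active')],
#         actions=[RuleAction(action_type='copyValue',
#                             target_field='Custom.Triage', value='Unassigned')],
#         is_disabled=False,
#     )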
class RuleConditionModel(Model):
"""RuleConditionModel.
:param condition_type:
:type condition_type: str
:param field:
:type field: str
:param value:
:type value: str
"""
_attribute_map = {
'condition_type': {'key': 'conditionType', 'type': 'str'},
'field': {'key': 'field', 'type': 'str'},
'value': {'key': 'value', 'type': 'str'}
}
def __init__(self, condition_type=None, field=None, value=None):
super(RuleConditionModel, self).__init__()
self.condition_type = condition_type
self.field = field
self.value = value
class Section(Model):
"""Section.
:param groups: List of child groups in this section
:type groups: list of :class:`Group <azure.devops.v5_1.work_item_tracking.models.Group>`
:param id: The id for the layout node.
:type id: str
:param overridden: A value indicating whether this layout node has been overridden by a child layout.
:type overridden: bool
"""
_attribute_map = {
'groups': {'key': 'groups', 'type': '[Group]'},
'id': {'key': 'id', 'type': 'str'},
'overridden': {'key': 'overridden', 'type': 'bool'}
}
def __init__(self, groups=None, id=None, overridden=None):
super(Section, self).__init__()
self.groups = groups
self.id = id
self.overridden = overridden
class UpdateProcessModel(Model):
"""UpdateProcessModel.
:param description: New description of the process
:type description: str
:param is_default: If true new projects will use this process by default
:type is_default: bool
:param is_enabled: If false the process will be disabled and cannot be used to create projects
:type is_enabled: bool
:param name: New name of the process
:type name: str
"""
_attribute_map = {
'description': {'key': 'description', 'type': 'str'},
'is_default': {'key': 'isDefault', 'type': 'bool'},
'is_enabled': {'key': 'isEnabled', 'type': 'bool'},
'name': {'key': 'name', 'type': 'str'}
}
def __init__(self, description=None, is_default=None, is_enabled=None, name=None):
super(UpdateProcessModel, self).__init__()
self.description = description
self.is_default = is_default
self.is_enabled = is_enabled
self.name = name
class UpdateProcessRuleRequest(CreateProcessRuleRequest):
"""UpdateProcessRuleRequest.
:param actions: List of actions to take when the rule is triggered.
:type actions: list of :class:`RuleAction <azure.devops.v5_1.work_item_tracking.models.RuleAction>`
:param conditions: List of conditions when the rule should be triggered.
:type conditions: list of :class:`RuleCondition <azure.devops.v5_1.work_item_tracking.models.RuleCondition>`
:param is_disabled: Indicates if the rule is disabled.
:type is_disabled: bool
:param name: Name for the rule.
:type name: str
:param id: Id to uniquely identify the rule.
:type id: str
"""
_attribute_map = {
'actions': {'key': 'actions', 'type': '[RuleAction]'},
'conditions': {'key': 'conditions', 'type': '[RuleCondition]'},
'is_disabled': {'key': 'isDisabled', 'type': 'bool'},
'name': {'key': 'name', 'type': 'str'},
'id': {'key': 'id', 'type': 'str'}
}
def __init__(self, actions=None, conditions=None, is_disabled=None, name=None, id=None):
super(UpdateProcessRuleRequest, self).__init__(actions=actions, conditions=conditions, is_disabled=is_disabled, name=name)
self.id = id
class UpdateProcessWorkItemTypeFieldRequest(Model):
"""UpdateProcessWorkItemTypeFieldRequest.
:param allow_groups: Allow setting field value to a group identity. Only applies to identity fields.
:type allow_groups: bool
:param default_value: The default value of the field.
:type default_value: object
:param read_only: If true the field cannot be edited.
:type read_only: bool
    :param required: If true the field cannot be empty.
:type required: bool
"""
_attribute_map = {
'allow_groups': {'key': 'allowGroups', 'type': 'bool'},
'default_value': {'key': 'defaultValue', 'type': 'object'},
'read_only': {'key': 'readOnly', 'type': 'bool'},
'required': {'key': 'required', 'type': 'bool'}
}
def __init__(self, allow_groups=None, default_value=None, read_only=None, required=None):
super(UpdateProcessWorkItemTypeFieldRequest, self).__init__()
self.allow_groups = allow_groups
self.default_value = default_value
self.read_only = read_only
self.required = required
class UpdateProcessWorkItemTypeRequest(Model):
"""UpdateProcessWorkItemTypeRequest.
:param color: Color of the work item type
:type color: str
:param description: Description of the work item type
:type description: str
:param icon: Icon of the work item type
:type icon: str
:param is_disabled: If set will disable the work item type
:type is_disabled: bool
"""
_attribute_map = {
'color': {'key': 'color', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
'icon': {'key': 'icon', 'type': 'str'},
'is_disabled': {'key': 'isDisabled', 'type': 'bool'}
}
def __init__(self, color=None, description=None, icon=None, is_disabled=None):
super(UpdateProcessWorkItemTypeRequest, self).__init__()
self.color = color
self.description = description
self.icon = icon
self.is_disabled = is_disabled
class WitContribution(Model):
"""WitContribution.
:param contribution_id: The id for the contribution.
:type contribution_id: str
:param height: The height for the contribution.
:type height: int
:param inputs: A dictionary holding key value pairs for contribution inputs.
:type inputs: dict
    :param show_on_deleted_work_item: A value indicating if the contribution should be shown on a deleted work item.
:type show_on_deleted_work_item: bool
"""
_attribute_map = {
'contribution_id': {'key': 'contributionId', 'type': 'str'},
'height': {'key': 'height', 'type': 'int'},
'inputs': {'key': 'inputs', 'type': '{object}'},
'show_on_deleted_work_item': {'key': 'showOnDeletedWorkItem', 'type': 'bool'}
}
def __init__(self, contribution_id=None, height=None, inputs=None, show_on_deleted_work_item=None):
super(WitContribution, self).__init__()
self.contribution_id = contribution_id
self.height = height
self.inputs = inputs
self.show_on_deleted_work_item = show_on_deleted_work_item
class WorkItemBehavior(Model):
"""WorkItemBehavior.
:param abstract:
:type abstract: bool
:param color:
:type color: str
:param description:
:type description: str
:param fields:
:type fields: list of :class:`WorkItemBehaviorField <azure.devops.v5_1.work_item_tracking.models.WorkItemBehaviorField>`
:param id:
:type id: str
:param inherits:
:type inherits: :class:`WorkItemBehaviorReference <azure.devops.v5_1.work_item_tracking.models.WorkItemBehaviorReference>`
:param name:
:type name: str
:param overriden:
:type overriden: bool
:param rank:
:type rank: int
:param url:
:type url: str
"""
_attribute_map = {
'abstract': {'key': 'abstract', 'type': 'bool'},
'color': {'key': 'color', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
'fields': {'key': 'fields', 'type': '[WorkItemBehaviorField]'},
'id': {'key': 'id', 'type': 'str'},
'inherits': {'key': 'inherits', 'type': 'WorkItemBehaviorReference'},
'name': {'key': 'name', 'type': 'str'},
'overriden': {'key': 'overriden', 'type': 'bool'},
'rank': {'key': 'rank', 'type': 'int'},
'url': {'key': 'url', 'type': 'str'}
}
def __init__(self, abstract=None, color=None, description=None, fields=None, id=None, inherits=None, name=None, overriden=None, rank=None, url=None):
super(WorkItemBehavior, self).__init__()
self.abstract = abstract
self.color = color
self.description = description
self.fields = fields
self.id = id
self.inherits = inherits
self.name = name
self.overriden = overriden
self.rank = rank
self.url = url
class WorkItemBehaviorField(Model):
"""WorkItemBehaviorField.
:param behavior_field_id:
:type behavior_field_id: str
:param id:
:type id: str
:param url:
:type url: str
"""
_attribute_map = {
'behavior_field_id': {'key': 'behaviorFieldId', 'type': 'str'},
'id': {'key': 'id', 'type': 'str'},
'url': {'key': 'url', 'type': 'str'}
}
def __init__(self, behavior_field_id=None, id=None, url=None):
super(WorkItemBehaviorField, self).__init__()
self.behavior_field_id = behavior_field_id
self.id = id
self.url = url
class WorkItemBehaviorReference(Model):
"""WorkItemBehaviorReference.
:param id: The ID of the reference behavior.
:type id: str
:param url: The url of the reference behavior.
:type url: str
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'url': {'key': 'url', 'type': 'str'}
}
def __init__(self, id=None, url=None):
super(WorkItemBehaviorReference, self).__init__()
self.id = id
self.url = url
class WorkItemStateInputModel(Model):
"""WorkItemStateInputModel.
:param color: Color of the state
:type color: str
:param name: Name of the state
:type name: str
:param order: Order in which state should appear
:type order: int
:param state_category: Category of the state
:type state_category: str
"""
_attribute_map = {
'color': {'key': 'color', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'order': {'key': 'order', 'type': 'int'},
'state_category': {'key': 'stateCategory', 'type': 'str'}
}
def __init__(self, color=None, name=None, order=None, state_category=None):
super(WorkItemStateInputModel, self).__init__()
self.color = color
self.name = name
self.order = order
self.state_category = state_category
class WorkItemStateResultModel(Model):
"""WorkItemStateResultModel.
:param color: Work item state color.
:type color: str
:param customization_type: Work item state customization type.
:type customization_type: object
:param hidden: If the Work item state is hidden.
:type hidden: bool
:param id: Id of the Workitemstate.
:type id: str
:param name: Work item state name.
:type name: str
:param order: Work item state order.
:type order: int
    :param state_category: Work item state category.
:type state_category: str
:param url: Work item state url.
:type url: str
"""
_attribute_map = {
'color': {'key': 'color', 'type': 'str'},
'customization_type': {'key': 'customizationType', 'type': 'object'},
'hidden': {'key': 'hidden', 'type': 'bool'},
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'order': {'key': 'order', 'type': 'int'},
'state_category': {'key': 'stateCategory', 'type': 'str'},
'url': {'key': 'url', 'type': 'str'}
}
def __init__(self, color=None, customization_type=None, hidden=None, id=None, name=None, order=None, state_category=None, url=None):
super(WorkItemStateResultModel, self).__init__()
self.color = color
self.customization_type = customization_type
self.hidden = hidden
self.id = id
self.name = name
self.order = order
self.state_category = state_category
self.url = url
class WorkItemTypeBehavior(Model):
"""WorkItemTypeBehavior.
:param behavior: Reference to the behavior of a work item type
:type behavior: :class:`WorkItemBehaviorReference <azure.devops.v5_1.work_item_tracking.models.WorkItemBehaviorReference>`
:param is_default: If true the work item type is the default work item type in the behavior
:type is_default: bool
:param url: URL of the work item type behavior
:type url: str
"""
_attribute_map = {
'behavior': {'key': 'behavior', 'type': 'WorkItemBehaviorReference'},
'is_default': {'key': 'isDefault', 'type': 'bool'},
'url': {'key': 'url', 'type': 'str'}
}
def __init__(self, behavior=None, is_default=None, url=None):
super(WorkItemTypeBehavior, self).__init__()
self.behavior = behavior
self.is_default = is_default
self.url = url
class WorkItemTypeModel(Model):
"""WorkItemTypeModel.
:param behaviors:
:type behaviors: list of :class:`WorkItemTypeBehavior <azure.devops.v5_1.work_item_tracking.models.WorkItemTypeBehavior>`
:param class_:
:type class_: object
:param color:
:type color: str
:param description:
:type description: str
:param icon:
:type icon: str
:param id:
:type id: str
:param inherits: Parent WIT Id/Internal ReferenceName that it inherits from
:type inherits: str
:param is_disabled:
:type is_disabled: bool
:param layout:
:type layout: :class:`FormLayout <azure.devops.v5_1.work_item_tracking.models.FormLayout>`
:param name:
:type name: str
:param states:
:type states: list of :class:`WorkItemStateResultModel <azure.devops.v5_1.work_item_tracking.models.WorkItemStateResultModel>`
:param url:
:type url: str
"""
_attribute_map = {
'behaviors': {'key': 'behaviors', 'type': '[WorkItemTypeBehavior]'},
'class_': {'key': 'class', 'type': 'object'},
'color': {'key': 'color', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
'icon': {'key': 'icon', 'type': 'str'},
'id': {'key': 'id', 'type': 'str'},
'inherits': {'key': 'inherits', 'type': 'str'},
'is_disabled': {'key': 'isDisabled', 'type': 'bool'},
'layout': {'key': 'layout', 'type': 'FormLayout'},
'name': {'key': 'name', 'type': 'str'},
'states': {'key': 'states', 'type': '[WorkItemStateResultModel]'},
'url': {'key': 'url', 'type': 'str'}
}
def __init__(self, behaviors=None, class_=None, color=None, description=None, icon=None, id=None, inherits=None, is_disabled=None, layout=None, name=None, states=None, url=None):
super(WorkItemTypeModel, self).__init__()
self.behaviors = behaviors
self.class_ = class_
self.color = color
self.description = description
self.icon = icon
self.id = id
self.inherits = inherits
self.is_disabled = is_disabled
self.layout = layout
self.name = name
self.states = states
self.url = url
class PickList(PickListMetadata):
"""PickList.
:param id: ID of the picklist
:type id: str
:param is_suggested: Indicates whether items outside of suggested list are allowed
:type is_suggested: bool
:param name: Name of the picklist
:type name: str
:param type: DataType of picklist
:type type: str
:param url: Url of the picklist
:type url: str
:param items: A list of PicklistItemModel.
:type items: list of str
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'is_suggested': {'key': 'isSuggested', 'type': 'bool'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'url': {'key': 'url', 'type': 'str'},
'items': {'key': 'items', 'type': '[str]'}
}
def __init__(self, id=None, is_suggested=None, name=None, type=None, url=None, items=None):
super(PickList, self).__init__(id=id, is_suggested=is_suggested, name=name, type=type, url=url)
self.items = items
__all__ = [
'AddProcessWorkItemTypeFieldRequest',
'Control',
'CreateProcessModel',
'CreateProcessRuleRequest',
'CreateProcessWorkItemTypeRequest',
'Extension',
'FieldModel',
'FieldRuleModel',
'FormLayout',
'Group',
'HideStateModel',
'Page',
'PickListMetadata',
'ProcessBehavior',
'ProcessBehaviorCreateRequest',
'ProcessBehaviorField',
'ProcessBehaviorReference',
'ProcessBehaviorUpdateRequest',
'ProcessInfo',
'ProcessModel',
'ProcessProperties',
'ProcessRule',
'ProcessWorkItemType',
'ProcessWorkItemTypeField',
'ProjectReference',
'RuleAction',
'RuleActionModel',
'RuleCondition',
'RuleConditionModel',
'Section',
'UpdateProcessModel',
'UpdateProcessRuleRequest',
'UpdateProcessWorkItemTypeFieldRequest',
'UpdateProcessWorkItemTypeRequest',
'WitContribution',
'WorkItemBehavior',
'WorkItemBehaviorField',
'WorkItemBehaviorReference',
'WorkItemStateInputModel',
'WorkItemStateResultModel',
'WorkItemTypeBehavior',
'WorkItemTypeModel',
'PickList',
]
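# Usage sketch: these models are plain msrest-style value objects, so request bodies can
# be built by passing keyword arguments that mirror each class's _attribute_map, e.g.:
#
#     update = UpdateProcessModel(name='Agile Copy', is_default=False, is_enabled=True)
#
# The 'key' entries in _attribute_map (e.g. 'isDefault') are the camelCase wire names
# used when an instance is serialized.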
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from edward.models import Bernoulli, Normal
from edward.util import get_descendants
class test_get_descendants_class(tf.test.TestCase):
def test_v_structure(self):
"""a -> b -> e <- d <- c"""
with self.test_session():
a = Normal(0.0, 1.0)
b = Normal(a, 1.0)
c = Normal(0.0, 1.0)
d = Normal(c, 1.0)
e = Normal(b * d, 1.0)
self.assertEqual(set(get_descendants(a)), set([b, e]))
self.assertEqual(get_descendants(b), [e])
self.assertEqual(set(get_descendants(c)), set([d, e]))
self.assertEqual(get_descendants(d), [e])
self.assertEqual(get_descendants(e), [])
def test_a_structure(self):
"""e <- d <- a -> b -> c"""
with self.test_session():
a = Normal(0.0, 1.0)
b = Normal(a, 1.0)
c = Normal(b, 1.0)
d = Normal(a, 1.0)
e = Normal(d, 1.0)
self.assertEqual(set(get_descendants(a)), set([b, c, d, e]))
self.assertEqual(get_descendants(b), [c])
self.assertEqual(get_descendants(c), [])
self.assertEqual(get_descendants(d), [e])
self.assertEqual(get_descendants(e), [])
def test_chain_structure(self):
"""a -> b -> c -> d -> e"""
with self.test_session():
a = Normal(0.0, 1.0)
b = Normal(a, 1.0)
c = Normal(b, 1.0)
d = Normal(c, 1.0)
e = Normal(d, 1.0)
self.assertEqual(set(get_descendants(a)), set([b, c, d, e]))
self.assertEqual(set(get_descendants(b)), set([c, d, e]))
self.assertEqual(set(get_descendants(c)), set([d, e]))
self.assertEqual(get_descendants(d), [e])
self.assertEqual(get_descendants(e), [])
def test_tensor(self):
with self.test_session():
a = Normal(0.0, 1.0)
b = tf.constant(2.0)
c = a + b
d = Normal(c, 1.0)
self.assertEqual(get_descendants(a), [d])
self.assertEqual(get_descendants(b), [d])
self.assertEqual(get_descendants(c), [d])
self.assertEqual(get_descendants(d), [])
def test_control_flow(self):
with self.test_session():
a = Bernoulli(0.5)
b = Normal(0.0, 1.0)
c = tf.constant(0.0)
d = tf.cond(tf.cast(a, tf.bool), lambda: b, lambda: c)
e = Normal(d, 1.0)
self.assertEqual(get_descendants(a), [e])
self.assertEqual(get_descendants(b), [e])
self.assertEqual(get_descendants(c), [e])
self.assertEqual(get_descendants(d), [e])
self.assertEqual(get_descendants(e), [])
def test_scan(self):
"""copied from test_chain_structure"""
def cumsum(x):
return tf.scan(lambda a, x: a + x, x)
with self.test_session():
a = Normal(tf.ones([3]), tf.ones([3]))
b = Normal(cumsum(a), tf.ones([3]))
c = Normal(cumsum(b), tf.ones([3]))
d = Normal(cumsum(c), tf.ones([3]))
e = Normal(cumsum(d), tf.ones([3]))
self.assertEqual(set(get_descendants(a)), set([b, c, d, e]))
self.assertEqual(set(get_descendants(b)), set([c, d, e]))
self.assertEqual(set(get_descendants(c)), set([d, e]))
self.assertEqual(get_descendants(d), [e])
self.assertEqual(get_descendants(e), [])
if __name__ == '__main__':
tf.test.main()
|
# Copyright 2017 Google Inc.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Utility library for working with protobufs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from google.protobuf.internal import api_implementation
def uses_fast_cpp_protos_or_die():
if api_implementation.Type() != 'cpp':
raise ValueError('Expected to be using C++ protobuf implementation '
'(api_implementation.Type() == "cpp") but it is {}'.format(
api_implementation.Type()))
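# Usage sketch: callers that need the faster C++ protobuf backend can fail fast at
# import time (the import path below is hypothetical):
#
#     from some_package.proto_utils import uses_fast_cpp_protos_or_die
#     uses_fast_cpp_protos_or_die()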
|
import os
import pytest
import sqlalchemy as sa
from libweasyl.configuration import configure_libweasyl
from libweasyl.models.meta import registry
from libweasyl.models.tables import metadata
from libweasyl.test.common import NotFound
from libweasyl.test.common import media_link_formatter
from libweasyl import cache
engine = sa.create_engine(os.environ.get('WEASYL_TEST_SQLALCHEMY_URL', 'postgresql+psycopg2cffi:///weasyl_test'))
sessionmaker = sa.orm.scoped_session(sa.orm.sessionmaker(bind=engine))
@pytest.fixture(scope='session', autouse=True)
def setup(request):
db = sessionmaker()
db.execute('DROP SCHEMA public CASCADE')
db.execute('CREATE SCHEMA public')
db.execute('CREATE EXTENSION HSTORE')
db.commit()
metadata.create_all(engine)
cache.region.configure('dogpile.cache.memory')
@pytest.fixture(autouse=True)
def staticdir(tmpdir):
tmpdir = tmpdir.join('libweasyl-staticdir')
configure_libweasyl(
dbsession=sessionmaker,
not_found_exception=NotFound,
base_file_path=tmpdir.strpath,
staff_config_dict={},
media_link_formatter_callback=media_link_formatter.format_media_link,
)
return tmpdir
@pytest.fixture
def db(request):
db = sessionmaker()
# If a previous test has failed due to an SQL problem, the session will be
# in a broken state, requiring a rollback. It's not harmful to
# unconditionally rollback, so just do that.
db.rollback()
def tear_down():
"Clears all rows from the test database."
for k, cls in registry.items():
if not k[0].isupper():
continue
db.query(cls).delete()
db.flush()
db.commit()
request.addfinalizer(tear_down)
return db
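# Sketch of a test using the fixtures above: `db` yields a SQLAlchemy session bound to
# the test engine, and any rows a test creates are removed by the tear_down finalizer.
#
#     def test_session_is_usable(db):
#         assert db.execute('SELECT 1').scalar() == 1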
|
#!/usr/bin/env python
import time
import sys
import iothub_client
from iothub_client import IoTHubClient, IoTHubClientError, IoTHubTransportProvider, IoTHubClientResult
from iothub_client import IoTHubMessage, IoTHubMessageDispositionResult, IoTHubError
# String containing Hostname, Device Id & Device Key in the format:
# "HostName=<host_name>;DeviceId=<device_id>;SharedAccessKey=<device_key>"
CONNECTION_STRING = "[Device Connection String]"
MSG_TXT = "{\"msg\": \"%s\"}"
RECEIVE_CONTEXT = 0
SEND_CONTEXT = 0
SLEEP_TIME = 15
def receive_message_callback(message, user_context):
message_buffer = message.get_bytearray()
size = len(message_buffer)
print ( "Received Message: data = \"%s\" size=%d" % (message_buffer[:size].decode('utf-8'), size) )
return IoTHubMessageDispositionResult.ACCEPTED
def send_confirmation_callback(message, result, user_context):
print ( "Confirmation received for message with result = %s" % result )
def iothub_client_sample_run():
try:
# prepare iothub client
client = IoTHubClient(CONNECTION_STRING, IoTHubTransportProvider.MQTT)
# to enable MQTT logging set to 1
client.set_option("logtrace", 0)
client.set_message_callback(receive_message_callback, RECEIVE_CONTEXT)
while True:
            # send one message every SLEEP_TIME seconds
print ( "IoTHubClient sending message" )
msg_txt_formatted = MSG_TXT % "This is a test"
message = IoTHubMessage(msg_txt_formatted)
client.send_event_async(message, send_confirmation_callback, SEND_CONTEXT)
print ( "IoTHubClient.send_event_async accepted message for transmission to IoT Hub." )
time.sleep(SLEEP_TIME)
except IoTHubError as iothub_error:
print ( "Unexpected error %s from IoTHub" % iothub_error )
return
except KeyboardInterrupt:
print ( "IoTHubClient sample stopped" )
if __name__ == '__main__':
print ( "\nPython %s" % sys.version )
print ( "IoT Hub Client for Python" )
iothub_client_sample_run()
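# Expected console output, roughly, once CONNECTION_STRING holds a real device
# connection string (the exact confirmation text depends on the SDK version):
#
#     IoTHubClient sending message
#     IoTHubClient.send_event_async accepted message for transmission to IoT Hub.
#     Confirmation received for message with result = OK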
|
import sys
time = input().strip()
splitted = time.split(':')
hours_12 = int(splitted[0])
mins = splitted[1]
secs = splitted[2][:2]
is_pm = splitted[2].endswith("PM")
if (is_pm):
if (hours_12 >= 1 and hours_12 < 12): # between 1pm and 11:59pm
hours_12 += 12
else:
if (hours_12 == 12):
hours_12 -= 12
print(':'.join(list((str(hours_12).zfill(2), mins, secs))))
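# Worked examples:
#   "07:05:45PM" -> hours_12 becomes 19          -> prints "19:05:45"
#   "12:40:22AM" -> hours_12 becomes 0 (12 - 12) -> prints "00:40:22"
#   "12:15:39PM" -> hours_12 stays 12            -> prints "12:15:39"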
|
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
from resource_management import *
def setup_hadoop():
"""
Setup hadoop files and directories
"""
import params
Execute("/bin/echo 0 > /selinux/enforce",
only_if="test -f /selinux/enforce"
)
install_snappy()
#directories
if params.has_namenode:
Directory(params.hdfs_log_dir_prefix,
recursive=True,
owner='root',
group=params.user_group,
mode=0775
)
Directory(params.hadoop_pid_dir_prefix,
recursive=True,
owner='root',
group='root'
)
    # this isn't needed with stack 1
Directory(params.hadoop_tmp_dir,
recursive=True,
owner=params.hdfs_user,
)
#files
if params.security_enabled:
tc_owner = "root"
else:
tc_owner = params.hdfs_user
File(os.path.join(params.hadoop_conf_dir, 'commons-logging.properties'),
owner=tc_owner,
content=Template('commons-logging.properties.j2')
)
health_check_template = "health_check-v2" #for stack 1 use 'health_check'
File(os.path.join(params.hadoop_conf_dir, "health_check"),
owner=tc_owner,
content=Template(health_check_template + ".j2")
)
log4j_filename = os.path.join(params.hadoop_conf_dir, "log4j.properties")
if (params.log4j_props != None):
File(log4j_filename,
mode=0644,
group=params.user_group,
owner=params.hdfs_user,
content=params.log4j_props
)
elif (os.path.exists(format("{params.hadoop_conf_dir}/log4j.properties"))):
File(log4j_filename,
mode=0644,
group=params.user_group,
owner=params.hdfs_user,
)
File(os.path.join(params.hadoop_conf_dir, "hadoop-metrics2.properties"),
owner=params.hdfs_user,
content=Template("hadoop-metrics2.properties.j2")
)
def setup_database():
"""
Load DB
"""
import params
db_driver_dload_cmd = ""
environment = {
"no_proxy": format("{ambari_server_hostname}")
}
if params.server_db_name == 'oracle' and params.oracle_driver_url != "":
db_driver_dload_cmd = format(
"curl -kf -x \"\" \
--retry 5 {oracle_driver_symlink_url} -o {hadoop_lib_home}/{db_driver_filename}",)
elif params.server_db_name == 'mysql' and params.mysql_driver_url != "":
db_driver_dload_cmd = format(
"curl -kf -x \"\" \
--retry 5 {mysql_driver_symlink_url} -o {hadoop_lib_home}/{db_driver_filename}")
if db_driver_dload_cmd:
Execute(db_driver_dload_cmd,
not_if =format("test -e {hadoop_lib_home}/{db_driver_filename}"),
environment = environment
)
def setup_configs():
"""
Creates configs for services HDFS mapred
"""
import params
if params.has_namenode:
File(params.task_log4j_properties_location,
content=StaticFile("task-log4j.properties"),
mode=0755
)
if os.path.exists(os.path.join(params.hadoop_conf_dir, 'configuration.xsl')):
File(os.path.join(params.hadoop_conf_dir, 'configuration.xsl'),
owner=params.hdfs_user,
group=params.user_group
)
if os.path.exists(os.path.join(params.hadoop_conf_dir, 'masters')):
File(os.path.join(params.hadoop_conf_dir, 'masters'),
owner=params.hdfs_user,
group=params.user_group
)
generate_include_file()
def generate_include_file():
import params
if params.has_namenode and params.dfs_hosts and params.has_slaves:
include_hosts_list = params.slave_hosts
File(params.dfs_hosts,
content=Template("include_hosts_list.j2"),
owner=params.hdfs_user,
group=params.user_group
)
def install_snappy():
import params
snappy_so = "libsnappy.so"
so_target_dir_x86 = format("{hadoop_lib_home}/native/Linux-i386-32")
so_target_dir_x64 = format("{hadoop_lib_home}/native/Linux-amd64-64")
so_target_x86 = format("{so_target_dir_x86}/{snappy_so}")
so_target_x64 = format("{so_target_dir_x64}/{snappy_so}")
so_src_dir_x86 = format("{hadoop_home}/lib")
so_src_dir_x64 = format("{hadoop_home}/lib64")
so_src_x86 = format("{so_src_dir_x86}/{snappy_so}")
so_src_x64 = format("{so_src_dir_x64}/{snappy_so}")
if params.has_namenode:
Execute(
format("mkdir -p {so_target_dir_x86}; ln -sf {so_src_x86} {so_target_x86}"))
Execute(
format("mkdir -p {so_target_dir_x64}; ln -sf {so_src_x64} {so_target_x64}"))
def create_javahome_symlink():
if os.path.exists("/usr/jdk/jdk1.6.0_31") and not os.path.exists("/usr/jdk64/jdk1.6.0_31"):
Execute("mkdir -p /usr/jdk64/")
Execute("ln -s /usr/jdk/jdk1.6.0_31 /usr/jdk64/jdk1.6.0_31")
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Transformer Agents.
"""
from typing import Optional
from parlai.core.params import ParlaiParser
from parlai.core.opt import Opt
from parlai.core.agents import Agent
from parlai.utils.torch import padded_3d
from parlai.core.torch_classifier_agent import TorchClassifierAgent
from parlai.core.torch_ranker_agent import TorchRankerAgent
from parlai.core.torch_generator_agent import TorchGeneratorAgent
from parlai.utils.misc import recursive_getattr
from parlai.utils.logging import logging
from .modules import (
TransformerMemNetModel,
TransformerGeneratorModel,
TransformerLinearWrapper,
MixerModel,
MixerGeneratorModel,
)
import torch
def add_common_cmdline_args(parser):
"""
Add common command line args.
"""
parser.add_argument(
'-esz',
'--embedding-size',
type=int,
default=300,
help='Size of all embedding layers. Must be a multiple of --n-heads.',
)
parser.add_argument(
'-nl', '--n-layers', type=int, default=2, help='Number of transformer layers.'
)
parser.add_argument(
'-hid',
'--ffn-size',
type=int,
default=300,
help='Hidden size of the FFN layers',
)
parser.add_argument(
'--dropout',
type=float,
default=0.0,
        help='Dropout used around embeddings and before layer normalizations. '
'This is used in Vaswani 2017 and works well on large datasets.',
)
parser.add_argument(
'--attention-dropout',
type=float,
default=0.0,
help='Dropout used after attention softmax. This is not used in Vaswani 2017.',
)
parser.add_argument(
'--relu-dropout',
type=float,
default=0.0,
help='Dropout used after the ReLU in the FFN. Not used in Vaswani 2017, '
'but used in Tensor2Tensor.',
)
parser.add_argument(
'--n-heads', type=int, default=2, help='Number of multihead attention heads'
)
parser.add_argument(
'--learn-positional-embeddings',
type='bool',
default=False,
help='If off, sinusoidal embeddings are used. If on, position embeddings are '
'learned from scratch.',
)
parser.add_argument('--embeddings-scale', type='bool', default=True)
parser.add_argument(
'--n-positions',
type=int,
default=None,
hidden=True,
help='Number of positional embeddings to learn. Defaults '
'to truncate or 1024 if not provided.',
)
parser.add_argument(
'--n-segments',
type=int,
default=0,
help='The number of segments that support the model. '
'If zero no segment and no langs_embedding.',
)
parser.add_argument(
'--variant',
choices={'aiayn', 'xlm', 'prelayernorm', 'bart'},
default='aiayn',
help='Chooses locations of layer norms, etc. prelayernorm '
'is used to match some fairseq models',
recommended='xlm',
)
parser.add_argument(
'--activation',
choices={'relu', 'gelu'},
default='relu',
help='Nonlinear activation to use. AIAYN uses relu, but '
'more recent papers prefer gelu.',
recommended='gelu',
)
parser.add_argument(
'--output-scaling',
type=float,
default=1.0,
help='scale the output of every transformer by this quantity.',
)
parser.add_argument(
'--share-word-embeddings',
type='bool',
default=True,
        help='Share word embeddings table for candidate and context '
'in the memory network',
)
parser.add_argument(
'-nel',
'--n-encoder-layers',
type=int,
default=-1,
        help='This will override the n-layers for asymmetrical transformers',
)
parser.add_argument(
'-ndl',
'--n-decoder-layers',
type=int,
default=-1,
        help='This will override the n-layers for asymmetrical transformers',
)
parser.add_argument(
'--model-parallel',
type='bool',
default=False,
help='Shard the layers across multiple GPUs.',
)
class Transformer(Agent):
"""
Placeholder Transformer Agent.
Placeholder class, which just throws an error telling the user to specify whether
they want the ranker or the generator.
"""
def __init__(self, opt, shared=None):
raise RuntimeError(
"`--model transformer` is not a valid choice. Please select either "
"`--model transformer/ranker` or `--model transformer/generator"
)
class TransformerRankerAgent(TorchRankerAgent):
"""
Transformer Ranker Agent.
Implementation of a TorchRankerAgent, where the model is a Transformer
"""
@classmethod
def add_cmdline_args(
cls, parser: ParlaiParser, partial_opt: Optional[Opt] = None
) -> ParlaiParser:
"""
Add command-line arguments specifically for this agent.
"""
super().add_cmdline_args(parser, partial_opt=partial_opt)
agent = parser.add_argument_group('Transformer Arguments')
add_common_cmdline_args(agent)
# memory and knowledge arguments
agent.add_argument(
'--use-memories',
type='bool',
default=False,
help='use memories: must implement the function '
'`_vectorize_memories` to use this',
)
agent.add_argument(
'--wrap-memory-encoder',
type='bool',
default=False,
help='wrap memory encoder with MLP',
)
agent.add_argument(
'--memory-attention',
type=str,
default='sqrt',
choices=['cosine', 'dot', 'sqrt'],
help='similarity for basic attention mechanism '
'when using transformer to encode memories',
)
# model specific arguments
agent.add_argument('--normalize-sent-emb', type='bool', default=False)
agent.add_argument('--share-encoders', type='bool', default=True)
parser.add_argument(
'--share-word-embeddings',
type='bool',
default=True,
            help='Share word embeddings table for candidate and context '
'in the memory network',
)
agent.add_argument(
'--learn-embeddings', type='bool', default=True, help='learn embeddings'
)
agent.add_argument(
'--data-parallel',
type='bool',
default=False,
help='use model in data parallel, requires ' 'multiple gpus',
)
agent.add_argument(
'--reduction-type',
type=str,
default='mean',
choices=['first', 'max', 'mean'],
help='Type of reduction at the end of transformer',
)
parser.set_defaults(learningrate=0.0001, optimizer='adamax', truncate=1024)
cls.dictionary_class().add_cmdline_args(parser, partial_opt=partial_opt)
return agent
def _score(self, output, cands):
if cands.dim() == 2:
return torch.matmul(output, cands.t())
elif cands.dim() == 3:
return torch.bmm(output.unsqueeze(1), cands.transpose(1, 2)).squeeze(1)
else:
raise RuntimeError(
'Unexpected candidate dimensions {}' ''.format(cands.dim())
)
def build_model(self, states=None):
"""
Build and return model.
"""
model = MixerModel(self.opt, self.dict)
if self.opt['embedding_type'] != 'random':
self._copy_embeddings(model.embeddings.weight, self.opt['embedding_type'])
return model
def batchify(self, obs_batch, sort=False):
"""
Override so that we can add memories to the Batch object.
"""
batch = super().batchify(obs_batch, sort)
if self.opt['use_memories']:
valid_obs = [(i, ex) for i, ex in enumerate(obs_batch) if self.is_valid(ex)]
valid_inds, exs = zip(*valid_obs)
mems = None
if any('memory_vecs' in ex for ex in exs):
mems = [ex.get('memory_vecs', None) for ex in exs]
batch.memory_vecs = mems
return batch
def _vectorize_memories(self, obs):
# TODO: move this to Torch Ranker Agent
raise NotImplementedError(
'Abstract class: user must implement this function to use memories'
)
def vectorize(self, *args, **kwargs):
"""
Override to include vectorization of memories.
"""
kwargs['add_start'] = False
kwargs['add_end'] = False
obs = super().vectorize(*args, **kwargs)
if self.opt['use_memories']:
obs = self._vectorize_memories(obs)
return obs
def encode_candidates(self, padded_cands):
"""
Encode candidates.
"""
_, cands = self.model(xs=None, mems=None, cands=padded_cands)
return cands
def score_candidates(self, batch, cand_vecs, cand_encs=None):
"""
Score candidates.
"""
# convoluted check that not all memories are empty
if (
self.opt['use_memories']
and batch.memory_vecs is not None
and sum(len(m) for m in batch.memory_vecs)
):
mems = padded_3d(batch.memory_vecs, pad_idx=self.NULL_IDX)
else:
mems = None
if cand_encs is not None:
# we pre-encoded the candidates, do not re-encode here
cand_vecs = None
context_h, cands_h = self.model(xs=batch.text_vec, mems=mems, cands=cand_vecs)
if cand_encs is not None:
cands_h = cand_encs
scores = self._score(context_h, cands_h)
return scores
class TransformerGeneratorAgent(TorchGeneratorAgent):
"""
TransformerGeneratorAgent.
Implementation of TorchGeneratorAgent, where the model is a Transformer
"""
@classmethod
def add_cmdline_args(
cls, parser: ParlaiParser, partial_opt: Optional[Opt] = None
) -> ParlaiParser:
"""
Add command-line arguments specifically for this agent.
"""
agent = parser.add_argument_group('Transformer Arguments')
add_common_cmdline_args(agent)
cls.dictionary_class().add_cmdline_args(parser, partial_opt=partial_opt)
super().add_cmdline_args(parser, partial_opt=partial_opt)
return agent
def build_model(self, states=None):
"""
Build and return model.
"""
model = MixerGeneratorModel(self.opt, self.dict)
if self.opt['embedding_type'] != 'random':
self._copy_embeddings(
model.encoder.embeddings.weight, self.opt['embedding_type']
)
return model
def _resize_token_embeddings(self, state_dict, msg=None):
"""
        Resize the token embeddings when adding extra special tokens.
"""
# map extra special tokens carefully
new_size = self.model.embeddings.weight.size()[0]
orig_size = state_dict['embeddings.weight'].size()[0]
logging.info(f'Resizing token embeddings from {orig_size} to {new_size}')
if new_size <= orig_size:
# new size should be greater than original size,
# as we are adding special tokens
raise RuntimeError(msg)
for emb_weights in [
'embeddings.weight',
'encoder.embeddings.weight',
'decoder.embeddings.weight',
]:
# get new_embs
old_embs = state_dict[emb_weights]
new_embs = recursive_getattr(self.model, emb_weights).to(old_embs.device)
# copy over old weights
new_embs.data[:orig_size, :] = old_embs.data[:orig_size, :]
# reset in state dict
state_dict[emb_weights] = new_embs
return state_dict
class TransformerClassifierAgent(TorchClassifierAgent):
"""
Classifier based on Transformer.
"""
@classmethod
def add_cmdline_args(
cls, parser: ParlaiParser, partial_opt: Optional[Opt] = None
) -> ParlaiParser:
TransformerRankerAgent.add_cmdline_args(
parser, partial_opt=partial_opt
) # add transformer args
super().add_cmdline_args(parser, partial_opt=partial_opt)
parser.add_argument(
'--load-from-pretrained-ranker',
type='bool',
default=False,
help='load model from base transformer ranking model '
'(used for pretraining)',
)
parser.set_defaults(reduction_type='first')
return parser
def build_model(self):
num_classes = len(self.class_list)
self.base_model = MixerModel(self.opt, self.dict)
return TransformerLinearWrapper(self.base_model.context_encoder, num_classes)
def vectorize(self, *args, **kwargs):
"""
Add the start and end token to the text.
"""
kwargs['add_start'] = True
kwargs['add_end'] = True
obs = super().vectorize(*args, **kwargs)
return obs
def _set_text_vec(self, *args, **kwargs):
"""
Add the start and end token to the text.
"""
obs = super()._set_text_vec(*args, **kwargs)
if 'text_vec' in obs and 'added_start_end' not in obs:
obs.force_set(
'text_vec', self._add_start_end_tokens(obs['text_vec'], True, True)
)
obs['added_start_end'] = True
# check truncation after adding start end tokens
if obs.get('text_vec') is not None:
truncated_vec = self._check_truncate(
obs['text_vec'], self.text_truncate, True
)
obs.force_set('text_vec', torch.LongTensor(truncated_vec))
return obs
def score(self, batch):
return self.model(batch.text_vec)
def load_state_dict(self, state_dict):
"""
Load the state dict into model.
This is easily overridable to facilitate transfer of state dicts.
"""
if self.is_finetune and self.opt['load_from_pretrained_ranker']:
self.base_model.load_state_dict(state_dict, strict=False)
else:
self.model.load_state_dict(state_dict)
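# Usage sketch for the memory path: passing `--use-memories True` requires a subclass
# that implements `_vectorize_memories`, which `vectorize` calls above. Assuming each
# observation carries a 'memories' list of strings (an assumption, not guaranteed by
# this file), a minimal implementation could look like:
#
#     class MemoryMixerRankerAgent(TransformerRankerAgent):
#         def _vectorize_memories(self, obs):
#             if obs.get('memories'):
#                 obs['memory_vecs'] = [self.dict.txt2vec(m) for m in obs['memories']]
#             return obs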
|
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: tensorflow/core/protobuf/config.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from tensorflow.core.framework import cost_graph_pb2 as tensorflow_dot_core_dot_framework_dot_cost__graph__pb2
from tensorflow.core.framework import graph_pb2 as tensorflow_dot_core_dot_framework_dot_graph__pb2
from tensorflow.core.framework import step_stats_pb2 as tensorflow_dot_core_dot_framework_dot_step__stats__pb2
from tensorflow.core.protobuf import debug_pb2 as tensorflow_dot_core_dot_protobuf_dot_debug__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='tensorflow/core/protobuf/config.proto',
package='tensorflow',
syntax='proto3',
  serialized_pb=_b('\n%tensorflow/core/protobuf/config.proto\x12\ntensorflow\x1a*tensorflow/core/framework/cost_graph.proto\x1a%tensorflow/core/framework/graph.proto\x1a*tensorflow/core/framework/step_stats.proto\x1a$tensorflow/core/protobuf/debug.proto\"\xa1\x01\n\nGPUOptions\x12\'\n\x1fper_process_gpu_memory_fraction\x18\x01 \x01(\x01\x12\x16\n\x0e\x61llocator_type\x18\x02 \x01(\t\x12\x1f\n\x17\x64\x65\x66\x65rred_deletion_bytes\x18\x03 \x01(\x03\x12\x14\n\x0c\x61llow_growth\x18\x04 \x01(\x08\x12\x1b\n\x13visible_device_list\x18\x05 \x01(\t\"\xdf\x02\n\x10OptimizerOptions\x12+\n#do_common_subexpression_elimination\x18\x01 \x01(\x08\x12\x1b\n\x13\x64o_constant_folding\x18\x02 \x01(\x08\x12\x1c\n\x14\x64o_function_inlining\x18\x04 \x01(\x08\x12\x35\n\topt_level\x18\x03 \x01(\x0e\x32\".tensorflow.OptimizerOptions.Level\x12\x45\n\x10global_jit_level\x18\x05 \x01(\x0e\x32+.tensorflow.OptimizerOptions.GlobalJitLevel\" \n\x05Level\x12\x06\n\x02L1\x10\x00\x12\x0f\n\x02L0\x10\xff\xff\xff\xff\xff\xff\xff\xff\xff\x01\"C\n\x0eGlobalJitLevel\x12\x0b\n\x07\x44\x45\x46\x41ULT\x10\x00\x12\x10\n\x03OFF\x10\xff\xff\xff\xff\xff\xff\xff\xff\xff\x01\x12\x08\n\x04ON_1\x10\x01\x12\x08\n\x04ON_2\x10\x02\"\xb9\x02\n\x0cGraphOptions\x12\x1e\n\x16\x65nable_recv_scheduling\x18\x02 \x01(\x08\x12\x37\n\x11optimizer_options\x18\x03 \x01(\x0b\x32\x1c.tensorflow.OptimizerOptions\x12\x18\n\x10\x62uild_cost_model\x18\x04 \x01(\x03\x12\x1e\n\x16\x62uild_cost_model_after\x18\t \x01(\x03\x12\x14\n\x0cinfer_shapes\x18\x05 \x01(\x08\x12\x1a\n\x12place_pruned_graph\x18\x06 \x01(\x08\x12 \n\x18\x65nable_bfloat16_sendrecv\x18\x07 \x01(\x08\x12\x15\n\rtimeline_step\x18\x08 \x01(\x05J\x04\x08\x01\x10\x02R%skip_common_subexpression_elimination\",\n\x15ThreadPoolOptionProto\x12\x13\n\x0bnum_threads\x18\x01 \x01(\x05\"2\n\nRPCOptions\x12$\n\x1cuse_rpc_for_inprocess_master\x18\x01 \x01(\x08\"\xd1\x04\n\x0b\x43onfigProto\x12>\n\x0c\x64\x65vice_count\x18\x01 \x03(\x0b\x32(.tensorflow.ConfigProto.DeviceCountEntry\x12$\n\x1cintra_op_parallelism_threads\x18\x02 \x01(\x05\x12$\n\x1cinter_op_parallelism_threads\x18\x05 \x01(\x05\x12\x1f\n\x17use_per_session_threads\x18\t \x01(\x08\x12G\n\x1csession_inter_op_thread_pool\x18\x0c \x03(\x0b\x32!.tensorflow.ThreadPoolOptionProto\x12\x18\n\x10placement_period\x18\x03 \x01(\x05\x12\x16\n\x0e\x64\x65vice_filters\x18\x04 \x03(\t\x12+\n\x0bgpu_options\x18\x06 \x01(\x0b\x32\x16.tensorflow.GPUOptions\x12\x1c\n\x14\x61llow_soft_placement\x18\x07 \x01(\x08\x12\x1c\n\x14log_device_placement\x18\x08 \x01(\x08\x12/\n\rgraph_options\x18\n \x01(\x0b\x32\x18.tensorflow.GraphOptions\x12\x1f\n\x17operation_timeout_in_ms\x18\x0b \x01(\x03\x12+\n\x0brpc_options\x18\r \x01(\x0b\x32\x16.tensorflow.RPCOptions\x1a\x32\n\x10\x44\x65viceCountEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x05:\x02\x38\x01\"\xa5\x02\n\nRunOptions\x12\x36\n\x0btrace_level\x18\x01 \x01(\x0e\x32!.tensorflow.RunOptions.TraceLevel\x12\x15\n\rtimeout_in_ms\x18\x02 \x01(\x03\x12\x1c\n\x14inter_op_thread_pool\x18\x03 \x01(\x05\x12\x1f\n\x17output_partition_graphs\x18\x05 \x01(\x08\x12/\n\rdebug_options\x18\x06 \x01(\x0b\x32\x18.tensorflow.DebugOptions\"R\n\nTraceLevel\x12\x0c\n\x08NO_TRACE\x10\x00\x12\x12\n\x0eSOFTWARE_TRACE\x10\x01\x12\x12\n\x0eHARDWARE_TRACE\x10\x02\x12\x0e\n\nFULL_TRACE\x10\x03J\x04\x08\x04\x10\x05\"\x96\x01\n\x0bRunMetadata\x12)\n\nstep_stats\x18\x01 \x01(\x0b\x32\x15.tensorflow.StepStats\x12,\n\ncost_graph\x18\x02 \x01(\x0b\x32\x18.tensorflow.CostGraphDef\x12.\n\x10partition_graphs\x18\x03 \x03(\x0b\x32\x14.tensorflow.GraphDefB-\n\x18org.tensorflow.frameworkB\x0c\x43onfigProtosP\x01\xf8\x01\x01\x62\x06proto3')
,
dependencies=[tensorflow_dot_core_dot_framework_dot_cost__graph__pb2.DESCRIPTOR,tensorflow_dot_core_dot_framework_dot_graph__pb2.DESCRIPTOR,tensorflow_dot_core_dot_framework_dot_step__stats__pb2.DESCRIPTOR,tensorflow_dot_core_dot_protobuf_dot_debug__pb2.DESCRIPTOR,])
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_OPTIMIZEROPTIONS_LEVEL = _descriptor.EnumDescriptor(
name='Level',
full_name='tensorflow.OptimizerOptions.Level',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='L1', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='L0', index=1, number=-1,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=633,
serialized_end=665,
)
_sym_db.RegisterEnumDescriptor(_OPTIMIZEROPTIONS_LEVEL)
_OPTIMIZEROPTIONS_GLOBALJITLEVEL = _descriptor.EnumDescriptor(
name='GlobalJitLevel',
full_name='tensorflow.OptimizerOptions.GlobalJitLevel',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='DEFAULT', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='OFF', index=1, number=-1,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ON_1', index=2, number=1,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ON_2', index=3, number=2,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=667,
serialized_end=734,
)
_sym_db.RegisterEnumDescriptor(_OPTIMIZEROPTIONS_GLOBALJITLEVEL)
_RUNOPTIONS_TRACELEVEL = _descriptor.EnumDescriptor(
name='TraceLevel',
full_name='tensorflow.RunOptions.TraceLevel',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='NO_TRACE', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='SOFTWARE_TRACE', index=1, number=1,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='HARDWARE_TRACE', index=2, number=2,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='FULL_TRACE', index=3, number=3,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=1952,
serialized_end=2034,
)
_sym_db.RegisterEnumDescriptor(_RUNOPTIONS_TRACELEVEL)
_GPUOPTIONS = _descriptor.Descriptor(
name='GPUOptions',
full_name='tensorflow.GPUOptions',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='per_process_gpu_memory_fraction', full_name='tensorflow.GPUOptions.per_process_gpu_memory_fraction', index=0,
number=1, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='allocator_type', full_name='tensorflow.GPUOptions.allocator_type', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='deferred_deletion_bytes', full_name='tensorflow.GPUOptions.deferred_deletion_bytes', index=2,
number=3, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='allow_growth', full_name='tensorflow.GPUOptions.allow_growth', index=3,
number=4, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='visible_device_list', full_name='tensorflow.GPUOptions.visible_device_list', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=219,
serialized_end=380,
)
_OPTIMIZEROPTIONS = _descriptor.Descriptor(
name='OptimizerOptions',
full_name='tensorflow.OptimizerOptions',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='do_common_subexpression_elimination', full_name='tensorflow.OptimizerOptions.do_common_subexpression_elimination', index=0,
number=1, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='do_constant_folding', full_name='tensorflow.OptimizerOptions.do_constant_folding', index=1,
number=2, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='do_function_inlining', full_name='tensorflow.OptimizerOptions.do_function_inlining', index=2,
number=4, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='opt_level', full_name='tensorflow.OptimizerOptions.opt_level', index=3,
number=3, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='global_jit_level', full_name='tensorflow.OptimizerOptions.global_jit_level', index=4,
number=5, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
_OPTIMIZEROPTIONS_LEVEL,
_OPTIMIZEROPTIONS_GLOBALJITLEVEL,
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=383,
serialized_end=734,
)
_GRAPHOPTIONS = _descriptor.Descriptor(
name='GraphOptions',
full_name='tensorflow.GraphOptions',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='enable_recv_scheduling', full_name='tensorflow.GraphOptions.enable_recv_scheduling', index=0,
number=2, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='optimizer_options', full_name='tensorflow.GraphOptions.optimizer_options', index=1,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='build_cost_model', full_name='tensorflow.GraphOptions.build_cost_model', index=2,
number=4, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='build_cost_model_after', full_name='tensorflow.GraphOptions.build_cost_model_after', index=3,
number=9, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='infer_shapes', full_name='tensorflow.GraphOptions.infer_shapes', index=4,
number=5, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='place_pruned_graph', full_name='tensorflow.GraphOptions.place_pruned_graph', index=5,
number=6, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='enable_bfloat16_sendrecv', full_name='tensorflow.GraphOptions.enable_bfloat16_sendrecv', index=6,
number=7, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='timeline_step', full_name='tensorflow.GraphOptions.timeline_step', index=7,
number=8, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=737,
serialized_end=1050,
)
_THREADPOOLOPTIONPROTO = _descriptor.Descriptor(
name='ThreadPoolOptionProto',
full_name='tensorflow.ThreadPoolOptionProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='num_threads', full_name='tensorflow.ThreadPoolOptionProto.num_threads', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1052,
serialized_end=1096,
)
_RPCOPTIONS = _descriptor.Descriptor(
name='RPCOptions',
full_name='tensorflow.RPCOptions',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='use_rpc_for_inprocess_master', full_name='tensorflow.RPCOptions.use_rpc_for_inprocess_master', index=0,
number=1, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1098,
serialized_end=1148,
)
_CONFIGPROTO_DEVICECOUNTENTRY = _descriptor.Descriptor(
name='DeviceCountEntry',
full_name='tensorflow.ConfigProto.DeviceCountEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='tensorflow.ConfigProto.DeviceCountEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='value', full_name='tensorflow.ConfigProto.DeviceCountEntry.value', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')),
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1694,
serialized_end=1744,
)
_CONFIGPROTO = _descriptor.Descriptor(
name='ConfigProto',
full_name='tensorflow.ConfigProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='device_count', full_name='tensorflow.ConfigProto.device_count', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='intra_op_parallelism_threads', full_name='tensorflow.ConfigProto.intra_op_parallelism_threads', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='inter_op_parallelism_threads', full_name='tensorflow.ConfigProto.inter_op_parallelism_threads', index=2,
number=5, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='use_per_session_threads', full_name='tensorflow.ConfigProto.use_per_session_threads', index=3,
number=9, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='session_inter_op_thread_pool', full_name='tensorflow.ConfigProto.session_inter_op_thread_pool', index=4,
number=12, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='placement_period', full_name='tensorflow.ConfigProto.placement_period', index=5,
number=3, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='device_filters', full_name='tensorflow.ConfigProto.device_filters', index=6,
number=4, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='gpu_options', full_name='tensorflow.ConfigProto.gpu_options', index=7,
number=6, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='allow_soft_placement', full_name='tensorflow.ConfigProto.allow_soft_placement', index=8,
number=7, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='log_device_placement', full_name='tensorflow.ConfigProto.log_device_placement', index=9,
number=8, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='graph_options', full_name='tensorflow.ConfigProto.graph_options', index=10,
number=10, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='operation_timeout_in_ms', full_name='tensorflow.ConfigProto.operation_timeout_in_ms', index=11,
number=11, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='rpc_options', full_name='tensorflow.ConfigProto.rpc_options', index=12,
number=13, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[_CONFIGPROTO_DEVICECOUNTENTRY, ],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1151,
serialized_end=1744,
)
_RUNOPTIONS = _descriptor.Descriptor(
name='RunOptions',
full_name='tensorflow.RunOptions',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='trace_level', full_name='tensorflow.RunOptions.trace_level', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='timeout_in_ms', full_name='tensorflow.RunOptions.timeout_in_ms', index=1,
number=2, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='inter_op_thread_pool', full_name='tensorflow.RunOptions.inter_op_thread_pool', index=2,
number=3, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='output_partition_graphs', full_name='tensorflow.RunOptions.output_partition_graphs', index=3,
number=5, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='debug_options', full_name='tensorflow.RunOptions.debug_options', index=4,
number=6, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
_RUNOPTIONS_TRACELEVEL,
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1747,
serialized_end=2040,
)
_RUNMETADATA = _descriptor.Descriptor(
name='RunMetadata',
full_name='tensorflow.RunMetadata',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='step_stats', full_name='tensorflow.RunMetadata.step_stats', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='cost_graph', full_name='tensorflow.RunMetadata.cost_graph', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='partition_graphs', full_name='tensorflow.RunMetadata.partition_graphs', index=2,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2043,
serialized_end=2193,
)
_OPTIMIZEROPTIONS.fields_by_name['opt_level'].enum_type = _OPTIMIZEROPTIONS_LEVEL
_OPTIMIZEROPTIONS.fields_by_name['global_jit_level'].enum_type = _OPTIMIZEROPTIONS_GLOBALJITLEVEL
_OPTIMIZEROPTIONS_LEVEL.containing_type = _OPTIMIZEROPTIONS
_OPTIMIZEROPTIONS_GLOBALJITLEVEL.containing_type = _OPTIMIZEROPTIONS
_GRAPHOPTIONS.fields_by_name['optimizer_options'].message_type = _OPTIMIZEROPTIONS
_CONFIGPROTO_DEVICECOUNTENTRY.containing_type = _CONFIGPROTO
_CONFIGPROTO.fields_by_name['device_count'].message_type = _CONFIGPROTO_DEVICECOUNTENTRY
_CONFIGPROTO.fields_by_name['session_inter_op_thread_pool'].message_type = _THREADPOOLOPTIONPROTO
_CONFIGPROTO.fields_by_name['gpu_options'].message_type = _GPUOPTIONS
_CONFIGPROTO.fields_by_name['graph_options'].message_type = _GRAPHOPTIONS
_CONFIGPROTO.fields_by_name['rpc_options'].message_type = _RPCOPTIONS
_RUNOPTIONS.fields_by_name['trace_level'].enum_type = _RUNOPTIONS_TRACELEVEL
_RUNOPTIONS.fields_by_name['debug_options'].message_type = tensorflow_dot_core_dot_protobuf_dot_debug__pb2._DEBUGOPTIONS
_RUNOPTIONS_TRACELEVEL.containing_type = _RUNOPTIONS
_RUNMETADATA.fields_by_name['step_stats'].message_type = tensorflow_dot_core_dot_framework_dot_step__stats__pb2._STEPSTATS
_RUNMETADATA.fields_by_name['cost_graph'].message_type = tensorflow_dot_core_dot_framework_dot_cost__graph__pb2._COSTGRAPHDEF
_RUNMETADATA.fields_by_name['partition_graphs'].message_type = tensorflow_dot_core_dot_framework_dot_graph__pb2._GRAPHDEF
DESCRIPTOR.message_types_by_name['GPUOptions'] = _GPUOPTIONS
DESCRIPTOR.message_types_by_name['OptimizerOptions'] = _OPTIMIZEROPTIONS
DESCRIPTOR.message_types_by_name['GraphOptions'] = _GRAPHOPTIONS
DESCRIPTOR.message_types_by_name['ThreadPoolOptionProto'] = _THREADPOOLOPTIONPROTO
DESCRIPTOR.message_types_by_name['RPCOptions'] = _RPCOPTIONS
DESCRIPTOR.message_types_by_name['ConfigProto'] = _CONFIGPROTO
DESCRIPTOR.message_types_by_name['RunOptions'] = _RUNOPTIONS
DESCRIPTOR.message_types_by_name['RunMetadata'] = _RUNMETADATA
GPUOptions = _reflection.GeneratedProtocolMessageType('GPUOptions', (_message.Message,), dict(
DESCRIPTOR = _GPUOPTIONS,
__module__ = 'tensorflow.core.protobuf.config_pb2'
# @@protoc_insertion_point(class_scope:tensorflow.GPUOptions)
))
_sym_db.RegisterMessage(GPUOptions)
OptimizerOptions = _reflection.GeneratedProtocolMessageType('OptimizerOptions', (_message.Message,), dict(
DESCRIPTOR = _OPTIMIZEROPTIONS,
__module__ = 'tensorflow.core.protobuf.config_pb2'
# @@protoc_insertion_point(class_scope:tensorflow.OptimizerOptions)
))
_sym_db.RegisterMessage(OptimizerOptions)
GraphOptions = _reflection.GeneratedProtocolMessageType('GraphOptions', (_message.Message,), dict(
DESCRIPTOR = _GRAPHOPTIONS,
__module__ = 'tensorflow.core.protobuf.config_pb2'
# @@protoc_insertion_point(class_scope:tensorflow.GraphOptions)
))
_sym_db.RegisterMessage(GraphOptions)
ThreadPoolOptionProto = _reflection.GeneratedProtocolMessageType('ThreadPoolOptionProto', (_message.Message,), dict(
DESCRIPTOR = _THREADPOOLOPTIONPROTO,
__module__ = 'tensorflow.core.protobuf.config_pb2'
# @@protoc_insertion_point(class_scope:tensorflow.ThreadPoolOptionProto)
))
_sym_db.RegisterMessage(ThreadPoolOptionProto)
RPCOptions = _reflection.GeneratedProtocolMessageType('RPCOptions', (_message.Message,), dict(
DESCRIPTOR = _RPCOPTIONS,
__module__ = 'tensorflow.core.protobuf.config_pb2'
# @@protoc_insertion_point(class_scope:tensorflow.RPCOptions)
))
_sym_db.RegisterMessage(RPCOptions)
ConfigProto = _reflection.GeneratedProtocolMessageType('ConfigProto', (_message.Message,), dict(
DeviceCountEntry = _reflection.GeneratedProtocolMessageType('DeviceCountEntry', (_message.Message,), dict(
DESCRIPTOR = _CONFIGPROTO_DEVICECOUNTENTRY,
__module__ = 'tensorflow.core.protobuf.config_pb2'
# @@protoc_insertion_point(class_scope:tensorflow.ConfigProto.DeviceCountEntry)
))
,
DESCRIPTOR = _CONFIGPROTO,
__module__ = 'tensorflow.core.protobuf.config_pb2'
# @@protoc_insertion_point(class_scope:tensorflow.ConfigProto)
))
_sym_db.RegisterMessage(ConfigProto)
_sym_db.RegisterMessage(ConfigProto.DeviceCountEntry)
RunOptions = _reflection.GeneratedProtocolMessageType('RunOptions', (_message.Message,), dict(
DESCRIPTOR = _RUNOPTIONS,
__module__ = 'tensorflow.core.protobuf.config_pb2'
# @@protoc_insertion_point(class_scope:tensorflow.RunOptions)
))
_sym_db.RegisterMessage(RunOptions)
RunMetadata = _reflection.GeneratedProtocolMessageType('RunMetadata', (_message.Message,), dict(
DESCRIPTOR = _RUNMETADATA,
__module__ = 'tensorflow.core.protobuf.config_pb2'
# @@protoc_insertion_point(class_scope:tensorflow.RunMetadata)
))
_sym_db.RegisterMessage(RunMetadata)
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\030org.tensorflow.frameworkB\014ConfigProtosP\001\370\001\001'))
_CONFIGPROTO_DEVICECOUNTENTRY.has_options = True
_CONFIGPROTO_DEVICECOUNTENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001'))
# @@protoc_insertion_point(module_scope)
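
if __name__ == "__main__":
    # Usage sketch (not emitted by protoc): the message classes registered above
    # behave like ordinary protobuf messages, so a session configuration can be
    # built, serialized and parsed back roughly as follows.
    config = ConfigProto(
        allow_soft_placement=True,
        intra_op_parallelism_threads=4,
    )
    config.gpu_options.allow_growth = True                       # nested GPUOptions message
    config.gpu_options.per_process_gpu_memory_fraction = 0.5
    config.device_count['GPU'] = 1                               # map<string, int32> field
    config.graph_options.optimizer_options.opt_level = OptimizerOptions.L1
    run_opts = RunOptions(trace_level=RunOptions.FULL_TRACE)     # enum value defined above
    data = config.SerializeToString()                            # proto3 wire format
    assert ConfigProto.FromString(data).gpu_options.allow_growth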
|
"""This package contains interfaces and functionality to compute pair-wise document similarities within a corpus
of documents.
"""
from gensim import parsing, corpora, matutils, interfaces, models, similarities, summarization, utils # noqa:F401
import logging
__version__ = '3.5.0'
class NullHandler(logging.Handler):
"""For python versions <= 2.6; same as `logging.NullHandler` in 2.7."""
def emit(self, record):
pass
logger = logging.getLogger('gensim')
if len(logger.handlers) == 0: # To ensure reload() doesn't add another one
logger.addHandler(NullHandler())
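
# Illustrative note (not part of the package): since only a NullHandler is
# attached here, an application that wants to see gensim's log output must
# configure logging itself, for example:
#
#     import logging
#     logging.basicConfig(
#         format='%(asctime)s : %(levelname)s : %(message)s',
#         level=logging.INFO,
#     )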
|
# coding: utf-8
"""
Intersight REST API
This is Intersight REST API
OpenAPI spec version: 1.0.9-262
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class BootSanRef(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'moid': 'str',
'object_type': 'str'
}
attribute_map = {
'moid': 'Moid',
'object_type': 'ObjectType'
}
def __init__(self, moid=None, object_type=None):
"""
BootSanRef - a model defined in Swagger
"""
self._moid = None
self._object_type = None
if moid is not None:
self.moid = moid
if object_type is not None:
self.object_type = object_type
@property
def moid(self):
"""
Gets the moid of this BootSanRef.
:return: The moid of this BootSanRef.
:rtype: str
"""
return self._moid
@moid.setter
def moid(self, moid):
"""
Sets the moid of this BootSanRef.
:param moid: The moid of this BootSanRef.
:type: str
"""
self._moid = moid
@property
def object_type(self):
"""
Gets the object_type of this BootSanRef.
:return: The object_type of this BootSanRef.
:rtype: str
"""
return self._object_type
@object_type.setter
def object_type(self, object_type):
"""
Sets the object_type of this BootSanRef.
:param object_type: The object_type of this BootSanRef.
:type: str
"""
self._object_type = object_type
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, BootSanRef):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
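
if __name__ == "__main__":
    # Usage sketch (not produced by the code generator); the Moid and ObjectType
    # values below are hypothetical placeholders.
    ref = BootSanRef(moid="example-moid", object_type="boot.SanRef")
    print(ref.to_dict())    # {'moid': 'example-moid', 'object_type': 'boot.SanRef'}
    same = BootSanRef(moid="example-moid", object_type="boot.SanRef")
    print(ref == same)      # True -- __eq__ compares the instances' __dict__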
|
import json
import configparser
import re
import logging
from modules.exceptions import SettingsError
log = logging.getLogger(__name__)
def readjson(jfile):
    """Read a JSON file and return the parsed data."""
    with open(jfile) as descriptor:
        data = json.load(descriptor)
    return data
def read_ini_to_dict(inifile):
    """Read an .ini file and return {section: {option: typed value}}."""
    log.debug("Reading %s", inifile)
    # readINI() yields a mapping of section name -> raw option dict, so the
    # type coercion in parseINI() is applied to each section separately.
    return {section: parseINI(options) for section, options in readINI(inifile).items()}
def parseINI(inidict):
    """Coerce the raw string values of one parsed section into bool, int, float or str."""
    initype = {}
    for k in inidict.keys():
        # Drop any inline '#' comment and surrounding whitespace before guessing the type.
        i = inidict[k].split("#")[0].strip()
        if i in ["True", "False"]:
            initype[k] = i == "True"
        elif re.match(r"^[0-9]+$", i):
            initype[k] = int(i)
        elif re.match(r"^[0-9]+\.[0-9]+$", i):
            initype[k] = float(i)
        else:
            initype[k] = str(i)
    return initype
def readINI(inifile):
    """Read an .ini file with configparser and return its raw section mapping."""
    config = configparser.ConfigParser()
    try:
        config.read(inifile)
    except configparser.DuplicateOptionError as err:
        log.error("Error while reading ini file %s", err)
        raise SettingsError(f"Duplicate Option in Settings File: {err}") from err
    # _sections is a private configparser attribute mapping
    # section name -> {option: raw string value}.
    return config._sections
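
if __name__ == "__main__":
    # Usage sketch with a hypothetical 'settings.ini' file containing, e.g.:
    #
    #     [general]
    #     debug = True    # inline comments after '#' are stripped by parseINI
    #     retries = 3
    #     timeout = 2.5
    #     name = demo
    #
    # read_ini_to_dict() then returns
    #     {'general': {'debug': True, 'retries': 3, 'timeout': 2.5, 'name': 'demo'}}
    print(read_ini_to_dict("settings.ini"))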
|
# -*- coding:utf-8 -*-
import base64

# Truncated base64 payload; the 'iVBORw0KGgo' prefix decodes to the PNG file signature.
bs = 'iVBORw0KGgoAAAANSUhEUg....'
imgdata = base64.b64decode(bs)
# Write the decoded bytes to disk; the context manager closes the file handle.
with open('2.jpg', 'wb') as f:
    f.write(imgdata)
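
# Round-trip sketch: re-encode the file written above into a base64 text string,
# e.g. for embedding the image in JSON or HTML.
with open('2.jpg', 'rb') as f:
    encoded = base64.b64encode(f.read()).decode('ascii')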
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
from ._enums import *
__all__ = [
'AddressSpaceResponse',
'ApplicationGatewayAuthenticationCertificateResponse',
'ApplicationGatewayBackendAddressPoolResponse',
'ApplicationGatewayBackendAddressResponse',
'ApplicationGatewayBackendHttpSettingsResponse',
'ApplicationGatewayFrontendIPConfigurationResponse',
'ApplicationGatewayFrontendPortResponse',
'ApplicationGatewayHttpListenerResponse',
'ApplicationGatewayIPConfigurationResponse',
'ApplicationGatewayPathRuleResponse',
'ApplicationGatewayProbeResponse',
'ApplicationGatewayRequestRoutingRuleResponse',
'ApplicationGatewaySkuResponse',
'ApplicationGatewaySslCertificateResponse',
'ApplicationGatewaySslPolicyResponse',
'ApplicationGatewayUrlPathMapResponse',
'ApplicationGatewayWebApplicationFirewallConfigurationResponse',
'BackendAddressPoolResponse',
'BgpPeerStatusResponseResult',
'BgpSettingsResponse',
'DhcpOptionsResponse',
'ExpressRouteCircuitAuthorizationResponse',
'ExpressRouteCircuitPeeringConfigResponse',
'ExpressRouteCircuitPeeringResponse',
'ExpressRouteCircuitServiceProviderPropertiesResponse',
'ExpressRouteCircuitSkuResponse',
'ExpressRouteCircuitStatsResponse',
'FrontendIPConfigurationResponse',
'GatewayRouteResponseResult',
'IPConfigurationResponse',
'InboundNatPoolResponse',
'InboundNatRuleResponse',
'LoadBalancingRuleResponse',
'LocalNetworkGatewayResponse',
'NetworkInterfaceDnsSettingsResponse',
'NetworkInterfaceIPConfigurationResponse',
'NetworkInterfaceResponse',
'NetworkSecurityGroupResponse',
'OutboundNatRuleResponse',
'PacketCaptureFilterResponse',
'PacketCaptureStorageLocationResponse',
'ProbeResponse',
'PublicIPAddressDnsSettingsResponse',
'PublicIPAddressResponse',
'ResourceNavigationLinkResponse',
'RouteResponse',
'RouteTableResponse',
'SecurityRuleResponse',
'SubResourceResponse',
'SubnetResponse',
'TunnelConnectionHealthResponse',
'VirtualNetworkGatewayIPConfigurationResponse',
'VirtualNetworkGatewayResponse',
'VirtualNetworkGatewaySkuResponse',
'VirtualNetworkPeeringResponse',
'VpnClientConfigurationResponse',
'VpnClientRevokedCertificateResponse',
'VpnClientRootCertificateResponse',
]
@pulumi.output_type
class AddressSpaceResponse(dict):
"""
AddressSpace contains an array of IP address ranges that can be used by subnets of the virtual network.
"""
def __init__(__self__, *,
address_prefixes: Optional[Sequence[str]] = None):
"""
AddressSpace contains an array of IP address ranges that can be used by subnets of the virtual network.
:param Sequence[str] address_prefixes: A list of address blocks reserved for this virtual network in CIDR notation.
"""
if address_prefixes is not None:
pulumi.set(__self__, "address_prefixes", address_prefixes)
@property
@pulumi.getter(name="addressPrefixes")
def address_prefixes(self) -> Optional[Sequence[str]]:
"""
A list of address blocks reserved for this virtual network in CIDR notation.
"""
return pulumi.get(self, "address_prefixes")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
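
# Illustrative note (not part of the generated module): these output types are
# normally constructed by the Pulumi engine from provider responses rather than
# by hand; the @pulumi.output_type dict subclass exposes the camelCase payload
# keys through the snake_case properties, roughly:
#
#     space = AddressSpaceResponse(address_prefixes=['10.0.0.0/16'])
#     space.address_prefixes   # -> ['10.0.0.0/16']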
@pulumi.output_type
class ApplicationGatewayAuthenticationCertificateResponse(dict):
"""
Authentication certificates of an application gateway.
"""
def __init__(__self__, *,
data: Optional[str] = None,
etag: Optional[str] = None,
id: Optional[str] = None,
name: Optional[str] = None,
provisioning_state: Optional[str] = None):
"""
Authentication certificates of an application gateway.
:param str data: Certificate public data.
:param str etag: A unique read-only string that changes whenever the resource is updated.
:param str id: Resource ID.
:param str name: Name of the resource that is unique within a resource group. This name can be used to access the resource.
:param str provisioning_state: Provisioning state of the authentication certificate resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
"""
if data is not None:
pulumi.set(__self__, "data", data)
if etag is not None:
pulumi.set(__self__, "etag", etag)
if id is not None:
pulumi.set(__self__, "id", id)
if name is not None:
pulumi.set(__self__, "name", name)
if provisioning_state is not None:
pulumi.set(__self__, "provisioning_state", provisioning_state)
@property
@pulumi.getter
def data(self) -> Optional[str]:
"""
Certificate public data.
"""
return pulumi.get(self, "data")
@property
@pulumi.getter
def etag(self) -> Optional[str]:
"""
A unique read-only string that changes whenever the resource is updated.
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter
def id(self) -> Optional[str]:
"""
Resource ID.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def name(self) -> Optional[str]:
"""
Name of the resource that is unique within a resource group. This name can be used to access the resource.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> Optional[str]:
"""
Provisioning state of the authentication certificate resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
"""
return pulumi.get(self, "provisioning_state")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class ApplicationGatewayBackendAddressPoolResponse(dict):
"""
Backend Address Pool of an application gateway.
"""
def __init__(__self__, *,
backend_addresses: Optional[Sequence['outputs.ApplicationGatewayBackendAddressResponse']] = None,
backend_ip_configurations: Optional[Sequence['outputs.NetworkInterfaceIPConfigurationResponse']] = None,
etag: Optional[str] = None,
id: Optional[str] = None,
name: Optional[str] = None,
provisioning_state: Optional[str] = None):
"""
Backend Address Pool of an application gateway.
:param Sequence['ApplicationGatewayBackendAddressResponseArgs'] backend_addresses: Backend addresses
:param Sequence['NetworkInterfaceIPConfigurationResponseArgs'] backend_ip_configurations: Collection of references to IPs defined in network interfaces.
:param str etag: A unique read-only string that changes whenever the resource is updated.
:param str id: Resource ID.
        :param str name: Name of the resource that is unique within a resource group. This name can be used to access the resource.
:param str provisioning_state: Provisioning state of the backend address pool resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
"""
if backend_addresses is not None:
pulumi.set(__self__, "backend_addresses", backend_addresses)
if backend_ip_configurations is not None:
pulumi.set(__self__, "backend_ip_configurations", backend_ip_configurations)
if etag is not None:
pulumi.set(__self__, "etag", etag)
if id is not None:
pulumi.set(__self__, "id", id)
if name is not None:
pulumi.set(__self__, "name", name)
if provisioning_state is not None:
pulumi.set(__self__, "provisioning_state", provisioning_state)
@property
@pulumi.getter(name="backendAddresses")
def backend_addresses(self) -> Optional[Sequence['outputs.ApplicationGatewayBackendAddressResponse']]:
"""
Backend addresses
"""
return pulumi.get(self, "backend_addresses")
@property
@pulumi.getter(name="backendIPConfigurations")
def backend_ip_configurations(self) -> Optional[Sequence['outputs.NetworkInterfaceIPConfigurationResponse']]:
"""
Collection of references to IPs defined in network interfaces.
"""
return pulumi.get(self, "backend_ip_configurations")
@property
@pulumi.getter
def etag(self) -> Optional[str]:
"""
A unique read-only string that changes whenever the resource is updated.
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter
def id(self) -> Optional[str]:
"""
Resource ID.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def name(self) -> Optional[str]:
"""
        Name of the resource that is unique within a resource group. This name can be used to access the resource.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> Optional[str]:
"""
Provisioning state of the backend address pool resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
"""
return pulumi.get(self, "provisioning_state")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class ApplicationGatewayBackendAddressResponse(dict):
"""
Backend address of an application gateway.
"""
def __init__(__self__, *,
fqdn: Optional[str] = None,
ip_address: Optional[str] = None):
"""
Backend address of an application gateway.
:param str fqdn: Fully qualified domain name (FQDN).
:param str ip_address: IP address
"""
if fqdn is not None:
pulumi.set(__self__, "fqdn", fqdn)
if ip_address is not None:
pulumi.set(__self__, "ip_address", ip_address)
@property
@pulumi.getter
def fqdn(self) -> Optional[str]:
"""
Fully qualified domain name (FQDN).
"""
return pulumi.get(self, "fqdn")
@property
@pulumi.getter(name="ipAddress")
def ip_address(self) -> Optional[str]:
"""
IP address
"""
return pulumi.get(self, "ip_address")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class ApplicationGatewayBackendHttpSettingsResponse(dict):
"""
Backend address pool settings of an application gateway.
"""
def __init__(__self__, *,
authentication_certificates: Optional[Sequence['outputs.SubResourceResponse']] = None,
cookie_based_affinity: Optional[str] = None,
etag: Optional[str] = None,
id: Optional[str] = None,
name: Optional[str] = None,
port: Optional[int] = None,
probe: Optional['outputs.SubResourceResponse'] = None,
protocol: Optional[str] = None,
provisioning_state: Optional[str] = None,
request_timeout: Optional[int] = None):
"""
Backend address pool settings of an application gateway.
:param Sequence['SubResourceResponseArgs'] authentication_certificates: Array of references to application gateway authentication certificates.
:param str cookie_based_affinity: Cookie based affinity. Possible values are: 'Enabled' and 'Disabled'.
:param str etag: A unique read-only string that changes whenever the resource is updated.
:param str id: Resource ID.
:param str name: Name of the resource that is unique within a resource group. This name can be used to access the resource.
:param int port: Port
:param 'SubResourceResponseArgs' probe: Probe resource of an application gateway.
:param str protocol: Protocol. Possible values are: 'Http' and 'Https'.
:param str provisioning_state: Provisioning state of the backend http settings resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
:param int request_timeout: Request timeout in seconds. Application Gateway will fail the request if response is not received within RequestTimeout. Acceptable values are from 1 second to 86400 seconds.
"""
if authentication_certificates is not None:
pulumi.set(__self__, "authentication_certificates", authentication_certificates)
if cookie_based_affinity is not None:
pulumi.set(__self__, "cookie_based_affinity", cookie_based_affinity)
if etag is not None:
pulumi.set(__self__, "etag", etag)
if id is not None:
pulumi.set(__self__, "id", id)
if name is not None:
pulumi.set(__self__, "name", name)
if port is not None:
pulumi.set(__self__, "port", port)
if probe is not None:
pulumi.set(__self__, "probe", probe)
if protocol is not None:
pulumi.set(__self__, "protocol", protocol)
if provisioning_state is not None:
pulumi.set(__self__, "provisioning_state", provisioning_state)
if request_timeout is not None:
pulumi.set(__self__, "request_timeout", request_timeout)
@property
@pulumi.getter(name="authenticationCertificates")
def authentication_certificates(self) -> Optional[Sequence['outputs.SubResourceResponse']]:
"""
Array of references to application gateway authentication certificates.
"""
return pulumi.get(self, "authentication_certificates")
@property
@pulumi.getter(name="cookieBasedAffinity")
def cookie_based_affinity(self) -> Optional[str]:
"""
Cookie based affinity. Possible values are: 'Enabled' and 'Disabled'.
"""
return pulumi.get(self, "cookie_based_affinity")
@property
@pulumi.getter
def etag(self) -> Optional[str]:
"""
A unique read-only string that changes whenever the resource is updated.
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter
def id(self) -> Optional[str]:
"""
Resource ID.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def name(self) -> Optional[str]:
"""
Name of the resource that is unique within a resource group. This name can be used to access the resource.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def port(self) -> Optional[int]:
"""
Port
"""
return pulumi.get(self, "port")
@property
@pulumi.getter
def probe(self) -> Optional['outputs.SubResourceResponse']:
"""
Probe resource of an application gateway.
"""
return pulumi.get(self, "probe")
@property
@pulumi.getter
def protocol(self) -> Optional[str]:
"""
Protocol. Possible values are: 'Http' and 'Https'.
"""
return pulumi.get(self, "protocol")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> Optional[str]:
"""
Provisioning state of the backend http settings resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter(name="requestTimeout")
def request_timeout(self) -> Optional[int]:
"""
Request timeout in seconds. Application Gateway will fail the request if response is not received within RequestTimeout. Acceptable values are from 1 second to 86400 seconds.
"""
return pulumi.get(self, "request_timeout")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class ApplicationGatewayFrontendIPConfigurationResponse(dict):
"""
Frontend IP configuration of an application gateway.
"""
def __init__(__self__, *,
etag: Optional[str] = None,
id: Optional[str] = None,
name: Optional[str] = None,
private_ip_address: Optional[str] = None,
private_ip_allocation_method: Optional[str] = None,
provisioning_state: Optional[str] = None,
public_ip_address: Optional['outputs.SubResourceResponse'] = None,
subnet: Optional['outputs.SubResourceResponse'] = None):
"""
Frontend IP configuration of an application gateway.
:param str etag: A unique read-only string that changes whenever the resource is updated.
:param str id: Resource ID.
:param str name: Name of the resource that is unique within a resource group. This name can be used to access the resource.
:param str private_ip_address: PrivateIPAddress of the network interface IP Configuration.
:param str private_ip_allocation_method: PrivateIP allocation method. Possible values are: 'Static' and 'Dynamic'.
:param str provisioning_state: Provisioning state of the public IP resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
:param 'SubResourceResponseArgs' public_ip_address: Reference of the PublicIP resource.
:param 'SubResourceResponseArgs' subnet: Reference of the subnet resource.
"""
if etag is not None:
pulumi.set(__self__, "etag", etag)
if id is not None:
pulumi.set(__self__, "id", id)
if name is not None:
pulumi.set(__self__, "name", name)
if private_ip_address is not None:
pulumi.set(__self__, "private_ip_address", private_ip_address)
if private_ip_allocation_method is not None:
pulumi.set(__self__, "private_ip_allocation_method", private_ip_allocation_method)
if provisioning_state is not None:
pulumi.set(__self__, "provisioning_state", provisioning_state)
if public_ip_address is not None:
pulumi.set(__self__, "public_ip_address", public_ip_address)
if subnet is not None:
pulumi.set(__self__, "subnet", subnet)
@property
@pulumi.getter
def etag(self) -> Optional[str]:
"""
A unique read-only string that changes whenever the resource is updated.
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter
def id(self) -> Optional[str]:
"""
Resource ID.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def name(self) -> Optional[str]:
"""
Name of the resource that is unique within a resource group. This name can be used to access the resource.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="privateIPAddress")
def private_ip_address(self) -> Optional[str]:
"""
PrivateIPAddress of the network interface IP Configuration.
"""
return pulumi.get(self, "private_ip_address")
@property
@pulumi.getter(name="privateIPAllocationMethod")
def private_ip_allocation_method(self) -> Optional[str]:
"""
PrivateIP allocation method. Possible values are: 'Static' and 'Dynamic'.
"""
return pulumi.get(self, "private_ip_allocation_method")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> Optional[str]:
"""
Provisioning state of the public IP resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter(name="publicIPAddress")
def public_ip_address(self) -> Optional['outputs.SubResourceResponse']:
"""
Reference of the PublicIP resource.
"""
return pulumi.get(self, "public_ip_address")
@property
@pulumi.getter
def subnet(self) -> Optional['outputs.SubResourceResponse']:
"""
Reference of the subnet resource.
"""
return pulumi.get(self, "subnet")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class ApplicationGatewayFrontendPortResponse(dict):
"""
Frontend port of an application gateway.
"""
def __init__(__self__, *,
etag: Optional[str] = None,
id: Optional[str] = None,
name: Optional[str] = None,
port: Optional[int] = None,
provisioning_state: Optional[str] = None):
"""
Frontend port of an application gateway.
:param str etag: A unique read-only string that changes whenever the resource is updated.
:param str id: Resource ID.
:param str name: Name of the resource that is unique within a resource group. This name can be used to access the resource.
:param int port: Frontend port
:param str provisioning_state: Provisioning state of the frontend port resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
"""
if etag is not None:
pulumi.set(__self__, "etag", etag)
if id is not None:
pulumi.set(__self__, "id", id)
if name is not None:
pulumi.set(__self__, "name", name)
if port is not None:
pulumi.set(__self__, "port", port)
if provisioning_state is not None:
pulumi.set(__self__, "provisioning_state", provisioning_state)
@property
@pulumi.getter
def etag(self) -> Optional[str]:
"""
A unique read-only string that changes whenever the resource is updated.
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter
def id(self) -> Optional[str]:
"""
Resource ID.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def name(self) -> Optional[str]:
"""
Name of the resource that is unique within a resource group. This name can be used to access the resource.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def port(self) -> Optional[int]:
"""
Frontend port
"""
return pulumi.get(self, "port")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> Optional[str]:
"""
Provisioning state of the frontend port resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
"""
return pulumi.get(self, "provisioning_state")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class ApplicationGatewayHttpListenerResponse(dict):
"""
Http listener of an application gateway.
"""
def __init__(__self__, *,
etag: Optional[str] = None,
frontend_ip_configuration: Optional['outputs.SubResourceResponse'] = None,
frontend_port: Optional['outputs.SubResourceResponse'] = None,
host_name: Optional[str] = None,
id: Optional[str] = None,
name: Optional[str] = None,
protocol: Optional[str] = None,
provisioning_state: Optional[str] = None,
require_server_name_indication: Optional[bool] = None,
ssl_certificate: Optional['outputs.SubResourceResponse'] = None):
"""
Http listener of an application gateway.
:param str etag: A unique read-only string that changes whenever the resource is updated.
:param 'SubResourceResponseArgs' frontend_ip_configuration: Frontend IP configuration resource of an application gateway.
:param 'SubResourceResponseArgs' frontend_port: Frontend port resource of an application gateway.
:param str host_name: Host name of HTTP listener.
:param str id: Resource ID.
:param str name: Name of the resource that is unique within a resource group. This name can be used to access the resource.
:param str protocol: Protocol. Possible values are: 'Http' and 'Https'.
:param str provisioning_state: Provisioning state of the HTTP listener resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
:param bool require_server_name_indication: Applicable only if protocol is https. Enables SNI for multi-hosting.
:param 'SubResourceResponseArgs' ssl_certificate: SSL certificate resource of an application gateway.
"""
if etag is not None:
pulumi.set(__self__, "etag", etag)
if frontend_ip_configuration is not None:
pulumi.set(__self__, "frontend_ip_configuration", frontend_ip_configuration)
if frontend_port is not None:
pulumi.set(__self__, "frontend_port", frontend_port)
if host_name is not None:
pulumi.set(__self__, "host_name", host_name)
if id is not None:
pulumi.set(__self__, "id", id)
if name is not None:
pulumi.set(__self__, "name", name)
if protocol is not None:
pulumi.set(__self__, "protocol", protocol)
if provisioning_state is not None:
pulumi.set(__self__, "provisioning_state", provisioning_state)
if require_server_name_indication is not None:
pulumi.set(__self__, "require_server_name_indication", require_server_name_indication)
if ssl_certificate is not None:
pulumi.set(__self__, "ssl_certificate", ssl_certificate)
@property
@pulumi.getter
def etag(self) -> Optional[str]:
"""
A unique read-only string that changes whenever the resource is updated.
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter(name="frontendIPConfiguration")
def frontend_ip_configuration(self) -> Optional['outputs.SubResourceResponse']:
"""
Frontend IP configuration resource of an application gateway.
"""
return pulumi.get(self, "frontend_ip_configuration")
@property
@pulumi.getter(name="frontendPort")
def frontend_port(self) -> Optional['outputs.SubResourceResponse']:
"""
Frontend port resource of an application gateway.
"""
return pulumi.get(self, "frontend_port")
@property
@pulumi.getter(name="hostName")
def host_name(self) -> Optional[str]:
"""
Host name of HTTP listener.
"""
return pulumi.get(self, "host_name")
@property
@pulumi.getter
def id(self) -> Optional[str]:
"""
Resource ID.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def name(self) -> Optional[str]:
"""
Name of the resource that is unique within a resource group. This name can be used to access the resource.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def protocol(self) -> Optional[str]:
"""
Protocol. Possible values are: 'Http' and 'Https'.
"""
return pulumi.get(self, "protocol")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> Optional[str]:
"""
Provisioning state of the HTTP listener resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter(name="requireServerNameIndication")
def require_server_name_indication(self) -> Optional[bool]:
"""
Applicable only if protocol is https. Enables SNI for multi-hosting.
"""
return pulumi.get(self, "require_server_name_indication")
@property
@pulumi.getter(name="sslCertificate")
def ssl_certificate(self) -> Optional['outputs.SubResourceResponse']:
"""
SSL certificate resource of an application gateway.
"""
return pulumi.get(self, "ssl_certificate")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class ApplicationGatewayIPConfigurationResponse(dict):
"""
IP configuration of an application gateway. Currently 1 public and 1 private IP configuration is allowed.
"""
def __init__(__self__, *,
etag: Optional[str] = None,
id: Optional[str] = None,
name: Optional[str] = None,
provisioning_state: Optional[str] = None,
subnet: Optional['outputs.SubResourceResponse'] = None):
"""
IP configuration of an application gateway. Currently 1 public and 1 private IP configuration is allowed.
:param str etag: A unique read-only string that changes whenever the resource is updated.
:param str id: Resource ID.
:param str name: Name of the resource that is unique within a resource group. This name can be used to access the resource.
:param str provisioning_state: Provisioning state of the application gateway subnet resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
:param 'SubResourceResponseArgs' subnet: Reference of the subnet resource. A subnet from where application gateway gets its private address.
"""
if etag is not None:
pulumi.set(__self__, "etag", etag)
if id is not None:
pulumi.set(__self__, "id", id)
if name is not None:
pulumi.set(__self__, "name", name)
if provisioning_state is not None:
pulumi.set(__self__, "provisioning_state", provisioning_state)
if subnet is not None:
pulumi.set(__self__, "subnet", subnet)
@property
@pulumi.getter
def etag(self) -> Optional[str]:
"""
A unique read-only string that changes whenever the resource is updated.
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter
def id(self) -> Optional[str]:
"""
Resource ID.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def name(self) -> Optional[str]:
"""
Name of the resource that is unique within a resource group. This name can be used to access the resource.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> Optional[str]:
"""
Provisioning state of the application gateway subnet resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter
def subnet(self) -> Optional['outputs.SubResourceResponse']:
"""
Reference of the subnet resource. A subnet from where application gateway gets its private address.
"""
return pulumi.get(self, "subnet")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class ApplicationGatewayPathRuleResponse(dict):
"""
Path rule of URL path map of an application gateway.
"""
def __init__(__self__, *,
backend_address_pool: Optional['outputs.SubResourceResponse'] = None,
backend_http_settings: Optional['outputs.SubResourceResponse'] = None,
etag: Optional[str] = None,
id: Optional[str] = None,
name: Optional[str] = None,
paths: Optional[Sequence[str]] = None,
provisioning_state: Optional[str] = None):
"""
Path rule of URL path map of an application gateway.
:param 'SubResourceResponseArgs' backend_address_pool: Backend address pool resource of URL path map.
:param 'SubResourceResponseArgs' backend_http_settings: Backend http settings resource of URL path map.
:param str etag: A unique read-only string that changes whenever the resource is updated.
:param str id: Resource ID.
:param str name: Name of the resource that is unique within a resource group. This name can be used to access the resource.
:param Sequence[str] paths: Path rules of URL path map.
        :param str provisioning_state: Provisioning state of the path rule resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
"""
if backend_address_pool is not None:
pulumi.set(__self__, "backend_address_pool", backend_address_pool)
if backend_http_settings is not None:
pulumi.set(__self__, "backend_http_settings", backend_http_settings)
if etag is not None:
pulumi.set(__self__, "etag", etag)
if id is not None:
pulumi.set(__self__, "id", id)
if name is not None:
pulumi.set(__self__, "name", name)
if paths is not None:
pulumi.set(__self__, "paths", paths)
if provisioning_state is not None:
pulumi.set(__self__, "provisioning_state", provisioning_state)
@property
@pulumi.getter(name="backendAddressPool")
def backend_address_pool(self) -> Optional['outputs.SubResourceResponse']:
"""
Backend address pool resource of URL path map.
"""
return pulumi.get(self, "backend_address_pool")
@property
@pulumi.getter(name="backendHttpSettings")
def backend_http_settings(self) -> Optional['outputs.SubResourceResponse']:
"""
Backend http settings resource of URL path map.
"""
return pulumi.get(self, "backend_http_settings")
@property
@pulumi.getter
def etag(self) -> Optional[str]:
"""
A unique read-only string that changes whenever the resource is updated.
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter
def id(self) -> Optional[str]:
"""
Resource ID.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def name(self) -> Optional[str]:
"""
Name of the resource that is unique within a resource group. This name can be used to access the resource.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def paths(self) -> Optional[Sequence[str]]:
"""
Path rules of URL path map.
"""
return pulumi.get(self, "paths")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> Optional[str]:
"""
        Provisioning state of the path rule resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
"""
return pulumi.get(self, "provisioning_state")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class ApplicationGatewayProbeResponse(dict):
"""
Probe of the application gateway.
"""
def __init__(__self__, *,
etag: Optional[str] = None,
host: Optional[str] = None,
id: Optional[str] = None,
interval: Optional[int] = None,
name: Optional[str] = None,
path: Optional[str] = None,
protocol: Optional[str] = None,
provisioning_state: Optional[str] = None,
timeout: Optional[int] = None,
unhealthy_threshold: Optional[int] = None):
"""
Probe of the application gateway.
:param str etag: A unique read-only string that changes whenever the resource is updated.
:param str host: Host name to send the probe to.
:param str id: Resource ID.
:param int interval: The probing interval in seconds. This is the time interval between two consecutive probes. Acceptable values are from 1 second to 86400 seconds.
:param str name: Name of the resource that is unique within a resource group. This name can be used to access the resource.
:param str path: Relative path of probe. Valid path starts from '/'. Probe is sent to <Protocol>://<host>:<port><path>
:param str protocol: Protocol. Possible values are: 'Http' and 'Https'.
:param str provisioning_state: Provisioning state of the backend http settings resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
        :param int timeout: The probe timeout in seconds. The probe is marked as failed if a valid response is not received within this timeout period. Acceptable values are from 1 second to 86400 seconds.
        :param int unhealthy_threshold: The probe retry count. The backend server is marked down after the consecutive probe failure count reaches UnhealthyThreshold. Acceptable values are from 1 to 20.
"""
if etag is not None:
pulumi.set(__self__, "etag", etag)
if host is not None:
pulumi.set(__self__, "host", host)
if id is not None:
pulumi.set(__self__, "id", id)
if interval is not None:
pulumi.set(__self__, "interval", interval)
if name is not None:
pulumi.set(__self__, "name", name)
if path is not None:
pulumi.set(__self__, "path", path)
if protocol is not None:
pulumi.set(__self__, "protocol", protocol)
if provisioning_state is not None:
pulumi.set(__self__, "provisioning_state", provisioning_state)
if timeout is not None:
pulumi.set(__self__, "timeout", timeout)
if unhealthy_threshold is not None:
pulumi.set(__self__, "unhealthy_threshold", unhealthy_threshold)
@property
@pulumi.getter
def etag(self) -> Optional[str]:
"""
A unique read-only string that changes whenever the resource is updated.
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter
def host(self) -> Optional[str]:
"""
Host name to send the probe to.
"""
return pulumi.get(self, "host")
@property
@pulumi.getter
def id(self) -> Optional[str]:
"""
Resource ID.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def interval(self) -> Optional[int]:
"""
The probing interval in seconds. This is the time interval between two consecutive probes. Acceptable values are from 1 second to 86400 seconds.
"""
return pulumi.get(self, "interval")
@property
@pulumi.getter
def name(self) -> Optional[str]:
"""
Name of the resource that is unique within a resource group. This name can be used to access the resource.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def path(self) -> Optional[str]:
"""
Relative path of probe. Valid path starts from '/'. Probe is sent to <Protocol>://<host>:<port><path>
"""
return pulumi.get(self, "path")
@property
@pulumi.getter
def protocol(self) -> Optional[str]:
"""
Protocol. Possible values are: 'Http' and 'Https'.
"""
return pulumi.get(self, "protocol")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> Optional[str]:
"""
Provisioning state of the probe resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter
def timeout(self) -> Optional[int]:
"""
The probe timeout in seconds. Probe marked as failed if a valid response is not received within this timeout period. Acceptable values are from 1 second to 86400 seconds.
"""
return pulumi.get(self, "timeout")
@property
@pulumi.getter(name="unhealthyThreshold")
def unhealthy_threshold(self) -> Optional[int]:
"""
The probe retry count. Backend server is marked down after consecutive probe failure count reaches UnhealthyThreshold. Acceptable values are from 1 to 20.
"""
return pulumi.get(self, "unhealthy_threshold")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
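# Illustrative sketch (not generated code): shows how the optional fields of
# ApplicationGatewayProbeResponse above are typically read, falling back to
# defaults when a field is absent. The fallback values and the formatting are
# assumptions for the example only.
def _example_probe_url(probe: 'ApplicationGatewayProbeResponse') -> str:
    protocol = probe.protocol or 'Http'
    host = probe.host or 'localhost'
    path = probe.path or '/'
    interval = probe.interval if probe.interval is not None else 30
    # Build the probe target in the documented <Protocol>://<host>:<port><path> shape,
    # omitting the port because it comes from the backend HTTP settings.
    return '{}://{}{} (every {}s)'.format(protocol.lower(), host, path, interval)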
@pulumi.output_type
class ApplicationGatewayRequestRoutingRuleResponse(dict):
"""
Request routing rule of an application gateway.
"""
def __init__(__self__, *,
backend_address_pool: Optional['outputs.SubResourceResponse'] = None,
backend_http_settings: Optional['outputs.SubResourceResponse'] = None,
etag: Optional[str] = None,
http_listener: Optional['outputs.SubResourceResponse'] = None,
id: Optional[str] = None,
name: Optional[str] = None,
provisioning_state: Optional[str] = None,
rule_type: Optional[str] = None,
url_path_map: Optional['outputs.SubResourceResponse'] = None):
"""
Request routing rule of an application gateway.
:param 'SubResourceResponseArgs' backend_address_pool: Backend address pool resource of the application gateway.
:param 'SubResourceResponseArgs' backend_http_settings: Backend http settings resource of the application gateway.
:param str etag: A unique read-only string that changes whenever the resource is updated.
:param 'SubResourceResponseArgs' http_listener: Http listener resource of the application gateway.
:param str id: Resource ID.
:param str name: Name of the resource that is unique within a resource group. This name can be used to access the resource.
:param str provisioning_state: Provisioning state of the request routing rule resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
:param str rule_type: Rule type. Possible values are: 'Basic' and 'PathBasedRouting'.
:param 'SubResourceResponseArgs' url_path_map: URL path map resource of the application gateway.
"""
if backend_address_pool is not None:
pulumi.set(__self__, "backend_address_pool", backend_address_pool)
if backend_http_settings is not None:
pulumi.set(__self__, "backend_http_settings", backend_http_settings)
if etag is not None:
pulumi.set(__self__, "etag", etag)
if http_listener is not None:
pulumi.set(__self__, "http_listener", http_listener)
if id is not None:
pulumi.set(__self__, "id", id)
if name is not None:
pulumi.set(__self__, "name", name)
if provisioning_state is not None:
pulumi.set(__self__, "provisioning_state", provisioning_state)
if rule_type is not None:
pulumi.set(__self__, "rule_type", rule_type)
if url_path_map is not None:
pulumi.set(__self__, "url_path_map", url_path_map)
@property
@pulumi.getter(name="backendAddressPool")
def backend_address_pool(self) -> Optional['outputs.SubResourceResponse']:
"""
Backend address pool resource of the application gateway.
"""
return pulumi.get(self, "backend_address_pool")
@property
@pulumi.getter(name="backendHttpSettings")
def backend_http_settings(self) -> Optional['outputs.SubResourceResponse']:
"""
Backend http settings resource of the application gateway.
"""
return pulumi.get(self, "backend_http_settings")
@property
@pulumi.getter
def etag(self) -> Optional[str]:
"""
A unique read-only string that changes whenever the resource is updated.
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter(name="httpListener")
def http_listener(self) -> Optional['outputs.SubResourceResponse']:
"""
Http listener resource of the application gateway.
"""
return pulumi.get(self, "http_listener")
@property
@pulumi.getter
def id(self) -> Optional[str]:
"""
Resource ID.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def name(self) -> Optional[str]:
"""
Name of the resource that is unique within a resource group. This name can be used to access the resource.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> Optional[str]:
"""
Provisioning state of the request routing rule resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter(name="ruleType")
def rule_type(self) -> Optional[str]:
"""
Rule type. Possible values are: 'Basic' and 'PathBasedRouting'.
"""
return pulumi.get(self, "rule_type")
@property
@pulumi.getter(name="urlPathMap")
def url_path_map(self) -> Optional['outputs.SubResourceResponse']:
"""
URL path map resource of the application gateway.
"""
return pulumi.get(self, "url_path_map")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class ApplicationGatewaySkuResponse(dict):
"""
SKU of an application gateway.
"""
def __init__(__self__, *,
capacity: Optional[int] = None,
name: Optional[str] = None,
tier: Optional[str] = None):
"""
SKU of an application gateway.
:param int capacity: Capacity (instance count) of an application gateway.
:param str name: Name of an application gateway SKU. Possible values are: 'Standard_Small', 'Standard_Medium', 'Standard_Large', 'WAF_Medium', and 'WAF_Large'.
:param str tier: Tier of an application gateway. Possible values are: 'Standard' and 'WAF'.
"""
if capacity is not None:
pulumi.set(__self__, "capacity", capacity)
if name is not None:
pulumi.set(__self__, "name", name)
if tier is not None:
pulumi.set(__self__, "tier", tier)
@property
@pulumi.getter
def capacity(self) -> Optional[int]:
"""
Capacity (instance count) of an application gateway.
"""
return pulumi.get(self, "capacity")
@property
@pulumi.getter
def name(self) -> Optional[str]:
"""
Name of an application gateway SKU. Possible values are: 'Standard_Small', 'Standard_Medium', 'Standard_Large', 'WAF_Medium', and 'WAF_Large'.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def tier(self) -> Optional[str]:
"""
Tier of an application gateway. Possible values are: 'Standard' and 'WAF'.
"""
return pulumi.get(self, "tier")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
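# Illustrative sketch (not generated code): a small helper showing how the SKU
# name and tier documented above might be inspected; the 'WAF' prefix check is
# an assumption based on the possible values listed in the docstrings.
def _example_sku_is_waf(sku: 'ApplicationGatewaySkuResponse') -> bool:
    # Both the tier ('WAF') and the name ('WAF_Medium', 'WAF_Large') identify a
    # web application firewall SKU.
    return (sku.tier == 'WAF') or bool(sku.name and sku.name.startswith('WAF_'))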
@pulumi.output_type
class ApplicationGatewaySslCertificateResponse(dict):
"""
SSL certificates of an application gateway.
"""
def __init__(__self__, *,
data: Optional[str] = None,
etag: Optional[str] = None,
id: Optional[str] = None,
name: Optional[str] = None,
password: Optional[str] = None,
provisioning_state: Optional[str] = None,
public_cert_data: Optional[str] = None):
"""
SSL certificates of an application gateway.
:param str data: Base-64 encoded pfx certificate. Only applicable in PUT request.
:param str etag: A unique read-only string that changes whenever the resource is updated.
:param str id: Resource ID.
:param str name: Name of the resource that is unique within a resource group. This name can be used to access the resource.
:param str password: Password for the pfx file specified in data. Only applicable in PUT request.
:param str provisioning_state: Provisioning state of the SSL certificate resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
:param str public_cert_data: Base-64 encoded public cert data corresponding to pfx specified in data. Only applicable in GET request.
"""
if data is not None:
pulumi.set(__self__, "data", data)
if etag is not None:
pulumi.set(__self__, "etag", etag)
if id is not None:
pulumi.set(__self__, "id", id)
if name is not None:
pulumi.set(__self__, "name", name)
if password is not None:
pulumi.set(__self__, "password", password)
if provisioning_state is not None:
pulumi.set(__self__, "provisioning_state", provisioning_state)
if public_cert_data is not None:
pulumi.set(__self__, "public_cert_data", public_cert_data)
@property
@pulumi.getter
def data(self) -> Optional[str]:
"""
Base-64 encoded pfx certificate. Only applicable in PUT request.
"""
return pulumi.get(self, "data")
@property
@pulumi.getter
def etag(self) -> Optional[str]:
"""
A unique read-only string that changes whenever the resource is updated.
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter
def id(self) -> Optional[str]:
"""
Resource ID.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def name(self) -> Optional[str]:
"""
Name of the resource that is unique within a resource group. This name can be used to access the resource.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def password(self) -> Optional[str]:
"""
Password for the pfx file specified in data. Only applicable in PUT request.
"""
return pulumi.get(self, "password")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> Optional[str]:
"""
Provisioning state of the SSL certificate resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter(name="publicCertData")
def public_cert_data(self) -> Optional[str]:
"""
Base-64 encoded public cert data corresponding to pfx specified in data. Only applicable in GET request.
"""
return pulumi.get(self, "public_cert_data")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class ApplicationGatewaySslPolicyResponse(dict):
"""
Application gateway SSL policy.
"""
def __init__(__self__, *,
disabled_ssl_protocols: Optional[Sequence[str]] = None):
"""
Application gateway SSL policy.
:param Sequence[str] disabled_ssl_protocols: SSL protocols to be disabled on application gateway. Possible values are: 'TLSv1_0', 'TLSv1_1', and 'TLSv1_2'.
"""
if disabled_ssl_protocols is not None:
pulumi.set(__self__, "disabled_ssl_protocols", disabled_ssl_protocols)
@property
@pulumi.getter(name="disabledSslProtocols")
def disabled_ssl_protocols(self) -> Optional[Sequence[str]]:
"""
SSL protocols to be disabled on application gateway. Possible values are: 'TLSv1_0', 'TLSv1_1', and 'TLSv1_2'.
"""
return pulumi.get(self, "disabled_ssl_protocols")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
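# Illustrative sketch (not generated code): checks whether a given protocol name
# (e.g. 'TLSv1_0') appears in the disabled list documented above. The protocol
# string values are taken from the docstring; anything else is an assumption.
def _example_protocol_is_disabled(policy: 'ApplicationGatewaySslPolicyResponse', protocol: str) -> bool:
    disabled = policy.disabled_ssl_protocols or []
    return protocol in disabled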
@pulumi.output_type
class ApplicationGatewayUrlPathMapResponse(dict):
"""
UrlPathMaps give a URL path to the backend mapping information for PathBasedRouting.
"""
def __init__(__self__, *,
default_backend_address_pool: Optional['outputs.SubResourceResponse'] = None,
default_backend_http_settings: Optional['outputs.SubResourceResponse'] = None,
etag: Optional[str] = None,
id: Optional[str] = None,
name: Optional[str] = None,
path_rules: Optional[Sequence['outputs.ApplicationGatewayPathRuleResponse']] = None,
provisioning_state: Optional[str] = None):
"""
UrlPathMaps give a URL path to the backend mapping information for PathBasedRouting.
:param 'SubResourceResponseArgs' default_backend_address_pool: Default backend address pool resource of URL path map.
:param 'SubResourceResponseArgs' default_backend_http_settings: Default backend http settings resource of URL path map.
:param str etag: A unique read-only string that changes whenever the resource is updated.
:param str id: Resource ID.
:param str name: Name of the resource that is unique within a resource group. This name can be used to access the resource.
:param Sequence['ApplicationGatewayPathRuleResponseArgs'] path_rules: Path rules of URL path map resource.
:param str provisioning_state: Provisioning state of the URL path map resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
"""
if default_backend_address_pool is not None:
pulumi.set(__self__, "default_backend_address_pool", default_backend_address_pool)
if default_backend_http_settings is not None:
pulumi.set(__self__, "default_backend_http_settings", default_backend_http_settings)
if etag is not None:
pulumi.set(__self__, "etag", etag)
if id is not None:
pulumi.set(__self__, "id", id)
if name is not None:
pulumi.set(__self__, "name", name)
if path_rules is not None:
pulumi.set(__self__, "path_rules", path_rules)
if provisioning_state is not None:
pulumi.set(__self__, "provisioning_state", provisioning_state)
@property
@pulumi.getter(name="defaultBackendAddressPool")
def default_backend_address_pool(self) -> Optional['outputs.SubResourceResponse']:
"""
Default backend address pool resource of URL path map.
"""
return pulumi.get(self, "default_backend_address_pool")
@property
@pulumi.getter(name="defaultBackendHttpSettings")
def default_backend_http_settings(self) -> Optional['outputs.SubResourceResponse']:
"""
Default backend http settings resource of URL path map.
"""
return pulumi.get(self, "default_backend_http_settings")
@property
@pulumi.getter
def etag(self) -> Optional[str]:
"""
A unique read-only string that changes whenever the resource is updated.
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter
def id(self) -> Optional[str]:
"""
Resource ID.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def name(self) -> Optional[str]:
"""
Name of the resource that is unique within a resource group. This name can be used to access the resource.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="pathRules")
def path_rules(self) -> Optional[Sequence['outputs.ApplicationGatewayPathRuleResponse']]:
"""
Path rules of URL path map resource.
"""
return pulumi.get(self, "path_rules")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> Optional[str]:
"""
Provisioning state of the URL path map resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
"""
return pulumi.get(self, "provisioning_state")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class ApplicationGatewayWebApplicationFirewallConfigurationResponse(dict):
"""
Application gateway web application firewall configuration.
"""
def __init__(__self__, *,
enabled: bool,
firewall_mode: Optional[str] = None):
"""
Application gateway web application firewall configuration.
:param bool enabled: Whether the web application firewall is enabled.
:param str firewall_mode: Web application firewall mode. Possible values are: 'Detection' and 'Prevention'.
"""
pulumi.set(__self__, "enabled", enabled)
if firewall_mode is not None:
pulumi.set(__self__, "firewall_mode", firewall_mode)
@property
@pulumi.getter
def enabled(self) -> bool:
"""
Whether the web application firewall is enabled.
"""
return pulumi.get(self, "enabled")
@property
@pulumi.getter(name="firewallMode")
def firewall_mode(self) -> Optional[str]:
"""
Web application firewall mode. Possible values are: 'Detection' and 'Prevention'.
"""
return pulumi.get(self, "firewall_mode")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
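# Illustrative sketch (not generated code): derives a one-word status from the
# WAF configuration documented above; the 'Disabled' fallback wording is an
# assumption for the example.
def _example_waf_status(waf: 'ApplicationGatewayWebApplicationFirewallConfigurationResponse') -> str:
    if not waf.enabled:
        return 'Disabled'
    # firewall_mode is 'Detection' or 'Prevention' per the docstring above.
    return waf.firewall_mode or 'Detection'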
@pulumi.output_type
class BackendAddressPoolResponse(dict):
"""
Pool of backend IP addresses.
"""
def __init__(__self__, *,
backend_ip_configurations: Sequence['outputs.NetworkInterfaceIPConfigurationResponse'],
load_balancing_rules: Sequence['outputs.SubResourceResponse'],
outbound_nat_rule: 'outputs.SubResourceResponse',
etag: Optional[str] = None,
id: Optional[str] = None,
name: Optional[str] = None,
provisioning_state: Optional[str] = None):
"""
Pool of backend IP addresses.
:param Sequence['NetworkInterfaceIPConfigurationResponseArgs'] backend_ip_configurations: Gets collection of references to IP addresses defined in network interfaces.
:param Sequence['SubResourceResponseArgs'] load_balancing_rules: Gets load balancing rules that use this backend address pool.
:param 'SubResourceResponseArgs' outbound_nat_rule: Gets the outbound NAT rule that uses this backend address pool.
:param str etag: A unique read-only string that changes whenever the resource is updated.
:param str id: Resource ID.
:param str name: Gets name of the resource that is unique within a resource group. This name can be used to access the resource.
:param str provisioning_state: Gets the provisioning state of the backend address pool resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
"""
pulumi.set(__self__, "backend_ip_configurations", backend_ip_configurations)
pulumi.set(__self__, "load_balancing_rules", load_balancing_rules)
pulumi.set(__self__, "outbound_nat_rule", outbound_nat_rule)
if etag is not None:
pulumi.set(__self__, "etag", etag)
if id is not None:
pulumi.set(__self__, "id", id)
if name is not None:
pulumi.set(__self__, "name", name)
if provisioning_state is not None:
pulumi.set(__self__, "provisioning_state", provisioning_state)
@property
@pulumi.getter(name="backendIPConfigurations")
def backend_ip_configurations(self) -> Sequence['outputs.NetworkInterfaceIPConfigurationResponse']:
"""
Gets collection of references to IP addresses defined in network interfaces.
"""
return pulumi.get(self, "backend_ip_configurations")
@property
@pulumi.getter(name="loadBalancingRules")
def load_balancing_rules(self) -> Sequence['outputs.SubResourceResponse']:
"""
Gets load balancing rules that use this backend address pool.
"""
return pulumi.get(self, "load_balancing_rules")
@property
@pulumi.getter(name="outboundNatRule")
def outbound_nat_rule(self) -> 'outputs.SubResourceResponse':
"""
Gets the outbound NAT rule that uses this backend address pool.
"""
return pulumi.get(self, "outbound_nat_rule")
@property
@pulumi.getter
def etag(self) -> Optional[str]:
"""
A unique read-only string that changes whenever the resource is updated.
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter
def id(self) -> Optional[str]:
"""
Resource ID.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def name(self) -> Optional[str]:
"""
Gets name of the resource that is unique within a resource group. This name can be used to access the resource.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> Optional[str]:
"""
Gets the provisioning state of the backend address pool resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
"""
return pulumi.get(self, "provisioning_state")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class BgpPeerStatusResponseResult(dict):
def __init__(__self__, *,
asn: int,
connected_duration: str,
local_address: str,
messages_received: float,
messages_sent: float,
neighbor: str,
routes_received: float,
state: str):
"""
:param int asn: The autonomous system number of the remote BGP peer
:param str connected_duration: For how long the peering has been up
:param str local_address: The virtual network gateway's local address
:param float messages_received: The number of BGP messages received
:param float messages_sent: The number of BGP messages sent
:param str neighbor: The remote BGP peer
:param float routes_received: The number of routes learned from this peer
:param str state: The BGP peer state
"""
pulumi.set(__self__, "asn", asn)
pulumi.set(__self__, "connected_duration", connected_duration)
pulumi.set(__self__, "local_address", local_address)
pulumi.set(__self__, "messages_received", messages_received)
pulumi.set(__self__, "messages_sent", messages_sent)
pulumi.set(__self__, "neighbor", neighbor)
pulumi.set(__self__, "routes_received", routes_received)
pulumi.set(__self__, "state", state)
@property
@pulumi.getter
def asn(self) -> int:
"""
The autonomous system number of the remote BGP peer
"""
return pulumi.get(self, "asn")
@property
@pulumi.getter(name="connectedDuration")
def connected_duration(self) -> str:
"""
For how long the peering has been up
"""
return pulumi.get(self, "connected_duration")
@property
@pulumi.getter(name="localAddress")
def local_address(self) -> str:
"""
The virtual network gateway's local address
"""
return pulumi.get(self, "local_address")
@property
@pulumi.getter(name="messagesReceived")
def messages_received(self) -> float:
"""
The number of BGP messages received
"""
return pulumi.get(self, "messages_received")
@property
@pulumi.getter(name="messagesSent")
def messages_sent(self) -> float:
"""
The number of BGP messages sent
"""
return pulumi.get(self, "messages_sent")
@property
@pulumi.getter
def neighbor(self) -> str:
"""
The remote BGP peer
"""
return pulumi.get(self, "neighbor")
@property
@pulumi.getter(name="routesReceived")
def routes_received(self) -> float:
"""
The number of routes learned from this peer
"""
return pulumi.get(self, "routes_received")
@property
@pulumi.getter
def state(self) -> str:
"""
The BGP peer state
"""
return pulumi.get(self, "state")
@pulumi.output_type
class BgpSettingsResponse(dict):
def __init__(__self__, *,
asn: Optional[float] = None,
bgp_peering_address: Optional[str] = None,
peer_weight: Optional[int] = None):
"""
:param float asn: The BGP speaker's ASN.
:param str bgp_peering_address: The BGP peering address and BGP identifier of this BGP speaker.
:param int peer_weight: The weight added to routes learned from this BGP speaker.
"""
if asn is not None:
pulumi.set(__self__, "asn", asn)
if bgp_peering_address is not None:
pulumi.set(__self__, "bgp_peering_address", bgp_peering_address)
if peer_weight is not None:
pulumi.set(__self__, "peer_weight", peer_weight)
@property
@pulumi.getter
def asn(self) -> Optional[float]:
"""
The BGP speaker's ASN.
"""
return pulumi.get(self, "asn")
@property
@pulumi.getter(name="bgpPeeringAddress")
def bgp_peering_address(self) -> Optional[str]:
"""
The BGP peering address and BGP identifier of this BGP speaker.
"""
return pulumi.get(self, "bgp_peering_address")
@property
@pulumi.getter(name="peerWeight")
def peer_weight(self) -> Optional[int]:
"""
The weight added to routes learned from this BGP speaker.
"""
return pulumi.get(self, "peer_weight")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class DhcpOptionsResponse(dict):
"""
DhcpOptions contains an array of DNS servers available to VMs deployed in the virtual network. Standard DHCP option for a subnet overrides VNET DHCP options.
"""
def __init__(__self__, *,
dns_servers: Optional[Sequence[str]] = None):
"""
DhcpOptions contains an array of DNS servers available to VMs deployed in the virtual network. Standard DHCP option for a subnet overrides VNET DHCP options.
:param Sequence[str] dns_servers: The list of DNS servers IP addresses.
"""
if dns_servers is not None:
pulumi.set(__self__, "dns_servers", dns_servers)
@property
@pulumi.getter(name="dnsServers")
def dns_servers(self) -> Optional[Sequence[str]]:
"""
The list of DNS servers IP addresses.
"""
return pulumi.get(self, "dns_servers")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
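# Illustrative sketch (not generated code): joins the optional DNS server list
# documented above into a display string; the separator is an assumption.
def _example_dns_servers(opts: 'DhcpOptionsResponse') -> str:
    return ', '.join(opts.dns_servers or [])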
@pulumi.output_type
class ExpressRouteCircuitAuthorizationResponse(dict):
"""
Authorization in an ExpressRouteCircuit resource.
"""
def __init__(__self__, *,
authorization_key: Optional[str] = None,
authorization_use_status: Optional[str] = None,
etag: Optional[str] = None,
id: Optional[str] = None,
name: Optional[str] = None,
provisioning_state: Optional[str] = None):
"""
Authorization in an ExpressRouteCircuit resource.
:param str authorization_key: The authorization key.
:param str authorization_use_status: AuthorizationUseStatus. Possible values are: 'Available' and 'InUse'.
:param str etag: A unique read-only string that changes whenever the resource is updated.
:param str id: Resource ID.
:param str name: Gets name of the resource that is unique within a resource group. This name can be used to access the resource.
:param str provisioning_state: Gets the provisioning state of the authorization resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
"""
if authorization_key is not None:
pulumi.set(__self__, "authorization_key", authorization_key)
if authorization_use_status is not None:
pulumi.set(__self__, "authorization_use_status", authorization_use_status)
if etag is not None:
pulumi.set(__self__, "etag", etag)
if id is not None:
pulumi.set(__self__, "id", id)
if name is not None:
pulumi.set(__self__, "name", name)
if provisioning_state is not None:
pulumi.set(__self__, "provisioning_state", provisioning_state)
@property
@pulumi.getter(name="authorizationKey")
def authorization_key(self) -> Optional[str]:
"""
The authorization key.
"""
return pulumi.get(self, "authorization_key")
@property
@pulumi.getter(name="authorizationUseStatus")
def authorization_use_status(self) -> Optional[str]:
"""
AuthorizationUseStatus. Possible values are: 'Available' and 'InUse'.
"""
return pulumi.get(self, "authorization_use_status")
@property
@pulumi.getter
def etag(self) -> Optional[str]:
"""
A unique read-only string that changes whenever the resource is updated.
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter
def id(self) -> Optional[str]:
"""
Resource ID.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def name(self) -> Optional[str]:
"""
Gets name of the resource that is unique within a resource group. This name can be used to access the resource.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> Optional[str]:
"""
Gets the provisioning state of the authorization resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
"""
return pulumi.get(self, "provisioning_state")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class ExpressRouteCircuitPeeringConfigResponse(dict):
"""
Specifies the peering configuration.
"""
def __init__(__self__, *,
advertised_public_prefixes: Optional[Sequence[str]] = None,
advertised_public_prefixes_state: Optional[str] = None,
customer_asn: Optional[int] = None,
routing_registry_name: Optional[str] = None):
"""
Specifies the peering configuration.
:param Sequence[str] advertised_public_prefixes: The reference of AdvertisedPublicPrefixes.
:param str advertised_public_prefixes_state: AdvertisedPublicPrefixState of the Peering resource. Possible values are 'NotConfigured', 'Configuring', 'Configured', and 'ValidationNeeded'.
:param int customer_asn: The CustomerASN of the peering.
:param str routing_registry_name: The RoutingRegistryName of the configuration.
"""
if advertised_public_prefixes is not None:
pulumi.set(__self__, "advertised_public_prefixes", advertised_public_prefixes)
if advertised_public_prefixes_state is not None:
pulumi.set(__self__, "advertised_public_prefixes_state", advertised_public_prefixes_state)
if customer_asn is not None:
pulumi.set(__self__, "customer_asn", customer_asn)
if routing_registry_name is not None:
pulumi.set(__self__, "routing_registry_name", routing_registry_name)
@property
@pulumi.getter(name="advertisedPublicPrefixes")
def advertised_public_prefixes(self) -> Optional[Sequence[str]]:
"""
The reference of AdvertisedPublicPrefixes.
"""
return pulumi.get(self, "advertised_public_prefixes")
@property
@pulumi.getter(name="advertisedPublicPrefixesState")
def advertised_public_prefixes_state(self) -> Optional[str]:
"""
AdvertisedPublicPrefixState of the Peering resource. Possible values are 'NotConfigured', 'Configuring', 'Configured', and 'ValidationNeeded'.
"""
return pulumi.get(self, "advertised_public_prefixes_state")
@property
@pulumi.getter(name="customerASN")
def customer_asn(self) -> Optional[int]:
"""
The CustomerASN of the peering.
"""
return pulumi.get(self, "customer_asn")
@property
@pulumi.getter(name="routingRegistryName")
def routing_registry_name(self) -> Optional[str]:
"""
The RoutingRegistryName of the configuration.
"""
return pulumi.get(self, "routing_registry_name")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
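# Illustrative sketch (not generated code): counts the advertised public prefixes
# in the peering configuration documented above and reports their state; the
# summary wording and the 'NotConfigured' fallback are assumptions.
def _example_peering_config_summary(cfg: 'ExpressRouteCircuitPeeringConfigResponse') -> str:
    prefixes = cfg.advertised_public_prefixes or []
    state = cfg.advertised_public_prefixes_state or 'NotConfigured'
    return '{} advertised public prefix(es), state: {}'.format(len(prefixes), state)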
@pulumi.output_type
class ExpressRouteCircuitPeeringResponse(dict):
"""
Peering in an ExpressRouteCircuit resource.
"""
def __init__(__self__, *,
azure_asn: Optional[int] = None,
etag: Optional[str] = None,
gateway_manager_etag: Optional[str] = None,
id: Optional[str] = None,
last_modified_by: Optional[str] = None,
microsoft_peering_config: Optional['outputs.ExpressRouteCircuitPeeringConfigResponse'] = None,
name: Optional[str] = None,
peer_asn: Optional[int] = None,
peering_type: Optional[str] = None,
primary_azure_port: Optional[str] = None,
primary_peer_address_prefix: Optional[str] = None,
provisioning_state: Optional[str] = None,
secondary_azure_port: Optional[str] = None,
secondary_peer_address_prefix: Optional[str] = None,
shared_key: Optional[str] = None,
state: Optional[str] = None,
stats: Optional['outputs.ExpressRouteCircuitStatsResponse'] = None,
vlan_id: Optional[int] = None):
"""
Peering in an ExpressRouteCircuit resource.
:param int azure_asn: The Azure ASN.
:param str etag: A unique read-only string that changes whenever the resource is updated.
:param str gateway_manager_etag: The GatewayManager Etag.
:param str id: Resource ID.
:param str last_modified_by: Gets whether the provider or the customer last modified the peering.
:param 'ExpressRouteCircuitPeeringConfigResponseArgs' microsoft_peering_config: The Microsoft peering configuration.
:param str name: Gets name of the resource that is unique within a resource group. This name can be used to access the resource.
:param int peer_asn: The peer ASN.
:param str peering_type: The PeeringType. Possible values are: 'AzurePublicPeering', 'AzurePrivatePeering', and 'MicrosoftPeering'.
:param str primary_azure_port: The primary port.
:param str primary_peer_address_prefix: The primary address prefix.
:param str provisioning_state: Gets the provisioning state of the peering resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
:param str secondary_azure_port: The secondary port.
:param str secondary_peer_address_prefix: The secondary address prefix.
:param str shared_key: The shared key.
:param str state: The state of peering. Possible values are: 'Disabled' and 'Enabled'.
:param 'ExpressRouteCircuitStatsResponseArgs' stats: Gets peering stats.
:param int vlan_id: The VLAN ID.
"""
if azure_asn is not None:
pulumi.set(__self__, "azure_asn", azure_asn)
if etag is not None:
pulumi.set(__self__, "etag", etag)
if gateway_manager_etag is not None:
pulumi.set(__self__, "gateway_manager_etag", gateway_manager_etag)
if id is not None:
pulumi.set(__self__, "id", id)
if last_modified_by is not None:
pulumi.set(__self__, "last_modified_by", last_modified_by)
if microsoft_peering_config is not None:
pulumi.set(__self__, "microsoft_peering_config", microsoft_peering_config)
if name is not None:
pulumi.set(__self__, "name", name)
if peer_asn is not None:
pulumi.set(__self__, "peer_asn", peer_asn)
if peering_type is not None:
pulumi.set(__self__, "peering_type", peering_type)
if primary_azure_port is not None:
pulumi.set(__self__, "primary_azure_port", primary_azure_port)
if primary_peer_address_prefix is not None:
pulumi.set(__self__, "primary_peer_address_prefix", primary_peer_address_prefix)
if provisioning_state is not None:
pulumi.set(__self__, "provisioning_state", provisioning_state)
if secondary_azure_port is not None:
pulumi.set(__self__, "secondary_azure_port", secondary_azure_port)
if secondary_peer_address_prefix is not None:
pulumi.set(__self__, "secondary_peer_address_prefix", secondary_peer_address_prefix)
if shared_key is not None:
pulumi.set(__self__, "shared_key", shared_key)
if state is not None:
pulumi.set(__self__, "state", state)
if stats is not None:
pulumi.set(__self__, "stats", stats)
if vlan_id is not None:
pulumi.set(__self__, "vlan_id", vlan_id)
@property
@pulumi.getter(name="azureASN")
def azure_asn(self) -> Optional[int]:
"""
The Azure ASN.
"""
return pulumi.get(self, "azure_asn")
@property
@pulumi.getter
def etag(self) -> Optional[str]:
"""
A unique read-only string that changes whenever the resource is updated.
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter(name="gatewayManagerEtag")
def gateway_manager_etag(self) -> Optional[str]:
"""
The GatewayManager Etag.
"""
return pulumi.get(self, "gateway_manager_etag")
@property
@pulumi.getter
def id(self) -> Optional[str]:
"""
Resource ID.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter(name="lastModifiedBy")
def last_modified_by(self) -> Optional[str]:
"""
Gets whether the provider or the customer last modified the peering.
"""
return pulumi.get(self, "last_modified_by")
@property
@pulumi.getter(name="microsoftPeeringConfig")
def microsoft_peering_config(self) -> Optional['outputs.ExpressRouteCircuitPeeringConfigResponse']:
"""
The Microsoft peering configuration.
"""
return pulumi.get(self, "microsoft_peering_config")
@property
@pulumi.getter
def name(self) -> Optional[str]:
"""
Gets name of the resource that is unique within a resource group. This name can be used to access the resource.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="peerASN")
def peer_asn(self) -> Optional[int]:
"""
The peer ASN.
"""
return pulumi.get(self, "peer_asn")
@property
@pulumi.getter(name="peeringType")
def peering_type(self) -> Optional[str]:
"""
The PeeringType. Possible values are: 'AzurePublicPeering', 'AzurePrivatePeering', and 'MicrosoftPeering'.
"""
return pulumi.get(self, "peering_type")
@property
@pulumi.getter(name="primaryAzurePort")
def primary_azure_port(self) -> Optional[str]:
"""
The primary port.
"""
return pulumi.get(self, "primary_azure_port")
@property
@pulumi.getter(name="primaryPeerAddressPrefix")
def primary_peer_address_prefix(self) -> Optional[str]:
"""
The primary address prefix.
"""
return pulumi.get(self, "primary_peer_address_prefix")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> Optional[str]:
"""
Gets the provisioning state of the peering resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter(name="secondaryAzurePort")
def secondary_azure_port(self) -> Optional[str]:
"""
The secondary port.
"""
return pulumi.get(self, "secondary_azure_port")
@property
@pulumi.getter(name="secondaryPeerAddressPrefix")
def secondary_peer_address_prefix(self) -> Optional[str]:
"""
The secondary address prefix.
"""
return pulumi.get(self, "secondary_peer_address_prefix")
@property
@pulumi.getter(name="sharedKey")
def shared_key(self) -> Optional[str]:
"""
The shared key.
"""
return pulumi.get(self, "shared_key")
@property
@pulumi.getter
def state(self) -> Optional[str]:
"""
The state of peering. Possible values are: 'Disabled' and 'Enabled'.
"""
return pulumi.get(self, "state")
@property
@pulumi.getter
def stats(self) -> Optional['outputs.ExpressRouteCircuitStatsResponse']:
"""
Gets peering stats.
"""
return pulumi.get(self, "stats")
@property
@pulumi.getter(name="vlanId")
def vlan_id(self) -> Optional[int]:
"""
The VLAN ID.
"""
return pulumi.get(self, "vlan_id")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class ExpressRouteCircuitServiceProviderPropertiesResponse(dict):
"""
Contains ServiceProviderProperties in an ExpressRouteCircuit.
"""
def __init__(__self__, *,
bandwidth_in_mbps: Optional[int] = None,
peering_location: Optional[str] = None,
service_provider_name: Optional[str] = None):
"""
Contains ServiceProviderProperties in an ExpressRouteCircuit.
:param int bandwidth_in_mbps: The BandwidthInMbps.
:param str peering_location: The peering location.
:param str service_provider_name: The serviceProviderName.
"""
if bandwidth_in_mbps is not None:
pulumi.set(__self__, "bandwidth_in_mbps", bandwidth_in_mbps)
if peering_location is not None:
pulumi.set(__self__, "peering_location", peering_location)
if service_provider_name is not None:
pulumi.set(__self__, "service_provider_name", service_provider_name)
@property
@pulumi.getter(name="bandwidthInMbps")
def bandwidth_in_mbps(self) -> Optional[int]:
"""
The BandwidthInMbps.
"""
return pulumi.get(self, "bandwidth_in_mbps")
@property
@pulumi.getter(name="peeringLocation")
def peering_location(self) -> Optional[str]:
"""
The peering location.
"""
return pulumi.get(self, "peering_location")
@property
@pulumi.getter(name="serviceProviderName")
def service_provider_name(self) -> Optional[str]:
"""
The serviceProviderName.
"""
return pulumi.get(self, "service_provider_name")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class ExpressRouteCircuitSkuResponse(dict):
"""
Contains SKU in an ExpressRouteCircuit.
"""
def __init__(__self__, *,
family: Optional[str] = None,
name: Optional[str] = None,
tier: Optional[str] = None):
"""
Contains SKU in an ExpressRouteCircuit.
:param str family: The family of the SKU. Possible values are: 'UnlimitedData' and 'MeteredData'.
:param str name: The name of the SKU.
:param str tier: The tier of the SKU. Possible values are 'Standard' and 'Premium'.
"""
if family is not None:
pulumi.set(__self__, "family", family)
if name is not None:
pulumi.set(__self__, "name", name)
if tier is not None:
pulumi.set(__self__, "tier", tier)
@property
@pulumi.getter
def family(self) -> Optional[str]:
"""
The family of the SKU. Possible values are: 'UnlimitedData' and 'MeteredData'.
"""
return pulumi.get(self, "family")
@property
@pulumi.getter
def name(self) -> Optional[str]:
"""
The name of the SKU.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def tier(self) -> Optional[str]:
"""
The tier of the SKU. Possible values are 'Standard' and 'Premium'.
"""
return pulumi.get(self, "tier")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class ExpressRouteCircuitStatsResponse(dict):
"""
Contains stats associated with the peering.
"""
def __init__(__self__, *,
primarybytes_in: Optional[float] = None,
primarybytes_out: Optional[float] = None,
secondarybytes_in: Optional[float] = None,
secondarybytes_out: Optional[float] = None):
"""
Contains stats associated with the peering.
:param float primarybytes_in: Gets the primary BytesIn of the peering.
:param float primarybytes_out: Gets the primary BytesOut of the peering.
:param float secondarybytes_in: Gets the secondary BytesIn of the peering.
:param float secondarybytes_out: Gets the secondary BytesOut of the peering.
"""
if primarybytes_in is not None:
pulumi.set(__self__, "primarybytes_in", primarybytes_in)
if primarybytes_out is not None:
pulumi.set(__self__, "primarybytes_out", primarybytes_out)
if secondarybytes_in is not None:
pulumi.set(__self__, "secondarybytes_in", secondarybytes_in)
if secondarybytes_out is not None:
pulumi.set(__self__, "secondarybytes_out", secondarybytes_out)
@property
@pulumi.getter(name="primarybytesIn")
def primarybytes_in(self) -> Optional[float]:
"""
Gets the primary BytesIn of the peering.
"""
return pulumi.get(self, "primarybytes_in")
@property
@pulumi.getter(name="primarybytesOut")
def primarybytes_out(self) -> Optional[float]:
"""
Gets the primary BytesOut of the peering.
"""
return pulumi.get(self, "primarybytes_out")
@property
@pulumi.getter(name="secondarybytesIn")
def secondarybytes_in(self) -> Optional[float]:
"""
Gets the secondary BytesIn of the peering.
"""
return pulumi.get(self, "secondarybytes_in")
@property
@pulumi.getter(name="secondarybytesOut")
def secondarybytes_out(self) -> Optional[float]:
"""
Gets the secondary BytesOut of the peering.
"""
return pulumi.get(self, "secondarybytes_out")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
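# Illustrative sketch (not generated code): sums the optional primary/secondary
# byte counters documented above, treating missing values as zero. The
# aggregation itself is an assumption for the example.
def _example_total_bytes(stats: 'ExpressRouteCircuitStatsResponse') -> float:
    parts = (stats.primarybytes_in, stats.primarybytes_out,
             stats.secondarybytes_in, stats.secondarybytes_out)
    return float(sum(p for p in parts if p is not None))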
@pulumi.output_type
class FrontendIPConfigurationResponse(dict):
"""
Frontend IP address of the load balancer.
"""
def __init__(__self__, *,
inbound_nat_pools: Sequence['outputs.SubResourceResponse'],
inbound_nat_rules: Sequence['outputs.SubResourceResponse'],
load_balancing_rules: Sequence['outputs.SubResourceResponse'],
outbound_nat_rules: Sequence['outputs.SubResourceResponse'],
etag: Optional[str] = None,
id: Optional[str] = None,
name: Optional[str] = None,
private_ip_address: Optional[str] = None,
private_ip_allocation_method: Optional[str] = None,
provisioning_state: Optional[str] = None,
public_ip_address: Optional['outputs.PublicIPAddressResponse'] = None,
subnet: Optional['outputs.SubnetResponse'] = None):
"""
Frontend IP address of the load balancer.
:param Sequence['SubResourceResponseArgs'] inbound_nat_pools: Read only. Inbound pools URIs that use this frontend IP.
:param Sequence['SubResourceResponseArgs'] inbound_nat_rules: Read only. Inbound rules URIs that use this frontend IP.
:param Sequence['SubResourceResponseArgs'] load_balancing_rules: Gets load balancing rules URIs that use this frontend IP.
:param Sequence['SubResourceResponseArgs'] outbound_nat_rules: Read only. Outbound rules URIs that use this frontend IP.
:param str etag: A unique read-only string that changes whenever the resource is updated.
:param str id: Resource ID.
:param str name: The name of the resource that is unique within a resource group. This name can be used to access the resource.
:param str private_ip_address: The private IP address of the IP configuration.
:param str private_ip_allocation_method: The Private IP allocation method. Possible values are: 'Static' and 'Dynamic'.
:param str provisioning_state: Gets the provisioning state of the frontend IP configuration resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
:param 'PublicIPAddressResponseArgs' public_ip_address: The reference of the Public IP resource.
:param 'SubnetResponseArgs' subnet: The reference of the subnet resource.
"""
pulumi.set(__self__, "inbound_nat_pools", inbound_nat_pools)
pulumi.set(__self__, "inbound_nat_rules", inbound_nat_rules)
pulumi.set(__self__, "load_balancing_rules", load_balancing_rules)
pulumi.set(__self__, "outbound_nat_rules", outbound_nat_rules)
if etag is not None:
pulumi.set(__self__, "etag", etag)
if id is not None:
pulumi.set(__self__, "id", id)
if name is not None:
pulumi.set(__self__, "name", name)
if private_ip_address is not None:
pulumi.set(__self__, "private_ip_address", private_ip_address)
if private_ip_allocation_method is not None:
pulumi.set(__self__, "private_ip_allocation_method", private_ip_allocation_method)
if provisioning_state is not None:
pulumi.set(__self__, "provisioning_state", provisioning_state)
if public_ip_address is not None:
pulumi.set(__self__, "public_ip_address", public_ip_address)
if subnet is not None:
pulumi.set(__self__, "subnet", subnet)
@property
@pulumi.getter(name="inboundNatPools")
def inbound_nat_pools(self) -> Sequence['outputs.SubResourceResponse']:
"""
Read only. Inbound pools URIs that use this frontend IP.
"""
return pulumi.get(self, "inbound_nat_pools")
@property
@pulumi.getter(name="inboundNatRules")
def inbound_nat_rules(self) -> Sequence['outputs.SubResourceResponse']:
"""
Read only. Inbound rules URIs that use this frontend IP.
"""
return pulumi.get(self, "inbound_nat_rules")
@property
@pulumi.getter(name="loadBalancingRules")
def load_balancing_rules(self) -> Sequence['outputs.SubResourceResponse']:
"""
Gets load balancing rules URIs that use this frontend IP.
"""
return pulumi.get(self, "load_balancing_rules")
@property
@pulumi.getter(name="outboundNatRules")
def outbound_nat_rules(self) -> Sequence['outputs.SubResourceResponse']:
"""
Read only. Outbound rules URIs that use this frontend IP.
"""
return pulumi.get(self, "outbound_nat_rules")
@property
@pulumi.getter
def etag(self) -> Optional[str]:
"""
A unique read-only string that changes whenever the resource is updated.
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter
def id(self) -> Optional[str]:
"""
Resource ID.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def name(self) -> Optional[str]:
"""
The name of the resource that is unique within a resource group. This name can be used to access the resource.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="privateIPAddress")
def private_ip_address(self) -> Optional[str]:
"""
The private IP address of the IP configuration.
"""
return pulumi.get(self, "private_ip_address")
@property
@pulumi.getter(name="privateIPAllocationMethod")
def private_ip_allocation_method(self) -> Optional[str]:
"""
The Private IP allocation method. Possible values are: 'Static' and 'Dynamic'.
"""
return pulumi.get(self, "private_ip_allocation_method")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> Optional[str]:
"""
Gets the provisioning state of the frontend IP configuration resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter(name="publicIPAddress")
def public_ip_address(self) -> Optional['outputs.PublicIPAddressResponse']:
"""
The reference of the Public IP resource.
"""
return pulumi.get(self, "public_ip_address")
@property
@pulumi.getter
def subnet(self) -> Optional['outputs.SubnetResponse']:
"""
The reference of the subnet resource.
"""
return pulumi.get(self, "subnet")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class GatewayRouteResponseResult(dict):
def __init__(__self__, *,
as_path: str,
local_address: str,
network: str,
next_hop: str,
origin: str,
source_peer: str,
weight: int):
"""
:param str as_path: The route's AS path sequence
:param str local_address: The gateway's local address
:param str network: The route's network prefix
:param str next_hop: The route's next hop
:param str origin: The source this route was learned from
:param str source_peer: The peer this route was learned from
:param int weight: The route's weight
"""
pulumi.set(__self__, "as_path", as_path)
pulumi.set(__self__, "local_address", local_address)
pulumi.set(__self__, "network", network)
pulumi.set(__self__, "next_hop", next_hop)
pulumi.set(__self__, "origin", origin)
pulumi.set(__self__, "source_peer", source_peer)
pulumi.set(__self__, "weight", weight)
@property
@pulumi.getter(name="asPath")
def as_path(self) -> str:
"""
The route's AS path sequence
"""
return pulumi.get(self, "as_path")
@property
@pulumi.getter(name="localAddress")
def local_address(self) -> str:
"""
The gateway's local address
"""
return pulumi.get(self, "local_address")
@property
@pulumi.getter
def network(self) -> str:
"""
The route's network prefix
"""
return pulumi.get(self, "network")
@property
@pulumi.getter(name="nextHop")
def next_hop(self) -> str:
"""
The route's next hop
"""
return pulumi.get(self, "next_hop")
@property
@pulumi.getter
def origin(self) -> str:
"""
The source this route was learned from
"""
return pulumi.get(self, "origin")
@property
@pulumi.getter(name="sourcePeer")
def source_peer(self) -> str:
"""
The peer this route was learned from
"""
return pulumi.get(self, "source_peer")
@property
@pulumi.getter
def weight(self) -> int:
"""
The route's weight
"""
return pulumi.get(self, "weight")
@pulumi.output_type
class IPConfigurationResponse(dict):
"""
IPConfiguration
"""
def __init__(__self__, *,
etag: Optional[str] = None,
id: Optional[str] = None,
name: Optional[str] = None,
private_ip_address: Optional[str] = None,
private_ip_allocation_method: Optional[str] = None,
provisioning_state: Optional[str] = None,
public_ip_address: Optional['outputs.PublicIPAddressResponse'] = None,
subnet: Optional['outputs.SubnetResponse'] = None):
"""
IPConfiguration
:param str etag: A unique read-only string that changes whenever the resource is updated.
:param str id: Resource ID.
:param str name: The name of the resource that is unique within a resource group. This name can be used to access the resource.
:param str private_ip_address: The private IP address of the IP configuration.
:param str private_ip_allocation_method: The private IP allocation method. Possible values are 'Static' and 'Dynamic'.
:param str provisioning_state: Gets the provisioning state of the IP configuration resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
:param 'PublicIPAddressResponseArgs' public_ip_address: The reference of the public IP resource.
:param 'SubnetResponseArgs' subnet: The reference of the subnet resource.
"""
if etag is not None:
pulumi.set(__self__, "etag", etag)
if id is not None:
pulumi.set(__self__, "id", id)
if name is not None:
pulumi.set(__self__, "name", name)
if private_ip_address is not None:
pulumi.set(__self__, "private_ip_address", private_ip_address)
if private_ip_allocation_method is not None:
pulumi.set(__self__, "private_ip_allocation_method", private_ip_allocation_method)
if provisioning_state is not None:
pulumi.set(__self__, "provisioning_state", provisioning_state)
if public_ip_address is not None:
pulumi.set(__self__, "public_ip_address", public_ip_address)
if subnet is not None:
pulumi.set(__self__, "subnet", subnet)
@property
@pulumi.getter
def etag(self) -> Optional[str]:
"""
A unique read-only string that changes whenever the resource is updated.
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter
def id(self) -> Optional[str]:
"""
Resource ID.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def name(self) -> Optional[str]:
"""
The name of the resource that is unique within a resource group. This name can be used to access the resource.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="privateIPAddress")
def private_ip_address(self) -> Optional[str]:
"""
The private IP address of the IP configuration.
"""
return pulumi.get(self, "private_ip_address")
@property
@pulumi.getter(name="privateIPAllocationMethod")
def private_ip_allocation_method(self) -> Optional[str]:
"""
The private IP allocation method. Possible values are 'Static' and 'Dynamic'.
"""
return pulumi.get(self, "private_ip_allocation_method")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> Optional[str]:
"""
Gets the provisioning state of the IP configuration resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter(name="publicIPAddress")
def public_ip_address(self) -> Optional['outputs.PublicIPAddressResponse']:
"""
The reference of the public IP resource.
"""
return pulumi.get(self, "public_ip_address")
@property
@pulumi.getter
def subnet(self) -> Optional['outputs.SubnetResponse']:
"""
The reference of the subnet resource.
"""
return pulumi.get(self, "subnet")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class InboundNatPoolResponse(dict):
"""
Inbound NAT pool of the load balancer.
"""
def __init__(__self__, *,
backend_port: int,
frontend_port_range_end: int,
frontend_port_range_start: int,
protocol: str,
etag: Optional[str] = None,
frontend_ip_configuration: Optional['outputs.SubResourceResponse'] = None,
id: Optional[str] = None,
name: Optional[str] = None,
provisioning_state: Optional[str] = None):
"""
Inbound NAT pool of the load balancer.
:param int backend_port: The port used for internal connections on the endpoint. Acceptable values are between 1 and 65535.
:param int frontend_port_range_end: The last port number in the range of external ports that will be used to provide Inbound Nat to NICs associated with a load balancer. Acceptable values range between 1 and 65535.
:param int frontend_port_range_start: The first port number in the range of external ports that will be used to provide Inbound Nat to NICs associated with a load balancer. Acceptable values range between 1 and 65534.
:param str protocol: The transport protocol for the endpoint. Possible values are: 'Udp' or 'Tcp'.
:param str etag: A unique read-only string that changes whenever the resource is updated.
:param 'SubResourceResponseArgs' frontend_ip_configuration: A reference to frontend IP addresses.
:param str id: Resource ID.
:param str name: The name of the resource that is unique within a resource group. This name can be used to access the resource.
:param str provisioning_state: Gets the provisioning state of the inbound NAT pool resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
"""
pulumi.set(__self__, "backend_port", backend_port)
pulumi.set(__self__, "frontend_port_range_end", frontend_port_range_end)
pulumi.set(__self__, "frontend_port_range_start", frontend_port_range_start)
pulumi.set(__self__, "protocol", protocol)
if etag is not None:
pulumi.set(__self__, "etag", etag)
if frontend_ip_configuration is not None:
pulumi.set(__self__, "frontend_ip_configuration", frontend_ip_configuration)
if id is not None:
pulumi.set(__self__, "id", id)
if name is not None:
pulumi.set(__self__, "name", name)
if provisioning_state is not None:
pulumi.set(__self__, "provisioning_state", provisioning_state)
@property
@pulumi.getter(name="backendPort")
def backend_port(self) -> int:
"""
The port used for internal connections on the endpoint. Acceptable values are between 1 and 65535.
"""
return pulumi.get(self, "backend_port")
@property
@pulumi.getter(name="frontendPortRangeEnd")
def frontend_port_range_end(self) -> int:
"""
The last port number in the range of external ports that will be used to provide Inbound Nat to NICs associated with a load balancer. Acceptable values range between 1 and 65535.
"""
return pulumi.get(self, "frontend_port_range_end")
@property
@pulumi.getter(name="frontendPortRangeStart")
def frontend_port_range_start(self) -> int:
"""
The first port number in the range of external ports that will be used to provide Inbound Nat to NICs associated with a load balancer. Acceptable values range between 1 and 65534.
"""
return pulumi.get(self, "frontend_port_range_start")
@property
@pulumi.getter
def protocol(self) -> str:
"""
The transport protocol for the endpoint. Possible values are: 'Udp' or 'Tcp'.
"""
return pulumi.get(self, "protocol")
@property
@pulumi.getter
def etag(self) -> Optional[str]:
"""
A unique read-only string that changes whenever the resource is updated.
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter(name="frontendIPConfiguration")
def frontend_ip_configuration(self) -> Optional['outputs.SubResourceResponse']:
"""
A reference to frontend IP addresses.
"""
return pulumi.get(self, "frontend_ip_configuration")
@property
@pulumi.getter
def id(self) -> Optional[str]:
"""
Resource ID.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def name(self) -> Optional[str]:
"""
The name of the resource that is unique within a resource group. This name can be used to access the resource.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> Optional[str]:
"""
Gets the provisioning state of the inbound NAT pool resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
"""
return pulumi.get(self, "provisioning_state")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
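# Illustrative sketch (not generated code): computes how many NICs an inbound NAT
# pool defined above can serve, i.e. the size of its frontend port range. The
# inclusive-range arithmetic is an assumption based on the documented start/end fields.
def _example_nat_pool_size(pool: 'InboundNatPoolResponse') -> int:
    return pool.frontend_port_range_end - pool.frontend_port_range_start + 1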
@pulumi.output_type
class InboundNatRuleResponse(dict):
"""
Inbound NAT rule of the load balancer.
"""
def __init__(__self__, *,
backend_ip_configuration: 'outputs.NetworkInterfaceIPConfigurationResponse',
backend_port: Optional[int] = None,
enable_floating_ip: Optional[bool] = None,
etag: Optional[str] = None,
frontend_ip_configuration: Optional['outputs.SubResourceResponse'] = None,
frontend_port: Optional[int] = None,
id: Optional[str] = None,
idle_timeout_in_minutes: Optional[int] = None,
name: Optional[str] = None,
protocol: Optional[str] = None,
provisioning_state: Optional[str] = None):
"""
Inbound NAT rule of the load balancer.
:param 'NetworkInterfaceIPConfigurationResponseArgs' backend_ip_configuration: A reference to a private IP address defined on a network interface of a VM. Traffic sent to the frontend port of each of the frontend IP configurations is forwarded to the backend IP.
:param int backend_port: The port used for the internal endpoint. Acceptable values range from 1 to 65535.
:param bool enable_floating_ip: Configures a virtual machine's endpoint for the floating IP capability required to configure a SQL AlwaysOn Availability Group. This setting is required when using the SQL AlwaysOn Availability Groups in SQL server. This setting can't be changed after you create the endpoint.
:param str etag: A unique read-only string that changes whenever the resource is updated.
:param 'SubResourceResponseArgs' frontend_ip_configuration: A reference to frontend IP addresses.
:param int frontend_port: The port for the external endpoint. Port numbers for each rule must be unique within the Load Balancer. Acceptable values range from 1 to 65534.
:param str id: Resource ID.
:param int idle_timeout_in_minutes: The timeout for the TCP idle connection. The value can be set between 4 and 30 minutes. The default value is 4 minutes. This element is only used when the protocol is set to TCP.
:param str name: The name of the resource that is unique within a resource group. This name can be used to access the resource.
:param str protocol: The transport protocol for the endpoint. Possible values are: 'Udp' or 'Tcp'
:param str provisioning_state: Gets the provisioning state of the public IP resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
"""
pulumi.set(__self__, "backend_ip_configuration", backend_ip_configuration)
if backend_port is not None:
pulumi.set(__self__, "backend_port", backend_port)
if enable_floating_ip is not None:
pulumi.set(__self__, "enable_floating_ip", enable_floating_ip)
if etag is not None:
pulumi.set(__self__, "etag", etag)
if frontend_ip_configuration is not None:
pulumi.set(__self__, "frontend_ip_configuration", frontend_ip_configuration)
if frontend_port is not None:
pulumi.set(__self__, "frontend_port", frontend_port)
if id is not None:
pulumi.set(__self__, "id", id)
if idle_timeout_in_minutes is not None:
pulumi.set(__self__, "idle_timeout_in_minutes", idle_timeout_in_minutes)
if name is not None:
pulumi.set(__self__, "name", name)
if protocol is not None:
pulumi.set(__self__, "protocol", protocol)
if provisioning_state is not None:
pulumi.set(__self__, "provisioning_state", provisioning_state)
@property
@pulumi.getter(name="backendIPConfiguration")
def backend_ip_configuration(self) -> 'outputs.NetworkInterfaceIPConfigurationResponse':
"""
A reference to a private IP address defined on a network interface of a VM. Traffic sent to the frontend port of each of the frontend IP configurations is forwarded to the backend IP.
"""
return pulumi.get(self, "backend_ip_configuration")
@property
@pulumi.getter(name="backendPort")
def backend_port(self) -> Optional[int]:
"""
The port used for the internal endpoint. Acceptable values range from 1 to 65535.
"""
return pulumi.get(self, "backend_port")
@property
@pulumi.getter(name="enableFloatingIP")
def enable_floating_ip(self) -> Optional[bool]:
"""
Configures a virtual machine's endpoint for the floating IP capability required to configure a SQL AlwaysOn Availability Group. This setting is required when using the SQL AlwaysOn Availability Groups in SQL server. This setting can't be changed after you create the endpoint.
"""
return pulumi.get(self, "enable_floating_ip")
@property
@pulumi.getter
def etag(self) -> Optional[str]:
"""
A unique read-only string that changes whenever the resource is updated.
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter(name="frontendIPConfiguration")
def frontend_ip_configuration(self) -> Optional['outputs.SubResourceResponse']:
"""
A reference to frontend IP addresses.
"""
return pulumi.get(self, "frontend_ip_configuration")
@property
@pulumi.getter(name="frontendPort")
def frontend_port(self) -> Optional[int]:
"""
The port for the external endpoint. Port numbers for each rule must be unique within the Load Balancer. Acceptable values range from 1 to 65534.
"""
return pulumi.get(self, "frontend_port")
@property
@pulumi.getter
def id(self) -> Optional[str]:
"""
Resource ID.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter(name="idleTimeoutInMinutes")
def idle_timeout_in_minutes(self) -> Optional[int]:
"""
The timeout for the TCP idle connection. The value can be set between 4 and 30 minutes. The default value is 4 minutes. This element is only used when the protocol is set to TCP.
"""
return pulumi.get(self, "idle_timeout_in_minutes")
@property
@pulumi.getter
def name(self) -> Optional[str]:
"""
The name of the resource that is unique within a resource group. This name can be used to access the resource.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def protocol(self) -> Optional[str]:
"""
The transport protocol for the endpoint. Possible values are: 'Udp' or 'Tcp'
"""
return pulumi.get(self, "protocol")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> Optional[str]:
"""
Gets the provisioning state of the public IP resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
"""
return pulumi.get(self, "provisioning_state")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
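# Hedged usage sketch (not part of the generated API): InboundNatRuleResponse is an
# output type, so in a Pulumi program its fields are normally read inside an
# Output.apply callback. The resource variable `lb` below is hypothetical; any
# load-balancer resource exposing an `inbound_nat_rules` output would look the same.
#
#   def describe_nat_rules(rules):
#       # each element is an InboundNatRuleResponse with the snake_case getters above
#       return [f"{r.name}: {r.frontend_port} -> {r.backend_port}" for r in (rules or [])]
#
#   lb.inbound_nat_rules.apply(describe_nat_rules)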
@pulumi.output_type
class LoadBalancingRuleResponse(dict):
"""
A load balancing rule for a load balancer.
"""
def __init__(__self__, *,
frontend_port: int,
protocol: str,
backend_address_pool: Optional['outputs.SubResourceResponse'] = None,
backend_port: Optional[int] = None,
enable_floating_ip: Optional[bool] = None,
etag: Optional[str] = None,
frontend_ip_configuration: Optional['outputs.SubResourceResponse'] = None,
id: Optional[str] = None,
idle_timeout_in_minutes: Optional[int] = None,
load_distribution: Optional[str] = None,
name: Optional[str] = None,
probe: Optional['outputs.SubResourceResponse'] = None,
provisioning_state: Optional[str] = None):
"""
A load balancing rule for a load balancer.
:param int frontend_port: The port for the external endpoint. Port numbers for each rule must be unique within the Load Balancer. Acceptable values are between 1 and 65534.
:param str protocol: The transport protocol for the external endpoint. Possible values are 'Udp' or 'Tcp'
:param 'SubResourceResponseArgs' backend_address_pool: A reference to a pool of DIPs. Inbound traffic is randomly load balanced across IPs in the backend IPs.
:param int backend_port: The port used for internal connections on the endpoint. Acceptable values are between 1 and 65535.
:param bool enable_floating_ip: Configures a virtual machine's endpoint for the floating IP capability required to configure a SQL AlwaysOn Availability Group. This setting is required when using the SQL AlwaysOn Availability Groups in SQL server. This setting can't be changed after you create the endpoint.
:param str etag: A unique read-only string that changes whenever the resource is updated.
:param 'SubResourceResponseArgs' frontend_ip_configuration: A reference to frontend IP addresses.
:param str id: Resource ID.
:param int idle_timeout_in_minutes: The timeout for the TCP idle connection. The value can be set between 4 and 30 minutes. The default value is 4 minutes. This element is only used when the protocol is set to TCP.
:param str load_distribution: The load distribution policy for this rule. Possible values are 'Default', 'SourceIP', and 'SourceIPProtocol'.
:param str name: The name of the resource that is unique within a resource group. This name can be used to access the resource.
:param 'SubResourceResponseArgs' probe: The reference of the load balancer probe used by the load balancing rule.
:param str provisioning_state: Gets the provisioning state of the PublicIP resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
"""
pulumi.set(__self__, "frontend_port", frontend_port)
pulumi.set(__self__, "protocol", protocol)
if backend_address_pool is not None:
pulumi.set(__self__, "backend_address_pool", backend_address_pool)
if backend_port is not None:
pulumi.set(__self__, "backend_port", backend_port)
if enable_floating_ip is not None:
pulumi.set(__self__, "enable_floating_ip", enable_floating_ip)
if etag is not None:
pulumi.set(__self__, "etag", etag)
if frontend_ip_configuration is not None:
pulumi.set(__self__, "frontend_ip_configuration", frontend_ip_configuration)
if id is not None:
pulumi.set(__self__, "id", id)
if idle_timeout_in_minutes is not None:
pulumi.set(__self__, "idle_timeout_in_minutes", idle_timeout_in_minutes)
if load_distribution is not None:
pulumi.set(__self__, "load_distribution", load_distribution)
if name is not None:
pulumi.set(__self__, "name", name)
if probe is not None:
pulumi.set(__self__, "probe", probe)
if provisioning_state is not None:
pulumi.set(__self__, "provisioning_state", provisioning_state)
@property
@pulumi.getter(name="frontendPort")
def frontend_port(self) -> int:
"""
The port for the external endpoint. Port numbers for each rule must be unique within the Load Balancer. Acceptable values are between 1 and 65534.
"""
return pulumi.get(self, "frontend_port")
@property
@pulumi.getter
def protocol(self) -> str:
"""
The transport protocol for the external endpoint. Possible values are 'Udp' or 'Tcp'
"""
return pulumi.get(self, "protocol")
@property
@pulumi.getter(name="backendAddressPool")
def backend_address_pool(self) -> Optional['outputs.SubResourceResponse']:
"""
A reference to a pool of DIPs. Inbound traffic is randomly load balanced across IPs in the backend IPs.
"""
return pulumi.get(self, "backend_address_pool")
@property
@pulumi.getter(name="backendPort")
def backend_port(self) -> Optional[int]:
"""
The port used for internal connections on the endpoint. Acceptable values are between 1 and 65535.
"""
return pulumi.get(self, "backend_port")
@property
@pulumi.getter(name="enableFloatingIP")
def enable_floating_ip(self) -> Optional[bool]:
"""
Configures a virtual machine's endpoint for the floating IP capability required to configure a SQL AlwaysOn Availability Group. This setting is required when using the SQL AlwaysOn Availability Groups in SQL server. This setting can't be changed after you create the endpoint.
"""
return pulumi.get(self, "enable_floating_ip")
@property
@pulumi.getter
def etag(self) -> Optional[str]:
"""
A unique read-only string that changes whenever the resource is updated.
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter(name="frontendIPConfiguration")
def frontend_ip_configuration(self) -> Optional['outputs.SubResourceResponse']:
"""
A reference to frontend IP addresses.
"""
return pulumi.get(self, "frontend_ip_configuration")
@property
@pulumi.getter
def id(self) -> Optional[str]:
"""
Resource ID.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter(name="idleTimeoutInMinutes")
def idle_timeout_in_minutes(self) -> Optional[int]:
"""
The timeout for the TCP idle connection. The value can be set between 4 and 30 minutes. The default value is 4 minutes. This element is only used when the protocol is set to TCP.
"""
return pulumi.get(self, "idle_timeout_in_minutes")
@property
@pulumi.getter(name="loadDistribution")
def load_distribution(self) -> Optional[str]:
"""
The load distribution policy for this rule. Possible values are 'Default', 'SourceIP', and 'SourceIPProtocol'.
"""
return pulumi.get(self, "load_distribution")
@property
@pulumi.getter
def name(self) -> Optional[str]:
"""
The name of the resource that is unique within a resource group. This name can be used to access the resource.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def probe(self) -> Optional['outputs.SubResourceResponse']:
"""
The reference of the load balancer probe used by the load balancing rule.
"""
return pulumi.get(self, "probe")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> Optional[str]:
"""
Gets the provisioning state of the PublicIP resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
"""
return pulumi.get(self, "provisioning_state")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
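# Hedged sketch: LoadBalancingRuleResponse exposes the camelCase REST payload through
# the snake_case getters above (frontendPort -> frontend_port, and so on). A typical
# read, again assuming a hypothetical `lb` resource with a `load_balancing_rules` output:
#
#   def summarize_rules(rules):
#       return [
#           f"{r.name}: {r.protocol} {r.frontend_port} -> {r.backend_port}"
#           f" (distribution={r.load_distribution or 'Default'})"
#           for r in (rules or [])
#       ]
#
#   lb.load_balancing_rules.apply(summarize_rules)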
@pulumi.output_type
class LocalNetworkGatewayResponse(dict):
"""
A common class for general resource information
"""
def __init__(__self__, *,
local_network_address_space: 'outputs.AddressSpaceResponse',
name: str,
provisioning_state: str,
type: str,
bgp_settings: Optional['outputs.BgpSettingsResponse'] = None,
etag: Optional[str] = None,
gateway_ip_address: Optional[str] = None,
id: Optional[str] = None,
location: Optional[str] = None,
resource_guid: Optional[str] = None,
tags: Optional[Mapping[str, str]] = None):
"""
A common class for general resource information
:param 'AddressSpaceResponseArgs' local_network_address_space: Local network site address space.
:param str name: Resource name.
:param str provisioning_state: The provisioning state of the LocalNetworkGateway resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
:param str type: Resource type.
:param 'BgpSettingsResponseArgs' bgp_settings: Local network gateway's BGP speaker settings.
:param str etag: A unique read-only string that changes whenever the resource is updated.
:param str gateway_ip_address: IP address of local network gateway.
:param str id: Resource ID.
:param str location: Resource location.
:param str resource_guid: The resource GUID property of the LocalNetworkGateway resource.
:param Mapping[str, str] tags: Resource tags.
"""
pulumi.set(__self__, "local_network_address_space", local_network_address_space)
pulumi.set(__self__, "name", name)
pulumi.set(__self__, "provisioning_state", provisioning_state)
pulumi.set(__self__, "type", type)
if bgp_settings is not None:
pulumi.set(__self__, "bgp_settings", bgp_settings)
if etag is not None:
pulumi.set(__self__, "etag", etag)
if gateway_ip_address is not None:
pulumi.set(__self__, "gateway_ip_address", gateway_ip_address)
if id is not None:
pulumi.set(__self__, "id", id)
if location is not None:
pulumi.set(__self__, "location", location)
if resource_guid is not None:
pulumi.set(__self__, "resource_guid", resource_guid)
if tags is not None:
pulumi.set(__self__, "tags", tags)
@property
@pulumi.getter(name="localNetworkAddressSpace")
def local_network_address_space(self) -> 'outputs.AddressSpaceResponse':
"""
Local network site address space.
"""
return pulumi.get(self, "local_network_address_space")
@property
@pulumi.getter
def name(self) -> str:
"""
Resource name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> str:
"""
The provisioning state of the LocalNetworkGateway resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter
def type(self) -> str:
"""
Resource type.
"""
return pulumi.get(self, "type")
@property
@pulumi.getter(name="bgpSettings")
def bgp_settings(self) -> Optional['outputs.BgpSettingsResponse']:
"""
Local network gateway's BGP speaker settings.
"""
return pulumi.get(self, "bgp_settings")
@property
@pulumi.getter
def etag(self) -> Optional[str]:
"""
A unique read-only string that changes whenever the resource is updated.
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter(name="gatewayIpAddress")
def gateway_ip_address(self) -> Optional[str]:
"""
IP address of local network gateway.
"""
return pulumi.get(self, "gateway_ip_address")
@property
@pulumi.getter
def id(self) -> Optional[str]:
"""
Resource ID.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def location(self) -> Optional[str]:
"""
Resource location.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter(name="resourceGuid")
def resource_guid(self) -> Optional[str]:
"""
The resource GUID property of the LocalNetworkGateway resource.
"""
return pulumi.get(self, "resource_guid")
@property
@pulumi.getter
def tags(self) -> Optional[Mapping[str, str]]:
"""
Resource tags.
"""
return pulumi.get(self, "tags")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
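# Hedged sketch: reading optional nested outputs from a LocalNetworkGatewayResponse.
# `gw` is a hypothetical instance, and the `address_prefixes` / `asn` attributes are
# assumed from the AddressSpaceResponse and BgpSettingsResponse types defined elsewhere
# in this module.
#
#   peer_ip = gw.gateway_ip_address                      # Optional[str], may be None
#   prefixes = gw.local_network_address_space.address_prefixes
#   asn = gw.bgp_settings.asn if gw.bgp_settings else None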
@pulumi.output_type
class NetworkInterfaceDnsSettingsResponse(dict):
"""
DNS settings of a network interface.
"""
def __init__(__self__, *,
applied_dns_servers: Optional[Sequence[str]] = None,
dns_servers: Optional[Sequence[str]] = None,
internal_dns_name_label: Optional[str] = None,
internal_domain_name_suffix: Optional[str] = None,
internal_fqdn: Optional[str] = None):
"""
DNS settings of a network interface.
:param Sequence[str] applied_dns_servers: If the VM that uses this NIC is part of an Availability Set, then this list will have the union of all DNS servers from all NICs that are part of the Availability Set. This property is what is configured on each of those VMs.
:param Sequence[str] dns_servers: List of DNS server IP addresses. Use 'AzureProvidedDNS' to switch to Azure-provided DNS resolution. The 'AzureProvidedDNS' value cannot be combined with other IPs; it must be the only value in the dnsServers collection.
:param str internal_dns_name_label: Relative DNS name for this NIC used for internal communications between VMs in the same virtual network.
:param str internal_domain_name_suffix: Even if internalDnsNameLabel is not specified, a DNS entry is created for the primary NIC of the VM. This DNS name can be constructed by concatenating the VM name with the value of internalDomainNameSuffix.
:param str internal_fqdn: Fully qualified DNS name supporting internal communications between VMs in the same virtual network.
"""
if applied_dns_servers is not None:
pulumi.set(__self__, "applied_dns_servers", applied_dns_servers)
if dns_servers is not None:
pulumi.set(__self__, "dns_servers", dns_servers)
if internal_dns_name_label is not None:
pulumi.set(__self__, "internal_dns_name_label", internal_dns_name_label)
if internal_domain_name_suffix is not None:
pulumi.set(__self__, "internal_domain_name_suffix", internal_domain_name_suffix)
if internal_fqdn is not None:
pulumi.set(__self__, "internal_fqdn", internal_fqdn)
@property
@pulumi.getter(name="appliedDnsServers")
def applied_dns_servers(self) -> Optional[Sequence[str]]:
"""
If the VM that uses this NIC is part of an Availability Set, then this list will have the union of all DNS servers from all NICs that are part of the Availability Set. This property is what is configured on each of those VMs.
"""
return pulumi.get(self, "applied_dns_servers")
@property
@pulumi.getter(name="dnsServers")
def dns_servers(self) -> Optional[Sequence[str]]:
"""
List of DNS server IP addresses. Use 'AzureProvidedDNS' to switch to Azure-provided DNS resolution. The 'AzureProvidedDNS' value cannot be combined with other IPs; it must be the only value in the dnsServers collection.
"""
return pulumi.get(self, "dns_servers")
@property
@pulumi.getter(name="internalDnsNameLabel")
def internal_dns_name_label(self) -> Optional[str]:
"""
Relative DNS name for this NIC used for internal communications between VMs in the same virtual network.
"""
return pulumi.get(self, "internal_dns_name_label")
@property
@pulumi.getter(name="internalDomainNameSuffix")
def internal_domain_name_suffix(self) -> Optional[str]:
"""
Even if internalDnsNameLabel is not specified, a DNS entry is created for the primary NIC of the VM. This DNS name can be constructed by concatenating the VM name with the value of internalDomainNameSuffix.
"""
return pulumi.get(self, "internal_domain_name_suffix")
@property
@pulumi.getter(name="internalFqdn")
def internal_fqdn(self) -> Optional[str]:
"""
Fully qualified DNS name supporting internal communications between VMs in the same virtual network.
"""
return pulumi.get(self, "internal_fqdn")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
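# Hedged sketch: the dnsServers constraint documented above ('AzureProvidedDNS' must be
# the only entry) can be checked on a NetworkInterfaceDnsSettingsResponse instance `dns`:
#
#   servers = list(dns.dns_servers or [])
#   if "AzureProvidedDNS" in servers and len(servers) > 1:
#       raise ValueError("'AzureProvidedDNS' cannot be combined with other DNS server IPs")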
@pulumi.output_type
class NetworkInterfaceIPConfigurationResponse(dict):
"""
IPConfiguration in a network interface.
"""
def __init__(__self__, *,
application_gateway_backend_address_pools: Optional[Sequence['outputs.ApplicationGatewayBackendAddressPoolResponse']] = None,
etag: Optional[str] = None,
id: Optional[str] = None,
load_balancer_backend_address_pools: Optional[Sequence['outputs.BackendAddressPoolResponse']] = None,
load_balancer_inbound_nat_rules: Optional[Sequence['outputs.InboundNatRuleResponse']] = None,
name: Optional[str] = None,
primary: Optional[bool] = None,
private_ip_address: Optional[str] = None,
private_ip_address_version: Optional[str] = None,
private_ip_allocation_method: Optional[str] = None,
provisioning_state: Optional[str] = None,
public_ip_address: Optional['outputs.PublicIPAddressResponse'] = None,
subnet: Optional['outputs.SubnetResponse'] = None):
"""
IPConfiguration in a network interface.
:param Sequence['ApplicationGatewayBackendAddressPoolResponseArgs'] application_gateway_backend_address_pools: The reference of ApplicationGatewayBackendAddressPool resource.
:param str etag: A unique read-only string that changes whenever the resource is updated.
:param str id: Resource ID.
:param Sequence['BackendAddressPoolResponseArgs'] load_balancer_backend_address_pools: The reference of LoadBalancerBackendAddressPool resource.
:param Sequence['InboundNatRuleResponseArgs'] load_balancer_inbound_nat_rules: A list of references of LoadBalancerInboundNatRules.
:param str name: The name of the resource that is unique within a resource group. This name can be used to access the resource.
:param bool primary: Gets whether this is a primary customer address on the network interface.
:param str private_ip_address_version: Available from Api-Version 2016-03-30 onwards, it represents whether the specific ipconfiguration is IPv4 or IPv6. Default is taken as IPv4. Possible values are: 'IPv4' and 'IPv6'.
:param str private_ip_allocation_method: Defines how a private IP address is assigned. Possible values are: 'Static' and 'Dynamic'.
:param 'PublicIPAddressResponseArgs' public_ip_address: Public IP address resource.
:param 'SubnetResponseArgs' subnet: Subnet in a virtual network resource.
"""
if application_gateway_backend_address_pools is not None:
pulumi.set(__self__, "application_gateway_backend_address_pools", application_gateway_backend_address_pools)
if etag is not None:
pulumi.set(__self__, "etag", etag)
if id is not None:
pulumi.set(__self__, "id", id)
if load_balancer_backend_address_pools is not None:
pulumi.set(__self__, "load_balancer_backend_address_pools", load_balancer_backend_address_pools)
if load_balancer_inbound_nat_rules is not None:
pulumi.set(__self__, "load_balancer_inbound_nat_rules", load_balancer_inbound_nat_rules)
if name is not None:
pulumi.set(__self__, "name", name)
if primary is not None:
pulumi.set(__self__, "primary", primary)
if private_ip_address is not None:
pulumi.set(__self__, "private_ip_address", private_ip_address)
if private_ip_address_version is not None:
pulumi.set(__self__, "private_ip_address_version", private_ip_address_version)
if private_ip_allocation_method is not None:
pulumi.set(__self__, "private_ip_allocation_method", private_ip_allocation_method)
if provisioning_state is not None:
pulumi.set(__self__, "provisioning_state", provisioning_state)
if public_ip_address is not None:
pulumi.set(__self__, "public_ip_address", public_ip_address)
if subnet is not None:
pulumi.set(__self__, "subnet", subnet)
@property
@pulumi.getter(name="applicationGatewayBackendAddressPools")
def application_gateway_backend_address_pools(self) -> Optional[Sequence['outputs.ApplicationGatewayBackendAddressPoolResponse']]:
"""
The reference of ApplicationGatewayBackendAddressPool resource.
"""
return pulumi.get(self, "application_gateway_backend_address_pools")
@property
@pulumi.getter
def etag(self) -> Optional[str]:
"""
A unique read-only string that changes whenever the resource is updated.
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter
def id(self) -> Optional[str]:
"""
Resource ID.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter(name="loadBalancerBackendAddressPools")
def load_balancer_backend_address_pools(self) -> Optional[Sequence['outputs.BackendAddressPoolResponse']]:
"""
The reference of LoadBalancerBackendAddressPool resource.
"""
return pulumi.get(self, "load_balancer_backend_address_pools")
@property
@pulumi.getter(name="loadBalancerInboundNatRules")
def load_balancer_inbound_nat_rules(self) -> Optional[Sequence['outputs.InboundNatRuleResponse']]:
"""
A list of references of LoadBalancerInboundNatRules.
"""
return pulumi.get(self, "load_balancer_inbound_nat_rules")
@property
@pulumi.getter
def name(self) -> Optional[str]:
"""
The name of the resource that is unique within a resource group. This name can be used to access the resource.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def primary(self) -> Optional[bool]:
"""
Gets whether this is a primary customer address on the network interface.
"""
return pulumi.get(self, "primary")
@property
@pulumi.getter(name="privateIPAddress")
def private_ip_address(self) -> Optional[str]:
return pulumi.get(self, "private_ip_address")
@property
@pulumi.getter(name="privateIPAddressVersion")
def private_ip_address_version(self) -> Optional[str]:
"""
Available from Api-Version 2016-03-30 onwards, it represents whether the specific ipconfiguration is IPv4 or IPv6. Default is taken as IPv4. Possible values are: 'IPv4' and 'IPv6'.
"""
return pulumi.get(self, "private_ip_address_version")
@property
@pulumi.getter(name="privateIPAllocationMethod")
def private_ip_allocation_method(self) -> Optional[str]:
"""
Defines how a private IP address is assigned. Possible values are: 'Static' and 'Dynamic'.
"""
return pulumi.get(self, "private_ip_allocation_method")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> Optional[str]:
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter(name="publicIPAddress")
def public_ip_address(self) -> Optional['outputs.PublicIPAddressResponse']:
"""
Public IP address resource.
"""
return pulumi.get(self, "public_ip_address")
@property
@pulumi.getter
def subnet(self) -> Optional['outputs.SubnetResponse']:
"""
Subnet in a virtual network resource.
"""
return pulumi.get(self, "subnet")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class NetworkInterfaceResponse(dict):
"""
A network interface in a resource group.
"""
def __init__(__self__, *,
name: str,
type: str,
dns_settings: Optional['outputs.NetworkInterfaceDnsSettingsResponse'] = None,
enable_accelerated_networking: Optional[bool] = None,
enable_ip_forwarding: Optional[bool] = None,
etag: Optional[str] = None,
id: Optional[str] = None,
ip_configurations: Optional[Sequence['outputs.NetworkInterfaceIPConfigurationResponse']] = None,
location: Optional[str] = None,
mac_address: Optional[str] = None,
network_security_group: Optional['outputs.NetworkSecurityGroupResponse'] = None,
primary: Optional[bool] = None,
provisioning_state: Optional[str] = None,
resource_guid: Optional[str] = None,
tags: Optional[Mapping[str, str]] = None,
virtual_machine: Optional['outputs.SubResourceResponse'] = None):
"""
A network interface in a resource group.
:param str name: Resource name.
:param str type: Resource type.
:param 'NetworkInterfaceDnsSettingsResponseArgs' dns_settings: The DNS settings in network interface.
:param bool enable_accelerated_networking: If the network interface is accelerated networking enabled.
:param bool enable_ip_forwarding: Indicates whether IP forwarding is enabled on this network interface.
:param str etag: A unique read-only string that changes whenever the resource is updated.
:param str id: Resource ID.
:param Sequence['NetworkInterfaceIPConfigurationResponseArgs'] ip_configurations: A list of IPConfigurations of the network interface.
:param str location: Resource location.
:param str mac_address: The MAC address of the network interface.
:param 'NetworkSecurityGroupResponseArgs' network_security_group: The reference of the NetworkSecurityGroup resource.
:param bool primary: Gets whether this is a primary network interface on a virtual machine.
:param str provisioning_state: The provisioning state of the public IP resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
:param str resource_guid: The resource GUID property of the network interface resource.
:param Mapping[str, str] tags: Resource tags.
:param 'SubResourceResponseArgs' virtual_machine: The reference of a virtual machine.
"""
pulumi.set(__self__, "name", name)
pulumi.set(__self__, "type", type)
if dns_settings is not None:
pulumi.set(__self__, "dns_settings", dns_settings)
if enable_accelerated_networking is not None:
pulumi.set(__self__, "enable_accelerated_networking", enable_accelerated_networking)
if enable_ip_forwarding is not None:
pulumi.set(__self__, "enable_ip_forwarding", enable_ip_forwarding)
if etag is not None:
pulumi.set(__self__, "etag", etag)
if id is not None:
pulumi.set(__self__, "id", id)
if ip_configurations is not None:
pulumi.set(__self__, "ip_configurations", ip_configurations)
if location is not None:
pulumi.set(__self__, "location", location)
if mac_address is not None:
pulumi.set(__self__, "mac_address", mac_address)
if network_security_group is not None:
pulumi.set(__self__, "network_security_group", network_security_group)
if primary is not None:
pulumi.set(__self__, "primary", primary)
if provisioning_state is not None:
pulumi.set(__self__, "provisioning_state", provisioning_state)
if resource_guid is not None:
pulumi.set(__self__, "resource_guid", resource_guid)
if tags is not None:
pulumi.set(__self__, "tags", tags)
if virtual_machine is not None:
pulumi.set(__self__, "virtual_machine", virtual_machine)
@property
@pulumi.getter
def name(self) -> str:
"""
Resource name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def type(self) -> str:
"""
Resource type.
"""
return pulumi.get(self, "type")
@property
@pulumi.getter(name="dnsSettings")
def dns_settings(self) -> Optional['outputs.NetworkInterfaceDnsSettingsResponse']:
"""
The DNS settings in network interface.
"""
return pulumi.get(self, "dns_settings")
@property
@pulumi.getter(name="enableAcceleratedNetworking")
def enable_accelerated_networking(self) -> Optional[bool]:
"""
If the network interface is accelerated networking enabled.
"""
return pulumi.get(self, "enable_accelerated_networking")
@property
@pulumi.getter(name="enableIPForwarding")
def enable_ip_forwarding(self) -> Optional[bool]:
"""
Indicates whether IP forwarding is enabled on this network interface.
"""
return pulumi.get(self, "enable_ip_forwarding")
@property
@pulumi.getter
def etag(self) -> Optional[str]:
"""
A unique read-only string that changes whenever the resource is updated.
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter
def id(self) -> Optional[str]:
"""
Resource ID.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter(name="ipConfigurations")
def ip_configurations(self) -> Optional[Sequence['outputs.NetworkInterfaceIPConfigurationResponse']]:
"""
A list of IPConfigurations of the network interface.
"""
return pulumi.get(self, "ip_configurations")
@property
@pulumi.getter
def location(self) -> Optional[str]:
"""
Resource location.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter(name="macAddress")
def mac_address(self) -> Optional[str]:
"""
The MAC address of the network interface.
"""
return pulumi.get(self, "mac_address")
@property
@pulumi.getter(name="networkSecurityGroup")
def network_security_group(self) -> Optional['outputs.NetworkSecurityGroupResponse']:
"""
The reference of the NetworkSecurityGroup resource.
"""
return pulumi.get(self, "network_security_group")
@property
@pulumi.getter
def primary(self) -> Optional[bool]:
"""
Gets whether this is a primary network interface on a virtual machine.
"""
return pulumi.get(self, "primary")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> Optional[str]:
"""
The provisioning state of the public IP resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter(name="resourceGuid")
def resource_guid(self) -> Optional[str]:
"""
The resource GUID property of the network interface resource.
"""
return pulumi.get(self, "resource_guid")
@property
@pulumi.getter
def tags(self) -> Optional[Mapping[str, str]]:
"""
Resource tags.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter(name="virtualMachine")
def virtual_machine(self) -> Optional['outputs.SubResourceResponse']:
"""
The reference of a virtual machine.
"""
return pulumi.get(self, "virtual_machine")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
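# Hedged sketch: locating the primary IP configuration on a NetworkInterfaceResponse.
# `nic` is a hypothetical instance, for example the result of a network-interface lookup.
#
#   primary_cfg = next(
#       (cfg for cfg in (nic.ip_configurations or []) if cfg.primary),
#       None,
#   )
#   private_ip = primary_cfg.private_ip_address if primary_cfg else None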
@pulumi.output_type
class NetworkSecurityGroupResponse(dict):
"""
NetworkSecurityGroup resource.
"""
def __init__(__self__, *,
name: str,
network_interfaces: Sequence['outputs.NetworkInterfaceResponse'],
subnets: Sequence['outputs.SubnetResponse'],
type: str,
default_security_rules: Optional[Sequence['outputs.SecurityRuleResponse']] = None,
etag: Optional[str] = None,
id: Optional[str] = None,
location: Optional[str] = None,
provisioning_state: Optional[str] = None,
resource_guid: Optional[str] = None,
security_rules: Optional[Sequence['outputs.SecurityRuleResponse']] = None,
tags: Optional[Mapping[str, str]] = None):
"""
NetworkSecurityGroup resource.
:param str name: Resource name.
:param Sequence['NetworkInterfaceResponseArgs'] network_interfaces: A collection of references to network interfaces.
:param Sequence['SubnetResponseArgs'] subnets: A collection of references to subnets.
:param str type: Resource type.
:param Sequence['SecurityRuleResponseArgs'] default_security_rules: The default security rules of network security group.
:param str etag: A unique read-only string that changes whenever the resource is updated.
:param str id: Resource ID.
:param str location: Resource location.
:param str provisioning_state: The provisioning state of the public IP resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
:param str resource_guid: The resource GUID property of the network security group resource.
:param Sequence['SecurityRuleResponseArgs'] security_rules: A collection of security rules of the network security group.
:param Mapping[str, str] tags: Resource tags.
"""
pulumi.set(__self__, "name", name)
pulumi.set(__self__, "network_interfaces", network_interfaces)
pulumi.set(__self__, "subnets", subnets)
pulumi.set(__self__, "type", type)
if default_security_rules is not None:
pulumi.set(__self__, "default_security_rules", default_security_rules)
if etag is not None:
pulumi.set(__self__, "etag", etag)
if id is not None:
pulumi.set(__self__, "id", id)
if location is not None:
pulumi.set(__self__, "location", location)
if provisioning_state is not None:
pulumi.set(__self__, "provisioning_state", provisioning_state)
if resource_guid is not None:
pulumi.set(__self__, "resource_guid", resource_guid)
if security_rules is not None:
pulumi.set(__self__, "security_rules", security_rules)
if tags is not None:
pulumi.set(__self__, "tags", tags)
@property
@pulumi.getter
def name(self) -> str:
"""
Resource name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="networkInterfaces")
def network_interfaces(self) -> Sequence['outputs.NetworkInterfaceResponse']:
"""
A collection of references to network interfaces.
"""
return pulumi.get(self, "network_interfaces")
@property
@pulumi.getter
def subnets(self) -> Sequence['outputs.SubnetResponse']:
"""
A collection of references to subnets.
"""
return pulumi.get(self, "subnets")
@property
@pulumi.getter
def type(self) -> str:
"""
Resource type.
"""
return pulumi.get(self, "type")
@property
@pulumi.getter(name="defaultSecurityRules")
def default_security_rules(self) -> Optional[Sequence['outputs.SecurityRuleResponse']]:
"""
The default security rules of network security group.
"""
return pulumi.get(self, "default_security_rules")
@property
@pulumi.getter
def etag(self) -> Optional[str]:
"""
A unique read-only string that changes whenever the resource is updated.
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter
def id(self) -> Optional[str]:
"""
Resource ID.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def location(self) -> Optional[str]:
"""
Resource location.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> Optional[str]:
"""
The provisioning state of the public IP resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter(name="resourceGuid")
def resource_guid(self) -> Optional[str]:
"""
The resource GUID property of the network security group resource.
"""
return pulumi.get(self, "resource_guid")
@property
@pulumi.getter(name="securityRules")
def security_rules(self) -> Optional[Sequence['outputs.SecurityRuleResponse']]:
"""
A collection of security rules of the network security group.
"""
return pulumi.get(self, "security_rules")
@property
@pulumi.getter
def tags(self) -> Optional[Mapping[str, str]]:
"""
Resource tags.
"""
return pulumi.get(self, "tags")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
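# Hedged sketch: user-defined rules are surfaced in `security_rules`, while Azure's
# built-in rules appear separately in `default_security_rules`. Given a hypothetical
# NetworkSecurityGroupResponse instance `nsg` (and assuming SecurityRuleResponse has a
# `name` getter, as elsewhere in this module):
#
#   custom = list(nsg.security_rules or [])
#   builtin = list(nsg.default_security_rules or [])
#   rule_names = [r.name for r in custom + builtin]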
@pulumi.output_type
class OutboundNatRuleResponse(dict):
"""
Outbound NAT pool of the load balancer.
"""
def __init__(__self__, *,
backend_address_pool: 'outputs.SubResourceResponse',
allocated_outbound_ports: Optional[int] = None,
etag: Optional[str] = None,
frontend_ip_configurations: Optional[Sequence['outputs.SubResourceResponse']] = None,
id: Optional[str] = None,
name: Optional[str] = None,
provisioning_state: Optional[str] = None):
"""
Outbound NAT pool of the load balancer.
:param 'SubResourceResponseArgs' backend_address_pool: A reference to a pool of DIPs. Outbound traffic is randomly load balanced across IPs in the backend IPs.
:param int allocated_outbound_ports: The number of outbound ports to be used for NAT.
:param str etag: A unique read-only string that changes whenever the resource is updated.
:param Sequence['SubResourceResponseArgs'] frontend_ip_configurations: The Frontend IP addresses of the load balancer.
:param str id: Resource ID.
:param str name: The name of the resource that is unique within a resource group. This name can be used to access the resource.
:param str provisioning_state: Gets the provisioning state of the PublicIP resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
"""
pulumi.set(__self__, "backend_address_pool", backend_address_pool)
if allocated_outbound_ports is not None:
pulumi.set(__self__, "allocated_outbound_ports", allocated_outbound_ports)
if etag is not None:
pulumi.set(__self__, "etag", etag)
if frontend_ip_configurations is not None:
pulumi.set(__self__, "frontend_ip_configurations", frontend_ip_configurations)
if id is not None:
pulumi.set(__self__, "id", id)
if name is not None:
pulumi.set(__self__, "name", name)
if provisioning_state is not None:
pulumi.set(__self__, "provisioning_state", provisioning_state)
@property
@pulumi.getter(name="backendAddressPool")
def backend_address_pool(self) -> 'outputs.SubResourceResponse':
"""
A reference to a pool of DIPs. Outbound traffic is randomly load balanced across IPs in the backend IPs.
"""
return pulumi.get(self, "backend_address_pool")
@property
@pulumi.getter(name="allocatedOutboundPorts")
def allocated_outbound_ports(self) -> Optional[int]:
"""
The number of outbound ports to be used for NAT.
"""
return pulumi.get(self, "allocated_outbound_ports")
@property
@pulumi.getter
def etag(self) -> Optional[str]:
"""
A unique read-only string that changes whenever the resource is updated.
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter(name="frontendIPConfigurations")
def frontend_ip_configurations(self) -> Optional[Sequence['outputs.SubResourceResponse']]:
"""
The Frontend IP addresses of the load balancer.
"""
return pulumi.get(self, "frontend_ip_configurations")
@property
@pulumi.getter
def id(self) -> Optional[str]:
"""
Resource ID.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def name(self) -> Optional[str]:
"""
The name of the resource that is unique within a resource group. This name can be used to access the resource.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> Optional[str]:
"""
Gets the provisioning state of the PublicIP resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
"""
return pulumi.get(self, "provisioning_state")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class PacketCaptureFilterResponse(dict):
"""
Filter that is applied to packet capture request. Multiple filters can be applied.
"""
def __init__(__self__, *,
local_ip_address: Optional[str] = None,
local_port: Optional[str] = None,
protocol: Optional[str] = None,
remote_ip_address: Optional[str] = None,
remote_port: Optional[str] = None):
"""
Filter that is applied to packet capture request. Multiple filters can be applied.
:param str local_ip_address: Local IP Address to be filtered on. Notation: "127.0.0.1" for single address entry. "127.0.0.1-127.0.0.255" for range. "127.0.0.1;127.0.0.5;" for multiple entries. Multiple ranges not currently supported. Mixing ranges with multiple entries not currently supported. Default = null.
:param str local_port: Local port to be filtered on. Notation: "80" for single port entry."80-85" for range. "80;443;" for multiple entries. Multiple ranges not currently supported. Mixing ranges with multiple entries not currently supported. Default = null.
:param str protocol: Protocol to be filtered on.
:param str remote_ip_address: Remote IP Address to be filtered on. Notation: "127.0.0.1" for single address entry. "127.0.0.1-127.0.0.255" for range. "127.0.0.1;127.0.0.5;" for multiple entries. Multiple ranges not currently supported. Mixing ranges with multiple entries not currently supported. Default = null.
:param str remote_port: Remote port to be filtered on. Notation: "80" for single port entry."80-85" for range. "80;443;" for multiple entries. Multiple ranges not currently supported. Mixing ranges with multiple entries not currently supported. Default = null.
"""
if local_ip_address is not None:
pulumi.set(__self__, "local_ip_address", local_ip_address)
if local_port is not None:
pulumi.set(__self__, "local_port", local_port)
if protocol is None:
protocol = 'Any'
if protocol is not None:
pulumi.set(__self__, "protocol", protocol)
if remote_ip_address is not None:
pulumi.set(__self__, "remote_ip_address", remote_ip_address)
if remote_port is not None:
pulumi.set(__self__, "remote_port", remote_port)
@property
@pulumi.getter(name="localIPAddress")
def local_ip_address(self) -> Optional[str]:
"""
Local IP Address to be filtered on. Notation: "127.0.0.1" for single address entry. "127.0.0.1-127.0.0.255" for range. "127.0.0.1;127.0.0.5;" for multiple entries. Multiple ranges not currently supported. Mixing ranges with multiple entries not currently supported. Default = null.
"""
return pulumi.get(self, "local_ip_address")
@property
@pulumi.getter(name="localPort")
def local_port(self) -> Optional[str]:
"""
Local port to be filtered on. Notation: "80" for single port entry."80-85" for range. "80;443;" for multiple entries. Multiple ranges not currently supported. Mixing ranges with multiple entries not currently supported. Default = null.
"""
return pulumi.get(self, "local_port")
@property
@pulumi.getter
def protocol(self) -> Optional[str]:
"""
Protocol to be filtered on.
"""
return pulumi.get(self, "protocol")
@property
@pulumi.getter(name="remoteIPAddress")
def remote_ip_address(self) -> Optional[str]:
"""
Remote IP Address to be filtered on. Notation: "127.0.0.1" for single address entry. "127.0.0.1-127.0.0.255" for range. "127.0.0.1;127.0.0.5;" for multiple entries. Multiple ranges not currently supported. Mixing ranges with multiple entries not currently supported. Default = null.
"""
return pulumi.get(self, "remote_ip_address")
@property
@pulumi.getter(name="remotePort")
def remote_port(self) -> Optional[str]:
"""
Remote port to be filtered on. Notation: "80" for single port entry."80-85" for range. "80;443;" for multiple entries. Multiple ranges not currently supported. Mixing ranges with multiple entries not currently supported. Default = null.
"""
return pulumi.get(self, "remote_port")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
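# Hedged sketch: the filter notation documented above, shown as constructor arguments.
# Note that the __init__ above defaults `protocol` to 'Any' when it is omitted; the
# values themselves are illustrative.
#
#   http_from_subnet = PacketCaptureFilterResponse(
#       local_ip_address="10.0.0.1-10.0.0.255",  # a single range
#       local_port="80;443;",                    # multiple entries
#       remote_ip_address=None,                  # Default = null
#       remote_port=None,
#   )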
@pulumi.output_type
class PacketCaptureStorageLocationResponse(dict):
"""
Describes the storage location for a packet capture session.
"""
def __init__(__self__, *,
file_path: Optional[str] = None,
storage_id: Optional[str] = None,
storage_path: Optional[str] = None):
"""
Describes the storage location for a packet capture session.
:param str file_path: A valid local path on the targeting VM. Must include the name of the capture file (*.cap). For Linux virtual machines it must start with /var/captures. Required if no storage ID is provided, otherwise optional.
:param str storage_id: The ID of the storage account to save the packet capture session. Required if no local file path is provided.
:param str storage_path: The URI of the storage path to save the packet capture. Must be a well-formed URI describing the location to save the packet capture.
"""
if file_path is not None:
pulumi.set(__self__, "file_path", file_path)
if storage_id is not None:
pulumi.set(__self__, "storage_id", storage_id)
if storage_path is not None:
pulumi.set(__self__, "storage_path", storage_path)
@property
@pulumi.getter(name="filePath")
def file_path(self) -> Optional[str]:
"""
A valid local path on the targeting VM. Must include the name of the capture file (*.cap). For Linux virtual machines it must start with /var/captures. Required if no storage ID is provided, otherwise optional.
"""
return pulumi.get(self, "file_path")
@property
@pulumi.getter(name="storageId")
def storage_id(self) -> Optional[str]:
"""
The ID of the storage account to save the packet capture session. Required if no local file path is provided.
"""
return pulumi.get(self, "storage_id")
@property
@pulumi.getter(name="storagePath")
def storage_path(self) -> Optional[str]:
"""
The URI of the storage path to save the packet capture. Must be a well-formed URI describing the location to save the packet capture.
"""
return pulumi.get(self, "storage_path")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
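# Hedged sketch: the documented requirement that a packet capture needs either a
# storage account or a local file path can be validated for an instance `loc`:
#
#   if not (loc.storage_id or loc.file_path):
#       raise ValueError("provide a storage account ID or a local file path for the capture")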
@pulumi.output_type
class ProbeResponse(dict):
"""
A load balancer probe.
"""
def __init__(__self__, *,
load_balancing_rules: Sequence['outputs.SubResourceResponse'],
port: int,
protocol: str,
etag: Optional[str] = None,
id: Optional[str] = None,
interval_in_seconds: Optional[int] = None,
name: Optional[str] = None,
number_of_probes: Optional[int] = None,
provisioning_state: Optional[str] = None,
request_path: Optional[str] = None):
"""
A load balancer probe.
:param Sequence['SubResourceResponseArgs'] load_balancing_rules: The load balancer rules that use this probe.
:param int port: The port for communicating the probe. Possible values range from 1 to 65535, inclusive.
:param str protocol: The protocol of the end point. Possible values are: 'Http' or 'Tcp'. If 'Tcp' is specified, a received ACK is required for the probe to be successful. If 'Http' is specified, a 200 OK response from the specified URI is required for the probe to be successful.
:param str etag: A unique read-only string that changes whenever the resource is updated.
:param str id: Resource ID.
:param int interval_in_seconds: The interval, in seconds, for how frequently to probe the endpoint for health status. Typically, the interval is slightly less than half the allocated timeout period (in seconds) which allows two full probes before taking the instance out of rotation. The default value is 15, the minimum value is 5.
:param str name: The name of the resource that is unique within a resource group. This name can be used to access the resource.
:param int number_of_probes: The number of consecutive failed probes (no response) after which traffic stops being delivered to the endpoint. This value allows endpoints to be taken out of rotation faster or slower than the typical times used in Azure.
:param str provisioning_state: Gets the provisioning state of the public IP resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
:param str request_path: The URI used for requesting health status from the VM. Path is required if a protocol is set to http. Otherwise, it is not allowed. There is no default value.
"""
pulumi.set(__self__, "load_balancing_rules", load_balancing_rules)
pulumi.set(__self__, "port", port)
pulumi.set(__self__, "protocol", protocol)
if etag is not None:
pulumi.set(__self__, "etag", etag)
if id is not None:
pulumi.set(__self__, "id", id)
if interval_in_seconds is not None:
pulumi.set(__self__, "interval_in_seconds", interval_in_seconds)
if name is not None:
pulumi.set(__self__, "name", name)
if number_of_probes is not None:
pulumi.set(__self__, "number_of_probes", number_of_probes)
if provisioning_state is not None:
pulumi.set(__self__, "provisioning_state", provisioning_state)
if request_path is not None:
pulumi.set(__self__, "request_path", request_path)
@property
@pulumi.getter(name="loadBalancingRules")
def load_balancing_rules(self) -> Sequence['outputs.SubResourceResponse']:
"""
The load balancer rules that use this probe.
"""
return pulumi.get(self, "load_balancing_rules")
@property
@pulumi.getter
def port(self) -> int:
"""
The port for communicating the probe. Possible values range from 1 to 65535, inclusive.
"""
return pulumi.get(self, "port")
@property
@pulumi.getter
def protocol(self) -> str:
"""
The protocol of the end point. Possible values are: 'Http' or 'Tcp'. If 'Tcp' is specified, a received ACK is required for the probe to be successful. If 'Http' is specified, a 200 OK response from the specified URI is required for the probe to be successful.
"""
return pulumi.get(self, "protocol")
@property
@pulumi.getter
def etag(self) -> Optional[str]:
"""
A unique read-only string that changes whenever the resource is updated.
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter
def id(self) -> Optional[str]:
"""
Resource ID.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter(name="intervalInSeconds")
def interval_in_seconds(self) -> Optional[int]:
"""
The interval, in seconds, for how frequently to probe the endpoint for health status. Typically, the interval is slightly less than half the allocated timeout period (in seconds) which allows two full probes before taking the instance out of rotation. The default value is 15, the minimum value is 5.
"""
return pulumi.get(self, "interval_in_seconds")
@property
@pulumi.getter
def name(self) -> Optional[str]:
"""
The name of the resource that is unique within a resource group. This name can be used to access the resource.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="numberOfProbes")
def number_of_probes(self) -> Optional[int]:
"""
The number of consecutive failed probes (no response) after which traffic stops being delivered to the endpoint. This value allows endpoints to be taken out of rotation faster or slower than the typical times used in Azure.
"""
return pulumi.get(self, "number_of_probes")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> Optional[str]:
"""
Gets the provisioning state of the public IP resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter(name="requestPath")
def request_path(self) -> Optional[str]:
"""
The URI used for requesting health status from the VM. Path is required if a protocol is set to http. Otherwise, it is not allowed. There is no default value.
"""
return pulumi.get(self, "request_path")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
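# Hedged sketch: a rough worked estimate of how long an unhealthy backend stays in
# rotation, based on the interval / number_of_probes semantics documented above. The
# fallback defaults (15 s, 2 probes) are illustrative assumptions.
#
#   def approx_seconds_until_out_of_rotation(probe):
#       interval = probe.interval_in_seconds or 15
#       failures_needed = probe.number_of_probes or 2
#       return interval * failures_needed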
@pulumi.output_type
class PublicIPAddressDnsSettingsResponse(dict):
"""
Contains FQDN of the DNS record associated with the public IP address
"""
def __init__(__self__, *,
domain_name_label: Optional[str] = None,
fqdn: Optional[str] = None,
reverse_fqdn: Optional[str] = None):
"""
Contains FQDN of the DNS record associated with the public IP address
:param str domain_name_label: Gets or sets the Domain name label. The concatenation of the domain name label and the regionalized DNS zone makes up the fully qualified domain name associated with the public IP address. If a domain name label is specified, an A DNS record is created for the public IP in the Microsoft Azure DNS system.
:param str fqdn: Gets the FQDN, Fully qualified domain name of the A DNS record associated with the public IP. This is the concatenation of the domainNameLabel and the regionalized DNS zone.
:param str reverse_fqdn: Gets or Sets the Reverse FQDN. A user-visible, fully qualified domain name that resolves to this public IP address. If the reverseFqdn is specified, then a PTR DNS record is created pointing from the IP address in the in-addr.arpa domain to the reverse FQDN.
"""
if domain_name_label is not None:
pulumi.set(__self__, "domain_name_label", domain_name_label)
if fqdn is not None:
pulumi.set(__self__, "fqdn", fqdn)
if reverse_fqdn is not None:
pulumi.set(__self__, "reverse_fqdn", reverse_fqdn)
@property
@pulumi.getter(name="domainNameLabel")
def domain_name_label(self) -> Optional[str]:
"""
Gets or sets the Domain name label. The concatenation of the domain name label and the regionalized DNS zone makes up the fully qualified domain name associated with the public IP address. If a domain name label is specified, an A DNS record is created for the public IP in the Microsoft Azure DNS system.
"""
return pulumi.get(self, "domain_name_label")
@property
@pulumi.getter
def fqdn(self) -> Optional[str]:
"""
Gets the FQDN, Fully qualified domain name of the A DNS record associated with the public IP. This is the concatenation of the domainNameLabel and the regionalized DNS zone.
"""
return pulumi.get(self, "fqdn")
@property
@pulumi.getter(name="reverseFqdn")
def reverse_fqdn(self) -> Optional[str]:
"""
Gets or Sets the Reverse FQDN. A user-visible, fully qualified domain name that resolves to this public IP address. If the reverseFqdn is specified, then a PTR DNS record is created pointing from the IP address in the in-addr.arpa domain to the reverse FQDN.
"""
return pulumi.get(self, "reverse_fqdn")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class PublicIPAddressResponse(dict):
"""
Public IP address resource.
"""
def __init__(__self__, *,
ip_configuration: 'outputs.IPConfigurationResponse',
name: str,
type: str,
dns_settings: Optional['outputs.PublicIPAddressDnsSettingsResponse'] = None,
etag: Optional[str] = None,
id: Optional[str] = None,
idle_timeout_in_minutes: Optional[int] = None,
ip_address: Optional[str] = None,
location: Optional[str] = None,
provisioning_state: Optional[str] = None,
public_ip_address_version: Optional[str] = None,
public_ip_allocation_method: Optional[str] = None,
resource_guid: Optional[str] = None,
tags: Optional[Mapping[str, str]] = None):
"""
Public IP address resource.
:param 'IPConfigurationResponseArgs' ip_configuration: The IP configuration associated with the public IP address.
:param str name: Resource name.
:param str type: Resource type.
:param 'PublicIPAddressDnsSettingsResponseArgs' dns_settings: The FQDN of the DNS record associated with the public IP address.
:param str etag: A unique read-only string that changes whenever the resource is updated.
:param str id: Resource ID.
:param int idle_timeout_in_minutes: The idle timeout of the public IP address.
:param str location: Resource location.
:param str provisioning_state: The provisioning state of the PublicIP resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
:param str public_ip_address_version: The public IP address version. Possible values are: 'IPv4' and 'IPv6'.
:param str public_ip_allocation_method: The public IP allocation method. Possible values are: 'Static' and 'Dynamic'.
:param str resource_guid: The resource GUID property of the public IP resource.
:param Mapping[str, str] tags: Resource tags.
"""
pulumi.set(__self__, "ip_configuration", ip_configuration)
pulumi.set(__self__, "name", name)
pulumi.set(__self__, "type", type)
if dns_settings is not None:
pulumi.set(__self__, "dns_settings", dns_settings)
if etag is not None:
pulumi.set(__self__, "etag", etag)
if id is not None:
pulumi.set(__self__, "id", id)
if idle_timeout_in_minutes is not None:
pulumi.set(__self__, "idle_timeout_in_minutes", idle_timeout_in_minutes)
if ip_address is not None:
pulumi.set(__self__, "ip_address", ip_address)
if location is not None:
pulumi.set(__self__, "location", location)
if provisioning_state is not None:
pulumi.set(__self__, "provisioning_state", provisioning_state)
if public_ip_address_version is not None:
pulumi.set(__self__, "public_ip_address_version", public_ip_address_version)
if public_ip_allocation_method is not None:
pulumi.set(__self__, "public_ip_allocation_method", public_ip_allocation_method)
if resource_guid is not None:
pulumi.set(__self__, "resource_guid", resource_guid)
if tags is not None:
pulumi.set(__self__, "tags", tags)
@property
@pulumi.getter(name="ipConfiguration")
def ip_configuration(self) -> 'outputs.IPConfigurationResponse':
"""
IPConfiguration
"""
return pulumi.get(self, "ip_configuration")
@property
@pulumi.getter
def name(self) -> str:
"""
Resource name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def type(self) -> str:
"""
Resource type.
"""
return pulumi.get(self, "type")
@property
@pulumi.getter(name="dnsSettings")
def dns_settings(self) -> Optional['outputs.PublicIPAddressDnsSettingsResponse']:
"""
The FQDN of the DNS record associated with the public IP address.
"""
return pulumi.get(self, "dns_settings")
@property
@pulumi.getter
def etag(self) -> Optional[str]:
"""
A unique read-only string that changes whenever the resource is updated.
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter
def id(self) -> Optional[str]:
"""
Resource ID.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter(name="idleTimeoutInMinutes")
def idle_timeout_in_minutes(self) -> Optional[int]:
"""
The idle timeout of the public IP address.
"""
return pulumi.get(self, "idle_timeout_in_minutes")
@property
@pulumi.getter(name="ipAddress")
def ip_address(self) -> Optional[str]:
return pulumi.get(self, "ip_address")
@property
@pulumi.getter
def location(self) -> Optional[str]:
"""
Resource location.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> Optional[str]:
"""
The provisioning state of the PublicIP resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter(name="publicIPAddressVersion")
def public_ip_address_version(self) -> Optional[str]:
"""
The public IP address version. Possible values are: 'IPv4' and 'IPv6'.
"""
return pulumi.get(self, "public_ip_address_version")
@property
@pulumi.getter(name="publicIPAllocationMethod")
def public_ip_allocation_method(self) -> Optional[str]:
"""
The public IP allocation method. Possible values are: 'Static' and 'Dynamic'.
"""
return pulumi.get(self, "public_ip_allocation_method")
@property
@pulumi.getter(name="resourceGuid")
def resource_guid(self) -> Optional[str]:
"""
The resource GUID property of the public IP resource.
"""
return pulumi.get(self, "resource_guid")
@property
@pulumi.getter
def tags(self) -> Optional[Mapping[str, str]]:
"""
Resource tags.
"""
return pulumi.get(self, "tags")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class ResourceNavigationLinkResponse(dict):
"""
ResourceNavigationLink resource.
"""
def __init__(__self__, *,
etag: str,
provisioning_state: str,
id: Optional[str] = None,
link: Optional[str] = None,
linked_resource_type: Optional[str] = None,
name: Optional[str] = None):
"""
ResourceNavigationLink resource.
:param str etag: A unique read-only string that changes whenever the resource is updated.
:param str provisioning_state: Provisioning state of the ResourceNavigationLink resource.
:param str id: Resource ID.
:param str link: Link to the external resource
:param str linked_resource_type: Resource type of the linked resource.
:param str name: Name of the resource that is unique within a resource group. This name can be used to access the resource.
"""
pulumi.set(__self__, "etag", etag)
pulumi.set(__self__, "provisioning_state", provisioning_state)
if id is not None:
pulumi.set(__self__, "id", id)
if link is not None:
pulumi.set(__self__, "link", link)
if linked_resource_type is not None:
pulumi.set(__self__, "linked_resource_type", linked_resource_type)
if name is not None:
pulumi.set(__self__, "name", name)
@property
@pulumi.getter
def etag(self) -> str:
"""
A unique read-only string that changes whenever the resource is updated.
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> str:
"""
Provisioning state of the ResourceNavigationLink resource.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter
def id(self) -> Optional[str]:
"""
Resource ID.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def link(self) -> Optional[str]:
"""
Link to the external resource
"""
return pulumi.get(self, "link")
@property
@pulumi.getter(name="linkedResourceType")
def linked_resource_type(self) -> Optional[str]:
"""
Resource type of the linked resource.
"""
return pulumi.get(self, "linked_resource_type")
@property
@pulumi.getter
def name(self) -> Optional[str]:
"""
Name of the resource that is unique within a resource group. This name can be used to access the resource.
"""
return pulumi.get(self, "name")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class RouteResponse(dict):
"""
Route resource
"""
def __init__(__self__, *,
next_hop_type: str,
address_prefix: Optional[str] = None,
etag: Optional[str] = None,
id: Optional[str] = None,
name: Optional[str] = None,
next_hop_ip_address: Optional[str] = None,
provisioning_state: Optional[str] = None):
"""
Route resource
:param str next_hop_type: The type of Azure hop the packet should be sent to. Possible values are: 'VirtualNetworkGateway', 'VnetLocal', 'Internet', 'VirtualAppliance', and 'None'
:param str address_prefix: The destination CIDR to which the route applies.
:param str etag: A unique read-only string that changes whenever the resource is updated.
:param str id: Resource ID.
:param str name: The name of the resource that is unique within a resource group. This name can be used to access the resource.
:param str next_hop_ip_address: The IP address packets should be forwarded to. Next hop values are only allowed in routes where the next hop type is VirtualAppliance.
:param str provisioning_state: The provisioning state of the resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
"""
pulumi.set(__self__, "next_hop_type", next_hop_type)
if address_prefix is not None:
pulumi.set(__self__, "address_prefix", address_prefix)
if etag is not None:
pulumi.set(__self__, "etag", etag)
if id is not None:
pulumi.set(__self__, "id", id)
if name is not None:
pulumi.set(__self__, "name", name)
if next_hop_ip_address is not None:
pulumi.set(__self__, "next_hop_ip_address", next_hop_ip_address)
if provisioning_state is not None:
pulumi.set(__self__, "provisioning_state", provisioning_state)
@property
@pulumi.getter(name="nextHopType")
def next_hop_type(self) -> str:
"""
The type of Azure hop the packet should be sent to. Possible values are: 'VirtualNetworkGateway', 'VnetLocal', 'Internet', 'VirtualAppliance', and 'None'
"""
return pulumi.get(self, "next_hop_type")
@property
@pulumi.getter(name="addressPrefix")
def address_prefix(self) -> Optional[str]:
"""
The destination CIDR to which the route applies.
"""
return pulumi.get(self, "address_prefix")
@property
@pulumi.getter
def etag(self) -> Optional[str]:
"""
A unique read-only string that changes whenever the resource is updated.
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter
def id(self) -> Optional[str]:
"""
Resource ID.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def name(self) -> Optional[str]:
"""
The name of the resource that is unique within a resource group. This name can be used to access the resource.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="nextHopIpAddress")
def next_hop_ip_address(self) -> Optional[str]:
"""
The IP address packets should be forwarded to. Next hop values are only allowed in routes where the next hop type is VirtualAppliance.
"""
return pulumi.get(self, "next_hop_ip_address")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> Optional[str]:
"""
The provisioning state of the resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
"""
return pulumi.get(self, "provisioning_state")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class RouteTableResponse(dict):
"""
Route table resource.
"""
def __init__(__self__, *,
name: str,
subnets: Sequence['outputs.SubnetResponse'],
type: str,
etag: Optional[str] = None,
id: Optional[str] = None,
location: Optional[str] = None,
provisioning_state: Optional[str] = None,
routes: Optional[Sequence['outputs.RouteResponse']] = None,
tags: Optional[Mapping[str, str]] = None):
"""
Route table resource.
:param str name: Resource name.
:param Sequence['SubnetResponseArgs'] subnets: A collection of references to subnets.
:param str type: Resource type.
:param str etag: Gets a unique read-only string that changes whenever the resource is updated.
:param str id: Resource ID.
:param str location: Resource location.
:param str provisioning_state: The provisioning state of the resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
:param Sequence['RouteResponseArgs'] routes: Collection of routes contained within a route table.
:param Mapping[str, str] tags: Resource tags.
"""
pulumi.set(__self__, "name", name)
pulumi.set(__self__, "subnets", subnets)
pulumi.set(__self__, "type", type)
if etag is not None:
pulumi.set(__self__, "etag", etag)
if id is not None:
pulumi.set(__self__, "id", id)
if location is not None:
pulumi.set(__self__, "location", location)
if provisioning_state is not None:
pulumi.set(__self__, "provisioning_state", provisioning_state)
if routes is not None:
pulumi.set(__self__, "routes", routes)
if tags is not None:
pulumi.set(__self__, "tags", tags)
@property
@pulumi.getter
def name(self) -> str:
"""
Resource name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def subnets(self) -> Sequence['outputs.SubnetResponse']:
"""
A collection of references to subnets.
"""
return pulumi.get(self, "subnets")
@property
@pulumi.getter
def type(self) -> str:
"""
Resource type.
"""
return pulumi.get(self, "type")
@property
@pulumi.getter
def etag(self) -> Optional[str]:
"""
Gets a unique read-only string that changes whenever the resource is updated.
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter
def id(self) -> Optional[str]:
"""
Resource ID.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def location(self) -> Optional[str]:
"""
Resource location.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> Optional[str]:
"""
The provisioning state of the resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter
def routes(self) -> Optional[Sequence['outputs.RouteResponse']]:
"""
Collection of routes contained within a route table.
"""
return pulumi.get(self, "routes")
@property
@pulumi.getter
def tags(self) -> Optional[Mapping[str, str]]:
"""
Resource tags.
"""
return pulumi.get(self, "tags")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class SecurityRuleResponse(dict):
"""
Network security rule.
"""
def __init__(__self__, *,
access: str,
destination_address_prefix: str,
direction: str,
protocol: str,
source_address_prefix: str,
description: Optional[str] = None,
destination_port_range: Optional[str] = None,
etag: Optional[str] = None,
id: Optional[str] = None,
name: Optional[str] = None,
priority: Optional[int] = None,
provisioning_state: Optional[str] = None,
source_port_range: Optional[str] = None):
"""
Network security rule.
:param str access: The network traffic is allowed or denied. Possible values are: 'Allow' and 'Deny'.
:param str destination_address_prefix: The destination address prefix. CIDR or source IP range. Asterisk '*' can also be used to match all source IPs. Default tags such as 'VirtualNetwork', 'AzureLoadBalancer' and 'Internet' can also be used.
        :param str direction: The direction of the rule. The direction specifies if the rule will be evaluated on incoming or outgoing traffic. Possible values are: 'Inbound' and 'Outbound'.
:param str protocol: Network protocol this rule applies to. Possible values are 'Tcp', 'Udp', and '*'.
:param str source_address_prefix: The CIDR or source IP range. Asterisk '*' can also be used to match all source IPs. Default tags such as 'VirtualNetwork', 'AzureLoadBalancer' and 'Internet' can also be used. If this is an ingress rule, specifies where network traffic originates from.
:param str description: A description for this rule. Restricted to 140 chars.
:param str destination_port_range: The destination port or range. Integer or range between 0 and 65535. Asterisk '*' can also be used to match all ports.
:param str etag: A unique read-only string that changes whenever the resource is updated.
:param str id: Resource ID.
:param str name: The name of the resource that is unique within a resource group. This name can be used to access the resource.
:param int priority: The priority of the rule. The value can be between 100 and 4096. The priority number must be unique for each rule in the collection. The lower the priority number, the higher the priority of the rule.
:param str provisioning_state: The provisioning state of the public IP resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
:param str source_port_range: The source port or range. Integer or range between 0 and 65535. Asterisk '*' can also be used to match all ports.
"""
pulumi.set(__self__, "access", access)
pulumi.set(__self__, "destination_address_prefix", destination_address_prefix)
pulumi.set(__self__, "direction", direction)
pulumi.set(__self__, "protocol", protocol)
pulumi.set(__self__, "source_address_prefix", source_address_prefix)
if description is not None:
pulumi.set(__self__, "description", description)
if destination_port_range is not None:
pulumi.set(__self__, "destination_port_range", destination_port_range)
if etag is not None:
pulumi.set(__self__, "etag", etag)
if id is not None:
pulumi.set(__self__, "id", id)
if name is not None:
pulumi.set(__self__, "name", name)
if priority is not None:
pulumi.set(__self__, "priority", priority)
if provisioning_state is not None:
pulumi.set(__self__, "provisioning_state", provisioning_state)
if source_port_range is not None:
pulumi.set(__self__, "source_port_range", source_port_range)
@property
@pulumi.getter
def access(self) -> str:
"""
The network traffic is allowed or denied. Possible values are: 'Allow' and 'Deny'.
"""
return pulumi.get(self, "access")
@property
@pulumi.getter(name="destinationAddressPrefix")
def destination_address_prefix(self) -> str:
"""
The destination address prefix. CIDR or source IP range. Asterisk '*' can also be used to match all source IPs. Default tags such as 'VirtualNetwork', 'AzureLoadBalancer' and 'Internet' can also be used.
"""
return pulumi.get(self, "destination_address_prefix")
@property
@pulumi.getter
def direction(self) -> str:
"""
        The direction of the rule. The direction specifies if the rule will be evaluated on incoming or outgoing traffic. Possible values are: 'Inbound' and 'Outbound'.
"""
return pulumi.get(self, "direction")
@property
@pulumi.getter
def protocol(self) -> str:
"""
Network protocol this rule applies to. Possible values are 'Tcp', 'Udp', and '*'.
"""
return pulumi.get(self, "protocol")
@property
@pulumi.getter(name="sourceAddressPrefix")
def source_address_prefix(self) -> str:
"""
The CIDR or source IP range. Asterisk '*' can also be used to match all source IPs. Default tags such as 'VirtualNetwork', 'AzureLoadBalancer' and 'Internet' can also be used. If this is an ingress rule, specifies where network traffic originates from.
"""
return pulumi.get(self, "source_address_prefix")
@property
@pulumi.getter
def description(self) -> Optional[str]:
"""
A description for this rule. Restricted to 140 chars.
"""
return pulumi.get(self, "description")
@property
@pulumi.getter(name="destinationPortRange")
def destination_port_range(self) -> Optional[str]:
"""
The destination port or range. Integer or range between 0 and 65535. Asterisk '*' can also be used to match all ports.
"""
return pulumi.get(self, "destination_port_range")
@property
@pulumi.getter
def etag(self) -> Optional[str]:
"""
A unique read-only string that changes whenever the resource is updated.
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter
def id(self) -> Optional[str]:
"""
Resource ID.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def name(self) -> Optional[str]:
"""
The name of the resource that is unique within a resource group. This name can be used to access the resource.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def priority(self) -> Optional[int]:
"""
The priority of the rule. The value can be between 100 and 4096. The priority number must be unique for each rule in the collection. The lower the priority number, the higher the priority of the rule.
"""
return pulumi.get(self, "priority")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> Optional[str]:
"""
The provisioning state of the public IP resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter(name="sourcePortRange")
def source_port_range(self) -> Optional[str]:
"""
The source port or range. Integer or range between 0 and 65535. Asterisk '*' can also be used to match all ports.
"""
return pulumi.get(self, "source_port_range")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
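# --- Illustrative sketch, not part of the generated module -----------------------------
# A minimal example of reading a SecurityRuleResponse through the snake_case getters
# defined above; the helper name and the output format are assumptions for illustration only.
def _example_describe_rule(rule: 'SecurityRuleResponse') -> str:
    # destination_port_range and priority are optional, so fall back to '*' where absent.
    port = rule.destination_port_range or '*'
    return "%s %s %s -> %s:%s (priority %s)" % (
        rule.access, rule.direction, rule.source_address_prefix,
        rule.destination_address_prefix, port, rule.priority)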
@pulumi.output_type
class SubResourceResponse(dict):
def __init__(__self__, *,
id: Optional[str] = None):
"""
:param str id: Resource ID.
"""
if id is not None:
pulumi.set(__self__, "id", id)
@property
@pulumi.getter
def id(self) -> Optional[str]:
"""
Resource ID.
"""
return pulumi.get(self, "id")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class SubnetResponse(dict):
"""
Subnet in a virtual network resource.
"""
def __init__(__self__, *,
ip_configurations: Sequence['outputs.IPConfigurationResponse'],
address_prefix: Optional[str] = None,
etag: Optional[str] = None,
id: Optional[str] = None,
name: Optional[str] = None,
network_security_group: Optional['outputs.NetworkSecurityGroupResponse'] = None,
provisioning_state: Optional[str] = None,
resource_navigation_links: Optional[Sequence['outputs.ResourceNavigationLinkResponse']] = None,
route_table: Optional['outputs.RouteTableResponse'] = None):
"""
Subnet in a virtual network resource.
:param Sequence['IPConfigurationResponseArgs'] ip_configurations: Gets an array of references to the network interface IP configurations using subnet.
:param str address_prefix: The address prefix for the subnet.
:param str etag: A unique read-only string that changes whenever the resource is updated.
:param str id: Resource ID.
:param str name: The name of the resource that is unique within a resource group. This name can be used to access the resource.
:param 'NetworkSecurityGroupResponseArgs' network_security_group: The reference of the NetworkSecurityGroup resource.
:param str provisioning_state: The provisioning state of the resource.
:param Sequence['ResourceNavigationLinkResponseArgs'] resource_navigation_links: Gets an array of references to the external resources using subnet.
:param 'RouteTableResponseArgs' route_table: The reference of the RouteTable resource.
"""
pulumi.set(__self__, "ip_configurations", ip_configurations)
if address_prefix is not None:
pulumi.set(__self__, "address_prefix", address_prefix)
if etag is not None:
pulumi.set(__self__, "etag", etag)
if id is not None:
pulumi.set(__self__, "id", id)
if name is not None:
pulumi.set(__self__, "name", name)
if network_security_group is not None:
pulumi.set(__self__, "network_security_group", network_security_group)
if provisioning_state is not None:
pulumi.set(__self__, "provisioning_state", provisioning_state)
if resource_navigation_links is not None:
pulumi.set(__self__, "resource_navigation_links", resource_navigation_links)
if route_table is not None:
pulumi.set(__self__, "route_table", route_table)
@property
@pulumi.getter(name="ipConfigurations")
def ip_configurations(self) -> Sequence['outputs.IPConfigurationResponse']:
"""
Gets an array of references to the network interface IP configurations using subnet.
"""
return pulumi.get(self, "ip_configurations")
@property
@pulumi.getter(name="addressPrefix")
def address_prefix(self) -> Optional[str]:
"""
The address prefix for the subnet.
"""
return pulumi.get(self, "address_prefix")
@property
@pulumi.getter
def etag(self) -> Optional[str]:
"""
A unique read-only string that changes whenever the resource is updated.
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter
def id(self) -> Optional[str]:
"""
Resource ID.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def name(self) -> Optional[str]:
"""
The name of the resource that is unique within a resource group. This name can be used to access the resource.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="networkSecurityGroup")
def network_security_group(self) -> Optional['outputs.NetworkSecurityGroupResponse']:
"""
The reference of the NetworkSecurityGroup resource.
"""
return pulumi.get(self, "network_security_group")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> Optional[str]:
"""
The provisioning state of the resource.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter(name="resourceNavigationLinks")
def resource_navigation_links(self) -> Optional[Sequence['outputs.ResourceNavigationLinkResponse']]:
"""
Gets an array of references to the external resources using subnet.
"""
return pulumi.get(self, "resource_navigation_links")
@property
@pulumi.getter(name="routeTable")
def route_table(self) -> Optional['outputs.RouteTableResponse']:
"""
The reference of the RouteTable resource.
"""
return pulumi.get(self, "route_table")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class TunnelConnectionHealthResponse(dict):
"""
VirtualNetworkGatewayConnection properties
"""
def __init__(__self__, *,
connection_status: str,
egress_bytes_transferred: float,
ingress_bytes_transferred: float,
last_connection_established_utc_time: str,
tunnel: str):
"""
VirtualNetworkGatewayConnection properties
:param str connection_status: Virtual network Gateway connection status
:param float egress_bytes_transferred: The Egress Bytes Transferred in this connection
:param float ingress_bytes_transferred: The Ingress Bytes Transferred in this connection
:param str last_connection_established_utc_time: The time at which connection was established in Utc format.
:param str tunnel: Tunnel name.
"""
pulumi.set(__self__, "connection_status", connection_status)
pulumi.set(__self__, "egress_bytes_transferred", egress_bytes_transferred)
pulumi.set(__self__, "ingress_bytes_transferred", ingress_bytes_transferred)
pulumi.set(__self__, "last_connection_established_utc_time", last_connection_established_utc_time)
pulumi.set(__self__, "tunnel", tunnel)
@property
@pulumi.getter(name="connectionStatus")
def connection_status(self) -> str:
"""
Virtual network Gateway connection status
"""
return pulumi.get(self, "connection_status")
@property
@pulumi.getter(name="egressBytesTransferred")
def egress_bytes_transferred(self) -> float:
"""
The Egress Bytes Transferred in this connection
"""
return pulumi.get(self, "egress_bytes_transferred")
@property
@pulumi.getter(name="ingressBytesTransferred")
def ingress_bytes_transferred(self) -> float:
"""
The Ingress Bytes Transferred in this connection
"""
return pulumi.get(self, "ingress_bytes_transferred")
@property
@pulumi.getter(name="lastConnectionEstablishedUtcTime")
def last_connection_established_utc_time(self) -> str:
"""
The time at which connection was established in Utc format.
"""
return pulumi.get(self, "last_connection_established_utc_time")
@property
@pulumi.getter
def tunnel(self) -> str:
"""
Tunnel name.
"""
return pulumi.get(self, "tunnel")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class VirtualNetworkGatewayIPConfigurationResponse(dict):
"""
IP configuration for virtual network gateway
"""
def __init__(__self__, *,
provisioning_state: str,
public_ip_address: 'outputs.SubResourceResponse',
subnet: 'outputs.SubResourceResponse',
etag: Optional[str] = None,
id: Optional[str] = None,
name: Optional[str] = None,
private_ip_allocation_method: Optional[str] = None):
"""
IP configuration for virtual network gateway
:param str provisioning_state: The provisioning state of the public IP resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
:param 'SubResourceResponseArgs' public_ip_address: The reference of the public IP resource.
:param 'SubResourceResponseArgs' subnet: The reference of the subnet resource.
:param str etag: A unique read-only string that changes whenever the resource is updated.
:param str id: Resource ID.
:param str name: The name of the resource that is unique within a resource group. This name can be used to access the resource.
:param str private_ip_allocation_method: The private IP allocation method. Possible values are: 'Static' and 'Dynamic'.
"""
pulumi.set(__self__, "provisioning_state", provisioning_state)
pulumi.set(__self__, "public_ip_address", public_ip_address)
pulumi.set(__self__, "subnet", subnet)
if etag is not None:
pulumi.set(__self__, "etag", etag)
if id is not None:
pulumi.set(__self__, "id", id)
if name is not None:
pulumi.set(__self__, "name", name)
if private_ip_allocation_method is not None:
pulumi.set(__self__, "private_ip_allocation_method", private_ip_allocation_method)
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> str:
"""
The provisioning state of the public IP resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter(name="publicIPAddress")
def public_ip_address(self) -> 'outputs.SubResourceResponse':
"""
The reference of the public IP resource.
"""
return pulumi.get(self, "public_ip_address")
@property
@pulumi.getter
def subnet(self) -> 'outputs.SubResourceResponse':
"""
The reference of the subnet resource.
"""
return pulumi.get(self, "subnet")
@property
@pulumi.getter
def etag(self) -> Optional[str]:
"""
A unique read-only string that changes whenever the resource is updated.
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter
def id(self) -> Optional[str]:
"""
Resource ID.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def name(self) -> Optional[str]:
"""
The name of the resource that is unique within a resource group. This name can be used to access the resource.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="privateIPAllocationMethod")
def private_ip_allocation_method(self) -> Optional[str]:
"""
The private IP allocation method. Possible values are: 'Static' and 'Dynamic'.
"""
return pulumi.get(self, "private_ip_allocation_method")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class VirtualNetworkGatewayResponse(dict):
"""
A common class for general resource information
"""
def __init__(__self__, *,
gateway_type: str,
ip_configurations: Sequence['outputs.VirtualNetworkGatewayIPConfigurationResponse'],
name: str,
provisioning_state: str,
type: str,
vpn_type: str,
active_active: Optional[bool] = None,
bgp_settings: Optional['outputs.BgpSettingsResponse'] = None,
enable_bgp: Optional[bool] = None,
etag: Optional[str] = None,
gateway_default_site: Optional['outputs.SubResourceResponse'] = None,
id: Optional[str] = None,
location: Optional[str] = None,
resource_guid: Optional[str] = None,
sku: Optional['outputs.VirtualNetworkGatewaySkuResponse'] = None,
tags: Optional[Mapping[str, str]] = None,
vpn_client_configuration: Optional['outputs.VpnClientConfigurationResponse'] = None):
"""
A common class for general resource information
:param str gateway_type: The type of this virtual network gateway. Possible values are: 'Vpn' and 'ExpressRoute'.
:param Sequence['VirtualNetworkGatewayIPConfigurationResponseArgs'] ip_configurations: IP configurations for virtual network gateway.
:param str name: Resource name.
:param str provisioning_state: The provisioning state of the VirtualNetworkGateway resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
:param str type: Resource type.
:param str vpn_type: The type of this virtual network gateway. Possible values are: 'PolicyBased' and 'RouteBased'.
:param bool active_active: ActiveActive flag
:param 'BgpSettingsResponseArgs' bgp_settings: Virtual network gateway's BGP speaker settings.
:param bool enable_bgp: Whether BGP is enabled for this virtual network gateway or not.
:param str etag: Gets a unique read-only string that changes whenever the resource is updated.
:param 'SubResourceResponseArgs' gateway_default_site: The reference of the LocalNetworkGateway resource which represents local network site having default routes. Assign Null value in case of removing existing default site setting.
:param str id: Resource ID.
:param str location: Resource location.
:param str resource_guid: The resource GUID property of the VirtualNetworkGateway resource.
:param 'VirtualNetworkGatewaySkuResponseArgs' sku: The reference of the VirtualNetworkGatewaySku resource which represents the SKU selected for Virtual network gateway.
:param Mapping[str, str] tags: Resource tags.
:param 'VpnClientConfigurationResponseArgs' vpn_client_configuration: The reference of the VpnClientConfiguration resource which represents the P2S VpnClient configurations.
"""
pulumi.set(__self__, "gateway_type", gateway_type)
pulumi.set(__self__, "ip_configurations", ip_configurations)
pulumi.set(__self__, "name", name)
pulumi.set(__self__, "provisioning_state", provisioning_state)
pulumi.set(__self__, "type", type)
pulumi.set(__self__, "vpn_type", vpn_type)
if active_active is not None:
pulumi.set(__self__, "active_active", active_active)
if bgp_settings is not None:
pulumi.set(__self__, "bgp_settings", bgp_settings)
if enable_bgp is not None:
pulumi.set(__self__, "enable_bgp", enable_bgp)
if etag is not None:
pulumi.set(__self__, "etag", etag)
if gateway_default_site is not None:
pulumi.set(__self__, "gateway_default_site", gateway_default_site)
if id is not None:
pulumi.set(__self__, "id", id)
if location is not None:
pulumi.set(__self__, "location", location)
if resource_guid is not None:
pulumi.set(__self__, "resource_guid", resource_guid)
if sku is not None:
pulumi.set(__self__, "sku", sku)
if tags is not None:
pulumi.set(__self__, "tags", tags)
if vpn_client_configuration is not None:
pulumi.set(__self__, "vpn_client_configuration", vpn_client_configuration)
@property
@pulumi.getter(name="gatewayType")
def gateway_type(self) -> str:
"""
The type of this virtual network gateway. Possible values are: 'Vpn' and 'ExpressRoute'.
"""
return pulumi.get(self, "gateway_type")
@property
@pulumi.getter(name="ipConfigurations")
def ip_configurations(self) -> Sequence['outputs.VirtualNetworkGatewayIPConfigurationResponse']:
"""
IP configurations for virtual network gateway.
"""
return pulumi.get(self, "ip_configurations")
@property
@pulumi.getter
def name(self) -> str:
"""
Resource name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> str:
"""
The provisioning state of the VirtualNetworkGateway resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter
def type(self) -> str:
"""
Resource type.
"""
return pulumi.get(self, "type")
@property
@pulumi.getter(name="vpnType")
def vpn_type(self) -> str:
"""
The type of this virtual network gateway. Possible values are: 'PolicyBased' and 'RouteBased'.
"""
return pulumi.get(self, "vpn_type")
@property
@pulumi.getter(name="activeActive")
def active_active(self) -> Optional[bool]:
"""
ActiveActive flag
"""
return pulumi.get(self, "active_active")
@property
@pulumi.getter(name="bgpSettings")
def bgp_settings(self) -> Optional['outputs.BgpSettingsResponse']:
"""
Virtual network gateway's BGP speaker settings.
"""
return pulumi.get(self, "bgp_settings")
@property
@pulumi.getter(name="enableBgp")
def enable_bgp(self) -> Optional[bool]:
"""
Whether BGP is enabled for this virtual network gateway or not.
"""
return pulumi.get(self, "enable_bgp")
@property
@pulumi.getter
def etag(self) -> Optional[str]:
"""
Gets a unique read-only string that changes whenever the resource is updated.
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter(name="gatewayDefaultSite")
def gateway_default_site(self) -> Optional['outputs.SubResourceResponse']:
"""
The reference of the LocalNetworkGateway resource which represents local network site having default routes. Assign Null value in case of removing existing default site setting.
"""
return pulumi.get(self, "gateway_default_site")
@property
@pulumi.getter
def id(self) -> Optional[str]:
"""
Resource ID.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def location(self) -> Optional[str]:
"""
Resource location.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter(name="resourceGuid")
def resource_guid(self) -> Optional[str]:
"""
The resource GUID property of the VirtualNetworkGateway resource.
"""
return pulumi.get(self, "resource_guid")
@property
@pulumi.getter
def sku(self) -> Optional['outputs.VirtualNetworkGatewaySkuResponse']:
"""
The reference of the VirtualNetworkGatewaySku resource which represents the SKU selected for Virtual network gateway.
"""
return pulumi.get(self, "sku")
@property
@pulumi.getter
def tags(self) -> Optional[Mapping[str, str]]:
"""
Resource tags.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter(name="vpnClientConfiguration")
def vpn_client_configuration(self) -> Optional['outputs.VpnClientConfigurationResponse']:
"""
The reference of the VpnClientConfiguration resource which represents the P2S VpnClient configurations.
"""
return pulumi.get(self, "vpn_client_configuration")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class VirtualNetworkGatewaySkuResponse(dict):
"""
VirtualNetworkGatewaySku details
"""
def __init__(__self__, *,
name: str,
tier: str,
capacity: Optional[int] = None):
"""
VirtualNetworkGatewaySku details
        :param str name: Gateway SKU name. Possible values are: 'Basic', 'HighPerformance', 'Standard', and 'UltraPerformance'.
        :param str tier: Gateway SKU tier. Possible values are: 'Basic', 'HighPerformance', 'Standard', and 'UltraPerformance'.
:param int capacity: The capacity.
"""
pulumi.set(__self__, "name", name)
pulumi.set(__self__, "tier", tier)
if capacity is not None:
pulumi.set(__self__, "capacity", capacity)
@property
@pulumi.getter
def name(self) -> str:
"""
        Gateway SKU name. Possible values are: 'Basic', 'HighPerformance', 'Standard', and 'UltraPerformance'.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def tier(self) -> str:
"""
        Gateway SKU tier. Possible values are: 'Basic', 'HighPerformance', 'Standard', and 'UltraPerformance'.
"""
return pulumi.get(self, "tier")
@property
@pulumi.getter
def capacity(self) -> Optional[int]:
"""
The capacity.
"""
return pulumi.get(self, "capacity")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class VirtualNetworkPeeringResponse(dict):
"""
Peerings in a virtual network resource.
"""
def __init__(__self__, *,
allow_forwarded_traffic: Optional[bool] = None,
allow_gateway_transit: Optional[bool] = None,
allow_virtual_network_access: Optional[bool] = None,
etag: Optional[str] = None,
id: Optional[str] = None,
name: Optional[str] = None,
peering_state: Optional[str] = None,
provisioning_state: Optional[str] = None,
remote_virtual_network: Optional['outputs.SubResourceResponse'] = None,
use_remote_gateways: Optional[bool] = None):
"""
Peerings in a virtual network resource.
:param bool allow_forwarded_traffic: Whether the forwarded traffic from the VMs in the remote virtual network will be allowed/disallowed.
:param bool allow_gateway_transit: If gateway links can be used in remote virtual networking to link to this virtual network.
:param bool allow_virtual_network_access: Whether the VMs in the linked virtual network space would be able to access all the VMs in local Virtual network space.
:param str etag: A unique read-only string that changes whenever the resource is updated.
:param str id: Resource ID.
:param str name: The name of the resource that is unique within a resource group. This name can be used to access the resource.
:param str peering_state: The status of the virtual network peering. Possible values are 'Initiated', 'Connected', and 'Disconnected'.
:param str provisioning_state: The provisioning state of the resource.
:param 'SubResourceResponseArgs' remote_virtual_network: The reference of the remote virtual network.
:param bool use_remote_gateways: If remote gateways can be used on this virtual network. If the flag is set to true, and allowGatewayTransit on remote peering is also true, virtual network will use gateways of remote virtual network for transit. Only one peering can have this flag set to true. This flag cannot be set if virtual network already has a gateway.
"""
if allow_forwarded_traffic is not None:
pulumi.set(__self__, "allow_forwarded_traffic", allow_forwarded_traffic)
if allow_gateway_transit is not None:
pulumi.set(__self__, "allow_gateway_transit", allow_gateway_transit)
if allow_virtual_network_access is not None:
pulumi.set(__self__, "allow_virtual_network_access", allow_virtual_network_access)
if etag is not None:
pulumi.set(__self__, "etag", etag)
if id is not None:
pulumi.set(__self__, "id", id)
if name is not None:
pulumi.set(__self__, "name", name)
if peering_state is not None:
pulumi.set(__self__, "peering_state", peering_state)
if provisioning_state is not None:
pulumi.set(__self__, "provisioning_state", provisioning_state)
if remote_virtual_network is not None:
pulumi.set(__self__, "remote_virtual_network", remote_virtual_network)
if use_remote_gateways is not None:
pulumi.set(__self__, "use_remote_gateways", use_remote_gateways)
@property
@pulumi.getter(name="allowForwardedTraffic")
def allow_forwarded_traffic(self) -> Optional[bool]:
"""
Whether the forwarded traffic from the VMs in the remote virtual network will be allowed/disallowed.
"""
return pulumi.get(self, "allow_forwarded_traffic")
@property
@pulumi.getter(name="allowGatewayTransit")
def allow_gateway_transit(self) -> Optional[bool]:
"""
If gateway links can be used in remote virtual networking to link to this virtual network.
"""
return pulumi.get(self, "allow_gateway_transit")
@property
@pulumi.getter(name="allowVirtualNetworkAccess")
def allow_virtual_network_access(self) -> Optional[bool]:
"""
Whether the VMs in the linked virtual network space would be able to access all the VMs in local Virtual network space.
"""
return pulumi.get(self, "allow_virtual_network_access")
@property
@pulumi.getter
def etag(self) -> Optional[str]:
"""
A unique read-only string that changes whenever the resource is updated.
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter
def id(self) -> Optional[str]:
"""
Resource ID.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def name(self) -> Optional[str]:
"""
The name of the resource that is unique within a resource group. This name can be used to access the resource.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="peeringState")
def peering_state(self) -> Optional[str]:
"""
The status of the virtual network peering. Possible values are 'Initiated', 'Connected', and 'Disconnected'.
"""
return pulumi.get(self, "peering_state")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> Optional[str]:
"""
The provisioning state of the resource.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter(name="remoteVirtualNetwork")
def remote_virtual_network(self) -> Optional['outputs.SubResourceResponse']:
"""
The reference of the remote virtual network.
"""
return pulumi.get(self, "remote_virtual_network")
@property
@pulumi.getter(name="useRemoteGateways")
def use_remote_gateways(self) -> Optional[bool]:
"""
If remote gateways can be used on this virtual network. If the flag is set to true, and allowGatewayTransit on remote peering is also true, virtual network will use gateways of remote virtual network for transit. Only one peering can have this flag set to true. This flag cannot be set if virtual network already has a gateway.
"""
return pulumi.get(self, "use_remote_gateways")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class VpnClientConfigurationResponse(dict):
"""
VpnClientConfiguration for P2S client.
"""
def __init__(__self__, *,
vpn_client_address_pool: Optional['outputs.AddressSpaceResponse'] = None,
vpn_client_revoked_certificates: Optional[Sequence['outputs.VpnClientRevokedCertificateResponse']] = None,
vpn_client_root_certificates: Optional[Sequence['outputs.VpnClientRootCertificateResponse']] = None):
"""
VpnClientConfiguration for P2S client.
:param 'AddressSpaceResponseArgs' vpn_client_address_pool: The reference of the address space resource which represents Address space for P2S VpnClient.
:param Sequence['VpnClientRevokedCertificateResponseArgs'] vpn_client_revoked_certificates: VpnClientRevokedCertificate for Virtual network gateway.
:param Sequence['VpnClientRootCertificateResponseArgs'] vpn_client_root_certificates: VpnClientRootCertificate for virtual network gateway.
"""
if vpn_client_address_pool is not None:
pulumi.set(__self__, "vpn_client_address_pool", vpn_client_address_pool)
if vpn_client_revoked_certificates is not None:
pulumi.set(__self__, "vpn_client_revoked_certificates", vpn_client_revoked_certificates)
if vpn_client_root_certificates is not None:
pulumi.set(__self__, "vpn_client_root_certificates", vpn_client_root_certificates)
@property
@pulumi.getter(name="vpnClientAddressPool")
def vpn_client_address_pool(self) -> Optional['outputs.AddressSpaceResponse']:
"""
The reference of the address space resource which represents Address space for P2S VpnClient.
"""
return pulumi.get(self, "vpn_client_address_pool")
@property
@pulumi.getter(name="vpnClientRevokedCertificates")
def vpn_client_revoked_certificates(self) -> Optional[Sequence['outputs.VpnClientRevokedCertificateResponse']]:
"""
VpnClientRevokedCertificate for Virtual network gateway.
"""
return pulumi.get(self, "vpn_client_revoked_certificates")
@property
@pulumi.getter(name="vpnClientRootCertificates")
def vpn_client_root_certificates(self) -> Optional[Sequence['outputs.VpnClientRootCertificateResponse']]:
"""
VpnClientRootCertificate for virtual network gateway.
"""
return pulumi.get(self, "vpn_client_root_certificates")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class VpnClientRevokedCertificateResponse(dict):
"""
VPN client revoked certificate of virtual network gateway.
"""
def __init__(__self__, *,
provisioning_state: str,
etag: Optional[str] = None,
id: Optional[str] = None,
name: Optional[str] = None,
thumbprint: Optional[str] = None):
"""
VPN client revoked certificate of virtual network gateway.
:param str provisioning_state: The provisioning state of the VPN client revoked certificate resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
:param str etag: A unique read-only string that changes whenever the resource is updated.
:param str id: Resource ID.
:param str name: The name of the resource that is unique within a resource group. This name can be used to access the resource.
:param str thumbprint: The revoked VPN client certificate thumbprint.
"""
pulumi.set(__self__, "provisioning_state", provisioning_state)
if etag is not None:
pulumi.set(__self__, "etag", etag)
if id is not None:
pulumi.set(__self__, "id", id)
if name is not None:
pulumi.set(__self__, "name", name)
if thumbprint is not None:
pulumi.set(__self__, "thumbprint", thumbprint)
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> str:
"""
The provisioning state of the VPN client revoked certificate resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter
def etag(self) -> Optional[str]:
"""
A unique read-only string that changes whenever the resource is updated.
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter
def id(self) -> Optional[str]:
"""
Resource ID.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def name(self) -> Optional[str]:
"""
The name of the resource that is unique within a resource group. This name can be used to access the resource.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def thumbprint(self) -> Optional[str]:
"""
The revoked VPN client certificate thumbprint.
"""
return pulumi.get(self, "thumbprint")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class VpnClientRootCertificateResponse(dict):
"""
VPN client root certificate of virtual network gateway
"""
def __init__(__self__, *,
provisioning_state: str,
public_cert_data: str,
etag: Optional[str] = None,
id: Optional[str] = None,
name: Optional[str] = None):
"""
VPN client root certificate of virtual network gateway
:param str provisioning_state: The provisioning state of the VPN client root certificate resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
:param str public_cert_data: The certificate public data.
:param str etag: A unique read-only string that changes whenever the resource is updated.
:param str id: Resource ID.
:param str name: The name of the resource that is unique within a resource group. This name can be used to access the resource.
"""
pulumi.set(__self__, "provisioning_state", provisioning_state)
pulumi.set(__self__, "public_cert_data", public_cert_data)
if etag is not None:
pulumi.set(__self__, "etag", etag)
if id is not None:
pulumi.set(__self__, "id", id)
if name is not None:
pulumi.set(__self__, "name", name)
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> str:
"""
The provisioning state of the VPN client root certificate resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter(name="publicCertData")
def public_cert_data(self) -> str:
"""
The certificate public data.
"""
return pulumi.get(self, "public_cert_data")
@property
@pulumi.getter
def etag(self) -> Optional[str]:
"""
A unique read-only string that changes whenever the resource is updated.
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter
def id(self) -> Optional[str]:
"""
Resource ID.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def name(self) -> Optional[str]:
"""
The name of the resource that is unique within a resource group. This name can be used to access the resource.
"""
return pulumi.get(self, "name")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
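# --- Illustrative sketch, not part of the generated module -----------------------------
# Assuming this is the generated `outputs` module of a Pulumi Azure network package, the
# helper below shows how a PublicIPAddressResponse value would typically be unpacked; the
# function name and the fallback behaviour are assumptions, not part of the provider API.
def _example_public_ip_hostname(ip: 'PublicIPAddressResponse') -> str:
    # dns_settings, its fqdn, and ip_address are all optional, so guard against None.
    if ip.dns_settings is not None and ip.dns_settings.fqdn is not None:
        return ip.dns_settings.fqdn
    return ip.ip_address or ''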
|
from .BasicTypeAttr import BasicTypeAttr
class StringAttr(BasicTypeAttr):
def __init__(self, attr):
BasicTypeAttr.__init__(self, attr)
if self.get('Max') is not None:
self['Max'] = int(self['Max'])
if self.get('Min') is not None:
self['Min'] = int(self['Min'])
def printWarnings(self, out):
if self.get('Max') in (None, '') and not self.get('SQLType'):
out.write('warning: model %s: class %s: attr %s: max string length unspecified\n' % (
self.model().name(), self.klass().name(), self.name()))
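# Illustrative sketch (hypothetical attribute definition, not from a real model): given an
# attribute spec such as {'Name': 'title', 'Type': 'string', 'Max': '100'}, StringAttr
# coerces the 'Max' (and 'Min') bounds to ints, so self['Max'] becomes 100; printWarnings()
# only complains when neither 'Max' nor 'SQLType' is supplied.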
|
import datetime
from django.conf import settings
from django.contrib.auth.decorators import user_passes_test
from django.http import HttpResponse, Http404, HttpResponseRedirect
from django.shortcuts import render_to_response
from django.template import loader, Context
from equipment.models import ItemType, Item, ItemError, Penalty, Transaction, TransactionError
from infobase.models import Person, STUDENT_KIND, PHASE_END_DATES, phase_for_cohort_and_date
def recent_transactions(person, number=6, hours=1, kind=None):
"""Fetch recent transactions by this person"""
cutoff_datetime = datetime.datetime.now() - datetime.timedelta(hours=hours)
checkouts = person.transaction_set.filter(kind=kind, timestamp__gte=cutoff_datetime)
items = [c.item for c in checkouts[:number]]
return items
def admin_access_allowed(user):
"""Does the user have the right permissions to be using this app?"""
if user:
return bool(user.groups.filter(name="equipment_tracker").count())
def render_response(template, var_dict, mimetype, filename):
"""Simple substitute for render_to_response (backporting some Django-1.0 stuff)"""
t = loader.get_template(template)
c = Context(var_dict)
response = HttpResponse(t.render(c), mimetype=mimetype)
response['Content-Disposition'] = "attachment; filename=%s" % filename
return response
@user_passes_test(admin_access_allowed)
def home(request):
"""Equipment app home page"""
title = "Main Menu"
message = "Welcome to the equipment tracker."
return render_to_response("home.html", locals())
@user_passes_test(admin_access_allowed)
def check_in(request):
"""
GET: Show check-in form.
POST: Check in the scanned item.
"""
title = "Check in"
if request.method == "POST":
number = request.POST['number']
try:
item = Item.find_by_number(number)
person = item.checked_out_by
title = "Checked in %s" % item
if item.days_overdue():
title += " (OVERDUE)"
item.check_in()
message = item.transaction_set.latest()
recent_checkins = recent_transactions(person, kind=Transaction.CHECKIN)
except (ItemError, TransactionError), error_message:
pass
return render_to_response("checkin.html", locals())
def checkout_url(request, due_timestamp, person_id):
"""
Helper method that builds a URL to be used in a redirect. It will either have both
due-timestamp and user ID number, or just user ID number. If GET values are provided
in `request`, for `due_timestamp` or `person_id`, they override the corresponding
passed arguments.
"""
if set(["due_date", "due_time"]).issubset(request.GET):
due_timestamp = (request.GET['due_date'] + "-" + request.GET['due_time']).replace(":", "-")
if due_timestamp:
url_template = "/equipment/checkout/%s/%%s/" % due_timestamp
else:
url_template = "/equipment/checkout/%s/"
if "person_id" in request.GET:
person_id = request.GET['person_id']
if Person.objects.filter(id_number=person_id).count() == 0:
raise Person.DoesNotExist("UNKNOWN")
url = url_template % person_id
return url
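def _example_checkout_urls():
    """Illustrative sketch only; the timestamp and ID below are hypothetical values, not
    app data. Shows the two URL shapes checkout_url() builds when the request carries no
    overriding GET parameters."""
    with_due_date = "/equipment/checkout/2009-03-01-17-00/12345/"  # due_timestamp and person_id
    without_due_date = "/equipment/checkout/12345/"                # person_id only
    return with_due_date, without_due_date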
@user_passes_test(admin_access_allowed)
def check_out(request, person_id=None, due_timestamp=None):
"""
This view handles all stages of the checkout operation. In order for checkout to begin,
a person_id must be in the URL. Optional due_timestamp is also in the URL. Those are
designed to persist; i.e. if you change the person the custom due date (if any) is
kept, and if you change the due date the person (if any) is kept.
"""
# Set default due date values for use in "Change due date" form
dummy_item = Item()
dummy_item.set_due_datetime()
example_due_date = dummy_item.due.date()
example_due_time = dummy_item.due.time()
# If a specific due-date was requested, set it
if due_timestamp:
custom_due_datetime = datetime.datetime.strptime(due_timestamp, "%Y-%m-%d-%H-%M")
else:
custom_due_datetime = None
title = "Scan ID"
try:
# If a change is requested for person or due date, update the URL
if set(["due_date", "due_time", "person_id"]).intersection(request.GET):
url = checkout_url(request, due_timestamp, person_id)
return HttpResponseRedirect(url)
if person_id:
person = Person.objects.get(id_number=person_id)
if not person.is_active():
raise Person.DoesNotExist("ID EXPIRED")
title = "Checking out equipment to %s" % person
recent_checkouts = recent_transactions(person, kind=Transaction.CHECKOUT)
if request.method == "POST" and request.POST['number']:
try:
item = Item.find_by_number(request.POST['number'])
item.check_out(person, custom_due_datetime)
message = "Checked out %s" % item
soundfile = "Glass.aiff"
except (ItemError, TransactionError), error_message:
soundfile = "error.mp3"
except Person.DoesNotExist, reason:
title = "Bad ID"
id_number = person_id or request.GET['person_id']
error_message = "%s: %s" % (id_number, reason)
person = None
soundfile = "error.mp3"
return render_to_response("checkout.html", locals())
@user_passes_test(admin_access_allowed)
def item(request):
"""Display information on the specified item, with some editing options."""
title = "Find an item"
if 'number' in request.GET:
number = request.GET['number']
try:
item = Item.find_by_number(number)
title = unicode(item)
history = item.transaction_set.all()
except ItemError, error_message:
pass
else:
message = "Type or scan the item's HIP number or serial number"
return render_to_response("item.html", locals())
@user_passes_test(admin_access_allowed)
def person(request):
"""
Display information on the specified borrower (person)
"""
title = "Find a person"
if 'person_id' in request.GET:
person_id = request.GET['person_id']
try:
person = Person.objects.get(id_number=person_id)
title = unicode(person)
checked_out_items = person.item_set.all()
transaction_history = person.transaction_set.all()
except Person.DoesNotExist:
error_message = "No person with id number %s" % person_id
else:
message = "Enter or scan the person's ID number"
people = Person.objects.enrolled() # For clickable list of names
return render_to_response("person.html", locals())
def _penalty_report_data(cohort, phase=None):
"""
Data rows for late-equipment report
"""
if phase is None:
phase = phase_for_cohort_and_date(cohort, datetime.date.today())
else:
phase = int(phase)
if phase < 2 or phase > 4:
raise ValueError
start_date = PHASE_END_DATES[cohort][phase-1]
end_date = PHASE_END_DATES[cohort][phase]
all_penalties = Penalty.objects.filter(when_levied__range=(start_date, end_date))
rows = [("Firstname", "Lastname", "ID number", "Date", "Amount")]
rows += [(p.student.firstname, p.student.lastname, p.student.id_number, p.when_levied, 0-p.amount) for p in all_penalties]
return rows
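# Illustrative sketch of the rows _penalty_report_data() returns (names and values are
# hypothetical): a header tuple comes first, then one tuple per penalty with the amount
# negated, e.g.
#   [("Firstname", "Lastname", "ID number", "Date", "Amount"),
#    ("Ada", "Lovelace", "12345", <when_levied>, -10)]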
@user_passes_test(admin_access_allowed)
def report(request, report_kind=None, number=None):
"""
General-purpose reporting view. To add a new report type, add an appropriate `if`
clause here, and a corresponding `{% if ... %}` clause in the template for display.
"""
if report_kind:
now = datetime.datetime.now()
title = "%s Report" % report_kind.title()
try:
if report_kind == "kits":
kits = Item.objects.filter(itemtype__kit=True)
if report_kind == "item":
item = Item.find_by_number(number)
title = item
if report_kind == "instock":
itemtypes = [i for i in ItemType.objects.all() if i.how_many_in_stock()]
if report_kind == "latepenalties":
try:
report_rows = _penalty_report_data(cohort=1, phase=number)
csv_link = "/equipment/report/latepenalties-csv/"
filename = "late_equipment.csv"
except ValueError:
report_rows = None
error_message = "Can't generate report (incorrect phase?)"
if report_kind.startswith("out-"):
items = Item.objects.filter(status=Item.OUT, part_of_kit__isnull=True).order_by("due")
if report_kind.startswith("overdue-"):
items = Item.objects.filter(status=Item.OUT, due__lt=now, part_of_kit__isnull=True).order_by("due","checked_out_by")
if report_kind.endswith("-student"):
items = items.filter(checked_out_by__kind=STUDENT_KIND)
if report_kind.endswith("-staff"):
items = items.exclude(checked_out_by__kind=STUDENT_KIND)
except ItemError, error_message:
pass # Letting error_message get picked up by the template
else:
title = "Reports"
if report_kind and report_kind.endswith("-csv") and report_rows:
return render_response("csv.html", locals(), mimetype="text/csv", filename=filename)
else:
return render_to_response("report.html", locals())
@user_passes_test(admin_access_allowed)
def find(request):
"""Control panel for finding items"""
return render_to_response("find.html", locals())
@user_passes_test(admin_access_allowed)
def add(request):
"""Interface for adding new items to the system."""
title = "Add, edit, or delete equipment items"
return render_to_response("add.html", locals())
@user_passes_test(admin_access_allowed)
def buildkit(request, kit_id=None):
"""
Helper view for building up kits. If no kit ID is passed, we ask for a kit.
If a kit ID is passed (via URL), we ask for items to add.
Workflow: Create (empty) kits in admin; come to this view and add items
"""
if "kit_id" in request.GET:
return HttpResponseRedirect("/equipment/buildkit/%s/" % request.GET['kit_id'])
title = "Enter/scan kit ID number"
if kit_id:
try:
kit = Item.find_by_number(kit_id)
assert(kit.itemtype.kit==True)
title = "Adding equipment to %s" % kit
except (Item.DoesNotExist, AssertionError):
raise Http404
if request.method == "POST":
number = request.POST['number']
            try:
                item = Item.find_by_number(number)
                assert(item.itemtype.kit==False) # Don't add a kit to a kit
                assert(item.part_of_kit==None) # Item must not already be in a kit
                kit.contents.add(item)
                message = "Added %s" % item
            except ItemError, error_message:
                pass # Letting error_message get picked up by the template
return render_to_response("buildkit.html", locals())
@user_passes_test(admin_access_allowed)
def penalty_statement(request, person_id):
"""
Present a printable statement of dollar-credit penalties accrued due to late equipment.
Also helps create that statement (presenting list of overdue equipment with a submit button).
"""
if person_id:
try:
person = Person.objects.get(id_number=person_id)
except Person.DoesNotExist:
            raise Http404
if request.method == "POST":
new_penalty = Penalty.levy(person)
message = "$%s penalty levied" % new_penalty.amount
else:
current_phase = phase_for_cohort_and_date(person.student_cohort, datetime.date.today())
phase_start = PHASE_END_DATES[person.student_cohort][current_phase - 1]
penalties = person.penalty_set.filter(when_levied__gte=phase_start)
total = sum(p.amount for p in penalties)
overduesies = Item.objects.filter(status=Item.OUT, due__lt=datetime.datetime.now(), checked_out_by=person).order_by("due")
# If overdue items have already been charged in a penalty, don't show them
for penalty in penalties:
if set(i.id for i in overduesies) == set(i.id for i in penalty.items.all()):
overduesies = None
break
else: # If no person_id is passed, template can display a list
people = Person.objects.enrolled()
return render_to_response("penalty_statement.html", locals())
|
from numba import jit
import sys
@jit
def fib(n):
return 1 if n < 3 else fib(n-1) + fib(n-2)
if __name__ == "__main__":
n = int(sys.argv[1])
print("{}".format(fib(n)))
|
import contextlib
import json
import os
import pprint
import shutil
import signal
import socket
import subprocess
import sys
import tempfile
import time
from cytoolz import (
merge,
valmap,
)
from eth_utils.curried import (
apply_formatter_if,
is_bytes,
is_checksum_address,
is_dict,
is_same_address,
remove_0x_prefix,
to_hex,
to_text,
to_wei,
)
from webu import Webu
from webu.utils.module_testing.emitter_contract import (
EMITTER_ABI,
EMITTER_BYTECODE,
EMITTER_ENUM,
)
from webu.utils.module_testing.math_contract import (
MATH_ABI,
MATH_BYTECODE,
)
COINBASE = '0xdc544d1aa88ff8bbd2f2aec754b1f1e99e1812fd'
COINBASE_PK = '0x58d23b55bc9cdce1f18c2500f40ff4ab7245df9a89505e9b1fa4851f623d241d'
KEYFILE_DATA = '{"address":"dc544d1aa88ff8bbd2f2aec754b1f1e99e1812fd","crypto":{"cipher":"aes-128-ctr","ciphertext":"52e06bc9397ea9fa2f0dae8de2b3e8116e92a2ecca9ad5ff0061d1c449704e98","cipherparams":{"iv":"aa5d0a5370ef65395c1a6607af857124"},"kdf":"scrypt","kdfparams":{"dklen":32,"n":262144,"p":1,"r":8,"salt":"9fdf0764eb3645ffc184e166537f6fe70516bf0e34dc7311dea21f100f0c9263"},"mac":"4e0b51f42b865c15c485f4faefdd1f01a38637e5247f8c75ffe6a8c0eba856f6"},"id":"5a6124e0-10f1-4c1c-ae3e-d903eacb740a","version":3}' # noqa: E501
KEYFILE_PW = 'webupy-test'
KEYFILE_FILENAME = 'UTC--2017-08-24T19-42-47.517572178Z--dc544d1aa88ff8bbd2f2aec754b1f1e99e1812fd' # noqa: E501
RAW_TXN_ACCOUNT = '0x39EEed73fb1D3855E90Cbd42f348b3D7b340aAA6'
UNLOCKABLE_PRIVATE_KEY = '0x392f63a79b1ff8774845f3fa69de4a13800a59e7083f5187f1558f0797ad0f01'
UNLOCKABLE_ACCOUNT = '0x12efdc31b1a8fa1a1e756dfd8a1601055c971e13'
UNLOCKABLE_ACCOUNT_PW = KEYFILE_PW
GENESIS_DATA = {
"nonce": "0xdeadbeefdeadbeef",
"timestamp": "0x0",
"parentHash": "0x0000000000000000000000000000000000000000000000000000000000000000", # noqa: E501
"extraData": "0x7765623370792d746573742d636861696e",
"gasLimit": "0x47d5cc",
"difficulty": "0x01",
"mixhash": "0x0000000000000000000000000000000000000000000000000000000000000000", # noqa: E501
"coinbase": "0x3333333333333333333333333333333333333333",
"alloc": {
remove_0x_prefix(COINBASE): {
'balance': str(to_wei(1000000000, 'huc')),
},
remove_0x_prefix(RAW_TXN_ACCOUNT): {
'balance': str(to_wei(10, 'huc')),
},
remove_0x_prefix(UNLOCKABLE_ACCOUNT): {
'balance': str(to_wei(10, 'huc')),
},
},
"config": {
"chainId": 131277322940537, # the string 'webupy' as an integer
"homesteadBlock": 0,
"eip155Block": 0,
"eip158Block": 0
},
}
def ensure_path_exists(dir_path):
"""
Make sure that a path exists
"""
if not os.path.exists(dir_path):
os.makedirs(dir_path)
return True
return False
@contextlib.contextmanager
def tempdir():
dir_path = tempfile.mkdtemp()
try:
yield dir_path
finally:
shutil.rmtree(dir_path)
def get_open_port():
sock = socket.socket()
sock.bind(('127.0.0.1', 0))
port = sock.getsockname()[1]
sock.close()
return str(port)
def get_ghuc_binary():
from ghuc.install import (
get_executable_path,
install_ghuc,
)
if 'GETH_BINARY' in os.environ:
return os.environ['GETH_BINARY']
elif 'GETH_VERSION' in os.environ:
ghuc_version = os.environ['GETH_VERSION']
_ghuc_binary = get_executable_path(ghuc_version)
if not os.path.exists(_ghuc_binary):
install_ghuc(ghuc_version)
assert os.path.exists(_ghuc_binary)
return _ghuc_binary
else:
return 'ghuc'
def wait_for_popen(proc, timeout):
start = time.time()
while time.time() < start + timeout:
if proc.poll() is None:
time.sleep(0.01)
else:
break
def kill_proc_gracefully(proc):
if proc.poll() is None:
proc.send_signal(signal.SIGINT)
wait_for_popen(proc, 13)
if proc.poll() is None:
proc.terminate()
wait_for_popen(proc, 5)
if proc.poll() is None:
proc.kill()
wait_for_popen(proc, 2)
def wait_for_socket(ipc_path, timeout=30):
start = time.time()
while time.time() < start + timeout:
try:
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
sock.connect(ipc_path)
sock.settimeout(timeout)
except (FileNotFoundError, socket.error):
time.sleep(0.01)
else:
break
@contextlib.contextmanager
def graceful_kill_on_exit(proc):
try:
yield proc
finally:
kill_proc_gracefully(proc)
@contextlib.contextmanager
def get_ghuc_process(ghuc_binary,
datadir,
genesis_file_path,
ghuc_ipc_path,
ghuc_port):
init_datadir_command = (
ghuc_binary,
'--datadir', datadir,
'init',
genesis_file_path,
)
subprocess.check_output(
init_datadir_command,
stdin=subprocess.PIPE,
stderr=subprocess.PIPE,
)
run_ghuc_command = (
ghuc_binary,
'--datadir', datadir,
'--ipcpath', ghuc_ipc_path,
'--ethash.dagsondisk', '1',
'--gcmode', 'archive',
'--nodiscover',
'--port', ghuc_port,
'--coinbase', COINBASE[2:],
)
popen_proc = subprocess.Popen(
run_ghuc_command,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
bufsize=1,
)
with popen_proc as proc:
with graceful_kill_on_exit(proc) as graceful_proc:
yield graceful_proc
output, errors = proc.communicate()
print(
"Ghuc Process Exited:\n"
"stdout:{0}\n\n"
"stderr:{1}\n\n".format(
to_text(output),
to_text(errors),
)
)
def write_config_json(config, datadir):
bytes_to_hex = apply_formatter_if(is_bytes, to_hex)
config_json_dict = valmap(bytes_to_hex, config)
config_path = os.path.join(datadir, 'config.json')
with open(config_path, 'w') as config_file:
config_file.write(json.dumps(config_json_dict))
config_file.write('\n')
def generate_go_happyuc_fixture(destination_dir):
with contextlib.ExitStack() as stack:
datadir = stack.enter_context(tempdir())
keystore_dir = os.path.join(datadir, 'keystore')
ensure_path_exists(keystore_dir)
keyfile_path = os.path.join(keystore_dir, KEYFILE_FILENAME)
with open(keyfile_path, 'w') as keyfile:
keyfile.write(KEYFILE_DATA)
genesis_file_path = os.path.join(datadir, 'genesis.json')
with open(genesis_file_path, 'w') as genesis_file:
genesis_file.write(json.dumps(GENESIS_DATA))
ghuc_ipc_path_dir = stack.enter_context(tempdir())
ghuc_ipc_path = os.path.join(ghuc_ipc_path_dir, 'ghuc.ipc')
ghuc_port = get_open_port()
ghuc_binary = get_ghuc_binary()
with get_ghuc_process(
ghuc_binary=ghuc_binary,
datadir=datadir,
genesis_file_path=genesis_file_path,
ghuc_ipc_path=ghuc_ipc_path,
ghuc_port=ghuc_port):
wait_for_socket(ghuc_ipc_path)
webu = Webu(Webu.IPCProvider(ghuc_ipc_path))
chain_data = setup_chain_state(webu)
# close ghuc by exiting context
# must be closed before copying data dir
verify_chain_state(webu, chain_data)
# verify that chain state is still valid after closing
# and re-opening ghuc
with get_ghuc_process(
ghuc_binary=ghuc_binary,
datadir=datadir,
genesis_file_path=genesis_file_path,
ghuc_ipc_path=ghuc_ipc_path,
ghuc_port=ghuc_port):
wait_for_socket(ghuc_ipc_path)
webu = Webu(Webu.IPCProvider(ghuc_ipc_path))
verify_chain_state(webu, chain_data)
static_data = {
'raw_txn_account': RAW_TXN_ACCOUNT,
'keyfile_pw': KEYFILE_PW,
}
config = merge(chain_data, static_data)
pprint.pprint(config)
write_config_json(config, datadir)
shutil.copytree(datadir, destination_dir)
def verify_chain_state(webu, chain_data):
receipt = webu.eth.getTransactionReceipt(chain_data['mined_txn_hash'])
latest = webu.eth.getBlock('latest')
assert receipt.blockNumber <= latest.number
def mine_transaction_hash(webu, txn_hash):
start_time = time.time()
webu.miner.start(1)
while time.time() < start_time + 60:
receipt = webu.eth.getTransactionReceipt(txn_hash)
if receipt is not None:
webu.miner.stop()
return receipt
else:
time.sleep(0.1)
else:
raise ValueError("Math contract deploy transaction not mined during wait period")
def mine_block(webu):
origin_block_number = webu.eth.blockNumber
start_time = time.time()
webu.miner.start(1)
while time.time() < start_time + 60:
block_number = webu.eth.blockNumber
if block_number > origin_block_number:
webu.miner.stop()
return block_number
else:
time.sleep(0.1)
else:
raise ValueError("No block mined during wait period")
def deploy_contract(webu, name, factory):
webu.personal.unlockAccount(webu.eth.coinbase, KEYFILE_PW)
deploy_txn_hash = factory.deploy({'from': webu.eth.coinbase})
print('{0}_CONTRACT_DEPLOY_HASH: '.format(name.upper()), deploy_txn_hash)
deploy_receipt = mine_transaction_hash(webu, deploy_txn_hash)
print('{0}_CONTRACT_DEPLOY_TRANSACTION_MINED'.format(name.upper()))
contract_address = deploy_receipt['contractAddress']
assert is_checksum_address(contract_address)
print('{0}_CONTRACT_ADDRESS:'.format(name.upper()), contract_address)
return deploy_receipt
def setup_chain_state(webu):
coinbase = webu.eth.coinbase
assert is_same_address(coinbase, COINBASE)
#
# Math Contract
#
math_contract_factory = webu.eth.contract(
abi=MATH_ABI,
bytecode=MATH_BYTECODE,
)
math_deploy_receipt = deploy_contract(webu, 'math', math_contract_factory)
assert is_dict(math_deploy_receipt)
#
# Emitter Contract
#
emitter_contract_factory = webu.eth.contract(
abi=EMITTER_ABI,
bytecode=EMITTER_BYTECODE,
)
emitter_deploy_receipt = deploy_contract(webu, 'emitter', emitter_contract_factory)
emitter_contract = emitter_contract_factory(emitter_deploy_receipt['contractAddress'])
txn_hash_with_log = emitter_contract.transact({
'from': webu.eth.coinbase,
}).logDouble(which=EMITTER_ENUM['LogDoubleWithIndex'], arg0=12345, arg1=54321)
print('TXN_HASH_WITH_LOG:', txn_hash_with_log)
txn_receipt_with_log = mine_transaction_hash(webu, txn_hash_with_log)
block_with_log = webu.eth.getBlock(txn_receipt_with_log['blockHash'])
print('BLOCK_HASH_WITH_LOG:', block_with_log['hash'])
#
# Empty Block
#
empty_block_number = mine_block(webu)
print('MINED_EMPTY_BLOCK')
empty_block = webu.eth.getBlock(empty_block_number)
assert is_dict(empty_block)
assert not empty_block['transactions']
print('EMPTY_BLOCK_HASH:', empty_block['hash'])
#
# Block with Transaction
#
webu.personal.unlockAccount(coinbase, KEYFILE_PW)
webu.miner.start(1)
mined_txn_hash = webu.eth.sendTransaction({
'from': coinbase,
'to': coinbase,
'value': 1,
'gas': 21000,
'gas_price': webu.eth.gasPrice,
})
mined_txn_receipt = mine_transaction_hash(webu, mined_txn_hash)
print('MINED_TXN_HASH:', mined_txn_hash)
block_with_txn = webu.eth.getBlock(mined_txn_receipt['blockHash'])
print('BLOCK_WITH_TXN_HASH:', block_with_txn['hash'])
ghuc_fixture = {
'math_deploy_txn_hash': math_deploy_receipt['transactionHash'],
'math_address': math_deploy_receipt['contractAddress'],
'emitter_deploy_txn_hash': emitter_deploy_receipt['transactionHash'],
'emitter_address': emitter_deploy_receipt['contractAddress'],
'txn_hash_with_log': txn_hash_with_log,
'block_hash_with_log': block_with_log['hash'],
'empty_block_hash': empty_block['hash'],
'mined_txn_hash': mined_txn_hash,
'block_with_txn_hash': block_with_txn['hash'],
}
return ghuc_fixture
if __name__ == '__main__':
fixture_dir = sys.argv[1]
generate_go_happyuc_fixture(fixture_dir)
|
import networkx as nx
class FeatureExtractor:
"""
    Extract some hand-crafted features for the input graphs, e.g.
    - number of (effective) nodes
    - average undirected degree
    - path statistics (number of paths, longest path)
"""
def __init__(self, g: nx.Graph, node_attr_name='op_name', s='input', t='output'):
"""
g: a valid networkx graph
node_attr_name: the tag of the node attribute. default is 'op_name'
s, t: the tag of the two special input and output nodes. Note that there can be more than one input node (s), but
only one output node (t)
"""
self.g = g
self.input_index = []
self.output_index = None
for n in range(g.number_of_nodes()):
assert node_attr_name in list(dict(g.nodes[n]).keys()), node_attr_name + " is not found in " + str(
g.nodes[n])
if str(g.nodes[n][node_attr_name]) == str(s):
self.input_index.append(n)
elif str(g.nodes[n][node_attr_name]) == str(t):
self.output_index = n
self.node_attr_name = node_attr_name
if len(self.input_index) == 0:
raise ValueError("Unknown input node!")
elif self.output_index is None:
raise ValueError("Unknown output node!")
# Specify the special nodes (i.e. the input and output, source and sink)
if isinstance(self.g, nx.DiGraph):
self.undirected_g = self.g.to_undirected()
else:
self.undirected_g = self.g
def __getattr__(self, item):
"""Identify the feature already implemented in the graph class"""
try:
res = getattr(self.g, item)
except AttributeError:
raise AttributeError("Item" + str(item) + ' is not found either in the feature extractor nor the graph'
'instance!')
if callable(res):
return res()
return res
def _paths(self) -> list:
"""Enumerate all paths from input to output. Return a list of lists with each sub-list the node indices from
the input to output
        Data shape:
        (N_input x N_path x length of each path)
        for a SISO graph, the data shape is (1 x N_path x length of each path)
"""
if not isinstance(self.g, nx.DiGraph):
raise TypeError("Longest path is only applicable for directed graph!")
result = []
for i in self.input_index:
result.append(list(nx.all_simple_paths(self.g, i, self.output_index)))
return result
@property
def number_of_paths(self):
paths = self._paths()
if len(paths) == 1:
return len(paths[0])
return [len(i) for i in paths]
@property
def longest_path(self):
"""Return the longest path from input to output. the return type is a list in case when there is more than one
input node."""
paths = self._paths()
if len(paths) == 1: # if the list is a singlet (i.e. the S-T style graph), then return a scalar output only
return len(max(paths[0], key=lambda x: len(x)))
return [len(max(i, key=lambda x: len(x))) for i in paths]
@property
    def degree_distribution(self, normalize=False):
        """
        Return the degree distribution of the *undirected* counterpart of the graph (if the graph is directed).
        Returns a pair of tuples (degrees, counts), where degrees[i] is a degree value and counts[i] is its frequency.
        """
        from collections import Counter
        degree_seq = sorted((d for _, d in self.undirected_g.degree()), reverse=True)
        degree_count = Counter(degree_seq)
        deg, cnt = zip(*degree_count.items())
        if normalize:
            n = self.undirected_g.number_of_nodes()
            cnt = tuple(c / n for c in cnt)  # true division; cnt is a tuple, so rebuild it
        return deg, cnt
@property
    def laplacian_spectrum(self):
return nx.normalized_laplacian_spectrum(self.undirected_g)
@property
def average_undirected_degree(self):
return sum(dict(self.undirected_g.degree).values()) / (self.undirected_g.number_of_nodes() + 0.0)
@property
def number_of_conv3x3(self):
i = 0
for node, attr in self.g.nodes(data=True):
if attr['op_name'] == 'conv3x3-bn-relu':
i += 1
return i
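if __name__ == "__main__":
    # A minimal usage sketch (not part of the original module): build a tiny cell-like DAG
    # and query a few of the features defined above. Node ids and op names are illustrative.
    demo_g = nx.DiGraph()
    demo_g.add_nodes_from([
        (0, {"op_name": "input"}),
        (1, {"op_name": "conv3x3-bn-relu"}),
        (2, {"op_name": "conv3x3-bn-relu"}),
        (3, {"op_name": "output"}),
    ])
    demo_g.add_edges_from([(0, 1), (0, 2), (1, 3), (2, 3)])
    fe = FeatureExtractor(demo_g)
    print("number_of_paths:", fe.number_of_paths)              # 2 paths from input to output
    print("longest_path:", fe.longest_path)                    # 3 nodes on the longest path
    print("number_of_conv3x3:", fe.number_of_conv3x3)          # 2
    print("average_undirected_degree:", fe.average_undirected_degree)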
|
import os
import sys
import numpy as np
import pandas as pd
from torch.utils.data import Subset
from torch.utils.data.dataset import Dataset # For custom datasets
from torchvision import transforms
PROJECT_PATH = os.path.abspath(
os.path.join(os.path.dirname(__file__), '..', '..'))
sys.path.append(PROJECT_PATH)
from src.base.torchvision_dataset import TorchvisionDataset
from src.datasets.preprocessing import get_target_label_idx
from src.datasets.data_splitter import DatasetDivider
from src.datasets.data_set_generic import Dataset
class HitsDataset(TorchvisionDataset):
def __init__(self, root: str, normal_class=1):
super().__init__(root)
self.n_classes = 2 # 0: normal, 1: outlier
self.normal_classes = tuple([normal_class])
self.outlier_classes = list(range(0, 2))
self.outlier_classes.remove(normal_class)
self.data_dict = pd.read_pickle(self.root)
# hardcoded selected channel
images = self.normalize_by_image(self.data_dict['images'])[..., 3][
..., np.newaxis]
labels = np.array(self.data_dict['labels'])
dataset = Dataset(data_array=images, data_label=labels, batch_size=50)
data_splitter = DatasetDivider(test_size=0.3, validation_size=0.1)
data_splitter.set_dataset_obj(dataset)
train_dataset, test_dataset, val_dataset = \
data_splitter.get_train_test_val_set_objs()
transform = transforms.Compose([transforms.ToTensor()])
target_transform = transforms.Lambda(
lambda x: int(x in self.outlier_classes))
train_set = Hits(train_dataset.data_array, train_dataset.data_label,
transform=transform, target_transform=target_transform)
train_idx_normal = get_target_label_idx(
np.array(train_set.label_arr), self.normal_classes)
self.train_set = Subset(train_set, train_idx_normal)
print(self.train_set.__len__())
self.val_all_set = Hits(val_dataset.data_array, val_dataset.data_label,
transform=transform,
target_transform=target_transform)
val_idx_normal = get_target_label_idx(
np.array(self.val_all_set.label_arr), self.normal_classes)
self.val_normal_set = Subset(self.val_all_set, val_idx_normal)
print(self.val_normal_set.__len__())
self.test_set = Hits(test_dataset.data_array, test_dataset.data_label,
transform=transform,
target_transform=target_transform)
def normalize_by_image(self, images):
images -= np.nanmin(images, axis=(1, 2))[:, np.newaxis, np.newaxis, :]
images = images / np.nanmax(images, axis=(1, 2))[
:, np.newaxis, np.newaxis, :]
return images
class Hits(Dataset):
def __init__(self, images, labels, transform, target_transform):
"""
"""
# Transforms
self.transform = transform
self.target_transform = target_transform
self.image_arr = images
self.label_arr = labels
print(self.image_arr.shape)
self.data_len = self.label_arr.shape[0]
    def __getitem__(self, index):
        single_image = self.image_arr[index]
        single_image_label = self.label_arr[index]
        img = single_image
        target = single_image_label
        if self.transform is not None:
            img = self.transform(single_image)
        if self.target_transform is not None:
            target = self.target_transform(single_image_label)
        return img, target, index  # only line changed
def __len__(self):
return self.data_len
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
__all__ = ['TeamsArgs', 'Teams']
@pulumi.input_type
class TeamsArgs:
def __init__(__self__, *,
org_id: pulumi.Input[str],
usernames: pulumi.Input[Sequence[pulumi.Input[str]]],
name: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a Teams resource.
"""
pulumi.set(__self__, "org_id", org_id)
pulumi.set(__self__, "usernames", usernames)
if name is not None:
pulumi.set(__self__, "name", name)
@property
@pulumi.getter(name="orgId")
def org_id(self) -> pulumi.Input[str]:
return pulumi.get(self, "org_id")
@org_id.setter
def org_id(self, value: pulumi.Input[str]):
pulumi.set(self, "org_id", value)
@property
@pulumi.getter
def usernames(self) -> pulumi.Input[Sequence[pulumi.Input[str]]]:
return pulumi.get(self, "usernames")
@usernames.setter
def usernames(self, value: pulumi.Input[Sequence[pulumi.Input[str]]]):
pulumi.set(self, "usernames", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@pulumi.input_type
class _TeamsState:
def __init__(__self__, *,
name: Optional[pulumi.Input[str]] = None,
org_id: Optional[pulumi.Input[str]] = None,
team_id: Optional[pulumi.Input[str]] = None,
usernames: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):
"""
Input properties used for looking up and filtering Teams resources.
"""
if name is not None:
pulumi.set(__self__, "name", name)
if org_id is not None:
pulumi.set(__self__, "org_id", org_id)
if team_id is not None:
pulumi.set(__self__, "team_id", team_id)
if usernames is not None:
pulumi.set(__self__, "usernames", usernames)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter(name="orgId")
def org_id(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "org_id")
@org_id.setter
def org_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "org_id", value)
@property
@pulumi.getter(name="teamId")
def team_id(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "team_id")
@team_id.setter
def team_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "team_id", value)
@property
@pulumi.getter
def usernames(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
return pulumi.get(self, "usernames")
@usernames.setter
def usernames(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "usernames", value)
class Teams(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
name: Optional[pulumi.Input[str]] = None,
org_id: Optional[pulumi.Input[str]] = None,
usernames: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
__props__=None):
"""
Create a Teams resource with the given unique name, props, and options.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: TeamsArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Create a Teams resource with the given unique name, props, and options.
:param str resource_name: The name of the resource.
:param TeamsArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(TeamsArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
name: Optional[pulumi.Input[str]] = None,
org_id: Optional[pulumi.Input[str]] = None,
usernames: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = TeamsArgs.__new__(TeamsArgs)
__props__.__dict__["name"] = name
if org_id is None and not opts.urn:
raise TypeError("Missing required property 'org_id'")
__props__.__dict__["org_id"] = org_id
if usernames is None and not opts.urn:
raise TypeError("Missing required property 'usernames'")
__props__.__dict__["usernames"] = usernames
__props__.__dict__["team_id"] = None
super(Teams, __self__).__init__(
'mongodbatlas:index/teams:Teams',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
name: Optional[pulumi.Input[str]] = None,
org_id: Optional[pulumi.Input[str]] = None,
team_id: Optional[pulumi.Input[str]] = None,
usernames: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None) -> 'Teams':
"""
Get an existing Teams resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _TeamsState.__new__(_TeamsState)
__props__.__dict__["name"] = name
__props__.__dict__["org_id"] = org_id
__props__.__dict__["team_id"] = team_id
__props__.__dict__["usernames"] = usernames
return Teams(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
return pulumi.get(self, "name")
@property
@pulumi.getter(name="orgId")
def org_id(self) -> pulumi.Output[str]:
return pulumi.get(self, "org_id")
@property
@pulumi.getter(name="teamId")
def team_id(self) -> pulumi.Output[str]:
return pulumi.get(self, "team_id")
@property
@pulumi.getter
def usernames(self) -> pulumi.Output[Sequence[str]]:
return pulumi.get(self, "usernames")
|
from .. import Provider as LoremProvider
class Provider(LoremProvider):
"""Implement lorem provider for ``hy_AM`` locale.
Sources:
- https://www.101languages.net/armenian/armenian-word-list
"""
word_list = (
"ես",
"դու",
"նա",
"մենք",
"դուք",
"նրանք",
"այս",
"այն",
"այստեղ",
"այնտեղ",
"ով",
"ինչ",
"որտեղ",
"ուր",
"երբ",
"ինչպես",
"ոչ",
"բոլոր",
"շատ",
"որոշ",
"քիչ",
"այլ",
"ուրիշ",
"մեկ",
"երկու",
"երեք",
"չորս",
"հինգ",
"մեծ",
"երկար",
"լայն",
"հաստ",
"ծանր",
"փոքր",
"կարճ",
"նեղ",
"բարակ",
"կին",
"տղամարդ",
"մարդ",
"երեխա",
"կին",
"ամուսին",
"մայր",
"հայր",
"կենդանի",
"ձուկ",
"թռչուն",
"շուն",
"ոջիլ",
"օձ",
"ճիճու",
"ծառ",
"անտառ",
"փայտ",
"պտուղ",
"սերմ",
"տերև",
"արմատ",
"կեղև",
"ծաղիկ",
"խոտ",
"պարան",
"մաշկ",
"կաշի",
"միս",
"արյուն",
"ոսկոր",
"ճարպ",
"ձու",
"եղջյուր",
"պոզ",
"պոչ",
"փետուր",
"մազ",
"գլուխ",
"ականջ",
"աչք",
"քիթ",
"բերան",
"ատամ",
"լեզու",
"եղունգ",
"ոտք",
"ծունկ",
"ձեռք",
"թև",
"փոր",
"փորոտիք",
"աղիք",
"վիզ",
"մեջք",
"կուրծք",
"սիրտ",
"լյարդ",
"խմել",
"ուտել",
"կծել",
"ծծել",
"թքել",
"ործկալ",
"փչել",
"շնչել",
"ծիծաղել",
"տեսնել",
"լսել",
"իմանալ",
"գիտենալ",
"մտածել",
"զգալ",
"վախենալ",
"քնել",
"ապրել",
"մեռնել",
"սպանել",
"կռվել",
"որսալ",
"խփել",
"հարվածել",
"կտրել",
"բաժանել",
"խոցել",
"քերծել",
"քորել",
"փորել",
"լողալ",
"թռչել",
"քայլել",
"գալ",
"պառկել",
"նստել",
"կանգնել",
"շրջվել",
"ընկնել",
"տալ",
"պահել",
"բռնել",
"սեղմել",
"շփել",
"լվալ",
"սրբել",
"ձգել",
"քաշել",
"հրել",
"նետել",
"կապել",
"կարել",
"հաշվել",
"ասել",
"երգել",
"խաղալ",
"լողալ",
"հոսել",
"սառչել",
"ուռել",
"արև",
"լուսին",
"աստղ",
"ջուր",
"անձրև",
"գետ",
"լիճ",
"ծով",
"աղ",
"քար",
"ավազ",
"փոշի",
"հող",
"ամպ",
"մառախուղ",
"մշուշ",
"երկինք",
"քամի",
"ձյուն",
"սառույց",
"ծուխ",
"հուր",
"կրակ",
"մոխիր",
"վառվել",
"այրվել",
"ճամփա",
"ճանապարհ",
"լեռ",
"սար",
"կարմիր",
"կանաչ",
"դեղին",
"սպիտակ",
"սև",
"գիշեր",
"օր",
"տարի",
"տաք",
"ցուրտ",
"լիքը",
"նոր",
"հին",
"լավ",
"վատ",
"փտած",
"կեղտոտ",
"ուղիղ",
"կլոր",
"սուր",
"բութ",
"հարթ",
"թաց",
"չոր",
"ճիշտ",
"մոտ",
"հեռու",
"աջ",
)
|
""" support for skip/xfail functions and markers. """
from _pytest.config import hookimpl
from _pytest.mark.evaluate import MarkEvaluator
from _pytest.outcomes import fail
from _pytest.outcomes import skip
from _pytest.outcomes import xfail
def pytest_addoption(parser):
group = parser.getgroup("general")
group.addoption(
"--runxfail",
action="store_true",
dest="runxfail",
default=False,
help="report the results of xfail tests as if they were not marked",
)
parser.addini(
"xfail_strict",
"default for the strict parameter of xfail "
"markers when not given explicitly (default: False)",
default=False,
type="bool",
)
def pytest_configure(config):
if config.option.runxfail:
# yay a hack
import pytest
old = pytest.xfail
config._cleanup.append(lambda: setattr(pytest, "xfail", old))
def nop(*args, **kwargs):
pass
nop.Exception = xfail.Exception
setattr(pytest, "xfail", nop)
config.addinivalue_line(
"markers",
"skip(reason=None): skip the given test function with an optional reason. "
'Example: skip(reason="no way of currently testing this") skips the '
"test.",
)
config.addinivalue_line(
"markers",
"skipif(condition): skip the given test function if eval(condition) "
"results in a True value. Evaluation happens within the "
"module global context. Example: skipif('sys.platform == \"win32\"') "
"skips the test if we are on the win32 platform. see "
"https://docs.pytest.org/en/latest/skipping.html",
)
config.addinivalue_line(
"markers",
"xfail(condition, reason=None, run=True, raises=None, strict=False): "
"mark the test function as an expected failure if eval(condition) "
"has a True value. Optionally specify a reason for better reporting "
"and run=False if you don't even want to execute the test function. "
"If only specific exception(s) are expected, you can list them in "
"raises, and if the test fails in other ways, it will be reported as "
"a true failure. See https://docs.pytest.org/en/latest/skipping.html",
)
@hookimpl(tryfirst=True)
def pytest_runtest_setup(item):
# Check if skip or skipif are specified as pytest marks
item._skipped_by_mark = False
eval_skipif = MarkEvaluator(item, "skipif")
if eval_skipif.istrue():
item._skipped_by_mark = True
skip(eval_skipif.getexplanation())
for skip_info in item.iter_markers(name="skip"):
item._skipped_by_mark = True
if "reason" in skip_info.kwargs:
skip(skip_info.kwargs["reason"])
elif skip_info.args:
skip(skip_info.args[0])
else:
skip("unconditional skip")
item._evalxfail = MarkEvaluator(item, "xfail")
check_xfail_no_run(item)
@hookimpl(hookwrapper=True)
def pytest_pyfunc_call(pyfuncitem):
check_xfail_no_run(pyfuncitem)
outcome = yield
passed = outcome.excinfo is None
if passed:
check_strict_xfail(pyfuncitem)
def check_xfail_no_run(item):
"""check xfail(run=False)"""
if not item.config.option.runxfail:
evalxfail = item._evalxfail
if evalxfail.istrue():
if not evalxfail.get("run", True):
xfail("[NOTRUN] " + evalxfail.getexplanation())
def check_strict_xfail(pyfuncitem):
"""check xfail(strict=True) for the given PASSING test"""
evalxfail = pyfuncitem._evalxfail
if evalxfail.istrue():
strict_default = pyfuncitem.config.getini("xfail_strict")
is_strict_xfail = evalxfail.get("strict", strict_default)
if is_strict_xfail:
del pyfuncitem._evalxfail
explanation = evalxfail.getexplanation()
fail("[XPASS(strict)] " + explanation, pytrace=False)
@hookimpl(hookwrapper=True)
def pytest_runtest_makereport(item, call):
outcome = yield
rep = outcome.get_result()
evalxfail = getattr(item, "_evalxfail", None)
# unittest special case, see setting of _unexpectedsuccess
if hasattr(item, "_unexpectedsuccess") and rep.when == "call":
if item._unexpectedsuccess:
rep.longrepr = "Unexpected success: {}".format(item._unexpectedsuccess)
else:
rep.longrepr = "Unexpected success"
rep.outcome = "failed"
elif item.config.option.runxfail:
pass # don't interfere
elif call.excinfo and call.excinfo.errisinstance(xfail.Exception):
rep.wasxfail = "reason: " + call.excinfo.value.msg
rep.outcome = "skipped"
elif evalxfail and not rep.skipped and evalxfail.wasvalid() and evalxfail.istrue():
if call.excinfo:
if evalxfail.invalidraise(call.excinfo.value):
rep.outcome = "failed"
else:
rep.outcome = "skipped"
rep.wasxfail = evalxfail.getexplanation()
elif call.when == "call":
strict_default = item.config.getini("xfail_strict")
is_strict_xfail = evalxfail.get("strict", strict_default)
explanation = evalxfail.getexplanation()
if is_strict_xfail:
rep.outcome = "failed"
rep.longrepr = "[XPASS(strict)] {}".format(explanation)
else:
rep.outcome = "passed"
rep.wasxfail = explanation
elif (
getattr(item, "_skipped_by_mark", False)
and rep.skipped
and type(rep.longrepr) is tuple
):
# skipped by mark.skipif; change the location of the failure
# to point to the item definition, otherwise it will display
# the location of where the skip exception was raised within pytest
_, _, reason = rep.longrepr
filename, line = item.location[:2]
rep.longrepr = filename, line + 1, reason
# called by terminalreporter progress reporting
def pytest_report_teststatus(report):
if hasattr(report, "wasxfail"):
if report.skipped:
return "xfailed", "x", "XFAIL"
elif report.passed:
return "xpassed", "X", "XPASS"
|
import torch
import torch.nn as nn
import os
from common import base_data_path
from typing import List
import pandas as pd
CONTEXT_SIZE = 1  # 1 word to the left, 1 to the right
EMBEDDING_DIM = 3
word_to_ix = {}
ix_to_word = {}
def make_context_vector(context, word_to_ix):
idxs = [word_to_ix[w] for w in context]
return torch.tensor(idxs, dtype=torch.long)
def get_index_of_max(input):
index = 0
for i in range(1, len(input)):
if input[i] > input[index]:
index = i
return index
def get_max_prob_result(input, ix_to_word):
return ix_to_word[get_index_of_max(input)]
def split_smiles_repr(smile_repr: str) -> List[str]:
element_list = []
skip_next = False
for i in range(len(smile_repr)):
if skip_next:
skip_next = False
continue
element = smile_repr[i]
if (i < (len(smile_repr) - 1)) and (smile_repr[i].isalpha()):
possible_element = element + smile_repr[i+1]
if possible_element in word_to_ix:
element = possible_element
skip_next = True
if element in word_to_ix:
element_list.append(element)
else:
            raise ValueError('Unknown SMILES element %r passed to split_smiles_repr' % element)
return element_list
def get_data(sequence_list: List[str]):
_sequence_list = []
sequence_elements_list = []
for s in sequence_list:
split_elements = split_smiles_repr(s)
_sequence_list.append(s)
sequence_elements_list.append(split_elements)
return sequence_elements_list
filepath = os.path.join(base_data_path, "vocab.txt")
f = open(filepath, "r")
elements_list = f.read().splitlines()
elements_list.append(' ')
f.close()
vocab = elements_list
vocab_size = len(elements_list)
for i, word in enumerate(vocab):
word_to_ix[word] = i
ix_to_word[i] = word
filepath = os.path.join(base_data_path, "dataset_v1.csv")
df = pd.read_csv(filepath, sep=",", header=0)
smiles_data = get_data(df.SMILES.tolist())
class CBOW(torch.nn.Module):
def __init__(self, vocab_size, embedding_dim):
super(CBOW, self).__init__()
self.embeddings = nn.Embedding(vocab_size, embedding_dim)
self.linear1 = nn.Linear(embedding_dim, 128)
self.activation_function1 = nn.ReLU()
self.linear2 = nn.Linear(128, vocab_size)
self.activation_function2 = nn.LogSoftmax(dim=-1)
def forward(self, inputs):
embeds = sum(self.embeddings(inputs)).view(1, -1)
out = self.linear1(embeds)
out = self.activation_function1(out)
out = self.linear2(out)
out = self.activation_function2(out)
return out
def get_word_emdedding(self, word):
word = torch.LongTensor([word_to_ix[word]])
return self.embeddings(word).view(1, -1)
model = CBOW(vocab_size, EMBEDDING_DIM)
loss_function = nn.NLLLoss()
optimizer = torch.optim.SGD(model.parameters(), lr=0.001)
for epoch in range(50):
total_loss = 0
for smiles_element_list in smiles_data:
for i in range(1, len(smiles_element_list) - 1):
context = [smiles_element_list[i - 1], smiles_element_list[i + 1]]
target = smiles_element_list[i]
context_vector = make_context_vector(context, word_to_ix)
model.zero_grad()
log_probs = model(context_vector)
loss = loss_function(log_probs, torch.tensor([word_to_ix[target]], dtype=torch.long))
total_loss += loss.item()
loss.backward()
optimizer.step()
print(f"Epoch - {epoch}, Loss - {total_loss}")
|
import functionality.planets as planets
import assets.tools as tools
from assets.variables import *
# TODO: Also add logger to code and display errors correctly
# TODO: Make one pixel correspond to 1/10 au so that acceleration behaves more realistically
class SolarSystem(metaclass=tools.Singleton):
"""This creates the space in which the planets interact with each other. It is a singleton
so it can be used in different functions and keeps all its information
Attributes
-----------
planets_list: list
this list contains all planet class objects
max_objects: int
this sets the max object count allowed in the planets list
system_time: int
the starting time for the simulation
error: bool
set to True, if error occurs
Methods
----------
add_planet(*args):
Adds planets to the planet list
remove_planet(planet):
Removes planet from the solar system
get_planet(planet):
Gets a specific planet from planet list
number_of_intrastellar_objects():
Returns the number of objects in list
planetary_interaction():
Calculates the new accelerations of each of the planets
    planetary_positions():
Utilizes verlet integration to get the next positions of all planets for a certain time step period
update():
Updates the calculated data and stores it inside the planets itself
reset():
This resets the class back to its empty state
"""
def __init__(self) -> None:
"""Initializes the intra-stellar objects and the class attributes
Returns
----------
None
"""
self.planets_list = []
self.max_objects = 10
self.system_time = 0
# To display error messages
self.error = False
def add_planet(self, *args) -> None:
"""Adds planets to the planet list
Parameters
----------
*args: Planet
takes in Planet class objects
Returns
----------
None
"""
# TODO: Display some error message
if self.number_of_intrastellar_objects() < self.max_objects:
for i in args:
self.planets_list.append(i)
else:
self.error = True
def remove_planet(self, planet) -> None:
"""Removes planet from the solar system
Parameters
----------
planet: Planet
planet to be removed from the planet list
Returns
----------
None
"""
self.planets_list.remove(planet)
def get_planet(self, planet):
"""Gets a specific planet from planet list
Parameters
----------
planet: Planet
planet to be get from planet list
Returns
----------
planet: Planet
planet from planet list or None if not found
"""
return planet if planet in self.planets_list else None
def number_of_intrastellar_objects(self):
"""Returns the number of objects in list
Returns
----------
planet_list_length: int
the length of the planet list
"""
return len(self.planets_list)
def planetary_interaction(self):
"""Calculates the new accelerations of each of the planets
Returns
----------
acceleration_list: list
the calculated acceleration of all the planets in the planet list
"""
acceleration_list = []
for i in self.planets_list:
a_x, a_y = 0, 0
for j in self.planets_list:
if i != j:
a_x += i.alien_acceleration(j)[0]
a_y += i.alien_acceleration(j)[1]
acceleration_list.append([a_x, a_y])
return acceleration_list
def planetary_positions(self):
"""Utilizes verlet integration to get the next positions of all planets for a certain time step period
Returns
-----------
temp_pos_list: list
the positions of the planets after the verlet integration
temp_vel_list: list
the velocity of the planets after the verlet integration
"""
temp_pos_list, temp_vel_list = [], []
for i, o in enumerate(self.planets_list):
# Calculates the position and velocity for each step and saves it to the planet
temp_pos_x, temp_v_x = tools.verlet_algorithm(o.pos_x_real, o.v_x, self.planetary_interaction()[i][0])
temp_pos_y, temp_v_y = tools.verlet_algorithm(o.pos_y_real, o.v_y, self.planetary_interaction()[i][1])
# Saves it to a temporary list as to not corrupt positional data for each step
temp_pos_list.append([temp_pos_x, temp_pos_y])
temp_vel_list.append([temp_v_x, temp_v_y])
return temp_pos_list, temp_vel_list
def update(self) -> None:
""""Updates the calculated data and stores it inside the planets itself
Returns
----------
None
"""
        positions, velocities = self.planetary_positions()  # compute once per update
        for i, o in enumerate(self.planets_list):
            o.pos_x_real, o.pos_y_real = positions[i]
            o.v_x, o.v_y = velocities[i]
            # o.trace.append(o.rect.center)
def reset(self) -> None:
"""This resets the class back to its empty state
Returns
-----------
None
"""
self.planets_list = []
self.system_time = 0
if __name__ == "__main__":
earth = planets.Planet(1e20, 5, 0)
satellite = planets.Planet(1e20, 10, 0)
moon = planets.Planet(1e20, 15, 0)
ss = SolarSystem()
ss.add_planet(earth, satellite, moon)
print(ss.planetary_interaction())
print(ss.planets_list)
for i in ss.planets_list:
print(i.pos_x, i.pos_y)
for i in ss.planets_list:
print(i.pos_x, i.pos_y)
|
import pytest
from katrain.core.constants import AI_STRATEGIES_RECOMMENDED_ORDER, AI_STRATEGIES
class TestAI:
def test_order(self):
assert set(AI_STRATEGIES_RECOMMENDED_ORDER) == set(AI_STRATEGIES)
|
""" Enums used in different API endpoints """
from enum import Enum
class PluginStatusState(str, Enum):
"""State of the plugin"""
NOTRUNNING = "NotRunning"
STARTING = "Starting"
RUNNING = "Running"
FAILEDTOSTART = "FailedToStart"
FAILEDTOSTAYRUNNING = "FailedToStayRunning"
STOPPING = "Stopping"
def __str__(self) -> str:
return str(self.value)
class SidebarCategoryType(str, Enum):
"""None"""
CHANNELS = "channels"
CUSTOM = "custom"
DIRECT_MESSAGES = "direct_messages"
FAVORITES = "favorites"
def __str__(self) -> str:
return str(self.value)
class SidebarCategoryWithChannelsType(str, Enum):
"""None"""
CHANNELS = "channels"
CUSTOM = "custom"
DIRECT_MESSAGES = "direct_messages"
FAVORITES = "favorites"
def __str__(self) -> str:
return str(self.value)
class UploadSessionType(str, Enum):
"""The type of the upload."""
ATTACHMENT = "attachment"
IMPORT_ = "import"
def __str__(self) -> str:
return str(self.value)
class PostMetadataEmbedsItemType(str, Enum):
"""The type of content that is embedded in this point."""
IMAGE = "image"
MESSAGE_ATTACHMENT = "message_attachment"
OPENGRAPH = "opengraph"
LINK = "link"
def __str__(self) -> str:
return str(self.value)
|
from __future__ import absolute_import, division, print_function
import sys
import pathlib
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
EPOCHS = 1000
# The patience parameter is the amount of epochs to check for improvement
EARLY_STOP = keras.callbacks.EarlyStopping(monitor='val_loss', patience=30)
class PrintDot(keras.callbacks.Callback):
def on_epoch_end(self, epoch, logs):
if epoch % 100 == 0: print('')
print('.', end='')
def plot_history(history):
hist = pd.DataFrame(history.history)
hist['epoch'] = history.epoch
plt.figure()
plt.xlabel('Epoch')
plt.ylabel('Mean Abs Error [cost]')
plt.plot(hist['epoch'], hist['mean_absolute_error'],
label='Train Error')
plt.plot(hist['epoch'], hist['val_mean_absolute_error'],
label = 'Val Error')
plt.ylim([0,5])
plt.legend()
plt.figure()
plt.xlabel('Epoch')
plt.ylabel('Mean Square Error [$cost^2$]')
plt.plot(hist['epoch'], hist['mean_squared_error'],
label='Train Error')
plt.plot(hist['epoch'], hist['val_mean_squared_error'],
label = 'Val Error')
plt.ylim([0,20])
plt.legend()
plt.show()
# we hard-code the values instead of using stats so that integration with
# predictor using the model is easier
scaling = pd.DataFrame(data={
'min': [-10000, -10000, -10000, -2300, -2300, -2300, -6.0, -6.0, -6.0, -3.2, -3.2, -3.2],
'max': [ 10000, 10000, 10000, 2300, 2300, 2300, 6.0, 6.0, 6.0, 3.2, 3.2, 3.2],
}, index=[ 'x', 'y', 'z', 'vx', 'vy', 'vz', 'avx', 'avy', 'avz', 'roll', 'pitch', 'yaw'])
# scale to range [0, 1]
# TODO try polar coordinates. for velocity: https://math.stackexchange.com/questions/2444965/relationship-between-cartesian-velocity-and-polar-velocity
def scale(x):
return (x - scaling['min']) / (scaling['max'] - scaling['min'])
def build_model():
model = keras.Sequential([
layers.Dense(128, activation=tf.nn.relu, input_shape=[len(train_dataset.keys())]),
layers.Dense(128, activation=tf.nn.relu),
# these extra layers seem to hurt more than they help!
#layers.Dropout(0.01),
#layers.Dense(64, activation=tf.nn.relu),
# this doesn't work as well as a single 64-wide layer
#layers.Dense(12, activation=tf.nn.relu, input_shape=[len(train_dataset.keys())]),
#layers.Dense(12, activation=tf.nn.relu),
#layers.Dense(12, activation=tf.nn.relu),
#layers.Dense(12, activation=tf.nn.relu),
#layers.Dense(12, activation=tf.nn.relu),
layers.Dense(1)
])
#optimizer = tf.keras.optimizers.RMSprop(0.001)
optimizer = tf.train.AdamOptimizer(0.001)
model.compile(loss='mean_squared_error',
optimizer=optimizer,
metrics=['mean_absolute_error', 'mean_squared_error'])
return model
# should be the time.csv from generate-data's time binary
dataset_path = sys.argv[1]
column_names = ['cost', 'x', 'y', 'z', 'vx', 'vy', 'vz', 'avx', 'avy', 'avz', 'roll', 'pitch', 'yaw']
raw_dataset = pd.read_csv(dataset_path, names=column_names,
na_values = "", #comment='\t',
sep=",", skipinitialspace=True)
# visualize the data!
pos_plot = sns.pairplot(raw_dataset[["cost", "x", "y", "z"]], diag_kind="kde")
pos_plot.savefig("./pos.fig.png")
vel_plot = sns.pairplot(raw_dataset[["cost", "vx", "vy", "vz"]], diag_kind="kde")
vel_plot.savefig("./vel.fig.png")
avel_plot = sns.pairplot(raw_dataset[["cost", "avx", "avy", "avz"]], diag_kind="kde")
avel_plot.savefig("./avel.fig.png")
rot_plot = sns.pairplot(raw_dataset[["cost", "roll", "pitch", "yaw"]], diag_kind="kde")
rot_plot.savefig("./rot.fig.png")
pos_rot_plot = sns.pairplot(raw_dataset[["cost", "x", "y", "yaw"]], diag_kind="kde")
pos_rot_plot.savefig("./pos_rot.fig.png")
dataset = raw_dataset.copy()
dataset.tail()
# we don't have missing data
# dataset.isna().sum()
# dataset = dataset.dropna()
# split into training vs test datasets
train_dataset = dataset.sample(frac=0.95,random_state=0)
test_dataset = dataset.drop(train_dataset.index)
# using stats from full dataset
stats = raw_dataset.describe()
stats.pop("cost")
stats = stats.transpose()
stats
train_labels = train_dataset.pop('cost')
test_labels = test_dataset.pop('cost')
scaled_train_dataset = scale(train_dataset)
scaled_test_dataset = scale(test_dataset)
# build and train model
model = build_model()
model.summary()
history = model.fit(scaled_train_dataset, train_labels, epochs=EPOCHS,
validation_split = 0.2, verbose=0, callbacks=[EARLY_STOP, PrintDot()])
plot_history(history)
# check against test set
loss, mae, mse = model.evaluate(scaled_test_dataset, test_labels, verbose=0)
print("Testing set Mean Abs Error: {:5.2f} cost".format(mae))
# plot all test predictions
test_predictions = model.predict(scaled_test_dataset).flatten()
plt.scatter(test_labels, test_predictions)
plt.xlabel('True Values [cost]')
plt.ylabel('Predictions [cost]')
plt.axis('equal')
plt.axis('square')
plt.xlim([0,plt.xlim()[1]])
plt.ylim([0,plt.ylim()[1]])
plt.plot([-100, 100], [-100, 100])
plt.show()
# error distribution
error = test_predictions - test_labels
plt.hist(error, bins = 25)
plt.xlabel("Prediction Error [cost]")
plt.ylabel("Count")
plt.show()
model.save('./simple_throttle_cost_model.h5')
saved_model_path = tf.contrib.saved_model.save_keras_model(model, "./simple_throttle_cost_saved_model")
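# A hedged sanity check (not in the original script): reload the saved Keras model and predict
# on the already-scaled test set. compile=False sidesteps deserializing the TF1-style optimizer.
reloaded = keras.models.load_model('./simple_throttle_cost_model.h5', compile=False)
reloaded_predictions = reloaded.predict(scaled_test_dataset).flatten()
print("Reloaded model, first 5 predictions:", reloaded_predictions[:5])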
|
#!/usr/bin/env python
#
# test_probe_ports.py
#
"""Shows how to probe for available MIDI input and output ports."""
import sys
from rtmidi import *
try:
raw_input
except NameError:
# Python 3
raw_input = input
apis = {
API_MACOSX_CORE: "OS X CoreMIDI",
API_LINUX_ALSA: "Linux ALSA",
API_UNIX_JACK: "Jack Client",
API_WINDOWS_MM: "Windows MultiMedia",
API_WINDOWS_KS: "Windows Kernel Streaming",
API_RTMIDI_DUMMY: "RtMidi Dummy"
}
available_apis = get_compiled_api()
for api, desc in sorted(apis.items()):
if api in available_apis:
try:
r = raw_input("Probe ports using the %s API? (Y/n) " % desc).strip()
if r and r.lower() != "y":
continue
except (KeyboardInterrupt, EOFError):
print('')
break
for name, class_ in (("input", MidiIn), ("output", MidiOut)):
try:
midi = class_(api)
ports = midi.get_ports(encoding='latin1'
if sys.platform.startswith('win') else 'utf-8')
except RuntimeError as exc:
print("Could not probe MIDI %s ports: %s" % (name, exc))
continue
if not ports:
print("No MIDI %s ports found." % name)
else:
print("Available MIDI %s ports:\n" % name)
for port, name in enumerate(ports):
print("[%i] %s" % (port, name))
print('')
del midi
|
#-
# Copyright (c) 2015 Khilan Gudka
# All rights reserved.
#
# This software was developed by SRI International and the University of
# Cambridge Computer Laboratory under DARPA/AFRL contract FA8750-10-C-0237
# ("CTSRD"), as part of the DARPA CRASH research programme.
#
# @BERI_LICENSE_HEADER_START@
#
# Licensed to BERI Open Systems C.I.C. (BERI) under one or more contributor
# license agreements. See the NOTICE file distributed with this work for
# additional information regarding copyright ownership. BERI licenses this
# file to you under the BERI Hardware-Software License, Version 1.0 (the
# "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at:
#
# http://www.beri-open-systems.org/legal/license-1-0.txt
#
# Unless required by applicable law or agreed to in writing, Work distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#
# @BERI_LICENSE_HEADER_END@
#
# cat the contents of test-output.txt
f = open('test-output.txt', 'r')
print(f.read())
f.close()
|
import logging
import typing
import web3
from util import constants
from web3tools import web3util, account
logger = logging.getLogger(__name__)
def randomWeb3Wallet():
private_key = account.randomPrivateKey()
return Web3Wallet(private_key=private_key)
class Web3Wallet:
"""Signs txs and msgs with an account's private key."""
_last_tx_count = dict()
MIN_GAS_PRICE = 1000000000
def __init__(self, private_key:str):
self._private_key = private_key
self._address = account.privateKeyToAddress(self._private_key)
#give this wallet a bunch of ETH for gas fees
@property
def address(self):
return self._address
@property
def private_key(self):
return self._private_key
@property
def account(self):
return account.Account(private_key=self.private_key)
@staticmethod
def reset_tx_count():
Web3Wallet._last_tx_count = dict()
def __get_key(self):
return self._private_key
def validate(self):
_web3 = web3util.get_web3()
key = self.__get_key()
account = _web3.eth.account.from_key(key)
return account.address == self._address
@staticmethod
def _get_nonce(address):
# We cannot rely on `web3.eth.getTransactionCount` because when sending multiple
# transactions in a row without wait in between the network may not get the chance to
# update the transaction count for the account address in time.
# So we have to manage this internally per account address.
_web3 = web3util.get_web3()
if address not in Web3Wallet._last_tx_count:
Web3Wallet._last_tx_count[address] = _web3.eth.getTransactionCount(address)
else:
Web3Wallet._last_tx_count[address] += 1
return Web3Wallet._last_tx_count[address]
def sign_tx(self, tx):
_web3 = web3util.get_web3()
account = _web3.eth.account.from_key(self._private_key)
nonce = Web3Wallet._get_nonce(account.address)
gas_price = int(_web3.eth.gasPrice / 100)
gas_price = max(gas_price, self.MIN_GAS_PRICE)
tx['nonce'] = nonce
tx['gasPrice'] = gas_price
        signed_tx = _web3.eth.account.sign_transaction(tx, self._private_key)
return signed_tx.rawTransaction
    def sign(self, msg_hash):
        _web3 = web3util.get_web3()
        account = _web3.eth.account.from_key(self._private_key)
        return account.signHash(msg_hash)
def ETH_base(self) -> int: #returns ETH, in base 18 (i.e. num wei)
_web3 = web3util.get_web3()
return _web3.eth.getBalance(self._address)
    def fundFromAbove(self, num_wei: int):
        #Give this wallet ETH to pay gas fees
#Use funds given to 'TEST_PRIVATE_KEY1' from ganache (see deploy.py)
network = web3util.get_network()
god_key = web3util.confFileValue(network, 'TEST_PRIVATE_KEY1')
god_wallet = Web3Wallet(god_key)
god_wallet.sendEth(self.address, num_wei)
def sendEth(self, to_address:str, num_wei:int):
return buildAndSendTx(
function=None, from_wallet=self, num_wei=num_wei,
to_address=to_address)
def buildAndSendTx(function,
from_wallet: Web3Wallet,
gaslimit: int = constants.GASLIMIT_DEFAULT,
num_wei: int = 0,
to_address=None):
assert isinstance(from_wallet.address, str)
#assert isinstance(from_wallet.private_key, str)
_web3 = web3util.get_web3()
nonce = _web3.eth.getTransactionCount(from_wallet.address)
network = web3util.get_network()
gas_price = int(web3util.confFileValue(network, 'GAS_PRICE'))
tx_params = {
"from": from_wallet.address,
"value": num_wei,
"nonce": nonce,
"gas": gaslimit,
"gasPrice": gas_price,
}
if function is None: #just send ETH, versus smart contract call?
assert to_address is not None
assert isinstance(to_address, str)
tx = tx_params
tx["to"] = to_address
else:
assert to_address is None
tx = function.buildTransaction(tx_params)
signed_tx = _web3.eth.account.sign_transaction(
tx, private_key=from_wallet.private_key)
tx_hash = _web3.eth.sendRawTransaction(signed_tx.rawTransaction)
tx_receipt = _web3.eth.waitForTransactionReceipt(tx_hash)
if tx_receipt['status'] == 0: # did tx fail?
raise Exception("The tx failed. tx_receipt: {tx_receipt}")
return (tx_hash, tx_receipt)
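# A hedged usage sketch (not part of the original module): fund a fresh wallet from the
# configured test account and send ETH onward. It assumes a local node and the network config
# that web3util reads (e.g. ganache with TEST_PRIVATE_KEY1 funded), so it is left commented.
#
#     wallet = randomWeb3Wallet()
#     assert wallet.validate()
#     wallet.fundFromAbove(10**18)           # 1 ETH, in wei
#     print(wallet.ETH_base())               # balance in wei
#     other = randomWeb3Wallet()
#     wallet.sendEth(other.address, 10**17)  # forward 0.1 ETH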
|
import asyncio
import codecs
import dataclasses
import functools
import io
import re
import sys
import traceback
import warnings
from hashlib import md5, sha1, sha256
from http.cookies import CookieError, Morsel, SimpleCookie
from types import MappingProxyType, TracebackType
from typing import (
TYPE_CHECKING,
Any,
Dict,
Iterable,
List,
Mapping,
Optional,
Tuple,
Type,
Union,
cast,
)
from multidict import CIMultiDict, CIMultiDictProxy, MultiDict, MultiDictProxy
from yarl import URL
from . import hdrs, helpers, http, multipart, payload
from .abc import AbstractStreamWriter
from .client_exceptions import (
ClientConnectionError,
ClientOSError,
ClientResponseError,
ContentTypeError,
InvalidURL,
ServerFingerprintMismatch,
)
from .formdata import FormData
from .hdrs import CONTENT_TYPE
from .helpers import (
BaseTimerContext,
BasicAuth,
HeadersMixin,
TimerNoop,
is_expected_content_type,
noop,
parse_mimetype,
reify,
set_result,
)
from .http import SERVER_SOFTWARE, HttpVersion10, HttpVersion11, StreamWriter
from .http_parser import HAS_BROTLI
from .log import client_logger
from .streams import StreamReader
from .typedefs import (
DEFAULT_JSON_DECODER,
JSONDecoder,
LooseCookies,
LooseHeaders,
RawHeaders,
)
try:
import ssl
from ssl import SSLContext
except ImportError: # pragma: no cover
ssl = None # type: ignore[assignment]
SSLContext = object # type: ignore[misc,assignment]
try:
import cchardet as chardet
except ImportError: # pragma: no cover
import charset_normalizer as chardet # type: ignore[no-redef]
__all__ = ("ClientRequest", "ClientResponse", "RequestInfo", "Fingerprint")
if TYPE_CHECKING: # pragma: no cover
from .client import ClientSession
from .connector import Connection
from .tracing import Trace
def _gen_default_accept_encoding() -> str:
return "gzip, deflate, br" if HAS_BROTLI else "gzip, deflate"
@dataclasses.dataclass(frozen=True)
class ContentDisposition:
type: Optional[str]
parameters: "MappingProxyType[str, str]"
filename: Optional[str]
@dataclasses.dataclass(frozen=True)
class RequestInfo:
url: URL
method: str
headers: "CIMultiDictProxy[str]"
real_url: URL
class Fingerprint:
HASHFUNC_BY_DIGESTLEN = {
16: md5,
20: sha1,
32: sha256,
}
def __init__(self, fingerprint: bytes) -> None:
digestlen = len(fingerprint)
hashfunc = self.HASHFUNC_BY_DIGESTLEN.get(digestlen)
if not hashfunc:
raise ValueError("fingerprint has invalid length")
elif hashfunc is md5 or hashfunc is sha1:
raise ValueError(
"md5 and sha1 are insecure and " "not supported. Use sha256."
)
self._hashfunc = hashfunc
self._fingerprint = fingerprint
@property
def fingerprint(self) -> bytes:
return self._fingerprint
def check(self, transport: asyncio.Transport) -> None:
if not transport.get_extra_info("sslcontext"):
return
sslobj = transport.get_extra_info("ssl_object")
cert = sslobj.getpeercert(binary_form=True)
got = self._hashfunc(cert).digest()
if got != self._fingerprint:
host, port, *_ = transport.get_extra_info("peername")
raise ServerFingerprintMismatch(self._fingerprint, got, host, port)
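# Hedged usage sketch (not part of aiohttp itself): a Fingerprint is built from the
# SHA-256 digest of the server's DER-encoded certificate; md5/sha1 digests are
# rejected above. The certificate bytes below are a hypothetical placeholder.
#
#   import hashlib
#   der_cert = b"..."                                # DER bytes of the expected cert
#   fp = Fingerprint(hashlib.sha256(der_cert).digest())
#   # pass `fp` as the `ssl` argument of a request; check() runs on connection setup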
if ssl is not None:
SSL_ALLOWED_TYPES = (ssl.SSLContext, bool, Fingerprint, type(None))
else: # pragma: no cover
SSL_ALLOWED_TYPES = type(None)
@dataclasses.dataclass(frozen=True)
class ConnectionKey:
    # the key should contain information about the proxy / TLS settings used,
    # to prevent reusing wrong connections from a pool
host: str
port: Optional[int]
is_ssl: bool
ssl: Union[SSLContext, None, bool, Fingerprint]
proxy: Optional[URL]
proxy_auth: Optional[BasicAuth]
proxy_headers_hash: Optional[int] # hash(CIMultiDict)
class ClientRequest:
GET_METHODS = {
hdrs.METH_GET,
hdrs.METH_HEAD,
hdrs.METH_OPTIONS,
hdrs.METH_TRACE,
}
POST_METHODS = {hdrs.METH_PATCH, hdrs.METH_POST, hdrs.METH_PUT}
ALL_METHODS = GET_METHODS.union(POST_METHODS).union({hdrs.METH_DELETE})
DEFAULT_HEADERS = {
hdrs.ACCEPT: "*/*",
hdrs.ACCEPT_ENCODING: _gen_default_accept_encoding(),
}
body = b""
auth = None
response = None
_writer = None # async task for streaming data
_continue = None # waiter future for '100 Continue' response
# N.B.
    # Adding a __del__ method with self._writer closing doesn't make sense
    # because _writer is an instance method, thus it keeps a reference to self.
    # Until the writer has finished, the finalizer will not be called.
def __init__(
self,
method: str,
url: URL,
*,
params: Optional[Mapping[str, str]] = None,
headers: Optional[LooseHeaders] = None,
skip_auto_headers: Iterable[str] = frozenset(),
data: Any = None,
cookies: Optional[LooseCookies] = None,
auth: Optional[BasicAuth] = None,
version: http.HttpVersion = http.HttpVersion11,
compress: Optional[str] = None,
chunked: Optional[bool] = None,
expect100: bool = False,
loop: asyncio.AbstractEventLoop,
response_class: Optional[Type["ClientResponse"]] = None,
proxy: Optional[URL] = None,
proxy_auth: Optional[BasicAuth] = None,
timer: Optional[BaseTimerContext] = None,
session: Optional["ClientSession"] = None,
ssl: Union[SSLContext, bool, Fingerprint, None] = None,
proxy_headers: Optional[LooseHeaders] = None,
traces: Optional[List["Trace"]] = None,
):
assert isinstance(url, URL), url
assert isinstance(proxy, (URL, type(None))), proxy
# FIXME: session is None in tests only, need to fix tests
# assert session is not None
self._session = cast("ClientSession", session)
if params:
q = MultiDict(url.query)
url2 = url.with_query(params)
q.extend(url2.query)
url = url.with_query(q)
self.original_url = url
self.url = url.with_fragment(None)
self.method = method.upper()
self.chunked = chunked
self.compress = compress
self.loop = loop
self.length = None
if response_class is None:
real_response_class = ClientResponse
else:
real_response_class = response_class
self.response_class = real_response_class # type: Type[ClientResponse]
self._timer = timer if timer is not None else TimerNoop()
self._ssl = ssl
if loop.get_debug():
self._source_traceback = traceback.extract_stack(sys._getframe(1))
self.update_version(version)
self.update_host(url)
self.update_headers(headers)
self.update_auto_headers(skip_auto_headers)
self.update_cookies(cookies)
self.update_content_encoding(data)
self.update_auth(auth)
self.update_proxy(proxy, proxy_auth, proxy_headers)
self.update_body_from_data(data)
if data is not None or self.method not in self.GET_METHODS:
self.update_transfer_encoding()
self.update_expect_continue(expect100)
if traces is None:
traces = []
self._traces = traces
def is_ssl(self) -> bool:
return self.url.scheme in ("https", "wss")
@property
def ssl(self) -> Union["SSLContext", None, bool, Fingerprint]:
return self._ssl
@property
def connection_key(self) -> ConnectionKey:
proxy_headers = self.proxy_headers
if proxy_headers:
h = hash(
tuple((k, v) for k, v in proxy_headers.items())
) # type: Optional[int]
else:
h = None
return ConnectionKey(
self.host,
self.port,
self.is_ssl(),
self.ssl,
self.proxy,
self.proxy_auth,
h,
)
@property
def host(self) -> str:
ret = self.url.raw_host
assert ret is not None
return ret
@property
def port(self) -> Optional[int]:
return self.url.port
@property
def request_info(self) -> RequestInfo:
headers = CIMultiDictProxy(self.headers) # type: CIMultiDictProxy[str]
return RequestInfo(self.url, self.method, headers, self.original_url)
def update_host(self, url: URL) -> None:
"""Update destination host, port and connection type (ssl)."""
# get host/port
if not url.raw_host:
raise InvalidURL(url)
# basic auth info
username, password = url.user, url.password
if username:
self.auth = helpers.BasicAuth(username, password or "")
def update_version(self, version: Union[http.HttpVersion, str]) -> None:
"""Convert request version to two elements tuple.
        parse HTTP version '1.1' => (1, 1)
"""
if isinstance(version, str):
v = [part.strip() for part in version.split(".", 1)]
try:
version = http.HttpVersion(int(v[0]), int(v[1]))
except ValueError:
raise ValueError(
f"Can not parse http version number: {version}"
) from None
self.version = version
def update_headers(self, headers: Optional[LooseHeaders]) -> None:
"""Update request headers."""
self.headers = CIMultiDict() # type: CIMultiDict[str]
# add host
netloc = cast(str, self.url.raw_host)
if helpers.is_ipv6_address(netloc):
netloc = f"[{netloc}]"
if self.url.port is not None and not self.url.is_default_port():
netloc += ":" + str(self.url.port)
self.headers[hdrs.HOST] = netloc
if headers:
if isinstance(headers, (dict, MultiDictProxy, MultiDict)):
headers = headers.items() # type: ignore[assignment]
for key, value in headers: # type: ignore[misc]
# A special case for Host header
if key.lower() == "host":
self.headers[key] = value
else:
self.headers.add(key, value)
def update_auto_headers(self, skip_auto_headers: Iterable[str]) -> None:
self.skip_auto_headers = CIMultiDict(
(hdr, None) for hdr in sorted(skip_auto_headers)
)
used_headers = self.headers.copy()
used_headers.extend(self.skip_auto_headers) # type: ignore[arg-type]
for hdr, val in self.DEFAULT_HEADERS.items():
if hdr not in used_headers:
self.headers.add(hdr, val)
if hdrs.USER_AGENT not in used_headers:
self.headers[hdrs.USER_AGENT] = SERVER_SOFTWARE
def update_cookies(self, cookies: Optional[LooseCookies]) -> None:
"""Update request cookies header."""
if not cookies:
return
c = SimpleCookie() # type: SimpleCookie[str]
if hdrs.COOKIE in self.headers:
c.load(self.headers.get(hdrs.COOKIE, ""))
del self.headers[hdrs.COOKIE]
if isinstance(cookies, Mapping):
iter_cookies = cookies.items()
else:
iter_cookies = cookies # type: ignore[assignment]
for name, value in iter_cookies:
if isinstance(value, Morsel):
# Preserve coded_value
mrsl_val = value.get(value.key, Morsel())
mrsl_val.set(value.key, value.value, value.coded_value)
c[name] = mrsl_val
else:
c[name] = value # type: ignore[assignment]
self.headers[hdrs.COOKIE] = c.output(header="", sep=";").strip()
def update_content_encoding(self, data: Any) -> None:
"""Set request content encoding."""
if data is None:
return
enc = self.headers.get(hdrs.CONTENT_ENCODING, "").lower()
if enc:
if self.compress:
raise ValueError(
"compress can not be set " "if Content-Encoding header is set"
)
elif self.compress:
if not isinstance(self.compress, str):
self.compress = "deflate"
self.headers[hdrs.CONTENT_ENCODING] = self.compress
self.chunked = True # enable chunked, no need to deal with length
def update_transfer_encoding(self) -> None:
"""Analyze transfer-encoding header."""
te = self.headers.get(hdrs.TRANSFER_ENCODING, "").lower()
if "chunked" in te:
if self.chunked:
raise ValueError(
"chunked can not be set "
'if "Transfer-Encoding: chunked" header is set'
)
elif self.chunked:
if hdrs.CONTENT_LENGTH in self.headers:
raise ValueError(
"chunked can not be set " "if Content-Length header is set"
)
self.headers[hdrs.TRANSFER_ENCODING] = "chunked"
else:
if hdrs.CONTENT_LENGTH not in self.headers:
self.headers[hdrs.CONTENT_LENGTH] = str(len(self.body))
def update_auth(self, auth: Optional[BasicAuth]) -> None:
"""Set basic auth."""
if auth is None:
auth = self.auth
if auth is None:
return
if not isinstance(auth, helpers.BasicAuth):
raise TypeError("BasicAuth() tuple is required instead")
self.headers[hdrs.AUTHORIZATION] = auth.encode()
def update_body_from_data(self, body: Any) -> None:
if body is None:
return
# FormData
if isinstance(body, FormData):
body = body()
try:
body = payload.PAYLOAD_REGISTRY.get(body, disposition=None)
except payload.LookupError:
boundary = None
if CONTENT_TYPE in self.headers:
boundary = parse_mimetype(self.headers[CONTENT_TYPE]).parameters.get(
"boundary"
)
body = FormData(body, boundary=boundary)()
self.body = body
# enable chunked encoding if needed
if not self.chunked:
if hdrs.CONTENT_LENGTH not in self.headers:
size = body.size
if size is None:
self.chunked = True
else:
if hdrs.CONTENT_LENGTH not in self.headers:
self.headers[hdrs.CONTENT_LENGTH] = str(size)
# copy payload headers
assert body.headers
for (key, value) in body.headers.items():
if key in self.headers:
continue
if key in self.skip_auto_headers:
continue
self.headers[key] = value
def update_expect_continue(self, expect: bool = False) -> None:
if expect:
self.headers[hdrs.EXPECT] = "100-continue"
elif self.headers.get(hdrs.EXPECT, "").lower() == "100-continue":
expect = True
if expect:
self._continue = self.loop.create_future()
def update_proxy(
self,
proxy: Optional[URL],
proxy_auth: Optional[BasicAuth],
proxy_headers: Optional[LooseHeaders],
) -> None:
if proxy_auth and not isinstance(proxy_auth, helpers.BasicAuth):
raise ValueError("proxy_auth must be None or BasicAuth() tuple")
self.proxy = proxy
self.proxy_auth = proxy_auth
self.proxy_headers = proxy_headers
def keep_alive(self) -> bool:
if self.version < HttpVersion10:
# keep alive not supported at all
return False
if self.version == HttpVersion10:
if self.headers.get(hdrs.CONNECTION) == "keep-alive":
return True
else: # no headers means we close for Http 1.0
return False
elif self.headers.get(hdrs.CONNECTION) == "close":
return False
return True
async def write_bytes(
self, writer: AbstractStreamWriter, conn: "Connection"
) -> None:
"""Support coroutines that yields bytes objects."""
# 100 response
if self._continue is not None:
await writer.drain()
await self._continue
protocol = conn.protocol
assert protocol is not None
try:
if isinstance(self.body, payload.Payload):
await self.body.write(writer)
else:
if isinstance(self.body, (bytes, bytearray)):
self.body = (self.body,) # type: ignore[assignment]
for chunk in self.body:
await writer.write(chunk) # type: ignore[arg-type]
await writer.write_eof()
except OSError as exc:
new_exc = ClientOSError(
exc.errno, "Can not write request body for %s" % self.url
)
new_exc.__context__ = exc
new_exc.__cause__ = exc
protocol.set_exception(new_exc)
except asyncio.CancelledError as exc:
if not conn.closed:
protocol.set_exception(exc)
except Exception as exc:
protocol.set_exception(exc)
finally:
self._writer = None
async def send(self, conn: "Connection") -> "ClientResponse":
# Specify request target:
# - CONNECT request must send authority form URI
# - not CONNECT proxy must send absolute form URI
# - most common is origin form URI
if self.method == hdrs.METH_CONNECT:
connect_host = self.url.raw_host
assert connect_host is not None
if helpers.is_ipv6_address(connect_host):
connect_host = f"[{connect_host}]"
path = f"{connect_host}:{self.url.port}"
elif self.proxy and not self.is_ssl():
path = str(self.url)
else:
path = self.url.raw_path
if self.url.raw_query_string:
path += "?" + self.url.raw_query_string
protocol = conn.protocol
assert protocol is not None
writer = StreamWriter(
protocol,
self.loop,
on_chunk_sent=functools.partial(
self._on_chunk_request_sent, self.method, self.url
),
on_headers_sent=functools.partial(
self._on_headers_request_sent, self.method, self.url
),
)
if self.compress:
writer.enable_compression(self.compress)
if self.chunked is not None:
writer.enable_chunking()
# set default content-type
if (
self.method in self.POST_METHODS
and hdrs.CONTENT_TYPE not in self.skip_auto_headers
and hdrs.CONTENT_TYPE not in self.headers
):
self.headers[hdrs.CONTENT_TYPE] = "application/octet-stream"
# set the connection header
connection = self.headers.get(hdrs.CONNECTION)
if not connection:
if self.keep_alive():
if self.version == HttpVersion10:
connection = "keep-alive"
else:
if self.version == HttpVersion11:
connection = "close"
if connection is not None:
self.headers[hdrs.CONNECTION] = connection
# status + headers
status_line = "{0} {1} HTTP/{2[0]}.{2[1]}".format(
self.method, path, self.version
)
await writer.write_headers(status_line, self.headers)
self._writer = self.loop.create_task(self.write_bytes(writer, conn))
response_class = self.response_class
assert response_class is not None
self.response = response_class(
self.method,
self.original_url,
writer=self._writer,
continue100=self._continue,
timer=self._timer,
request_info=self.request_info,
traces=self._traces,
loop=self.loop,
session=self._session,
)
return self.response
async def close(self) -> None:
if self._writer is not None:
try:
await self._writer
finally:
self._writer = None
def terminate(self) -> None:
if self._writer is not None:
if not self.loop.is_closed():
self._writer.cancel()
self._writer = None
async def _on_chunk_request_sent(self, method: str, url: URL, chunk: bytes) -> None:
for trace in self._traces:
await trace.send_request_chunk_sent(method, url, chunk)
async def _on_headers_request_sent(
self, method: str, url: URL, headers: "CIMultiDict[str]"
) -> None:
for trace in self._traces:
await trace.send_request_headers(method, url, headers)
class ClientResponse(HeadersMixin):
# from the Status-Line of the response
version = None # HTTP-Version
status = None # type: int # Status-Code
reason = None # Reason-Phrase
content = None # type: StreamReader # Payload stream
_headers = None # type: CIMultiDictProxy[str] # Response headers
_raw_headers = None # type: RawHeaders # Response raw headers
_connection = None # current connection
_source_traceback = None
    # set up by ClientRequest after ClientResponse object creation
    # post-init stage allows not changing the ctor signature
    _closed = True  # to allow __del__ for a not properly initialized response
_released = False
def __init__(
self,
method: str,
url: URL,
*,
writer: "asyncio.Task[None]",
continue100: Optional["asyncio.Future[bool]"],
timer: BaseTimerContext,
request_info: RequestInfo,
traces: List["Trace"],
loop: asyncio.AbstractEventLoop,
session: "ClientSession",
) -> None:
assert isinstance(url, URL)
super().__init__()
self.method = method
self.cookies = SimpleCookie() # type: SimpleCookie[str]
self._real_url = url
self._url = url.with_fragment(None)
self._body = None # type: Optional[bytes]
self._writer = writer # type: Optional[asyncio.Task[None]]
self._continue = continue100 # None by default
self._closed = True
self._history = () # type: Tuple[ClientResponse, ...]
self._request_info = request_info
self._timer = timer if timer is not None else TimerNoop()
self._cache = {} # type: Dict[str, Any]
self._traces = traces
self._loop = loop
# store a reference to session #1985
self._session = session # type: Optional[ClientSession]
if loop.get_debug():
self._source_traceback = traceback.extract_stack(sys._getframe(1))
@reify
def url(self) -> URL:
return self._url
@reify
def real_url(self) -> URL:
return self._real_url
@reify
def host(self) -> str:
assert self._url.host is not None
return self._url.host
@reify
def headers(self) -> "CIMultiDictProxy[str]":
return self._headers
@reify
def raw_headers(self) -> RawHeaders:
return self._raw_headers
@reify
def request_info(self) -> RequestInfo:
return self._request_info
@reify
def content_disposition(self) -> Optional[ContentDisposition]:
raw = self._headers.get(hdrs.CONTENT_DISPOSITION)
if raw is None:
return None
disposition_type, params_dct = multipart.parse_content_disposition(raw)
params = MappingProxyType(params_dct)
filename = multipart.content_disposition_filename(params)
return ContentDisposition(disposition_type, params, filename)
def __del__(self, _warnings: Any = warnings) -> None:
if self._closed:
return
if self._connection is not None:
self._connection.release()
self._cleanup_writer()
if self._loop.get_debug():
_warnings.warn(
f"Unclosed response {self!r}", ResourceWarning, source=self
)
context = {"client_response": self, "message": "Unclosed response"}
if self._source_traceback:
context["source_traceback"] = self._source_traceback
self._loop.call_exception_handler(context)
def __repr__(self) -> str:
out = io.StringIO()
ascii_encodable_url = str(self.url)
if self.reason:
ascii_encodable_reason = self.reason.encode(
"ascii", "backslashreplace"
).decode("ascii")
else:
ascii_encodable_reason = self.reason
print(
"<ClientResponse({}) [{} {}]>".format(
ascii_encodable_url, self.status, ascii_encodable_reason
),
file=out,
)
print(self.headers, file=out)
return out.getvalue()
@property
def connection(self) -> Optional["Connection"]:
return self._connection
@reify
def history(self) -> Tuple["ClientResponse", ...]:
"""A sequence of responses, if redirects occurred."""
return self._history
@reify
def links(self) -> "MultiDictProxy[MultiDictProxy[Union[str, URL]]]":
links_str = ", ".join(self.headers.getall("link", []))
if not links_str:
return MultiDictProxy(MultiDict())
links = MultiDict() # type: MultiDict[MultiDictProxy[Union[str, URL]]]
for val in re.split(r",(?=\s*<)", links_str):
match = re.match(r"\s*<(.*)>(.*)", val)
if match is None: # pragma: no cover
# the check exists to suppress mypy error
continue
url, params_str = match.groups()
params = params_str.split(";")[1:]
link = MultiDict() # type: MultiDict[Union[str, URL]]
for param in params:
match = re.match(r"^\s*(\S*)\s*=\s*(['\"]?)(.*?)(\2)\s*$", param, re.M)
if match is None: # pragma: no cover
# the check exists to suppress mypy error
continue
key, _, value, _ = match.groups()
link.add(key, value)
key = link.get("rel", url) # type: ignore[assignment]
link.add("url", self.url.join(URL(url)))
links.add(key, MultiDictProxy(link))
return MultiDictProxy(links)
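    # Hedged illustration (not part of aiohttp itself): for a response carrying
    # the header `Link: <https://example.com/page2>; rel="next"`, the property
    # above yields roughly
    #   resp.links["next"]["url"]  -> URL('https://example.com/page2')
    #   resp.links["next"]["rel"]  -> 'next'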
async def start(self, connection: "Connection") -> "ClientResponse":
"""Start response processing."""
self._closed = False
self._protocol = connection.protocol
self._connection = connection
with self._timer:
while True:
# read response
try:
protocol = self._protocol
message, payload = await protocol.read() # type: ignore[union-attr]
except http.HttpProcessingError as exc:
raise ClientResponseError(
self.request_info,
self.history,
status=exc.code,
message=exc.message,
headers=exc.headers,
) from exc
if message.code < 100 or message.code > 199 or message.code == 101:
break
if self._continue is not None:
set_result(self._continue, True)
self._continue = None
# payload eof handler
payload.on_eof(self._response_eof)
# response status
self.version = message.version
self.status = message.code
self.reason = message.reason
# headers
self._headers = message.headers # type is CIMultiDictProxy
self._raw_headers = message.raw_headers # type is Tuple[bytes, bytes]
# payload
self.content = payload
# cookies
for hdr in self.headers.getall(hdrs.SET_COOKIE, ()):
try:
self.cookies.load(hdr)
except CookieError as exc:
client_logger.warning("Can not load response cookies: %s", exc)
return self
def _response_eof(self) -> None:
if self._closed:
return
if self._connection is not None:
# websocket, protocol could be None because
# connection could be detached
if (
self._connection.protocol is not None
and self._connection.protocol.upgraded
):
return
self._connection.release()
self._connection = None
self._closed = True
self._cleanup_writer()
@property
def closed(self) -> bool:
return self._closed
def close(self) -> None:
if not self._released:
self._notify_content()
if self._closed:
return
self._closed = True
if self._loop is None or self._loop.is_closed():
return
if self._connection is not None:
self._connection.close()
self._connection = None
self._cleanup_writer()
def release(self) -> Any:
if not self._released:
self._notify_content()
if self._closed:
return noop()
self._closed = True
if self._connection is not None:
self._connection.release()
self._connection = None
self._cleanup_writer()
return noop()
@property
def ok(self) -> bool:
"""Returns ``True`` if ``status`` is less than ``400``, ``False`` if not.
This is **not** a check for ``200 OK`` but a check that the response
status is under 400.
"""
return 400 > self.status
def raise_for_status(self) -> None:
if not self.ok:
# reason should always be not None for a started response
assert self.reason is not None
self.release()
raise ClientResponseError(
self.request_info,
self.history,
status=self.status,
message=self.reason,
headers=self.headers,
)
def _cleanup_writer(self) -> None:
if self._writer is not None:
self._writer.cancel()
self._writer = None
self._session = None
def _notify_content(self) -> None:
content = self.content
if content and content.exception() is None:
content.set_exception(ClientConnectionError("Connection closed"))
self._released = True
async def wait_for_close(self) -> None:
if self._writer is not None:
try:
await self._writer
finally:
self._writer = None
self.release()
async def read(self) -> bytes:
"""Read response payload."""
if self._body is None:
try:
self._body = await self.content.read()
for trace in self._traces:
await trace.send_response_chunk_received(
self.method, self.url, self._body
)
except BaseException:
self.close()
raise
elif self._released:
raise ClientConnectionError("Connection closed")
return self._body
def get_encoding(self) -> str:
ctype = self.headers.get(hdrs.CONTENT_TYPE, "").lower()
mimetype = helpers.parse_mimetype(ctype)
encoding = mimetype.parameters.get("charset")
if encoding:
try:
codecs.lookup(encoding)
except LookupError:
encoding = None
if not encoding:
if mimetype.type == "application" and (
mimetype.subtype == "json" or mimetype.subtype == "rdap"
):
# RFC 7159 states that the default encoding is UTF-8.
# RFC 7483 defines application/rdap+json
encoding = "utf-8"
elif self._body is None:
raise RuntimeError(
"Cannot guess the encoding of " "a not yet read body"
)
else:
encoding = chardet.detect(self._body)["encoding"]
if not encoding:
encoding = "utf-8"
return encoding
async def text(self, encoding: Optional[str] = None, errors: str = "strict") -> str:
"""Read response payload and decode."""
if self._body is None:
await self.read()
if encoding is None:
encoding = self.get_encoding()
return self._body.decode(encoding, errors=errors) # type: ignore[union-attr]
async def json(
self,
*,
encoding: Optional[str] = None,
loads: JSONDecoder = DEFAULT_JSON_DECODER,
content_type: Optional[str] = "application/json",
) -> Any:
"""Read and decodes JSON response."""
if self._body is None:
await self.read()
if content_type:
ctype = self.headers.get(hdrs.CONTENT_TYPE, "").lower()
if not is_expected_content_type(ctype, content_type):
raise ContentTypeError(
self.request_info,
self.history,
message=(
"Attempt to decode JSON with " "unexpected mimetype: %s" % ctype
),
headers=self.headers,
)
if encoding is None:
encoding = self.get_encoding()
return loads(self._body.decode(encoding)) # type: ignore[union-attr]
async def __aenter__(self) -> "ClientResponse":
return self
async def __aexit__(
self,
exc_type: Optional[Type[BaseException]],
exc_val: Optional[BaseException],
exc_tb: Optional[TracebackType],
) -> None:
# similar to _RequestContextManager, we do not need to check
# for exceptions, response object can close connection
# if state is broken
self.release()
|
from django.contrib import admin
from .models import Document
admin.site.register(Document)
|
#!/usr/bin/env python2.4
import sys
from z3c.rml import rml2pdf
for arg in sys.argv[1:]:
rml2pdf.go(arg)
|
from django.contrib import admin
from .models import Receipt, Item, User, Cover, Payment
admin.site.register(Receipt)
admin.site.register(Item)
admin.site.register(User)
admin.site.register(Cover)
admin.site.register(Payment)
|
from pathlib import Path
import subprocess
root_path = Path(__file__).parent.parent.resolve()
extensions = [
'sphinx.ext.todo',
'sphinxcontrib.drawio',
]
version = (root_path / 'VERSION').read_text().strip()
project = 'hat-stc'
copyright = '2020-2021, Hat Open AUTHORS'
master_doc = 'index'
html_theme = 'furo'
html_static_path = ['static']
html_css_files = ['custom.css']
html_use_index = False
html_show_sourcelink = False
html_show_sphinx = False
html_sidebars = {'**': ["sidebar/brand.html",
"sidebar/scroll-start.html",
"sidebar/navigation.html",
"sidebar/scroll-end.html"]}
todo_include_todos = True
p = subprocess.run(['which', 'drawio'], capture_output=True, check=True)
drawio_binary_path = p.stdout.decode('utf-8').strip()
|
"""***********************************************************************
This file was created by Astraea, Inc., 2018 from an excerpt of the
original:
Copyright (c) 2013-2018 Commonwealth Computer Research, Inc.
All rights reserved. This program and the accompanying materials
are made available under the terms of the Apache License, Version 2.0
which accompanies this distribution and is available at
http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************"""
from pyspark.sql.types import UserDefinedType
from pyspark.sql import Row
from pyspark.sql.types import *
from pyrasterframes.context import RFContext
class GeometryUDT(UserDefinedType):
@classmethod
def sqlType(self):
# return StructField("wkb", BinaryType(), False)
return StructType([StructField("wkb", BinaryType(), True)])
@classmethod
def module(cls):
return 'geomesa_pyspark.types'
@classmethod
def scalaUDT(cls):
return 'org.apache.spark.sql.jts.' + cls.__name__
def serialize(self, obj):
if (obj is None): return None
return Row(obj.toBytes)
def deserialize(self, datum):
return RFContext._jvm_mirror().generate_geometry(datum[0])
class PointUDT(GeometryUDT):
pass
class LineStringUDT(GeometryUDT):
pass
class PolygonUDT(GeometryUDT):
pass
class MultiPointUDT(GeometryUDT):
pass
class MultiLineStringUDT(GeometryUDT):
pass
class MultiPolygonUDT(GeometryUDT):
pass
class GeometryUDT(GeometryUDT):
pass
class GeometryCollectionUDT(GeometryUDT):
pass
|
"""Joystick action space for controlling agent avatars."""
from . import abstract_action_space
from dm_env import specs
import numpy as np
class Joystick(abstract_action_space.AbstractActionSpace):
"""Joystick action space."""
def __init__(self, scaling_factor=1., action_layers='agent',
constrained_lr=False, control_velocity=False, momentum=0.):
"""Constructor.
Args:
scaling_factor: Scalar. Scaling factor multiplied to the action.
            action_layers: String or iterable of strings. Elements (or itself if
                string) must be keys in the environment state. All sprites in
                these layers will be acted upon by this action space.
            control_velocity: Bool. Whether to control velocity (True) or force
                (False).
            constrained_lr: Bool. If True, joystick is constrained to actions
                parallel to the x-axis, by zeroing out the y-axis (component 1)
                of the action.
momentum: Float in [0, 1]. Discount factor for previous action. This
should be zero if control_velocity is False, because imparting
forces automatically gives momentum to the agent(s) being
controlled. If control_velocity is True, setting this greater
than zero gives the controlled agent(s) momentum. However, the
velocity is clipped at scaling_factor, so the agent only retains
momentum when stopping or changing direction and does not
accelerate.
"""
self._scaling_factor = scaling_factor
if not isinstance(action_layers, (list, tuple)):
action_layers = (action_layers,)
self._action_layers = action_layers
self._constrained_lr = constrained_lr
self._control_velocity = control_velocity
self._momentum = momentum
self._action_spec = specs.BoundedArray(
shape=(2,), dtype=np.float32, minimum=-1, maximum=1)
def step(self, state, action):
"""Apply action to environment state.
Args:
state: OrderedDict. Environment state.
action: Numpy float array of size (2) in [-1, 1]. Force to apply.
"""
if self._constrained_lr:
action[1] = 0.
self._action *= self._momentum
self._action += self._scaling_factor * action
self._action = np.clip(
self._action, -self._scaling_factor, self._scaling_factor)
for action_layer in self._action_layers:
for sprite in state[action_layer]:
if self._control_velocity:
sprite.velocity = self._action / sprite.mass
else:
sprite.velocity += self._action / sprite.mass
def reset(self, state):
"""Reset action space at start of new episode."""
del state
self._action = np.zeros(2)
def random_action(self):
"""Return randomly sampled action."""
return np.random.uniform(-1., 1., size=(2,))
def action_spec(self):
return self._action_spec
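# Hedged usage sketch (not part of the original module): driving a sprite layer with
# the Joystick action space. `state` is a hypothetical environment state mapping
# layer names to lists of sprites, as the docstrings above assume.
#
#   joystick = Joystick(scaling_factor=0.5, action_layers='agent',
#                       control_velocity=True, momentum=0.9)
#   joystick.reset(state)
#   action = joystick.random_action()   # float array of shape (2,) in [-1, 1]
#   joystick.step(state, action)        # updates velocity of sprites in 'agent'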
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_geocodio
----------------------------------
Tests for `geocodio.data` module.
"""
import json
import os
import unittest
from geocodio.data import Address
from geocodio.data import Location
from geocodio.data import LocationCollection
class TestDataTypes(unittest.TestCase):
def setUp(self):
"""
Read the test data from JSON files which are modified from actual
service response only for formatting. This makes this file much easier
to read, the data easier to inspect, and ensures that the data matches
what the service actually replies with.
"""
fixtures = os.path.join(os.path.dirname(os.path.abspath(__file__)), "response/")
with open(os.path.join(fixtures, "single.json"), "r") as single_json:
self.single_response = json.loads(single_json.read())
with open(os.path.join(fixtures, "batch.json"), "r") as batch_json:
self.batch_response = json.loads(batch_json.read())
with open(os.path.join(fixtures, "address.json"), "r") as address_json:
self.address_response = json.loads(address_json.read())
with open(os.path.join(fixtures, "missing_results.json"), "r") as missing_json:
self.missing_results = json.loads(missing_json.read())
with open(
os.path.join(fixtures, "batch_reverse.json"), "r"
) as batch_reverse_json:
self.batch_reverse_response = json.loads(batch_reverse_json.read())
def test_address_coords(self):
"""Ensure Address.coords property returns None when no location"""
x = Address(self.address_response)
self.assertEqual(None, x.coords)
def test_address_accuracy(self):
"""Ensure Address.accuracy property returns None when no location"""
x = Address(self.address_response)
self.assertEqual(None, x.accuracy)
def test_location_coords(self):
"""Ensure Location.coords property returns a suitable tuple"""
x = Location(self.single_response)
self.assertEqual(x.coords, (37.554895702703, -77.457561054054))
# Do the same with the order changed
x = Location(self.single_response, order="lng")
self.assertEqual(x.coords, (-77.457561054054, 37.554895702703))
def test_location_results_missing(self):
"""Ensure empty results are processed as a missing address"""
bad_results = Location(self.missing_results)
self.assertEqual(bad_results.coords, None)
def test_collection(self):
"""Ensure that the LocationCollection stores as a list of Locations"""
self.assertTrue(isinstance(self.batch_response, dict))
locations = LocationCollection(self.batch_response["results"])
self.assertTrue(isinstance(locations[0], Location))
locations = LocationCollection(self.batch_reverse_response["results"])
self.assertTrue(isinstance(locations[0], Location))
def test_collection_coords(self):
"""Ensure the coords property returns a list of suitable tuples"""
locations = LocationCollection(self.batch_response["results"])
self.assertEqual(
locations.coords,
[
(37.560890255102, -77.477400571429),
(37.554895702703, -77.457561054054),
None,
],
)
# Do the same with the order changed
locations = LocationCollection(self.batch_response["results"], order="lng")
self.assertEqual(
locations.coords,
[
(-77.477400571429, 37.560890255102),
(-77.457561054054, 37.554895702703),
None,
],
)
def test_collection_addresses(self):
"""Ensure that formatted addresses are returned"""
locations = LocationCollection(self.batch_response["results"])
self.assertEqual(
locations.formatted_addresses,
[
"3101 Patterson Ave, Richmond VA, 23221",
"1657 W Broad St, Richmond VA, 23220",
"",
],
)
def test_collection_get(self):
"""Ensure 'get' performs a key based lookup"""
locations = LocationCollection(self.batch_response["results"])
self.assertEqual(
locations.get("3101 patterson ave, richmond, va").coords,
(37.560890255102, -77.477400571429),
)
# Case sensitive on the specific query
self.assertRaises(KeyError, locations.get, "3101 Patterson Ave, richmond, va")
locations = LocationCollection(self.batch_reverse_response["results"])
        # The rendered query string value is acceptable
self.assertEqual(
locations.get("37.538758,-77.433594").coords, (37.538758, -77.433594)
)
# A tuple of floats is acceptable
self.assertEqual(
locations.get((37.538758, -77.433594)).coords, (37.538758, -77.433594)
)
# If it can be coerced to a float it is acceptable
self.assertEqual(
locations.get(("37.538758", "-77.433594")).coords, (37.538758, -77.433594)
)
# This is unacceptable
self.assertRaises(ValueError, locations.get, ("37.538758 N", "-77.433594 W"))
if __name__ == "__main__":
unittest.main()
|
# -*- coding: utf-8 -*-
#
# Sphinx configuration
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import sys
import time
import aiida_nwchem
from aiida.manage.configuration import load_documentation_profile
# -- AiiDA-related setup --------------------------------------------------
# Load the dummy profile even if we are running locally, this way the documentation will succeed even if the current
# default profile of the AiiDA installation does not use a Django backend.
load_documentation_profile()
# If we are not on READTHEDOCS load the Sphinx theme manually
if not os.environ.get('READTHEDOCS', None):
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# -- General configuration ------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# If your documentation needs a minimal Sphinx version, state it here.
needs_sphinx = '1.5'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.mathjax',
'sphinx.ext.intersphinx',
'sphinx.ext.viewcode',
'sphinxcontrib.contentui',
'aiida.sphinxext',
'sphinxcontrib.napoleon',
]
intersphinx_mapping = {
'python': ('https://docs.python.org/3', None),
'aiida': ('https://aiida-core.readthedocs.io/en/latest', None),
}
nitpick_ignore = [('py:obj', 'module')]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
#~ master_doc = 'index'
master_doc = 'index'
# General information about the project.
project = u'aiida-nwchem'
copyright_first_year = "2021"
copyright_owners = "The AiiDA Team"
current_year = str(time.localtime().tm_year)
copyright_year_string = current_year if current_year == copyright_first_year else "{}-{}".format(
copyright_first_year, current_year)
# pylint: disable=redefined-builtin
copyright = u'{}, {}. All rights reserved'.format(copyright_year_string,
copyright_owners)
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The full version, including alpha/beta/rc tags.
release = aiida_nwchem.__version__
# The short X.Y version.
version = '.'.join(release.split('.')[:2])
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# exclude_patterns = ['doc.rst']
#~ exclude_patterns = ['index.rst']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
show_authors = True
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#~ html_theme = 'basicstrap'
## SET BELOW
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
'display_version': True,
}
# Add any paths that contain custom themes here, relative to this directory.
#~ html_theme_path = ["."]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = "images/.png"
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = "images/favicon.ico"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
#html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
html_show_sourcelink = False
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#~ html_show_copyright = False
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
html_use_opensearch = 'http://aiida-nwchem.readthedocs.io'
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'aiida-nwchem-doc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
# latex_documents = [
# ]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# NOTE: Disabling API docs
# def run_apidoc(_):
# """Runs sphinx-apidoc when building the documentation.
# Needs to be done in conf.py in order to include the APIdoc in the
# build on readthedocs.
# See also https://github.com/rtfd/readthedocs.org/issues/1139
# """
# source_dir = os.path.abspath(os.path.dirname(__file__))
# apidoc_dir = os.path.join(source_dir, 'apidoc')
# package_dir = os.path.join(source_dir, os.pardir, os.pardir, 'aiida_nwchem')
# # In #1139, they suggest the route below, but this ended up
# # calling sphinx-build, not sphinx-apidoc
# #from sphinx.apidoc import main
# #main([None, '-e', '-o', apidoc_dir, package_dir, '--force'])
# import subprocess
# cmd_path = 'sphinx-apidoc'
# if hasattr(sys, 'real_prefix'): # Check to see if we are in a virtualenv
# # If we are, assemble the path manually
# cmd_path = os.path.abspath(
# os.path.join(sys.prefix, 'bin', 'sphinx-apidoc'))
# options = [
# '-o',
# apidoc_dir,
# package_dir,
# '--private',
# '--force',
# '--no-toc',
# ]
# # See https://stackoverflow.com/a/30144019
# env = os.environ.copy()
# env["SPHINX_APIDOC_OPTIONS"] = 'members,special-members,private-members,undoc-members,show-inheritance'
# subprocess.check_call([cmd_path] + options, env=env)
# def setup(app):
# app.connect('builder-inited', run_apidoc)
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
# man_pages = [
# ]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
# texinfo_documents = [
# ]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# Warnings to ignore when using the -n (nitpicky) option
# We should ignore any python built-in exception, for instance
nitpick_ignore = []
|
from machinetranslation import translator
from flask import Flask, render_template, request
import json
app = Flask("Web Translator")
@app.route("/englishToFrench")
def englishToFrench():
textToTranslate = request.args.get('textToTranslate')
translation = translator.englishToFrench(englishText=textToTranslate)
return f"{translation} (Translated text to French)"
@app.route("/frenchToEnglish")
def frenchToEnglish():
textToTranslate = request.args.get('textToTranslate')
translation = translator.frenchToEnglish(frenchText=textToTranslate)
return f"{translation} (Translated text to English)"
@app.route("/")
def renderIndexPage():
return render_template('index.html')
if __name__ == "__main__":
app.run(host="0.0.0.0", port=8080)
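# Hedged usage sketch (not part of the original module): with the server running on
# port 8080, the two routes can be exercised from a separate client process. The
# `requests` library used below is a hypothetical client choice, not a dependency
# of this app.
#
#   import requests
#   r = requests.get("http://localhost:8080/englishToFrench",
#                    params={"textToTranslate": "Hello"})
#   print(r.text)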
|
import os
import pprint
import inspect
import tensorflow as tf
pp = pprint.PrettyPrinter().pprint
def class_vars(obj):
  return {k: v for k, v in inspect.getmembers(obj)
          if not k.startswith("__") and not callable(v)}
class base_model(object):
def __init__(self, config):
self._saver = None
self.config = config
try:
self._attrs = config.__dict__["__flag"]
except:
self._attrs = class_vars(config)
pp(self._attrs)
self.config = config
for attr in self._attrs:
name = attr if not attr.startswith("_") else attr[1:]
setattr(self, name, getattr(self.config, attr))
def save_model(self, step=None):
print(" [*] Saving checkpoint --->>")
model_name = type(self).__name__
if not os.path.exists(self.checkpoint_dir):
os.makedirs(self.checkpoint_dir)
self.saver.save(self.sess, self.checkpoint_dir, global_step=step)
def load_model(self):
print(" [*] Loading checkpoint <<--- ")
    ckpt = tf.train.get_checkpoint_state(self.checkpoint_dir)
    if ckpt and ckpt.model_checkpoint_path:
      ckpt_name = os.path.basename(ckpt.model_checkpoint_path)
      fname = os.path.join(self.checkpoint_dir, ckpt_name)
      self.saver.restore(self.sess, fname)
      print(" [*] Load succeeded :D : %s" % fname)
return True
else:
print(" [*] Load failed :( : %s " % self.checkpoint_dir)
return False
@property
def model_dir(self):
model_dir = self.config.env_name
for k, v in self._attrs.items():
if not k.startswith("_") and k not in ["display"]:
model_dir += "/%s-%s" % (k, ",".join([str(i) for i in v ])
if type(v) == list else v)
return model_dir + "/"
@property
def saver(self):
    if self._saver is None:
self._saver = tf.train.Saver(max_to_keep=10)
return self._saver
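# Hedged usage sketch (not part of the original module): base_model copies every
# non-private attribute of `config` onto itself, so a simple object with class
# attributes is enough for illustration. The attribute names below are hypothetical.
#
#   class Config(object):
#     env_name = 'Breakout-v0'
#     checkpoint_dir = 'checkpoints/'
#     display = False
#
#   model = base_model(Config())
#   print(model.env_name)    # 'Breakout-v0', copied from the config
#   print(model.model_dir)   # path built from env_name plus the other attrs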
|
"""
This file offers the methods to automatically retrieve the graph Lachnospiraceae bacterium NK4A144.
The graph is automatically retrieved from the STRING repository.
References
---------------------
Please cite the following if you use the data:
```bib
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
```
"""
from typing import Dict
from ..automatic_graph_retrieval import AutomaticallyRetrievedGraph
from ...ensmallen import Graph # pylint: disable=import-error
def LachnospiraceaeBacteriumNk4a144(
directed: bool = False,
preprocess: bool = True,
load_nodes: bool = True,
verbose: int = 2,
cache: bool = True,
cache_path: str = "graphs/string",
version: str = "links.v11.5",
**additional_graph_kwargs: Dict
) -> Graph:
"""Return new instance of the Lachnospiraceae bacterium NK4A144 graph.
The graph is automatically retrieved from the STRING repository.
Parameters
-------------------
directed: bool = False
        Whether to load the graph as directed or undirected.
By default false.
preprocess: bool = True
Whether to preprocess the graph to be loaded in
optimal time and memory.
load_nodes: bool = True,
Whether to load the nodes vocabulary or treat the nodes
simply as a numeric range.
verbose: int = 2,
        Whether to show loading bars during the retrieval and building
of the graph.
cache: bool = True
Whether to use cache, i.e. download files only once
and preprocess them only once.
cache_path: str = "graphs"
Where to store the downloaded graphs.
version: str = "links.v11.5"
The version of the graph to retrieve.
The available versions are:
- homology.v11.0
- homology.v11.5
- physical.links.v11.0
- physical.links.v11.5
- links.v11.0
- links.v11.5
additional_graph_kwargs: Dict
Additional graph kwargs.
Returns
-----------------------
    Instance of Lachnospiraceae bacterium NK4A144 graph.
References
---------------------
Please cite the following if you use the data:
```bib
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
```
"""
return AutomaticallyRetrievedGraph(
graph_name="LachnospiraceaeBacteriumNk4a144",
repository="string",
version=version,
directed=directed,
preprocess=preprocess,
load_nodes=load_nodes,
verbose=verbose,
cache=cache,
cache_path=cache_path,
additional_graph_kwargs=additional_graph_kwargs
)()
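# Hedged usage sketch (not part of the original module): retrieving the graph with
# the default settings and inspecting the returned ensmallen Graph instance.
#
#   graph = LachnospiraceaeBacteriumNk4a144(directed=False, version="links.v11.5")
#   print(graph)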
|
#
# Copyright 2019 Xilinx Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import xir_extra_ops
def jit(graph):
graph.set_attr("xmodel_preprocessor", "libxmodel_preprocessor_efficientnet.so.1")
graph.set_attr("need_preprocess", True)
graph.set_attr("mean", [127.0, 127.0, 127.0])
graph.set_attr("scale", [0.0078125, 0.0078125, 0.0078125])
graph.set_attr("is_rgb_input", True)
labels_list = open(os.path.join(
graph.get_attr("__dir__"), "word_list.txt"), "r").read().splitlines()
labels_list_1001 = ["background,"]
labels_list_1001.extend(labels_list)
graph.set_attr("labels", labels_list_1001)
xir_extra_ops.set_postprocessor(
graph, "libxmodel_postprocessor_classification.so.1", {"input": ["my_topk"]})
graph.remove_op(graph.get_op(
"logits_fix"))
conf_op = graph.get_op("efficientnet-edgetpu-S/model/head/dense/BiasAdd/aquant_logits")
graph.create_op("aquant_logits_softmax", "softmax",
attrs={"axis": -1},
input_ops={"input": [conf_op]},
subgraph=graph.get_leaf_subgraph(conf_op))
graph.create_op("my_topk", "topk",
attrs={"K": 5},
input_ops={"input": [graph.get_op("aquant_logits_softmax")]},
subgraph=graph.get_leaf_subgraph(graph.get_op("aquant_logits_softmax")))
#graph.save_as_image(os.path.join(graph.get_attr("__dir__"), graph.get_attr("__basename__") + ".jit.svg"), "svg")
#graph.serialize(os.path.join(graph.get_attr("__dir__"), graph.get_attr("__basename__") + ".jit.xmodel"))
print(graph.get_name())
|
# -*- coding: utf-8 -*-
from typing import Any, Iterable, List, Optional
from aiohttp import FormData as _FormData
import aiohttp.multipart as multipart
class FormData(_FormData):
def __init__(
self,
fields: Iterable[Any] = (),
quote_fields: bool = True,
charset: Optional[str] = None,
boundary: Optional[str] = None
) -> None:
self._writer = multipart.MultipartWriter("form-data", boundary=boundary)
self._fields = [] # type: List[Any]
self._is_multipart = False
self._is_processed = False
self._quote_fields = quote_fields
self._charset = charset
if isinstance(fields, dict):
fields = list(fields.items())
elif not isinstance(fields, (list, tuple)):
fields = (fields,)
self.add_fields(*fields)
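# Hedged usage sketch (not part of the original module): a dict of fields is
# converted to (name, value) pairs before being added via add_fields.
#
#   form = FormData({"name": "alice", "age": "30"})
#   # equivalent to FormData([("name", "alice"), ("age", "30")])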
|
# podd_utils.py
# Utility functions for Podd SCons build
import os
from SCons.Action import ActionFactory
from SCons.Script.SConscript import SConsEnvironment
SConsEnvironment.OSCommand = ActionFactory(os.system,
lambda command : 'os.system("%s")' % command)
import SCons.Util
def list_to_path(lst):
result = ''
if SCons.Util.is_List(lst):
for element in lst:
result += str(element)
if len(lst) > 1 and element != lst[-1]:
result += ':'
else:
result = lst
return result
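# Hedged illustration (not part of the original module): list_to_path joins a list
# into a colon-separated string and passes plain strings through unchanged, e.g.
#
#   list_to_path(['/usr/lib', '/usr/local/lib'])   # -> '/usr/lib:/usr/local/lib'
#   list_to_path('/usr/lib')                       # -> '/usr/lib'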
def InstallWithRPATH(env, dest, files, rpath):
obj = env.Install(dest, files)
if env['PLATFORM'] == 'posix':
rpathstr = list_to_path(rpath)
if env.WhereIs('patchelf'):
if env.subst('$ADD_INSTALL_RPATH') and rpathstr:
patch_cmd = "patchelf --force-rpath --set-rpath '"+rpathstr+"' "
else:
patch_cmd = "patchelf --remove-rpath "
for i in obj:
env.AddPostAction(i, env.OSCommand(patch_cmd+str(i)))
elif env.WhereIs('chrpath') \
and not (env.subst('$ADD_INSTALL_RPATH') and rpathstr):
# chrpath can only reliably delete RPATH
for i in obj:
env.AddPostAction(i, env.OSCommand('chrpath -d '+str(i)))
else:
print('WARNING: patchelf not found, cannot set RPATH')
elif env['PLATFORM'] == 'darwin':
for i in obj:
env.AddPostAction(i, env.OSCommand("site_scons/clear_macos_rpath.sh "+str(i)))
if env.subst('$ADD_INSTALL_RPATH') and rpath:
tool_cmd = "install_name_tool"
# rpath could contain empty strings or be ['']
add_to_cmd = ''
for rp in rpath:
if rp:
add_to_cmd += " -add_rpath "+str(rp)
if add_to_cmd:
tool_cmd += add_to_cmd+" "+str(i)
env.AddPostAction(i, env.OSCommand(tool_cmd))
return obj
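# Hedged usage sketch (not part of the original module): installing a library and
# stamping an RPATH onto the installed copy, assuming a SCons environment `env`
# with $ADD_INSTALL_RPATH enabled. The destination and RPATH entries below are
# hypothetical.
#
#   InstallWithRPATH(env, '/opt/podd/lib', ['libPodd.so'],
#                    rpath=['$$ORIGIN', '/opt/podd/lib'])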
def create_uninstall_target(env, path, is_glob = False):
if is_glob:
all_files = env.Glob(path,strings=True)
for filei in all_files:
print('Delete(%s)' % filei)
env.Alias("uninstall", os.remove(filei))
else:
print('Delete(%s)' % path)
if os.path.exists(path):
env.Alias("uninstall", os.remove(path))
import SCons.Script
import re
def build_library(env, sotarget, src, extrahdrs = [], extradicthdrs = [],
dictname = None, useenv = True, versioned = False,
install_rpath = []):
"""
Build shared library lib<sotarget> of ROOT classes from given sources "src"
(space separated string). For each .cxx source file, a .h header file is
expected.
A ROOT dictionary is generated from the headers and compiled into the library.
The otional "extradicthdrs" headers will be added to the list of headers used
for generating the dictionary.
If the dictionary source file should have a different name than <sotarget>Dict.cxx,
specify that name as "dictname". The dictionary will expect a link definition file
<dictname>_LinkDef.h.
All sources and corresponding headers and the "extradicthdrs" will be installed
in the source and include file installation locations. If any additional headers
    should be installed, specify them in "extrahdrs". "extrahdrs" will not be passed to
the ROOT dictionary generator, which is useful e.g. for non-ROOT class standalone
headers such as local utility classes or global definitions.
If "useenv" is True, the library will link against libraries in $LIB and search for
libraires in $LIBPATH. Otherwise, no external libraries will be linked.
Other environment variables (compiler flags, include directives, RPATH etc.)
are not affected by this flag
"install_rpath" is a list of directories that will be set on the installed library
if RPATH installation is globally enabled with $ADD_INSTALL_RPATH. Literal "$" signs
in any of these list elements (e.g. $ORIGIN) need to be given as "$$" (e.g. $$ORIGIN).
"""
# Current location relative to top directory
thisdir_fullpath = env.Dir('.').path
thisdir = os.path.basename(os.path.normpath(thisdir_fullpath))
if not dictname:
dictname = sotarget
if useenv:
linklibs = env['LIBS']
linklibpath = env['LIBPATH']
else:
linklibs = ['']
linklibpath = ['']
# Sources and headers
srclist = env.Split(src)
hdr = re.sub(r'\.cxx','.h',src)
installhdrs = env.Split(hdr)
installhdrs.extend(extradicthdrs)
installhdrs.extend(extrahdrs)
dicthdrs = env.Split(hdr)
dicthdrs.extend(extradicthdrs)
dicthdrs.append(dictname+'_LinkDef.h')
# ROOT dictionary for this library
rootdict = dictname+'Dict.cxx'
libbase = env.subst('$SHLIBPREFIX')+sotarget
thedict = env.RootCint(rootdict, dicthdrs, PCMNAME=libbase)
# Versioned shared library symlink names
libname_so = libbase+env.subst('$SHLIBSUFFIX')
if env['PLATFORM'] == 'posix':
# Linux
if versioned:
libname_soname = libname_so+'.'+env.subst('$SOVERSION')
libname_versioned = libname_so+'.'+env.subst('$VERSION')
shlibsuffix = env.subst('$SHLIBSUFFIX')+'.'+env.subst('$VERSION')
else:
libname_soname = libname_so
shlibsuffix = env.subst('$SHLIBSUFFIX')
shlinkflags = ['-Wl,-soname='+libname_soname]
elif env['PLATFORM'] == 'darwin':
# macOS
if versioned:
libname_soname = libbase+'.'+env.subst('$SOVERSION')+env.subst('$SHLIBSUFFIX')
libname_versioned = libbase+'.'+env.subst('$VERSION')+env.subst('$SHLIBSUFFIX')
shlibsuffix = '.'+env.subst('$VERSION')+env.subst('$SHLIBSUFFIX')
else:
libname_soname = libname_so
shlibsuffix = env.subst('$SHLIBSUFFIX')
shlinkflags = ['-Wl,-install_name,'+'@rpath/'+libname_soname]
if versioned:
shlinkflags.append(['-Wl,-compatibility_version,'+env.subst('$SOVERSION'),
'-Wl,-current_version,'+env.subst('$VERSION')])
try:
for rp in env['RPATH']:
shlinkflags.append('-Wl,-rpath,'+rp)
except KeyError:
pass
else:
print('build_library: Error: unsupported platform')
        SCons.Script.Exit(3)
# Build the library
thislib = env.SharedLibrary(target = sotarget,
source = srclist+[rootdict],
LIBS = linklibs, LIBPATH = linklibpath,
SHLIBSUFFIX = shlibsuffix,
SONAME = libname_soname,
SHLINKFLAGS = env['SHLINKFLAGS']+shlinkflags)
if versioned:
# Create symlinks
env.SymLink(libname_soname,thislib)
env.SymLink(libname_so,libname_soname)
# Installation
install_prefix = env.subst('$INSTALLDIR')
lib_dir = os.path.join(install_prefix,env.subst('$LIBSUBDIR'))
#bin_dir = os.path.join(install_prefix,'bin')
inc_dir = os.path.join(install_prefix,'include')
src_dir = os.path.join(install_prefix,'src',thisdir)
InstallWithRPATH(env,lib_dir,thislib,install_rpath)
# Install PCM file generated by RootCint, if any
if len(thedict) > 1:
env.Install(lib_dir,thedict[1])
env.Install(inc_dir,installhdrs)
env.Install(src_dir,srclist)
libname_so_installpath = os.path.join(lib_dir,libname_so)
if versioned:
libname_soname_installpath = os.path.join(lib_dir,libname_soname)
libname_versioned_installpath = os.path.join(lib_dir,libname_versioned)
#Kludge for SCons's inability to install symlinks
env.SymLink(libname_soname_installpath,libname_versioned_installpath)
env.SymLink(libname_so_installpath,libname_soname_installpath)
if 'uninstall' in SCons.Script.COMMAND_LINE_TARGETS:
create_uninstall_target(env, libname_so_installpath)
create_uninstall_target(env, libname_soname_installpath)
return thislib
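# Hedged usage sketch for build_library (the source file names are hypothetical):
#   lib = build_library(env, 'Podd', 'THaRun.cxx THaEvent.cxx',
#                       extrahdrs=['Helper.h'], versioned=True,
#                       install_rpath=['$$ORIGIN'])
# This would generate PoddDict.cxx from the class headers plus Podd_LinkDef.h,
# build libPodd, and schedule installation of the library, headers and sources.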
import sys
import subprocess
import platform
import time
def write_compiledata(env, compiledata):
if sys.version_info >= (2, 7):
try:
cmd = "git rev-parse HEAD 2>/dev/null"
gitrev = subprocess.check_output(cmd, shell=True).rstrip()
except:
gitrev = ''
try:
cmd = env.subst('$CXX') + " --version 2>/dev/null | head -1"
cxxver = subprocess.check_output(cmd, shell=True).rstrip()
except:
cxxver = ''
# subprocess gives us byte string literals in Python 3, but we'd like
# Unicode strings
if sys.version_info >= (3, 0):
gitrev = gitrev.decode()
cxxver = cxxver.decode()
else:
fnull = open(os.devnull, 'w')
try:
            gitrev = subprocess.Popen(['git', 'rev-parse', 'HEAD'],
                                      stdout=subprocess.PIPE, stderr=fnull).communicate()[0].rstrip()
except:
gitrev =''
try:
outp = subprocess.Popen([env.subst('$CXX'), '--version'],
stdout=subprocess.PIPE, stderr=fnull).communicate()[0]
lines = outp.splitlines()
cxxver = lines[0]
except:
cxxver = ''
f=open(compiledata,'w')
f.write('#ifndef ANALYZER_COMPILEDATA_H\n')
f.write('#define ANALYZER_COMPILEDATA_H\n')
f.write('\n')
f.write('#define HA_INCLUDEPATH "%s %s %s"\n' %
(env.subst('$HA_HallA'), env.subst('$HA_Podd'), env.subst('$HA_DC')))
f.write('#define HA_VERSION "%s"\n' % env.subst('$HA_VERSION'))
f.write('#define HA_DATE "%s"\n' % time.strftime("%b %d %Y"))
f.write('#define HA_DATETIME "%s"\n' % time.strftime("%a %b %d %Y"))
#f.write('#define HA_DATETIME "%s"\n' % time.strftime("%a %b %d %H:%M:%S %Z %Y"))
f.write('#define HA_PLATFORM "%s"\n' % platform.platform())
f.write('#define HA_BUILDNODE "%s"\n' % platform.node())
f.write('#define HA_BUILDDIR "%s"\n' % os.getcwd())
try:
builduser = env['ENV']['LOGNAME']
except:
builduser = ''
f.write('#define HA_BUILDUSER "%s"\n' % builduser)
f.write('#define HA_GITREV "%s"\n' % gitrev[:7])
f.write('#define HA_CXXVERS "%s"\n' % cxxver)
f.write('#define HA_ROOTVERS "%s"\n' % env.subst('$ROOTVERS'))
f.write('#define ANALYZER_VERSION_CODE %s\n' % env.subst('$VERCODE'))
f.write('#define ANALYZER_VERSION(a,b,c) (((a) << 16) + ((b) << 8) + (c))\n')
f.write('\n')
f.write('#endif\n')
f.close()
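# Hedged usage sketch: write_compiledata(env, 'ha_compiledata.h') (file name
# illustrative) emits a header defining HA_VERSION, HA_GITREV, HA_CXXVERS and
# related macros for inclusion by the analyzer sources at compile time.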
|
import os
import math
import torch
import argparse
import numpy as np
from tqdm import tqdm
from medpy import metric
import torch.nn.functional as F
from Configs.config import config
from Model.Vnet import VNet as Vnet
from cc3d import connected_components
from Dataloader.dataset import LAHeartDataset
"""
# https://github.com/kleinzcy/SASSnet/blob/master/code/test_util.py
def getLargestCC(segmentation):
# from skimage.measure import label as sm_label
labels = sm_label(segmentation)
assert (labels.max() != 0) # assume at least 1 CC
largestCC = labels == np.argmax(np.bincount(labels.flat)[1:]) + 1
return largestCC
"""
def cct(pseudo_label):
labels_out, N = connected_components(pseudo_label, connectivity=26, return_N=True)
for segid in range(1, N + 1):
extracted_image = labels_out * (labels_out == segid)
if extracted_image.sum() < 8000:
pseudo_label[labels_out == segid] = 0
return pseudo_label
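# Hedged example of the pruning rule above: every 26-connected component of the
# pseudo label with fewer than 8000 voxels is zeroed, e.g.
#   cleaned = cct(prediction.astype(np.uint8))
# keeps only large structures (such as the left atrium) and removes small islands.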
def test_all_case(net, val_set, num_classes, patch_size=(112, 112, 80), stride_xy=18, stride_z=4,
post_process=False, visual=False):
total_metric = 0.0
assert val_set.aug is False, ">> no augmentation for test set"
dataloader = iter(val_set)
tbar = range(len(val_set))
tbar = tqdm(tbar, ncols=135)
for (idx, _) in enumerate(tbar):
image, label = next(dataloader)
prediction, score_map = test_single_case(net, image, stride_xy, stride_z, patch_size,
num_classes=num_classes,
post_process=post_process)
if np.sum(prediction) == 0:
single_metric = (0, 0, 0, 0)
else:
single_metric = calculate_metric_percase(np.array(prediction),
np.array(label[:]))
total_metric += np.asarray(single_metric)
if visual:
# import nibabel as nib
            # no save path is defined here; modify this block if you need to export visualisations
raise NotImplementedError
avg_metric = total_metric / len(val_set)
print("|dice={:.4f}|mIoU={:.4f}|95HD={:.4f}|ASD={:.4f}|".format(avg_metric[0], avg_metric[1],
avg_metric[3], avg_metric[2]))
return avg_metric
def test_single_case(net, image, stride_xy, stride_z, patch_size, num_classes=1,
post_process=False):
image = image.squeeze()
w, h, d = image.shape
    # if the image is smaller than patch_size, pad it up to patch_size
add_pad = False
if w < patch_size[0]:
w_pad = patch_size[0] - w
add_pad = True
else:
w_pad = 0
if h < patch_size[1]:
h_pad = patch_size[1] - h
add_pad = True
else:
h_pad = 0
if d < patch_size[2]:
d_pad = patch_size[2] - d
add_pad = True
else:
d_pad = 0
wl_pad, wr_pad = w_pad // 2, w_pad - w_pad // 2
hl_pad, hr_pad = h_pad // 2, h_pad - h_pad // 2
dl_pad, dr_pad = d_pad // 2, d_pad - d_pad // 2
if add_pad:
image = np.pad(image, [(wl_pad, wr_pad), (hl_pad, hr_pad), (dl_pad, dr_pad)], mode='constant',
constant_values=0)
ww, hh, dd = image.shape
sx = math.ceil((ww - patch_size[0]) / stride_xy) + 1
sy = math.ceil((hh - patch_size[1]) / stride_xy) + 1
sz = math.ceil((dd - patch_size[2]) / stride_z) + 1
score_map = np.zeros((num_classes,) + image.shape).astype(np.float32)
cnt = np.zeros(image.shape).astype(np.float32)
for x in range(0, sx):
xs = min(stride_xy * x, ww - patch_size[0])
for y in range(0, sy):
ys = min(stride_xy * y, hh - patch_size[1])
for z in range(0, sz):
zs = min(stride_z * z, dd - patch_size[2])
test_patch = image[xs:xs + patch_size[0], ys:ys + patch_size[1], zs:zs + patch_size[2]]
test_patch = np.expand_dims(np.expand_dims(test_patch, axis=0), axis=0).astype(np.float32)
test_patch = torch.from_numpy(test_patch).cuda(non_blocking=True)
y1, _ = net(test_patch)
y = F.softmax(y1, dim=1)
y = y.cpu().data.numpy()
y = y[0, :, :, :, :]
score_map[:, xs:xs + patch_size[0], ys:ys + patch_size[1], zs:zs + patch_size[2]] \
= score_map[:, xs:xs + patch_size[0], ys:ys + patch_size[1], zs:zs + patch_size[2]] + y
cnt[xs:xs + patch_size[0], ys:ys + patch_size[1], zs:zs + patch_size[2]] \
= cnt[xs:xs + patch_size[0], ys:ys + patch_size[1], zs:zs + patch_size[2]] + 1
score_map = score_map / np.expand_dims(cnt, axis=0)
label_map = np.argmax(score_map, axis=0)
if post_process:
label_map = cct(label_map)
# label_map = getLargestCC(label_map) feel free to change the post-process approach
if add_pad:
label_map = label_map[wl_pad:wl_pad + w, hl_pad:hl_pad + h, dl_pad:dl_pad + d]
score_map = score_map[:, wl_pad:wl_pad + w, hl_pad:hl_pad + h, dl_pad:dl_pad + d]
return label_map, score_map
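# Note on the tiling above (sketch): the padded volume (ww, hh, dd) is covered by
# sx * sy * sz overlapping patches, with
#   sx = ceil((ww - patch_size[0]) / stride_xy) + 1   (sy, sz analogous),
# each patch's softmax output is accumulated into score_map and divided by the
# per-voxel visit count cnt, so overlapping regions are averaged.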
def calculate_metric_percase(pred, gt):
dice = metric.binary.dc(pred, gt)
jc = metric.binary.jc(pred, gt)
hd = metric.binary.hd95(pred, gt)
asd = metric.binary.asd(pred, gt)
return dice, jc, hd, asd
def test_calculate_metric(ckpt_path, vis=False, post=False):
net = Vnet(n_channels=1, n_classes=2,
normalization='batchnorm', has_dropout=True).cuda()
net.load_state_dict(torch.load(ckpt_path))
net.eval()
val_dataset = LAHeartDataset(os.path.join(config.code_path, "Dataloader"),
config.data_path,
split="eval", config=config)
# follows the previous works' setting
avg_metric = test_all_case(net, val_dataset, num_classes=2,
patch_size=(112, 112, 80), stride_xy=18, stride_z=4,
post_process=post, visual=vis)
return avg_metric
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Medical Semi-supervised Semantic Segmentation (valid)')
parser.add_argument("--env_name", default="traCoCo(8-label,spatial_weight(kl)=0.3,hyp=0.1,iters=9000)",
type=str, help="your environment folder name for training")
parser.add_argument("--visual", action="store_true",
help="your environment folder name for training")
parser.add_argument("--post", action="store_true",
help="implement post process or not")
cmd_line = parser.parse_args()
default_path = os.path.join(config.code_path, "saved", cmd_line.env_name)
ckpt = os.listdir(default_path)
ckpt = [i for i in ckpt if ".pth" in str(i)][0]
print("validate {} for LA dataset ...".format(str(ckpt)))
    avg_metric = test_calculate_metric(os.path.join(default_path, ckpt), vis=cmd_line.visual,
                                       post=cmd_line.post)
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['LoadBalancer']
class LoadBalancer(pulumi.CustomResource):
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
backend_address_pools: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['BackendAddressPoolArgs']]]]] = None,
etag: Optional[pulumi.Input[str]] = None,
frontend_ip_configurations: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['FrontendIPConfigurationArgs']]]]] = None,
id: Optional[pulumi.Input[str]] = None,
inbound_nat_pools: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['InboundNatPoolArgs']]]]] = None,
inbound_nat_rules: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['InboundNatRuleArgs']]]]] = None,
load_balancer_name: Optional[pulumi.Input[str]] = None,
load_balancing_rules: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['LoadBalancingRuleArgs']]]]] = None,
location: Optional[pulumi.Input[str]] = None,
outbound_nat_rules: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['OutboundNatRuleArgs']]]]] = None,
probes: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ProbeArgs']]]]] = None,
provisioning_state: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
resource_guid: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
__props__=None,
__name__=None,
__opts__=None):
"""
LoadBalancer resource
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['BackendAddressPoolArgs']]]] backend_address_pools: Collection of backend address pools used by a load balancer
:param pulumi.Input[str] etag: A unique read-only string that changes whenever the resource is updated.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['FrontendIPConfigurationArgs']]]] frontend_ip_configurations: Object representing the frontend IPs to be used for the load balancer
:param pulumi.Input[str] id: Resource ID.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['InboundNatPoolArgs']]]] inbound_nat_pools: Defines an external port range for inbound NAT to a single backend port on NICs associated with a load balancer. Inbound NAT rules are created automatically for each NIC associated with the Load Balancer using an external port from this range. Defining an Inbound NAT pool on your Load Balancer is mutually exclusive with defining inbound Nat rules. Inbound NAT pools are referenced from virtual machine scale sets. NICs that are associated with individual virtual machines cannot reference an inbound NAT pool. They have to reference individual inbound NAT rules.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['InboundNatRuleArgs']]]] inbound_nat_rules: Collection of inbound NAT Rules used by a load balancer. Defining inbound NAT rules on your load balancer is mutually exclusive with defining an inbound NAT pool. Inbound NAT pools are referenced from virtual machine scale sets. NICs that are associated with individual virtual machines cannot reference an Inbound NAT pool. They have to reference individual inbound NAT rules.
:param pulumi.Input[str] load_balancer_name: The name of the load balancer.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['LoadBalancingRuleArgs']]]] load_balancing_rules: Object collection representing the load balancing rules Gets the provisioning
:param pulumi.Input[str] location: Resource location.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['OutboundNatRuleArgs']]]] outbound_nat_rules: The outbound NAT rules.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ProbeArgs']]]] probes: Collection of probe objects used in the load balancer
:param pulumi.Input[str] provisioning_state: Gets the provisioning state of the PublicIP resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
:param pulumi.Input[str] resource_group_name: The name of the resource group.
:param pulumi.Input[str] resource_guid: The resource GUID property of the load balancer resource.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['backend_address_pools'] = backend_address_pools
__props__['etag'] = etag
__props__['frontend_ip_configurations'] = frontend_ip_configurations
__props__['id'] = id
__props__['inbound_nat_pools'] = inbound_nat_pools
__props__['inbound_nat_rules'] = inbound_nat_rules
__props__['load_balancer_name'] = load_balancer_name
__props__['load_balancing_rules'] = load_balancing_rules
__props__['location'] = location
__props__['outbound_nat_rules'] = outbound_nat_rules
__props__['probes'] = probes
__props__['provisioning_state'] = provisioning_state
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__['resource_group_name'] = resource_group_name
__props__['resource_guid'] = resource_guid
__props__['tags'] = tags
__props__['name'] = None
__props__['type'] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:network:LoadBalancer"), pulumi.Alias(type_="azure-nextgen:network/latest:LoadBalancer"), pulumi.Alias(type_="azure-nextgen:network/v20150501preview:LoadBalancer"), pulumi.Alias(type_="azure-nextgen:network/v20150615:LoadBalancer"), pulumi.Alias(type_="azure-nextgen:network/v20160330:LoadBalancer"), pulumi.Alias(type_="azure-nextgen:network/v20160601:LoadBalancer"), pulumi.Alias(type_="azure-nextgen:network/v20160901:LoadBalancer"), pulumi.Alias(type_="azure-nextgen:network/v20161201:LoadBalancer"), pulumi.Alias(type_="azure-nextgen:network/v20170601:LoadBalancer"), pulumi.Alias(type_="azure-nextgen:network/v20170801:LoadBalancer"), pulumi.Alias(type_="azure-nextgen:network/v20170901:LoadBalancer"), pulumi.Alias(type_="azure-nextgen:network/v20171001:LoadBalancer"), pulumi.Alias(type_="azure-nextgen:network/v20171101:LoadBalancer"), pulumi.Alias(type_="azure-nextgen:network/v20180101:LoadBalancer"), pulumi.Alias(type_="azure-nextgen:network/v20180201:LoadBalancer"), pulumi.Alias(type_="azure-nextgen:network/v20180401:LoadBalancer"), pulumi.Alias(type_="azure-nextgen:network/v20180601:LoadBalancer"), pulumi.Alias(type_="azure-nextgen:network/v20180701:LoadBalancer"), pulumi.Alias(type_="azure-nextgen:network/v20180801:LoadBalancer"), pulumi.Alias(type_="azure-nextgen:network/v20181001:LoadBalancer"), pulumi.Alias(type_="azure-nextgen:network/v20181101:LoadBalancer"), pulumi.Alias(type_="azure-nextgen:network/v20181201:LoadBalancer"), pulumi.Alias(type_="azure-nextgen:network/v20190201:LoadBalancer"), pulumi.Alias(type_="azure-nextgen:network/v20190401:LoadBalancer"), pulumi.Alias(type_="azure-nextgen:network/v20190601:LoadBalancer"), pulumi.Alias(type_="azure-nextgen:network/v20190701:LoadBalancer"), pulumi.Alias(type_="azure-nextgen:network/v20190801:LoadBalancer"), pulumi.Alias(type_="azure-nextgen:network/v20190901:LoadBalancer"), pulumi.Alias(type_="azure-nextgen:network/v20191101:LoadBalancer"), pulumi.Alias(type_="azure-nextgen:network/v20191201:LoadBalancer"), pulumi.Alias(type_="azure-nextgen:network/v20200301:LoadBalancer"), pulumi.Alias(type_="azure-nextgen:network/v20200401:LoadBalancer"), pulumi.Alias(type_="azure-nextgen:network/v20200501:LoadBalancer"), pulumi.Alias(type_="azure-nextgen:network/v20200601:LoadBalancer"), pulumi.Alias(type_="azure-nextgen:network/v20200701:LoadBalancer"), pulumi.Alias(type_="azure-nextgen:network/v20200801:LoadBalancer")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(LoadBalancer, __self__).__init__(
'azure-nextgen:network/v20170301:LoadBalancer',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'LoadBalancer':
"""
Get an existing LoadBalancer resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
return LoadBalancer(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="backendAddressPools")
def backend_address_pools(self) -> pulumi.Output[Optional[Sequence['outputs.BackendAddressPoolResponse']]]:
"""
Collection of backend address pools used by a load balancer
"""
return pulumi.get(self, "backend_address_pools")
@property
@pulumi.getter
def etag(self) -> pulumi.Output[Optional[str]]:
"""
A unique read-only string that changes whenever the resource is updated.
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter(name="frontendIPConfigurations")
def frontend_ip_configurations(self) -> pulumi.Output[Optional[Sequence['outputs.FrontendIPConfigurationResponse']]]:
"""
Object representing the frontend IPs to be used for the load balancer
"""
return pulumi.get(self, "frontend_ip_configurations")
@property
@pulumi.getter(name="inboundNatPools")
def inbound_nat_pools(self) -> pulumi.Output[Optional[Sequence['outputs.InboundNatPoolResponse']]]:
"""
Defines an external port range for inbound NAT to a single backend port on NICs associated with a load balancer. Inbound NAT rules are created automatically for each NIC associated with the Load Balancer using an external port from this range. Defining an Inbound NAT pool on your Load Balancer is mutually exclusive with defining inbound Nat rules. Inbound NAT pools are referenced from virtual machine scale sets. NICs that are associated with individual virtual machines cannot reference an inbound NAT pool. They have to reference individual inbound NAT rules.
"""
return pulumi.get(self, "inbound_nat_pools")
@property
@pulumi.getter(name="inboundNatRules")
def inbound_nat_rules(self) -> pulumi.Output[Optional[Sequence['outputs.InboundNatRuleResponse']]]:
"""
Collection of inbound NAT Rules used by a load balancer. Defining inbound NAT rules on your load balancer is mutually exclusive with defining an inbound NAT pool. Inbound NAT pools are referenced from virtual machine scale sets. NICs that are associated with individual virtual machines cannot reference an Inbound NAT pool. They have to reference individual inbound NAT rules.
"""
return pulumi.get(self, "inbound_nat_rules")
@property
@pulumi.getter(name="loadBalancingRules")
def load_balancing_rules(self) -> pulumi.Output[Optional[Sequence['outputs.LoadBalancingRuleResponse']]]:
"""
Object collection representing the load balancing rules Gets the provisioning
"""
return pulumi.get(self, "load_balancing_rules")
@property
@pulumi.getter
def location(self) -> pulumi.Output[Optional[str]]:
"""
Resource location.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Resource name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="outboundNatRules")
def outbound_nat_rules(self) -> pulumi.Output[Optional[Sequence['outputs.OutboundNatRuleResponse']]]:
"""
The outbound NAT rules.
"""
return pulumi.get(self, "outbound_nat_rules")
@property
@pulumi.getter
def probes(self) -> pulumi.Output[Optional[Sequence['outputs.ProbeResponse']]]:
"""
Collection of probe objects used in the load balancer
"""
return pulumi.get(self, "probes")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> pulumi.Output[Optional[str]]:
"""
Gets the provisioning state of the PublicIP resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter(name="resourceGuid")
def resource_guid(self) -> pulumi.Output[Optional[str]]:
"""
The resource GUID property of the load balancer resource.
"""
return pulumi.get(self, "resource_guid")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
Resource tags.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
Resource type.
"""
return pulumi.get(self, "type")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
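# Hedged usage sketch (resource names and values are illustrative only):
#   lb = LoadBalancer("lb",
#                     resource_group_name="my-rg",
#                     load_balancer_name="my-lb",
#                     location="westus")
# resource_group_name is required unless the resource is being looked up through
# opts (urn/id); the pools, rules and probes collections are all optional inputs.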
|
import re
import urllib
import time
from django.shortcuts import render, redirect, get_object_or_404
from django import http
from django.db.utils import DatabaseError
from django.db import transaction
from django.db.models import Count
from django.conf import settings
from django.core.urlresolvers import reverse
from django.contrib.auth.decorators import login_required
from django.contrib import messages
from django.views.decorators.http import require_POST
from jsonview.decorators import json_view
from airmozilla.main.models import Event, Tag, Channel, get_profile_safely
from airmozilla.main.views import is_contributor
from airmozilla.base.utils import paginator
from airmozilla.main.utils import get_event_channels
from . import forms
from . import utils
from .models import LoggedSearch, SavedSearch
from .split_search import split_search
@transaction.atomic
def home(request):
context = {
'q': None,
'events_found': None,
'search_error': None,
'tags': None,
'possible_tags': None,
'channels': None,
'possible_channels': None,
'found_channels': [],
'found_channels_count': 0,
}
if request.GET.get('q'):
form = forms.SearchForm(request.GET)
else:
form = forms.SearchForm()
if request.GET.get('q') and form.is_valid():
context['q'] = form.cleaned_data['q']
privacy_filter = {}
privacy_exclude = {}
qs = Event.objects.scheduled_or_processing()
if request.user.is_active:
if is_contributor(request.user):
privacy_exclude = {'privacy': Event.PRIVACY_COMPANY}
else:
# privacy_filter = {'privacy': Event.PRIVACY_PUBLIC}
privacy_exclude = {'privacy': Event.PRIVACY_COMPANY}
qs = qs.approved()
extra = {}
rest, params = split_search(context['q'], ('tag', 'channel'))
if params.get('tag'):
tags = Tag.objects.filter(name__iexact=params['tag'])
if tags:
context['q'] = rest
context['tags'] = extra['tags'] = tags
else:
# is the search term possibly a tag?
all_tag_names = Tag.objects.all().values_list('name', flat=True)
tags_regex = re.compile(
r'\b(%s)\b' %
('|'.join(re.escape(x) for x in all_tag_names),),
re.I
)
# next we need to turn all of these into a Tag QuerySet
# because we can't do `filter(name__in=tags_regex.findall(...))`
                # because that would be case-sensitive.
tag_ids = []
for match in tags_regex.findall(rest):
tag_ids.extend(
Tag.objects.filter(name__iexact=match)
.values_list('id', flat=True)
)
possible_tags = Tag.objects.filter(
id__in=tag_ids
)
for tag in possible_tags:
regex = re.compile(re.escape(tag.name), re.I)
tag._query_string = regex.sub(
'',
context['q'],
)
tag._query_string += ' tag: %s' % tag.name
# reduce all excess whitespace into 1
tag._query_string = re.sub(
                    r'\s\s+',
' ',
tag._query_string
)
tag._query_string = tag._query_string.strip()
context['possible_tags'] = possible_tags
if params.get('channel'):
channels = Channel.objects.filter(name__iexact=params['channel'])
if channels:
context['q'] = rest
context['channels'] = extra['channels'] = channels
else:
# is the search term possibly a channel?
all_channel_names = (
Channel.objects.all().values_list('name', flat=True)
)
channels_regex = re.compile(
r'\b(%s)\b' %
('|'.join(re.escape(x) for x in all_channel_names),),
re.I
)
channel_ids = []
for match in channels_regex.findall(rest):
channel_ids.extend(
Channel.objects
.filter(name__iexact=match).values_list('id', flat=True)
)
possible_channels = Channel.objects.filter(
id__in=channel_ids
)
for channel in possible_channels:
regex = re.compile(re.escape(channel.name), re.I)
channel._query_string = regex.sub(
'',
context['q'],
)
channel._query_string += ' channel: %s' % channel.name
# reduce all excess whitespace into 1
channel._query_string = re.sub(
                    r'\s\s+',
' ',
channel._query_string
)
channel._query_string = channel._query_string.strip()
context['possible_channels'] = possible_channels
events = _search(
qs,
context['q'],
privacy_filter=privacy_filter,
privacy_exclude=privacy_exclude,
sort=request.GET.get('sort'),
**extra
)
if not events.count() and utils.possible_to_or_query(context['q']):
events = _search(
qs,
context['q'],
privacy_filter=privacy_filter,
privacy_exclude=privacy_exclude,
sort=request.GET.get('sort'),
fuzzy=True
)
found_channels = _find_channels(context['q'])
context['found_channels'] = found_channels # it's a list
context['found_channels_count'] = len(found_channels)
elif request.GET.get('ss'):
savedsearch = get_object_or_404(
SavedSearch,
id=request.GET.get('ss')
)
context['savedsearch'] = savedsearch
events = savedsearch.get_events()
# But if you're just browsing we want to make sure you don't
# see anything you're not supposed to see.
if request.user.is_active:
if is_contributor(request.user):
events = events.exclude(privacy=Event.PRIVACY_COMPANY)
else:
events = events.filter(privacy=Event.PRIVACY_PUBLIC)
# It's not obvious how to sort these. They all match the saved
# search.
# Let's keep it simple and sort by start time for now
events = events.order_by('-start_time')
else:
events = None
if events is not None:
try:
page = int(request.GET.get('page', 1))
if page < 1:
raise ValueError
except ValueError:
return http.HttpResponseBadRequest('Invalid page')
# we use the paginator() function to get the Paginator
# instance so we can avoid calling `events.count()` for the
# header of the page where it says "XX events found"
try:
with transaction.atomic():
pager, events_paged = paginator(events, page, 10)
_database_error_happened = False
except DatabaseError:
_database_error_happened = True
# don't feed the trolls, just return nothing found
pager, events_paged = paginator(Event.objects.none(), 1, 10)
next_page_url = prev_page_url = None
def url_maker(page):
querystring = {'page': page}
if context.get('savedsearch'):
querystring['ss'] = context['savedsearch'].id
else:
querystring['q'] = context['q'].encode('utf-8')
querystring = urllib.urlencode(querystring)
return '%s?%s' % (reverse('search:home'), querystring)
if events_paged.has_next():
next_page_url = url_maker(events_paged.next_page_number())
if events_paged.has_previous():
prev_page_url = url_maker(events_paged.previous_page_number())
context['events_paged'] = events_paged
context['next_page_url'] = next_page_url
context['prev_page_url'] = prev_page_url
context['events_found'] = pager.count
context['channels'] = get_event_channels(events_paged)
log_searches = settings.LOG_SEARCHES and '_nolog' not in request.GET
if (
log_searches and
not _database_error_happened and
request.GET.get('q', '').strip()
):
logged_search = LoggedSearch.objects.create(
term=request.GET['q'][:200],
results=events.count(),
page=page,
user=request.user.is_authenticated() and request.user or None
)
request.session['logged_search'] = (
logged_search.pk,
time.time()
)
elif request.GET.get('q'):
context['search_error'] = form.errors['q']
else:
context['events'] = []
context['form'] = form
return render(request, 'search/home.html', context)
def _find_channels(q):
search_escaped = utils.make_or_query(q)
sql = """
to_tsvector('english', name) @@ plainto_tsquery('english', %s)
OR
slug ILIKE %s
"""
channels_qs = Channel.objects.all().extra(
where=[sql],
params=[
search_escaped,
search_escaped,
],
select={
'name_highlit': (
"ts_headline('english', name, "
"plainto_tsquery('english', %s))"
),
'rank_name': (
"ts_rank_cd(to_tsvector('english', name), "
"plainto_tsquery('english', %s))"
),
},
select_params=[
search_escaped,
search_escaped,
]
)
# make a dict of parental counts
subchannel_counts = {}
qs = (
Channel.objects
.filter(parent__isnull=False)
.values('parent_id')
.order_by() # necessary because the model has a default ordering
.annotate(Count('parent'))
)
for each in qs:
subchannel_counts[each['parent_id']] = each['parent__count']
# make a dict of events counts by channel
event_counts = {}
qs = (
Event.channels.through.objects.filter(channel__in=channels_qs)
.values('channel_id')
.annotate(Count('channel'))
)
for each in qs:
event_counts[each['channel_id']] = each['channel__count']
channels = []
for channel in channels_qs[:5]:
channel._event_count = event_counts.get(channel.id, 0)
channel._subchannel_count = subchannel_counts.get(channel.id, 0)
channels.append(channel)
return channels
def _search(qs, q, **options):
# we only want to find upcoming or archived events
# some optional filtering
if 'tags' in options:
qs = qs.filter(tags__in=options['tags'])
if 'channels' in options:
qs = qs.filter(channels__in=options['channels'])
if options.get('privacy_filter'):
qs = qs.filter(**options['privacy_filter'])
elif options.get('privacy_exclude'):
qs = qs.exclude(**options['privacy_exclude'])
if q and options.get('fuzzy'):
sql = """
(
to_tsvector('english', title) @@ to_tsquery('english', %s)
OR
to_tsvector('english', description || ' ' || short_description)
@@ to_tsquery('english', %s)
OR
to_tsvector('english', transcript) @@ to_tsquery('english', %s)
)
"""
search_escaped = utils.make_or_query(q)
elif q:
sql = """
(
to_tsvector('english', title) @@ plainto_tsquery('english', %s)
OR
to_tsvector('english', description || ' ' || short_description)
@@ plainto_tsquery('english', %s)
OR
to_tsvector('english', transcript) @@ plainto_tsquery('english', %s)
)
"""
search_escaped = q
if q:
qs = qs.extra(
where=[sql],
params=[search_escaped, search_escaped, search_escaped],
select={
'title_highlit': (
"ts_headline('english', title, "
"plainto_tsquery('english', %s))"
),
'desc_highlit': (
"ts_headline('english', short_description, "
"plainto_tsquery('english', %s))"
),
'transcript_highlit': (
"ts_headline('english', transcript, "
"plainto_tsquery('english', %s))"
),
'rank_title': (
"ts_rank_cd(to_tsvector('english', title), "
"plainto_tsquery('english', %s))"
),
'rank_desc': (
"ts_rank_cd(to_tsvector('english', description "
"|| ' ' || short_description), "
"plainto_tsquery('english', %s))"
),
'rank_transcript': (
"ts_rank_cd(to_tsvector('english', transcript), "
"plainto_tsquery('english', %s))"
),
},
select_params=[
search_escaped,
search_escaped,
search_escaped,
search_escaped,
search_escaped,
search_escaped
],
)
qs = qs.order_by('-rank_title', '-start_time', '-rank_desc')
else:
qs = qs.order_by('-start_time')
return qs
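# Note (sketch): the non-fuzzy branch feeds the raw query to plainto_tsquery,
# which effectively ANDs the search terms; the fuzzy branch assumes that
# utils.make_or_query(q) rewrites the query into to_tsquery syntax with OR'd
# terms (e.g. "foo | bar") so that any single term can match.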
@require_POST
@login_required
@transaction.atomic()
def savesearch(request):
q = request.POST.get('q', '').strip()
if not q:
return http.HttpResponseBadRequest('no q')
form = forms.SearchForm(request.POST)
if not form.is_valid():
return http.HttpResponseBadRequest(form.errors)
title = form.cleaned_data['q']
rest, params = split_search(title, ('tag', 'channel'))
tags = None
channels = None
if params.get('tag'):
tags = Tag.objects.filter(name__iexact=params['tag'])
if tags:
title = rest
if params.get('channel'):
channels = Channel.objects.filter(
name__iexact=params['channel']
)
if channels:
title = rest
filters = {}
if q:
filters['title'] = {
'include': title
}
if tags:
filters['tags'] = {
'include': [tag.id for tag in tags],
}
if channels:
filters['channels'] = {
'include': [channel.id for channel in channels],
}
for other in SavedSearch.objects.filter(user=request.user):
if other.filters == filters:
return redirect('search:savedsearch', id=other.id)
savedsearch = SavedSearch.objects.create(
user=request.user,
filters=filters,
)
messages.success(
request,
'Search saved'
)
return redirect('search:savedsearch', id=savedsearch.id)
@login_required
@transaction.atomic()
def savedsearch(request, id=None):
savedsearch = get_object_or_404(SavedSearch, id=id)
if request.method == 'POST':
forked = False
if savedsearch.user != request.user:
# fork the saved search
forked = True
savedsearch = SavedSearch.objects.create(
user=request.user,
name=savedsearch.name,
filters=savedsearch.filters,
)
form = forms.SavedSearchForm(request.POST)
if form.is_valid():
data = form.export_filters()
savedsearch.name = form.cleaned_data['name']
savedsearch.filters = data
savedsearch.save()
if forked:
messages.success(
request,
'Saved Search forked and saved'
)
else:
messages.success(
request,
'Saved Search saved'
)
return redirect('search:savedsearch', id=savedsearch.id)
elif request.GET.get('sample'):
events = savedsearch.get_events()
return http.JsonResponse({'events': events.count()})
else:
data = forms.SavedSearchForm.convert_filters(
savedsearch.filters,
pks=True
)
data['name'] = savedsearch.name
form = forms.SavedSearchForm(data)
context = {
'savedsearch': savedsearch,
'form': form,
'use_findable': True,
}
return render(request, 'search/savesearch.html', context)
@login_required
@transaction.atomic()
def new_savedsearch(request):
if request.method == 'POST':
form = forms.SavedSearchForm(request.POST)
if form.is_valid():
data = form.export_filters()
SavedSearch.objects.create(
user=request.user,
filters=data,
name=form.cleaned_data['name'],
)
messages.success(
request,
'Saved Search saved'
)
return redirect('search:savedsearches')
else:
form = forms.SavedSearchForm()
context = {
'form': form,
'use_findable': False,
}
return render(request, 'search/savesearch.html', context)
@login_required
def savedsearches(request):
context = {}
return render(request, 'search/savedsearches.html', context)
@login_required
@json_view
def savedsearches_data(request):
context = {}
qs = SavedSearch.objects.filter(
user=request.user
).order_by('-created')
searches = []
for savedsearch in qs:
item = {
'id': savedsearch.id,
'name': savedsearch.name,
'summary': savedsearch.summary,
'modified': savedsearch.modified.isoformat(),
}
searches.append(item)
# We need a general Feed URL that is tailored to this user
from airmozilla.main.context_processors import base
feed = base(request)['get_feed_data']()
if request.user.is_active:
profile = get_profile_safely(request.user)
if profile and profile.contributor:
calendar_privacy = 'contributors'
else:
calendar_privacy = 'company'
else:
calendar_privacy = 'public'
context['savedsearches'] = searches
context['urls'] = {
'search:savedsearch': reverse('search:savedsearch', args=(0,)),
'search:home': reverse('search:home'),
'feed': feed['url'],
'ical': reverse('main:calendar_ical', args=(calendar_privacy,)),
}
return context
@login_required
@json_view
def delete_savedsearch(request, id):
savedsearch = get_object_or_404(SavedSearch, id=id)
if savedsearch.user != request.user:
return http.HttpResponseForbidden('Not yours to delete')
savedsearch.delete()
return {'ok': True}
|
import spacy
from textblob import TextBlob
import pandas as pd
# Import functions from other files
from tweet_handlers import pullTweetsFromCSV, tweetPulls
### Declare functions to standardize, identify, and analyze input text
# Will ultimately take in a list of tweets and return:
# - Word counts
# - Split of positive / negative aspects
# - Brand identification?
#visualizeText() is a function to diagram sentences, to help with troubleshooting
# Inputs:
# - nlp: an NLP object,
# - txt = a string containing the sentence to be diagramed,
# - writeFilename: a string containing the filename to write the HTML diagram to
# Returns:
# - writeFilename: the path of the file that contains the HTML diagram
def visualizeText(nlp, txt, writeFilename):
doc = nlp(txt)
html = spacy.displacy.render(doc, style='dep')
filePath = './' + writeFilename + '.html'
with open(filePath, 'w') as f:
f.write(html)
return filePath
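# Hedged usage sketch (sentence and file name are illustrative):
#   nlp = spacy.load("es_core_news_sm")
#   path = visualizeText(nlp, "El coche rojo es muy rapido", "diagram")
# This writes ./diagram.html containing the displaCy dependency diagram.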
#extractDescriptors() is a function to pull aspects and descriptors from a list of sentences
# Inputs:
# - nlp: an NLP object,
# - sentenceList: a list of strings containing the sentences to be analyzed
# Outputs:
# - list of dictionaries containing 'aspect' and 'description' -- not broken by tweet
def extractDescriptors(nlp, sentenceList):
#We'll ultimately return this aspects list
aspects = []
aspects_lemma = []
attributes = []
attributes_lemma = []
#We will iterate through the sentences
for i, aSentence in enumerate( sentenceList ):
if i % 100 == 0: print("Tweet# ", str(i))
doc = nlp(aSentence)
for token in doc:
###TODO:
# Currently there's no standardization that makes it a 1:1 Noun + Adjective, so that needs to be fixed
# Also need to add in a case that checks for pronoun resolution and sees what we can do about that
# We need to identify each noun, and find its descendants that are (pos_ == 'ADJ' or pos_ == 'VERB') and (dep_ == 'amod' or dep_ == 'acl')
# Modifying rule to examine ALL nouns, not just the subject of the sentence
#if token.dep_ == 'nsubj' and token.pos_ == 'NOUN':
if (token.pos_ == 'ADJ' or token.pos_ == 'VERB') and (token.dep_ == 'amod' or token.dep_ == 'acl'):
#Now append the things
aspects.append (token.head.text)
aspects_lemma.append(token.head.lemma_)
attributes.append( token.text )
attributes_lemma.append( token.lemma_ )
return ( aspects , attributes, aspects_lemma, attributes_lemma )
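# Hedged example: for sentences like ["the red car", "a broken door"] the
# function returns parallel lists of aspects ("car", "door") and attributes
# ("red", "broken"), plus their lemmas, because it keeps ADJ/VERB tokens that
# modify a noun through the amod/acl dependency relations.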
# Need a function that pulls attributes for each keyword in the tweet DF, since we need them to be kept separate
# extractTweetAttributes:
# Takes a DF of tweets, keywords, etc. and pulls out adjectives for each
# Inputs:
# - nlp: an NLP object,
# - tweet_df: pandas dataframe containing colums:
# - Tweet
# - Keyword
# - Spanish
# - Date
# Returns:
# - attribute_df: dataframe containing the list of...
# ...aspects & attributes for each keyword / spanish pair
def extractTweetAttributes(nlp, tweet_df):
#define return df
attribute_df = pd.DataFrame( columns = [
'Keyword',
'Spanish',
'aspect',
'attribute',
'aspect_lemma',
'attribute_lemma'
])
# Now create a set for the different keywords and spanish words
keySet = set( tweet_df['Keyword'] )
for aKey in keySet:
print("Extracting ", aKey)
spanishWord = tweet_df.loc[ tweet_df['Keyword'] == aKey ]['Spanish'].iloc[0]
# And this is where we actually add the various analyses
( aspectList , attributeList, aspectList_lemma, attributeList_lemma ) = extractDescriptors( nlp, tweet_df[ tweet_df['Keyword'] == aKey ]['tweet'] )
# Now that we've got the data, create lookup lists for the Keyword & Spanish words
keyList = [aKey] * len(aspectList)
spanishList = [spanishWord] * len(aspectList)
temp_df = pd.DataFrame({
'Keyword': keyList,
'Spanish': spanishList,
'aspect': aspectList,
'attribute': attributeList,
'aspect_lemma': aspectList_lemma,
'attribute_lemma': attributeList_lemma
})
# Finally, append the data for this keyword to the attribute dataframe
attribute_df = attribute_df.append( temp_df )
return attribute_df
def countAttributes( aspect_df ):
temp_df = pd.DataFrame({
'Keyword': aspect_df['Keyword'],
'Spanish': aspect_df['Spanish'],
'aspect': aspect_df['aspect_lemma'],
'attribute': aspect_df['attribute_lemma']
})
return temp_df.value_counts()
# In the main, this is where the tweet files are loaded...
# ...and routed through the analysis functions
if __name__ == "__main__":
print("In the main")
# Create the NLP object that will be used for all the text processing
#nlp = spacy.load("en_core_web_sm")
# We're actually using a spanish NLP object instead of an English one
nlp = spacy.load("es_core_news_sm")
# Pull in CSV files that hold all the tweets
tweetFileList = [
'./tweet_data/tweet_db_08.27.2021.csv'
]
# Create the DF of tweets from the CSV File
tweet_df = pullTweetsFromCSV( tweetFileList )#, fileEncoding='ANSI' )
# Instead of pulling tweets from a file, we're going to get new tweets
# First we need to designate a list of english + spanish keywords to search for
keyword_df = pd.read_csv('./keyword_list.csv')
#tweet_df = tweetPulls( keyword_df )
#Save the tweet-df because of errors
#tweet_df.to_csv('./tweet_data/tweet_db_08.27.2021.csv')#, encoding='ANSI')
# Run the tweets through the attribute extractor
aspect_df = extractTweetAttributes ( nlp, tweet_df)
# Run the aspects & attributes through a modified version of the wordcount function
count_df = countAttributes( aspect_df )
# - Not to mention run some sort of pronoun resolution
count_df.to_csv('./tweet_data/aspect_count_08.27.2021.csv')
|
# X10G Development code
# UDP Receive with Trailer Recognition
# Rob Halsall 23-11-2011
import sys, socket
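# Trailer layout assumed by the parsing loop below (inferred from the code, not
# from a separate spec): each packet ends with an 8-byte trailer; bytes 0-3 hold
# the little-endian frame number and bytes 4-7 the packet number, whose top two
# bits are flags (bit 31 = start of frame, bit 30 = end of frame) and whose low
# 30 bits are the packet sequence number.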
#from msvcrt import *
if len(sys.argv) == 3:
ip = sys.argv[1]
port = int(sys.argv[2])
else:
print "use: python udp_rx_ll_mon_trl.py 192.168.9.2 61650"
exit(0)
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, 80388608)
print "Receiving on ",ip,":",port
sock.bind((ip, port))
print"Entering packet receive loop - press 's' to exit....\n"
pkt_count = 0
headings = "%8s %8s %8s %8s %23s %23s %3s" % ('Pkt No' , 'Length','FNUM','PNUM', 'First 8', 'Last 8', 'MKR')
#print headings
chr =""
pkt_num_nxt = 0
try:
while chr != "s":
#receive packet up to 8K Bytes
pkt = sock.recv(9000)
pkt_len = len(pkt)
pkt_top = pkt_len - 8
frm_num = (ord(pkt[pkt_top+3]) << 24) + (ord(pkt[pkt_top+2]) << 16) + (ord(pkt[pkt_top+1]) << 8) + ord(pkt[pkt_top+0])
pkt_num = (ord(pkt[pkt_top+7]) << 24) + (ord(pkt[pkt_top+6]) << 16) + (ord(pkt[pkt_top+5]) << 8) + ord(pkt[pkt_top+4])
frm_sof = (pkt_num & 0x80000000) >> 31
frm_eof = (pkt_num & 0x40000000) >> 30
pkt_num = pkt_num & 0x3FFFFFFF
pkt_num_str = "%08X " % (pkt_num)
frm_num_str = "%08X " % (frm_num)
pkt_str_tmp = "%08i %08i " % (pkt_count, pkt_len)
pkt_str = pkt_str_tmp + frm_num_str + pkt_num_str
if frm_sof == 1:
print headings
        #print out the first 8 payload bytes of the packet
for i in range(8):
pkt_str_tmp = "%02X " % ord(pkt[i+8])
pkt_str = pkt_str + pkt_str_tmp
pkt_str = pkt_str + '- '
        #print out the last 8 bytes of the packet (the trailer)
for i in range(8):
pkt_str_tmp = "%02X " % ord(pkt[i+pkt_top])
pkt_str = pkt_str + pkt_str_tmp
if frm_sof == 1:
pkt_str = pkt_str + "SOF "
if frm_eof == 1:
pkt_str = pkt_str + "EOF\n"
if pkt_num == pkt_num_nxt:
#pkt_num_nxt = pkt_num_nxt + 1
pkt_num_nxt = pkt_num + 1
else:
#pkt_num_nxt = pkt_num_nxt + 1
pkt_num_nxt = pkt_num + 1
pkt_str = pkt_str + "*"
print pkt_str
if frm_eof == 1:
pkt_count = 0
else:
pkt_count = pkt_count + 1
#chr = getch()
except:
print "closing socket after exception"
sock.close()
#finally:
print "closing socket at end"
sock.close()
|
from anndata import read_h5ad
import sys
from time import time
from scipy import stats, sparse
import numpy as np
import collections
import pickle
from sklearn.preprocessing import normalize
import os
from collections import Counter
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.metrics import roc_auc_score,accuracy_score,precision_recall_fscore_support, cohen_kappa_score, auc, average_precision_score,f1_score,precision_recall_curve
import time
import umap
import copy
from sklearn import preprocessing
from fbpca import pca
from sklearn.metrics import roc_auc_score, roc_curve
from sklearn.metrics.pairwise import cosine_similarity
from scanorama import VERBOSE, KNN, ALPHA, APPROX, SIGMA
#from libs import *
from scanorama import find_alignments,merge_datasets,process_data,transform,vstack
from sklearn.utils.graph_shortest_path import graph_shortest_path
from scipy.sparse.linalg import svds, eigs
nn_nhidden = [1000]
rsts = [0.5,0.6,0.7,0.8]
dfs_depth = 1
co_dim = 5
keep_prob = 1.0
use_diagonal = True
max_iter = 20
niter = 5
def translate_paramter(ps):
s = []
for p in ps:
if isinstance(p, list):
p = [str(i) for i in p]
p = '.'.join(p)
s.append(p)
else:
s.append(str(p))
s = '_'.join(s)
return s
pname = translate_paramter([max_iter])
def make_folder(folder):
if not os.path.exists(folder):
os.makedirs(folder)
return folder
def create_propagate_networks(dname, l2i, onto_net, cls2cls, ontology_nlp_file, rsts = [0.5,0.6,0.7,0.8], diss=[2,3], thress=[1,0.8]):
ncls = np.shape(cls2cls)[0]
if dname != 'allen':
onto_net_nlp, onto_net_bin, stack_net_nlp, stack_net_bin, onto_net_nlp_all_pairs = create_nlp_networks(l2i, onto_net, cls2cls, ontology_nlp_file)
#network = create_consensus_networks(rsts, stack_net_nlp, onto_net_nlp_all_pairs, cls2cls)
network = create_consensus_networks(rsts, stack_net_nlp, onto_net_nlp_all_pairs, cls2cls, diss = diss, thress = thress)
else:
stack_net_bin = np.zeros((ncls,ncls))
for n1 in onto_net:
for n2 in onto_net[n1]:
if n1==n2:
continue
stack_net_bin[n1,n2] = 1
stack_net_bin[n2,n1] = 1
network = [RandomWalkRestart(stack_net_bin, rst) for rst in rsts]
return network
def fine_nearest_co_using_nlp(sentences,co2emb,obo_file,nlp_mapping_cutoff=0.8):
co2name, name2co = get_ontology_name(obo_file = obo_file)
from sentence_transformers import SentenceTransformer
model = SentenceTransformer('bert-base-nli-mean-tokens')
sentences = np.array([sentence.lower() for sentence in sentences])
sentence_embeddings = model.encode(sentences)
co_embeddings = []
cos = []
for co in co2emb:
co_embeddings.append(co2emb[co])
cos.append(co)
co_embeddings = np.array(co_embeddings)
sent2co = {}
for sentence, embedding, ind in zip(sentences, sentence_embeddings, range(len(sentences))):
scs = cosine_similarity(co_embeddings, embedding.reshape(1,-1))
co_id = np.argmax(scs)
sc = scs[co_id]
if sc>nlp_mapping_cutoff:
sent2co[sentence.lower()] = cos[co_id]
names = set()
for name in name2co:
if name2co[name].upper() == cos[co_id]:
names.add(name)
#print (sentence, cos[co_id], sc, co2name[cos[co_id]],names)
return sent2co
def ImputeUnseenCls(y_vec, y_raw, cls2cls, nseen, knn=1):
nclass = np.shape(cls2cls)[0]
seen2unseen_sim = cls2cls[:nseen, nseen:]
nngh = np.argsort(seen2unseen_sim*-1, axis = 0)[0,:]
ncell = len(y_vec)
y_mat = np.zeros((ncell, nclass))
y_mat[:,:nseen] = y_raw[:, :nseen]
for i in range(ncell):
if y_vec[i] == -1:
#kngh = np.argsort(y_raw[i,:nseen]*-1)[0:knn]
#if len(kngh) == 0:
# continue
y_mat[i,nseen:] = y_mat[i,nngh]
y_mat[i,:nseen] -= 1000000
return y_mat
def ImputeUnseenCls_Backup(y_vec, y_raw, cls2cls, nseen, knn=1):
nclass = np.shape(cls2cls)[0]
seen2unseen_sim = cls2cls[:nseen, nseen:]
ncell = len(y_vec)
y_mat = np.zeros((ncell, nclass))
y_mat[:,:nseen] = y_raw[:, :nseen]
for i in range(ncell):
if y_vec[i] == -1:
kngh = np.argsort(y_raw[i,:nseen]*-1)[0:knn]
if len(kngh) == 0:
continue
y_mat[i,:nseen] -= 1000000
y_mat[i,nseen:] = np.dot(y_raw[i,kngh], seen2unseen_sim[kngh,:])
return y_mat
def find_gene_ind(genes, common_genes):
gid = []
for g in common_genes:
gid.append(np.where(genes == g)[0][0])
gid = np.array(gid)
return gid
def RandomWalkOntology(onto_net, l2i, ontology_nlp_file, ontology_nlp_emb_file, rst = 0.7):
ncls = len(l2i)
onto_net_nlp, _, onto_nlp_emb = read_cell_ontology_nlp(l2i, ontology_nlp_file, ontology_nlp_emb_file)
onto_net_nlp = (cosine_similarity(onto_nlp_emb) + 1 ) /2#1 - spatial.distance.cosine(onto_nlp_emb, onto_nlp_emb)
onto_net_mat = np.zeros((ncls, ncls))
for n1 in onto_net:
for n2 in onto_net[n1]:
if n1==n2:
continue
onto_net_mat[n1,n2] = onto_net_nlp[n1, n2]
onto_net_mat[n2,n1] = onto_net_nlp[n2, n1]
onto_net_rwr = RandomWalkRestart(onto_net_mat, rst)
return onto_net_rwr
def process_expression(c2g_list):
#this data process function is motivated by ACTINN, please check ACTINN for more information.
c2g = np.vstack(c2g_list)
c2g = c2g.T
#print ('onclass d0',np.shape(c2g))
c2g = c2g[np.sum(c2g, axis=1)>0, :]
#print (c2g)
#print ('onclass d1',np.shape(c2g))
c2g = np.divide(c2g, np.sum(c2g, axis=0, keepdims=True)) * 10000
c2g = np.log2(c2g+1)
expr = np.sum(c2g, axis=1)
#total_set = total_set[np.logical_and(expr >= np.percentile(expr, 1), expr <= np.percentile(expr, 99)),]
c2g = c2g[np.logical_and(expr >= np.percentile(expr, 1), expr <= np.percentile(expr, 99)),]
#print (c2g)
#print ('onclass d2',np.shape(c2g))
cv = np.std(c2g, axis=1) / np.mean(c2g, axis=1)
c2g = c2g[np.logical_and(cv >= np.percentile(cv, 1), cv <= np.percentile(cv, 99)),]
#print (c2g)
#print ('onclass d3',np.shape(c2g))
c2g = c2g.T
#print (c2g)
#print ('onclass d4',np.shape(c2g))
c2g_list_new = []
index = 0
for c in c2g_list:
ncell = np.shape(c)[0]
c2g_list_new.append(c2g[index:index+ncell,:])
        index += ncell
return c2g_list_new
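# Note (sketch of the normalisation above, following the ACTINN-style recipe):
# each cell's counts are scaled to a total of 10,000 and log2(x+1) transformed,
# then genes are kept only if their total expression and coefficient of
# variation both fall between the 1st and 99th percentiles across the data.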
def read_ontology_file(dname, data_folder):
if 'allen' in dname:
cell_type_network_file = data_folder + 'allen.ontology'
cell_type_nlp_emb_file = None
cl_obo_file = None
if not os.path.isfile(cell_type_network_file):
            sys.exit(cell_type_network_file + ' not found!')
else:
cell_type_network_file = data_folder + 'cl.ontology'
cell_type_nlp_emb_file = data_folder + 'cl.ontology.nlp.emb'
cl_obo_file = data_folder + 'cl.obo'
if not os.path.isfile(cell_type_nlp_emb_file):
sys.exit(cell_type_nlp_emb_file + ' not found!')
if not os.path.isfile(cell_type_network_file):
sys.exit(cell_type_network_file + ' not found!')
if not os.path.isfile(cl_obo_file):
sys.exit(cl_obo_file + ' not found!')
return cell_type_nlp_emb_file, cell_type_network_file, cl_obo_file
def read_data_file(dname, data_dir):
if 'microcebus' in dname:
tech = '10x'
feature_file = data_dir + 'Lemur/' + dname +'.h5ad'
filter_key={'method':tech }
label_file = None
gene_file = ''
label_key = 'cell_ontology_class'
elif 'muris' in dname:
tech = dname.split('_')[1]
feature_file = data_dir + 'Tabula_Muris_Senis/' + 'tabula-muris-senis-'+tech+'-official-raw-obj.h5ad'
filter_key = {}
label_file = None
gene_file = ''
batch_key = ''
label_key = 'cell_ontology_class'
elif 'sapiens' in dname:
feature_file = data_dir + 'sapiens/' + 'Pilot1_Pilot2_decontX_Oct2020.h5ad'
filter_key = {}
label_file = None
gene_file = ''
batch_key = ''
label_key = 'cell_ontology_type'
elif 'allen' in dname:
feature_file = data_dir + '/Allen_Brain/features.pkl'
label_file = data_dir + '/Allen_Brain/labels.pkl'
gene_file = data_dir + '/Allen_Brain/genes.pkl'
label_key = ''
filter_key = {}
elif 'krasnow' in dname:
tech = dname.split('_')[1]
feature_file = data_dir + '/HLCA/'+tech+'_features.pkl'
label_file = data_dir + '/HLCA/'+tech+'_labels.pkl'
gene_file = data_dir + '/HLCA/'+tech+'_genes.pkl'
label_key = ''
filter_key = {}
else:
sys.exit('wrong dname '+dname)
if feature_file.endswith('.pkl'):
return feature_file, filter_key, label_key, label_file, gene_file
elif feature_file.endswith('.h5ad'):
return feature_file, filter_key, label_key, label_file, gene_file
sys.exit('wrong file suffix')
def read_singlecell_data(dname, data_dir, ontology_dir, nsample = 500000000, read_tissue = False, exclude_non_leaf_ontology = True):
if 'microcebus' in dname:
tech = '10x'
#file = data_dir + 'TMS_official_060520/' + 'tabula-microcebus_smartseq2-10x_combined_annotated_filtered_gene-labels-correct.h5ad'
file = data_dir + 'TMS_official_060520/' + dname +'.h5ad'
filter_key={'method':tech }
batch_key = ''#original_channel
ontology_nlp_file = ontology_dir + '/cell_ontology/cl.ontology.nlp'
ontology_file = ontology_dir + '/cell_ontology/cl.ontology'
cl_obo_file = ontology_dir + '/cell_ontology/cl.obo'
if not read_tissue:
feature, label, genes = parse_h5ad(file, nsample = nsample, read_tissue = read_tissue, label_key='cell_ontology_class', batch_key = batch_key, filter_key = filter_key, cell_ontology_file = ontology_file, exclude_non_leaf_ontology = exclude_non_leaf_ontology, exclude_non_ontology = True, cl_obo_file = cl_obo_file)
else:
feature, label, genes, tissues = parse_h5ad(file, nsample = nsample, read_tissue = read_tissue, label_key='cell_ontology_class', batch_key = batch_key, filter_key = filter_key, cell_ontology_file = ontology_file, exclude_non_leaf_ontology = exclude_non_leaf_ontology, exclude_non_ontology = True, cl_obo_file = cl_obo_file)
elif 'muris' in dname:
tech = dname.split('_')[1]
file = data_dir + 'TMS_official_060520/' + 'tabula-muris-senis-'+tech+'-official-raw-obj.h5ad'
filter_key = {}
batch_key = ''
ontology_nlp_file = ontology_dir + '/cell_ontology/cl.ontology.nlp'
ontology_file = ontology_dir + '/cell_ontology/cl.ontology'
cl_obo_file = ontology_dir + '/cell_ontology/cl.obo'
if not read_tissue:
feature, label, genes = parse_h5ad(file, nsample = nsample, read_tissue = read_tissue, label_key='cell_ontology_class', batch_key = batch_key, cell_ontology_file = ontology_file, filter_key=filter_key, exclude_non_leaf_ontology = exclude_non_leaf_ontology, exclude_non_ontology = True, cl_obo_file = cl_obo_file)
else:
feature, label, genes, tissues = parse_h5ad(file, nsample = nsample, read_tissue = read_tissue, label_key='cell_ontology_class', batch_key = batch_key, cell_ontology_file = ontology_file, filter_key=filter_key, exclude_non_leaf_ontology = exclude_non_leaf_ontology, exclude_non_ontology = True, cl_obo_file = cl_obo_file)
elif 'allen_part' in dname:
feature_file = data_dir + 'Allen/matrix_part.csv'
label_file = data_dir + 'Allen/metadata.csv'
ontology_file = data_dir + 'Allen/cell_type_ontology'
ontology_nlp_file = None
feature, label, genes = parse_csv(feature_file, label_file, nsample = nsample, label_key='cell_type_accession_label', exclude_non_ontology = True, exclude_non_leaf_ontology = True, cell_ontology_file=ontology_file)
elif 'allen' in dname:
feature_file = data_dir + 'Allen/features.pkl'
label_file = data_dir + 'Allen/labels.pkl'
gene_file = data_dir + 'Allen/genes.pkl'
ontology_file = data_dir + 'Allen/cell_type_ontology'
ontology_nlp_file = None
feature, label, genes = parse_pkl(feature_file, label_file, gene_file, nsample = nsample, exclude_non_leaf_ontology = True, cell_ontology_file=ontology_file)
elif 'krasnow' in dname:
tech = dname.split('_')[1]
feature_file = data_dir + 'Krasnow/'+tech+'_features.pkl'
label_file = data_dir + 'Krasnow/'+tech+'_labels.pkl'
gene_file = data_dir + 'Krasnow/'+tech+'_genes.pkl'
ontology_file = ontology_dir + '/cell_ontology/cl.ontology'
ontology_nlp_file = ontology_dir + '/cell_ontology/cl.ontology.nlp'
cl_obo_file = ontology_dir + '/cell_ontology/cl.obo'
feature, label, genes = parse_pkl(feature_file, label_file, gene_file, nsample = nsample, exclude_non_leaf_ontology = True, cell_ontology_file=ontology_file)
else:
sys.exit('wrong dname '+dname)
if read_tissue:
return feature, label, genes, tissues, ontology_nlp_file, ontology_file
else:
return feature, label, genes, ontology_nlp_file, ontology_file
def parse_krasnow(feature_file, label_file, gene_file, seed = 1, nsample = 1000,exclude_non_leaf_ontology = True, exclude_non_ontology = True, cell_ontology_file=None):
np.random.seed(seed)
if feature_file.endswith('.pkl'):
features = pickle.load(open(feature_file, 'rb'))
labels = pickle.load(open(label_file, 'rb'))
genes = pickle.load(open(gene_file, 'rb'))
ncell, ngene = np.shape(features)
assert(ncell == len(labels))
assert(ngene == len(genes))
index = np.random.choice(ncell,min(nsample,ncell),replace=False)
features = features[index, :]
labels = labels[index]
if exclude_non_leaf_ontology:
new_ids, exclude_terms = exclude_parent_child_nodes(cell_ontology_file, labels)
#print (len(exclude_terms),'non leaf terms are excluded')
features = features[new_ids, :]
labels = labels[new_ids]
genes = [x.upper() for x in genes]
genes = np.array(genes)
return features, labels, genes
def parse_pkl(feature_file, label_file, gene_file, seed = 1, nsample = 10000000,exclude_non_leaf_ontology = True, cell_ontology_file=None):
np.random.seed(seed)
if feature_file.endswith('.pkl'):
features = pickle.load(open(feature_file, 'rb'))
labels = pickle.load(open(label_file, 'rb'))
genes = pickle.load(open(gene_file, 'rb'))
ncell, ngene = np.shape(features)
assert(ncell == len(labels))
assert(ngene == len(genes))
	index = np.random.choice(ncell, int(min(nsample, ncell)), replace=False)
features = features[index, :]
labels = labels[index]
if exclude_non_leaf_ontology:
new_ids, exclude_terms = exclude_parent_child_nodes(cell_ontology_file, labels)
#print (len(exclude_terms),'non leaf terms are excluded')
features = features[new_ids, :]
labels = labels[new_ids]
genes = [x.upper() for x in genes]
genes = np.array(genes)
return features, labels, genes
def select_high_var_genes(train_X, test_X, ngene = 200):
mat = np.vstack((train_X, test_X))
#mat = mat.todense()
gstd = np.std(mat, axis=0)
best_genes = np.argsort(gstd*-1)
best_genes = best_genes[:ngene]
return train_X[:, best_genes], test_X[:, best_genes]
def emb_cells(train_X, test_X, dim=20):
if dim==-1:
return np.log1p(train_X.todense()), np.log1p(test_X.todense())
train_X = np.log1p(train_X)
test_X = np.log1p(test_X)
train_X = preprocessing.normalize(train_X, axis=1)
test_X = preprocessing.normalize(test_X, axis=1)
ntrain = np.shape(train_X)[0]
mat = sparse.vstack((train_X, test_X))
U, s, Vt = pca(mat, k=dim) # Automatically centers.
X = U[:, range(dim)] * s[range(dim)]
return X[:ntrain,:], X[ntrain:,:]
def write_markers(fname, markers):
## Write marker genes to file
fmarker_genes = open(fname,'w')
for t in markers:
fmarker_genes.write(t+'\t')
g2pv = sorted(markers[t].items(), key=lambda item: item[1])
for g,pv in g2pv:
fmarker_genes.write(g+'(pv:'+'{:.2e}'.format(pv)+')\t')
fmarker_genes.write('\n')
fmarker_genes.close()
def calculate_markers(cell2term, cell2gene, genes, terms, topk_cells=500, only_over_expressed = True, return_k_genes = 100):
ncell, nterm = np.shape(cell2term)
ngene = np.shape(cell2gene)[1]
assert(ncell == np.shape(cell2gene)[0])
markers = collections.defaultdict(dict)
for t in range(nterm):
scs = np.argsort(cell2term[:,t])
k_bot_cells = scs[:topk_cells]
k_top_cells = scs[ncell-topk_cells:]
pv = scipy.stats.ttest_ind(cell2gene[k_top_cells,:], cell2gene[k_bot_cells,:], axis=0)[1] #* ngene
top_mean = np.mean(cell2gene[k_top_cells,:],axis=0)
bot_mean = np.mean(cell2gene[k_bot_cells,:],axis=0)
if only_over_expressed:
for g in range(ngene):
if top_mean[g] < bot_mean[g]:
pv[g] = 1.
pv_sort = list(np.argsort(pv))
#for i in range(return_k_genes):
#markers[terms[t]][genes[pv_sort[i]]] = pv[pv_sort[i]]
markers[terms[t]] = pv
for i,p in enumerate(pv):
if np.isnan(p):
pv[i] = 1.
#markers[terms[t]][str(pv_sort[i])] = pv[pv_sort[i]]
return markers
def peak_h5ad(file):
'''
	Peek at the number of cells, classes, and genes in an h5ad file
'''
x = read_h5ad(file)
#print (np.shape(x.X))
#print (x.X[:10][:10])
#print (x.obs.keys())
ncell, ngene = np.shape(x.X)
nclass = len(np.unique(x.obs['free_annotation']))
#print (np.unique(x.obs['free_annotation']))
f2name = {}
sel_cell = 0.
for i in range(ncell):
if x.obs['method'][i]!='10x':
continue
free = x.obs['free_annotation'][i]
name = x.obs['cell_ontology_class'][i]
f2name[free] = name
sel_cell += 1
#return f2name
#for key in x.obs.keys():
# print (key, np.unique(x.obs[key]))
return sel_cell, ngene, nclass
#for i in range(10):
# print (x.obs['method'][i], x.obs['channel_no_10x'][i])
#for key in x.obs.keys():
# print (key, np.unique(x.obs[key]))
#return index
def get_onotlogy_parents(GO_net, g):
term_valid = set()
ngh_GO = set()
ngh_GO.add(g)
while len(ngh_GO) > 0:
for GO in list(ngh_GO):
for GO1 in GO_net[GO]:
ngh_GO.add(GO1)
ngh_GO.remove(GO)
term_valid.add(GO)
return term_valid
def exclude_non_ontology_term(cl_obo_file, labels, label_key):
co2name, name2co = get_ontology_name(cl_obo_file)
new_labs = []
new_ids = []
if label_key!='cell_ontology_class' and label_key!='cell_ontology_id':
use_co = False
for kk in np.unique(labels):
if kk.lower().startswith('cl:'):
use_co = True
break
else:
if label_key == 'cell_ontology_class':
use_co = False
else:
use_co = True
for i in range(len(labels)):
l = labels[i]
if not use_co:
if l.lower() in name2co.keys():
new_labs.append(name2co[l.lower()])
new_ids.append(i)
else:
if l.lower() in co2name.keys():
new_labs.append(l.lower())
new_ids.append(i)
new_labs = np.array(new_labs)
new_ids = np.array(new_ids)
return new_ids, new_labs
def parse_raw_h5ad(file,seed=1,nsample=1e10,tissue_key='tissue',label_key='cell_ontology_class', read_tissue = True, batch_key = '', filter_key={}, cell_ontology_file = None, exclude_non_leaf_ontology = True, exclude_non_ontology=True, cl_obo_file = None):
np.random.seed(seed)
x = read_h5ad(file)
ncell = np.shape(x.raw.X)[0]
select_cells = set(range(ncell))
for key in filter_key:
value = filter_key[key]
select_cells = select_cells & set(np.where(np.array(x.obs[key])==value)[0])
select_cells = sorted(select_cells)
feature = x.raw.X[select_cells, :]
labels = np.array(x.obs[label_key].tolist())[select_cells]
if read_tissue:
tissues = np.array(x.obs[tissue_key].tolist())[select_cells]
if batch_key=='' or batch_key not in x.obs.keys():
batch_labels = np.ones(len(labels))
else:
batch_labels = np.array(x.obs[batch_key].tolist())[select_cells]
genes = x.var.index
ncell = len(select_cells)
if exclude_non_ontology:
new_ids, labels = exclude_non_ontology_term(cl_obo_file, labels, label_key)
feature = feature[new_ids, :]
batch_labels = batch_labels[new_ids]
if exclude_non_leaf_ontology:
new_ids, exclude_terms = exclude_parent_child_nodes(cell_ontology_file, labels)
#print (len(exclude_terms),'non leaf terms are excluded')
feature = feature[new_ids, :]
batch_labels = batch_labels[new_ids]
labels = labels[new_ids]
if read_tissue:
tissues = tissues[new_ids]
ncell = len(labels)
	index = np.random.choice(ncell, int(min(nsample, ncell)), replace=False)
batch_labels = batch_labels[index]
feature = feature[index, :] # cell by gene matrix
labels = labels[index]
if read_tissue:
tissues = tissues[index]
genes = x.var.index
corrected_feature = run_scanorama_same_genes(feature, batch_labels)
corrected_feature = corrected_feature.toarray()
genes = [x.upper() for x in genes]
genes = np.array(genes)
if read_tissue:
assert(len(tissues) == len(labels))
return corrected_feature, labels, genes, tissues
else:
return corrected_feature, labels, genes
def select_cells_based_on_keys(x, features, tissues = None, labels = None, filter_key = None):
ncell = np.shape(x.X)[0]
select_cells = set(range(ncell))
for key in filter_key:
value = filter_key[key]
select_cells = select_cells & set(np.where(np.array(x.obs[key])==value)[0])
select_cells = sorted(select_cells)
features = features[select_cells,: ]
if labels is not None:
labels = labels[select_cells]
if tissues is not None:
tissues = tissues[select_cells]
x = x[select_cells,:]
return features, labels, tissues, x
def find_marker_genes(train_X, pred_Y_all, genes, i2l, topk = 50):
cor = corr2_coeff(pred_Y_all[:,:].T, train_X[:,:].T)
cor = np.nan_to_num(cor) # cell type to gene
nl = len(i2l)
c2g = {}
for i in range(nl):
gl = np.argsort(cor[i,:]*-1)
c2g[i2l[i]] = {}
for j in range(topk):
c2g[i2l[i]][genes[gl[j]]] = cor[i, gl[j]]
return c2g, cor
def use_pretrained_model(OnClass, genes, test_X, models = []):
last_l2i = {}
last_i2l = {}
pred_Y_all_models = 0.
ngene = len(genes)
for model in models:
OnClass.BuildModel(OnClass.co2emb, ngene = ngene, use_pretrain = model)
print ('Build model finished for ',model)
pred_Y_seen, pred_Y_all, pred_label = OnClass.Predict(test_X, test_genes = genes)
print ('Predict for ',model)
pred_Y_all = pred_Y_all.T / (pred_Y_all.T.sum(axis=1)[:, np.newaxis] + 1)
pred_Y_all = pred_Y_all.T
if len(last_l2i)>0:
new_ct_ind = []
for i in range(len(last_i2l)):
l = last_i2l[i]
new_ct_ind.append(OnClass.co2i[l])
pred_Y_all = pred_Y_all[:, np.array(new_ct_ind)]
pred_Y_all_models += pred_Y_all
else:
last_l2i = OnClass.co2i
last_i2l = OnClass.i2co
pred_Y_all_models = pred_Y_all
return pred_Y_all_models
def read_data(feature_file, cell_ontology_ids, exclude_non_leaf_ontology = False, ct_mapping_key = {}, tissue_key = None, seed = 1, filter_key = None, AnnData_label_key=None, nlp_mapping = True, nlp_mapping_cutoff = 0.8, co2emb = None, label_file=None, cl_obo_file = None, cell_ontology_file = None):
np.random.seed(seed)
x = read_h5ad(feature_file)
ncell = np.shape(x.X)[0]
dataset = x.X.toarray()
genes = np.array([x.upper() for x in x.var.index])
if tissue_key is not None:
tissues = np.array(x.obs[tissue_key].tolist())
else:
tissues = None
if AnnData_label_key is None and label_file is None:
print ('no label file is provided')
labels = None
dataset, labels, tissues, x = select_cells_based_on_keys(x, dataset, labels = labels, tissues = tissues, filter_key = filter_key)
return dataset, genes, labels, tissues, x
if AnnData_label_key is not None:
labels = x.obs[AnnData_label_key].tolist()
else:
fin = open(label_file)
labels = []
for line in fin:
labels.append(line.strip())
fin.close()
labels = np.array(labels)
dataset, labels, tissues, x = select_cells_based_on_keys(x, dataset, labels = labels, tissues = tissues, filter_key = filter_key)
ind, labels, unfound_labs = map_and_select_labels(labels, cell_ontology_ids, cl_obo_file, ct_mapping_key = ct_mapping_key, nlp_mapping = nlp_mapping, co2emb = co2emb, nlp_mapping_cutoff = nlp_mapping_cutoff, cl_obo_file = cl_obo_file)
if tissue_key is not None:
tissues = tissues[ind]
dataset = dataset[ind, :]
x = x[ind, :]
if exclude_non_leaf_ontology:
new_ids, exclude_terms = exclude_parent_child_nodes(cell_ontology_file, labels)
tissues = tissues[new_ids]
dataset = dataset[new_ids, :]
labels = labels[new_ids]
x = x[new_ids, :]
ncell = np.shape(dataset)[0]
index = np.random.choice(ncell,ncell,replace=False)
dataset = dataset[index, :] # cell by gene matrix
labels = labels[index]
if tissue_key is not None:
tissues = tissues[index]
return dataset, genes, labels, tissues, x
def exact_match_co_name_2_co_id(labels, lab2co, cl_obo_file = None):
if cl_obo_file is None:
return lab2co
co2name, name2co = get_ontology_name(obo_file = cl_obo_file)
for label in labels:
if label.lower() in name2co:
lab2co[label.lower()] = name2co[label.lower()]
for name in name2co:
lab2co[name.lower()] = name2co[name]
return lab2co
def map_and_select_labels(labels, cell_ontology_ids, obo_file, ct_mapping_key = {}, nlp_mapping = True, nlp_mapping_cutoff = 0.8, co2emb = None, cl_obo_file = None):
lab2co = {}
if nlp_mapping:
if co2emb is None:
sys.exit('Please provide cell type embedding to do NLP-based mapping.')
lab2co = fine_nearest_co_using_nlp(np.unique(labels), co2emb, obo_file,nlp_mapping_cutoff = nlp_mapping_cutoff)
lab2co = exact_match_co_name_2_co_id(np.unique(labels), lab2co, cl_obo_file = cl_obo_file)
for ct in ct_mapping_key:
lab2co[ct_mapping_key[ct]] = lab2co[ct]
ind = []
lab_id = []
unfound_labs = set()
for i,l in enumerate(labels):
if l in cell_ontology_ids:
ind.append(i)
lab_id.append(l)
elif l.lower() in lab2co:
ind.append(i)
lab_id.append(lab2co[l.lower()])
else:
unfound_labs.add(l)
frac = len(ind) * 1. / len(labels)
ind = np.array(ind)
labels = np.array(lab_id)
unfound_labs = set(unfound_labs)
	warn_message = 'Warning: only %.1f%% of labels are in the Cell Ontology. The remaining cells are excluded! Consider using NLP mapping with a smaller mapping cutoff (nlp_mapping_cutoff).' % (frac * 100)
if frac < 0.5:
print (warn_message)
print ('Here are unfound labels:',unfound_labs)
return ind, labels, unfound_labs
def parse_h5ad(file,seed=1,nsample=1e10,label_key='cell_ontology_class', read_tissue = False, batch_key = '', filter_key={}, cell_ontology_file = None, exclude_non_leaf_ontology = True, exclude_non_ontology=True, cl_obo_file = None):
	'''
	Read an h5ad file.
	feature: cell-by-gene expression matrix
	label: cell ontology class of each cell
	genes: gene names (HGNC symbols)
	'''
np.random.seed(seed)
x = read_h5ad(file)
ncell = np.shape(x.X)[0]
select_cells = set(range(ncell))
for key in filter_key:
value = filter_key[key]
select_cells = select_cells & set(np.where(np.array(x.obs[key])==value)[0])
select_cells = sorted(select_cells)
feature = x.X[select_cells, :]
labels = np.array(x.obs[label_key].tolist())[select_cells]
if read_tissue:
tissues = np.array(x.obs['tissue'].tolist())[select_cells]
if batch_key=='' or batch_key not in x.obs.keys():
batch_labels = np.ones(len(labels))
else:
batch_labels = np.array(x.obs[batch_key].tolist())[select_cells]
genes = x.var.index
ncell = len(select_cells)
if exclude_non_ontology:
new_ids, labels = exclude_non_ontology_term(cl_obo_file, labels, label_key)
feature = feature[new_ids, :]
batch_labels = batch_labels[new_ids]
if exclude_non_leaf_ontology:
new_ids, exclude_terms = exclude_parent_child_nodes(cell_ontology_file, labels)
#print (len(exclude_terms),'non leaf terms are excluded')
feature = feature[new_ids, :]
batch_labels = batch_labels[new_ids]
labels = labels[new_ids]
if read_tissue:
tissues = tissues[new_ids]
ncell = len(labels)
	index = np.random.choice(ncell, int(min(nsample, ncell)), replace=False)
batch_labels = batch_labels[index]
feature = feature[index, :] # cell by gene matrix
labels = labels[index]
if read_tissue:
tissues = tissues[index]
genes = x.var.index
#corrected_feature = run_scanorama_same_genes(feature, batch_labels)
corrected_feature = feature.toarray()
genes = [x.upper() for x in genes]
genes = np.array(genes)
if read_tissue:
assert(len(tissues) == len(labels))
return corrected_feature, labels, genes, tissues
else:
return corrected_feature, labels, genes
def exclude_parent_child_nodes(cell_ontology_file,labels):
uniq_labels = np.unique(labels)
excludes = set()
net = collections.defaultdict(dict)
fin = open(cell_ontology_file)
for line in fin:
s,p = line.strip().split('\t')
net[s][p] = 1 #p is parent
fin.close()
for n in list(net.keys()):
ngh = get_ontology_parents(net, n)
for n1 in ngh:
net[n][n1] = 1
for l1 in uniq_labels:
for l2 in uniq_labels:
if l1 in net[l2] and l1!=l2: #l1 is l2 parent
excludes.add(l1)
#print (excludes)
new_ids = []
for i in range(len(labels)):
if labels[i] not in excludes:
new_ids.append(i)
new_ids = np.array(new_ids)
return new_ids, excludes
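# A minimal, hypothetical sketch (an addition, not part of the original pipeline):
# exclude_parent_child_nodes drops cells labeled with a term that is an ancestor of
# another label. It assumes np and os are imported above, as in the rest of this module.
def _demo_exclude_parent_child_nodes():
	import tempfile
	# one line per edge: child <tab> parent
	with tempfile.NamedTemporaryFile('w', suffix='.ontology', delete=False) as f:
		f.write('CL:child\tCL:parent\n')
		ontology_file = f.name
	labels = np.array(['CL:child', 'CL:parent', 'CL:child'])
	new_ids, excluded = exclude_parent_child_nodes(ontology_file, labels)
	os.remove(ontology_file)
	# the ancestor (non-leaf) term is excluded; only cells with the leaf label are kept
	assert set(excluded) == {'CL:parent'}
	assert list(new_ids) == [0, 2]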
def corr2_coeff(A, B):
	# Row-wise mean of input arrays & subtract from input arrays themselves
A_mA = A - A.mean(1)[:, None]
B_mB = B - B.mean(1)[:, None]
# Sum of squares across rows
ssA = (A_mA**2).sum(1)
ssB = (B_mB**2).sum(1)
# Finally get corr coeff
return np.dot(A_mA, B_mB.T) / np.sqrt(np.dot(ssA[:, None],ssB[None]))
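# A minimal, hypothetical sanity check (an addition, not part of the original pipeline):
# corr2_coeff(A, B) returns the Pearson correlation between every row of A and every row
# of B, matching the corresponding block of np.corrcoef.
def _demo_corr2_coeff():
	np.random.seed(0)
	A = np.random.rand(3, 10)
	B = np.random.rand(4, 10)
	C = corr2_coeff(A, B)                   # shape (3, 4)
	full = np.corrcoef(np.vstack((A, B)))   # shape (7, 7)
	assert np.allclose(C, full[:3, 3:])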
def extract_data_based_on_class(feats, labels, sel_labels):
ind = []
for l in sel_labels:
id = np.where(labels == l)[0]
ind.extend(id)
np.random.shuffle(ind)
X = feats[ind,:]
Y = labels[ind]
return X, Y, ind
def SplitTrainTest(all_X, all_Y, all_tissues = None, random_state=10, nfold_cls = 0.3, nfold_sample = 0.2, nmin_size=10):
np.random.seed(random_state)
cls = np.unique(all_Y)
cls2ct = Counter(all_Y)
ncls = len(cls)
test_cls = list(np.random.choice(cls, int(ncls * nfold_cls), replace=False))
for c in cls2ct:
if cls2ct[c] < nmin_size:
test_cls.append(c)
test_cls = np.unique(test_cls)
	#add rare classes to the test set, since they cannot be split into train and test by train_test_split(stratify=True)
train_cls = [x for x in cls if x not in test_cls]
train_cls = np.array(train_cls)
train_X, train_Y, train_ind = extract_data_based_on_class(all_X, all_Y, train_cls)
test_X, test_Y, test_ind = extract_data_based_on_class(all_X, all_Y, test_cls)
if all_tissues is not None:
train_tissues = all_tissues[train_ind]
test_tissues = all_tissues[test_ind]
train_X_train, train_X_test, train_Y_train, train_Y_test, train_tissues_train, train_tissues_test = train_test_split(
train_X, train_Y, train_tissues, test_size=nfold_sample, stratify = train_Y,random_state=random_state)
test_tissues = np.concatenate((test_tissues, train_tissues_test))
train_tissues = train_tissues_train
else:
train_X_train, train_X_test, train_Y_train, train_Y_test = train_test_split(
train_X, train_Y, test_size=nfold_sample, stratify = train_Y,random_state=random_state)
test_X = np.vstack((test_X, train_X_test))
test_Y = np.concatenate((test_Y, train_Y_test))
train_X = train_X_train
train_Y = train_Y_train
if all_tissues is not None:
return train_X, train_Y, train_tissues, test_X, test_Y, test_tissues
else:
return train_X, train_Y, test_X, test_Y
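# A minimal, hypothetical example (an addition, not part of the original pipeline):
# SplitTrainTest forces classes with fewer than nmin_size cells into the test split,
# since they cannot be stratified between train and test.
def _demo_split_train_test():
	np.random.seed(0)
	all_X = np.random.rand(45, 5)
	all_Y = np.array(['A'] * 20 + ['B'] * 20 + ['C'] * 5)  # 'C' is a rare class
	train_X, train_Y, test_X, test_Y = SplitTrainTest(all_X, all_Y, nfold_cls=0.0, nmin_size=10)
	assert 'C' not in train_Y and 'C' in test_Y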
'''
def SplitTrainTest(all_X, all_Y, all_tissues = None, random_state=10, nfold_cls = 0.3, nfold_sample = 0.2, nmin_size=10):
np.random.seed(random_state)
cls = np.unique(all_Y)
cls2ct = Counter(all_Y)
ncls = len(cls)
rare_cls = []
not_rare_cls = []
for c in cls2ct:
if cls2ct[c] < 2:
continue
elif cls2ct[c] < nmin_size:
rare_cls.append(c)
else:
not_rare_cls.append(c)
cls = np.concatenate((rare_cls, not_rare_cls))
ncls = len(cls)
rare_cls = np.array(rare_cls)
not_rare_cls = np.array(not_rare_cls)
train_non_rare_cls = list(np.random.choice(not_rare_cls, int(len(not_rare_cls) * (1 - nfold_cls)), replace=False))
train_cls = np.concatenate((train_non_rare_cls, rare_cls))
test_cls = [x for x in cls if x not in train_cls]
test_cls = np.array(test_cls)
assert(len(test_cls) + len(train_cls) == ncls)
assert(len(set(test_cls) & set(train_cls)) == 0)
#add rare class to test, since they cannot be split into train and test by using train_test_split(stratify=True)
train_X, train_Y, train_ind = extract_data_based_on_class(all_X, all_Y, train_cls)
test_X, test_Y, test_ind = extract_data_based_on_class(all_X, all_Y, test_cls)
if all_tissues is not None:
train_tissues = all_tissues[train_ind]
test_tissues = all_tissues[test_ind]
train_X_train, train_X_test, train_Y_train, train_Y_test, train_tissues_train, train_tissues_test = train_test_split(
train_X, train_Y, train_tissues, test_size=nfold_sample, stratify = train_Y,random_state=random_state)
test_tissues = np.concatenate((test_tissues, train_tissues_test))
train_tissues = train_tissues_train
else:
train_X_train, train_X_test, train_Y_train, train_Y_test = train_test_split(
train_X, train_Y, test_size=nfold_sample, stratify = train_Y,random_state=random_state)
test_X = np.vstack((test_X, train_X_test))
test_Y = np.concatenate((test_Y, train_Y_test))
train_X = train_X_train
train_Y = train_Y_train
if all_tissues is not None:
return train_X, train_Y, train_tissues, test_X, test_Y, test_tissues
else:
return train_X, train_Y, test_X, test_Y
'''
def LeaveOneOutTrainTest(all_X, all_Y, test_Y, all_tissues = None, random_state=10, nfold_sample = 0.2, nmin_size=10):
np.random.seed(random_state)
cls = np.unique(all_Y)
cls2ct = Counter(all_Y)
ncls = len(cls)
test_cls = [test_Y]
test_cls = np.unique(test_cls)
	#add rare classes to the test set, since they cannot be split into train and test by train_test_split(stratify=True)
train_cls = [x for x in cls if x not in test_cls]
train_cls = np.array(train_cls)
train_X, train_Y, train_ind = extract_data_based_on_class(all_X, all_Y, train_cls)
test_X, test_Y, test_ind = extract_data_based_on_class(all_X, all_Y, test_cls)
if all_tissues is not None:
train_tissues = all_tissues[train_ind]
test_tissues = all_tissues[test_ind]
train_X_train, train_X_test, train_Y_train, train_Y_test, train_tissues_train, train_tissues_test = train_test_split(
train_X, train_Y, train_tissues, test_size=nfold_sample, stratify = train_Y,random_state=random_state)
test_tissues = np.concatenate((test_tissues, train_tissues_test))
train_tissues = train_tissues_train
else:
train_X_train, train_X_test, train_Y_train, train_Y_test = train_test_split(
train_X, train_Y, test_size=nfold_sample, stratify = train_Y,random_state=random_state)
test_X = np.vstack((test_X, train_X_test))
test_Y = np.concatenate((test_Y, train_Y_test))
train_X = train_X_train
train_Y = train_Y_train
if all_tissues is not None:
return train_X, train_Y, train_tissues, test_X, test_Y, test_tissues
else:
return train_X, train_Y, test_X, test_Y
def renorm(X):
Y = X.copy()
Y = Y.astype(float)
ngene,nsample = Y.shape
s = np.sum(Y, axis=0)
#print s.shape()
for i in range(nsample):
if s[i]==0:
s[i] = 1
if i < ngene:
Y[i,i] = 1
else:
for j in range(ngene):
Y[j,i] = 1. / ngene
Y[:,i] = Y[:,i]/s[i]
return Y
def RandomWalkRestart(A, rst_prob, delta = 1e-4, reset=None, max_iter=50,use_torch=False,return_torch=False):
if use_torch:
device = torch.device("cuda:0")
nnode = A.shape[0]
#print nnode
if reset is None:
reset = np.eye(nnode)
nsample,nnode = reset.shape
#print nsample,nnode
P = renorm(A)
P = P.T
norm_reset = renorm(reset.T)
norm_reset = norm_reset.T
if use_torch:
norm_reset = torch.from_numpy(norm_reset).float().to(device)
P = torch.from_numpy(P).float().to(device)
Q = norm_reset
for i in range(1,max_iter):
#Q = gnp.garray(Q)
#P = gnp.garray(P)
if use_torch:
Q_new = rst_prob*norm_reset + (1-rst_prob) * torch.mm(Q, P)#.as_numpy_array()
delta = torch.norm(Q-Q_new, 2)
else:
Q_new = rst_prob*norm_reset + (1-rst_prob) * np.dot(Q, P)#.as_numpy_array()
delta = np.linalg.norm(Q-Q_new, 'fro')
Q = Q_new
#print (i,Q)
sys.stdout.flush()
if delta < 1e-4:
break
if use_torch and not return_torch:
Q = Q.cpu().numpy()
return Q
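# A minimal, hypothetical illustration (an addition, not part of the original pipeline):
# random walk with restart on a tiny symmetric graph. Each row of the returned Q is the
# restart-biased visiting distribution of one start node, so the rows sum to one.
def _demo_random_walk_restart():
	A = np.array([[0., 1., 0.],
	              [1., 0., 1.],
	              [0., 1., 0.]])
	Q = RandomWalkRestart(A, rst_prob=0.5)
	assert Q.shape == (3, 3)
	assert np.allclose(Q.sum(axis=1), 1.0)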
def DCA_vector(Q, dim):
nnode = Q.shape[0]
alpha = 1. / (nnode **2)
	Q = np.log(Q + alpha) - np.log(alpha)
	#Q = Q * Q';
	U, S, V = svds(Q, dim)
S = np.diag(S)
X = np.dot(U, np.sqrt(S))
Y = np.dot(np.sqrt(S), V)
Y = np.transpose(Y)
return X,U,S,V,Y
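# A minimal, hypothetical example (an addition, not part of the original pipeline):
# DCA_vector embeds a positive diffusion/proximity matrix Q via a truncated SVD of
# log((Q + alpha) / alpha); the Q below is produced by RandomWalkRestart defined above.
def _demo_dca_vector():
	Q = RandomWalkRestart(np.eye(4) + np.ones((4, 4)), 0.5)  # small positive diffusion matrix
	X, U, S, V, Y = DCA_vector(Q, dim=2)
	assert X.shape == (4, 2)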
def read_cell_ontology_nlp(l2i, ontology_nlp_file, ontology_nlp_emb_file):
ncls = len(l2i)
net = np.zeros((ncls, ncls))
bin_net = np.zeros((ncls, ncls))
fin = open(ontology_nlp_file)
for line in fin:
s,p,wt = line.upper().strip().split('\t')
wt = float(wt)
net[l2i[s], l2i[p]] = np.exp(wt)
net[l2i[p], l2i[s]] = np.exp(wt)
bin_net[l2i[s], l2i[p]] = 1
bin_net[l2i[p], l2i[s]] = 1
fin.close()
l2vec = {}
fin = open(ontology_nlp_emb_file)
for line in fin:
w = line.upper().strip().split('\t')
l2vec[w[0]] = []
dim = len(w)-1
for i in range(1,len(w)):
l2vec[w[0]].append(float(w[i]))
fin.close()
l2vec_mat = np.zeros((ncls, dim))
for l in l2vec:
if l.upper() not in l2i:
continue
l2vec_mat[l2i[l.upper()],:] = l2vec[l]
'''
net_sum = np.sum(net,axis=0)
for i in range(ncls):
if net_sum[i] == 0:
net[i,i] = 1.
net[:,i] /= np.sum(net[:,i])
#net = net / net.sum(axis=1)[:, np.newaxis]
'''
return net, bin_net, l2vec_mat
def GetReverseNet(onto_net):
onto_net_rev = collections.defaultdict(dict)
for a in onto_net:
for b in onto_net[a]:
onto_net_rev[b][a] = 1
return onto_net_rev
def ParseCLOnto(train_Y, ontology_nlp_file, ontology_file, co_dim=5, co_mi=3, dfs_depth = 1, combine_unseen = False, add_emb_diagonal = True, use_pretrain = None, use_seen_only = True):#
unseen_l, l2i, i2l, train_X2Y, onto_net, onto_net_mat = create_labels(train_Y, ontology_nlp_file, ontology_file, dfs_depth = dfs_depth, combine_unseen = combine_unseen)
Y_emb = emb_ontology(i2l, ontology_nlp_file, ontology_file, dim = co_dim, mi=co_mi, use_pretrain = use_pretrain, use_seen_only = True, unseen_l = unseen_l)
if add_emb_diagonal:
Y_emb = np.column_stack((np.eye(len(i2l)), Y_emb))
return unseen_l, l2i, i2l, onto_net, Y_emb, onto_net_mat
def graph_embedding(A, i2l, mi=0, dim=20,use_seen_only=True,unseen_l=None):
nl = np.shape(A)[0]
if use_seen_only:
seen_ind = []
unseen_ind = []
for i in range(nl):
if i2l[i] in unseen_l:
unseen_ind.append(i)
else:
seen_ind.append(i)
seen_ind = np.array(seen_ind)
unseen_ind = np.array(unseen_ind)
#if len(seen_ind) * 0.8 < dim:
# dim = int(len(seen_ind) * 0.8)
if mi==0 or mi == 1:
sp = graph_shortest_path(A,method='FW',directed =False)
else:
sp = RandomWalkRestart(A, 0.8)
if use_seen_only:
sp = sp[seen_ind, :]
sp = sp[:,seen_ind]
X = np.zeros((np.shape(sp)[0],dim))
svd_dim = min(dim, np.shape(sp)[0]-1)
if mi==0 or mi == 2:
X[:,:svd_dim] = svd_emb(sp, dim=svd_dim)
else:
X[:,:svd_dim] = DCA_vector(sp, dim=svd_dim)[0]
if use_seen_only:
X_ret = np.zeros((nl, dim))
X_ret[seen_ind,:] = X
else:
X_ret = X
if mi==2 or mi == 3:
sp *= -1
return sp, X_ret
def cal_ontology_emb(ontology_nlp_file, ontology_file, dim=20, mi=3, use_pretrain = None, use_seen_only = True, unseen_l = None):
if use_pretrain is None or not os.path.isfile(use_pretrain+'X.npy') or not os.path.isfile(use_pretrain+'sp.npy'):
cl_nlp = collections.defaultdict(dict)
if ontology_nlp_file is not None:
fin = open(ontology_nlp_file)
for line in fin:
s,p,wt = line.upper().strip().split('\t')
cl_nlp[s][p] = float(wt)
cl_nlp[p][s] = float(wt)
fin.close()
fin = open(ontology_file)
lset = set()
s2p = {}
for line in fin:
w = line.strip().split('\t')
s = w[0]
p = w[1]
if len(w)==2:
if p in cl_nlp and s in cl_nlp[p]:
wt = cl_nlp[p][s]
else:
wt = 1.
else:
wt = float(w[2])
if s not in s2p:
s2p[s] = {}
s2p[s][p] = wt
lset.add(s)
lset.add(p)
fin.close()
lset = np.sort(list(lset))
nl = len(lset)
l2i = dict(zip(lset, range(nl)))
i2l = dict(zip(range(nl), lset))
A = np.zeros((nl, nl))
for s in s2p:
for p in s2p[s]:
A[l2i[s], l2i[p]] = s2p[s][p]
A[l2i[p], l2i[s]] = s2p[s][p]
sp, X = graph_embedding(A, i2l, mi=mi, dim=dim, use_seen_only=use_seen_only, unseen_l=unseen_l)
if use_pretrain is not None:
i2l_file = use_pretrain+'i2l.npy'
l2i_file = use_pretrain+'l2i.npy'
X_file = use_pretrain+'X.npy'
sp_file = use_pretrain+'sp.npy'
np.save(X_file, X)
np.save(i2l_file, i2l)
np.save(l2i_file, l2i)
np.save(sp_file, sp)
else:
i2l_file = use_pretrain+'i2l.npy'
l2i_file = use_pretrain+'l2i.npy'
X_file = use_pretrain+'X.npy'
sp_file = use_pretrain+'sp.npy'
X = np.load(X_file)
i2l = np.load(i2l_file,allow_pickle=True).item()
l2i = np.load(l2i_file,allow_pickle=True).item()
sp = np.load(sp_file,allow_pickle=True)
return X, l2i, i2l, sp
def merge_26_datasets(datanames_26datasets, scan_dim = 50):
datasets, genes_list, n_cells = load_names(datanames_26datasets,verbose=False,log1p=True)
datasets, genes = merge_datasets(datasets, genes_list)
datasets_dimred, genes = process_data(datasets, genes, dimred=scan_dim)
datasets_dimred, expr_datasets = my_assemble(datasets_dimred, ds_names=datanames_26datasets, expr_datasets = datasets, sigma=150)
datasets_dimred = sparse.vstack(expr_datasets).toarray()
return datasets_dimred, genes
def emb_ontology(i2l, ontology_nlp_file, ontology_file, dim=20, mi=0, use_pretrain = None, use_seen_only = True, unseen_l = None):
X, ont_l2i, ont_i2l, A = cal_ontology_emb( ontology_nlp_file, ontology_file, dim=dim, mi=mi, use_pretrain = use_pretrain, use_seen_only = True, unseen_l = unseen_l)
i2emb = np.zeros((len(i2l),dim))
nl = len(i2l)
for i in range(nl):
ant = i2l[i]
if ant not in ont_l2i:
print (ant, ont_l2i)
assert('xxx' in ant.lower() or 'nan' in ant.lower())
continue
i2emb[i,:] = X[ont_l2i[ant],:]
'''
AA = np.zeros((nl, nl))
for i in range(nl):
for j in range(nl):
anti, antj = i2l[i], i2l[j]
if anti in ont_l2i and antj in ont_l2i:
AA[i,j] = A[ont_l2i[anti],ont_l2i[antj]]
'''
return i2emb
'''
def get_ontology_parents(GO_net, g):
term_valid = set()
ngh_GO = set()
ngh_GO.add(g)
while len(ngh_GO) > 0:
for GO in list(ngh_GO):
for GO1 in GO_net[GO]:
ngh_GO.add(GO1)
ngh_GO.remove(GO)
term_valid.add(GO)
return term_valid
'''
def get_ontology_parents(GO_net, g, dfs_depth=100):
term_valid = set()
ngh_GO = set()
ngh_GO.add(g)
depth = {}
depth[g] = 0
while len(ngh_GO) > 0:
for GO in list(ngh_GO):
for GO1 in GO_net[GO]:
ngh_GO.add(GO1)
depth[GO1] = depth[GO] + 1
ngh_GO.remove(GO)
if depth[GO] < dfs_depth:
term_valid.add(GO)
return term_valid
def create_labels(train_Y, ontology_nlp_file, ontology_file, combine_unseen = False, dfs_depth = 1000):
fin = open(ontology_file)
lset = set()
for line in fin:
s,p = line.strip().split('\t')
lset.add(s)
lset.add(p)
fin.close()
seen_l = sorted(np.unique(train_Y))
unseen_l = sorted(lset - set(train_Y))
ys = np.concatenate((seen_l, unseen_l))
i2l = {}
l2i = {}
for l in ys:
nl = len(i2l)
col = l
if combine_unseen and l in unseen_l:
nl = len(seen_l)
l2i[col] = nl
i2l[nl] = col
continue
l2i[col] = nl
i2l[nl] = col
train_Y = [l2i[y] for y in train_Y]
train_X2Y = ConvertLabels(train_Y, ncls = len(i2l))
onto_net, onto_net_mat = read_ontology(l2i, ontology_nlp_file, ontology_file, dfs_depth = dfs_depth)
return unseen_l, l2i, i2l, train_X2Y, onto_net, onto_net_mat
def query_depth_ontology(net, node, root='cl:0000000'):
depth = 0
while node != root:
if len(net[node]) == 0:
print (node)
node = sorted(list(net[node].keys()))[0]
depth += 1
if depth>100:
			sys.exit('root not found')
return depth
def read_ontology(l2i, ontology_nlp_file, ontology_file, dfs_depth = 1000):
nl = len(l2i)
net = collections.defaultdict(dict)
net_mat = np.zeros((nl,nl))
fin = open(ontology_file)
for line in fin:
s,p = line.strip().split('\t')
si = l2i[s]
pi = l2i[p]
net[si][pi] = 1
net_mat[si][pi] = 1
fin.close()
for n in range(nl):
ngh = get_ontology_parents(net, n, dfs_depth = dfs_depth)
net[n][n] = 1
for n1 in ngh:
net[n][n1] = 1
return net, net_mat
def extract_label_propagate_tree(onto_net, ncls):
tree = np.zeros((ncls,ncls))
for n1 in onto_net:
for n2 in onto_net[n1]:
tree[n1,n2] = 1
return tree
def ConvertLabels(labels, ncls=-1):
ncell = np.shape(labels)[0]
if len(np.shape(labels)) ==1 :
#bin to mat
if ncls == -1:
ncls = np.max(labels)
mat = np.zeros((ncell, ncls))
for i in range(ncell):
mat[i, labels[i]] = 1
return mat
else:
if ncls == -1:
ncls = np.shape(labels)[1]
vec = np.zeros(ncell)
for i in range(ncell):
ind = np.where(labels[i,:]!=0)[0]
			assert(len(ind)<=1) # not multilabel classification
if len(ind)==0:
vec[i] = -1
else:
vec[i] = ind[0]
return vec
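# A minimal, hypothetical round-trip check (an addition, not part of the original pipeline):
# ConvertLabels converts a label vector to a one-hot matrix and back. Pass ncls explicitly,
# since the default (-1) infers it from np.max(labels).
def _demo_convert_labels():
	vec = np.array([0, 2, 1])
	mat = ConvertLabels(vec, ncls=3)      # one-hot, shape (3, 3)
	back = ConvertLabels(mat, ncls=3)     # back to a label vector
	assert mat.shape == (3, 3)
	assert np.array_equal(back, vec)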
def MapLabel2CL(test_Y, l2i):
test_Y_new = np.array([l2i[y] for y in test_Y])
return test_Y_new
def get_ontology_name(obo_file, lower=True):
fin = open(obo_file)
co2name = {}
name2co = {}
tag_is_syn = {}
for line in fin:
if line.startswith('id: '):
co = line.strip().split('id: ')[1]
if line.startswith('name: '):
if lower:
name = line.strip().lower().split('name: ')[1]
else:
name = line.strip().split('name: ')[1]
co2name[co] = name
name2co[name] = co
if line.startswith('synonym: '):
if lower:
syn = line.strip().lower().split('synonym: "')[1].split('" ')[0]
else:
syn = line.strip().split('synonym: "')[1].split('" ')[0]
if syn in name2co:
continue
name2co[syn] = co
fin.close()
return co2name, name2co
def knn_ngh(Y2Y):
ind = np.argsort(Y2Y*-1, axis=1)
return ind
def extend_prediction_2unseen_normalize(pred_Y_seen, onto_net_rwr, nseen, ratio=200):
sys.exit(-1)#NOT USED
ncls = np.shape(onto_net_rwr)[0]
onto_net_rwr = onto_net_rwr - np.tile(np.mean(onto_net_rwr, axis = 1), (ncls, 1))
pred_Y_seen_norm = pred_Y_seen / pred_Y_seen.sum(axis=1)[:, np.newaxis]
pred_Y_all = np.dot(pred_Y_seen_norm, onto_net_rwr[:nseen,:])
pred_Y_all[:,:nseen] = normalize(pred_Y_all[:,:nseen],norm='l1',axis=1)
pred_Y_all[:,nseen:] = normalize(pred_Y_all[:,nseen:],norm='l1',axis=1) * ratio
return pred_Y_all
def create_nlp_networks(l2i, onto_net, cls2cls, ontology_nlp_file, ontology_nlp_emb_file):
ncls = np.shape(cls2cls)[0]
_, _, onto_nlp_emb = read_cell_ontology_nlp(l2i, ontology_nlp_file = ontology_nlp_file, ontology_nlp_emb_file = ontology_nlp_emb_file)
onto_net_nlp_all_pairs = (cosine_similarity(onto_nlp_emb) + 1 ) /2#1 - spatial.distance.cosine(onto_nlp_emb, onto_nlp_emb)
onto_net_nlp = np.zeros((ncls, ncls))
onto_net_bin = np.zeros((ncls, ncls))
stack_net_bin = np.zeros((ncls, ncls))
stack_net_nlp = np.zeros((ncls, ncls))
for n1 in onto_net:
for n2 in onto_net[n1]:
if n1==n2:
continue
stack_net_nlp[n2,n1] = onto_net_nlp_all_pairs[n2, n1]
stack_net_nlp[n1,n2] = onto_net_nlp_all_pairs[n1, n2]
stack_net_bin[n1,n2] = 1
stack_net_bin[n2,n1] = 1
for n1 in range(ncls):
for n2 in range(ncls):
if cls2cls[n1,n2] == 1 or cls2cls[n2,n1] == 1:
onto_net_nlp[n1,n2] = onto_net_nlp_all_pairs[n1, n2]
onto_net_nlp[n2,n1] = onto_net_nlp_all_pairs[n2, n1]
onto_net_bin[n1,n2] = 1
onto_net_bin[n2,n1] = 1
return onto_net_nlp, onto_net_bin, stack_net_nlp, stack_net_bin, onto_net_nlp_all_pairs
def create_consensus_networks(rsts, onto_net_mat, onto_net_nlp_all_pairs, cls2cls, diss=[2,3], thress=[1,0.8]):
cls2cls_sp = graph_shortest_path(cls2cls,method='FW',directed =False)
ncls = np.shape(onto_net_mat)[0]
networks = []
for rst in rsts:
for dis in diss:
for thres in thress:
use_net = np.copy(onto_net_mat)
use_net[(cls2cls_sp<=dis)&(onto_net_nlp_all_pairs > thres)] = onto_net_nlp_all_pairs[(cls2cls_sp<=dis)&(onto_net_nlp_all_pairs > thres)]
onto_net_rwr = RandomWalkRestart(use_net, rst)
networks.append(onto_net_rwr)
return networks
def extend_prediction_2unseen(pred_Y_seen, networks, nseen, ratio=200, use_normalize=False):
if not isinstance(networks, list):
networks = [networks]
pred_Y_all_totoal = 0.
for onto_net_rwr in networks:
if use_normalize:
onto_net_rwr = onto_net_rwr - np.tile(np.mean(onto_net_rwr, axis = 1), (np.shape(onto_net_rwr)[0], 1))
pred_Y_seen_norm = pred_Y_seen / pred_Y_seen.sum(axis=1)[:, np.newaxis]
pred_Y_all = np.dot(pred_Y_seen_norm, onto_net_rwr[:nseen,:])
pred_Y_all[:,:nseen] = normalize(pred_Y_all[:,:nseen],norm='l1',axis=1)
pred_Y_all[:,nseen:] = normalize(pred_Y_all[:,nseen:],norm='l1',axis=1) * ratio
pred_Y_all_totoal += pred_Y_all
return pred_Y_all_totoal
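# A minimal, hypothetical shape sketch (an addition, not part of the original pipeline):
# extend_prediction_2unseen propagates the classifier's seen-class scores to all ontology
# classes through one or more random-walk networks over the cell ontology.
def _demo_extend_prediction_2unseen():
	pred_Y_seen = np.array([[0.9, 0.1],
	                        [0.2, 0.8]])                    # 2 cells x 2 seen classes
	onto_net_rwr = RandomWalkRestart(np.ones((3, 3)), 0.7)  # 3 classes: 2 seen + 1 unseen
	pred_Y_all = extend_prediction_2unseen(pred_Y_seen, onto_net_rwr, nseen=2)
	assert pred_Y_all.shape == (2, 3)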
def my_auprc(y_true, y_pred):
precision, recall, thresholds = precision_recall_curve(y_true, y_pred)
area = auc(recall, precision)
return area
def sampled_auprc(truths,preds):
pos = np.where(truths == 1)[0]
neg = np.where(truths == 0)[0]
assert(len(pos) + len(neg) == len(truths))
nneg = len(neg)
npos = len(pos)
	select_neg = neg[np.random.choice(nneg, npos * 3, replace=True)]
	select_ind = np.concatenate((pos, select_neg))
return average_precision_score(truths[select_ind], preds[select_ind])
def evaluate(Y_pred_mat, Y_truth_vec, unseen_l, nseen, Y_truth_bin_mat = None, Y_pred_vec = None, Y_ind=None, Y_net = None, Y_net_mat = None, write_screen = True, write_to_file = None, combine_unseen = False, prefix='', metrics = ['AUROC(seen)','AUPRC(seen)','AUROC','AUPRC','AUROC(unseen)', 'AUPRC(unseen)','Accuracy@3','Accuracy@5']):
#preprocess scores
unseen_l = np.array(list(unseen_l))
ncell,nclass = np.shape(Y_pred_mat)
nseen = nclass - len(unseen_l)
if Y_ind is not None:
non_Y_ind = np.array(list(set(range(nclass)) - set(Y_ind)))
if len(non_Y_ind)>0:
Y_pred_mat[:,non_Y_ind] = -1 * np.inf
if Y_pred_vec is None:
Y_pred_vec = np.argmax(Y_pred_mat, axis=1)
if Y_truth_bin_mat is None:
Y_truth_bin_mat = ConvertLabels(Y_truth_vec, nclass)
Y_pred_bin_mat = ConvertLabels(Y_pred_vec, nclass)
#class-based metrics
class_auc_macro = np.full(nclass, np.nan)
class_auprc_macro = np.full(nclass, np.nan)
class_f1 = np.full(nclass, np.nan)
for i in range(nclass):
if len(np.unique(Y_truth_bin_mat[:,i]))==2 and np.sum(Y_truth_bin_mat[:,i])>=10:
class_auc_macro[i] = roc_auc_score(Y_truth_bin_mat[:,i], Y_pred_mat[:,i])
class_auprc_macro[i] = sampled_auprc(Y_truth_bin_mat[:,i], Y_pred_mat[:,i])
class_f1[i] = f1_score(Y_truth_bin_mat[:,i], Y_pred_bin_mat[:,i])
#sample-based metrics
extend_acc, extend_Y = extend_accuracy(Y_truth_vec, Y_pred_vec, Y_net, unseen_l)
kappa = cohen_kappa_score(Y_pred_vec, Y_truth_vec)
extend_kappa = cohen_kappa_score(extend_Y, Y_truth_vec)
accuracy = accuracy_score(Y_truth_vec, Y_pred_vec)
prec_at_k_3 = precision_at_k(Y_pred_mat, Y_truth_vec, 3)
prec_at_k_5 = precision_at_k(Y_pred_mat, Y_truth_vec, 5)
#print ([(x,np.sum(Y_truth_bin_mat[:,unseen_l[i]])) for i,x in enumerate(class_auprc_macro[unseen_l]) if not np.isnan(x)])
seen_auc_macro = np.nanmean(class_auc_macro[:nseen])
seen_auprc_macro = np.nanmean(class_auprc_macro[:nseen])
seen_f1 = np.nanmean(class_f1[:nseen])
if len(unseen_l) == 0:
unseen_auc_macro = 0
unseen_auprc_macro = 0
unseen_f1 = 0
else:
unseen_auc_macro = np.nanmean(class_auc_macro[unseen_l])
#unseen_auprc_macro = np.nanmean([x for i,x in enumerate(class_auprc_macro[unseen_l]) if np.sum(Y_truth_bin_mat[:,unseen_l[i]])>100])#
unseen_auprc_macro = np.nanmean(class_auprc_macro[unseen_l])
unseen_f1 = np.nanmean(class_f1[unseen_l])
#metrics = ['AUROC','AUPRC','unseen_AUROC', 'unseen_AUPRC','Cohens Kappa','Accuracy@3','Accuracy@5']
#res_v = [seen_auc_macro, seen_auprc_macro, np.nanmean(class_auc_macro), np.nanmean(class_auprc_macro), extend_kappa, prec_at_k_3, prec_at_k_5, unseen_auc_macro, unseen_auprc_macro]
all_v = {'AUROC':np.nanmean(class_auc_macro), 'AUPRC': np.nanmean(class_auprc_macro), 'AUROC(seen)':seen_auc_macro, 'AUPRC(seen)': seen_auprc_macro, 'AUROC(unseen)':unseen_auc_macro, 'AUPRC(unseen)': unseen_auprc_macro, 'Cohens Kappa':extend_kappa, 'Accuracy@3':prec_at_k_3, 'Accuracy@5':prec_at_k_5}
res_v = {}
for metric in metrics:
res_v[metric] = all_v[metric]
#res_v = [seen_auc_macro, seen_auprc_macro, seen_f1, np.nanmean(class_auc_macro), np.nanmean(class_auprc_macro), np.nanmean(class_f1), unseen_auc_macro, unseen_auprc_macro, unseen_f1]
if write_screen:
print (prefix, end='\t')
for v in metrics:
print ('%.4f'%res_v[v], end='\t')
print ('')
sys.stdout.flush()
if write_to_file is not None:
write_to_file.write(prefix+'\t')
for v in metrics:
write_to_file.write('%.2f\t'%res_v[v])
write_to_file.write('\n')
write_to_file.flush()
return res_v
def precision_at_k(pred,truth,k):
ncell, nclass = np.shape(pred)
hit = 0.
for i in range(ncell):
x = np.argsort(pred[i,:]*-1)
rank = np.where(x==truth[i])[0][0]
if rank < k:
hit += 1.
prec = hit / ncell
return prec
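# A minimal, hypothetical sanity check (an addition, not part of the original pipeline):
# precision_at_k (reported as Accuracy@k in evaluate) is the fraction of cells whose true
# class is ranked within the top k predictions.
def _demo_precision_at_k():
	pred = np.array([[0.9, 0.05, 0.05],
	                 [0.5, 0.3, 0.2]])
	truth = np.array([0, 2])
	assert precision_at_k(pred, truth, 1) == 0.5   # only the first cell is ranked 1st
	assert precision_at_k(pred, truth, 3) == 1.0   # both true classes are within the top 3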
def write_anndata_data(test_label, test_AnnData, cl_obo_file, label_name):
if len(np.shape(test_label))==2:
test_label = np.argmax(test_label, axis = 1)
co2name, name2co = get_ontology_name(cl_obo_file)
x = test_AnnData
ncell = np.shape(x.X)[0]
print (ncell, len(test_label))
assert(ncell == len(test_label))
test_name = []
test_label_id = []
for i in range(ncell):
		xx = i2tp[test_label[i]] # i2tp: index -> ontology term id mapping; it is not defined in this function and must be available in the enclosing scope
test_label_id.append(xx)
test_name.append(co2name[xx])
test_name = np.array(test_name)
test_label_id = np.array(test_label_id)
x.obs['OnClass_annotation_ontology_ID'] = test_label
x.obs['OnClass_annotation_ontology_name'] = test_name
return x
def read_type2genes(g2i, marker_gene,cl_obo_file):
co2name, name2co = get_ontology_name(cl_obo_file)
c2cnew = {}
c2cnew['cd4+ t cell'] = 'CD4-positive, CXCR3-negative, CCR6-negative, alpha-beta T cell'.lower()
c2cnew['chromaffin cells (enterendocrine)'] = 'chromaffin cell'.lower()
c2cnew['mature NK T cell'] = 'mature NK T cell'.lower()
c2cnew['cd8+ t cell'] = 'CD8-positive, alpha-beta cytotoxic T cell'.lower()
fin = open(marker_gene)
fin.readline()
tp2genes = {}
unfound = set()
for line in fin:
w = line.strip().split('\t')
c1 = w[1].lower()
c2 = w[2].lower()
genes = []
for ww in w[8:]:
if ww.upper() in g2i:
genes.append(ww.upper())
if len(genes)==0:
continue
if c1.endswith('s') and c1[:-1] in name2co:
c1 = c1[:-1]
if c2.endswith('s') and c2[:-1] in name2co:
c2 = c2[:-1]
if c1 + ' cell' in name2co:
c1 +=' cell'
if c2 + ' cell' in name2co:
c2 +=' cell'
if c1 in c2cnew:
c1 = c2cnew[c1]
if c2 in c2cnew:
c2 = c2cnew[c2]
if c1 in name2co:
tp2genes[name2co[c1]] = genes
else:
unfound.add(c1)
if c2 in name2co:
tp2genes[name2co[c2]] = genes
else:
unfound.add(c2)
fin.close()
return tp2genes
def extend_accuracy(test_Y, test_Y_pred_vec, Y_net, unseen_l):
unseen_l = set(unseen_l)
n = len(test_Y)
acc = 0.
ntmp = 0.
new_pred = []
for i in range(n):
if test_Y[i] in unseen_l and test_Y_pred_vec[i] in unseen_l:
if test_Y_pred_vec[i] in Y_net[test_Y[i]] and Y_net[test_Y[i]][test_Y_pred_vec[i]] == 1:
acc += 1
ntmp += 1
new_pred.append(test_Y[i])
else:
new_pred.append(test_Y_pred_vec[i])
else:
if test_Y[i] == test_Y_pred_vec[i]:
acc += 1
new_pred.append(test_Y_pred_vec[i])
new_pred = np.array(new_pred)
return acc/n, new_pred
def run_scanorama_multiply_datasets(datasets, genes, scan_dim = 100):
sparse_datasets = []
for dataset in datasets:
sparse_datasets.append(sparse.csr_matrix(dataset))
datasets, genes = merge_datasets(sparse_datasets, genes)
datasets_dimred, genes = process_data(datasets, genes, dimred=scan_dim)
datasets_dimred, sparse_dataset_correct = my_assemble(datasets_dimred, expr_datasets = datasets, sigma=150)
dataset_correct = []
for sp in sparse_dataset_correct:
dataset_correct.append(np.power(sp.todense(), 2))
return datasets_dimred, dataset_correct
def run_scanorama_same_genes(features, batch_labels, scan_dim = 100):
batchs = np.unique(batch_labels)
nbatch = len(batchs)
if nbatch == 1:
return features
ncell, ngene = np.shape(features)
assert(ncell == len(batch_labels))
genes = []
datasets = []
indexs = []
for i in range(nbatch):
genes.append(np.array(range(ngene)))
index = np.where(batch_labels == batchs[i])[0]
dataset = features[index,:]
print (batchs[i], np.shape(dataset))
datasets.append(dataset)
indexs.append(index)
_, dataset_correct = run_scanorama_multiply_datasets(datasets, genes, scan_dim = scan_dim)
	assert(len(dataset_correct) == nbatch)
for i in range(nbatch):
features[indexs[i],:] = dataset_correct[i]
return features
def my_assemble(datasets, verbose=VERBOSE, view_match=False, knn=KNN,
sigma=SIGMA, approx=APPROX, alpha=ALPHA, expr_datasets=None,
ds_names=None, batch_size=None,
geosketch=False, geosketch_max=20000, alignments=None, matches=None): # reimplement part of scanorama to return the corrected expression (instead of low-d vectors)
	#This code is copied from Scanorama in order to output the corrected expression. Please check their tool and cite their paper if you use this function.
if len(datasets) == 1:
return datasets
if alignments is None and matches is None:
alignments, matches = find_alignments(
datasets, knn=knn, approx=approx, alpha=alpha, verbose=verbose,
)
ds_assembled = {}
panoramas = []
ct = 0
for i, j in alignments:
ct += 1
print (ct)
sys.stdout.flush()
if verbose:
if ds_names is None:
print('Processing datasets {}'.format((i, j)))
else:
print('Processing datasets {} <=> {}'.
format(ds_names[i], ds_names[j]))
# Only consider a dataset a fixed amount of times.
if not i in ds_assembled:
ds_assembled[i] = 0
ds_assembled[i] += 1
if not j in ds_assembled:
ds_assembled[j] = 0
ds_assembled[j] += 1
if ds_assembled[i] > 3 and ds_assembled[j] > 3:
continue
# See if datasets are involved in any current panoramas.
panoramas_i = [ panoramas[p] for p in range(len(panoramas))
if i in panoramas[p] ]
assert(len(panoramas_i) <= 1)
panoramas_j = [ panoramas[p] for p in range(len(panoramas))
if j in panoramas[p] ]
assert(len(panoramas_j) <= 1)
if len(panoramas_i) == 0 and len(panoramas_j) == 0:
if datasets[i].shape[0] < datasets[j].shape[0]:
i, j = j, i
panoramas.append([ i ])
panoramas_i = [ panoramas[-1] ]
# Map dataset i to panorama j.
if len(panoramas_i) == 0:
curr_ds = datasets[i]
curr_ref = np.concatenate([ datasets[p] for p in panoramas_j[0] ])
match = []
base = 0
for p in panoramas_j[0]:
if i < p and (i, p) in matches:
match.extend([ (a, b + base) for a, b in matches[(i, p)] ])
elif i > p and (p, i) in matches:
match.extend([ (b, a + base) for a, b in matches[(p, i)] ])
base += datasets[p].shape[0]
ds_ind = [ a for a, _ in match ]
ref_ind = [ b for _, b in match ]
bias = transform(curr_ds, curr_ref, ds_ind, ref_ind, sigma=sigma,
batch_size=batch_size)
datasets[i] = curr_ds + bias
if expr_datasets:
curr_ds = expr_datasets[i]
curr_ref = vstack([ expr_datasets[p]
for p in panoramas_j[0] ])
bias = transform(curr_ds, curr_ref, ds_ind, ref_ind,
sigma=sigma, cn=True, batch_size=batch_size)
expr_datasets[i] = curr_ds + bias
panoramas_j[0].append(i)
# Map dataset j to panorama i.
elif len(panoramas_j) == 0:
curr_ds = datasets[j]
curr_ref = np.concatenate([ datasets[p] for p in panoramas_i[0] ])
match = []
base = 0
for p in panoramas_i[0]:
if j < p and (j, p) in matches:
match.extend([ (a, b + base) for a, b in matches[(j, p)] ])
elif j > p and (p, j) in matches:
match.extend([ (b, a + base) for a, b in matches[(p, j)] ])
base += datasets[p].shape[0]
ds_ind = [ a for a, _ in match ]
ref_ind = [ b for _, b in match ]
bias = transform(curr_ds, curr_ref, ds_ind, ref_ind, sigma=sigma,
batch_size=batch_size)
datasets[j] = curr_ds + bias
if expr_datasets:
curr_ds = expr_datasets[j]
curr_ref = vstack([ expr_datasets[p]
for p in panoramas_i[0] ])
bias = transform(curr_ds, curr_ref, ds_ind, ref_ind, sigma=sigma,
cn=True, batch_size=batch_size)
expr_datasets[j] = curr_ds + bias
panoramas_i[0].append(j)
# Merge two panoramas together.
else:
curr_ds = np.concatenate([ datasets[p] for p in panoramas_i[0] ])
curr_ref = np.concatenate([ datasets[p] for p in panoramas_j[0] ])
# Find base indices into each panorama.
base_i = 0
for p in panoramas_i[0]:
if p == i: break
base_i += datasets[p].shape[0]
base_j = 0
for p in panoramas_j[0]:
if p == j: break
base_j += datasets[p].shape[0]
# Find matching indices.
match = []
base = 0
for p in panoramas_i[0]:
if p == i and j < p and (j, p) in matches:
match.extend([ (b + base, a + base_j)
for a, b in matches[(j, p)] ])
elif p == i and j > p and (p, j) in matches:
match.extend([ (a + base, b + base_j)
for a, b in matches[(p, j)] ])
base += datasets[p].shape[0]
base = 0
for p in panoramas_j[0]:
if p == j and i < p and (i, p) in matches:
match.extend([ (a + base_i, b + base)
for a, b in matches[(i, p)] ])
elif p == j and i > p and (p, i) in matches:
match.extend([ (b + base_i, a + base)
for a, b in matches[(p, i)] ])
base += datasets[p].shape[0]
ds_ind = [ a for a, _ in match ]
ref_ind = [ b for _, b in match ]
# Apply transformation to entire panorama.
bias = transform(curr_ds, curr_ref, ds_ind, ref_ind, sigma=sigma,
batch_size=batch_size)
curr_ds += bias
base = 0
for p in panoramas_i[0]:
n_cells = datasets[p].shape[0]
datasets[p] = curr_ds[base:(base + n_cells), :]
base += n_cells
if not expr_datasets is None:
curr_ds = vstack([ expr_datasets[p]
for p in panoramas_i[0] ])
curr_ref = vstack([ expr_datasets[p]
for p in panoramas_j[0] ])
bias = transform(curr_ds, curr_ref, ds_ind, ref_ind,
sigma=sigma, cn=True, batch_size=batch_size)
curr_ds += bias
base = 0
for p in panoramas_i[0]:
n_cells = expr_datasets[p].shape[0]
expr_datasets[p] = curr_ds[base:(base + n_cells), :]
base += n_cells
# Merge panoramas i and j and delete one.
if panoramas_i[0] != panoramas_j[0]:
panoramas_i[0] += panoramas_j[0]
panoramas.remove(panoramas_j[0])
# Visualize.
if view_match:
plot_mapping(curr_ds, curr_ref, ds_ind, ref_ind)
return datasets, expr_datasets
|
import endpoints
from google.appengine.ext import ndb
def get_by_urlsafe(urlsafe, model):
try:
key = ndb.Key(urlsafe=urlsafe)
except TypeError:
raise endpoints.BadRequestException('Invalid Key')
    except Exception as e:
if e.__class__.__name__ == 'ProtocolBufferDecodeError':
raise endpoints.BadRequestException('Invalid Key')
else:
raise
entity = key.get()
if not entity:
return None
if not isinstance(entity, model):
raise ValueError('Incorrect Kind')
return entity
def check_placement(ship_placement, ship_number):
"""Returns true if ship placed inside the board"""
ships_position_check = [0,10,20,30,40,50,60,70,80,90]
if ship_placement > 99 or ship_placement < 0:
return False
if ship_number in [2,3,4]:
if ship_placement in ships_position_check:
return False
return True
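# A minimal, hypothetical usage sketch (an addition, not part of the original API):
# positions are cells 0-99 on a 10x10 board; for ships numbered 2-4 the positions
# 0, 10, ..., 90 are additionally rejected.
def _demo_check_placement():
    assert check_placement(55, 1)
    assert not check_placement(120, 1)   # off the board
    assert not check_placement(30, 3)    # 30 is in the rejected positions for ship 3
    assert check_placement(31, 3)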
|
import glob
import os
import warnings
from datetime import datetime
from copy import deepcopy
import numpy as np
import pyedflib
import scipy.io as sio
from config import cfg
from thirdparty.cerebus import NsxFile, NevFile
from thirdparty.nex import Reader as NexReader
from .utils import find_nearest_time
def _load_neuracle(data_dir):
"""
neuracle file loader
:param data_dir: root data dir for the experiment
:return:
data: ndarray, (channels, timesteps)
ch_name: list, name of channels
timestamp: list, index of trigger
"""
f = {
'data': os.path.join(data_dir, 'data.bdf'),
'evt': os.path.join(data_dir, 'evt.bdf')
}
# read data
f_data = pyedflib.EdfReader(f['data'])
ch_names = f_data.getSignalLabels()
data = np.array([f_data.readSignal(i) for i in range(f_data.signals_in_file)])
    # sample frequencies
sfreq = f_data.getSampleFrequencies()
assert np.unique(sfreq).size == 1
if cfg.amp_info.samplerate != sfreq[0]:
        warnings.warn('Sample rate in config file does not match the one recorded in the data file')
cfg.amp_info.samplerate = int(sfreq[0])
# read event
f_evt = pyedflib.EdfReader(f['evt'])
event, _, _ = f_evt.readAnnotations()
event = list(map(lambda x: int(x * cfg.amp_info.samplerate), event))
return data, ch_names, event
def _load_usbamp(data_dir):
"""
USBAmp file loader
:param data_dir: root dir
:return:
data: ndarray, (channels, timesteps)
ch_name: list, name of channels
timestamp: list, index of trigger
"""
# edf USBAmp
files = glob.glob(os.path.join(data_dir, '*.edf'))
assert len(files) == 1
f = pyedflib.EdfReader(files[0])
ch_names = f.getSignalLabels()
# filter channel
# find trigger channel
triggers = []
sig = []
for i, chan in enumerate(ch_names):
if 'trigger' in chan:
triggers.append(i)
else:
sig.append(i)
sigbuf = np.array([f.readSignal(i) for i in range(len(ch_names))])
ch_names = [ch_names[i] for i in sig]
trigger = -1
for ch_ind in triggers:
if not np.allclose(np.diff(sigbuf[ch_ind]), 0):
trigger = ch_ind
break
diff = np.diff(sigbuf[trigger])
timestamp = np.nonzero(np.logical_and(diff <= 1, diff >= 0.2))[0].tolist()
data = sigbuf[sig]
return data, ch_names, timestamp
def _load_nex(data_dir):
"""
nex file loader
:param data_dir:
:return:
data: ndarray, shape (ch, timesteps)
ch_names: list, name of each channel
timestamps: list, stimulation onset
"""
files = glob.glob(os.path.join(data_dir, '*.nex'))
assert len(files) == 1
reader = NexReader(useNumpy=True)
data = reader.ReadNexFile(files[0])
var = data['Variables']
ch_names = []
trigger_ch = None
con_data = []
samplerate = cfg.amp_info.samplerate
for i, ch in enumerate(var):
if 'CH' in ch['Header']['Name']:
ch_names.append(ch['Header']['Name'])
con_data.append(ch['ContinuousValues'])
samplerate = ch['Header']['SamplingRate']
if 'digin' == ch['Header']['Name']:
trigger_ch = i
if samplerate != cfg.amp_info.samplerate:
        warnings.warn('Sample rate in config file does not match the data file; recorded value is %d' % samplerate)
assert trigger_ch is not None
timestamp = np.round(data['Variables'][trigger_ch]['Timestamps'] * samplerate).astype(np.int32).tolist()
con_data = np.array(con_data)
return con_data, ch_names, timestamp
def _load_cerebus(data_dir):
# search data_dir
nsx_files = glob.glob(os.path.join(data_dir, '*.ns*'))
nev_files = glob.glob(os.path.join(data_dir, '*.nev'))
assert len(nsx_files) == len(nev_files) == 1
# loading
f_data = NsxFile(nsx_files[0])
f_evt = NevFile(nev_files[0])
data = f_data.getdata()
evt = f_evt.getdata()
f_data.close()
f_evt.close()
# some basic information
samplerate = data['samp_per_s']
if cfg.amp_info.samplerate != samplerate:
        warnings.warn('Sample rate in config file does not match the one recorded in the data file')
cfg.amp_info.samplerate = samplerate
timestampresolution = f_evt.basic_header['TimeStampResolution']
ch_names = []
for info in f_data.extended_headers:
ch_names.append(info['ElectrodeLabel'])
event = evt['dig_events']['TimeStamps'][0]
event = list(map(lambda x: int(x / timestampresolution * cfg.amp_info.samplerate), event))
return data['data'], ch_names, event
class Dataset:
"""
for loading data and event order.
"""
data_format = {
'nex': _load_nex,
'ns3': _load_cerebus,
'nev': _load_cerebus,
'edf': _load_usbamp,
'bdf': _load_neuracle
}
def __init__(self, subject, date=None, loaddata=True):
self.subject = subject
self._subj_path = os.path.dirname(__file__) + '/../data/' + subject
if date is None:
self._date = find_nearest_time(self._subj_path)
else:
if isinstance(date, datetime):
# convert datetime to str
self._date = date.strftime("%Y-%m-%d-%H-%M-%S")
else:
self._date = date
print(self._date)
self.root_dir = os.path.join(self._subj_path, self._date)
# self.montage = OrderedSet(cfg.subj_info.montage)
self.montage = deepcopy(cfg.subj_info.montage)
# load stim order
self.events = self.load_event()
if loaddata:
self.load_all()
else:
self.data, self.ch_names, self.timestamp, self.montage_indices, self.events_backup = [None] * 5
def load_all(self):
# load data and timestamps
dataarray, ch_names, timestamp = self._load_data()
timestamp = Dataset.ts_check(timestamp)
self.data = dataarray
# list to set
self.ch_names = ch_names
self.timestamp = timestamp
self.montage_indices = self.get_channel_indices(self.montage, self.ch_names)
self.events_backup = self.events.copy()
if cfg.exp_config.bidir:
            assert 2 * len(timestamp) == self.events.size, 'Dual-directional: %d %d' % (len(timestamp), self.events.size)
self.events = self.events[:, ::2]
else:
            assert len(timestamp) == self.events.size, 'Unidirectional: %d %d' % (len(timestamp), self.events.size)
def _load_data(self):
"""
Read data according to file format
:return:
dataext: str, data file name
"""
walk_path = self.root_dir
loader = None
for f in os.listdir(walk_path):
_ext = f.split('.')[-1]
try:
loader = Dataset.data_format[_ext]
break
except KeyError:
pass
if loader is None:
raise FileNotFoundError('No matching data format found')
return loader(walk_path)
def load_event(self):
walk_path = self.root_dir
file = glob.glob(os.path.join(walk_path, self.subject) + '*')
assert len(file) == 1
file = file[0]
if file.endswith('.mat'):
raw = sio.loadmat(file)
order = raw['stim_order']
order -= 1
return order.reshape((-1, 12))
else:
with open(file) as f:
stim_order = [[int(x) for x in line.split()] for line in f if len(line) > 1]
return np.array(stim_order)
@staticmethod
def get_channel_indices(target_channels, channels_in_data):
"""
Get corresponding index number for channels in target channels
:param target_channels: list, target channel names
:param channels_in_data: list, all channel names in data source.
        :return: indices, list of integer positions of the target channels within channels_in_data
"""
indices = []
# build a dictionary for indexing
channel_book = {name: i for i, name in enumerate(channels_in_data)}
for ch in target_channels:
try:
indices.append(channel_book[ch])
            except KeyError as err:
print(err)
return indices
@staticmethod
def ts_check(ts):
# check time stamp intervals.
# In our experience, sometimes an accidental wrong trigger may appear at the beginning during recording.
fs = cfg.amp_info.samplerate
while len(ts) % 12 and (not (fs * 0.1 <= ts[1] - ts[0] <= fs * 0.3)):
del ts[0]
return ts
|
#!/usr/bin/python
#
# bitehist.py Block I/O size histogram.
# For Linux, uses BCC, eBPF. See .c file.
#
# USAGE: bitesize
#
# Ctrl-C will print the partially gathered histogram then exit.
#
# Copyright (c) 2016 Allan McAleavy
# Licensed under the Apache License, Version 2.0 (the "License")
#
# 05-Feb-2016 Allan McAleavy ran pep8 against file
from bcc import BPF
from time import sleep
bpf_text = """
#include <uapi/linux/ptrace.h>
#include <linux/blkdev.h>
struct proc_key_t {
char name[TASK_COMM_LEN];
u64 slot;
};
struct val_t {
char name[TASK_COMM_LEN];
};
BPF_HISTOGRAM(dist, struct proc_key_t);
BPF_HASH(commbyreq, struct request *, struct val_t);
int trace_pid_start(struct pt_regs *ctx, struct request *req)
{
struct val_t val = {};
if (bpf_get_current_comm(&val.name, sizeof(val.name)) == 0) {
commbyreq.update(&req, &val);
}
return 0;
}
int do_count(struct pt_regs *ctx, struct request *req)
{
struct val_t *valp;
valp = commbyreq.lookup(&req);
if (valp == 0) {
return 0;
}
if (req->__data_len > 0) {
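/* Illustrative example: an 8 KiB request (__data_len = 8192) gives 8192/1024 = 8,
 * so it is counted in the "8 -> 15" Kbytes row of the printed histogram. */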
struct proc_key_t key = {.slot = bpf_log2l(req->__data_len / 1024)};
bpf_probe_read(&key.name, sizeof(key.name), valp->name);
dist.increment(key);
}
return 0;
}
"""
# load BPF program
b = BPF(text=bpf_text)
b.attach_kprobe(event="blk_account_io_start", fn_name="trace_pid_start")
b.attach_kprobe(event="blk_account_io_completion", fn_name="do_count")
print("Tracing... Hit Ctrl-C to end.")
# trace until Ctrl-C
dist = b.get_table("dist")
try:
sleep(99999999)
except KeyboardInterrupt:
dist.print_log2_hist("Kbytes", "Process Name",
section_print_fn=bytes.decode)
|
import torch.nn as nn
class LSTMClassifier(nn.Module):
"""
A simple two-layer LSTM model we will be using to perform Sentiment Analysis.
"""
def __init__(self, embedding_dim, hidden_dim, vocab_size):
"""
Initialize the model by setting up the various layers.
"""
super(LSTMClassifier, self).__init__()
self.embedding = nn.Embedding(vocab_size, embedding_dim, padding_idx=0)
self.lstmA = nn.LSTM(embedding_dim, hidden_dim)
self.lstmB = nn.LSTM(hidden_dim, hidden_dim)
self.dense = nn.Linear(in_features=hidden_dim, out_features=1)
self.sig = nn.Sigmoid()
self.word_dict = None
def forward(self, x):
"""
Perform a forward pass of our model on some input.
"""
x = x.t()
lengths = x[0,:]
reviews = x[1:,:]
embeds = self.embedding(reviews)
lstm_out1, _ = self.lstmA(embeds)
lstm_out, _ = self.lstmB(lstm_out1)
out = self.dense(lstm_out)
out = out[lengths - 1, range(len(lengths))]
return self.sig(out.squeeze())
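# Minimal usage sketch (not part of the original file; dimensions and batch contents are
# illustrative). forward() expects each row to hold the review length in column 0 followed
# by the padded token ids.
import torch
example_model = LSTMClassifier(embedding_dim=32, hidden_dim=100, vocab_size=5000)
example_lengths = torch.tensor([[4], [2]])        # true review lengths
example_tokens = torch.randint(1, 5000, (2, 6))   # two reviews padded to length 6
example_batch = torch.cat([example_lengths, example_tokens], dim=1)
example_probs = example_model(example_batch)      # sentiment probabilities, shape (2,)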
|
import KratosMultiphysics
import KratosMultiphysics.RomApplication as romapp
import KratosMultiphysics.StructuralMechanicsApplication
from KratosMultiphysics.RomApplication.empirical_cubature_method import EmpiricalCubatureMethod
from KratosMultiphysics.RomApplication import python_solvers_wrapper_rom as solver_wrapper
from KratosMultiphysics.StructuralMechanicsApplication.structural_mechanics_analysis import StructuralMechanicsAnalysis
import json
import numpy as np
class StructuralMechanicsAnalysisROM(StructuralMechanicsAnalysis):
def __init__(self, model, project_parameters, hyper_reduction_element_selector=None):
super().__init__(model, project_parameters)
if hyper_reduction_element_selector is not None:
if hyper_reduction_element_selector == "EmpiricalCubature":
self.hyper_reduction_element_selector = EmpiricalCubatureMethod()
self.time_step_residual_matrix_container = []
else:
err_msg = "The requested element selection method \"" + hyper_reduction_element_selector + "\" is not in the rom application\n"
err_msg += "Available options are: \"EmpiricalCubature\""
raise Exception(err_msg)
else:
self.hyper_reduction_element_selector = None
#### Internal functions ####
def _CreateSolver(self):
""" Create the Solver (and create and import the ModelPart if it is not alread in the model) """
## Solver construction
with open('RomParameters.json') as rom_parameters:
rom_settings = KratosMultiphysics.Parameters(rom_parameters.read())
self.project_parameters["solver_settings"].AddValue("rom_settings", rom_settings["rom_settings"])
return solver_wrapper.CreateSolverByParameters(self.model, self.project_parameters["solver_settings"], self.project_parameters["problem_data"]["parallel_type"].GetString())
def _GetSimulationName(self):
return "::[ROM Simulation]:: "
def ModifyAfterSolverInitialize(self):
"""Here is where the ROM_BASIS is imposed to each node"""
super().ModifyAfterSolverInitialize()
computing_model_part = self._solver.GetComputingModelPart()
with open('RomParameters.json') as f:
data = json.load(f)
nodal_dofs = len(data["rom_settings"]["nodal_unknowns"])
nodal_modes = data["nodal_modes"]
counter = 0
rom_dofs = self.project_parameters["solver_settings"]["rom_settings"]["number_of_rom_dofs"].GetInt()
for node in computing_model_part.Nodes:
aux = KratosMultiphysics.Matrix(nodal_dofs, rom_dofs)
node_id = str(node.Id)
for j in range(nodal_dofs):
for i in range(rom_dofs):
aux[j, i] = nodal_modes[node_id][j][i]
node.SetValue(romapp.ROM_BASIS, aux)  # ROM basis
counter+=1
if self.hyper_reduction_element_selector is not None:
if self.hyper_reduction_element_selector.Name == "EmpiricalCubature":
self.ResidualUtilityObject = romapp.RomResidualsUtility(self._GetSolver().GetComputingModelPart(), self.project_parameters["solver_settings"]["rom_settings"], self._GetSolver().get_solution_scheme())
def FinalizeSolutionStep(self):
if self.hyper_reduction_element_selector is not None:
if self.hyper_reduction_element_selector.Name == "EmpiricalCubature":
print('\n\n\n\nGenerating matrix of residuals')
ResMat = self.ResidualUtilityObject.GetResiduals()
NP_ResMat = np.array(ResMat, copy=False)
self.time_step_residual_matrix_container.append(NP_ResMat)
super().FinalizeSolutionStep()
def Finalize(self):
super().Finalize()
if self.hyper_reduction_element_selector is not None:
if self.hyper_reduction_element_selector.Name == "EmpiricalCubature":
OriginalNumberOfElements = self._GetSolver().GetComputingModelPart().NumberOfElements()
ModelPartName = self._GetSolver().settings["model_import_settings"]["input_filename"].GetString()
self.hyper_reduction_element_selector.SetUp(self.time_step_residual_matrix_container, OriginalNumberOfElements, ModelPartName)
self.hyper_reduction_element_selector.Run()
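# Hypothetical driver sketch (not part of the original file): the parameter file name and the
# element-selection option are assumptions following common Kratos conventions; a
# RomParameters.json is also expected in the working directory, as read by _CreateSolver above.
if __name__ == "__main__":
    with open("ProjectParameters.json", 'r') as parameter_file:
        parameters = KratosMultiphysics.Parameters(parameter_file.read())
    model = KratosMultiphysics.Model()
    simulation = StructuralMechanicsAnalysisROM(model, parameters, "EmpiricalCubature")
    simulation.Run()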
|
# -*- coding: utf-8 -*-
"""
JoystickButton is a button with x/y values. When the button is depressed and the
mouse dragged, the x/y values change to follow the mouse.
When the mouse button is released, the x/y values change to 0,0 (rather like
letting go of the joystick).
"""
import initExample ## Add path to library (just for examples; you do not need this)
from pyqtgraph.Qt import QtGui, QtCore
import pyqtgraph as pg
app = QtGui.QApplication([])
mw = QtGui.QMainWindow()
mw.resize(300,50)
mw.setWindowTitle('pyqtgraph example: JoystickButton')
cw = QtGui.QWidget()
mw.setCentralWidget(cw)
layout = QtGui.QGridLayout()
cw.setLayout(layout)
l1 = pg.ValueLabel(siPrefix=True, suffix='m')
l2 = pg.ValueLabel(siPrefix=True, suffix='m')
jb = pg.JoystickButton()
jb.setFixedWidth(30)
jb.setFixedHeight(30)
layout.addWidget(l1, 0, 0)
layout.addWidget(l2, 0, 1)
layout.addWidget(jb, 0, 2)
x = 0
y = 0
def update():
global x, y, l1, l2, jb
dx, dy = jb.getState()
x += dx * 1e-3
y += dy * 1e-3
l1.setValue(x)
l2.setValue(y)
timer = QtCore.QTimer()
timer.timeout.connect(update)
timer.start(30)
#show() moved to end of file to get around this bug:
# https://bugreports.qt-project.org/browse/QTBUG-39019
mw.show()
## Start Qt event loop unless running in interactive mode or using pyside.
if __name__ == '__main__':
import sys
if (sys.flags.interactive != 1) or not hasattr(QtCore, 'PYQT_VERSION'):
QtGui.QApplication.instance().exec_()
|