"""
MIT License
Copyright (c) 2020-present shay (shayypy)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
------------------------------------------------------------------------------
This project includes code from https://github.com/Rapptz/discord.py, which is
available under the MIT license:
The MIT License (MIT)
Copyright (c) 2015-present Rapptz
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
import guilded.abc
class Context(guilded.abc.Messageable):
def __init__(self, **attrs):
self.message = attrs.pop('message', None)
self._state = attrs.pop('state', self.message._state)
self.bot = attrs.pop('bot', None)
self.args = attrs.pop('args', [])
self.kwargs = attrs.pop('kwargs', {})
self.prefix = attrs.pop('prefix')
self.command = attrs.pop('command', None)
self.view = attrs.pop('view', None)
self.invoked_with = attrs.pop('invoked_with', None)
self.invoked_parents = attrs.pop('invoked_parents', [])
self.invoked_subcommand = attrs.pop('invoked_subcommand', None)
self.subcommand_passed = attrs.pop('subcommand_passed', None)
self.command_failed = attrs.pop('command_failed', False)
@property
def valid(self):
return self.prefix is not None and self.command is not None
@property
def cog(self):
if self.command is None:
return None
return self.command.cog
@property
def channel(self):
return self.message.channel
@property
def _channel_id(self):
return self.message.channel_id
@property
def team(self):
return self.message.team
@property
def guild(self):
return self.team
@property
def author(self):
return self.message.author
@property
def me(self):
return self.team.me if self.team else self.bot.user
#def reply(self, *content, **kwargs):
# return self.message.reply(*content, **kwargs)
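# Illustrative usage sketch (not part of the library): a Context is normally built
# by the command framework, but it can be constructed by hand from a received
# message. `message`, `bot`, `prefix`, and `command` below are hypothetical
# caller-side values.
# ctx = Context(message=message, bot=bot, prefix='!', command=command)
# if ctx.valid:
#     await ctx.send(f'Hello, {ctx.author.name}!')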
|
#!/usr/bin/env python
# coding=utf-8
import logging
from typing import NamedTuple, List
from dataclasses import dataclass
from collections import OrderedDict as odict, defaultdict
import numpy as np
from ioos_qc.qartod import QartodFlags
L = logging.getLogger(__name__) # noqa
class CallResult(NamedTuple):
package: str
test: str
function: callable
results: np.ndarray
def __repr__(self):
return f'<CallResult package={self.package} test={self.test}>'
class ContextResult(NamedTuple):
stream_id: str
results: List[CallResult]
subset_indexes: np.ndarray
data: np.ndarray = None
tinp: np.ndarray = None
zinp: np.ndarray = None
lat: np.ndarray = None
lon: np.ndarray = None
def __repr__(self):
return f'<ContextResult stream_id={self.stream_id}>'
@dataclass
class CollectedResult:
stream_id: str
package: str
test: str
function: callable
results: np.ma.core.MaskedArray = None
data: np.ndarray = None
tinp: np.ndarray = None
zinp: np.ndarray = None
lat: np.ndarray = None
lon: np.ndarray = None
def __repr__(self):
return f'<CollectedResult stream_id={self.stream_id} package={self.package} test={self.test}>'
def function_name(self) -> str:
return self.function.__name__
@property
def hash_key(self) -> str:
return f'{self.stream_id}:{self.package}.{self.test}'
def collect_results(results, how='list'):
if how in ['list', list]:
return collect_results_list(results)
elif how in ['dict', dict]:
return collect_results_dict(results)
def collect_results_list(results):
""" Turns a list of ContextResult objects into an iterator of CollectedResult objects
by combining the subset_index information in each ContextResult together into
a single array of results.
"""
collected = odict()
# ContextResults
for r in results:
cr = None
# Shortcut for CallResult objects when someone uses QcConfig.run() directly
# and doesn't go through a Stream object
if isinstance(r, CallResult):
cr = CollectedResult(
stream_id=None,
package=r.package,
test=r.test,
function=r.function,
results=r.results,
)
collected[cr.hash_key] = cr
continue
# CallResults
for tr in r.results:
cr = CollectedResult(
stream_id=r.stream_id,
package=tr.package,
test=tr.test,
function=tr.function
)
if cr.hash_key not in collected:
# Set the initial values
cr.results = np.ma.masked_all(shape=r.subset_indexes.shape, dtype=tr.results.dtype)
cr.data = np.ma.masked_all(shape=r.subset_indexes.shape, dtype=r.data.dtype)
cr.tinp = np.ma.masked_all(shape=r.subset_indexes.shape, dtype=r.tinp.dtype)
cr.zinp = np.ma.masked_all(shape=r.subset_indexes.shape, dtype=r.zinp.dtype)
cr.lat = np.ma.masked_all(shape=r.subset_indexes.shape, dtype=r.lat.dtype)
cr.lon = np.ma.masked_all(shape=r.subset_indexes.shape, dtype=r.lon.dtype)
collected[cr.hash_key] = cr
collected[cr.hash_key].results[r.subset_indexes] = tr.results
if cr is not None:
if r.subset_indexes.all():
collected[cr.hash_key].data = r.data
collected[cr.hash_key].tinp = r.tinp
collected[cr.hash_key].zinp = r.zinp
collected[cr.hash_key].lat = r.lat
collected[cr.hash_key].lon = r.lon
else:
collected[cr.hash_key].data[r.subset_indexes] = r.data
collected[cr.hash_key].tinp[r.subset_indexes] = r.tinp
collected[cr.hash_key].zinp[r.subset_indexes] = r.zinp
collected[cr.hash_key].lat[r.subset_indexes] = r.lat
collected[cr.hash_key].lon[r.subset_indexes] = r.lon
return list(collected.values())
def collect_results_dict(results):
""" Turns a list of ContextResult objects into a dictionary of test results
by combining the subset_index information in each ContextResult together into
a single array of results. This is mostly here for historical purposes. Users
should migrate to using the Result objects directly.
"""
# Magic for nested key generation
# https://stackoverflow.com/a/27809959
collected = defaultdict(lambda: defaultdict(odict))
# ContextResults
for r in results:
# Shortcut for CallResult objects when someone uses QcConfig.run() directly
# and doesn't go through a Stream object
if isinstance(r, CallResult):
collected[r.package][r.test] = r.results
continue
flag_arr = np.ma.empty_like(r.subset_indexes, dtype='uint8')
flag_arr.fill(QartodFlags.UNKNOWN)
# iterate over the CallResults
for tr in r.results:
testpackage = tr.package
testname = tr.test
testresults = tr.results
if testname not in collected[r.stream_id][testpackage]:
collected[r.stream_id][testpackage][testname] = np.copy(flag_arr)
collected[r.stream_id][testpackage][testname][r.subset_indexes] = testresults
return collected
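# Illustrative usage (a sketch; the ContextResult list is assumed to come from a
# normal ioos_qc Stream/Config run):
#
#   for cr in collect_results(results, how='list'):
#       print(cr.hash_key, cr.results.count())
#
# how='dict' instead returns a nested mapping of
# stream_id -> package -> test -> flag array.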
|
from bs4 import BeautifulSoup as bs
import requests
from urllib.request import urlopen
from urllib.parse import quote
import re
import time
def getProductInfo(productNameIndex):
headers = {"User-Agent": "Mozilla/5.0"}
color = []
size = []
price = ""
instruction = ""
sizeGuide = ""
category = ""
url = (
"https://www.ficelle.co.kr/product/"
+ quote(productNameIndex["productName"])
+ "/"
+ quote(productNameIndex["productIndex"])
+ "/category/25/display/1/"
)
response = requests.get(url, headers=headers)
if response.status_code == 200:
html = urlopen(url)
soup = bs(html, "html.parser")
# Color Crawling
c = soup.find("ul", attrs={"ec-dev-id": "product_option_id1"})
colors = c.find_all("span")
# print(colors)
for i in colors:
productColor = i.text
# print("productColor : ", productColor)
color.append(productColor)
# c = soup.find_all("ul", attrs={"class": "ec-product-button ec-product-preview"})
# if not c:
# print(soup)
# c = soup.find_all("select", attrs={"id": "product_option_id1"})
# if c:
# colors = c[0].find_all("option")
# for i in range(2, len(colors)):
# productColor = colors[i].text
# print(productColor)
# color.append(productColor)
# else:
# colors = c[0].find_all("li")
# for i in colors:
# productColor = i.find("span").text
# print(productColor)
# color.append(productColor)
# Size Crawling
sizes = soup.find_all("li", attrs={"class": "ec-product-disabled"})
if not sizes:
sizes = soup.find_all("select", attrs={"id": "product_option_id2"})
if sizes:
s = sizes[0].find_all("option")
for i in range(2, len(s)):
productSize = s[i].text
# print(productSize)
size.append(productSize)
else:
size.append("Free")
else:
for i in sizes:
productSize = i.find("span").text
# print(productSize)
size.append(productSize)
# Product Name Crawling
# productName = soup.find(
# "span", attrs={"style": "font-size:16px;color:#555555;"}
# ).text
# category
        # Classify the product into a category based on its productName
try:
productNameSplitList = productNameIndex["productName"].split(" ")
# print(productNameSplitList)
productNameSplitList.sort()
# print(productNameSplitList)
pants = ["Pants", "Slacks"]
knit_sweater = ["Knit", "Sweater"]
blouse_shirt = ["Blouse", "Shirt", "Shirts"]
skirt = ["Skirt"]
onepiece = ["Onepiece", "Dress"]
jacket = ["Jacket"]
jumper = ["Jumper"]
jumpsuit = ["Jumpsuit"]
jeans = ["Denim", "Jeans"]
cardigan = ["Cardigan"]
coat = ["Coat"]
sports_wear = ["Jogger"]
t_shirt = ["T", "Sweat shirt", "Top", "Sleeveless", "MTM"]
codie_set = ["Set", "&"]
bag = ["Bag"]
sandal = ["Sandal"]
slipper = ["slipper", "Flip"]
middle_boots = ["Middle"]
long_boots = ["Long"]
bloafaer = ["Bloafer"]
flat = ["Flat"]
for productNameValue in productNameSplitList:
                # Category strings are Korean shopping category paths
                # ("패션의류" = fashion apparel, "패션잡화" = fashion accessories).
                if productNameValue in codie_set:
                    category = "패션의류 여성의류 코디세트"
                    break
                elif productNameValue in pants:
                    category = "패션의류 여성의류 바지"
                    break
                elif productNameValue in blouse_shirt:
                    category = "패션의류 여성의류 블라우스/셔츠"
                    break
                elif productNameValue in skirt:
                    category = "패션의류 여성의류 스커트"
                    break
                elif productNameValue in onepiece:
                    category = "패션의류 여성의류 원피스"
                    break
                elif productNameValue in jacket:
                    category = "패션의류 여성의류 자켓"
                    break
                elif productNameValue in jumper:
                    category = "패션의류 여성의류 점퍼"
                    break
                elif productNameValue in jeans:
                    category = "패션의류 여성의류 청바지"
                    break
                elif productNameValue in cardigan:
                    category = "패션의류 여성의류 카디건"
                    break
                elif productNameValue in coat:
                    category = "패션의류 여성의류 코트"
                    break
                elif productNameValue in sports_wear:
                    category = "패션의류 여성의류 트레이닝복"
                    break
                elif productNameValue in knit_sweater:
                    category = "패션의류 여성의류 니트/스웨터"
                    break
                elif productNameValue in jumpsuit:
                    category = "패션의류 여성의류 점프수트"
                    break
                elif productNameValue in t_shirt:
                    category = "패션의류 여성의류 티셔츠"
                    break
                elif productNameValue in bag:
                    category = "패션잡화 여성가방 숄더백"
                    break
                elif productNameValue in sandal:
                    category = "패션잡화 여성신발 샌들 스트랩샌들"
                    break
                elif productNameValue in slipper:
                    category = "패션잡화 여성신발 슬리퍼"
                    break
                elif productNameValue in middle_boots:
                    category = "패션잡화 여성신발 부츠 미들부츠"
                    break
                elif productNameValue in long_boots:
                    category = "패션잡화 여성신발 부츠 롱부츠"
                    break
                elif productNameValue in bloafaer:
                    category = "패션잡화 여성신발 샌들 뮬/블로퍼"
                    break
                elif productNameValue in flat:
                    category = "패션잡화 여성신발 단화 플랫"
                    break
        except Exception:
print("Non-Existent Categories")
# Instruction and Size Guide Crawling
price = soup.find("strong", attrs={"id": "span_product_price_text"}).text
        # Strip commas and the trailing "원" (KRW) suffix before converting to int
        price = re.sub(",|원", "", price)
price = int(price) + 500
instruction = soup.find("div", attrs={"id": "view1"}).find("p").text
sizeGuide = soup.find("div", attrs={"id": "view2"}).find("p").text
time.sleep(3)
return {
"productName": productNameIndex["productName"],
"price": price,
"colors": color,
"sizes": size,
"instruction": instruction,
"sizeGuide": sizeGuide,
"category": category,
}
else:
print(response.status_code)
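# Illustrative call (a sketch; the name and index values below are hypothetical):
# info = getProductInfo({"productName": "Linen Long Skirt", "productIndex": "1234"})
# if info:
#     print(info["category"], info["price"], info["colors"])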
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import git
import os
import re
import sys
import toml
from pathlib import Path
from alchemist_py.brokergen import createProject
from alchemist_py.deviceinfo import searchDevice
from alchemist_py.plugin_manager import PluginManager
class Manager(object):
def __init__(self):
config = toml.load(open("Alchemist.toml"))
self.board = config["board"]
self.nodes = config["nodes"]
self.topics = config["topics"]
self.fpga, self.clock = searchDevice(self.board)
self.topic_table = {}
for topic in self.topics:
self.topic_table[topic["name"]] =\
"struct {name} {{\n {message}}};".format(
name=topic["name"], message=topic["message"]
)
self.p_manager = PluginManager()
self.ports = []
for ps in list(map(lambda x:x["ports"], self.nodes)):
self.ports.extend(ps)
def updateNode(self, node):
path_to_project = Path("nodes")/node["name"]
# make mini alchemist data for the node
mini_alchemist = {
"device": {
"board": self.board,
"fpga": self.fpga,
"clock": self.clock
},
"node": node,
"topics": []
}
for port in node["ports"]:
for topic in self.topics:
if port["attribute"] in ["wire"]:
break
elif port["attribute"] in ["publisher", "subscriber"] and port["topic"] == topic["name"]:
mini_alchemist["topics"].append(topic)
break
else:
print("Unknown topic:", port["topic"], file=sys.stderr)
print("node:", node["name"])
exit(1)
# write mini alchemist to TOML
os.makedirs(path_to_project)
toml.dump(mini_alchemist, open(path_to_project/".Alchemist.toml", "w"))
# update project
plugin = self.p_manager.loadPlugin(node["plugin"])
plugin.createProject(node["name"])
def updateNodes(self):
# update projects for nodes
for node in self.nodes:
path_to_project = Path("nodes")/node["name"]
# if no project for a node, make a directory and Alchemist.toml
if not os.path.exists(path_to_project):
if "repo" in node.keys():
git.Repo.clone_from(node["repo"], "nodes")
else:
self.updateNode(node)
# if Alchemist.toml was updated, update mini Alchemist.toml
t_alchemist = os.path.getatime("Alchemist.toml")
t_mini_alchemist = os.path.getatime(path_to_project/".Alchemist.toml")
if t_alchemist > t_mini_alchemist:
if "repo" in node.keys():
git.Repo.clone_from(node["repo"], "nodes")
else:
self.updateNode(node)
def updateTopic(self, topic:dict):
path_to_project = Path("brokers") / ("broker"+topic["name"])
if not os.path.exists(path_to_project):
byte = 0
for m in re.finditer(r"(?P<type>((unsigned\s+){0,1}(char|short|int|long)|(float|double)|(ap_(u){0,1}int\s*\<\s*[1-9]{1,4}\s*>)))\s+(?P<var>([a-zA-Z_][a-zA-Z0-9_]*(\s*\[\s*([0-9]|[1-9][0-9]*)\s*\]){0,1}))\s*;", topic["message"]):
byte += self.getByte(m.group("type"), m.group("var"))
mini_alchemist = {
"device": {
"board": self.board,
"fpga": self.fpga,
"clock": self.clock
},
"topic": topic,
}
mini_alchemist["topic"]["pub"] = len(list(filter(
lambda x: x["attribute"] == "publisher" and x["topic"] == topic["name"],
self.ports
)))
mini_alchemist["topic"]["sub"] = len(list(filter(
lambda x: x["attribute"] == "subscriber" and x["topic"] == topic["name"],
self.ports
)))
mini_alchemist["topic"]["width"] = 64
mini_alchemist["topic"]["count"] = int(byte / 8)
os.makedirs(path_to_project)
toml.dump(mini_alchemist, open(path_to_project / ".Alchemist.toml", "w"))
createProject(topic["name"])
def updateTopics(self):
for topic in self.topics:
self.updateTopic(topic)
def getByte(self, vType:str, var:str):
width_of_type = 0
if vType == "char":
width_of_type = 1
elif vType == "short":
width_of_type = 2
elif vType == "int":
width_of_type = 4
elif vType == "long":
width_of_type = 8
elif vType.split()[0] == "unsigned":
if vType.split()[1] == "char":
width_of_type = 1
elif vType.split()[1] == "short":
width_of_type = 2
elif vType.split()[1] == "int":
width_of_type = 4
elif vType.split()[1] == "long":
width_of_type = 8
else:
print("Unknown type!")
exit(1)
else:
print("Unknown type!")
exit(1)
length_of_var = 1
m = re.match(
r"[a-zA-Z_][a-zA-Z0-9_]*\s*\[\s*(?P<length>[1-9][0-9]*)\s*\]",
var
)
if m:
length_of_var = int(m.group("length"))
return width_of_type * length_of_var
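# Illustrative example (a sketch, not part of the tool): getByte() pairs a C type
# matched by the message regex with its declared variable, so a field declared as
# "unsigned short data[16];" contributes 2 * 16 = 32 bytes to the topic size:
# Manager().getByte("unsigned short", "data[16]")  # -> 32 (requires Alchemist.toml)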
|
import pymongo
from bson import ObjectId
from src.services import config
collection = config.db.incomes
def search_by_user_email(user_email, itype):
return collection.find({"user_email": user_email, "itype": itype})
def sum_amounts_by_user(user_email, itype):
pipeline = [{"$match": {"user_email": user_email, "itype": itype}}, {"$group": {"_id": "null", "total": {"$sum": "$amount"}}}]
return collection.aggregate(pipeline)
def save(income):
collection.insert_one(income.__dict__)
def save_all(incomes):
collection.insert_many(incomes)
def update(income_id, income):
collection.find_one_and_update(
{"_id": ObjectId(income_id)},
{"$set": income.__dict__},
upsert=True)
def delete(income_id):
collection.delete_one({"_id": ObjectId(income_id)})
def filter(user_email, category, date, account, itype):
pipeline = [{
"$match": {
"user_email": user_email,
"category": category,
"date": date,
"account": account,
"itype": itype
}},
{"$sort": {"date": pymongo.DESCENDING}}
]
return collection.aggregate(pipeline)
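# Illustrative usage (a sketch; assumes `config.db` points at a configured MongoDB):
# doc = next(sum_amounts_by_user("user@example.com", "salary"), None)
# total = doc["total"] if doc else 0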
|
import numpy as np


def SC_DFA(y):
    """Detrended fluctuation analysis: returns the scaling exponent alpha,
    i.e. the slope of log(F) against log(window size)."""
    N = len(y)
    tau = int(np.floor(N / 2))
    y = y - np.mean(y)
    x = np.cumsum(y)  # integrated (profile) series
    taus = np.arange(5, tau + 1)
    ntau = len(taus)
    F = np.zeros(ntau)
    for i in range(ntau):
        t = int(taus[i])
        # Split the profile into non-overlapping windows of length t.
        x_buff = x[:N - N % t].reshape((N // t, t))
        y_buff = np.zeros((N // t, t))
        for j in range(N // t):
            tt = range(t)
            # Detrend each window with a linear fit and keep the squared residuals.
            p = np.polyfit(tt, x_buff[j, :], 1)
            y_buff[j, :] = np.power(x_buff[j, :] - np.polyval(p, tt), 2)
        F[i] = np.sqrt(np.mean(y_buff))
    # The scaling exponent is the slope of log(F) vs log(tau).
    p = np.polyfit(np.log(taus), np.log(F), 1)
    return p[0]
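# Illustrative usage (a sketch, not part of the original module): the DFA scaling
# exponent of uncorrelated white noise is expected to be close to 0.5.
if __name__ == '__main__':
    rng = np.random.default_rng(0)
    print(SC_DFA(rng.standard_normal(2000)))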
|
import subprocess
from django.conf import settings
from django.core.management.base import BaseCommand
class Command(BaseCommand):
help = "Automatically fixes formatting issues and reports any other linting errors"
def handle(self, *args, **options):
subprocess.run(["isort", "--apply", "--quiet"], cwd=settings.ROOT_DIR)
subprocess.run(["black", "--quiet", "."], cwd=settings.ROOT_DIR)
subprocess.run(["flake8"], cwd=settings.ROOT_DIR)
|
"""
WSGI config for template_extends project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "template_extends.settings")
application = get_wsgi_application()
|
#
# Copyright 2015 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Tests for the zipline.assets package
"""
import sys
from unittest import TestCase
from datetime import datetime, timedelta
import pickle
import uuid
import warnings
import pandas as pd
from pandas.tseries.tools import normalize_date
from pandas.util.testing import assert_frame_equal
from nose_parameterized import parameterized
from numpy import full
from zipline.assets import Asset, Equity, Future, AssetFinder
from zipline.assets.futures import FutureChain
from zipline.errors import (
SymbolNotFound,
MultipleSymbolsFound,
SidAssignmentError,
RootSymbolNotFound,
)
from zipline.finance.trading import with_environment
from zipline.utils.test_utils import (
all_subindices,
make_rotating_asset_info,
)
def build_lookup_generic_cases():
"""
Generate test cases for AssetFinder test_lookup_generic.
"""
unique_start = pd.Timestamp('2013-01-01', tz='UTC')
unique_end = pd.Timestamp('2014-01-01', tz='UTC')
dupe_0_start = pd.Timestamp('2013-01-01', tz='UTC')
dupe_0_end = dupe_0_start + timedelta(days=1)
dupe_1_start = pd.Timestamp('2013-01-03', tz='UTC')
dupe_1_end = dupe_1_start + timedelta(days=1)
frame = pd.DataFrame.from_records(
[
{
'sid': 0,
'file_name': 'duplicated',
'company_name': 'duplicated_0',
'start_date_nano': dupe_0_start.value,
'end_date_nano': dupe_0_end.value,
'exchange': '',
},
{
'sid': 1,
'file_name': 'duplicated',
'company_name': 'duplicated_1',
'start_date_nano': dupe_1_start.value,
'end_date_nano': dupe_1_end.value,
'exchange': '',
},
{
'sid': 2,
'file_name': 'unique',
'company_name': 'unique',
'start_date_nano': unique_start.value,
'end_date_nano': unique_end.value,
'exchange': '',
},
],
)
finder = AssetFinder(metadata=frame)
dupe_0, dupe_1, unique = assets = [
finder.retrieve_asset(i)
for i in range(3)
]
dupe_0_start = dupe_0.start_date
dupe_1_start = dupe_1.start_date
cases = [
##
# Scalars
# Asset object
(finder, assets[0], None, assets[0]),
(finder, assets[1], None, assets[1]),
(finder, assets[2], None, assets[2]),
# int
(finder, 0, None, assets[0]),
(finder, 1, None, assets[1]),
(finder, 2, None, assets[2]),
# Duplicated symbol with resolution date
(finder, 'duplicated', dupe_0_start, dupe_0),
(finder, 'duplicated', dupe_1_start, dupe_1),
# Unique symbol, with or without resolution date.
(finder, 'unique', unique_start, unique),
(finder, 'unique', None, unique),
##
# Iterables
# Iterables of Asset objects.
(finder, assets, None, assets),
(finder, iter(assets), None, assets),
# Iterables of ints
(finder, (0, 1), None, assets[:-1]),
(finder, iter((0, 1)), None, assets[:-1]),
# Iterables of symbols.
(finder, ('duplicated', 'unique'), dupe_0_start, [dupe_0, unique]),
(finder, ('duplicated', 'unique'), dupe_1_start, [dupe_1, unique]),
# Mixed types
(finder,
('duplicated', 2, 'unique', 1, dupe_1),
dupe_0_start,
[dupe_0, assets[2], unique, assets[1], dupe_1]),
]
return cases
class AssetTestCase(TestCase):
def test_asset_object(self):
        self.assertEqual({5061: 'foo'}[Asset(5061)], 'foo')
        self.assertEqual(Asset(5061), 5061)
        self.assertEqual(5061, Asset(5061))
        self.assertEqual(Asset(5061), Asset(5061))
        self.assertEqual(int(Asset(5061)), 5061)
        self.assertEqual(str(Asset(5061)), 'Asset(5061)')
def test_asset_is_pickleable(self):
# Very wow
s = Asset(
1337,
symbol="DOGE",
asset_name="DOGECOIN",
start_date=pd.Timestamp('2013-12-08 9:31AM', tz='UTC'),
end_date=pd.Timestamp('2014-06-25 11:21AM', tz='UTC'),
first_traded=pd.Timestamp('2013-12-08 9:31AM', tz='UTC'),
exchange='THE MOON',
)
s_unpickled = pickle.loads(pickle.dumps(s))
attrs_to_check = ['end_date',
'exchange',
'first_traded',
'end_date',
'asset_name',
'start_date',
'sid',
'start_date',
'symbol']
for attr in attrs_to_check:
self.assertEqual(getattr(s, attr), getattr(s_unpickled, attr))
def test_asset_comparisons(self):
s_23 = Asset(23)
s_24 = Asset(24)
self.assertEqual(s_23, s_23)
self.assertEqual(s_23, 23)
self.assertEqual(23, s_23)
self.assertNotEqual(s_23, s_24)
self.assertNotEqual(s_23, 24)
self.assertNotEqual(s_23, "23")
self.assertNotEqual(s_23, 23.5)
self.assertNotEqual(s_23, [])
self.assertNotEqual(s_23, None)
self.assertLess(s_23, s_24)
self.assertLess(s_23, 24)
self.assertGreater(24, s_23)
self.assertGreater(s_24, s_23)
def test_lt(self):
self.assertTrue(Asset(3) < Asset(4))
self.assertFalse(Asset(4) < Asset(4))
self.assertFalse(Asset(5) < Asset(4))
def test_le(self):
self.assertTrue(Asset(3) <= Asset(4))
self.assertTrue(Asset(4) <= Asset(4))
self.assertFalse(Asset(5) <= Asset(4))
def test_eq(self):
self.assertFalse(Asset(3) == Asset(4))
self.assertTrue(Asset(4) == Asset(4))
self.assertFalse(Asset(5) == Asset(4))
def test_ge(self):
self.assertFalse(Asset(3) >= Asset(4))
self.assertTrue(Asset(4) >= Asset(4))
self.assertTrue(Asset(5) >= Asset(4))
def test_gt(self):
self.assertFalse(Asset(3) > Asset(4))
self.assertFalse(Asset(4) > Asset(4))
self.assertTrue(Asset(5) > Asset(4))
def test_type_mismatch(self):
if sys.version_info.major < 3:
self.assertIsNotNone(Asset(3) < 'a')
self.assertIsNotNone('a' < Asset(3))
else:
with self.assertRaises(TypeError):
Asset(3) < 'a'
with self.assertRaises(TypeError):
'a' < Asset(3)
class TestFuture(TestCase):
future = Future(
2468,
symbol='OMH15',
root_symbol='OM',
notice_date=pd.Timestamp('2014-01-20', tz='UTC'),
expiration_date=pd.Timestamp('2014-02-20', tz='UTC'),
contract_multiplier=500
)
def test_str(self):
strd = self.future.__str__()
self.assertEqual("Future(2468 [OMH15])", strd)
def test_repr(self):
reprd = self.future.__repr__()
self.assertTrue("Future" in reprd)
self.assertTrue("2468" in reprd)
self.assertTrue("OMH15" in reprd)
self.assertTrue("root_symbol='OM'" in reprd)
self.assertTrue(("notice_date=Timestamp('2014-01-20 00:00:00+0000', "
"tz='UTC')") in reprd)
self.assertTrue("expiration_date=Timestamp('2014-02-20 00:00:00+0000'"
in reprd)
self.assertTrue("contract_multiplier=500" in reprd)
def test_reduce(self):
reduced = self.future.__reduce__()
self.assertEqual(Future, reduced[0])
def test_to_and_from_dict(self):
dictd = self.future.to_dict()
self.assertTrue('root_symbol' in dictd)
self.assertTrue('notice_date' in dictd)
self.assertTrue('expiration_date' in dictd)
self.assertTrue('contract_multiplier' in dictd)
from_dict = Future.from_dict(dictd)
self.assertTrue(isinstance(from_dict, Future))
self.assertEqual(self.future, from_dict)
def test_root_symbol(self):
self.assertEqual('OM', self.future.root_symbol)
class AssetFinderTestCase(TestCase):
def test_lookup_symbol_fuzzy(self):
as_of = pd.Timestamp('2013-01-01', tz='UTC')
frame = pd.DataFrame.from_records(
[
{
'sid': i,
'file_name': 'TEST@%d' % i,
'company_name': "company%d" % i,
'start_date_nano': as_of.value,
'end_date_nano': as_of.value,
'exchange': uuid.uuid4().hex,
}
for i in range(3)
]
)
finder = AssetFinder(frame, fuzzy_char='@')
asset_0, asset_1, asset_2 = (
finder.retrieve_asset(i) for i in range(3)
)
for i in range(2): # we do it twice to test for caching bugs
self.assertIsNone(finder.lookup_symbol('test', as_of))
self.assertEqual(
asset_1,
finder.lookup_symbol('test@1', as_of)
)
# Adding an unnecessary fuzzy shouldn't matter.
self.assertEqual(
asset_1,
finder.lookup_symbol('test@1', as_of, fuzzy=True)
)
# Shouldn't find this with no fuzzy_str passed.
self.assertIsNone(finder.lookup_symbol('test1', as_of))
# Should find exact match.
self.assertEqual(
asset_1,
finder.lookup_symbol('test1', as_of, fuzzy=True),
)
def test_lookup_symbol_resolve_multiple(self):
# Incrementing by two so that start and end dates for each
# generated Asset don't overlap (each Asset's end_date is the
# day after its start date.)
dates = pd.date_range('2013-01-01', freq='2D', periods=5, tz='UTC')
df = pd.DataFrame.from_records(
[
{
'sid': i,
'file_name': 'existing',
'company_name': 'existing',
'start_date_nano': date.value,
'end_date_nano': (date + timedelta(days=1)).value,
'exchange': 'NYSE',
}
for i, date in enumerate(dates)
]
)
finder = AssetFinder(df)
for _ in range(2): # Run checks twice to test for caching bugs.
with self.assertRaises(SymbolNotFound):
finder.lookup_symbol_resolve_multiple('non_existing', dates[0])
with self.assertRaises(MultipleSymbolsFound):
finder.lookup_symbol_resolve_multiple('existing', None)
for i, date in enumerate(dates):
# Verify that we correctly resolve multiple symbols using
# the supplied date
result = finder.lookup_symbol_resolve_multiple(
'existing',
date,
)
self.assertEqual(result.symbol, 'existing')
self.assertEqual(result.sid, i)
@parameterized.expand(
build_lookup_generic_cases()
)
def test_lookup_generic(self, finder, symbols, reference_date, expected):
"""
Ensure that lookup_generic works with various permutations of inputs.
"""
results, missing = finder.lookup_generic(symbols, reference_date)
self.assertEqual(results, expected)
self.assertEqual(missing, [])
def test_lookup_generic_handle_missing(self):
data = pd.DataFrame.from_records(
[
# Sids that will be found when we do lookups.
{
'sid': 0,
'file_name': 'real',
'company_name': 'real',
'start_date_nano': pd.Timestamp('2013-1-1', tz='UTC'),
'end_date_nano': pd.Timestamp('2014-1-1', tz='UTC'),
'exchange': '',
},
{
'sid': 1,
'file_name': 'also_real',
'company_name': 'also_real',
'start_date_nano': pd.Timestamp('2013-1-1', tz='UTC'),
'end_date_nano': pd.Timestamp('2014-1-1', tz='UTC'),
'exchange': '',
},
# Sid whose end date is before our query date. We should
# still correctly find it.
{
'sid': 2,
'file_name': 'real_but_old',
'company_name': 'real_but_old',
'start_date_nano': pd.Timestamp('2002-1-1', tz='UTC'),
'end_date_nano': pd.Timestamp('2003-1-1', tz='UTC'),
'exchange': '',
},
# Sid whose end date is before our query date. We should
# still correctly find it.
{
'sid': 3,
'file_name': 'real_but_in_the_future',
'company_name': 'real_but_in_the_future',
'start_date_nano': pd.Timestamp('2014-1-1', tz='UTC'),
'end_date_nano': pd.Timestamp('2020-1-1', tz='UTC'),
'exchange': 'THE FUTURE',
},
]
)
finder = AssetFinder(data)
results, missing = finder.lookup_generic(
['real', 1, 'fake', 'real_but_old', 'real_but_in_the_future'],
pd.Timestamp('2013-02-01', tz='UTC'),
)
self.assertEqual(len(results), 3)
self.assertEqual(results[0].symbol, 'real')
self.assertEqual(results[0].sid, 0)
self.assertEqual(results[1].symbol, 'also_real')
self.assertEqual(results[1].sid, 1)
self.assertEqual(len(missing), 2)
self.assertEqual(missing[0], 'fake')
self.assertEqual(missing[1], 'real_but_in_the_future')
def test_insert_metadata(self):
finder = AssetFinder()
finder.insert_metadata(0,
asset_type='equity',
start_date='2014-01-01',
end_date='2015-01-01',
symbol="PLAY",
foo_data="FOO",)
# Test proper insertion
equity = finder.retrieve_asset(0)
self.assertIsInstance(equity, Equity)
self.assertEqual('PLAY', equity.symbol)
self.assertEqual(pd.Timestamp('2015-01-01', tz='UTC'),
equity.end_date)
# Test invalid field
self.assertFalse('foo_data' in finder.metadata_cache[0])
def test_consume_metadata(self):
# Test dict consumption
finder = AssetFinder()
dict_to_consume = {0: {'symbol': 'PLAY'},
1: {'symbol': 'MSFT'}}
finder.consume_metadata(dict_to_consume)
equity = finder.retrieve_asset(0)
self.assertIsInstance(equity, Equity)
self.assertEqual('PLAY', equity.symbol)
finder = AssetFinder()
# Test dataframe consumption
df = pd.DataFrame(columns=['asset_name', 'exchange'], index=[0, 1])
df['asset_name'][0] = "Dave'N'Busters"
df['exchange'][0] = "NASDAQ"
df['asset_name'][1] = "Microsoft"
df['exchange'][1] = "NYSE"
finder.consume_metadata(df)
self.assertEqual('NASDAQ', finder.metadata_cache[0]['exchange'])
self.assertEqual('Microsoft', finder.metadata_cache[1]['asset_name'])
def test_consume_asset_as_identifier(self):
# Build some end dates
eq_end = pd.Timestamp('2012-01-01', tz='UTC')
fut_end = pd.Timestamp('2008-01-01', tz='UTC')
# Build some simple Assets
equity_asset = Equity(1, symbol="TESTEQ", end_date=eq_end)
future_asset = Future(200, symbol="TESTFUT", end_date=fut_end)
# Consume the Assets
finder = AssetFinder()
finder.consume_identifiers([equity_asset, future_asset])
# Test equality with newly built Assets
self.assertEqual(equity_asset, finder.retrieve_asset(1))
self.assertEqual(future_asset, finder.retrieve_asset(200))
self.assertEqual(eq_end, finder.retrieve_asset(1).end_date)
self.assertEqual(fut_end, finder.retrieve_asset(200).end_date)
def test_sid_assignment(self):
# This metadata does not contain SIDs
metadata = {'PLAY': {'symbol': 'PLAY'},
'MSFT': {'symbol': 'MSFT'}}
today = normalize_date(pd.Timestamp('2015-07-09', tz='UTC'))
# Build a finder that is allowed to assign sids
finder = AssetFinder(metadata=metadata,
allow_sid_assignment=True)
# Verify that Assets were built and different sids were assigned
play = finder.lookup_symbol('PLAY', today)
msft = finder.lookup_symbol('MSFT', today)
self.assertEqual('PLAY', play.symbol)
self.assertIsNotNone(play.sid)
self.assertNotEqual(play.sid, msft.sid)
def test_sid_assignment_failure(self):
# This metadata does not contain SIDs
metadata = {'PLAY': {'symbol': 'PLAY'},
'MSFT': {'symbol': 'MSFT'}}
# Build a finder that is not allowed to assign sids, asserting failure
with self.assertRaises(SidAssignmentError):
AssetFinder(metadata=metadata, allow_sid_assignment=False)
def test_security_dates_warning(self):
# Build an asset with an end_date
eq_end = pd.Timestamp('2012-01-01', tz='UTC')
equity_asset = Equity(1, symbol="TESTEQ", end_date=eq_end)
# Catch all warnings
with warnings.catch_warnings(record=True) as w:
# Cause all warnings to always be triggered
warnings.simplefilter("always")
equity_asset.security_start_date
equity_asset.security_end_date
equity_asset.security_name
# Verify the warning
self.assertEqual(3, len(w))
for warning in w:
self.assertTrue(issubclass(warning.category,
DeprecationWarning))
def test_lookup_future_chain(self):
metadata = {
# Notice day is today, so not valid
2: {
'symbol': 'ADN15',
'root_symbol': 'AD',
'asset_type': 'future',
'notice_date': pd.Timestamp('2015-05-14', tz='UTC'),
'start_date': pd.Timestamp('2015-01-01', tz='UTC')
},
1: {
'symbol': 'ADV15',
'root_symbol': 'AD',
'asset_type': 'future',
'notice_date': pd.Timestamp('2015-08-14', tz='UTC'),
'start_date': pd.Timestamp('2015-01-01', tz='UTC')
},
# Starts trading today, so should be valid.
0: {
'symbol': 'ADF16',
'root_symbol': 'AD',
'asset_type': 'future',
'notice_date': pd.Timestamp('2015-11-16', tz='UTC'),
'start_date': pd.Timestamp('2015-05-14', tz='UTC')
},
# Copy of the above future, but starts trading in August,
# so it isn't valid.
3: {
'symbol': 'ADF16',
'root_symbol': 'AD',
'asset_type': 'future',
'notice_date': pd.Timestamp('2015-11-16', tz='UTC'),
'start_date': pd.Timestamp('2015-08-01', tz='UTC')
},
}
finder = AssetFinder(metadata=metadata)
dt = pd.Timestamp('2015-05-14', tz='UTC')
last_year = pd.Timestamp('2014-01-01', tz='UTC')
first_day = pd.Timestamp('2015-01-01', tz='UTC')
# Check that we get the expected number of contracts, in the
# right order
ad_contracts = finder.lookup_future_chain('AD', dt, dt)
self.assertEqual(len(ad_contracts), 2)
self.assertEqual(ad_contracts[0].sid, 1)
self.assertEqual(ad_contracts[1].sid, 0)
# Check that we get nothing if our knowledge date is last year
ad_contracts = finder.lookup_future_chain('AD', dt, last_year)
self.assertEqual(len(ad_contracts), 0)
# Check that we get things that start on the knowledge date
ad_contracts = finder.lookup_future_chain('AD', dt, first_day)
self.assertEqual(len(ad_contracts), 1)
def test_map_identifier_index_to_sids(self):
# Build an empty finder and some Assets
dt = pd.Timestamp('2014-01-01', tz='UTC')
finder = AssetFinder()
asset1 = Equity(1, symbol="AAPL")
asset2 = Equity(2, symbol="GOOG")
asset200 = Future(200, symbol="CLK15")
asset201 = Future(201, symbol="CLM15")
# Check for correct mapping and types
pre_map = [asset1, asset2, asset200, asset201]
post_map = finder.map_identifier_index_to_sids(pre_map, dt)
self.assertListEqual([1, 2, 200, 201], post_map)
for sid in post_map:
self.assertIsInstance(sid, int)
# Change order and check mapping again
pre_map = [asset201, asset2, asset200, asset1]
post_map = finder.map_identifier_index_to_sids(pre_map, dt)
self.assertListEqual([201, 2, 200, 1], post_map)
@with_environment()
def test_compute_lifetimes(self, env=None):
num_assets = 4
trading_day = env.trading_day
first_start = pd.Timestamp('2015-04-01', tz='UTC')
frame = make_rotating_asset_info(
num_assets=num_assets,
first_start=first_start,
frequency=env.trading_day,
periods_between_starts=3,
asset_lifetime=5
)
finder = AssetFinder(frame)
all_dates = pd.date_range(
start=first_start,
end=frame.end_date.max(),
freq=trading_day,
)
for dates in all_subindices(all_dates):
expected_mask = full(
shape=(len(dates), num_assets),
fill_value=False,
dtype=bool,
)
for i, date in enumerate(dates):
it = frame[['start_date', 'end_date']].itertuples()
for j, start, end in it:
if start <= date <= end:
expected_mask[i, j] = True
# Filter out columns with all-empty columns.
expected_result = pd.DataFrame(
data=expected_mask,
index=dates,
columns=frame.sid.values,
)
actual_result = finder.lifetimes(dates)
assert_frame_equal(actual_result, expected_result)
class TestFutureChain(TestCase):
metadata = {
0: {
'symbol': 'CLG06',
'root_symbol': 'CL',
'asset_type': 'future',
'start_date': pd.Timestamp('2005-12-01', tz='UTC'),
'notice_date': pd.Timestamp('2005-12-20', tz='UTC'),
'expiration_date': pd.Timestamp('2006-01-20', tz='UTC')},
1: {
'root_symbol': 'CL',
'symbol': 'CLK06',
'asset_type': 'future',
'start_date': pd.Timestamp('2005-12-01', tz='UTC'),
'notice_date': pd.Timestamp('2006-03-20', tz='UTC'),
'expiration_date': pd.Timestamp('2006-04-20', tz='UTC')},
2: {
'symbol': 'CLQ06',
'root_symbol': 'CL',
'asset_type': 'future',
'start_date': pd.Timestamp('2005-12-01', tz='UTC'),
'notice_date': pd.Timestamp('2006-06-20', tz='UTC'),
'expiration_date': pd.Timestamp('2006-07-20', tz='UTC')},
3: {
'symbol': 'CLX06',
'root_symbol': 'CL',
'asset_type': 'future',
'start_date': pd.Timestamp('2006-02-01', tz='UTC'),
'notice_date': pd.Timestamp('2006-09-20', tz='UTC'),
'expiration_date': pd.Timestamp('2006-10-20', tz='UTC')}
}
asset_finder = AssetFinder(metadata=metadata)
def test_len(self):
""" Test the __len__ method of FutureChain.
"""
# None of the contracts have started yet.
cl = FutureChain(self.asset_finder, lambda: '2005-11-30', 'CL')
self.assertEqual(len(cl), 0)
# Sids 0, 1, & 2 have started, 3 has not yet started.
cl = FutureChain(self.asset_finder, lambda: '2005-12-01', 'CL')
self.assertEqual(len(cl), 3)
# Sid 0 is still valid the day before its notice date.
cl = FutureChain(self.asset_finder, lambda: '2005-12-19', 'CL')
self.assertEqual(len(cl), 3)
# Sid 0 is now invalid, leaving only Sids 1 & 2 valid.
cl = FutureChain(self.asset_finder, lambda: '2005-12-20', 'CL')
self.assertEqual(len(cl), 2)
# Sid 3 has started, so 1, 2, & 3 are now valid.
cl = FutureChain(self.asset_finder, lambda: '2006-02-01', 'CL')
self.assertEqual(len(cl), 3)
# All contracts are no longer valid.
cl = FutureChain(self.asset_finder, lambda: '2006-09-20', 'CL')
self.assertEqual(len(cl), 0)
def test_getitem(self):
""" Test the __getitem__ method of FutureChain.
"""
cl = FutureChain(self.asset_finder, lambda: '2005-12-01', 'CL')
self.assertEqual(cl[0], 0)
self.assertEqual(cl[1], 1)
self.assertEqual(cl[2], 2)
with self.assertRaises(IndexError):
cl[3]
cl = FutureChain(self.asset_finder, lambda: '2005-12-19', 'CL')
self.assertEqual(cl[0], 0)
cl = FutureChain(self.asset_finder, lambda: '2005-12-20', 'CL')
self.assertEqual(cl[0], 1)
cl = FutureChain(self.asset_finder, lambda: '2006-02-01', 'CL')
self.assertEqual(cl[-1], 3)
def test_root_symbols(self):
""" Test that different variations on root symbols are handled
as expected.
"""
# Make sure this successfully gets the chain for CL.
cl = FutureChain(self.asset_finder, lambda: '2005-12-01', 'CL')
self.assertEqual(cl.root_symbol, 'CL')
# These root symbols don't exist, so RootSymbolNotFound should
# be raised immediately.
with self.assertRaises(RootSymbolNotFound):
FutureChain(self.asset_finder, lambda: '2005-12-01', 'CLZ')
with self.assertRaises(RootSymbolNotFound):
FutureChain(self.asset_finder, lambda: '2005-12-01', '')
def test_repr(self):
""" Test the __repr__ method of FutureChain.
"""
cl = FutureChain(self.asset_finder, lambda: '2005-12-01', 'CL')
cl_feb = FutureChain(self.asset_finder, lambda: '2005-12-01', 'CL',
as_of_date='2006-02-01')
# The default chain should not include the as of date.
self.assertEqual(repr(cl), "FutureChain(root_symbol='CL')")
# An explicit as of date should show up in the repr.
self.assertEqual(
repr(cl_feb),
("FutureChain(root_symbol='CL', "
"as_of_date='2006-02-01 00:00:00+00:00')")
)
def test_as_of(self):
""" Test the as_of method of FutureChain.
"""
cl = FutureChain(self.asset_finder, lambda: '2005-12-01', 'CL')
# Test that the as_of_date is set correctly to the future
feb = '2006-02-01'
cl_feb = cl.as_of(feb)
self.assertEqual(
cl_feb.as_of_date,
pd.Timestamp(feb, tz='UTC')
)
# Test that the as_of_date is set correctly to the past, with
# args of str, datetime.datetime, and pd.Timestamp.
feb_prev = '2005-02-01'
cl_feb_prev = cl.as_of(feb_prev)
self.assertEqual(
cl_feb_prev.as_of_date,
pd.Timestamp(feb_prev, tz='UTC')
)
feb_prev = datetime(year=2005, month=2, day=1)
cl_feb_prev = cl.as_of(feb_prev)
self.assertEqual(
cl_feb_prev.as_of_date,
pd.Timestamp(feb_prev, tz='UTC')
)
feb_prev = pd.Timestamp('2005-02-01')
cl_feb_prev = cl.as_of(feb_prev)
self.assertEqual(
cl_feb_prev.as_of_date,
pd.Timestamp(feb_prev, tz='UTC')
)
# The chain as of the current dt should always be the same as
        # the default chain. Tests date as str, pd.Timestamp, and
# datetime.datetime.
self.assertEqual(cl[0], cl.as_of('2005-12-01')[0])
self.assertEqual(cl[0], cl.as_of(pd.Timestamp('2005-12-01'))[0])
self.assertEqual(
cl[0],
cl.as_of(datetime(year=2005, month=12, day=1))[0]
)
def test_offset(self):
""" Test the offset method of FutureChain.
"""
cl = FutureChain(self.asset_finder, lambda: '2005-12-01', 'CL')
# Test that an offset forward sets as_of_date as expected
self.assertEqual(
cl.offset('3 days').as_of_date,
cl.as_of_date + pd.Timedelta(days=3)
)
# Test that an offset backward sets as_of_date as expected, with
# time delta given as str, datetime.timedelta, and pd.Timedelta.
self.assertEqual(
cl.offset('-1000 days').as_of_date,
cl.as_of_date + pd.Timedelta(days=-1000)
)
self.assertEqual(
cl.offset(timedelta(days=-1000)).as_of_date,
cl.as_of_date + pd.Timedelta(days=-1000)
)
self.assertEqual(
cl.offset(pd.Timedelta('-1000 days')).as_of_date,
cl.as_of_date + pd.Timedelta(days=-1000)
)
# An offset of zero should give the original chain.
self.assertEqual(cl[0], cl.offset(0)[0])
self.assertEqual(cl[0], cl.offset("0 days")[0])
# A string that doesn't represent a time delta should raise a
# ValueError.
with self.assertRaises(ValueError):
cl.offset("blah")
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "allfeed.settings")
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
|
#!/usr/bin/env python
__author__ = "Amir Savvy"
__copyright__ = "Copyright 2021, MVP Vending Machine Project"
__credits__ = ["amir savvy"]
__license__ = "GPL"
__version__ = "1.0.1"
__maintainer__ = "Amir Savvy"
__email__ = "mianamirlahore@gmail.com"
__status__ = "Production"
# User info
TEST_NORMAL_USER_EMAIL = "normal@user.com"
TEST_SUPER_USER_EMAIL = "super@user.com"
TEST_PASSWORD = "@#$%123456)(*!@#$"
ADMIN = 1
SELLER = 2
BUYER = 3
AMOUNT_DATA = (5, 10, 20, 50, 100)
UNSAFE_REQUEST_METHODS = ('POST', 'PUT', 'PATCH', 'DELETE')
SAFE_REQUEST_METHODS = ('GET', 'HEAD', 'OPTIONS')
EMPTY_RESPONSE = dict()
MESSAGE_KEY = 'message'
ERROR_MESSAGE_KEY = 'error_message'
DATA_KEY = 'data'
IS_SUCCESSFULL = "is_successfull"
IS_FAILED = "is_failed"
|
"""
This module is mainly used to conduct feature engineering for predicting air quality index model
"""
import warnings
import helpers
warnings.filterwarnings('ignore')
if __name__ == '__main__':
PATH = r'air_pollution_death_rate_related/data/data_air_raw/daily_aqi_by_county_'
### use most recent 3 years to train model
RAW_DATA = helpers.read_raw_data(PATH, [2016, 2017, 2018])
DATA = helpers.data_cleaning(RAW_DATA) ### clean data before doing feature engineering
for county_name in list(DATA["state_county"].unique()): #### we do feature engineering
#### on each county independently
#### feature engineering for model
df = (helpers.feature_engineering_for_aqi(DATA, 30, county_name,\
"air_pollution_death_rate_related/data/county_features_data/county_features_train/"))
|
###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2019, John McNamara, jmcnamara@cpan.org
#
from ..excel_comparsion_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename('autofilter09.xlsx')
self.set_text_file('autofilter_data.txt')
def test_create_file(self):
"""
Test the creation of a simple XlsxWriter file with an autofilter.
This test checks a filter list.
"""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
# Set the autofilter.
worksheet.autofilter('A1:D51')
# Add filter criteria.
worksheet.filter_column_list(0, ['East', 'South', 'North'])
# Open a text file with autofilter example data.
textfile = open(self.txt_filename)
# Read the headers from the first line of the input file.
headers = textfile.readline().strip("\n").split()
# Write out the headers.
worksheet.write_row('A1', headers)
# Start writing data after the headers.
row = 1
# Read the rest of the text file and write it to the worksheet.
for line in textfile:
# Split the input data based on whitespace.
data = line.strip("\n").split()
# Convert the number data from the text file.
for i, item in enumerate(data):
try:
data[i] = float(item)
except ValueError:
pass
# Simulate a blank cell in the data.
if row == 6:
data[0] = ''
# Get some of the field data.
region = data[0]
# Check for rows that match the filter.
if region == 'North' or region == 'South' or region == 'East':
# Row matches the filter, no further action required.
pass
else:
# We need to hide rows that don't match the filter.
worksheet.set_row(row, options={'hidden': True})
# Write out the row data.
worksheet.write_row(row, 0, data)
# Move on to the next worksheet row.
row += 1
textfile.close()
workbook.close()
self.assertExcelEqual()
|
from lxml.builder import ElementMaker
from moai.metadata.mods import NL_MODS, XSI_NS
class DIDL(object):
"""A metadata prefix implementing the DARE DIDL metadata format
this format is registered under the name "didl"
Note that this format re-uses oai_dc and mods formats that come with
MOAI by default
"""
def __init__(self, prefix, config, db):
self.prefix = prefix
self.config = config
self.db = db
self.ns = {'didl': "urn:mpeg:mpeg21:2002:02-DIDL-NS",
'dii': "urn:mpeg:mpeg21:2002:01-DII-NS",
'dip': "urn:mpeg:mpeg21:2005:01-DIP-NS",
'dcterms': "http://purl.org/dc/terms/",
'xsi': "http://www.w3.org/2001/XMLSchema-instance",
'rdf': "http://www.w3.org/1999/02/22-rdf-syntax-ns#",
'dc': 'http://purl.org/dc/elements/1.1/',
}
self.schemas = {'didl':'http://standards.iso.org/ittf/PubliclyAvailableStandards/MPEG-21_schema_files/did/didl.xsd',
'dii': 'http://standards.iso.org/ittf/PubliclyAvailableStandards/MPEG-21_schema_files/dii/dii.xsd',
'dip': 'http://standards.iso.org/ittf/PubliclyAvailableStandards/MPEG-21_schema_files/dip/dip.xsd'}
def get_namespace(self):
return self.ns[self.prefix]
def get_schema_location(self):
return self.schemas[self.prefix]
def __call__(self, element, metadata):
data = metadata.record
DIDL = ElementMaker(namespace=self.ns['didl'], nsmap=self.ns)
DII = ElementMaker(namespace=self.ns['dii'])
DIP = ElementMaker(namespace=self.ns['dip'])
RDF = ElementMaker(namespace=self.ns['rdf'])
DCTERMS = ElementMaker(namespace=self.ns['dcterms'])
oai_url = (self.config.url+'?verb=GetRecord&'
'metadataPrefix=%s&identifier=%s' % (
self.prefix,
data['id']))
id_url = data['metadata'].get('url', [None])[0]
# generate mods for this feed
mods_data = DIDL.Resource(mimeType="application/xml")
NL_MODS('mods', self.config, self.db)(mods_data, metadata)
asset_data = []
descriptive_metadata = RDF.type()
descriptive_metadata.attrib['{%s}resource' % self.ns['rdf']] = (
'info:eu-repo/semantics/descriptiveMetadata')
didl = DIDL.DIDL(
DIDL.Item(
DIDL.Descriptor(
DIDL.Statement(
DCTERMS.modified(data['modified'].isoformat().split('.')[0]),
mimeType="application/xml"
)
),
DIDL.Component(
DIDL.Resource(ref=id_url or oai_url,mimeType="application/xml")
),
DIDL.Item(
DIDL.Descriptor(
DIDL.Statement(descriptive_metadata, mimeType="application/xml")
),
DIDL.Component(
DIDL.Descriptor(
DIDL.Statement("mods", mimeType="text/plain")),
mods_data)
),
)
)
object_file = RDF.type()
object_file.attrib['{%s}resource' % self.ns['rdf']] = (
'info:eu-repo/semantics/objectFile')
for root_item in didl:
for asset in data['metadata'].get('asset', []):
url = asset['url']
if not url.startswith('http://'):
url = self.config.url.rstrip('/') + '/' + url.lstrip('/')
item = DIDL.Item(
DIDL.Descriptor(
DIDL.Statement(object_file, mimeType="application/xml")
)
)
access = asset.get('access')
if access == 'open':
access = (
'http://purl.org/eprint/accessRights/OpenAccess')
elif access == 'restricted':
access = (
'http://purl.org/eprint/accessRights/RestrictedAccess')
elif access == 'closed':
access = (
'http://purl.org/eprint/accessRights/ClosedAccess')
if access:
item.append(
DIDL.Descriptor(
DIDL.Statement(DCTERMS.accessRights(access),
mimeType="application/xml")))
for modified in asset.get('modified', []):
item.append(
DIDL.Descriptor(
DIDL.Statement(DCTERMS.modified(modified),
mimeType="application/xml")))
item.append(
DIDL.Component(
DIDL.Resource(mimeType=asset['mimetype'],
ref=url)
)
)
root_item.append(item)
break
human_start_page = RDF.type()
human_start_page.attrib['{%s}resource' % self.ns['rdf']] = (
'info:eu-repo/semantics/humanStartPage')
if data['metadata'].get('url'):
item = DIDL.Item(
DIDL.Descriptor(
DIDL.Statement(human_start_page, mimeType="application/xml")
),
DIDL.Component(
DIDL.Resource(mimeType="text/html", ref=data['metadata']['url'][0])
)
)
root_item.append(item)
didl.attrib['{%s}schemaLocation' % XSI_NS] = (
'%s %s %s %s %s %s' % (self.ns['didl'],
self.schemas['didl'],
self.ns['dii'],
self.schemas['dii'],
self.ns['dip'],
self.schemas['dip']))
element.append(didl)
|
# -*- coding: utf-8 -*-
import sys
from os.path import dirname, abspath, normpath, join, realpath
from os import listdir, remove, system
import json
from datetime import datetime
begin = len(normpath(abspath(join(dirname(__file__), "../.."))))
end = len(normpath(abspath(join(dirname(__file__), ".."))))
MAIN_DIR = dirname(realpath(__file__))
package_name = MAIN_DIR[begin + 1 : end]
# Add the directory to the python path
sys.path.append(MAIN_DIR[:begin])
exec(
"from "
+ package_name
+ ".Generator.ClassGenerator.class_generator import generate_class"
)
exec("from " + package_name + ".Generator.read_fct import read_all")
exec("from " + package_name + ".definitions import MAIN_DIR, DOC_DIR, INT_DIR")
# List of the main packages (to sort the classes)
PACKAGE_LIST = ["Geometry", "Machine", "Material", "Slot", "Import"]
def generate_code(root_path, gen_dict=None):
"""Generate pyleecan Classes code according to doc in root_path
Parameters
----------
root_path : str
Path to the main folder of Pyleecan
gen_dict : dict
Generation dictionary (contains all the csv data)
Returns
-------
None
"""
CLASS_DIR = join(root_path, "Classes")
FUNC_DIR = join(root_path, "Functions")
DOC_DIR = join(root_path, "Generator", "ClassesRef")
print("Reading classes csv in: " + DOC_DIR)
print("Saving generated files in: " + CLASS_DIR)
path = __file__[__file__.index(package_name) :]
path = path.replace("\\", "/")
# Deleting all the previous class
print("Deleting old class files...")
for file_name in listdir(CLASS_DIR):
if file_name[0] != "_":
remove(join(CLASS_DIR, file_name))
# A file to import every classes quickly
import_file = open(join(CLASS_DIR, "import_all.py"), "w")
import_file.write("# -*- coding: utf-8 -*-\n\n")
import_file.write('"""File generated by generate_code() - \n')
import_file.write('WARNING! All changes made in this file will be lost!\n"""\n\n')
# A file to select the constructor according to a string
load_file = open(join(FUNC_DIR, "load_switch.py"), "w")
load_file.write("# -*- coding: utf-8 -*-\n")
load_file.write('"""File generated by generate_code() - \n')
load_file.write('WARNING! All changes made in this file will be lost!\n"""\n\n')
load_file.write("from ..Classes.import_all import *\n\n")
load_file.write("load_switch = {\n")
# Read all the csv files
if gen_dict is None:
gen_dict = read_all(DOC_DIR)
# Generate all the class files (sorted to remove "commit noise")
for class_name, _ in iter(sorted(list(gen_dict.items()))):
import_file.write(
"from ..Classes." + class_name + " import " + class_name + "\n"
)
load_file.write(' "' + class_name + '": ' + class_name + ",\n")
print("Generation of " + class_name + " class")
generate_class(gen_dict, class_name, CLASS_DIR)
import_file.close()
load_file.write("}\n")
load_file.close()
print("Generation of load_switch.py")
print("Generation of import_all.py")
# Save gen_dict
class_dict_file = join(CLASS_DIR, "Class_Dict.json")
with open(class_dict_file, "w") as json_file:
json.dump(gen_dict, json_file, sort_keys=True, indent=4, separators=(",", ": "))
if __name__ == "__main__":
gen_dict = read_all(DOC_DIR, is_internal=False, in_path=INT_DIR)
generate_code(MAIN_DIR, gen_dict)
# Run black
try:
import black
system('"{}" -m black .'.format(sys.executable))
if black.__version__.split(".")[0] != "20":
print("\n############################################")
print(
"WARNING: The official version of black for pyleecan is 20, please update your black version"
)
print("############################################\n")
except ImportError:
print("/!\\ Please install and run black (version 20) /!\\")
now = datetime.now()
print("End at: ", now.strftime("%H:%M:%S"))
|
# Count word frequencies in sample.txt and report the count for a user-supplied word.
d = dict()
with open("sample.txt", "r") as file:
    for line in file:
        line = line.strip().lower()
        for word in line.split(" "):
            if word in d:
                d[word] = d[word] + 1
            else:
                d[word] = 1
find = input("enter the word to count: ").lower()
if find in d:
    print(f"{find} : " + str(d[find]))
else:
    print("word not present!!")
|
#
# Class for full thermal submodel
#
import pybamm
from .base_x_full import BaseModel
class NoCurrentCollector(BaseModel):
"""Class for full x-direction thermal submodel without current collectors
Parameters
----------
param : parameter class
The parameters to use for this submodel
**Extends:** :class:`pybamm.thermal.x_full.BaseModel`
"""
def __init__(self, param):
super().__init__(param)
def set_rhs(self, variables):
T = variables["Cell temperature"]
q = variables["Heat flux"]
Q = variables["Total heating"]
self.rhs = {
T: (-pybamm.div(q) / self.param.delta ** 2 + self.param.B * Q)
/ (self.param.C_th * self.param.rho_k)
}
def set_boundary_conditions(self, variables):
T = variables["Cell temperature"]
T_n_left = pybamm.boundary_value(T, "left")
T_p_right = pybamm.boundary_value(T, "right")
T_amb = variables["Ambient temperature"]
self.boundary_conditions = {
T: {
"left": (
self.param.h * (T_n_left - T_amb) / self.param.lambda_n,
"Neumann",
),
"right": (
-self.param.h * (T_p_right - T_amb) / self.param.lambda_p,
"Neumann",
),
}
}
def _current_collector_heating(self, variables):
"""Returns zeros for current collector heat source terms"""
Q_s_cn = pybamm.Scalar(0)
Q_s_cp = pybamm.Scalar(0)
return Q_s_cn, Q_s_cp
def _yz_average(self, var):
"""
Computes the y-z average by integration over y and z
In this case this is just equal to the input variable
"""
return var
def _x_average(self, var, var_cn, var_cp):
"""
Computes the X-average over the whole cell *not* including current
collectors. This overwrites the default behaviour of 'base_thermal'.
"""
return pybamm.x_average(var)
|
""" Summary
"""
class Solution(object):
"""
Problem:
https://leetcode.com/problems/combination-sum/
Example:
given candidate set [2, 3, 6, 7] and target 7,
A solution set is:
[
[7],
[2, 2, 3]
]
"""
def combinationSum(self, candidates, target):
"""
:type candidates: List[int]
:type target: int
:rtype: List[List[int]]
"""
candidates.sort()
rets = []
for i in candidates:
if i > target:
break
elif i == target:
rets.append([i])
else:
rets += ([sorted([i] + x) for x in self.combinationSum(candidates, target - i)])
result = []
for r in rets:
if r not in result:
result.append(r)
return result
if __name__ == '__main__':
candidates = [2, 3, 6, 7]
target = 7
    result = Solution().combinationSum(candidates, target)
print(result)
|
import pandas as pd
import youtube_api_comments_to_mongodb as ym
import text_classification_and_sentiment_analysis as ta
dbpw = 'kpop'
collection_name = 'comments'
data = ym.mongo_to_dataframe(dbpw, collection_name)
allcomments, englishcomments = ta.dataframe_preparation(data)
tt_set, englishcomments = ta.classify_facilitator(englishcomments, 300,
['quality', 'nationalist_ethnicist', 'kpop'])
allcomments.to_pickle('allcomments.pickle')
englishcomments.to_pickle('englishcomments.pickle')
tt_set.to_pickle('tt_set.pickle')
|
class Person:
def __init__(self, name, age):
self.name = name
self.age = age
def __str__(self):
return "{" + self.name + " " + str(self.age) + "}"
p1 = Person("John", 36)
print(p1)
|
import logging
import random
from typing import List, Tuple
import numpy as np
from skimage.transform import resize
from scipy.ndimage import zoom
from toolbox import images
from toolbox.images import crop, mask_bbox
from .poisson_disk import sample_poisson_uniform
logger = logging.getLogger(__name__)
class PatchType:
S2F_MASKED_BLACK = 'cropped_scaled_to_fit'
S2F_MASKED_WHITE = 'cropped_scaled_to_fit_white'
S2F = 'scaled_to_fit'
RANDOM = 'random2'
def sample_poisson_mask(mask, r, k):
ymin, ymax, xmin, xmax = mask_bbox(mask)
height = ymax - ymin
width = xmax - xmin
points = np.array(sample_poisson_uniform(height, width, r, k,
mask[ymin:ymax, xmin:xmax]))
points[:, 0] += ymin
points[:, 1] += xmin
points = np.floor(points).astype(int)
return points
def generate_dense_bboxes(
mask: np.ndarray,
scale=0.23,
min_dist=0.091):
mask_height, mask_width = mask.shape
min_length = min(mask_height, mask_width)
patch_sample_size = scale * min_length
centers = sample_poisson_mask(mask, min_length * min_dist, 1000)
half = int(patch_sample_size / 2)
bboxes = []
for center in centers:
ycent, xcent = center
bbox = (ycent - half,
ycent + half + 1,
xcent - half,
xcent + half + 1)
if (bbox[0] >= 0 and bbox[1] < mask_height
and bbox[2] >= 0 and bbox[3] < mask_width):
bboxes.append(bbox)
print('bboxes={} centers={}, mask_size={}, min_dist={}'.format(
len(bboxes), len(centers), mask.shape, min_length * min_dist))
return bboxes
def random_crops(image, patch_size, num_crops):
    border_mask = np.ones(image.shape[:2], dtype=bool)
    # Use integer half-sizes so they can be used as slice indices.
    left = patch_size // 2
    right = image.shape[1] - patch_size // 2
    top = patch_size // 2
    bottom = image.shape[0] - patch_size // 2
    border_mask[:, :left] = False
    border_mask[:, right:] = False
    border_mask[:top, :] = False
    border_mask[bottom:, :] = False
yinds, xinds = np.where(border_mask)
bboxes = []
for i in range(num_crops):
point_idx = np.random.randint(0, len(yinds))
ycent, xcent = yinds[point_idx], xinds[point_idx]
half = int(patch_size / 2)
# Just squash the patch if it's out of bounds.
bbox = (ycent - half,
ycent + half + 1,
xcent - half,
xcent + half + 1)
bboxes.append(bbox)
return bboxes_to_patches(image, bboxes, patch_size)
def generate_random_bboxes(mask: np.ndarray, scale_range=(1.0, 1.0),
num_patches=5, fixed_size=None):
"""
Generates random bounding boxes at random scales with centroid within the
mask.
:param mask: The contrained area for the centroid of the patch.
:param min_scale: The min scale (multiple of the minimum length of the
input mask) of the sampling.
:param max_scale: The max scale (multiple of the minimum length of the
input mask) of the sampling.
:param num_patches: Number of patches to generate.
:return: Bounding boxes.
"""
mask_height, mask_width = mask.shape[:2]
min_length = min(mask_height, mask_width)
yinds, xinds = np.where(mask)
patch_bboxes = []
patch_scales = []
tries = 0
while len(patch_bboxes) < num_patches:
scale = random.uniform(*scale_range)
patch_scales.append(scale)
patch_size = scale * fixed_size if fixed_size else int(scale * min_length)
point_idx = np.random.randint(0, len(yinds))
ycent, xcent = yinds[point_idx], xinds[point_idx]
half = int(patch_size / 2)
# Just squash the patch if it's out of bounds.
if (ycent - half < 0 or ycent + half > mask.shape[0] or
xcent - half < 0 or xcent + half > mask.shape[1]):
if tries < 100:
tries += 1
continue
bbox = (max(ycent - half, 0),
min(ycent + half + 1, mask.shape[0]),
max(xcent - half, 0),
min(xcent + half + 1, mask.shape[1]))
patch_bboxes.append(bbox)
return patch_bboxes, patch_scales
def bboxes_to_patches(im: np.ndarray,
bboxes: List[Tuple[int, int, int, int]],
patch_size: int, use_pil=False):
"""
Converts bounding boxes to actual patches. Patches are all resized to the
patch size regardless of the original bounding box size.
:param im: To crop patch from.
:param bboxes: Boxes defining the patch.
:param patch_size: Patch size to return.
:return: Image patches.
"""
patches = []
for bbox in bboxes:
cropped = crop(im, bbox)
if cropped.shape[0] != patch_size or cropped.shape[1] != patch_size:
scale = [patch_size/cropped.shape[0], patch_size/cropped.shape[1]]
if len(im.shape) == 3:
scale.append(1.0)
if use_pil:
cropped = resize(cropped, (patch_size, patch_size)) \
.astype(dtype=np.float32)
else:
cropped = zoom(cropped, scale, im.dtype, order=1)
patches.append(cropped)
return patches
def compute_mask_tight_patch(im: np.ndarray,
mask: np.ndarray,
patch_size: int):
"""
Computes a patch which contains all the pixels active in the mask scaled to
the patch size.
:param im:
:param mask:
:param patch_size:
:return:
"""
bbox = images.compute_mask_bbox(mask)
cropped = images.crop(im, bbox)
    # imresize is not imported/available here; use skimage's resize (imported above).
    resized = resize(cropped, (patch_size, patch_size), preserve_range=True)
return resized
def compute_minmax_thickness(mask):
max_width = 0
max_height = 0
for row_id in range(mask.shape[0]):
row = mask[row_id, :]
split_locs = np.where(np.diff(row) != 0)[0] + 1
for segment in (np.split(row, split_locs)):
if segment[0] != 0:
max_width = max(max_width, len(segment))
for col_id in range(mask.shape[1]):
col = mask[:, col_id]
split_locs = np.where(np.diff(col) != 0)[0] + 1
for segment in (np.split(col, split_locs)):
if segment[0] != 0:
max_height = max(max_height, len(segment))
return min(max_width, max_height), max(max_width, max_height)
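# Minimal demo of the sampling helpers above (a sketch with a synthetic image and
# mask, not part of the original module; because of the relative imports it has to
# be run as a module, e.g. "python -m <package>.patches"):
if __name__ == '__main__':
    demo_image = np.random.rand(256, 256, 3)
    demo_mask = np.zeros((256, 256), dtype=bool)
    demo_mask[64:192, 64:192] = True
    # Sample a few boxes whose centers lie inside the mask, then resize the crops.
    demo_bboxes, demo_scales = generate_random_bboxes(
        demo_mask, scale_range=(0.2, 0.4), num_patches=3)
    demo_patches = bboxes_to_patches(demo_image, demo_bboxes, patch_size=64)
    print('sampled {} patches with scales {}'.format(len(demo_patches), demo_scales))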
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Dec 7 12:02:50 2021
@author: ministudio
"""
from datetime import datetime, timezone
import pandas as pd
import numpy as np
from alive_progress import alive_bar
def get_all_futures(ftx_client):
tickers = ftx_client.fetchMarkets()
list_perp =[]
#with alive_bar(len(tickers),length=20) as bar:
for ticker in tickers:
if 'PERP' in ticker['id']:
list_perp.append(ticker['id'])
#bar()
return list_perp
def scanner(day,month,year,ticker,ftx):
results = pd.DataFrame(columns=['P/L %'])
start_trade = datetime(year, month, day, 0, 0, 0)
timestamp = start_trade.replace(tzinfo=timezone.utc).timestamp()
    candles = ftx.fetchOHLCV(ticker, timeframe='1h', since=int(timestamp * 1000), limit=5000)
candles_df = pd.DataFrame(candles, columns=['MTS','OPEN','HIGH','LOW','CLOSE','VOLUME'])
volume = candles_df.VOLUME.sum()
for j in range(0,24):
        # algorithm to step from candle to candle
ledger = pd.DataFrame(columns=['POSITION','ENTRY PRICE','P_L SINGLE','P_L TOTAL'])
long = True
time_scanner = ''
        # compute the offset between one candle of interest and the next
offset = 12
if j != 0:
candles = candles[1:]
try:
for i in range(0,len(candles),offset):
entry_price = candles[i][1]
if i == 0:
start = datetime.utcfromtimestamp(candles[i][0]/1000)
end = datetime.utcfromtimestamp(candles[i+offset][0]/1000) #datetime.utcfromtimestamp(candles[i+offset+10][0]/1000)
#print('FROM',start.strftime("%H:%M"),'TO',end.strftime("%H:%M"))
var_pct = p_l_total = 0
position = 'LONG'
time_scanner = f'{start.strftime("%H:%M")} to {end.strftime("%H:%M")}'
else:
#r_exit_entry = candles[i][4]/candles[i-offset][4] #if not long else candles[i][4]/candles[i-offset][4]
                    # compute the profit
if long:
var_pct = round((candles[i-offset][1] - candles[i][1])/candles[i-offset][1]*100, 3)
p_l_total = ledger['P_L TOTAL'].iloc[-1] + var_pct
if not long:
var_pct = round((candles[i][1]-candles[i-offset][1])/candles[i][1]*100, 3)
p_l_total = ledger['P_L TOTAL'].iloc[-1] + var_pct
if long:
date = datetime.utcfromtimestamp(candles[i][0]/1000)
position = 'LONG'
long = False
else:
                    # so we go short
date = datetime.utcfromtimestamp(candles[i][0]/1000) #candles[i+10][0]/1000
position = 'SHORT'
long = True
ledger.loc[date] = [position, entry_price, var_pct, p_l_total]
            results.loc[time_scanner] = round(ledger['P_L TOTAL'].iloc[-1], 2)
#print('P/L TOTAL :\t',round(ledger['P_L TOTAL'][-1],2), '%\n')
except Exception as e:
results.loc[time_scanner] = np.NAN
return results, volume
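# Usage sketch (assumes a configured ccxt-style client exposing fetchMarkets and
# fetchOHLCV, e.g. the FTX client this script was written against):
#   perps = get_all_futures(ftx_client)
#   results, volume = scanner(7, 12, 2021, 'BTC-PERP', ftx_client)
#   print(results.sort_values('P/L %', ascending=False))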
|
HTTP_OK = 200
HTTP_CREATED = 201
HTTP_NO_CONTENT = 204
HTTP_BAD_REQUEST = 400
HTTP_UNAUTHORIZED = 401
HTTP_NOT_FOUND = 404
HTTP_CONFLICT = 409
|
#!/usr/bin/python
# coding=utf-8
#
# <bitbar.title>Dashcoin Ticker (£1GBP)</bitbar.title>
# <bitbar.version>v1.0</bitbar.version>
# <bitbar.author>impshum</bitbar.author>
# <bitbar.author.github>impshum</bitbar.author.github>
# <bitbar.desc>Displays current Dashcoin price for £1 from Coinmarketcap</bitbar.desc>
# <bitbar.image>https://i.imgur.com/KZH5B8s.jpg</bitbar.image>
#
# by impshum
import json
from urllib import urlopen
url = urlopen('https://coinmarketcap-nexuist.rhcloud.com/api/dash').read()
result = json.loads(url)
def flow():
    if float(result['change']) > 0:
        print (' £%.4f | image=iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAABmJLR0QAyQACAALwzISXAAAACXBIWXMAAAsTAAALEwEAmpwYAAAAB3RJTUUH4AQHACkSBTjB+AAAALNJREFUOMvVk70NAjEMhb87WYiGBZAQU7ABNSVSWpZgEEagsJDoKBELUCEKFuBuCKTw0xyQC0lICe5i+/k9/wT+3opUUJQhcAUqa8I5ZQT4tANwioGTCkQZA9vmOQE2oUJFhL0DXBz33RpKUfCLfLTQJMx9IlEWuQr6QB3prGtNS1lwiMvEYo7ekNsKRBkB+y+rH1hDFVOwy7ids+gbVzrsM6CXeYDTF85xroB1ZoHb73ymB5RhJkpZTihGAAAAAElFTkSuQmCC color=#000000'% float(result['price']['gbp']))
else:
        print (' £%.4f | image=iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAABmJLR0QABACnAADQ9FZaAAAACXBIWXMAAAsTAAALEwEAmpwYAAAAB3RJTUUH4AQHACQ1FZwK3gAAAMRJREFUOMvNkjEKAjEQRZ+jKNjYKh5AbzCdjVcQj+BFPIKlp7EMeAJrUbASQVCEr80uG9cNbqe/Cgn/5WUI/DqNfBHM+kCzbs+lPUAr2pwBq5qABbB+M8gszkDvS/kOdAG5VBgEM4ApsP0CGLukjxlEoA0wSZR3Lo0qhxhZDIBDAmDA0wsBLD51CZeOwLKivHbprZx6AkAHuEXbD5fawYwywMqAzOKeDTTPvKqcTGZBMLsGs0utn5gADYEHcKp9e9ni//MCDtNCE3qjsIwAAAAASUVORK5CYII= color=#000000'% float(result['price']['gbp']))
flow()
|
#Tests proper handling of Verifications with Transactions which don't exist.
from typing import Dict, List, Any
import json
from pytest import raises
from e2e.Libs.Minisketch import Sketch
from e2e.Classes.Merit.Block import Block
from e2e.Classes.Merit.Merit import Merit
from e2e.Classes.Consensus.VerificationPacket import VerificationPacket
from e2e.Meros.RPC import RPC
from e2e.Meros.Meros import MessageType
from e2e.Meros.Liver import Liver
from e2e.Tests.Errors import TestError, SuccessError
#pylint: disable=too-many-statements
def VUnknownInBlockTest(
rpc: RPC
) -> None:
vectors: Dict[str, Any]
with open("e2e/Vectors/Consensus/Verification/Parsable.json", "r") as file:
vectors = json.loads(file.read())
merit: Merit = Merit.fromJSON(vectors["blockchain"])
#Custom function to send the last Block and verify it errors at the right place.
def checkFail() -> None:
#This Block should cause the node to disconnect us AFTER it attempts to sync our Transaction.
syncedTX: bool = False
#Grab the Block.
block: Block = merit.blockchain.blocks[2]
#Send the Block.
rpc.meros.liveBlockHeader(block.header)
rpc.meros.handleBlockBody(block)
#Handle sync requests.
reqHash: bytes = bytes()
while True:
if syncedTX:
#Try receiving from the Live socket, where Meros sends keep-alives.
try:
if len(rpc.meros.live.recv()) != 0:
raise Exception()
except TestError:
raise SuccessError("Node disconnected us after we sent a parsable, yet invalid, Verification.")
except Exception:
raise TestError("Meros sent a keep-alive.")
msg: bytes = rpc.meros.sync.recv()
if MessageType(msg[0]) == MessageType.SketchHashesRequest:
if not block.body.packets:
raise TestError("Meros asked for Sketch Hashes from a Block without any.")
reqHash = msg[1 : 33]
if reqHash != block.header.hash:
raise TestError("Meros asked for Sketch Hashes that didn't belong to the Block we just sent it.")
#Create the hashes.
hashes: List[int] = []
for packet in block.body.packets:
hashes.append(Sketch.hash(block.header.sketchSalt, packet))
#Send the Sketch Hashes.
rpc.meros.sketchHashes(hashes)
elif MessageType(msg[0]) == MessageType.SketchHashRequests:
if not block.body.packets:
raise TestError("Meros asked for Verification Packets from a Block without any.")
reqHash = msg[1 : 33]
if reqHash != block.header.hash:
raise TestError("Meros asked for Verification Packets that didn't belong to the Block we just sent it.")
#Create a lookup of hash to packets.
packets: Dict[int, VerificationPacket] = {}
for packet in block.body.packets:
packets[Sketch.hash(block.header.sketchSalt, packet)] = packet
#Look up each requested packet and respond accordingly.
for h in range(int.from_bytes(msg[33 : 37], byteorder="little")):
sketchHash: int = int.from_bytes(msg[37 + (h * 8) : 45 + (h * 8)], byteorder="little")
if sketchHash not in packets:
raise TestError("Meros asked for a non-existent Sketch Hash.")
rpc.meros.packet(packets[sketchHash])
elif MessageType(msg[0]) == MessageType.TransactionRequest:
rpc.meros.dataMissing()
syncedTX = True
else:
raise TestError("Unexpected message sent: " + msg.hex().upper())
with raises(SuccessError):
Liver(rpc, vectors["blockchain"], callbacks={1: checkFail}).live()
|
B_4002_10 = {0: {'A': 0.22337100816803507, 'C': -0.08721732138853625, 'E': -0.05776024940539231, 'D': -0.8062336491499029, 'G': -0.22235775138309136, 'F': 0.41616940014979253, 'I': -0.2625598958640791, 'H': -0.2842266678402531, 'K': -0.11806916630138095, 'M': 0.3503963704784862, 'L': -0.11175681610077592, 'N': -0.6559751061375433, 'Q': 0.42709232284615184, 'P': -0.6562206710837208, 'S': -0.02028872713419685, 'R': 0.7053425369818895, 'T': -0.16988396865190242, 'W': 0.5294490014218092, 'V': -0.5397379396163317, 'Y': 0.6224062516023391}, 1: {'A': -4.0, 'C': -1.4792466334792438, 'E': 1.9520597704545073, 'D': -1.3065688764576122, 'G': -4.0, 'F': -1.2998595004729445, 'I': -1.7137811098439848, 'H': -1.5503864274444812, 'K': -1.5503864274444812, 'M': -1.7137811098439848, 'L': -1.5873303359490254, 'N': -4.0, 'Q': -4.0, 'P': -1.485221375614014, 'S': -1.4792466334792438, 'R': -1.516442783779311, 'T': -1.3722901525124491, 'W': -1.2998595004729445, 'V': -1.7027070282103178, 'Y': -1.2998595004729445}, 2: {'A': 0.2563438176533894, 'C': 0.005946632197619582, 'E': -0.01583980936870634, 'D': -0.28769896803687756, 'G': -0.6954066625927517, 'F': 0.12061097626119485, 'I': 0.3350620409473355, 'H': -0.1694896011839807, 'K': 0.3362119351909843, 'M': 0.21123743247221569, 'L': 0.3569719895865599, 'N': 0.5952112576301342, 'Q': 0.07334278898807628, 'P': -0.4863848827377961, 'S': 0.5774056906967757, 'R': -0.6603164669657029, 'T': -0.5333967423641524, 'W': 0.07155631156720803, 'V': -0.3025209922484634, 'Y': -0.3412456411532286}, 3: {'A': 0.5977554346220839, 'C': 0.014950504192544605, 'E': -0.14276836811036525, 'D': -0.566829217593357, 'G': 0.43366216673597924, 'F': 0.18735599610913023, 'I': -0.5941476733420843, 'H': 0.7148611685591905, 'K': -0.25892998681258395, 'M': 0.24255037248622957, 'L': 0.1922371468778731, 'N': -0.8992543313554157, 'Q': -0.0066294697791371, 'P': -0.17868447116149977, 'S': 0.5575118094930324, 'R': 0.4354350798832712, 'T': -0.6863999529014213, 'W': -0.9040043361451602, 'V': -0.09610557652101945, 'Y': 0.25017165150118675}, 4: {'A': -0.36211990116689025, 'C': 1.0140227890402689, 'E': -0.10425685745040862, 'D': 0.07615994218018855, 'G': -0.8009857839941741, 'F': 0.24667787490362253, 'I': -0.17420628325418536, 'H': -0.7132294203626169, 'K': 0.12801265861459374, 'M': 0.7179341611730891, 'L': 0.2421722453426991, 'N': 0.25183605308664186, 'Q': -0.7166747952837604, 'P': -0.130679376377445, 'S': 0.3742768715087381, 'R': -0.44531439192302325, 'T': 1.0778574915916541, 'W': -0.7242004826221311, 'V': -0.10127761501276543, 'Y': -0.5187144195614529}, 5: {'A': 0.5744121217096468, 'C': 0.5415630199991066, 'E': -0.5530536302143234, 'D': -0.28640127477331273, 'G': -0.729203233404597, 'F': -0.11418154127673937, 'I': 0.37603616107858134, 'H': -0.9148359315846256, 'K': -0.18293738749007366, 'M': 0.4430551493441148, 'L': 0.028940205572376906, 'N': -0.015688177174764985, 'Q': 0.3334643637995271, 'P': 0.3653661968849176, 'S': 0.24310899420114637, 'R': 0.3683640838816875, 'T': -0.41546224081805533, 'W': -0.17011116673092944, 'V': -0.16028570036270884, 'Y': 0.037898267913432676}, 6: {'A': -0.2449698680605102, 'C': 0.21891284135185457, 'E': -0.1914970740107789, 'D': -0.6845824833815898, 'G': -0.23680284287562992, 'F': -0.047735228056870374, 'I': -0.14535092817743472, 'H': -0.7904575078177513, 'K': -0.3522379408807177, 'M': 0.4651584476619752, 'L': 0.3633365762076467, 'N': -0.1906297329391477, 'Q': -0.17917612566613594, 'P': -0.09502957757299856, 'S': 0.1613073465286862, 'R': -0.2735299808531706, 'T': 0.577678919761243, 'W': 
0.21111680192906904, 'V': 0.24561358020466897, 'Y': 0.5422742451008902}, 7: {'A': -0.26126715312869003, 'C': 0.04523513286573463, 'E': 0.3009863034413461, 'D': -0.23595975352186618, 'G': -0.04401611182001157, 'F': 0.8106155298979435, 'I': -0.6959114020958657, 'H': 0.7274217689457967, 'K': -1.0948083223532759, 'M': 0.7971560783910433, 'L': -0.4799785717728068, 'N': -1.047191366836869, 'Q': 0.03006318067260729, 'P': 0.6499374087495984, 'S': 0.09020424788565452, 'R': -0.6399431218454593, 'T': 0.09387374649172615, 'W': 0.38231537787910685, 'V': 0.29085420864742834, 'Y': 0.10502029689790073}, 8: {'A': -0.17624591714060261, 'C': -0.44594096205809025, 'E': 0.2717227979727722, 'D': -0.012845762584315317, 'G': -0.2375535720710233, 'F': 0.16487310250932152, 'I': 0.00804494192498933, 'H': -0.8499150101901889, 'K': -0.8296394058347988, 'M': -0.5893452296325081, 'L': 0.24782037761046985, 'N': -0.42682194513580807, 'Q': -0.2002625627126248, 'P': 0.7689731259954051, 'S': 0.29368829704065275, 'R': -0.6530871271743546, 'T': 0.4318928627874784, 'W': -0.9240865611446291, 'V': 0.26557804589733297, 'Y': 0.038742804015257794}, 9: {'A': 0.39435936592627074, 'C': -0.2506580204205583, 'E': -4.0, 'D': -4.0, 'G': -0.3259787590539208, 'F': -0.16992688879892542, 'I': 0.2967586631856834, 'H': -1.787811063406423, 'K': -1.757918849655444, 'M': 0.8174924984834613, 'L': 1.0839069131169379, 'N': -4.0, 'Q': -4.0, 'P': -4.0, 'S': -0.007550469044766957, 'R': -1.787811063406423, 'T': -0.31612993413343005, 'W': -0.9384717898759554, 'V': 0.1245946934278608, 'Y': -1.046424620597781}, -1: {'slope': 0.13311575086207222, 'intercept': -0.5859339389711538}}
|
import sys
from ingenialink.ethercat.network import EthercatNetwork
def connect_slave():
net = EthercatNetwork("\\Device\\NPF_{192D1D2F-C684-467D-A637-EC07BD434A63}")
servo = net.connect_to_slave(
target=1,
dictionary='../../resources/dictionaries/cap-net-e_eoe_0.7.1.xdf')
return servo, net
def load_config_example():
"""Loads a given configuration file into the drive."""
servo, net = connect_slave()
servo.load_configuration('ecat_config.xcf')
servo.load_configuration('ecat_config_0.xcf', subnode=0)
servo.load_configuration('ecat_config_1.xcf', subnode=1)
net.disconnect_from_slave(servo)
def save_config_example():
"""Saves the drive configuration into a file."""
servo, net = connect_slave()
servo.save_configuration('ecat_config.xcf')
servo.save_configuration('ecat_config_0.xcf', subnode=0)
servo.save_configuration('ecat_config_1.xcf', subnode=1)
net.disconnect_from_slave(servo)
if __name__ == '__main__':
save_config_example()
load_config_example()
sys.exit()
|
# -*- Python -*-
# This file is licensed under a pytorch-style license
# See LICENSE.pytorch for license information.
from torch_mlir.dialects.torch.importer.jit_ir import ModuleBuilder
from utils import create_script_function
# RUN: %PYTHON %s | torch-mlir-opt | FileCheck %s
mb = ModuleBuilder()
# CHECK-LABEL: func @__torch__.refined_block_arg(
# CHECK-SAME: %[[ARG:.*]]: !torch.tensor) -> !torch.tensor {
# CHECK: %[[REFINED:.*]] = torch.tensor_static_info_cast %[[ARG]] : !torch.tensor to !torch.tensor<[1,384],f32>
# CHECK: %[[RESULT:.*]] = torch.tensor_static_info_cast %[[REFINED]] : !torch.tensor<[1,384],f32> to !torch.tensor
# CHECK: return %[[RESULT]] : !torch.tensor
mb.import_function(create_script_function("__torch__.refined_block_arg", """
graph(%0 : Float(1, 384)):
return (%0)
"""))
mb.module.operation.print()
print()
|
#Program to find the rank of a matrix.
#Developed by: SRIJITH R
#RegisterNumber: 21004191
import numpy as np
A=np.array([[5,-3,-10],[2,2,-3],[-3,-1,5]])
val=np.linalg.matrix_rank(A)
print(val)
|
"""
File upload page using a png file
"""
from selenium.webdriver.common.by import By
from pages.base_page import BasePage
class FileUpload(BasePage):
FILE_UP = (By.ID, 'file-upload-field')
def upload_file(self):
file_up = self.driver.find_element(*self.FILE_UP)
file_up.send_keys('C:/Users/anton/PycharmProjects/Automation_testing/exercices_todo/blue.png')
|
import os
import re
import sys
import time
import datetime
from .MiscUtils import commands_get_status_output
try:
    long()
except Exception:
    long = int
try:
    unicode
except NameError:
    unicode = str
from . import PLogger
from .LocalJobSpec import LocalJobSpec
from .LocalJobsetSpec import LocalJobsetSpec
class PdbProxy:
# constructor
def __init__(self,verbose=False):
# database engine
self.engine = 'sqlite3'
# version of database schema
self.version = '0_0_1'
# database file name
self.filename = 'pandajob.db'
# database dir
self.database_dir = os.path.expanduser(os.environ['PANDA_CONFIG_ROOT'])
# full path of database file
self.database = '%s/%s' % (self.database_dir,self.filename)
# table name
self.tablename = 'jobtable_%s' % self.version
# verbose
self.verbose = verbose
# connection
self.con = None
# logger
self.log = PLogger.getPandaLogger()
# set verbose
def setVerbose(self,verbose):
# verbose
self.verbose = verbose
# execute SQL
def execute(self,sql,var={}):
# logger
tmpLog = PLogger.getPandaLogger()
# expand variables
for tmpKey in var:
tmpVal = var[tmpKey]
            sql = sql.replace(tmpKey, str(tmpVal))
# construct command
com = '%s %s "%s"' % (self.engine,self.database,sql)
if self.verbose:
tmpLog.debug("DB Req : " + com)
# execute
nTry = 5
status =0
for iTry in range(nTry):
if self.verbose:
tmpLog.debug(" Try : %s/%s" % (iTry,nTry))
status,output = commands_get_status_output(com)
status %= 255
if status == 0:
break
if iTry+1 < nTry:
time.sleep(2)
# return
if status != 0:
tmpLog.error(status)
tmpLog.error(output)
return False,output
else:
if self.verbose:
tmpLog.debug(" Ret : " + output)
outList = output.split('\n')
# remove ''
try:
outList.remove('')
except Exception:
pass
# remove junk messages
ngStrings = ['Loading resources from']
for tmpStr in tuple(outList):
# look for NG strings
flagNG = False
for ngStr in ngStrings:
match = re.search(ngStr,tmpStr,re.I)
if match is not None:
flagNG = True
break
# remove
if flagNG:
try:
outList.remove(tmpStr)
except Exception:
pass
return True,outList
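    # Example (sketch, placeholder names are illustrative): variables are expanded
    # by plain string replacement before the statement is handed to the sqlite3 CLI,
    # e.g. self.execute("SELECT * FROM %TAB WHERE JobID=%ID", {'%TAB': self.tablename, '%ID': 123})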
# execute SQL
def execute_direct(self, sql, var=None, fetch=False):
if self.con is None:
import sqlite3
self.con = sqlite3.connect(self.database, check_same_thread=False)
if self.verbose:
self.log.debug("DB Req : {0} var={1}".format(sql, str(var)))
cur = self.con.cursor()
try:
if var is None:
var = {}
cur.execute(sql, var)
retVal = True
except Exception:
retVal = False
if not self.verbose:
self.log.error("DB Req : {0} var={1}".format(sql, str(var)))
err_type, err_value = sys.exc_info()[:2]
err_str = "{0} {1}".format(err_type.__name__, err_value)
self.log.error(err_str)
if self.verbose:
self.log.debug(retVal)
outList = []
if retVal:
if fetch:
outList = cur.fetchall()
if self.verbose:
for item in outList:
self.log.debug(" Ret : " + str(item))
self.con.commit()
return retVal, outList
# remove old database
def deleteDatabase(self):
commands_get_status_output('rm -f %s' % self.database)
# initialize database
def initialize(self):
# import sqlite3
# check if sqlite3 is available
com = 'which %s' % self.engine
status,output = commands_get_status_output(com)
if status != 0:
errstr = "\n\n"
errstr += "ERROR : %s is not available in PATH\n\n" % self.engine
errstr += "There are some possible solutions\n"
errstr += " * run this application under Athena runtime with Release 14 or higher. e.g.,\n"
errstr += " $ source setup.sh -tag=14.2.24,32,setup\n"
errstr += " $ source .../etc/panda/panda_setup.sh\n\n"
errstr += " * set PATH and LD_LIBRARY_PATH to include %s. e.g., at CERN\n" % self.engine
errstr += " $ export PATH=/afs/cern.ch/sw/lcg/external/sqlite/3.4.0/slc3_ia32_gcc323/bin:$PATH\n"
errstr += " $ export LD_LIBRARY_PATH=/afs/cern.ch/sw/lcg/external/sqlite/3.4.0/slc3_ia32_gcc323/lib:$LD_LIBRARY_PATH\n"
errstr += " $ source .../etc/panda/panda_setup.sh\n\n"
errstr += " * install %s from the standard SL4 repository. e.g.,\n" % self.engine
errstr += " $ yum install %s\n\n" % self.engine
errstr += " * use SLC5\n"
raise RuntimeError(errstr)
# create dir for DB
if not os.path.exists(self.database_dir):
os.makedirs(self.database_dir)
# the table already exist
if self.checkTable():
return
# create table
self.createTable()
return
# check table
def checkTable(self):
# get tables
retS,retV = self.execute('.table')
if not retS:
raise RuntimeError("cannot get tables")
# the table already exist or not
if retV == []:
return False
if self.tablename not in retV[-1].split():
return False
# check schema
self.checkSchema()
return True
# check schema
def checkSchema(self,noAdd=False):
        # get column names
retS,retV = self.execute('PRAGMA table_info(%s)' % self.tablename)
if not retS:
raise RuntimeError("cannot get table_info")
# parse
columns = []
for line in retV:
items = line.split('|')
if len(items) > 1:
columns.append(items[1])
# check
for tmpC in LocalJobSpec.appended:
tmpA = LocalJobSpec.appended[tmpC]
if tmpC not in columns:
if noAdd:
raise RuntimeError("%s not found in database schema" % tmpC)
# add column
retS,retV = self.execute("ALTER TABLE %s ADD COLUMN '%s' %s" % \
(self.tablename,tmpC,tmpA))
if not retS:
raise RuntimeError("cannot add %s to database schema" % tmpC)
if noAdd:
return
# check whole schema just in case
self.checkSchema(noAdd=True)
# create table
def createTable(self):
# ver 0_1_1
sql = "CREATE TABLE %s (" % self.tablename
sql += "'id' INTEGER PRIMARY KEY,"
sql += "'JobID' INTEGER,"
sql += "'PandaID' TEXT,"
sql += "'jobStatus' TEXT,"
sql += "'site' VARCHAR(128),"
sql += "'cloud' VARCHAR(20),"
sql += "'jobType' VARCHAR(20),"
sql += "'jobName' VARCHAR(128),"
sql += "'inDS' TEXT,"
sql += "'outDS' TEXT,"
sql += "'libDS' VARCHAR(255),"
sql += "'jobParams' TEXT,"
sql += "'retryID' INTEGER,"
sql += "'provenanceID' INTEGER,"
sql += "'creationTime' TIMESTAMP,"
sql += "'lastUpdate' TIMESTAMP,"
sql += "'dbStatus' VARCHAR(20),"
sql += "'buildStatus' VARCHAR(20),"
sql += "'commandToPilot' VARCHAR(20),"
for tmpC in LocalJobSpec.appended:
tmpA = LocalJobSpec.appended[tmpC]
sql += "'%s' %s," % (tmpC,tmpA)
sql = sql[:-1]
sql += ")"
# execute
retS,retV = self.execute(sql)
if not retS:
raise RuntimeError("failed to create %s" % self.tablename)
# confirm
if not self.checkTable():
raise RuntimeError("failed to confirm %s" % self.tablename)
# convert Panda jobs to DB representation
def convertPtoD(pandaJobList,pandaIDstatus,localJob=None,fileInfo={},pandaJobForSiteID=None):
statusOnly = False
if localJob is not None:
# update status only
ddata = localJob
statusOnly = True
else:
# create new spec
ddata = LocalJobSpec()
# sort by PandaID
pandIDs = list(pandaIDstatus)
pandIDs.sort()
pStr = ''
sStr = ''
ddata.commandToPilot = ''
for tmpID in pandIDs:
# PandaID
pStr += '%s,' % tmpID
# status
sStr += '%s,' % pandaIDstatus[tmpID][0]
# commandToPilot
if pandaIDstatus[tmpID][1] == 'tobekilled':
ddata.commandToPilot = 'tobekilled'
pStr = pStr[:-1]
sStr = sStr[:-1]
# job status
ddata.jobStatus = sStr
# PandaID
ddata.PandaID = pStr
# get panda Job
pandaJob = None
if pandaJobList != []:
# look for buildJob since it doesn't have the first PandaID when retried
for pandaJob in pandaJobList:
if pandaJob.prodSourceLabel == 'panda':
break
elif pandaJobForSiteID is not None:
pandaJob = pandaJobForSiteID
# extract libDS
if pandaJob is not None:
if pandaJob.prodSourceLabel == 'panda':
# build Jobs
ddata.buildStatus = pandaJob.jobStatus
for tmpFile in pandaJob.Files:
if tmpFile.type == 'output':
ddata.libDS = tmpFile.dataset
break
else:
# noBuild or libDS
ddata.buildStatus = ''
for tmpFile in pandaJob.Files:
if tmpFile.type == 'input' and tmpFile.lfn.endswith('.lib.tgz'):
ddata.libDS = tmpFile.dataset
break
# release
ddata.releaseVar = pandaJob.AtlasRelease
# cache
tmpCache = re.sub('^[^-]+-*','',pandaJob.homepackage)
tmpCache = re.sub('_','-',tmpCache)
ddata.cacheVar = tmpCache
# return if update status only
if statusOnly:
# build job
if ddata.buildStatus != '':
ddata.buildStatus = sStr.split(',')[0]
# set computingSite mainly for rebrokerage
if pandaJobForSiteID is not None:
ddata.site = pandaJobForSiteID.computingSite
ddata.nRebro = pandaJobForSiteID.specialHandling.split(',').count('rebro') + \
pandaJobForSiteID.specialHandling.split(',').count('sretry')
# return
return ddata
# job parameters
ddata.jobParams = pandaJob.metadata
# extract datasets
iDSlist = []
oDSlist = []
if fileInfo != {}:
if 'inDS' in fileInfo:
iDSlist = fileInfo['inDS']
if 'outDS' in fileInfo:
oDSlist = fileInfo['outDS']
else:
for pandaJob in pandaJobList:
for tmpFile in pandaJob.Files:
if tmpFile.type == 'input' and not tmpFile.lfn.endswith('.lib.tgz'):
if tmpFile.dataset not in iDSlist:
iDSlist.append(tmpFile.dataset)
elif tmpFile.type == 'output' and not tmpFile.lfn.endswith('.lib.tgz'):
if tmpFile.dataset not in oDSlist:
oDSlist.append(tmpFile.dataset)
# convert to string
ddata.inDS = ''
for iDS in iDSlist:
ddata.inDS += '%s,' % iDS
ddata.inDS = ddata.inDS[:-1]
ddata.outDS = ''
for oDS in oDSlist:
ddata.outDS += '%s,' % oDS
ddata.outDS = ddata.outDS[:-1]
# job name
ddata.jobName = pandaJob.jobName
# creation time
ddata.creationTime = pandaJob.creationTime
# job type
ddata.jobType = pandaJob.prodSeriesLabel
# site
ddata.site = pandaJob.computingSite
# cloud
ddata.cloud = pandaJob.cloud
# job ID
ddata.JobID = pandaJob.jobDefinitionID
# retry ID
ddata.retryID = 0
# provenance ID
ddata.provenanceID = pandaJob.jobExecutionID
# groupID
ddata.groupID = pandaJob.jobsetID
ddata.retryJobsetID = -1
if pandaJob.sourceSite not in ['NULL',None,'']:
ddata.parentJobsetID = long(pandaJob.sourceSite)
else:
ddata.parentJobsetID = -1
# job type
ddata.jobType = pandaJob.processingType
# the number of rebrokerage actions
ddata.nRebro = pandaJob.specialHandling.split(',').count('rebro')
# jediTaskID
ddata.jediTaskID = -1
# return
return ddata
# convert JediTask to DB representation
def convertJTtoD(jediTaskDict,localJob=None):
statusOnly = False
if localJob is not None:
# update status only
ddata = localJob
statusOnly = True
else:
# create new spec
ddata = LocalJobSpec()
# max IDs
maxIDs = 20
# task status
ddata.taskStatus = jediTaskDict['status']
# statistic
ddata.jobStatus = jediTaskDict['statistics']
# PandaID
ddata.PandaID = ''
for tmpPandaID in jediTaskDict['PandaID'][:maxIDs]:
ddata.PandaID += '%s,' % tmpPandaID
ddata.PandaID = ddata.PandaID[:-1]
if len(jediTaskDict['PandaID']) > maxIDs:
ddata.PandaID += ',+%sIDs' % (len(jediTaskDict['PandaID'])-maxIDs)
# merge status
if 'mergeStatus' not in jediTaskDict or jediTaskDict['mergeStatus'] is None:
ddata.mergeJobStatus = 'NA'
else:
ddata.mergeJobStatus = jediTaskDict['mergeStatus']
# merge PandaID
ddata.mergeJobID = ''
for tmpPandaID in jediTaskDict['mergePandaID'][:maxIDs]:
ddata.mergeJobID += '%s,' % tmpPandaID
ddata.mergeJobID = ddata.mergeJobID[:-1]
if len(jediTaskDict['mergePandaID']) > maxIDs:
ddata.mergeJobID += ',+%sIDs' % (len(jediTaskDict['mergePandaID'])-maxIDs)
# return if update status only
if statusOnly:
return ddata
# release
ddata.releaseVar = jediTaskDict['transUses']
# cache
if jediTaskDict['transHome'] is None:
tmpCache = ''
else:
tmpCache = re.sub('^[^-]+-*','',jediTaskDict['transHome'])
tmpCache = re.sub('_','-',tmpCache)
ddata.cacheVar = tmpCache
# job parameters
try:
if isinstance(jediTaskDict['cliParams'],unicode):
ddata.jobParams = jediTaskDict['cliParams'].encode('utf_8')
else:
ddata.jobParams = jediTaskDict['cliParams']
# truncate
ddata.jobParams = ddata.jobParams[:1024]
except Exception:
pass
# input datasets
try:
# max number of datasets to show
maxDS = 20
inDSs = jediTaskDict['inDS'].split(',')
strInDS = ''
# concatenate
for tmpInDS in inDSs[:maxDS]:
strInDS += "%s," % tmpInDS
strInDS = strInDS[:-1]
# truncate
if len(inDSs) > maxDS:
strInDS += ',+{0}DSs'.format(len(inDSs)-maxDS)
ddata.inDS = strInDS
except Exception:
ddata.inDS = jediTaskDict['inDS']
# output datasets
ddata.outDS = jediTaskDict['outDS']
# job name
ddata.jobName = jediTaskDict['taskName']
# creation time
ddata.creationTime = jediTaskDict['creationDate']
# job type
ddata.jobType = jediTaskDict['processingType']
# site
ddata.site = jediTaskDict['site']
# cloud
ddata.cloud = jediTaskDict['cloud']
# job ID
ddata.JobID = jediTaskDict['reqID']
# retry ID
ddata.retryID = 0
# provenance ID
ddata.provenanceID = 0
# groupID
ddata.groupID = jediTaskDict['reqID']
# jediTaskID
ddata.jediTaskID = jediTaskDict['jediTaskID']
# IDs for retry
ddata.retryJobsetID = -1
ddata.parentJobsetID = -1
# the number of rebrokerage actions
ddata.nRebro = 0
# return
return ddata
# instantiate database proxy
pdbProxy = PdbProxy()
# just initialize DB
def initialzieDB(verbose=False,restoreDB=False):
if restoreDB:
pdbProxy.deleteDatabase()
pdbProxy.initialize()
pdbProxy.setVerbose(verbose)
# insert job info to DB
def insertJobDB(job,verbose=False):
tmpLog = PLogger.getPandaLogger()
# set update time
job.lastUpdate = datetime.datetime.utcnow()
# make sql
sql1 = "INSERT INTO %s (%s) " % (pdbProxy.tablename,LocalJobSpec.columnNames())
sql1+= "VALUES " + job.values()
status,out = pdbProxy.execute_direct(sql1)
if not status:
raise RuntimeError("failed to insert job")
# update job info in DB
def updateJobDB(job,verbose=False,updateTime=None):
# make sql
sql1 = "UPDATE %s SET " % pdbProxy.tablename
sql1 += job.values(forUpdate=True)
sql1 += " WHERE JobID=%s " % job.JobID
# set update time
if updateTime is not None:
job.lastUpdate = updateTime
sql1 += " AND lastUpdate<'%s' " % updateTime.strftime('%Y-%m-%d %H:%M:%S')
else:
job.lastUpdate = datetime.datetime.utcnow()
status,out = pdbProxy.execute_direct(sql1)
if not status:
raise RuntimeError("failed to update job")
# set retryID
def setRetryID(job,verbose=False):
# make sql
sql1 = "UPDATE %s SET " % pdbProxy.tablename
sql1 += "retryID=%s,retryJobsetID=%s " % (job.JobID,job.groupID)
sql1 += " WHERE JobID=%s AND (nRebro IS NULL OR nRebro=%s)" % (job.provenanceID,job.nRebro)
status,out = pdbProxy.execute(sql1)
if not status:
raise RuntimeError("failed to set retryID")
# delete old jobs
def deleteOldJobs(days,verbose=False):
# time limit
limit = datetime.datetime.utcnow() - datetime.timedelta(days=days)
# make sql
sql1 = "DELETE FROM %s " % pdbProxy.tablename
sql1 += " WHERE creationTime<'%s' " % limit.strftime('%Y-%m-%d %H:%M:%S')
status,out = pdbProxy.execute_direct(sql1)
if not status:
raise RuntimeError("failed to delete old jobs")
# read job info from DB
def readJobDB(JobID,verbose=False):
# make sql
sql1 = "SELECT %s FROM %s " % (LocalJobSpec.columnNames(),pdbProxy.tablename)
sql1+= "WHERE JobID=%s" % JobID
# execute
status,out = pdbProxy.execute_direct(sql1, fetch=True)
if not status:
raise RuntimeError("failed to get JobID=%s" % JobID)
if len(out) == 0:
return None
# instantiate LocalJobSpec
for values in out:
job = LocalJobSpec()
job.pack(values)
# return frozen job if exists
if job.dbStatus == 'frozen':
return job
# return any
return job
# read jobset info from DB
def readJobsetDB(JobsetID,verbose=False):
# make sql
sql1 = "SELECT %s FROM %s " % (LocalJobSpec.columnNames(),pdbProxy.tablename)
sql1+= "WHERE groupID=%s" % JobsetID
# execute
status,out = pdbProxy.execute(sql1)
if not status:
raise RuntimeError("failed to get JobsetID=%s" % JobsetID)
if len(out) == 0:
return None
# instantiate LocalJobSpec
tmpJobMap = {}
for tmpStr in out:
values = tmpStr.split('|')
job = LocalJobSpec()
job.pack(values)
# return frozen job if exists
if job.dbStatus == 'frozen' or job.JobID not in tmpJobMap:
tmpJobMap[job.JobID] = job
# make jobset
jobset = LocalJobsetSpec()
# set jobs
jobset.setJobs(tmpJobMap.values())
# return any
return jobset
# check jobset status in DB
def checkJobsetStatus(JobsetID,verbose=False):
# logger
tmpLog = PLogger.getPandaLogger()
# make sql
sql1 = "SELECT %s FROM %s " % (LocalJobSpec.columnNames(),pdbProxy.tablename)
sql1+= "WHERE groupID=%s" % JobsetID
failedRet = False,None
# execute
status,out = pdbProxy.execute(sql1)
if not status:
tmpLog.error(out)
tmpLog.error("failed to access local DB")
return failedRet
if len(out) == 0:
tmpLog.error("failed to get JobsetID=%s from local DB" % JobsetID)
return None
# instantiate LocalJobSpec
jobMap = {}
for tmpStr in out:
values = tmpStr.split('|')
job = LocalJobSpec()
job.pack(values)
# use frozen job if exists
if job.JobID not in jobMap or job.dbStatus == 'frozen':
jobMap[job.JobID] = job
# check all job status
for tmpJobID in jobMap:
tmpJobSpec = jobMap[tmpJobID]
        if tmpJobSpec.dbStatus != 'frozen':
return True,'running'
# return
return True,'frozen'
# bulk read job info from DB
def bulkReadJobDB(verbose=False):
# make sql
sql1 = "SELECT %s FROM %s " % (LocalJobSpec.columnNames(),pdbProxy.tablename)
# execute
status,out = pdbProxy.execute_direct(sql1, fetch=True)
if not status:
raise RuntimeError("failed to get jobs")
if len(out) == 0:
return []
# instantiate LocalJobSpec
retMap = {}
jobsetMap = {}
for values in out:
job = LocalJobSpec()
job.pack(values)
# use frozen job if exists
if job.JobID not in retMap or job.dbStatus == 'frozen':
if job.groupID in [0,'0','NULL',-1,'-1']:
retMap[long(job.JobID)] = job
else:
# add jobset
tmpJobsetID = long(job.groupID)
if tmpJobsetID not in retMap or tmpJobsetID not in jobsetMap:
jobsetMap[tmpJobsetID] = []
jobset = LocalJobsetSpec()
retMap[tmpJobsetID] = jobset
# add job
jobsetMap[tmpJobsetID].append(job)
# add jobs to jobset
for tmpJobsetID in jobsetMap:
tmpJobList = jobsetMap[tmpJobsetID]
retMap[tmpJobsetID].setJobs(tmpJobList)
# sort
ids = list(retMap)
ids.sort()
retVal = []
for id in ids:
retVal.append(retMap[id])
# return
return retVal
# get list of JobID
def getListOfJobIDs(nonFrozen=False,verbose=False):
# make sql
sql1 = "SELECT JobID,dbStatus FROM %s " % pdbProxy.tablename
# execute
status,out = pdbProxy.execute_direct(sql1, fetch=True)
if not status:
raise RuntimeError("failed to get list of JobIDs")
allList = []
frozenList = []
for item in out:
# extract JobID
tmpID = long(item[0])
# status in DB
tmpStatus = item[-1]
# keep all jobs
if tmpID not in allList:
allList.append(tmpID)
# keep frozen jobs
if nonFrozen and tmpStatus == 'frozen':
if tmpID not in frozenList:
frozenList.append(tmpID)
# remove redundant jobs
retVal = []
for item in allList:
if item not in frozenList:
retVal.append(item)
# sort
retVal.sort()
# return
return retVal
# get map of jobsetID and JobIDs
def getMapJobsetIDJobIDs(verbose=False):
# make sql
sql1 = "SELECT groupID,JobID FROM %s WHERE groupID is not NULL and groupID != 0 and groupID != ''" % pdbProxy.tablename
# execute
status,out = pdbProxy.execute(sql1)
if not status:
raise RuntimeError("failed to get list of JobIDs")
allMap = {}
for item in out:
# JobsetID
tmpJobsetID = long(item.split('|')[0])
# JobID
tmpJobID = long(item.split('|')[-1])
# append
if tmpJobsetID not in allMap:
allMap[tmpJobsetID] = []
if tmpJobID not in allMap[tmpJobsetID]:
allMap[tmpJobsetID].append(tmpJobID)
# sort
for tmpKey in allMap.keys():
allMap[tmpKey].sort()
# return
return allMap
# make JobSetSpec
def makeJobsetSpec(jobList):
jobset = LocalJobsetSpec()
jobset.setJobs(jobList)
return jobset
# get map of jobsetID and jediTaskID
def getJobsetTaskMap(verbose=False):
# make sql
sql1 = "SELECT groupID,jediTaskID FROM %s WHERE groupID is not NULL and groupID != 0 and groupID != '' and jediTaskID is not null and jediTaskID != ''" % pdbProxy.tablename
# execute
status,out = pdbProxy.execute_direct(sql1, fetch=True)
if not status:
raise RuntimeError("failed to get list of JobIDs")
allMap = {}
for item in out:
# JobsetID
tmpJobsetID = long(item[0])
# JobID
jediTaskID = long(item[-1])
# append
allMap[jediTaskID] = tmpJobsetID
# return
return allMap
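# Usage sketch (assumes PANDA_CONFIG_ROOT points to a writable config directory so
# the sqlite database can be created):
#   initialzieDB()
#   jobIDs = getListOfJobIDs()
#   job = readJobDB(jobIDs[0]) if jobIDs else None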
|
r"""
Gnomonic
========
The point of perspective of the gnomonic projection lies at the center of the
earth. As a consequence great circles (orthodromes) on the surface of the Earth
are displayed as straight lines, which makes it suitable for distance
estimation for navigational purposes. It is neither conformal nor equal-area
and the distortion increases greatly with distance to the projection center. It
follows that the scope of application is restricted to a small area around the
projection center (at a maximum of 60°).
**f**\ *lon0/lat0*\ [*/horizon*\ ]\ */scale*
or **F**\ *lon0/lat0*\ [*/horizon*\ ]\ */width*
**f** or **F** specifies the projection type, *lon0/lat0* specifies the
projection center, the optional parameter *horizon* specifies the maximum
distance from projection center (in degrees, < 90, default 60), and *scale* or
*width* sets the size of the figure.
"""
import pygmt
fig = pygmt.Figure()
fig.coast(projection="F-90/15/12c", region="g", frame="20g20", land="gray")
fig.show()
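# The optional horizon parameter described above can be added as a third value in
# the projection string; as a sketch, this would restrict the view to 30 degrees
# around the projection center:
# fig.coast(projection="F-90/15/30/12c", region="g", frame="20g20", land="gray")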
|
import flask
from flask import Flask,jsonify,request
import json
from data_input import data_in
import numpy as np
import pickle
def load_models():
file_name = './models/model_file.p'
with open(file_name,'rb') as pickled:
data = pickle.load(pickled)
model = data['model']
return model
app = Flask(__name__)
@app.route('/predict',methods=['GET'])
def predict():
request_json = request.get_json()
x = request_json['input']
x_in = np.array(x).reshape(1,-1)
model = load_models()
prediction = model.predict(x_in)[0]
response = json.dumps({'response': prediction})
return response,200
if __name__ == '__main__':
    app.run(debug=True)
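# Usage sketch (assumes the pickled model exists at ./models/model_file.p and the
# hypothetical client sends a JSON body with an 'input' feature vector of the
# length the model expects):
#   import requests
#   r = requests.get('http://127.0.0.1:5000/predict', json={'input': [1.0, 2.0, 3.0]})
#   print(r.json())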
|
import os
import pytest
from sap.aibus.dar.client.data_manager_client import DataManagerClient
from sap.aibus.dar.client.inference_client import InferenceClient
from sap.aibus.dar.client.model_manager_client import ModelManagerClient
from sap.aibus.dar.client.util.credentials import OnlineCredentialsSource
from sap.aibus.dar.client.workflow.model import ModelCreator
@pytest.fixture()
def dar_url():
return os.environ["DAR_URL"]
@pytest.fixture()
def dar_client_id():
return os.environ["DAR_CLIENT_ID"]
@pytest.fixture()
def dar_client_secret():
return os.environ["DAR_CLIENT_SECRET"]
@pytest.fixture()
def dar_uaa_url():
return os.environ["DAR_AUTH_URL"]
# For the following fixtures, the parameters to the functions
# will be provided by existing fixtures of the same name!
@pytest.fixture()
def credentials_source(dar_client_id, dar_client_secret, dar_uaa_url):
return OnlineCredentialsSource(dar_uaa_url, dar_client_id, dar_client_secret)
@pytest.fixture()
def data_manager_client(dar_url, credentials_source):
client = DataManagerClient(dar_url, credentials_source)
return client
@pytest.fixture()
def model_manager_client(dar_url, credentials_source):
client = ModelManagerClient(dar_url, credentials_source)
return client
@pytest.fixture()
def inference_client(dar_url, credentials_source):
client = InferenceClient(dar_url, credentials_source)
return client
@pytest.fixture()
def model_creator(dar_url, credentials_source):
create_model = ModelCreator(dar_url, credentials_source)
return create_model
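# Example test using the fixtures above (a sketch; pytest injects each fixture by
# matching the parameter name, so no explicit wiring is needed):
def test_clients_can_be_constructed(data_manager_client, model_manager_client):
    assert data_manager_client is not None
    assert model_manager_client is not None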
|
import json
import os
import sys
from datetime import datetime
import numpy as np
import pandas as pd
from objects.task import Task
from objects.workflow import Workflow
from objects.workload import Workload
pd.set_option('display.max_columns', None)
USAGE = 'Usage: python(3) ./galaxy_to_parquet.py galaxy_folder'
NAME = 'Galaxy'
TARGET_DIR = os.path.join(os.path.dirname(os.getcwd()), 'output_parquet', NAME)
DATETIME_FORMAT = '%Y-%m-%d %H:%M:%S.%f'
EPOCH = datetime(1970, 1, 1)
JOBS = None
METRICS = None
WORKFLOWS = None
WORKFLOW_INVOCATIONS = None
WORKFLOW_STEPS = None
WORKFLOW_INVOKE_STEPS = None
WORKFLOW_CONNECTIONS = None
WORKFLOW_STEP_INPUT = None
def read_files(folder_path):
global METRICS
METRICS = pd.read_csv(os.path.join(folder_path, 'job_metrics_numeric.csv'),
names=["id", "job_id", "plugin", "metric_name", "metric_value"], dtype={
"id": np.float,
"job_id": np.float,
"plugin": np.str,
"metric_name": np.str,
"metric_value": np.float,
})
print("Done with reading metrics")
global WORKFLOWS
WORKFLOWS = pd.read_csv(os.path.join(folder_path, 'workflows.csv'),
names=["id", "create_time", "update_time", "stored_workflow_id", "has_cycles", "has_errors",
"parent_workflow_id", "uuid"], dtype={
"id": np.float,
"create_time": np.str,
"update_time": np.str,
"stored_workflow_id": np.float,
"has_cycles": np.str,
"has_errors": np.str,
"parent_workflow_id": np.float,
"uuid": np.str,
})
print("Done with reading workflows")
global WORKFLOW_INVOCATIONS
WORKFLOW_INVOCATIONS = pd.read_csv(os.path.join(folder_path, 'workflow-invocations.csv'),
names=["id", "create_time", "update_time", "workflow_id", "state", "scheduler",
"handler"], dtype={
"id": np.float,
"create_time": np.str,
"update_time": np.str,
"workflow_id": np.float,
"state": np.str,
"scheduler": np.str,
"handler": np.str,
})
print("Done with reading workflow invocations")
global WORKFLOW_STEPS
WORKFLOW_STEPS = pd.read_csv(os.path.join(folder_path, 'workflow-steps.csv'),
names=["id", "create_time", "update_time", "workflow_id", "type", "tool_id",
"tool_version", "order_index", "subworkflow_id", "dynamic_tool_id"], dtype={
"id": np.float,
"create_time": np.str,
"update_time": np.str,
"workflow_id": np.float,
"type": np.str,
"tool_id": np.str,
"tool_version": np.str,
"order_index": np.float,
"subworkflow_id": np.str,
"dynamic_tool_id": np.str,
})
print("Done with reading workflow steps")
global WORKFLOW_INVOKE_STEPS
WORKFLOW_INVOKE_STEPS = pd.read_csv(os.path.join(folder_path, 'workflow-invoke-steps.csv'), keep_default_na=True,
names=["id", "create_time", "update_time", "workflow_invocation_id",
"workflow_step_id", "job_id", "state"], dtype={
"id": np.float,
"create_time": np.str,
"update_time": np.str,
"workflow_invocation_id": np.float,
"workflow_step_id": np.float,
"job_id": np.float,
"state": np.str,
})
print("Done with reading workflow invocation steps")
global WORKFLOW_CONNECTIONS
WORKFLOW_CONNECTIONS = pd.read_csv(os.path.join(folder_path, 'workflow-connections.csv'),
names=["id", "output_step_id", "input_step_input_id", "output_name",
"input_subworkflow_step_id"], dtype={
"id": np.float,
"output_step_id": np.float,
"input_step_input_id": np.float,
"output_name": np.str,
"input_subworkflow_step_id": np.float,
})
print("Done with reading workflow connections")
global WORKFLOW_STEP_INPUT
WORKFLOW_STEP_INPUT = pd.read_csv(os.path.join(folder_path, 'workflow-step-input.csv'),
names=["id", "workflow_step_id", "name"], dtype={
"id": np.float,
"workflow_step_id": np.float,
"name": np.str,
})
print("Done with reading workflow step input")
def check_if_empty(*args):
    for field in args:
        if np.isnan(field):
            return True
    return False
def compute_children(step_job_ids, tasks_in_workflow):
for task in tasks_in_workflow:
step_id = None
for pair in step_job_ids:
# find task id's corresponding step id
if pair[1] == task.id:
step_id = pair[0]
children = set()
df = WORKFLOW_CONNECTIONS.loc[(WORKFLOW_CONNECTIONS["output_step_id"] == step_id)]
if df.empty:
task.children = children
continue
for wc_row in df.itertuples():
# find id for subsequent connected step
row = WORKFLOW_STEP_INPUT.loc[(WORKFLOW_STEP_INPUT["id"] == wc_row[3])]
child_step_id = row.iloc[0]["workflow_step_id"]
# find child_step_id in step-job pairs and add corresponding job_id to children set
for pair2 in step_job_ids:
if pair2[0] == child_step_id:
children.add(np.int64(pair2[1]))
for child in tasks_in_workflow:
if child.id == pair2[1]:
child.parents.append(np.int64(task.id))
break
break
task.children = children
for task2 in tasks_in_workflow:
unique_parents = set(task2.parents)
unique_parents_list = list(unique_parents)
task2.parents = unique_parents_list
return tasks_in_workflow
def parse():
os.makedirs(TARGET_DIR, exist_ok=True)
task_counter = 0
workflow_counter = 0
processed_workflows = []
final_workflows = []
final_tasks = []
task_offset = 0
workflow_offset = None
for wi_row in WORKFLOW_INVOCATIONS.itertuples():
flag = False
# only use one execution of a workflow
if wi_row[4] in processed_workflows:
continue
# check if workflow contains cycles
workflow_row = WORKFLOWS.loc[(WORKFLOWS["id"] == getattr(wi_row, "workflow_id"))]
if workflow_row.iloc[0]["has_cycles"] == "t":
continue
# workflows contain a number of workflow steps but this is not the ID of their actual execution
# this list is used to tie the workflow steps to their actual execution ID
step_job_ids = []
tasks_in_workflow = []
workflow_index = wi_row[4]
# check if workflow id is null
if pd.isnull(workflow_index):
continue
df = WORKFLOW_INVOKE_STEPS.loc[(WORKFLOW_INVOKE_STEPS["workflow_invocation_id"] == getattr(wi_row, "id"))]
# check if workflow is not empty
if df.empty:
processed_workflows.append(workflow_index)
continue
for wis_row in df.itertuples():
# check if entry in WF_INVOKE_STEPS has the same wf_invocation_id
if getattr(wis_row, "workflow_invocation_id") == getattr(wi_row, "id"):
# check if required fields are not empty
if check_if_empty(getattr(wis_row, "workflow_step_id"), getattr(wis_row, "job_id")):
processed_workflows.append(workflow_index)
flag = True
break
# get step id and corresponding execution id
step_job_pair = [getattr(wis_row, "workflow_step_id"), getattr(wis_row, "job_id")]
step_job_ids.append(step_job_pair)
job_id = getattr(wis_row, "job_id")
submit_time = int(((datetime.strptime(getattr(wis_row, "create_time"),DATETIME_FORMAT) - EPOCH).total_seconds()) * 1000)
job_metrics = METRICS.loc[(METRICS["job_id"] == job_id)]
runtime = job_metrics.loc[(job_metrics["metric_name"] == "runtime_seconds"), 'metric_value'] * 1000
memory = job_metrics.loc[(job_metrics["metric_name"] == "memory.memsw.max_usage_in_bytes"), 'metric_value']
cpu_time = job_metrics.loc[(job_metrics["metric_name"] == "cpuacct.usage"), 'metric_value']
# check if any required fields are empty
if runtime.empty or memory.empty or cpu_time.empty:
processed_workflows.append(workflow_index)
flag = True
break
            # used to find the task with the lowest submit time; this time will be used as the offset
if task_offset == 0:
task_offset = submit_time
elif submit_time < task_offset:
task_offset = submit_time
runtime = runtime.iloc[0]
memory = memory.iloc[0]
cpu_time = cpu_time.iloc[0] / 1000000
if cpu_time > runtime:
cpu_time = runtime
task = Task(np.int64(job_id), "Composite", submit_time, 0, runtime, 1, None, workflow_index, -1, "cpu-time",resource=cpu_time, memory_requested=memory)
task_counter += 1
tasks_in_workflow.append(task)
flag = False
        # if flag is true, a task in the workflow is not usable so we skip it
if flag:
processed_workflows.append((workflow_index))
continue
# compute children of tasks
final_tasks.extend(compute_children(step_job_ids, tasks_in_workflow))
workflow_submit_time = int(((datetime.strptime(getattr(wi_row, "create_time"),DATETIME_FORMAT) - EPOCH).total_seconds()) * 1000)
# find smallest workflow submit time as offset
if workflow_offset is None:
workflow_offset = workflow_submit_time
elif workflow_submit_time < workflow_offset:
workflow_offset = workflow_submit_time
workflow = Workflow(workflow_index, workflow_submit_time, tasks_in_workflow, "core", "Engineering",
"Galaxy", "Biological Engineering")
workflow.compute_critical_path()
processed_workflows.append(workflow_index)
final_workflows.append(workflow)
workflow_counter += 1
# apply offset
for x in final_tasks:
x.ts_submit = x.ts_submit - task_offset
# apply offset
for y in final_workflows:
y.ts_submit = y.ts_submit - workflow_offset
# make tasks dataframe
task_df = pd.DataFrame([t.get_parquet_dict() for t in final_tasks])
# create parquet file in specified folder
os.makedirs(os.path.join(TARGET_DIR, Task.output_path()), exist_ok=True)
task_df.to_parquet(os.path.join(TARGET_DIR, Task.output_path(), "part.0.parquet"), engine="pyarrow")
# make workflows dataframe
workflow_df = pd.DataFrame([w.get_parquet_dict() for w in final_workflows])
# create parquet file in specified folder
os.makedirs(os.path.join(TARGET_DIR, Workflow.output_path()), exist_ok=True)
workflow_df.to_parquet(os.path.join(TARGET_DIR, Workflow.output_path(), "part.0.parquet"), engine="pyarrow")
json_dict = Workload.get_json_dict_from_pandas_task_dataframe(task_df,
domain="Biological Engineering",
authors=["Jaro Bosch", "Laurens Versluis"],
workload_description="Traces from different biomedical research workflows, executed on the public Galaxy server in Europe."
)
os.makedirs(os.path.join(TARGET_DIR, Workload.output_path()), exist_ok=True)
with open(os.path.join(TARGET_DIR, Workload.output_path(), "generic_information.json"), "w") as file:
# Need this on 32-bit python.
def default(o):
if isinstance(o, np.int64): return int(o)
raise TypeError
file.write(json.dumps(json_dict, default=default))
if __name__ == '__main__':
if len(sys.argv) != 2:
print(USAGE)
sys.exit(1)
folder_path = sys.argv[1]
read_files(folder_path)
parse()
|
import seaborn as sns
import matplotlib.pyplot as plt
import torch
import numpy as np
import cv2
from .cam import GradCAM
# def load_gradcam(images, labels, model, device, target_layers):
def load_gradcam(test, model, device, target_layers,size = 25,classified = True):
_images = []
_target = []
_pred = []
# model, device = self.trainer.model, self.trainer.device
# set the model to evaluation mode
model.eval()
# turn off gradients
with torch.no_grad():
for data, target in test:
# move them to respective device
data, target = data.to(device), target.to(device)
# do inferencing
output = model(data)
# print("output:",output[0])
# get the predicted output
pred = output.argmax(dim=1, keepdim=True)
# print(pred,pred.view_as(target))
# get the current misclassified in this batch
list_images = (target.eq(pred.view_as(target)) == classified)
batch_misclassified = data[list_images]
batch_mis_pred = pred[list_images]
batch_mis_target = target[list_images]
# batch_misclassified =
_images.append(batch_misclassified)
_pred.append(batch_mis_pred)
_target.append(batch_mis_target)
# group all the batched together
img = torch.cat(_images)
pred = torch.cat(_pred)
tar = torch.cat(_target)
# move the model to device
images = img[:size]
labels = tar[:size]
model.to(device)
# set the model in evaluation mode
model.eval()
# get the grad cam
gcam = GradCAM(model=model, candidate_layers=target_layers)
# images = torch.stack(images).to(device)
# predicted probabilities and class ids
pred_probs, pred_ids = gcam.forward(images)
# actual class ids
# target_ids = torch.LongTensor(labels).view(len(images), -1).to(device)
target_ids = labels.view(len(images), -1).to(device)
# backward pass wrt to the actual ids
gcam.backward(ids=target_ids)
# we will store the layers and correspondings images activations here
layers_region = {}
# fetch the grad cam layers of all the images
for target_layer in target_layers:
# Grad-CAM
regions = gcam.generate(target_layer=target_layer)
layers_region[target_layer] = regions
# we are done here, remove the hooks
gcam.remove_hook()
return layers_region, pred_probs, pred_ids,images, labels
sns.set()
# plt.style.use("dark_background")
def plot_gradcam(gcam_layers, images, target_labels, predicted_labels, class_labels, denormalize):
images = images.cpu()
    # convert BCHW to BHWC for plotting
images = images.permute(0, 2, 3, 1)
target_labels = target_labels.cpu()
fig, axs = plt.subplots(nrows=len(images), ncols=len(
gcam_layers.keys())+1, figsize=((len(gcam_layers.keys()) + 2)*3, len(images)*3))
fig.suptitle("Grad-CAM", fontsize=16)
for image_idx, image in enumerate(images):
        # denormalize the image
denorm_img = denormalize(image.permute(2, 0, 1)).permute(1, 2, 0)
# axs[image_idx, 0].text(
# 0.5, 0.5, f'predicted: {class_labels[predicted_labels[image_idx][0] ]}\nactual: {class_labels[target_labels[image_idx]] }', horizontalalignment='center', verticalalignment='center', fontsize=14, )
# axs[image_idx, 0].axis('off')
axs[image_idx, 0].imshow(
(denorm_img.numpy() * 255).astype(np.uint8), interpolation='bilinear')
axs[image_idx, 0].axis('off')
for layer_idx, layer_name in enumerate(gcam_layers.keys()):
# gets H X W of the cam layer
_layer = gcam_layers[layer_name][image_idx].cpu().numpy()[0]
heatmap = 1 - _layer
heatmap = np.uint8(255 * heatmap)
heatmap_img = cv2.applyColorMap(heatmap, cv2.COLORMAP_JET)
superimposed_img = cv2.addWeighted(
(denorm_img.numpy() * 255).astype(np.uint8), 0.6, heatmap_img, 0.4, 0)
axs[image_idx, layer_idx +
1].imshow(superimposed_img, interpolation='bilinear')
axs[image_idx, layer_idx+1].set_title(f'layer: {layer_name}')
axs[image_idx, layer_idx+1].axis('off')
axs[image_idx, 0].set_title(f'Predicted: {class_labels[predicted_labels[image_idx][0] ]}\nTarget: {class_labels[target_labels[image_idx]] }')
plt.tight_layout()
plt.subplots_adjust(top=0.95, wspace=0.2, hspace=0.2)
plt.show()
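# A minimal usage sketch (illustrative only): `test_loader`, `net`, `classes` and the
# layer names below are assumed to exist elsewhere and are not defined in this file.
#
# target_layers = ['layer3', 'layer4']
# gcam_layers, pred_probs, pred_ids, images, labels = load_gradcam(
#     test_loader, net, device, target_layers, size=16, classified=False)
# plot_gradcam(gcam_layers, images, labels, pred_ids, classes, denormalize)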
|
"""Factory classes for easily generating test objects."""
from .activation import Activation
from .annotation import Annotation
from .annotation_moderation import AnnotationModeration
from .auth_client import AuthClient, ConfidentialAuthClient
from .auth_ticket import AuthTicket
from .authz_code import AuthzCode
from .base import set_session
from .document import Document, DocumentMeta, DocumentURI
from .feature import Feature
from .flag import Flag
from .group import Group, OpenGroup, RestrictedGroup
from .group_scope import GroupScope
from .job import Job, SyncAnnotationJob
from .organization import Organization
from .setting import Setting
from .token import DeveloperToken, OAuth2Token
from .user import User
from .user_identity import UserIdentity
__all__ = (
"Activation",
"Annotation",
"AnnotationModeration",
"AuthClient",
"AuthTicket",
"AuthzCode",
"ConfidentialAuthClient",
"DeveloperToken",
"Document",
"DocumentMeta",
"DocumentURI",
"Feature",
"Flag",
"Group",
"GroupScope",
"Job",
"OAuth2Token",
"OpenGroup",
"Organization",
"RestrictedGroup",
"Setting",
"SyncAnnotationJob",
"User",
"UserIdentity",
"set_session",
)
|
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
import msrest.serialization
from .._generated.models import (
LexicalAnalyzer,
LexicalTokenizer,
AnalyzeRequest,
CustomAnalyzer as _CustomAnalyzer,
PatternAnalyzer as _PatternAnalyzer,
PatternTokenizer as _PatternTokenizer,
SearchResourceEncryptionKey as _SearchResourceEncryptionKey,
SearchIndexerDataSource as _SearchIndexerDataSource,
SynonymMap as _SynonymMap,
DataSourceCredentials,
AzureActiveDirectoryApplicationCredentials
)
DELIMITER = "|"
class AnalyzeTextOptions(msrest.serialization.Model):
"""Specifies some text and analysis components used to break that text into tokens.
All required parameters must be populated in order to send to Azure.
:param text: Required. The text to break into tokens.
:type text: str
:param analyzer_name: The name of the analyzer to use to break the given text. If this parameter is
not specified, you must specify a tokenizer instead. The tokenizer and analyzer parameters are
mutually exclusive. Possible values include: "ar.microsoft", "ar.lucene", "hy.lucene",
"bn.microsoft", "eu.lucene", "bg.microsoft", "bg.lucene", "ca.microsoft", "ca.lucene", "zh-
Hans.microsoft", "zh-Hans.lucene", "zh-Hant.microsoft", "zh-Hant.lucene", "hr.microsoft",
"cs.microsoft", "cs.lucene", "da.microsoft", "da.lucene", "nl.microsoft", "nl.lucene",
"en.microsoft", "en.lucene", "et.microsoft", "fi.microsoft", "fi.lucene", "fr.microsoft",
"fr.lucene", "gl.lucene", "de.microsoft", "de.lucene", "el.microsoft", "el.lucene",
"gu.microsoft", "he.microsoft", "hi.microsoft", "hi.lucene", "hu.microsoft", "hu.lucene",
"is.microsoft", "id.microsoft", "id.lucene", "ga.lucene", "it.microsoft", "it.lucene",
"ja.microsoft", "ja.lucene", "kn.microsoft", "ko.microsoft", "ko.lucene", "lv.microsoft",
"lv.lucene", "lt.microsoft", "ml.microsoft", "ms.microsoft", "mr.microsoft", "nb.microsoft",
"no.lucene", "fa.lucene", "pl.microsoft", "pl.lucene", "pt-BR.microsoft", "pt-BR.lucene", "pt-
PT.microsoft", "pt-PT.lucene", "pa.microsoft", "ro.microsoft", "ro.lucene", "ru.microsoft",
"ru.lucene", "sr-cyrillic.microsoft", "sr-latin.microsoft", "sk.microsoft", "sl.microsoft",
"es.microsoft", "es.lucene", "sv.microsoft", "sv.lucene", "ta.microsoft", "te.microsoft",
"th.microsoft", "th.lucene", "tr.microsoft", "tr.lucene", "uk.microsoft", "ur.microsoft",
"vi.microsoft", "standard.lucene", "standardasciifolding.lucene", "keyword", "pattern",
"simple", "stop", "whitespace".
:type analyzer_name: str or ~azure.search.documents.indexes.models.LexicalAnalyzerName
:param tokenizer_name: The name of the tokenizer to use to break the given text. If this parameter
is not specified, you must specify an analyzer instead. The tokenizer and analyzer parameters
are mutually exclusive. Possible values include: "classic", "edgeNGram", "keyword_v2",
"letter", "lowercase", "microsoft_language_tokenizer", "microsoft_language_stemming_tokenizer",
"nGram", "path_hierarchy_v2", "pattern", "standard_v2", "uax_url_email", "whitespace".
:type tokenizer_name: str or ~azure.search.documents.indexes.models.LexicalTokenizerName
:param token_filters: An optional list of token filters to use when breaking the given text.
This parameter can only be set when using the tokenizer parameter.
:type token_filters: list[str or ~azure.search.documents.indexes.models.TokenFilterName]
:param char_filters: An optional list of character filters to use when breaking the given text.
This parameter can only be set when using the tokenizer parameter.
:type char_filters: list[str]
"""
_validation = {
'text': {'required': True},
}
_attribute_map = {
'text': {'key': 'text', 'type': 'str'},
'analyzer_name': {'key': 'analyzerName', 'type': 'str'},
'tokenizer_name': {'key': 'tokenizerName', 'type': 'str'},
'token_filters': {'key': 'tokenFilters', 'type': '[str]'},
'char_filters': {'key': 'charFilters', 'type': '[str]'},
}
def __init__(
self,
**kwargs
):
super(AnalyzeTextOptions, self).__init__(**kwargs)
self.text = kwargs['text']
self.analyzer_name = kwargs.get('analyzer_name', None)
self.tokenizer_name = kwargs.get('tokenizer_name', None)
self.token_filters = kwargs.get('token_filters', None)
self.char_filters = kwargs.get('char_filters', None)
def _to_analyze_request(self):
return AnalyzeRequest(
text=self.text,
analyzer=self.analyzer_name,
tokenizer=self.tokenizer_name,
token_filters=self.token_filters,
char_filters=self.char_filters
)
class CustomAnalyzer(LexicalAnalyzer):
"""Allows you to take control over the process of converting text into indexable/searchable tokens.
It's a user-defined configuration consisting of a single predefined tokenizer and one or more filters.
The tokenizer is responsible for breaking text into tokens, and the filters for modifying tokens
emitted by the tokenizer.
All required parameters must be populated in order to send to Azure.
    :param odata_type: Required. Identifies the concrete type of the analyzer. Constant filled by
server.
:type odata_type: str
:param name: Required. The name of the analyzer. It must only contain letters, digits, spaces,
dashes or underscores, can only start and end with alphanumeric characters, and is limited to
128 characters.
:type name: str
:param tokenizer_name: Required. The name of the tokenizer to use to divide continuous text into a
sequence of tokens, such as breaking a sentence into words. Possible values include: "classic",
"edgeNGram", "keyword_v2", "letter", "lowercase", "microsoft_language_tokenizer",
"microsoft_language_stemming_tokenizer", "nGram", "path_hierarchy_v2", "pattern",
"standard_v2", "uax_url_email", "whitespace".
:type tokenizer_name: str or ~azure.search.documents.indexes.models.LexicalTokenizerName
:param token_filters: A list of token filters used to filter out or modify the tokens generated
by a tokenizer. For example, you can specify a lowercase filter that converts all characters to
lowercase. The filters are run in the order in which they are listed.
:type token_filters: list[str or ~azure.search.documents.indexes.models.TokenFilterName]
:param char_filters: A list of character filters used to prepare input text before it is
processed by the tokenizer. For instance, they can replace certain characters or symbols. The
filters are run in the order in which they are listed.
:type char_filters: list[str]
"""
_validation = {
'odata_type': {'required': True},
'name': {'required': True},
'tokenizer_name': {'required': True},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'tokenizer_name': {'key': 'tokenizerName', 'type': 'str'},
'token_filters': {'key': 'tokenFilters', 'type': '[str]'},
'char_filters': {'key': 'charFilters', 'type': '[str]'},
}
def __init__(
self,
**kwargs
):
super(CustomAnalyzer, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Azure.Search.CustomAnalyzer'
self.tokenizer_name = kwargs['tokenizer_name']
self.token_filters = kwargs.get('token_filters', None)
self.char_filters = kwargs.get('char_filters', None)
def _to_generated(self):
return _CustomAnalyzer(
name=self.name,
odata_type=self.odata_type,
tokenizer=self.tokenizer_name,
token_filters=self.token_filters,
char_filters=self.char_filters
)
@classmethod
def _from_generated(cls, custom_analyzer):
if not custom_analyzer:
return None
return cls(
name=custom_analyzer.name,
odata_type=custom_analyzer.odata_type,
tokenizer_name=custom_analyzer.tokenizer,
token_filters=custom_analyzer.token_filters,
char_filters=custom_analyzer.char_filters
)
class PatternAnalyzer(LexicalAnalyzer):
"""Flexibly separates text into terms via a regular expression.
This analyzer is implemented using Apache Lucene.
All required parameters must be populated in order to send to Azure.
:param name: Required. The name of the analyzer. It must only contain letters, digits, spaces,
dashes or underscores, can only start and end with alphanumeric characters, and is limited to
128 characters.
:type name: str
:param lower_case_terms: A value indicating whether terms should be lower-cased. Default is
true.
:type lower_case_terms: bool
:param pattern: A regular expression to match token separators. Default is an
expression that matches one or more white space characters.
:type pattern: str
:param flags: List of regular expression flags. Possible values of each flag include: 'CANON_EQ',
'CASE_INSENSITIVE', 'COMMENTS', 'DOTALL', 'LITERAL', 'MULTILINE', 'UNICODE_CASE', 'UNIX_LINES'.
:type flags: list[str] or list[~search_service_client.models.RegexFlags]
:param stopwords: A list of stopwords.
:type stopwords: list[str]
"""
_validation = {"odata_type": {"required": True}, "name": {"required": True}}
_attribute_map = {
"odata_type": {"key": "@odata\\.type", "type": "str"},
"name": {"key": "name", "type": "str"},
"lower_case_terms": {"key": "lowercase", "type": "bool"},
"pattern": {"key": "pattern", "type": "str"},
"flags": {"key": "flags", "type": "[str]"},
"stopwords": {"key": "stopwords", "type": "[str]"},
}
def __init__(self, **kwargs):
super(PatternAnalyzer, self).__init__(**kwargs)
self.odata_type = "#Microsoft.Azure.Search.PatternAnalyzer"
self.lower_case_terms = kwargs.get("lower_case_terms", True)
self.pattern = kwargs.get("pattern", r"\W+")
self.flags = kwargs.get("flags", None)
self.stopwords = kwargs.get("stopwords", None)
def _to_generated(self):
if not self.flags:
flags = None
else:
flags = DELIMITER.join(self.flags)
return _PatternAnalyzer(
name=self.name,
lower_case_terms=self.lower_case_terms,
pattern=self.pattern,
flags=flags,
stopwords=self.stopwords,
)
@classmethod
def _from_generated(cls, pattern_analyzer):
if not pattern_analyzer:
return None
if not pattern_analyzer.flags:
flags = None
else:
flags = pattern_analyzer.flags.split(DELIMITER)
return cls(
name=pattern_analyzer.name,
lower_case_terms=pattern_analyzer.lower_case_terms,
pattern=pattern_analyzer.pattern,
flags=flags,
stopwords=pattern_analyzer.stopwords,
)
class PatternTokenizer(LexicalTokenizer):
"""Tokenizer that uses regex pattern matching to construct distinct tokens.
This tokenizer is implemented using Apache Lucene.
All required parameters must be populated in order to send to Azure.
:param name: Required. The name of the tokenizer. It must only contain letters, digits, spaces,
dashes or underscores, can only start and end with alphanumeric characters, and is limited to
128 characters.
:type name: str
:param pattern: A regular expression to match token separators. Default is an
expression that matches one or more white space characters.
:type pattern: str
:param flags: List of regular expression flags. Possible values of each flag include: 'CANON_EQ',
'CASE_INSENSITIVE', 'COMMENTS', 'DOTALL', 'LITERAL', 'MULTILINE', 'UNICODE_CASE', 'UNIX_LINES'.
:type flags: list[str] or list[~search_service_client.models.RegexFlags]
:param group: The zero-based ordinal of the matching group in the regular expression to
extract into tokens. Use -1 if you want to use the entire pattern to split the input into
tokens, irrespective of matching groups. Default is -1.
:type group: int
"""
_validation = {"odata_type": {"required": True}, "name": {"required": True}}
_attribute_map = {
"odata_type": {"key": "@odata\\.type", "type": "str"},
"name": {"key": "name", "type": "str"},
"pattern": {"key": "pattern", "type": "str"},
"flags": {"key": "flags", "type": "[str]"},
"group": {"key": "group", "type": "int"},
}
def __init__(self, **kwargs):
super(PatternTokenizer, self).__init__(**kwargs)
self.odata_type = "#Microsoft.Azure.Search.PatternTokenizer"
self.pattern = kwargs.get("pattern", r"\W+")
self.flags = kwargs.get("flags", None)
self.group = kwargs.get("group", -1)
def _to_generated(self):
if not self.flags:
flags = None
else:
flags = DELIMITER.join(self.flags)
return _PatternTokenizer(
name=self.name,
pattern=self.pattern,
flags=flags,
group=self.group,
)
@classmethod
def _from_generated(cls, pattern_tokenizer):
if not pattern_tokenizer:
return None
if not pattern_tokenizer.flags:
flags = None
else:
flags = pattern_tokenizer.flags.split(DELIMITER)
return cls(
name=pattern_tokenizer.name,
pattern=pattern_tokenizer.pattern,
flags=flags,
group=pattern_tokenizer.group,
)
class SearchResourceEncryptionKey(msrest.serialization.Model):
"""A customer-managed encryption key in Azure Key Vault. Keys that you create and manage can be
used to encrypt or decrypt data-at-rest in Azure Cognitive Search, such as indexes and synonym maps.
All required parameters must be populated in order to send to Azure.
:param key_name: Required. The name of your Azure Key Vault key to be used to encrypt your data
at rest.
:type key_name: str
:param key_version: Required. The version of your Azure Key Vault key to be used to encrypt
your data at rest.
:type key_version: str
:param vault_uri: Required. The URI of your Azure Key Vault, also referred to as DNS name, that
contains the key to be used to encrypt your data at rest. An example URI might be https://my-
keyvault-name.vault.azure.net.
:type vault_uri: str
:param application_id: Required. An AAD Application ID that was granted the required access
permissions to the Azure Key Vault that is to be used when encrypting your data at rest. The
Application ID should not be confused with the Object ID for your AAD Application.
:type application_id: str
:param application_secret: The authentication key of the specified AAD application.
:type application_secret: str
"""
_validation = {
'key_name': {'required': True},
'key_version': {'required': True},
'vault_uri': {'required': True},
}
_attribute_map = {
'key_name': {'key': 'keyVaultKeyName', 'type': 'str'},
'key_version': {'key': 'keyVaultKeyVersion', 'type': 'str'},
'vault_uri': {'key': 'keyVaultUri', 'type': 'str'},
'application_id': {'key': 'applicationId', 'type': 'str'},
'application_secret': {'key': 'applicationSecret', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(SearchResourceEncryptionKey, self).__init__(**kwargs)
self.key_name = kwargs['key_name']
self.key_version = kwargs['key_version']
self.vault_uri = kwargs['vault_uri']
self.application_id = kwargs.get('application_id', None)
self.application_secret = kwargs.get('application_secret', None)
def _to_generated(self):
if self.application_id and self.application_secret:
access_credentials = AzureActiveDirectoryApplicationCredentials(
application_id=self.application_id,
application_secret=self.application_secret
)
else:
access_credentials = None
return _SearchResourceEncryptionKey(
key_name=self.key_name,
key_version=self.key_version,
vault_uri=self.vault_uri,
access_credentials=access_credentials
)
@classmethod
def _from_generated(cls, search_resource_encryption_key):
if not search_resource_encryption_key:
return None
if search_resource_encryption_key.access_credentials:
application_id = search_resource_encryption_key.access_credentials.application_id
application_secret = search_resource_encryption_key.access_credentials.application_secret
else:
application_id = None
application_secret = None
return cls(
key_name=search_resource_encryption_key.key_name,
key_version=search_resource_encryption_key.key_version,
vault_uri=search_resource_encryption_key.vault_uri,
application_id=application_id,
application_secret=application_secret
)
class SynonymMap(msrest.serialization.Model):
"""Represents a synonym map definition.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param name: Required. The name of the synonym map.
:type name: str
:ivar format: Required. The format of the synonym map. Only the 'solr' format is currently
supported. Default value: "solr".
:vartype format: str
:param synonyms: Required. A series of synonym rules in the specified synonym map format. The
rules must be separated by newlines.
:type synonyms: list[str]
:param encryption_key: A description of an encryption key that you create in Azure Key Vault.
This key is used to provide an additional level of encryption-at-rest for your data when you
want full assurance that no one, not even Microsoft, can decrypt your data in Azure Cognitive
Search. Once you have encrypted your data, it will always remain encrypted. Azure Cognitive
Search will ignore attempts to set this property to null. You can change this property as
    needed if you want to rotate your encryption key; your data will be unaffected. Encryption with
customer-managed keys is not available for free search services, and is only available for paid
services created on or after January 1, 2019.
:type encryption_key: ~azure.search.documents.indexes.models.SearchResourceEncryptionKey
:param e_tag: The ETag of the synonym map.
:type e_tag: str
"""
_validation = {
'name': {'required': True},
'format': {'required': True, 'constant': True},
'synonyms': {'required': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'format': {'key': 'format', 'type': 'str'},
'synonyms': {'key': 'synonyms', 'type': '[str]'},
'encryption_key': {'key': 'encryptionKey', 'type': 'SearchResourceEncryptionKey'},
'e_tag': {'key': '@odata\\.etag', 'type': 'str'},
}
format = "solr"
def __init__(
self,
**kwargs
):
super(SynonymMap, self).__init__(**kwargs)
self.name = kwargs['name']
self.synonyms = kwargs['synonyms']
self.encryption_key = kwargs.get('encryption_key', None)
self.e_tag = kwargs.get('e_tag', None)
def _to_generated(self):
return _SynonymMap(
name=self.name,
synonyms="\n".join(self.synonyms),
encryption_key=self.encryption_key._to_generated() if self.encryption_key else None, # pylint:disable=protected-access
e_tag=self.e_tag
)
@classmethod
def _from_generated(cls, synonym_map):
if not synonym_map:
return None
return cls(
name=synonym_map.name,
synonyms=synonym_map.synonyms.split("\n"),
# pylint:disable=protected-access
encryption_key=SearchResourceEncryptionKey._from_generated(synonym_map.encryption_key),
e_tag=synonym_map.e_tag
)
@classmethod
def create_from_file(cls, name, file_path):
with open(file_path, "r") as f:
solr_format_synonyms = f.read()
synonyms = solr_format_synonyms.split("\n")
return cls(
name=name,
synonyms=synonyms
)
class SearchIndexerDataSourceConnection(msrest.serialization.Model):
"""Represents a datasource connection definition, which can be used to configure an indexer.
All required parameters must be populated in order to send to Azure.
:param name: Required. The name of the datasource connection.
:type name: str
:param description: The description of the datasource connection.
:type description: str
:param type: Required. The type of the datasource connection. Possible values include: "azuresql",
"cosmosdb", "azureblob", "azuretable", "mysql", "adlsgen2".
:type type: str or ~azure.search.documents.indexes.models.SearchIndexerDataSourceType
:param connection_string: The connection string for the datasource connection.
:type connection_string: str
:param container: Required. The data container for the datasource connection.
:type container: ~azure.search.documents.indexes.models.SearchIndexerDataContainer
:param data_change_detection_policy: The data change detection policy for the datasource connection.
:type data_change_detection_policy: ~azure.search.documents.models.DataChangeDetectionPolicy
:param data_deletion_detection_policy: The data deletion detection policy for the datasource connection.
:type data_deletion_detection_policy:
~azure.search.documents.models.DataDeletionDetectionPolicy
:param e_tag: The ETag of the data source.
:type e_tag: str
"""
_validation = {
'name': {'required': True},
'type': {'required': True},
'connection_string': {'required': True},
'container': {'required': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'connection_string': {'key': 'connectionString', 'type': 'str'},
'container': {'key': 'container', 'type': 'SearchIndexerDataContainer'},
'data_change_detection_policy': {'key': 'dataChangeDetectionPolicy', 'type': 'DataChangeDetectionPolicy'},
'data_deletion_detection_policy': {'key': 'dataDeletionDetectionPolicy', 'type': 'DataDeletionDetectionPolicy'},
'e_tag': {'key': '@odata\\.etag', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(SearchIndexerDataSourceConnection, self).__init__(**kwargs)
self.name = kwargs['name']
self.description = kwargs.get('description', None)
self.type = kwargs['type']
self.connection_string = kwargs['connection_string']
self.container = kwargs['container']
self.data_change_detection_policy = kwargs.get('data_change_detection_policy', None)
self.data_deletion_detection_policy = kwargs.get('data_deletion_detection_policy', None)
self.e_tag = kwargs.get('e_tag', None)
def _to_generated(self):
if self.connection_string is None or self.connection_string == "":
connection_string = "<unchanged>"
else:
connection_string = self.connection_string
credentials = DataSourceCredentials(
connection_string=connection_string
)
return _SearchIndexerDataSource(
name=self.name,
description=self.description,
type=self.type,
credentials=credentials,
container=self.container,
data_change_detection_policy=self.data_change_detection_policy,
data_deletion_detection_policy=self.data_deletion_detection_policy,
e_tag=self.e_tag
)
@classmethod
def _from_generated(cls, search_indexer_data_source):
if not search_indexer_data_source:
return None
connection_string = search_indexer_data_source.credentials.connection_string \
if search_indexer_data_source.credentials else None
return cls(
name=search_indexer_data_source.name,
description=search_indexer_data_source.description,
type=search_indexer_data_source.type,
connection_string=connection_string,
container=search_indexer_data_source.container,
data_change_detection_policy=search_indexer_data_source.data_change_detection_policy,
data_deletion_detection_policy=search_indexer_data_source.data_deletion_detection_policy,
e_tag=search_indexer_data_source.e_tag
)
def pack_analyzer(analyzer):
if not analyzer:
return None
if isinstance(analyzer, (PatternAnalyzer, CustomAnalyzer)):
return analyzer._to_generated() # pylint:disable=protected-access
return analyzer
def unpack_analyzer(analyzer):
if not analyzer:
return None
if isinstance(analyzer, _PatternAnalyzer):
return PatternAnalyzer._from_generated(analyzer) # pylint:disable=protected-access
if isinstance(analyzer, _CustomAnalyzer):
return CustomAnalyzer._from_generated(analyzer) # pylint:disable=protected-access
return analyzer
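# A minimal round-trip sketch (hypothetical analyzer name; not part of the SDK samples):
# the externally exposed PatternAnalyzer keeps `flags` as a list, while the generated
# model stores them joined by DELIMITER, which pack_analyzer/unpack_analyzer translate between.
#
# analyzer = PatternAnalyzer(name="my-pattern-analyzer", pattern=r"\s+",
#                            flags=["CASE_INSENSITIVE", "MULTILINE"])
# generated = pack_analyzer(analyzer)    # _PatternAnalyzer with flags == "CASE_INSENSITIVE|MULTILINE"
# restored = unpack_analyzer(generated)  # PatternAnalyzer with flags == ["CASE_INSENSITIVE", "MULTILINE"]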
|
import os
from gensim import corpora, models
from cleaning import clean
def train():
#Loads the data from the local storage
synopses = []
for filename in os.listdir('cnn-stories'):
with open('cnn-stories/' + filename, 'r') as infile:
synopses.append(infile.read())
#Cleans the data
corpus, dictionary = clean(synopses)
#Saves the model and the dictionary in local storage
corpora.Dictionary.save(dictionary, 'dictionary.dict')
lda = models.LdaModel(corpus, num_topics=10, id2word=dictionary, update_every=5, chunksize=10000, passes=100)
lda.save('lda.model')
if __name__ == "__main__":
train()
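# A minimal inference sketch (assumes the artifacts written by train() above; the
# query text is illustrative):
#
# dictionary = corpora.Dictionary.load('dictionary.dict')
# lda = models.LdaModel.load('lda.model')
# bow = dictionary.doc2bow("some unseen news article text".lower().split())
# print(lda.get_document_topics(bow))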
|
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Callable, Dict, Optional, Sequence, Tuple, Union
from google.api_core import grpc_helpers
from google.api_core import operations_v1
from google.api_core import gapic_v1
import google.auth # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import grpc # type: ignore
from google.cloud.aiplatform_v1.types import specialist_pool
from google.cloud.aiplatform_v1.types import specialist_pool_service
from google.longrunning import operations_pb2 # type: ignore
from .base import SpecialistPoolServiceTransport, DEFAULT_CLIENT_INFO
class SpecialistPoolServiceGrpcTransport(SpecialistPoolServiceTransport):
"""gRPC backend transport for SpecialistPoolService.
A service for creating and managing Customer SpecialistPools.
When customers start Data Labeling jobs, they can reuse/create
Specialist Pools to bring their own Specialists to label the
data. Customers can add/remove Managers for the Specialist Pool
on Cloud console, then Managers will get email notifications to
manage Specialists and tasks on CrowdCompute console.
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends protocol buffers over the wire using gRPC (which is built on
top of HTTP/2); the ``grpcio`` package must be installed.
"""
_stubs: Dict[str, Callable]
def __init__(
self,
*,
host: str = "aiplatform.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: str = None,
scopes: Sequence[str] = None,
channel: grpc.Channel = None,
api_mtls_endpoint: str = None,
client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
ssl_channel_credentials: grpc.ChannelCredentials = None,
client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is ignored if ``channel`` is provided.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
            scopes (Optional[Sequence[str]]): A list of scopes. This argument is
ignored if ``channel`` is provided.
channel (Optional[grpc.Channel]): A ``Channel`` instance through
which to make calls.
api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
If provided, it overrides the ``host`` argument and tries to create
a mutual TLS channel with client SSL credentials from
``client_cert_source`` or application default SSL credentials.
client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
Deprecated. A callback to provide client SSL certificate bytes and
private key bytes, both in PEM format. It is ignored if
``api_mtls_endpoint`` is None.
ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
for the grpc channel. It is ignored if ``channel`` is provided.
client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
A callback to provide client certificate bytes and private key bytes,
both in PEM format. It is used to configure a mutual TLS channel. It is
ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
always_use_jwt_access (Optional[bool]): Whether self signed JWT should
be used for service account credentials.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
self._grpc_channel = None
self._ssl_channel_credentials = ssl_channel_credentials
self._stubs: Dict[str, Callable] = {}
self._operations_client: Optional[operations_v1.OperationsClient] = None
if api_mtls_endpoint:
warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
if client_cert_source:
warnings.warn("client_cert_source is deprecated", DeprecationWarning)
if channel:
# Ignore credentials if a channel was passed.
credentials = False
# If a channel was explicitly provided, set it.
self._grpc_channel = channel
self._ssl_channel_credentials = None
else:
if api_mtls_endpoint:
host = api_mtls_endpoint
# Create SSL credentials with client_cert_source or application
# default SSL credentials.
if client_cert_source:
cert, key = client_cert_source()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
else:
self._ssl_channel_credentials = SslCredentials().ssl_credentials
else:
if client_cert_source_for_mtls and not ssl_channel_credentials:
cert, key = client_cert_source_for_mtls()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
# The base transport sets the host, credentials and scopes
super().__init__(
host=host,
credentials=credentials,
credentials_file=credentials_file,
scopes=scopes,
quota_project_id=quota_project_id,
client_info=client_info,
always_use_jwt_access=always_use_jwt_access,
)
if not self._grpc_channel:
self._grpc_channel = type(self).create_channel(
self._host,
# use the credentials which are saved
credentials=self._credentials,
# Set ``credentials_file`` to ``None`` here as
# the credentials that we saved earlier should be used.
credentials_file=None,
scopes=self._scopes,
ssl_credentials=self._ssl_channel_credentials,
quota_project_id=quota_project_id,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
# Wrap messages. This must be done after self._grpc_channel exists
self._prep_wrapped_messages(client_info)
@classmethod
def create_channel(
cls,
host: str = "aiplatform.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: str = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
**kwargs,
) -> grpc.Channel:
"""Create and return a gRPC channel object.
Args:
host (Optional[str]): The host for the channel to use.
credentials (Optional[~.Credentials]): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is mutually exclusive with credentials.
            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
kwargs (Optional[dict]): Keyword arguments, which are passed to the
channel creation.
Returns:
grpc.Channel: A gRPC channel object.
Raises:
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
return grpc_helpers.create_channel(
host,
credentials=credentials,
credentials_file=credentials_file,
quota_project_id=quota_project_id,
default_scopes=cls.AUTH_SCOPES,
scopes=scopes,
default_host=cls.DEFAULT_HOST,
**kwargs,
)
@property
def grpc_channel(self) -> grpc.Channel:
"""Return the channel designed to connect to this service.
"""
return self._grpc_channel
@property
def operations_client(self) -> operations_v1.OperationsClient:
"""Create the client designed to process long-running operations.
This property caches on the instance; repeated calls return the same
client.
"""
# Quick check: Only create a new client if we do not already have one.
if self._operations_client is None:
self._operations_client = operations_v1.OperationsClient(self.grpc_channel)
# Return the client from cache.
return self._operations_client
@property
def create_specialist_pool(
self,
) -> Callable[
[specialist_pool_service.CreateSpecialistPoolRequest], operations_pb2.Operation
]:
r"""Return a callable for the create specialist pool method over gRPC.
Creates a SpecialistPool.
Returns:
Callable[[~.CreateSpecialistPoolRequest],
~.Operation]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "create_specialist_pool" not in self._stubs:
self._stubs["create_specialist_pool"] = self.grpc_channel.unary_unary(
"/google.cloud.aiplatform.v1.SpecialistPoolService/CreateSpecialistPool",
request_serializer=specialist_pool_service.CreateSpecialistPoolRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["create_specialist_pool"]
@property
def get_specialist_pool(
self,
) -> Callable[
[specialist_pool_service.GetSpecialistPoolRequest],
specialist_pool.SpecialistPool,
]:
r"""Return a callable for the get specialist pool method over gRPC.
Gets a SpecialistPool.
Returns:
Callable[[~.GetSpecialistPoolRequest],
~.SpecialistPool]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_specialist_pool" not in self._stubs:
self._stubs["get_specialist_pool"] = self.grpc_channel.unary_unary(
"/google.cloud.aiplatform.v1.SpecialistPoolService/GetSpecialistPool",
request_serializer=specialist_pool_service.GetSpecialistPoolRequest.serialize,
response_deserializer=specialist_pool.SpecialistPool.deserialize,
)
return self._stubs["get_specialist_pool"]
@property
def list_specialist_pools(
self,
) -> Callable[
[specialist_pool_service.ListSpecialistPoolsRequest],
specialist_pool_service.ListSpecialistPoolsResponse,
]:
r"""Return a callable for the list specialist pools method over gRPC.
Lists SpecialistPools in a Location.
Returns:
Callable[[~.ListSpecialistPoolsRequest],
~.ListSpecialistPoolsResponse]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "list_specialist_pools" not in self._stubs:
self._stubs["list_specialist_pools"] = self.grpc_channel.unary_unary(
"/google.cloud.aiplatform.v1.SpecialistPoolService/ListSpecialistPools",
request_serializer=specialist_pool_service.ListSpecialistPoolsRequest.serialize,
response_deserializer=specialist_pool_service.ListSpecialistPoolsResponse.deserialize,
)
return self._stubs["list_specialist_pools"]
@property
def delete_specialist_pool(
self,
) -> Callable[
[specialist_pool_service.DeleteSpecialistPoolRequest], operations_pb2.Operation
]:
r"""Return a callable for the delete specialist pool method over gRPC.
Deletes a SpecialistPool as well as all Specialists
in the pool.
Returns:
Callable[[~.DeleteSpecialistPoolRequest],
~.Operation]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "delete_specialist_pool" not in self._stubs:
self._stubs["delete_specialist_pool"] = self.grpc_channel.unary_unary(
"/google.cloud.aiplatform.v1.SpecialistPoolService/DeleteSpecialistPool",
request_serializer=specialist_pool_service.DeleteSpecialistPoolRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["delete_specialist_pool"]
@property
def update_specialist_pool(
self,
) -> Callable[
[specialist_pool_service.UpdateSpecialistPoolRequest], operations_pb2.Operation
]:
r"""Return a callable for the update specialist pool method over gRPC.
Updates a SpecialistPool.
Returns:
Callable[[~.UpdateSpecialistPoolRequest],
~.Operation]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "update_specialist_pool" not in self._stubs:
self._stubs["update_specialist_pool"] = self.grpc_channel.unary_unary(
"/google.cloud.aiplatform.v1.SpecialistPoolService/UpdateSpecialistPool",
request_serializer=specialist_pool_service.UpdateSpecialistPoolRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["update_specialist_pool"]
def close(self):
self.grpc_channel.close()
__all__ = ("SpecialistPoolServiceGrpcTransport",)
|
# Base path for log files and sessions
base_path = '~/.weevely/'
# History path
history_path = '~/.weevely/history'
# Session path
sessions_path = '~/.weevely/sessions/'
sessions_ext = '.session'
# Supported Channels
channels = [
# Obfuscated channel inside POST requests introduced
# in Weevely 3.6
'ObfPost',
]
# Append random GET parameters to every request to
# make sure the page is not cached by proxies.
add_random_param_nocache = False
# Add additional headers to be sent at every request e.g.
# additional_headers = [
# ( 'Authentication', 'Basic QWxhZGRpbjpvcGVuIHNlc2FtBl==' )
# ]
additional_headers = []
# Agents and obfuscators used by generator.py
agent_templates_folder_path = 'bd/agents/'
obfuscators_templates_folder_path = 'bd/obfuscators/'
#######################################
# Resolve given paths - DO NOT CHANGE #
#######################################
import os, sys
base_path = os.path.expanduser(base_path)
history_path = os.path.expanduser(history_path)
sessions_path = os.path.expanduser(sessions_path)
weevely_path = os.path.dirname(os.path.realpath(sys.argv[0]))
agent_templates_folder_path = os.path.join(
weevely_path,
agent_templates_folder_path
)
obfuscators_templates_folder_path = os.path.join(
weevely_path,
obfuscators_templates_folder_path
)
|
# -*- coding: utf-8 -*-
# file: train_text_classification_bert.py
# time: 2021/8/5
# author: yangheng <yangheng@m.scnu.edu.cn>
# github: https://github.com/yangheng95
# Copyright (C) 2021. All Rights Reserved.
from pyabsa import TextClassificationTrainer, ClassificationConfigManager, ClassificationDatasetList
from pyabsa.functional import BERTClassificationModelList
classification_config_english = ClassificationConfigManager.get_classification_config_english()
classification_config_english.model = BERTClassificationModelList.BERT
classification_config_english.num_epoch = 10
classification_config_english.evaluate_begin = 0
classification_config_english.max_seq_len = 512
classification_config_english.log_step = 200
classification_config_english.dropout = 0.5
classification_config_english.cache_dataset = False
classification_config_english.seed = {42, 56, 1}
classification_config_english.l2reg = 1e-5
classification_config_english.learning_rate = 1e-5
classification_config_english.cross_validate_fold = 5
dataset = ClassificationDatasetList.SST2
text_classifier = TextClassificationTrainer(config=classification_config_english,
dataset=dataset,
checkpoint_save_mode=1,
auto_device=True
).load_trained_model()
|
# Copyright 2018 The Cornac Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import unittest
import random
import time
from cornac.datasets import movielens
class TestMovieLens(unittest.TestCase):
def test_load_feedback(self):
# only run data download tests 20% of the time to speed up frequent testing
random.seed(time.time())
if random.random() > 0.8:
ml_100k = movielens.load_feedback()
self.assertEqual(len(ml_100k), 100000)
if random.random() > 0.8:
ml_1m = movielens.load_feedback(variant='1M')
self.assertEqual(len(ml_1m), 1000209)
def test_load_plot(self):
# only run data download tests 20% of the time to speed up frequent testing
random.seed(time.time())
if random.random() > 0.8:
plots, ids = movielens.load_plot()
self.assertEqual(len(ids), 10076)
if __name__ == '__main__':
unittest.main()
|
# -*- coding: utf-8 -*-
"""
Created on Sat Apr 4 15:37:43 2020
@author: moder
"""
import os
from datetime import datetime
import pandas as pd
import urllib.request
from bs4 import BeautifulSoup
user_agent = "user_agent = 'Mozilla/5.0 (Windows NT 6.1; Win64; x64)"
def scrap_wikipedia_text(url):
request = urllib.request.Request(url, data=None, headers={'User-Agent' : user_agent})
html = urllib.request.urlopen(request).read().decode('utf-8')
soup = BeautifulSoup(html, 'html.parser')
content_div = soup.find('div', attrs={'id': 'mw-content-text'})
# remove tables and graphs
if content_div is not None:
for s in content_div.select('table'):
s.extract()
for s in content_div.select('img'):
s.extract()
# remove references
for s in content_div.select('div.reflist'):
s.extract()
print('div.reflist extracted from %s...' % url)
# iterate all p tags and append to text
tags = ['h1', 'h2', 'h3', 'li', 'p']
bodytext = ''
for con in content_div.find_all(tags):
bodytext += con.text
return bodytext
return None
if __name__ == '__main__':
print('store data started...')
# load containment history file from kaggle
df_contain = pd.read_csv(r'data/COVID 19 Containment measures data.csv')
# cfilter = df_contain['Country'].isin(['Austria', 'Germany', 'Italy', 'Spain', 'Denmark'])
# df_c = df_contain[cfilter]
df_c = df_contain
df = df_c[df_c['Source'].notna()]
df_drop = df.drop_duplicates(subset='Source', keep='last')
wfilter = df_drop['Source'].str.contains('en.wikipedia.org')
df_red = df_drop[wfilter]
df_res = df_red[['Date Start', 'Country', 'Keywords', 'Source']]
df_res.to_csv(r'data/covid19-all-countries.csv')
for index, row in df_res.iterrows():
text = scrap_wikipedia_text(row['Source'])
time = datetime.now().strftime('%Y%m%d_%H%M%S')
filename = '%s_%s_covid19-wikipedia.txt' % (time, row['Country'])
        with open(os.path.join('data', filename), 'w', encoding='utf-8') as file:
            file.write(text or '')
        print('saved file %s ...' % filename)
    # TODO: strip citation markers such as "[1]" from the text (regex: \[\d+\])
|
import torch
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
LAMBDA_COORD = 5
LAMBDA_NOOBJ = 0.5
def calc_loss(inp, target, opt):
if inp.size(0) != target.size(0):
raise Exception("Batch size does not match")
    total_loss = torch.tensor(0.0)
    for i in range(inp.size(0)):
        # index per-sample views so the batch tensors are not overwritten between iterations
        inp_i = inp[i]
        target_i = target[i]
        Q = predict_one_bbox(inp_i, target_i, opt)
        total_loss = total_loss + calc_loss_single(Q, target_i, opt)
return total_loss
def predict_one_bbox(inp, target, opt):
Q = torch.zeros(opt.S, opt.S, 5 + opt.C)
select = torch.tensor(0).to(device)
for i in range(opt.S):
for j in range(opt.S):
for b in range(opt.B):
if b==0:
boxes = inp[i, j, b*5 : b*5+5].to(device)
else:
boxes = torch.stack((boxes, inp[i, j, b*5 : b*5+5])).to(device)
if len(target[i, j, :].nonzero()) > 1:
max_iou = torch.tensor([0.]).to(device)
groundtruth_box = target[i, j, :4].clone()
for b in range(opt.B):
iou = calc_IOU(groundtruth_box, boxes[b][:-1], device)
if iou > max_iou:
max_iou = iou
select = torch.tensor(b).to(device)
else:
max_confidence = torch.tensor(0.).to(device)
for b in range(opt.B):
confidence = boxes[b][-1]
if confidence > max_confidence:
max_confidence = confidence
select = torch.tensor(b).to(device)
Q[i, j, :5] = boxes[select]
Q[i, j, 5:] = inp[i, j, -opt.C:]
return Q
def calc_loss_single(inp, target, opt):
loss = torch.zeros(1)
for i in range(opt.S):
for j in range(opt.S):
# case 1: grid cell HAS object
if len(target[i, j, :].nonzero()) > 1:
# localization
loss = loss + LAMBDA_COORD * (torch.pow(inp[i, j, 0] - target[i, j, 0], 2) + torch.pow(inp[i, j, 1] - target[i, j, 1], 2))
loss = loss + LAMBDA_COORD * (torch.pow(torch.sqrt(torch.abs(inp[i, j, 2])) - torch.sqrt(torch.abs(target[i, j,2])), 2) \
+ torch.pow(torch.sqrt(torch.abs(inp[i, j, 3])) - torch.sqrt(torch.abs(target[i, j, 3])), 2)) # org
loss = loss + torch.pow(inp[i, j, 4]-1, 2) # Ground truth confidence is constant 1
# classification
true_cls = target[i, j, -1].type(torch.int64)
true_cls_vec = torch.zeros(opt.C)
true_cls_vec[true_cls] = torch.tensor(1)
pred_cls_vec = inp[i, j, -opt.C:]
loss = loss + torch.sum(torch.pow(pred_cls_vec - true_cls_vec, 2))
# case 2: grid cell NO object
# classification
else:
loss = loss + LAMBDA_NOOBJ * torch.pow(inp[i, j, 4] - 0, 2) # Ground truth confidence is constant 0
return loss
def calc_IOU(box_1, box_2, device=torch.device('cpu'), use_float64=False):
"""
Tensor version of calc_IOU()
compute IOU between two bounding boxes
:param box_1: Detection x, y, w, h image coordinates in [0, 1]
:param box_2: GroundTruth x, y, w, h image coordinates in [0, 1]
:return:
"""
'''
x_min_1 = torch.clamp((box_1[0] - box_1[2] / 2), 0, 1).to(device)
x_max_1 = torch.clamp((box_1[0] + box_1[2] / 2), 0, 1).to(device)
y_min_1 = torch.clamp((box_1[1] - box_1[3] / 2), 0, 1).to(device)
y_max_1 = torch.clamp((box_1[1] + box_1[3] / 2), 0, 1).to(device)
'''
x_min_1 = torch.clamp((abs(box_1[0]) - abs(box_1[2]) / 2), 0, 1).to(device)
x_max_1 = torch.clamp((abs(box_1[0]) + abs(box_1[2]) / 2), 0, 1).to(device)
y_min_1 = torch.clamp((abs(box_1[1]) - abs(box_1[3]) / 2), 0, 1).to(device)
y_max_1 = torch.clamp((abs(box_1[1]) + abs(box_1[3]) / 2), 0, 1).to(device)
x_min_2 = torch.clamp((box_2[0] - box_2[2] / 2), 0, 1).to(device)
x_max_2 = torch.clamp((box_2[0] + box_2[2] / 2), 0, 1).to(device)
y_min_2 = torch.clamp((box_2[1] - box_2[3] / 2), 0, 1).to(device)
y_max_2 = torch.clamp((box_2[1] + box_2[3] / 2), 0, 1).to(device)
# z = torch.tensor(0, dtype=torch.float).to(device)
z = torch.tensor(0.).to(device)
a = torch.min(x_max_1, x_max_2)
b = torch.max(x_min_1, x_min_2)
c = torch.min(y_max_1, y_max_2)
d = torch.max(y_min_1, y_min_2)
overlap_width = torch.max(a-b, z)
overlap_height = torch.max(c-d, z)
overlap_area = overlap_width * overlap_height
union_area = (x_max_1 - x_min_1) * (y_max_1 - y_min_1) \
+ (x_max_2 - x_min_2) * (y_max_2 - y_min_2) \
- overlap_area
intersection_over_union = overlap_area / union_area
return intersection_over_union
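# A small self-contained check of calc_IOU (illustrative values only; the rest of this
# module additionally needs an `opt` object with S, B and C attributes, which is not
# constructed here).
if __name__ == "__main__":
    # Boxes are (x, y, w, h) with image coordinates in [0, 1].
    box_a = torch.tensor([0.5, 0.5, 0.4, 0.4])
    box_b = torch.tensor([0.6, 0.6, 0.4, 0.4])
    # The overlap is a 0.3 x 0.3 region: IOU = 0.09 / (0.16 + 0.16 - 0.09) ~= 0.391
    print(calc_IOU(box_a, box_b))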
|
from django.apps import AppConfig
class BasicApiConfig(AppConfig):
default_auto_field = 'django.db.models.BigAutoField'
name = 'basic_api'
|
# coding: utf-8
"""
Definition of the job dashboard interface.
"""
__all__ = ["BaseJobDashboard", "NoJobDashboard", "cache_by_status"]
import time
import functools
from contextlib import contextmanager
from abc import ABCMeta, abstractmethod
import six
def cache_by_status(func):
"""
Decorator for :py:meth:`BaseJobDashboard.publish` (and inheriting classes) that caches the last
    published status to decide if a new publication is necessary or not. When the status did not
change since the last call, the actual publish method is not invoked and *None* is returned.
"""
@functools.wraps(func)
def wrapper(self, job_data, event, job_num, *args, **kwargs):
job_id = job_data["job_id"]
dashboard_status = self.map_status(job_data.get("status"), event)
# nothing to do when the status is invalid or did not change
if not dashboard_status or self._last_states.get(job_id) == dashboard_status:
return None
# set the new status
self._last_states[job_id] = dashboard_status
return func(self, job_data, event, job_num, *args, **kwargs)
return wrapper
@six.add_metaclass(ABCMeta)
class BaseJobDashboard(object):
"""
Base class of a minimal job dashboard interface that is used from within
:py:class:`law.workflow.remote.BaseRemoteWorkflow`'s.
.. py:classattribute:: persistent_attributes
type: list
List of instance attributes that should be marked as being persistent. This is (e.g.) used in
the :py:class:`law.workflow.remote.BaseRemoteWorkflow` when saving job and submission
information to submission files. Common use cases are user information.
.. py:attribute:: max_rate
type: int
Maximum number of events that can be published per second. :py:meth:`rate_guard` uses this
value to delay function calls.
"""
cache_by_status = None
persistent_attributes = []
def __init__(self, max_rate=0):
super(BaseJobDashboard, self).__init__()
# maximum number of events per second
self.max_rate = max_rate
# timestamp of last event, used to ensure that max_rate is not exceeded
self._last_event_time = 0.
# last dashboard status per job_id, used to prevent subsequent requests for jobs
# without any status change
self._last_states = {}
def get_persistent_config(self):
"""
Returns the values of all :py:attr:`persistent_attributes` of this instance in a dictionary.
"""
return {attr: getattr(self, attr) for attr in self.persistent_attributes}
def apply_config(self, config):
"""
        Sets all attributes in a dictionary *config* to this instance. This can be understood as the
counterpart of :py:meth:`get_persistent_config`.
"""
for attr, value in six.iteritems(config):
if hasattr(self, attr):
setattr(self, attr, value)
@contextmanager
def rate_guard(self):
"""
Context guard that ensures that decorated contexts are delayed in order to limit the number
of status publications per second, defined by :py:attr:`max_rate`. Example:
.. code-block:: python
# print some numbers, which will take 10 / max_rate seconds
for i in range(10):
with self.rate_guard():
print(i)
"""
now = 0.
if self.max_rate > 0:
now = time.time()
diff = self._last_event_time + 1. / self.max_rate - now
if diff > 0:
time.sleep(diff)
try:
yield
finally:
self._last_event_time = now
def remote_hook_file(self):
"""
This method can return the path to a file that is considered as an input file to remote
jobs. This file can contain bash functions, environment variables, etc., that are necessary
to communicate with the implemented job dashboard. When *None* is returned, no file is sent.
"""
return None
def remote_hook_data(self, job_num, attempt):
"""
This method can return a dictionary that is sent with remote jobs in the format
``key1=value1 key2=value2 ...``. The returned dictionary should (but does not have to)
include the job number *job_num* and the retry *attempt*.
"""
return None
def create_tracking_url(self):
"""
This method can return a tracking url that refers to a web page that visualizes jobs. When
set, the url is shown in the central luigi scheduler.
"""
return None
@abstractmethod
def map_status(self, job_status, event):
"""
Maps the *job_status* (see :py:class:`law.job.base.BaseJobManager`) for a particular *event*
        to the status name that is accepted by the implemented job dashboard. Possible events are:
- action.submit
- action.cancel
- status.pending
- status.running
- status.finished
- status.retry
- status.failed
"""
return
@abstractmethod
def publish(self, job_data, event, job_num, *args, **kwargs):
"""
Publishes the status of a job to the implemented job dashboard. *job_data* is a dictionary
that contains a *job_id* and a *status* string (see
:py:meth:`law.workflow.remote.StatusData.job_data`).
"""
return
BaseJobDashboard.cache_by_status = staticmethod(cache_by_status)
class NoJobDashboard(BaseJobDashboard):
"""
    Null job dashboard implementation. Instances of this class do not actually publish any job
status. It can rather be used as a placeholder in situations where a job dashboard is required,
such as in :py:class:`law.workflow.remote.BaseRemoteWorkflow`.
"""
def map_status(self, *args, **kwargs):
""""""
return
def publish(self, *args, **kwargs):
""""""
return
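# A minimal concrete dashboard sketch (illustrative only, not part of law itself): it
# shows how the module-level cache_by_status decorator and rate_guard are typically
# combined in an implementation of BaseJobDashboard.publish.
class PrintJobDashboard(BaseJobDashboard):
    """
    Example dashboard that maps every event to its raw name and simply prints
    publications, relying on :py:func:`cache_by_status` to suppress repeated
    identical states.
    """

    def map_status(self, job_status, event):
        # assume the "dashboard" accepts the raw event name as a status
        return event

    @cache_by_status
    def publish(self, job_data, event, job_num, *args, **kwargs):
        with self.rate_guard():
            print("job {} ({}): {}".format(job_num, job_data["job_id"], event))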
|
"""Output formatters."""
import os
from pathlib import Path
from typing import TYPE_CHECKING, Generic, TypeVar, Union
import rich
if TYPE_CHECKING:
from ansiblelint.errors import MatchError
T = TypeVar('T', bound='BaseFormatter')
class BaseFormatter(Generic[T]):
"""Formatter of ansible-lint output.
Base class for output formatters.
Args:
base_dir (str|Path): reference directory against which display relative path.
display_relative_path (bool): whether to show path as relative or absolute
"""
def __init__(self, base_dir: Union[str, Path], display_relative_path: bool) -> None:
"""Initialize a BaseFormatter instance."""
if isinstance(base_dir, str):
base_dir = Path(base_dir)
if base_dir: # can be None
base_dir = base_dir.absolute()
# Required 'cause os.path.relpath() does not accept Path before 3.6
if isinstance(base_dir, Path):
base_dir = str(base_dir) # Drop when Python 3.5 is no longer supported
self._base_dir = base_dir if display_relative_path else None
def _format_path(self, path: Union[str, Path]) -> str:
# Required 'cause os.path.relpath() does not accept Path before 3.6
if isinstance(path, Path):
path = str(path) # Drop when Python 3.5 is no longer supported
if not self._base_dir:
return path
# Use os.path.relpath 'cause Path.relative_to() misbehaves
return os.path.relpath(path, start=self._base_dir)
def format(self, match: "MatchError") -> str:
return str(match)
def escape(self, text: str) -> str:
"""Escapes a string to avoid processing it as markup."""
return rich.markup.escape(text)
class Formatter(BaseFormatter):
def format(self, match: "MatchError") -> str:
_id = getattr(match.rule, 'id', '000')
result = (
f"[error_code]{_id}[/][dim]:[/] [error_title]{self.escape(match.message)}[/]")
if match.tag:
result += f" [dim][error_code]({match.tag})[/][/]"
result += (
"\n"
f"[filename]{self._format_path(match.filename or '')}[/]:{match.position}")
if match.details:
result += f" [dim]{match.details}[/]"
result += "\n"
return result
class QuietFormatter(BaseFormatter):
def format(self, match: "MatchError") -> str:
return (
f"[error_code]{match.rule.id}[/] "
f"[filename]{self._format_path(match.filename or '')}[/]:{match.position}")
class ParseableFormatter(BaseFormatter):
"""Parseable uses PEP8 compatible format."""
def format(self, match: "MatchError") -> str:
result = (
f"[filename]{self._format_path(match.filename or '')}[/]:{match.position}: "
f"[error_code]E{match.rule.id}[/] [dim]{self.escape(match.message)}[/]")
if match.tag:
result += f" [dim][error_code]({match.tag})[/][/]"
return result
class AnnotationsFormatter(BaseFormatter):
# https://docs.github.com/en/actions/reference/workflow-commands-for-github-actions#setting-a-warning-message
"""Formatter for emitting violations as GitHub Workflow Commands.
These commands instruct the GitHub Actions workflow runners to post violations
in the form of GitHub Checks API annotations that appear rendered in the
pull-request files view.
::debug file={name},line={line},col={col},severity={severity}::{message}
::warning file={name},line={line},col={col},severity={severity}::{message}
::error file={name},line={line},col={col},severity={severity}::{message}
Supported levels: debug, warning, error
"""
def format(self, match: "MatchError") -> str:
"""Prepare a match instance for reporting as a GitHub Actions annotation."""
level = self._severity_to_level(match.rule.severity)
file_path = self._format_path(match.filename or "")
line_num = match.linenumber
rule_id = match.rule.id
severity = match.rule.severity
violation_details = self.escape(match.message)
if match.column:
col = f",col={match.column}"
else:
col = ""
return (
f"::{level} file={file_path},line={line_num}{col},severity={severity}"
f"::E{rule_id} {violation_details}"
)
@staticmethod
def _severity_to_level(severity: str) -> str:
if severity in ['VERY_LOW', 'LOW']:
return 'warning'
if severity in ['INFO']:
return 'debug'
# ['MEDIUM', 'HIGH', 'VERY_HIGH'] or anything else
return 'error'
class ParseableSeverityFormatter(BaseFormatter):
def format(self, match: "MatchError") -> str:
filename = self._format_path(match.filename or "")
position = match.position
rule_id = u"E{0}".format(match.rule.id)
severity = match.rule.severity
message = self.escape(str(match.message))
return (
f"[filename]{filename}[/]:{position}: [[error_code]{rule_id}[/]] "
f"[[error_code]{severity}[/]] [dim]{message}[/]")
|
import pytest
from pytest import approx
from math import radians, inf
import pymap3d as pm
@pytest.mark.parametrize(
"geodetic_lat,alt_m,geocentric_lat",
[(0, 0, 0), (90, 0, 90), (-90, 0, -90), (45, 0, 44.80757678), (-45, 0, -44.80757678)],
)
def test_geodetic_alt_geocentric(geodetic_lat, alt_m, geocentric_lat):
assert pm.geod2geoc(geodetic_lat, alt_m) == approx(geocentric_lat)
r = pm.geocentric_radius(geodetic_lat)
assert pm.geoc2geod(geocentric_lat, r) == approx(geodetic_lat)
assert pm.geoc2geod(geocentric_lat, 1e5 + r) == approx(
pm.geocentric2geodetic(geocentric_lat, 1e5 + alt_m)
)
assert pm.geod2geoc(geodetic_lat, 1e5 + alt_m) == approx(
pm.geodetic2geocentric(geodetic_lat, 1e5 + alt_m)
)
@pytest.mark.parametrize(
"geodetic_lat,geocentric_lat",
[(0, 0), (90, 90), (-90, -90), (45, 44.80757678), (-45, -44.80757678)],
)
def test_geodetic_geocentric(geodetic_lat, geocentric_lat):
assert pm.geodetic2geocentric(geodetic_lat, 0) == approx(geocentric_lat)
assert pm.geodetic2geocentric(radians(geodetic_lat), 0, deg=False) == approx(
radians(geocentric_lat)
)
assert pm.geocentric2geodetic(geocentric_lat, 0) == approx(geodetic_lat)
assert pm.geocentric2geodetic(radians(geocentric_lat), 0, deg=False) == approx(
radians(geodetic_lat)
)
def test_numpy_geodetic_geocentric():
pytest.importorskip("numpy")
assert pm.geodetic2geocentric([45, 0], 0) == approx([44.80757678, 0])
assert pm.geocentric2geodetic([44.80757678, 0], 0) == approx([45, 0])
@pytest.mark.parametrize(
"geodetic_lat, isometric_lat",
[(0, 0), (90, inf), (-90, -inf), (45, 50.227466), (-45, -50.227466), (89, 271.275)],
)
def test_geodetic_isometric(geodetic_lat, isometric_lat):
isolat = pm.geodetic2isometric(geodetic_lat)
assert isolat == approx(isometric_lat)
assert isinstance(isolat, float)
assert pm.geodetic2isometric(radians(geodetic_lat), deg=False) == approx(radians(isometric_lat))
assert pm.isometric2geodetic(isometric_lat) == approx(geodetic_lat)
assert pm.isometric2geodetic(radians(isometric_lat), deg=False) == approx(radians(geodetic_lat))
def test_numpy_geodetic_isometric():
pytest.importorskip("numpy")
assert pm.geodetic2isometric([45, 0]) == approx([50.227466, 0])
assert pm.isometric2geodetic([50.227466, 0]) == approx([45, 0])
@pytest.mark.parametrize(
"geodetic_lat,conformal_lat",
[(0, 0), (90, 90), (-90, -90), (45, 44.80768406), (-45, -44.80768406), (89, 88.99327)],
)
def test_geodetic_conformal(geodetic_lat, conformal_lat):
clat = pm.geodetic2conformal(geodetic_lat)
assert clat == approx(conformal_lat)
assert isinstance(clat, float)
assert pm.geodetic2conformal(radians(geodetic_lat), deg=False) == approx(radians(conformal_lat))
assert pm.conformal2geodetic(conformal_lat) == approx(geodetic_lat)
assert pm.conformal2geodetic(radians(conformal_lat), deg=False) == approx(radians(geodetic_lat))
def test_numpy_geodetic_conformal():
pytest.importorskip("numpy")
assert pm.geodetic2conformal([45, 0]) == approx([44.80768406, 0])
assert pm.conformal2geodetic([44.80768406, 0]) == approx([45, 0])
@pytest.mark.parametrize(
"geodetic_lat,rectifying_lat",
[(0, 0), (90, 90), (-90, -90), (45, 44.855682), (-45, -44.855682)],
)
def test_geodetic_rectifying(geodetic_lat, rectifying_lat):
assert pm.geodetic2rectifying(geodetic_lat) == approx(rectifying_lat)
assert pm.geodetic2rectifying(radians(geodetic_lat), deg=False) == approx(
radians(rectifying_lat)
)
assert pm.rectifying2geodetic(rectifying_lat) == approx(geodetic_lat)
assert pm.rectifying2geodetic(radians(rectifying_lat), deg=False) == approx(
radians(geodetic_lat)
)
def test_numpy_geodetic_rectifying():
pytest.importorskip("numpy")
assert pm.geodetic2rectifying([45, 0]) == approx([44.855682, 0])
assert pm.rectifying2geodetic([44.855682, 0]) == approx([45, 0])
@pytest.mark.parametrize(
"geodetic_lat,authalic_lat",
[(0, 0), (90, 90), (-90, -90), (45, 44.87170288), (-45, -44.87170288)],
)
def test_geodetic_authalic(geodetic_lat, authalic_lat):
assert pm.geodetic2authalic(geodetic_lat) == approx(authalic_lat)
assert pm.geodetic2authalic(radians(geodetic_lat), deg=False) == approx(radians(authalic_lat))
assert pm.authalic2geodetic(authalic_lat) == approx(geodetic_lat)
assert pm.authalic2geodetic(radians(authalic_lat), deg=False) == approx(radians(geodetic_lat))
def test_numpy_geodetic_authalic():
pytest.importorskip("numpy")
assert pm.geodetic2authalic([45, 0]) == approx([44.87170288, 0])
assert pm.authalic2geodetic([44.87170288, 0]) == approx([45, 0])
@pytest.mark.parametrize(
"geodetic_lat,parametric_lat",
[(0, 0), (90, 90), (-90, -90), (45, 44.9037878), (-45, -44.9037878)],
)
def test_geodetic_parametric(geodetic_lat, parametric_lat):
assert pm.geodetic2parametric(geodetic_lat) == approx(parametric_lat)
assert pm.geodetic2parametric(radians(geodetic_lat), deg=False) == approx(
radians(parametric_lat)
)
assert pm.parametric2geodetic(parametric_lat) == approx(geodetic_lat)
assert pm.parametric2geodetic(radians(parametric_lat), deg=False) == approx(
radians(geodetic_lat)
)
def test_numpy_geodetic_parametric():
pytest.importorskip("numpy")
assert pm.geodetic2parametric([45, 0]) == approx([44.9037878, 0])
assert pm.parametric2geodetic([44.9037878, 0]) == approx([45, 0])
@pytest.mark.parametrize("lat", [91, -91])
def test_badvals(lat):
# geodetic_isometric is not included on purpose
with pytest.raises(ValueError):
pm.geodetic2geocentric(lat, 0)
with pytest.raises(ValueError):
pm.geocentric2geodetic(lat, 0)
with pytest.raises(ValueError):
pm.geodetic2conformal(lat)
with pytest.raises(ValueError):
pm.conformal2geodetic(lat)
with pytest.raises(ValueError):
pm.geodetic2rectifying(lat)
with pytest.raises(ValueError):
pm.rectifying2geodetic(lat)
with pytest.raises(ValueError):
pm.geodetic2authalic(lat)
with pytest.raises(ValueError):
pm.authalic2geodetic(lat)
with pytest.raises(ValueError):
pm.geodetic2parametric(lat)
with pytest.raises(ValueError):
pm.parametric2geodetic(lat)
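# Standalone cross-check (not part of pymap3d): the 45 -> 44.80757678 degree pair used
# above follows from the closed-form geocentric-latitude relation on the WGS84
# ellipsoid (assumed here to be pymap3d's default), tan(psi) = (1 - e**2) * tan(phi).
def test_geocentric_closed_form():
    from math import atan, degrees, tan

    e2 = 0.00669437999014  # WGS84 first eccentricity squared (assumed constant)
    psi = degrees(atan((1 - e2) * tan(radians(45))))
    assert psi == approx(44.80757678)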
|
"""An Abstract Base Class for Managers
"""
import abc
from py2c.utils import verify_attribute
__all__ = ["Manager"]
class Manager(object, metaclass=abc.ABCMeta):
"""Base class of all managers
"""
def __init__(self):
super().__init__()
verify_attribute(self, "options", dict)
@abc.abstractmethod # coverage: no partial
def run(self, options, *args, **kwargs):
"""Perform the task that manager is supposed to do.
Arguments:
options
A dictionary object with the relevant options and their
values.
"""
raise NotImplementedError()
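# Hypothetical illustration (not part of py2c): a concrete manager would declare an
# `options` dict (so the verify_attribute check in __init__ passes) and implement run.
class EchoManager(Manager):
    """Minimal example manager that simply hands the options back."""

    options = {}  # assumed to be the attribute verify_attribute inspects

    def run(self, options, *args, **kwargs):
        return options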
|
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/022_data.mixed.ipynb (unless otherwise specified).
__all__ = ['MixedDataLoader', 'MixedDataLoaders', 'get_mixed_dls']
# Cell
from ..imports import *
# Cell
# This implementation of a mixed dataloader is based on a great implementation created by Zach Mueller in this fastai thread:
# https://forums.fast.ai/t/combining-tabular-images-in-fastai2-and-should-work-with-almost-any-other-type/73197
from packaging import version
from fastai.data.load import _FakeLoader
from torch.utils.data.dataloader import _MultiProcessingDataLoaderIter, _SingleProcessDataLoaderIter, _DatasetKind
_loaders = (_MultiProcessingDataLoaderIter, _SingleProcessDataLoaderIter)
class MixedDataLoader():
def __init__(self, *loaders, path='.', shuffle=False, device=None, bs=None):
"Accepts any number of `DataLoader` and a device"
self.path = path
device = ifnone(device, default_device())
self.device = device
self.c = None
self.d = None
self.bs = ifnone(bs, min([dl.bs for dl in loaders]))
for i, dl in enumerate(loaders): # ensure all dls have the same bs
if hasattr(dl, 'vars'):
self.vars = dl.vars
if hasattr(dl, 'len'):
self.len = dl.len
if hasattr(dl, 'split_idxs'):
self.split_idxs = dl.split_idxs
dl.bs = self.bs
dl.shuffle_fn = self.shuffle_fn
if self.c is None and hasattr(dl, "c"):
self.c = dl.c
if self.d is None and hasattr(dl, "d"):
self.d = dl.d
if i == 0:
self.dataset = dl.dataset
dl.to(device=device)
self.shuffle = shuffle
if not self.shuffle:
self.rng = np.arange(len(self.dataset)).tolist()
self.loaders = loaders
self.count = 0
self.fake_l = _FakeLoader(self, False, 0, 0, 0) if version.parse(
fastai.__version__) >= version.parse("2.1") else _FakeLoader(self, False, 0, 0)
if sum([len(dl.dataset) for dl in loaders]) > 0:
self._get_idxs() # Do not apply on an empty dataset
def new(self, *args, **kwargs):
loaders = [dl.new(*args, **kwargs) for dl in self.loaders]
return type(self)(*loaders, path=self.path, device=self.device)
# def __len__(self): return len(self.loaders[0])
def __len__(self): return self.loaders[0].__len__()
def _get_vals(self, x):
"Checks for duplicates in batches"
idxs, new_x = [], []
for i, o in enumerate(x):
x[i] = o.cpu().numpy().flatten()
for idx, o in enumerate(x):
if not self._arrayisin(o, new_x):
idxs.append(idx)
new_x.append(o)
return idxs
def _get_idxs(self):
"Get `x` and `y` indices for batches of data"
self.n_inps = [dl.n_inp for dl in self.loaders]
self.x_idxs = self._split_idxs(self.n_inps)
# Identify duplicate targets
dl_dict = dict(zip(range(0, len(self.loaders)), self.n_inps))
outs = L([])
for key, n_inp in dl_dict.items():
b = next(iter(self.loaders[key]))
outs += L(b[n_inp:])
self.y_idxs = self._get_vals(outs)
def __iter__(self):
z = zip(*[_loaders[i.fake_l.num_workers == 0](i.fake_l) for i in self.loaders])
for b in z:
inps = []
outs = []
if self.device is not None:
b = to_device(b, self.device)
for batch, dl in zip(b, self.loaders):
if hasattr(dl, 'idxs'): self.idxs = dl.idxs
if hasattr(dl, 'input_idxs'): self.input_idxs = dl.input_idxs
batch = dl.after_batch(batch)
inps += batch[:dl.n_inp]
outs += batch[dl.n_inp:]
inps = tuple([tuple(L(inps)[idx]) if isinstance(idx, list) else inps[idx]
for idx in self.x_idxs]) if len(self.x_idxs) > 1 else tuple(L(outs)[self.x_idxs][0])
outs = tuple(L(outs)[self.y_idxs]) if len(self.y_idxs) > 1 else L(outs)[self.y_idxs][0]
yield inps, outs
def one_batch(self):
"Grab one batch of data"
with self.fake_l.no_multiproc():
res = first(self)
if hasattr(self, 'it'):
delattr(self, 'it')
return res
def shuffle_fn(self, idxs):
"Generate the same idxs for all dls in each batch when shuffled"
if self.count == 0:
self.shuffled_idxs = np.random.permutation(idxs)
# sort each batch
for i in range(len(self.shuffled_idxs)//self.bs + 1):
self.shuffled_idxs[i*self.bs:(i+1)*self.bs] = np.sort(self.shuffled_idxs[i*self.bs:(i+1)*self.bs])
self.count += 1
if self.count == len(self.loaders):
self.count = 0
return self.shuffled_idxs
def show_batch(self):
"Show a batch of data"
for dl in self.loaders:
dl.show_batch()
def to(self, device): self.device = device
def _arrayisin(self, arr, arr_list):
"Checks if `arr` is in `arr_list`"
for a in arr_list:
if np.array_equal(arr, a):
return True
return False
def _split_idxs(self, a):
a_cum = np.array(a).cumsum().tolist()
b = np.arange(sum(a)).tolist()
start = 0
b_ = []
for i in range(len(a)):
end = a_cum[i]
b_.append(b[start:end] if end - start > 1 else b[start])
start = end
return b_
class MixedDataLoaders(DataLoaders):
pass
# Cell
def get_mixed_dls(*dls, device=None, shuffle_train=None, shuffle_valid=None, **kwargs):
_mixed_train_dls = []
_mixed_valid_dls = []
for dl in dls:
_mixed_train_dls.append(dl.train)
_mixed_valid_dls.append(dl.valid)
if shuffle_train is None: shuffle_train = dl.train.shuffle
if shuffle_valid is None: shuffle_valid = dl.valid.shuffle
if device is None: device = dl.train.device
mixed_train_dl = MixedDataLoader(*_mixed_train_dls, shuffle=shuffle_train, **kwargs)
mixed_valid_dl = MixedDataLoader(*_mixed_valid_dls, shuffle=shuffle_valid, **kwargs)
mixed_dls = MixedDataLoaders(mixed_train_dl, mixed_valid_dl, device=device)
return mixed_dls
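# Usage sketch (the `tab_dls` and `ts_dls` names below are placeholders, not defined
# in this module): combine two fastai DataLoaders built over the same samples so each
# batch yields one input group per loader and a single, de-duplicated target.
#
#   mixed_dls = get_mixed_dls(tab_dls, ts_dls, bs=64)
#   xb, yb = mixed_dls.train.one_batch()   # xb: tuple of per-loader inputs, yb: shared target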
|
"""
GENERATED FILE - DO NOT EDIT (created via @build_stack_rules_proto//cmd/depsgen)
"""
load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
def _maybe(repo_rule, name, **kwargs):
if name not in native.existing_rules():
repo_rule(name = name, **kwargs)
def prebuilt_protoc_deps():
prebuilt_protoc_linux() # via <TOP>
prebuilt_protoc_osx() # via <TOP>
prebuilt_protoc_windows() # via <TOP>
def prebuilt_protoc_linux():
_maybe(
http_archive,
name = "prebuilt_protoc_linux",
sha256 = "6003de742ea3fcf703cfec1cd4a3380fd143081a2eb0e559065563496af27807",
urls = [
"https://github.com/google/protobuf/releases/download/v3.6.1/protoc-3.6.1-linux-x86_64.zip",
],
build_file_content = """
filegroup(
name = "protoc",
srcs = ["bin/protoc"],
visibility = ["//visibility:public"],
)
""",
)
def prebuilt_protoc_osx():
_maybe(
http_archive,
name = "prebuilt_protoc_osx",
sha256 = "0decc6ce5beed07f8c20361ddeb5ac7666f09cf34572cca530e16814093f9c0c",
urls = [
"https://github.com/google/protobuf/releases/download/v3.6.1/protoc-3.6.1-osx-x86_64.zip",
],
build_file_content = """
filegroup(
name = "protoc",
srcs = ["bin/protoc"],
visibility = ["//visibility:public"],
)
""",
)
def prebuilt_protoc_windows():
_maybe(
http_archive,
name = "prebuilt_protoc_windows",
sha256 = "0decc6ce5beed07f8c20361ddeb5ac7666f09cf34572cca530e16814093f9c0c",
urls = [
"https://github.com/google/protobuf/releases/download/v3.6.1/protoc-3.6.1-win32.zip",
],
build_file_content = """
filegroup(
name = "protoc",
srcs = ["bin/protoc.exe"],
visibility = ["//visibility:public"],
)
""",
)
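# Usage note (assumption, not emitted by depsgen): after calling prebuilt_protoc_deps()
# from a WORKSPACE file, the unpacked compiler can be referenced by labels such as
# "@prebuilt_protoc_linux//:protoc" (or "@prebuilt_protoc_windows//:protoc" on Windows).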
|
'''
Timings Class
Holds arrival and departure times for all Route Sections on a Route for a particular
schedule, showing the time into each section and the time out of it
Model Operations Processing System. Copyright Brian Fairbairn 2009-2010. Licenced under the EUPL.
You may not use this work except in compliance with the Licence. You may obtain a copy of the
Licence at http://ec.europa.eu/idabc/eupl or as attached with this application (see Licence file).
Unless required by applicable law or agreed to in writing, software distributed under the Licence
is distributed on an 'AS IS' basis WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either expressed
or implied. See the Licence governing permissions and limitations under the Licence.
Changes:
15/08/2010 Ver 1 Removed unused variables
Added handling of bad database return codes
'''
import MOPS_Element
class cTimings(MOPS_Element.cElement):
"""Details about Timings. Inherits from ListHandler class.
Timings are contained in fixed-length data records.
Id 10 Automatically generated reference
Section 10 link to Section that timing is for
Schedule 10 Link to Schedule
DepartStation 10 Copied from Route Section.
ArrivalStation 10 Copied from Route Section.
PlannedDepartTime 12 Planned departure time from station
PlannedArriveTime 12 Planned arrival time at station
"""
extract_code = 'select * from timings'
extract_header = 'id|section|schedule|depart_station|arrive_station|planned_depart|planned_arrive\n'
def adtims(self, message):
"""add timings to a section. this is a basic addition process;
other facilities will help copy/duplicate timings. this process is a special
process as, having been given a schedule, it will prompt for subsequent departure
and arrival times until the route is complete. the process can be abandoned by
entering an x at the input prompt
"""
if self.show_access(message, 'ADTIMS schedule', 'S') != 0:
return
#schedule code -----------------------------------------------------------------------------
schedule, rc = self.extract_field(message, 0, 'SCHEDULE CODE')
if rc > 0:
return
#check it exists
data = (schedule, 'I')
sql = 'select id, direction, route from schedule where schedule = ? and status = ?'
count, dummy = self.db_read(sql, data)
if count < 0:
return
if count == 0:
print('* SCHEDULE CODE DOES NOT EXIST OR NOT IN INACTIVE STATUS')
return
print('SCHEDULE ENTRY MODE: ENTER TIME HHMM OR <X> TO QUIT')
data = (schedule,)
sql = 'select id, section, depart_station, arrive_station from timings ' +\
'where schedule = ? order by id'
count, ds_timings = self.db_read(sql, data)
if count < 0:
return
last_time = '0000'
for timing_row in ds_timings:
#build the input prompt strings
depart_station = timing_row[2]
arrive_station = timing_row[3]
t2 = (depart_station,)
sql = 'select short_name from station where station = ?'
count, ds_departs = self.db_read(sql, t2)
if count < 0:
return
for station_row in ds_departs:
depart_name = station_row[0]
t2 = (arrive_station,)
sql = 'select short_name from station where station = ?'
count, ds_arrives = self.db_read(sql, t2)
if count < 0:
return
for station_row in ds_arrives:
arrive_name = station_row[0]
#get the departing time
re_enter = True
while re_enter:
new_time = raw_input('TIME DEPARTING ' + depart_station + ' ' + depart_name + ' >')
if new_time == 'x':
print('EXITING INPUT OF TIMINGS FOR SCHEDULE')
return
if self.validate_time(new_time, last_time) == 0:
departure_time = new_time
last_time = new_time
re_enter = False
#get the arriving time
re_enter = True
while re_enter:
new_time = raw_input('TIME ARRIVING ' + arrive_station + ' ' + arrive_name + ' >')
if new_time == 'x':
print('EXITING INPUT OF TIMINGS FOR SCHEDULE')
return
if self.validate_time(new_time, last_time) == 0:
arrival_time = new_time
last_time = new_time
re_enter = False
data = (departure_time, arrival_time, timing_row[0])
sql = 'update timings set planned_depart = ?, planned_arrive = ? where id = ?'
if self.db_update(sql, data) != 0:
return
print('UPDATE OF SCHEDULE TIMINGS FOR ' + schedule + ' COMPLETED')
return
def chtims(self, message):
"""allows changes to the timings of an individual section. This routine can also
be used for batch loading times from a file. Enter the schedule, section and depart
and arrive times. note that there is no validation on timings on previous or
following sections, only within the section itself.
"""
if self.show_access(message, 'CHTIMS schedule;section;depart;arrive', 'S') != 0:
return
#schedule code -----------------------------------------------------------------------------
schedule, rc = self.extract_field(message, 0, 'SCHEDULE CODE')
if rc > 0:
return
#read the database
data = (schedule, 'I')
sql = 'select id from schedule where schedule = ? and status = ?'
count, dummy = self.db_read(sql, data)
if count < 0:
return
if count == 0:
print('* SCHEDULE DOES NOT EXIST OR IS ACTIVE AND CANNOT BE AMENDED')
return
#section code-------------------------------------------------------------------------------
section, rc = self.extract_field(message, 1, 'SECTION CODE')
if rc > 0:
return
#read the database
data = (schedule, section)
sql = 'select depart_station, arrive_station, id from timings ' +\
'where schedule = ? and section = ?'
count, ds_sections = self.db_read(sql, data)
if count < 0:
return
if count == 0:
print('* SCHEDULE/SECTION DOES NOT EXIST')
return
for row in ds_sections:
departing = row[0]
arriving = row[1]
timings_id = row[2]
#depart time -----------------------------------------------------------------
depart_time, rc = self.extract_field(message, 2, 'DEPARTURE TIME')
if rc > 0:
return
if len(depart_time) != 4:
print('* TIME MUST BE ENTERED IN FORMAT HHMM')
return
hours = int(depart_time[0:2])
if hours < 0 or hours > 23:
print('* HOURS MUST BE ENTERED IN RANGE 00-23')
return
minutes = int(depart_time[2:4])
if minutes < 0 or minutes > 59:
print('* MINUTES MUST BE ENTERED IN RANGE 00-59')
return
#arrival time -----------------------------------------------------------------
arrive_time, rc = self.extract_field(message, 3, 'ARRIVAL TIME')
if rc > 0:
return
if self.validate_time(arrive_time, depart_time) != 0:
return
#carry out the update and report ----------------------------------------------
data = (depart_time, arrive_time, timings_id)
sql = 'update timings set planned_depart = ?, planned_arrive = ? where id = ?'
if self.db_update(sql, data) != 0:
return
print('SCHEDULE TIMINGS CHANGED FOR: ' + schedule, departing + ':' + depart_time + ' ' + arriving + ':' + arrive_time)
return
def validate_time(self, hhmm, prev_time):
"""internal routine to validate a given time to make sure it corresponds
to an hhmm format. if a previous_time is entered then it makes sure that the
new time is not earlier, unless the previous time is after 2100 (9pm) and the new time is
before 0300 (3am), in which case a new day is assumed
"""
if len(hhmm) != 4:
print('* TIME MUST BE ENTERED IN FORMAT HHMM')
return 1
try:
hours = int(hhmm[0:2])
if hours < 0 or hours > 23:
print('* HOURS MUST BE ENTERED IN RANGE 00-23')
return 2
minutes = int(hhmm[2:4])
if minutes < 0 or minutes > 59:
print('* MINUTES MUST BE ENTERED IN RANGE 00-59')
return 3
except:
print('* TIME MUST BE ENTERED IN MINUTES AND HOURS')
return 5
if prev_time > '2100':
if hhmm < '0300':
return 0
if hhmm < prev_time:
print('* NEW TIME MUST BE LATER THAN PREVIOUS TIME')
return 4
return 0
def timing(self, message):
"""Lists times and associated information for a schedule, including station type,
instructions
"""
if self.show_access(message, 'TIMING schedule', 'R') != 0:
return
#schedule code -----------------------------------------------------------------------------
schedule, rc = self.extract_field(message, 0, 'SCHEDULE CODE')
if rc > 0:
return
#get the schedule detail to display
data = (schedule,)
sql = 'select name, direction, status, route, run_days from schedule where schedule = ?'
count, ds_schedules = self.db_read(sql, data)
if count < 0:
return
if count == 0:
print('NO SCHEDULE TO DISPLAY')
return
else:
for row in ds_schedules:
schedule_name = row[0]
schedule_dirn = row[1]
schedule_stat = row[2]
schedule_route = row[3]
schedule_days = row[4]
data = (schedule_route,)
sql = 'select default_direction from route where route = ?'
count, ds_routes = self.db_read(sql, data)
if count < 0:
return
for row in ds_routes:
default_direction = row[0]
if schedule_dirn == 'N':
direction = 'NORTH'
elif schedule_dirn == 'S':
direction = 'SOUTH'
elif schedule_dirn == 'E':
direction = 'EAST'
elif schedule_dirn == 'W':
direction = 'WEST'
elif schedule_dirn == 'U':
direction = 'UP'
elif schedule_dirn == 'D':
direction = 'DOWN'
else:
direction = 'NOT KNOWN'
if schedule_stat == 'I':
status = 'INACTIVE'
elif schedule_stat == 'A':
status = 'ACTIVE'
elif schedule_stat == 'R':
status = 'RUNNING'
else:
status = 'NOT KNOWN'
rundays = ''
if schedule_days[0:1] == '1':
rundays = ' MON'
if schedule_days[1:2] == '2':
rundays = rundays + ' TUE'
if schedule_days[2:3] == '3':
rundays = rundays + ' WED'
if schedule_days[3:4] == '4':
rundays = rundays + ' THU'
if schedule_days[4:5] == '5':
rundays = rundays + ' FRI'
if schedule_days[5:6] == '6':
rundays = rundays + ' SAT'
if schedule_days[6:7] == '7':
rundays = rundays + ' SUN'
if schedule_days[7:8] == '8':
rundays = rundays + ' HOL'
print('SCHEDULE:', schedule, schedule_name,' (SCHEDULE STATUS:' + status + ')')
print('DIRECTION:',direction, ' RUNS:', rundays)
data = (schedule,)
sql = 'select instruction from instructions where schedule = ?'
count, ds_instructions = self.db_read(sql, data)
for row in ds_instructions:
print(' - ', row[0])
data = (schedule_route,)
sql = 'select instruction from instructions where route = ?'
count, ds_instructions = self.db_read(sql, data)
for row in ds_instructions:
print(' - ', row[0])
print(' ' )
# build the column titles ------------------------------------------
titles = self.x_field('STATION===', self.staxsize) + ' ' + \
self.x_field('NAME====', 8) + ' ' +\
self.x_field('TYPE======', self.statsize) + ' ' +\
self.x_field('=ARR', 4) + ' ' +\
self.x_field('=DEP', 4) + ' ' +\
self.x_field('INSTRUCTIONS =========================', 40)
data = (schedule,)
if default_direction == schedule_dirn:
sql = 'select id, section, depart_station, arrive_station, planned_depart, ' +\
'planned_arrive from timings where schedule = ? order by section'
else:
sql = 'select id, section, depart_station, arrive_station, planned_depart, ' +\
'planned_arrive from timings where schedule = ? order by section DESC'
timing_count, ds_timings = self.db_read(sql, data)
if timing_count < 0:
return
#report the extracted data -----------------------------------------
line_count = 0
arrival = ' '
depart_station = ''
arrive_station = ''
arrive_name = ''
depart_name = ''
station_type = ''
planned_arrive = ''
dummy = ''
instructions = ''
for row in ds_timings:
depart_station = row[2]
arrive_station = row[3]
planned_depart = row[4]
planned_arrive = row[5]
if line_count == 0:
print(titles)
#get the name for the departure station
data = (depart_station,)
sql = 'select short_name, stationtype from station where station = ?'
stax_count, ds_departs = self.db_read(sql, data)
if stax_count < 0:
return
for stax_row in ds_departs:
depart_name = stax_row[0]
station_type = stax_row[1]
#get any station instructions - just print the first one
sql = 'select instruction from instructions where station = ? limit 1'
count, ds_instructions = self.db_read(sql, data)
instructions = ' '
for inst_row in ds_instructions:
instructions = inst_row[0]
if not(planned_depart.strip() == '' and planned_arrive.strip() == ''):
print(self.x_field(row[2], self.staxsize) + " " +
self.x_field(depart_name, 8) + " " +
self.x_field(station_type, self.statsize) + " " +
self.x_field(arrival, 4) + " " +
self.x_field(row[4], 4) + " " +
self.x_field(instructions, 40))
arrival = planned_arrive
#get any station instructions - now print the rest
sql = 'select instruction from instructions where station = ?'
count, ds_instructions = self.db_read(sql, data)
line = 0
dummy = ' '
for inst_row in ds_instructions:
line = line + 1
instructions = inst_row[0]
if line != 1:
print(self.x_field(dummy, self.staxsize) + " " +
self.x_field(dummy, 8) + " " +
self.x_field(dummy, self.statsize) + " " +
self.x_field(dummy, 4) + " " +
self.x_field(dummy, 4) + " " +
self.x_field(instructions, 40))
line_count = line_count + 1
if line_count > 20:
line_count = 0
reply = raw_input('+')
if reply == 'x':
break
#get the long name for the arrive station (for the last entry)
sql = 'select short_name, stationtype from station where station = ?'
data = (arrive_station,)
stax_count, ds_arrives = self.db_read(sql, data)
for stax_row in ds_arrives:
arrive_name = stax_row[0]
station_type = stax_row[1]
#get any station instructions - just print the first one
sql = 'select instruction from instructions where station = ? limit 1'
instructions = ' '
count, ds_instructions = self.db_read(sql, data)
for row in ds_instructions:
instructions = row[0]
print(self.x_field(arrive_station, self.staxsize) + " " +
self.x_field(arrive_name, 8) + " " +
self.x_field(station_type, self.statsize) + " " +
self.x_field(planned_arrive, 4) + " " +
self.x_field(dummy, 4) + " " +
self.x_field(instructions, 40))
#get any station instructions - now print the rest
sql = 'select instruction from instructions where station = ?'
count, ds_instructions = self.db_read(sql, data)
line = 0
for row in ds_instructions:
line = line + 1
instructions = row[0]
if line != 1:
print(self.x_field(dummy, self.staxsize) + " " +
self.x_field(dummy, 8) + " " +
self.x_field(dummy, self.statsize) + " " +
self.x_field(dummy, 4) + " " +
self.x_field(dummy, 4) + " " +
self.x_field(instructions, 40))
print(' ** END OF DATA: ' + str(timing_count) + ' RECORDS DISPLAYED **')
return
def ldtims(self, message):
"""Gives detail of Timing records for checking timetables vs routes
"""
if self.show_access(message, 'LDTIMS schedule', 'R') != 0:
return
#schedule code -----------------------------------------------------------------------------
schedule, rc = self.extract_field(message, 0, 'SCHEDULE CODE')
if rc > 0:
return
#get the schedule detail to display
data = (schedule,)
sql = 'select name, direction, status, route from schedule where schedule = ?'
count, ds_schedules = self.db_read(sql, data)
if count < 0:
return
if count == 0:
print('NO SCHEDULE TO DISPLAY')
return
else:
for row in ds_schedules:
schedule_name = row[0]
schedule_dirn = row[1]
schedule_stat = row[2]
if schedule_dirn == 'N':
direction = 'NORTH'
elif schedule_dirn == 'S':
direction = 'SOUTH'
elif schedule_dirn == 'E':
direction = 'EAST'
elif schedule_dirn == 'W':
direction = 'WEST'
elif schedule_dirn == 'U':
direction = 'UP'
elif schedule_dirn == 'D':
direction = 'DOWN'
else:
direction = 'NOT KNOWN'
if schedule_stat == 'I':
status = 'INACTIVE'
elif schedule_stat == 'A':
status = 'ACTIVE'
elif schedule_stat == 'R':
status = 'RUNNING'
else:
status = 'NOT KNOWN'
print('SCHEDULE: ', schedule, schedule_name,' (SCHEDULE STATUS: ' + status + ')')
print(' DIRECTION:',direction)
# build the column titles ------------------------------------------
titles = self.x_field('SECTION===', 10) + ' ' + \
self.x_field('DEPARTS===', self.staxsize) + ' ' +\
self.x_field('=DEP', 4) + ' ' +\
self.x_field('ARRIVES===', self.staxsize) + ' ' +\
self.x_field('=ARR', 4)
data = (schedule,)
sql = 'select id, section, depart_station, arrive_station, planned_depart, ' +\
'planned_arrive from timings where schedule = ? order by section'
timing_count, ds_timings = self.db_read(sql, data)
if timing_count < 0:
return
#report the extracted data -----------------------------------------
line_count = 0
for row in ds_timings:
section = row[1]
depart_station = row[2]
arrive_station = row[3]
planned_depart = row[4]
planned_arrive = row[5]
if line_count == 0:
print(titles)
print(self.x_field(section , 10) + " " +
self.x_field(depart_station, self.staxsize) + " " +
self.x_field(planned_depart, 4) + " " +
self.x_field(arrive_station, self.staxsize) + " " +
self.x_field(planned_arrive, 4))
line_count = line_count + 1
if line_count > 20:
line_count = 0
reply = raw_input('+')
if reply == 'x':
break
print(' ** END OF DATA: ' + str(timing_count) + ' RECORDS DISPLAYED **')
return
def prtims(self, message, Params):
"""Prints times and associated information for a schedule, including station type,
instructions
"""
if self.show_access(message, 'PRTIMS schedule', 'R') != 0:
return
#schedule code -----------------------------------------------------------------------------
schedule, rc = self.extract_field(message, 0, 'SCHEDULE CODE')
if rc > 0:
return
self.temp = {}
i = 0
#get the schedule detail to display
data = (schedule,)
sql = 'select name, direction, status, route from schedule where schedule = ?'
count, ds_schedules = self.db_read(sql, data)
if count < 0:
return
if count == 0:
print('NO SCHEDULE TO DISPLAY')
return
else:
for row in ds_schedules:
schedule_name = row[0]
schedule_dirn = row[1]
schedule_stat = row[2]
schedule_route = row[3]
if schedule_dirn == 'N':
direction = 'NORTH'
elif schedule_dirn == 'S':
direction = 'SOUTH'
elif schedule_dirn == 'E':
direction = 'EAST'
elif schedule_dirn == 'W':
direction = 'WEST'
elif schedule_dirn == 'U':
direction = 'UP'
elif schedule_dirn == 'D':
direction = 'DOWN'
else:
direction = 'NOT KNOWN'
if schedule_stat == 'I':
status = 'INACTIVE'
elif schedule_stat == 'A':
status = 'ACTIVE'
elif schedule_stat == 'R':
status = 'RUNNING'
else:
status = 'NOT KNOWN'
print_line = ('SCHEDULE: ' + schedule + ' ' + schedule_name +' (SCHEDULE STATUS:' + status + ')')
self.temp[i]= print_line
i = i + 1
print_line = (' DIRECTION: ' + direction)
self.temp[i]= print_line
i = i + 1
t = (schedule,)
sql = 'select instruction from instructions where schedule = ?'
count, ds_instructions = self.db_read(sql, t)
for row in ds_instructions:
print_line = (' - ' + row[0])
self.temp[i]= print_line
i = i + 1
t = (schedule_route,)
sql = 'select instruction from instructions where route = ?'
count, ds_instructions = self.db_read(sql, t)
for row in ds_instructions:
print_line = (' - ' + row[0])
self.temp[i]= print_line
i = i + 1
print_line = (' ' )
self.temp[i]= print_line
i = i + 1
# build the column titles ------------------------------------------
titles = self.x_field('STATION===', self.staxsize) + ' ' + \
self.x_field('NAME====', 8) + ' ' +\
self.x_field('TYPE======', self.statsize) + ' ' +\
self.x_field('=ARR', 4) + ' ' +\
self.x_field('=DEP', 4) + ' ' +\
self.x_field('INSTRUCTIONS =========================', 40)
data = (schedule,)
sql = 'select id, section, depart_station, arrive_station, planned_depart, ' +\
'planned_arrive from timings where schedule = ? order by id'
timing_count, ds_timings = self.db_read(sql, data)
if timing_count < 0:
return
#report the extracted data -----------------------------------------
arrival = ' '
for row in ds_timings:
depart_station = row[2]
arrive_station = row[3]
planned_depart = row[4]
planned_arrive = row[5]
#get the name for the departure station
data = (depart_station,)
sql = 'select short_name, stationtype from station where station = ?'
stax_count, ds_departs = self.db_read(sql, data)
if stax_count < 0:
return
for stax_row in ds_departs:
depart_name = stax_row[0]
station_type = stax_row[1]
#get any station instructions - just print the first one
sql = 'select instruction from instructions where station = ? limit 1'
count, ds_instructions = self.db_read(sql, data)
instructions = ' '
for inst_row in ds_instructions:
instructions = inst_row[0]
if not(planned_depart.strip() == '' and planned_arrive.strip() == ''):
print_line = (self.x_field(depart_station, self.staxsize) + ' ' +
self.x_field(depart_name, 8) + ' ' +
self.x_field(station_type, self.statsize) + ' ' +
self.x_field(arrival, 4) + ' ' +
self.x_field(planned_depart, 4) + ' ' +
self.x_field(instructions, 40))
arrival = planned_arrive
self.temp[i]= print_line
i = i + 1
#get any station instructions - now print the rest
sql = 'select instruction from instructions where station = ?'
count, ds_instructions = self.db_read(sql, data)
line = 0
dummy = ' '
for inst_row in ds_instructions:
line = line + 1
instructions = inst_row[0]
if line != 1:
print_line = (self.x_field(dummy, self.staxsize) + ' ' +
self.x_field(dummy, 8) + ' ' +
self.x_field(dummy, self.statsize) + ' ' +
self.x_field(dummy, 4) + ' ' +
self.x_field(dummy, 4) + ' ' +
self.x_field(instructions, 40))
self.temp[i]= print_line
i = i + 1
#get the long name for the arrive station (for the last entry)
sql = 'select short_name, stationtype from station where station = ?'
data = (arrive_station,)
stax_count, ds_arrives = self.db_read(sql, data)
for stax_row in ds_arrives:
arrive_name = stax_row[0]
station_type = stax_row[1]
#get any station instructions - just print the first one
sql = 'select instruction from instructions where station = ? limit 1'
instructions = ' '
count, ds_instructions = self.db_read(sql, data)
for row in ds_instructions:
instructions = row[0]
print_line = (self.x_field(arrive_station, self.staxsize) + ' ' +
self.x_field(arrive_name, 8) + ' ' +
self.x_field(station_type, self.statsize) + ' ' +
self.x_field(planned_arrive, 4) + ' ' +
self.x_field(dummy, 4) + ' ' +
self.x_field(instructions, 40))
self.temp[i]= print_line
i = i + 1
#get any station instructions - now print the rest
sql = 'select instruction from instructions where station = ?'
count, ds_instructions = self.db_read(sql, data)
line = 0
for row in ds_instructions:
line = line + 1
instructions = row[0]
if line != 1:
print_line = (self.x_field(dummy, self.staxsize) + ' ' +
self.x_field(dummy, 8) + ' ' +
self.x_field(dummy, self.statsize) + ' ' +
self.x_field(dummy, 4) + ' ' +
self.x_field(dummy, 4) + ' ' +
self.x_field(instructions, 40))
self.temp[i]= print_line
i = i + 1
#report the extracted data ---------------------------------------
self.print_report (titles = titles,
report_id = 'PRTIMS',
report_name = 'TIMETABLE FOR ' + schedule,
Params = Params)
return
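# Standalone sketch (hypothetical helper, not called by MOPS): the HHMM rule that
# cTimings.validate_time applies above, including the assumed overnight rollover
# (previous time after 2100 and new time before 0300 starts a new day).
def hhmm_follows(hhmm, prev_time):
    if len(hhmm) != 4 or not hhmm.isdigit():
        return False
    hours, minutes = int(hhmm[:2]), int(hhmm[2:])
    if not (0 <= hours <= 23 and 0 <= minutes <= 59):
        return False
    if prev_time > '2100' and hhmm < '0300':
        return True  # new day assumed, matching validate_time
    return hhmm >= prev_time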
|
import numpy as np
from graphidx.idx import BiAdjacent
def square():
head = np.array([0, 0, 1, 2])
tail = np.array([1, 2, 3, 3])
return BiAdjacent(head, tail)
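# The head/tail arrays above list the undirected edges (0,1), (0,2), (1,3) and (2,3),
# i.e. a 4-cycle; BiAdjacent therefore reports m = 4 edges over n = 4 nodes.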
def test_square():
neigh = square()
assert repr(neigh) == "BiAdjacent[m = 4, n = 4]"
assert set(neigh[0]) == {1, 2}
assert set(neigh[1]) == {0, 3}
assert set(neigh[2]) == {0, 3}
assert set(neigh[3]) == {1, 2}
def test_1():
head = np.array([0, 1, 2, 3], dtype=np.int32)
tail = np.array([1, 3, 1, 2], dtype=np.int32)
index = BiAdjacent(head, tail)
assert repr(index) == "BiAdjacent[m = 4, n = 4]"
i2 = index[2]
assert len(i2) == 2
assert list(i2) == [1, 3]
assert list(index[0]) == [1]
assert list(index[1]) == [0, 3, 2]
|
##############################################################################
#
# Copyright (c) 2001 Zope Foundation and Contributors
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this
# distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE
#
##############################################################################
""" PluginRegistry interface declarations
"""
from zope.interface import Interface
class IPluginRegistry(Interface):
""" Manage a set of plugin definitions, grouped by type.
"""
def listPluginTypeInfo():
""" Return a sequence of mappings describing our plugin types.
o Keys for the mappings must include:
'id' -- a string used to identify the plugin type (should be
the __name__ of the interface)
'interface' -- the plugin type interface
'methods' -- the methods expected by the plugin type interface
'title' -- a display title for the plugin type
'description' -- a description of what the plugins do
"""
def listPlugins(plugin_type):
""" Return a sequence of tuples, one for each plugin of the given type.
o 'plugin_type' must be one of the known types, else raise KeyError.
o Tuples will be of the form, '(plugin_id, plugin)'.
"""
def listPluginIds(plugin_type):
""" Return a sequence of plugin ids
o Return ids for each active plugin of the given type.
o 'plugin_type' must be one of the known types, else raise KeyError.
"""
def activatePlugin(plugin_type, plugin_id):
""" Activate a plugin of the given type.
o 'plugin_type' must be one of the known types, else raise KeyError.
o 'plugin_id' must be the ID of an available plugin, else raise
KeyError.
o Append 'plugin_id' to the list of active plugins for the given
'plugin_type'.
"""
def deactivatePlugin(plugin_type, plugin_id):
""" Deactivate a plugin of the given type.
o 'plugin_type' must be one of the known types, else raise KeyError.
o 'plugin_id' must be an ID of an existing plugin of that type,
else raise KeyError.
"""
def movePluginsUp(plugin_type, ids_to_move):
""" Move a set of plugins "up" in their list.
o 'plugin_type' must be one of the known types, else raise KeyError.
o 'ids_to_move' must be a sequence of ids of current plugins
for that type.
- If any item is not the ID of a current plugin, raise ValueError.
"""
def movePluginsTop(plugin_type, ids_to_move):
""" Move a set of plugins to the "top" in their list.
o 'plugin_type' must be one of the known types, else raise KeyError.
o 'ids_to_move' must be a sequence of ids of current plugins
for that type.
- If any item is not the ID of a current plugin, raise ValueError.
- Moving a single plugin to the top has the obvious result; moving
several plugins to the top inserts them at the top one by one, in other
words the last id in 'ids_to_move' ends up first in the list.
"""
def movePluginsDown(plugin_type, ids_to_move):
""" Move a set of plugins "down" in their list.
o 'plugin_type' must be one of the known types, else raise KeyError.
o 'ids_to_move' must be a sequence of ids of current plugins
for that type.
- If any item is not the ID of a current plugin, raise ValueError.
"""
|
# -*- coding: utf-8 -
#
# This file is part of gunicorn released under the MIT license.
# See the NOTICE for more information.
try:
import unittest.mock as mock
except ImportError:
import mock
from gunicorn import sock
@mock.patch('os.stat')
def test_create_sockets_unix_bytes(stat):
conf = mock.Mock(address=[b'127.0.0.1:8000'])
log = mock.Mock()
with mock.patch.object(sock.UnixSocket, '__init__', lambda *args: None):
listeners = sock.create_sockets(conf, log)
assert len(listeners) == 1
print(type(listeners[0]))
assert isinstance(listeners[0], sock.UnixSocket)
@mock.patch('os.stat')
def test_create_sockets_unix_strings(stat):
conf = mock.Mock(address=['127.0.0.1:8000'])
log = mock.Mock()
with mock.patch.object(sock.UnixSocket, '__init__', lambda *args: None):
listeners = sock.create_sockets(conf, log)
assert len(listeners) == 1
assert isinstance(listeners[0], sock.UnixSocket)
def test_socket_close():
listener1 = mock.Mock()
listener1.getsockname.return_value = ('127.0.0.1', '80')
listener2 = mock.Mock()
listener2.getsockname.return_value = ('192.168.2.5', '80')
sock.close_sockets([listener1, listener2])
listener1.close.assert_called_with()
listener2.close.assert_called_with()
@mock.patch('os.unlink')
def test_unix_socket_close_unlink(unlink):
listener = mock.Mock()
listener.getsockname.return_value = '/var/run/test.sock'
sock.close_sockets([listener])
listener.close.assert_called_with()
unlink.assert_called_once_with('/var/run/test.sock')
@mock.patch('os.unlink')
def test_unix_socket_close_without_unlink(unlink):
listener = mock.Mock()
listener.getsockname.return_value = '/var/run/test.sock'
sock.close_sockets([listener], False)
listener.close.assert_called_with()
assert not unlink.called, 'unlink should not have been called'
|
#!/usr/bin/env python3
###############################################################################
# #
# RMG - Reaction Mechanism Generator #
# #
# Copyright (c) 2002-2021 Prof. William H. Green (whgreen@mit.edu), #
# Prof. Richard H. West (r.west@neu.edu) and the RMG Team (rmg_dev@mit.edu) #
# #
# Permission is hereby granted, free of charge, to any person obtaining a #
# copy of this software and associated documentation files (the 'Software'), #
# to deal in the Software without restriction, including without limitation #
# the rights to use, copy, modify, merge, publish, distribute, sublicense, #
# and/or sell copies of the Software, and to permit persons to whom the #
# Software is furnished to do so, subject to the following conditions: #
# #
# The above copyright notice and this permission notice shall be included in #
# all copies or substantial portions of the Software. #
# #
# THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR #
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, #
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE #
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER #
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING #
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER #
# DEALINGS IN THE SOFTWARE. #
# #
###############################################################################
"""
This module provides classes for fitting atom energies based on a very
small, predetermined set of molecules.
"""
import importlib
import json
import logging
from collections import Counter
from typing import Dict, Hashable, List, Union
import numpy as np
from scipy.stats import distributions
from rmgpy import constants
from rmgpy.molecule import get_element, Molecule
import arkane.encorr.data as data
from arkane.encorr.reference import ReferenceDatabase
from arkane.modelchem import LevelOfTheory, CompositeLevelOfTheory
# List of species labels that will be used for fitting (labels should match reference database)
SPECIES_LABELS = [
'Dihydrogen',
'Dinitrogen',
'Dioxygen',
'Disulfur',
'Difluorine',
'Dichlorine',
'Dibromine',
'Hydrogen fluoride',
'Hydrogen chloride',
'Hydrogen bromide',
'Hydrogen sulfide',
'Water',
'Methane',
'Methyl',
'Ammonia',
'Chloromethane'
]
class AEJob:
"""
A job for fitting atom energies.
"""
def __init__(self,
species_energies: Dict[str, float],
level_of_theory: Union[LevelOfTheory, CompositeLevelOfTheory] = None,
write_to_database: bool = False,
overwrite: bool = False):
"""
Initialize an AEJob instance.
Notes:
The species energies should be provided as a dictionary
containing the species labels as keys and their single-
point electronic energies in Hartree as values. The
energies should be calculated using the experimental
geometry provided for the species in the reference
database, and the zero-point energy should not be included
in the electronic energy.
Args:
species_energies: Dictionary of species labels with single-point electronic energies (Hartree).
level_of_theory: Dictionary key for saving atom energies to the database.
write_to_database: Save the fitted atom energies directly to the RMG database.
overwrite: Overwrite atom energies in the RMG database if they already exist.
"""
self.spcs_energies = species_energies
self.level_of_theory = level_of_theory
self.write_to_database = write_to_database
self.overwrite = overwrite
self.ae = AE(species_energies)
def execute(self, output_file: str = None):
"""
Execute the atom energy job.
Args:
output_file: Write the fitted energies to this file.
"""
if self.level_of_theory is None:
logging.info('Fitting atom energies')
else:
logging.info(f'Fitting atom energies for {self.level_of_theory}')
self.ae.fit()
if output_file is not None:
with open(output_file, 'a') as f:
if self.level_of_theory is not None:
f.write(f'# {self.level_of_theory}\n')
for element, energy in self.ae.atom_energies.items():
f.write(f'# {element:2}: {energy:15.8f} +/- {self.ae.confidence_intervals[element]:.8f} Hartree\n')
f.writelines(self.ae.format_atom_energies(
'atom_energies' if self.level_of_theory is None else self.level_of_theory))
if self.write_to_database:
if self.level_of_theory is None:
raise Exception('Level of theory is required for writing to database')
try:
self.ae.write_to_database(self.level_of_theory, overwrite=self.overwrite)
except ValueError as e:
logging.warning('Could not write atom energies to database. Captured error:')
logging.warning(str(e))
class AE:
"""
A class for fitting atom energies.
"""
ref_data_src = 'CCCBDB' # Use CCCBDB data
ref_data = None # Dictionary of reference data entries
def __init__(self, species_energies: Dict[str, float]):
self.species_energies = species_energies # Hartree
self.atom_energies = None
self.confidence_intervals = None
for lbl in SPECIES_LABELS:
if lbl not in self.species_energies:
logging.warning(f'{lbl} missing from provided species energies!')
@classmethod
def _load_refdata(cls):
if cls.ref_data is None:
logging.info('Loading reference database')
db = ReferenceDatabase()
db.load()
cls.ref_data = {lbl: spc for lbl, spc in zip(SPECIES_LABELS, db.get_species_from_label(SPECIES_LABELS))}
def fit(self):
"""
Fit atom energies using the provided species energies and
corresponding atomization energies from the reference data.
"""
self._load_refdata()
mols = [
Molecule().from_adjacency_list(
self.ref_data[lbl].adjacency_list,
raise_atomtype_exception=False,
raise_charge_exception=False
) for lbl in self.species_energies
]
atom_counts = [Counter(atom.element.symbol for atom in mol.atoms) for mol in mols]
elements = sorted({element for ac in atom_counts for element in ac}, key=lambda s: get_element(s).number)
x = np.array([[ac[element] for element in elements] for ac in atom_counts]) # Nmols x Nelements
atomization_energies = np.array([
self.ref_data[lbl].reference_data[self.ref_data_src].atomization_energy.value_si
/ constants.E_h / constants.Na for lbl in self.species_energies
])
zpes = np.array([
self.ref_data[lbl].reference_data[self.ref_data_src].zpe.value_si
/ constants.E_h / constants.Na for lbl in self.species_energies
])
elec_energies = np.array(list(self.species_energies.values())) # Should already be in Hartree
y = atomization_energies + elec_energies + zpes
w = np.linalg.solve(x.T @ x, x.T @ y)
self.atom_energies = dict(zip(elements, w))
# Get confidence intervals
n = len(y) # Ndata
k = len(w) # Nparam
ypred = x @ w
sigma2 = np.sum((y - ypred)**2) / (n - k - 1) # MSE
cov = sigma2 * np.linalg.inv(x.T @ x) # covariance matrix
se = np.sqrt(np.diag(cov)) # standard error
alpha = 0.05 # 95% confidence level
tdist = distributions.t.ppf(1 - alpha/2, n - k - 1) # student-t
ci = tdist * se # confidence interval half-width
self.confidence_intervals = dict(zip(elements, ci)) # Parameter estimates are w +/- ci
def write_to_database(self, key: Hashable, overwrite: bool = False, alternate_path: str = None):
"""
Write atom energies to database.
Args:
key: Dictionary key to use for atom energies in database.
overwrite: Overwrite existing atom energies.
alternate_path: Write atom energies and existing database to this path instead.
"""
if self.atom_energies is None:
raise ValueError('No atom energies available for writing')
data_path = data.quantum_corrections_path
with open(data_path) as f:
lines = f.readlines()
ae_formatted = self.format_atom_energies(key, indent=True)
# Add new atom energies to file without changing existing formatting
for i, line in enumerate(lines):
if 'atom_energies' in line:
if key in data.atom_energies:
if overwrite:
# Does not overwrite comments
del_idx_start = del_idx_end = None
for j, line2 in enumerate(lines[i:]):
if repr(key) in line2:
del_idx_start = i + j
del_idx_end = None
elif line2.rstrip() == ' },': # Can't have a comment after final brace
del_idx_end = i + j + 1
if del_idx_start is not None and del_idx_end is not None:
if (lines[del_idx_start - 1].lstrip().startswith('#')
or lines[del_idx_end + 1].lstrip().startswith('#')):
logging.warning('There may be left over comments from previous atom energies')
lines[del_idx_start:del_idx_end] = ae_formatted
break
else:
raise ValueError(f'{key} already exists. Set `overwrite` to True.')
else:
lines[(i+1):(i+1)] = ['\n'] + ae_formatted
break
with open(data_path if alternate_path is None else alternate_path, 'w') as f:
f.writelines(lines)
# Reload data to update atom energy dictionary
if alternate_path is None:
importlib.reload(data)
def format_atom_energies(self, key: Hashable, indent: bool = False) -> List[str]:
"""
Obtain a list of nicely formatted atom energies suitable for
writelines.
Args:
key: Dictionary key to use for formatting dictionary.
indent: Indent each line.
Returns:
Formatted list of atom energies.
"""
ae_formatted = json.dumps(self.atom_energies, indent=4).replace('"', "'").split('\n')
ae_formatted[0] = f'"{key}": ' + ae_formatted[0]
ae_formatted[-1] += ','
ae_formatted = [e + '\n' for e in ae_formatted]
if indent:
ae_formatted = [' ' + e for e in ae_formatted]
return ae_formatted
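# Standalone numerical sketch (synthetic data, not RMG's reference set): the same
# normal-equation fit and Student-t confidence intervals that AE.fit() applies to
# the atom-count matrix and the atomization-energy vector above.
def _ols_with_ci_demo():
    atom_counts = np.array([  # 9 fake "molecules" x 3 fake "elements"
        [2., 0., 0.], [0., 2., 0.], [0., 0., 2.],
        [1., 1., 0.], [1., 0., 1.], [0., 1., 1.],
        [4., 1., 0.], [1., 3., 1.], [2., 1., 2.],
    ])
    w_true = np.array([-0.5, -37.8, -54.6])  # made-up "atom energies" in Hartree
    rng = np.random.default_rng(0)
    y = atom_counts @ w_true + rng.normal(scale=1e-4, size=len(atom_counts))
    w = np.linalg.solve(atom_counts.T @ atom_counts, atom_counts.T @ y)
    n, k = atom_counts.shape
    sigma2 = np.sum((y - atom_counts @ w) ** 2) / (n - k - 1)  # same divisor as AE.fit
    se = np.sqrt(np.diag(sigma2 * np.linalg.inv(atom_counts.T @ atom_counts)))
    ci = distributions.t.ppf(1 - 0.05 / 2, n - k - 1) * se  # 95% half-widths
    return dict(zip(['A', 'B', 'C'], w)), dict(zip(['A', 'B', 'C'], ci))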
|
import json
import os
from tempfile import mkstemp
import pytest
from guillotina import testing
from guillotina.commands import get_settings
from guillotina.commands.run import RunCommand
DATABASE = os.environ.get('DATABASE', 'DUMMY')
def test_run_command(command_arguments):
_, filepath = mkstemp(suffix='.py')
_, filepath2 = mkstemp()
with open(filepath, 'w') as fi:
fi.write(f'''
async def run(app):
with open("{filepath2}", 'w') as fi:
fi.write("foobar")
''')
command_arguments.script = filepath
command = RunCommand(command_arguments)
settings = testing.get_settings()
command.run_command(settings=settings)
with open(filepath2) as fi:
assert fi.read() == 'foobar'
@pytest.mark.skipif(DATABASE != 'postgres', reason="Cockroach does not have cascade support")
def test_run_command_with_container(command_arguments, container_command):
_, filepath = mkstemp(suffix='.py')
_, filepath2 = mkstemp()
with open(filepath, 'w') as fi:
fi.write(f'''
async def run(container):
with open("{filepath2}", 'w') as fi:
fi.write('foobar')
''')
command_arguments.script = filepath
command = RunCommand(command_arguments)
command.run_command(settings=container_command['settings'])
with open(filepath2) as fi:
assert fi.read() == 'foobar'
def test_get_settings():
settings = get_settings('doesnotexist.json', [
'foobar=foobar',
'foo.bar=foobar'
])
assert settings['foobar'] == 'foobar'
assert settings['foo']['bar'] == 'foobar'
def test_get_settings_with_environment_variables():
os.environ.update({
'G_foobar': 'foobar',
'G_foo__bar': 'foobar',
'G_foo__bar1__bar2': json.dumps({
'foo': 'bar'
})
})
settings = get_settings('doesnotexist.json')
assert settings['foobar'] == 'foobar'
assert settings['foo']['bar'] == 'foobar'
assert settings['foo']['bar1']['bar2'] == {'foo': 'bar'}
|
import torch
import torch.nn as nn
import torch.nn.quantized as nnq
from torch.quantization import QuantStub, DeQuantStub
import torchvision
import unittest
import os
from neural_compressor.adaptor import FRAMEWORKS
from neural_compressor.model import MODELS
from neural_compressor.adaptor.pytorch import PyTorchVersionMode
import neural_compressor.adaptor.pytorch as nc_torch
from neural_compressor.experimental import Quantization, common
from neural_compressor.conf.config import Quantization_Conf
from neural_compressor.utils.pytorch import load
from neural_compressor.utils.utility import recover
import shutil
import copy
import numpy as np
import yaml
try:
try:
import intel_pytorch_extension as ipex
except:
import intel_extension_for_pytorch as ipex
TEST_IPEX = True
except:
TEST_IPEX = False
PT_VERSION = nc_torch.get_torch_version()
if PT_VERSION >= PyTorchVersionMode.PT18.value:
FX_MODE = True
else:
FX_MODE = False
fake_dyn_yaml = '''
model:
name: imagenet
framework: pytorch
quantization:
approach: post_training_dynamic_quant
op_wise: {
'decoder': {
'activation': {'dtype': ['fp32']},
'weight': {'dtype': ['fp32']}
}
}
evaluation:
accuracy:
metric:
topk: 1
performance:
warmup: 5
iteration: 10
tuning:
accuracy_criterion:
relative: 0.01
exit_policy:
timeout: 0
random_seed: 9527
workspace:
path: saved
'''
fake_ptq_yaml = '''
model:
name: imagenet
framework: pytorch
quantization:
op_wise: {
'quant': {
'activation': {'dtype': ['fp32']},
'weight': {'dtype': ['fp32']}
},
'layer1.0.conv1': {
'activation': {'dtype': ['fp32']},
'weight': {'dtype': ['fp32']}
},
'layer1.0.conv2': {
'activation': {'dtype': ['fp32']},
'weight': {'dtype': ['fp32']}
},
'layer2.0.conv1': {
'activation': {'dtype': ['uint8'], 'algorithm': ['minmax'], 'granularity': ['per_tensor'], 'scheme':['sym']},
'weight': {'dtype': ['int8'], 'algorithm': ['minmax'], 'granularity': ['per_channel'], 'scheme':['sym']}
},
'layer3.0.conv1': {
'activation': {'dtype': ['uint8'], 'algorithm': ['kl'], 'granularity': ['per_tensor'], 'scheme':['sym']},
'weight': {'dtype': ['int8'], 'algorithm': ['minmax'], 'granularity': ['per_channel'], 'scheme':['sym']}
},
'layer1.0.add_relu': {
'activation': {'dtype': ['fp32']},
'weight': {'dtype': ['fp32']}
},
}
evaluation:
accuracy:
metric:
topk: 1
performance:
warmup: 1
iteration: 10
tuning:
accuracy_criterion:
relative: 0.01
exit_policy:
timeout: 0
random_seed: 9527
workspace:
path: saved
'''
fake_ptq_yaml_for_fx = '''
model:
name: imagenet
framework: pytorch_fx
quantization:
op_wise: {
'quant': {
'activation': {'dtype': ['fp32']},
'weight': {'dtype': ['fp32']}
},
'layer1.0.conv1': {
'activation': {'dtype': ['fp32']},
'weight': {'dtype': ['fp32']}
},
'layer1.0.conv2': {
'activation': {'dtype': ['fp32']},
'weight': {'dtype': ['fp32']}
},
'layer2.0.conv1': {
'activation': {'dtype': ['uint8'], 'algorithm': ['minmax'], 'granularity': ['per_tensor'], 'scheme':['sym']},
'weight': {'dtype': ['int8'], 'algorithm': ['minmax'], 'granularity': ['per_channel'], 'scheme':['sym']}
},
'layer3.0.conv1': {
'activation': {'dtype': ['uint8'], 'algorithm': ['kl'], 'granularity': ['per_tensor'], 'scheme':['sym']},
'weight': {'dtype': ['int8'], 'algorithm': ['minmax'], 'granularity': ['per_channel'], 'scheme':['sym']}
},
'layer1.0.add_relu': {
'activation': {'dtype': ['fp32']},
'weight': {'dtype': ['fp32']}
},
'default_qconfig': {
'activation': {'dtype': ['fp32']},
'weight': {'dtype': ['fp32']}
}
}
evaluation:
accuracy:
metric:
topk: 1
performance:
warmup: 5
iteration: 10
tuning:
accuracy_criterion:
relative: 0.01
exit_policy:
timeout: 0
random_seed: 9527
workspace:
path: saved
'''
fake_qat_yaml = '''
model:
name: imagenet
framework: pytorch
quantization:
approach: quant_aware_training
train:
end_epoch: 1
iteration: 1
optimizer:
SGD:
learning_rate: 0.0001
criterion:
CrossEntropyLoss:
reduction: mean
op_wise: {
'quant': {
'activation': {'dtype': ['fp32']},
'weight': {'dtype': ['fp32']}
},
'layer1.0.conv1': {
'activation': {'dtype': ['fp32']},
'weight': {'dtype': ['fp32']}
},
'layer1.0.conv2': {
'activation': {'dtype': ['fp32']},
'weight': {'dtype': ['fp32']}
},
'layer2.0.conv1': {
'activation': {'dtype': ['uint8'], 'algorithm': ['minmax'], 'granularity': ['per_tensor'], 'scheme':['sym']},
'weight': {'dtype': ['int8'], 'algorithm': ['minmax'], 'granularity': ['per_channel'], 'scheme':['sym']}
},
'layer3.0.conv1': {
'activation': {'dtype': ['uint8'], 'algorithm': ['kl'], 'granularity': ['per_tensor'], 'scheme':['sym']},
'weight': {'dtype': ['int8'], 'algorithm': ['minmax'], 'granularity': ['per_channel'], 'scheme':['sym']}
},
'layer1.0.add_relu': {
'activation': {'dtype': ['fp32']},
'weight': {'dtype': ['fp32']}
}
}
evaluation:
accuracy:
metric:
topk: 1
tuning:
accuracy_criterion:
relative: 0.01
exit_policy:
timeout: 0
random_seed: 9527
workspace:
path: saved
'''
def build_pytorch_yaml():
with open('ptq_yaml.yaml', 'w', encoding="utf-8") as f:
f.write(fake_ptq_yaml)
with open('dynamic_yaml.yaml', 'w', encoding="utf-8") as f:
f.write(fake_dyn_yaml)
with open('qat_yaml.yaml', 'w', encoding="utf-8") as f:
f.write(fake_qat_yaml)
def build_pytorch_fx_yaml():
if PT_VERSION >= PyTorchVersionMode.PT19.value:
fake_fx_ptq_yaml = fake_ptq_yaml_for_fx
else:
fake_fx_ptq_yaml = fake_ptq_yaml.replace('pytorch', 'pytorch_fx')
with open('fx_ptq_yaml.yaml', 'w', encoding="utf-8") as f:
f.write(fake_fx_ptq_yaml)
fake_fx_dyn_yaml = fake_dyn_yaml.replace('pytorch', 'pytorch_fx')
with open('fx_dynamic_yaml.yaml', 'w', encoding="utf-8") as f:
f.write(fake_fx_dyn_yaml)
fake_fx_qat_yaml = fake_qat_yaml.replace('pytorch', 'pytorch_fx')
with open('fx_qat_yaml.yaml', 'w', encoding="utf-8") as f:
f.write(fake_fx_qat_yaml)
def build_ipex_yaml():
fake_yaml = '''
model:
name: imagenet
framework: pytorch_ipex
evaluation:
accuracy:
metric:
topk: 1
performance:
warmup: 5
iteration: 10
tuning:
accuracy_criterion:
relative: 0.01
exit_policy:
timeout: 0
random_seed: 9527
workspace:
path: saved
'''
with open('ipex_yaml.yaml', 'w', encoding="utf-8") as f:
f.write(fake_yaml)
def build_dump_tensors_yaml():
fake_yaml = '''
model:
name: imagenet
framework: pytorch
evaluation:
accuracy:
metric:
topk: 1
tuning:
accuracy_criterion:
relative: 0.01
exit_policy:
timeout: 0
random_seed: 9527
workspace:
path: saved
tensorboard: true
'''
with open('dump_yaml.yaml', 'w', encoding="utf-8") as f:
f.write(fake_yaml)
class M(torch.nn.Module):
def __init__(self):
super().__init__()
self.quant = QuantStub()
self.conv = nn.Conv2d(3, 1, 1)
self.linear = nn.Linear(224 * 224, 5)
self.dequant = DeQuantStub()
def forward(self, x):
x = self.quant(x)
x = self.conv(x)
x = x.view(1, -1)
x = self.linear(x)
x = self.dequant(x)
return x
class FP32Model(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, x):
times = x.size(1)
if times == 1:
return x + x
return x
class DynamicModel(torch.nn.Module):
def __init__(self):
super().__init__()
self.conv = nn.Conv2d(1, 1, 1)
def forward(self, x):
if x is not None:
x = self.conv(x)
return x
class SubModel(torch.nn.Module):
def __init__(self, bypass=True):
super().__init__()
self.quant = QuantStub()
self.conv = nn.Conv2d(1, 1, 1)
self.conv1 = nn.Conv2d(1, 1, 1)
self.bn = nn.BatchNorm2d(1)
self.relu = nn.ReLU()
self.fp32 = FP32Model()
self.norm = nn.LayerNorm([1, 224, 224])
self.dequant = DeQuantStub()
self.bypass = bypass
def forward(self, x):
x = self.conv(x)
x = self.bn(x)
x = self.quant(x)
x = self.relu(x)
x = self.conv1(x)
x = self.dequant(x)
if not self.bypass:
x = self.fp32(x)
x = self.norm(x)
return x
class PartialQuantModel(torch.nn.Module):
def __init__(self):
super().__init__()
self.quant = QuantStub()
self.conv = nn.Conv2d(3, 1, 1)
self.bn = nn.BatchNorm2d(1)
self.conv1 = nn.Conv2d(1, 1, 1)
self.bn1 = nn.BatchNorm2d(1)
self.conv2 = nn.Conv2d(1, 1, 1)
self.linear = nn.Linear(224 * 224, 1)
self.dequant = DeQuantStub()
self.sub = SubModel(bypass=False)
def forward(self, x):
x = self.conv(x)
x = self.bn(x)
x = self.conv1(x)
x = self.bn1(x)
x = self.sub(x)
x = self.quant(x)
x = self.conv2(x)
x = x.view(1, -1)
x = self.linear(x)
x = self.dequant(x)
return x
class DynamicControlModel(torch.nn.Module):
def __init__(self):
super().__init__()
self.conv = nn.Conv2d(3, 1, 1)
self.bn = nn.BatchNorm2d(1)
self.linear = nn.Linear(224 * 224, 1)
self.sub = SubModel()
self.fp32 = FP32Model()
self.dyn = DynamicModel()
def forward(self, x):
x = self.conv(x)
x = self.dyn(x)
x = self.bn(x)
x = self.sub(x)
x = self.fp32(x)
x = x.view(1, -1)
x = self.linear(x)
return x
def eval_func(model):
# switch to evaluate mode
model.eval()
with torch.no_grad():
input = torch.randn(1, 3, 224, 224)
# compute output
output = model(input)
return 0.0
def q_func(model):
optimizer = torch.optim.SGD(model.parameters(), lr=0.0001)
    # switch to train mode
model.train()
input = torch.randn(1, 3, 224, 224)
# compute output
output = model(input)
loss = output.mean()
optimizer.zero_grad()
loss.backward()
optimizer.step()
return model
class TestPytorchAdaptor(unittest.TestCase):
framework_specific_info = {"device": "cpu",
"approach": "post_training_static_quant",
"random_seed": 1234,
"q_dataloader": None,
"workspace_path": "./"}
framework = "pytorch"
adaptor = FRAMEWORKS[framework](framework_specific_info)
model = torchvision.models.quantization.resnet18()
nc_model = MODELS['pytorch'](model)
@classmethod
def setUpClass(self):
build_pytorch_yaml()
build_dump_tensors_yaml()
@classmethod
def tearDownClass(self):
os.remove('ptq_yaml.yaml')
os.remove('dynamic_yaml.yaml')
os.remove('qat_yaml.yaml')
os.remove('dump_yaml.yaml')
shutil.rmtree('./saved', ignore_errors=True)
shutil.rmtree('runs', ignore_errors=True)
def test_get_all_weight_name(self):
assert len(list(self.nc_model.get_all_weight_names())) == 62
def test_get_weight(self):
for name, param in self.model.named_parameters():
if name == "layer4.1.conv2.weight":
param.data.fill_(0.0)
if name == "fc.bias":
param.data.fill_(0.1)
assert int(torch.sum(self.nc_model.get_weight("layer4.1.conv2.weight"))) == 0
assert torch.allclose(
torch.sum(
self.nc_model.get_weight("fc.bias")),
torch.tensor(100.))
def test_get_input(self):
model = MODELS['pytorch'](torchvision.models.quantization.resnet18())
model.model.eval().fuse_model()
model.register_forward_pre_hook()
rand_input = torch.rand(100, 3, 224, 224).float()
model.model(rand_input)
assert torch.equal(model.get_inputs('x'), rand_input)
model.remove_hooks()
def test_update_weights(self):
self.nc_model.update_weights('fc.bias', torch.zeros([1000]))
assert int(torch.sum(self.nc_model.get_weight("fc.bias"))) == 0
def test_get_gradient(self):
with self.assertRaises(AssertionError):
self.nc_model.get_gradient('fc.bias')
for name, tensor in self.nc_model._model.named_parameters():
if name == 'fc.bias':
tensor.grad = torch.zeros_like(tensor)
break
assert torch.equal(torch.Tensor(self.nc_model.get_gradient('fc.bias')), torch.zeros_like(tensor))
rand_input = torch.rand(100, 3, 224, 224).float()
rand_input.grad = torch.ones_like(rand_input)
assert torch.equal(torch.Tensor(self.nc_model.get_gradient(rand_input)),
torch.ones_like(rand_input))
def test_report_sparsity(self):
df, total_sparsity = self.nc_model.report_sparsity()
self.assertTrue(total_sparsity > 0)
self.assertTrue(len(df) == 22)
def test_quantization_saved(self):
for fake_yaml in ['dynamic_yaml.yaml', 'qat_yaml.yaml', 'ptq_yaml.yaml']:
model = M()
quantizer = Quantization(fake_yaml)
quantizer.conf.usr_cfg.tuning.exit_policy['performance_only'] = True
dataset = quantizer.dataset('dummy', (100, 3, 224, 224), label=True)
quantizer.model = model
quantizer.calib_dataloader = common.DataLoader(dataset)
quantizer.eval_dataloader = common.DataLoader(dataset)
q_model = quantizer.fit()
eval_func(q_model)
q_model.save('./saved')
            # Load configuration and weights by neural_compressor.utils
saved_model = load("./saved", model)
eval_func(saved_model)
# recover int8 model from history
history_file = './saved/history.snapshot'
model_recover = recover(model, history_file, 0)
eval_func(model_recover)
self.assertEqual(type(saved_model.conv), \
type(model_recover.conv))
shutil.rmtree('./saved', ignore_errors=True)
from neural_compressor.experimental import Benchmark
evaluator = Benchmark('ptq_yaml.yaml')
            # Load configuration and weights by neural_compressor.model
evaluator.model = model
evaluator.b_dataloader = common.DataLoader(dataset)
evaluator()
evaluator.model = model
evaluator()
for fake_yaml in ['qat_yaml.yaml', 'ptq_yaml.yaml']:
model = copy.deepcopy(self.model)
if fake_yaml == 'ptq_yaml.yaml':
model.eval().fuse_model()
conf = Quantization_Conf(fake_yaml)
quantizer = Quantization(conf)
dataset = quantizer.dataset('dummy', (100, 3, 224, 224))
quantizer.model = model
if fake_yaml == 'qat_yaml.yaml':
quantizer.q_func = q_func
else:
quantizer.calib_dataloader = common.DataLoader(dataset)
quantizer.eval_func = eval_func
q_model = quantizer.fit()
q_model.save('./saved')
            # Load configuration and weights by neural_compressor.utils
saved_model = load("./saved", model)
eval_func(saved_model)
shutil.rmtree('./saved', ignore_errors=True)
def test_quantization_new_saved(self):
for fake_yaml in ['dynamic_yaml.yaml', 'qat_yaml.yaml', 'ptq_yaml.yaml']:
model = M()
quantizer = Quantization(fake_yaml)
quantizer.conf.usr_cfg.tuning.exit_policy['performance_only'] = True
dataset = quantizer.dataset('dummy', (100, 3, 224, 224), label=True)
quantizer.model = model
quantizer.calib_dataloader = common.DataLoader(dataset)
quantizer.eval_dataloader = common.DataLoader(dataset)
q_model = quantizer.fit()
eval_func(q_model)
torch.save(q_model.quantized_state_dict(), './saved/model.pt')
            # Load configuration and weights by neural_compressor.utils
from neural_compressor.experimental.common import Model
common_model = Model(model)
common_model.load_quantized_state_dict(torch.load('./saved/model.pt'))
eval_func(common_model)
self.assertEqual(type(q_model._model.linear), \
type(common_model._model.linear))
shutil.rmtree('./saved', ignore_errors=True)
def test_non_quant_module(self):
for fake_yaml in ['qat_yaml.yaml', 'ptq_yaml.yaml']:
model = PartialQuantModel()
conf = Quantization_Conf(fake_yaml)
quantizer = Quantization(conf)
dataset = quantizer.dataset('dummy', (1, 3, 224, 224))
non_quant_dict = {'non_quant_module_name': ['conv', 'conv1', 'sub.conv'], \
'non_quant_module_class': ['BatchNorm2d', 'FP32Model']}
quantizer.model = common.Model(model, **non_quant_dict)
if fake_yaml == 'qat_yaml.yaml':
quantizer.q_func = q_func
else:
quantizer.calib_dataloader = common.DataLoader(dataset)
quantizer.eval_func = eval_func
q_model = quantizer.fit()
q_model.save('./saved')
saved_model = load("./saved", model, **non_quant_dict)
eval_func(saved_model)
shutil.rmtree('./saved', ignore_errors=True)
def test_workspace_path(self):
model = M()
quantizer = Quantization('ptq_yaml.yaml')
quantizer.conf.usr_cfg.tuning.exit_policy['performance_only'] = True
dataset = quantizer.dataset('dummy', (100, 3, 224, 224), label=True)
quantizer.model = model
quantizer.calib_dataloader = common.DataLoader(dataset)
quantizer.eval_dataloader = common.DataLoader(dataset)
q_model = quantizer.fit()
eval_func(q_model)
torch.save(q_model.quantized_state_dict(), './saved/best_model.pt')
        # Load configuration and weights by workspace_path
from neural_compressor.experimental.common import Model
common_model = Model(model)
common_model.workspace_path = './saved'
eval_func(common_model)
self.assertEqual(type(q_model._model.linear), \
type(common_model._model.linear))
shutil.rmtree('./saved', ignore_errors=True)
def test_get_graph_info(self):
from neural_compressor.model.torch_model import PyTorchModel
model = PyTorchModel(self.model)
op_map = model.graph_info
self.assertTrue(op_map['conv1'] == 'Conv2d')
def test_tensorboard(self):
model = copy.deepcopy(self.nc_model)
model.model.eval().fuse_model()
quantizer = Quantization('dump_yaml.yaml')
dataset = quantizer.dataset('dummy', (100, 3, 224, 224), label=True)
quantizer.model = model.model
quantizer.calib_dataloader = common.DataLoader(dataset)
quantizer.eval_func = eval_func
quantizer.fit()
        self.assertTrue(os.path.exists('runs/eval/baseline_acc0.0'))
quantizer.eval_dataloader = common.DataLoader(dataset)
quantizer.eval_func = None
quantizer.fit()
        self.assertTrue(os.path.exists('runs/eval/baseline_acc0.0'))
def test_tensor_dump_and_set(self):
model = copy.deepcopy(self.nc_model)
model.model.eval().fuse_model()
quantizer = Quantization('ptq_yaml.yaml')
dataset = quantizer.dataset('dummy', (100, 3, 224, 224), label=True)
dataloader = common.DataLoader(dataset)
dataloader = common._generate_common_dataloader(dataloader, 'pytorch')
quantizer.eval_dataloader = dataloader
quantizer.calib_dataloader = dataloader
quantizer.model = model.model
q_model = quantizer.fit()
quantizer.strategy.adaptor.inspect_tensor(
model, dataloader, op_list=['conv1.0', 'layer1.0.conv1.0'],
iteration_list=[1, 2], inspect_type='all', save_to_disk=True)
load_array = lambda *a, **k: np.load(*a, allow_pickle=True, **k)
a = load_array('saved/dump_tensor/activation_iter1.npz')
w = load_array('saved/dump_tensor/weight.npz')
if PT_VERSION >= PyTorchVersionMode.PT18.value:
self.assertTrue(w['conv1.0'].item()['conv1.0.weight'].shape[0] ==
a['conv1.0'].item()['conv1.0.output0'].shape[1])
else:
self.assertTrue(w['conv1.0'].item()['conv1.0.weight'].shape[0] ==
a['conv1.0'].item()['conv1.1.output0'].shape[1])
data = np.random.random(w['conv1.0'].item()['conv1.0.weight'].shape).astype(np.float32)
quantizer.strategy.adaptor.set_tensor(q_model, {'conv1.0.weight': data})
changed_tensor = q_model.get_weight('conv1.weight')
scales = changed_tensor.q_per_channel_scales()
changed_tensor_fp32 = torch.dequantize(changed_tensor)
self.assertTrue(np.allclose(data, changed_tensor_fp32.numpy(), atol=2 / np.min(scales.numpy())))
quantizer.strategy.adaptor.inspect_tensor(
q_model, dataloader, op_list=['conv1.0', 'layer1.0.conv1.0'],
iteration_list=[1, 2], inspect_type='all', save_to_disk=False)
    def test_get_ops_recursively(self):
from neural_compressor.adaptor.pytorch import get_ops_recursively
model = copy.deepcopy(self.model)
op_map = {}
get_ops_recursively(model, '', op_map)
self.assertTrue(op_map['conv1'] == 'Conv2d')
def test_forward_wrapper(self):
vision_model = torchvision.models.resnet18()
class dummymodel(torch.nn.Module):
def __init__(self, model):
super(dummymodel, self).__init__()
self._model = model
def forward(self,input=None):
return self._model(input)
data = [[{'input': torch.rand(3,224,224)}, torch.ones(1,1)], ]
# dataloader.batch_size=100
dataloader = common.DataLoader(data, batch_size=1)
quantizer = Quantization('dynamic_yaml.yaml')
model = dummymodel(vision_model)
quantizer.model = model
quantizer.calib_dataloader = dataloader
quantizer.eval_dataloader = dataloader
quantizer.fit()
def test_floatfunctions_fallback(self):
class ModelWithFunctionals(torch.nn.Module):
def __init__(self):
super(ModelWithFunctionals, self).__init__()
self.mycat = nnq.FloatFunctional()
self.myadd = nnq.FloatFunctional()
self.myadd_relu = nnq.FloatFunctional()
                # Tracing doesn't work yet for c10 ops with scalar inputs
# https://github.com/pytorch/pytorch/issues/27097
self.my_scalar_add = nnq.FloatFunctional()
self.mymul = nnq.FloatFunctional()
self.my_scalar_mul = nnq.FloatFunctional()
self.quant = QuantStub()
self.dequant = DeQuantStub()
def forward(self, x):
x = self.quant(x)
y = self.mycat.cat([x, x, x])
z = self.myadd.add(y, y)
w = self.myadd_relu.add_relu(z, z)
                # Tracing doesn't work yet for c10 ops with scalar inputs
# https://github.com/pytorch/pytorch/issues/27097
w = self.my_scalar_add.add_scalar(w, -0.5)
w = self.mymul.mul(w, w)
w = self.my_scalar_mul.mul_scalar(w, 0.5)
w = self.dequant(w)
return w
model = ModelWithFunctionals()
model = MODELS['pytorch'](model)
x = torch.rand(10, 1, dtype=torch.float)
y = model.model(x)
fallback_ops = []
q_capability = self.adaptor.query_fw_capability(model)
for k, v in q_capability["opwise"].items():
if k[0] != "quant" and k[0] != "dequant":
fallback_ops.append(k[0])
model.model.qconfig = torch.quantization.default_qconfig
model.model.quant.qconfig = torch.quantization.default_qconfig
if PT_VERSION >= PyTorchVersionMode.PT18.value:
model.model.dequant.qconfig = torch.quantization.default_qconfig
nc_torch._fallback_quantizable_ops_recursively(
model.model, '', fallback_ops, op_qcfgs={})
torch.quantization.add_observer_(model.model)
model.model(x)
torch.quantization.convert(model.model, self.adaptor.q_mapping, inplace=True)
qy = model.model(x)
tol = {'atol': 1e-01, 'rtol': 1e-03}
self.assertTrue(np.allclose(y, qy, **tol))
@unittest.skipIf(not TEST_IPEX, "Intel PyTorch Extension is not installed")
class TestPytorchIPEXAdaptor(unittest.TestCase):
@classmethod
def setUpClass(self):
build_ipex_yaml()
@classmethod
def tearDownClass(self):
os.remove('ipex_yaml.yaml')
shutil.rmtree('./saved', ignore_errors=True)
shutil.rmtree('runs', ignore_errors=True)
def test_tuning_ipex(self):
from neural_compressor.experimental import Quantization
model = M()
quantizer = Quantization('ipex_yaml.yaml')
quantizer.conf.usr_cfg.tuning.exit_policy['performance_only'] = True
dataset = quantizer.dataset('dummy', (100, 3, 224, 224), label=True)
quantizer.model = model
quantizer.calib_dataloader = common.DataLoader(dataset)
quantizer.eval_dataloader = common.DataLoader(dataset)
nc_model = quantizer.fit()
nc_model.save('./saved')
try:
script_model = torch.jit.script(model.to(ipex.DEVICE))
        except Exception:
script_model = torch.jit.trace(model.to(ipex.DEVICE), torch.randn(10, 3, 224, 224).to(ipex.DEVICE))
from neural_compressor.experimental import Benchmark
evaluator = Benchmark('ipex_yaml.yaml')
evaluator.model = script_model
evaluator.b_dataloader = common.DataLoader(dataset)
results = evaluator()
@unittest.skipIf(not FX_MODE, "FX mode is not supported with PyTorch versions below 1.8")
class TestPytorchFXAdaptor(unittest.TestCase):
@classmethod
def setUpClass(self):
build_pytorch_fx_yaml()
@classmethod
def tearDownClass(self):
os.remove('fx_ptq_yaml.yaml')
os.remove('fx_dynamic_yaml.yaml')
shutil.rmtree('./saved', ignore_errors=True)
shutil.rmtree('runs', ignore_errors=True)
def test_fx_quant(self):
for fake_yaml in ['fx_qat_yaml.yaml', 'fx_ptq_yaml.yaml']:
model_origin = torchvision.models.resnet18()
# run fx_quant in neural_compressor and save the quantized GraphModule
quantizer = Quantization(fake_yaml)
dataset = quantizer.dataset('dummy', (10, 3, 224, 224), label=True)
quantizer.eval_func = eval_func
if fake_yaml == 'fx_qat_yaml.yaml':
quantizer.q_func = q_func
else:
quantizer.calib_dataloader = common.DataLoader(dataset)
quantizer.model = common.Model(model_origin,
**{'prepare_custom_config_dict': \
{'non_traceable_module_name': ['a']},
'convert_custom_config_dict': \
{'preserved_attributes': []}
})
q_model = quantizer.fit()
q_model.save('./saved')
            # Load configuration and weights with neural_compressor.utils
model_fx = load('./saved', model_origin,
**{'prepare_custom_config_dict': \
{'non_traceable_module_name': ['a']},
'convert_custom_config_dict': \
{'preserved_attributes': []}
})
self.assertTrue(isinstance(model_fx, torch.fx.graph_module.GraphModule))
# recover int8 model with only tune_cfg
history_file = './saved/history.snapshot'
model_fx_recover = recover(model_origin, history_file, 0,
**{'prepare_custom_config_dict': \
{'non_traceable_module_name': ['a']},
'convert_custom_config_dict': \
{'preserved_attributes': []}
})
self.assertEqual(model_fx.code, model_fx_recover.code)
shutil.rmtree('./saved', ignore_errors=True)
for fake_yaml in ['fx_qat_yaml.yaml', 'fx_ptq_yaml.yaml']:
model_origin = M()
# run fx_quant in neural_compressor and save the quantized GraphModule
quantizer = Quantization(fake_yaml)
quantizer.conf.usr_cfg.tuning.exit_policy['performance_only'] = True
dataset = quantizer.dataset('dummy', (10, 3, 224, 224), label=True)
quantizer.calib_dataloader = common.DataLoader(dataset)
quantizer.eval_dataloader = common.DataLoader(dataset)
quantizer.model = common.Model(model_origin,
**{'prepare_custom_config_dict': \
{'non_traceable_module_name': ['a']},
'convert_custom_config_dict': \
{'preserved_attributes': []}
})
q_model = quantizer.fit()
q_model.save('./saved')
            # Load configuration and weights with neural_compressor.utils
model_fx = load('./saved', model_origin,
**{'prepare_custom_config_dict': \
{'non_traceable_module_name': ['a']},
'convert_custom_config_dict': \
{'preserved_attributes': []}
})
self.assertTrue(isinstance(model_fx, torch.fx.graph_module.GraphModule))
shutil.rmtree('./saved', ignore_errors=True)
@unittest.skipIf(PT_VERSION < PyTorchVersionMode.PT19.value,
"Please use PyTroch 1.9 or higher version for dynamic quantization with pytorch_fx backend")
def test_fx_dynamic_quant(self):
# Model Definition
class LSTMModel(nn.Module):
'''Container module with an encoder, a recurrent module, and a decoder.'''
def __init__(self, ntoken, ninp, nhid, nlayers, dropout=0.5):
super(LSTMModel, self).__init__()
self.drop = nn.Dropout(dropout)
self.encoder = nn.Embedding(ntoken, ninp)
self.rnn = nn.LSTM(ninp, nhid, nlayers, dropout=dropout)
self.decoder = nn.Linear(nhid, ntoken)
self.init_weights()
self.nhid = nhid
self.nlayers = nlayers
def init_weights(self):
initrange = 0.1
self.encoder.weight.data.uniform_(-initrange, initrange)
self.decoder.bias.data.zero_()
self.decoder.weight.data.uniform_(-initrange, initrange)
def forward(self, input, hidden):
emb = self.drop(self.encoder(input))
output, hidden = self.rnn(emb, hidden)
output = self.drop(output)
decoded = self.decoder(output)
return decoded, hidden
model = LSTMModel(
ntoken = 10,
ninp = 512,
nhid = 256,
nlayers = 5,
)
# run fx_quant in neural_compressor and save the quantized GraphModule
model.eval()
quantizer = Quantization('fx_dynamic_yaml.yaml')
quantizer.model = common.Model(model,
**{'prepare_custom_config_dict': \
{'non_traceable_module_name': ['a']},
'convert_custom_config_dict': \
{'preserved_attributes': []}
})
q_model = quantizer.fit()
q_model.save('./saved')
        # Load configuration and weights by neural_compressor.utils
model_fx = load("./saved", model,
**{'prepare_custom_config_dict': \
{'non_traceable_module_name': ['a']},
'convert_custom_config_dict': \
{'preserved_attributes': []}
})
self.assertTrue(isinstance(model_fx, torch.fx.graph_module.GraphModule))
# recover int8 model with only tune_cfg
history_file = './saved/history.snapshot'
model_fx_recover = recover(model, history_file, 0,
**{'prepare_custom_config_dict': \
{'non_traceable_module_name': ['a']},
'convert_custom_config_dict': \
{'preserved_attributes': []}
})
self.assertEqual(model_fx.code, model_fx_recover.code)
shutil.rmtree('./saved', ignore_errors=True)
def test_fx_sub_module_quant(self):
for fake_yaml in ['fx_qat_yaml.yaml', 'fx_ptq_yaml.yaml']:
model_origin = DynamicControlModel()
# run fx_quant in neural_compressor and save the quantized GraphModule
quantizer = Quantization(fake_yaml)
dataset = quantizer.dataset('dummy', (1, 3, 224, 224), label=True)
quantizer.eval_func = eval_func
if fake_yaml == 'fx_qat_yaml.yaml':
quantizer.q_func = q_func
else:
quantizer.calib_dataloader = common.DataLoader(dataset)
quantizer.model = common.Model(model_origin,
**{'prepare_custom_config_dict': \
{'non_traceable_module_name': ['a']},
'convert_custom_config_dict': \
{'preserved_attributes': []}
})
q_model = quantizer.fit()
q_model.save('./saved')
            # Load configuration and weights with neural_compressor.utils
model_fx = load('./saved/best_model.pt', model_origin,
**{'prepare_custom_config_dict': \
{'non_traceable_module_name': ['a']},
'convert_custom_config_dict': \
{'preserved_attributes': []}
})
self.assertTrue(isinstance(model_fx.sub, torch.fx.graph_module.GraphModule))
# recover int8 model with only tune_cfg
history_file = './saved/history.snapshot'
model_fx_recover = recover(model_origin, history_file, 0,
**{'prepare_custom_config_dict': \
{'non_traceable_module_name': ['a']},
'convert_custom_config_dict': \
{'preserved_attributes': []}
})
self.assertEqual(model_fx.sub.code, model_fx_recover.sub.code)
shutil.rmtree('./saved', ignore_errors=True)
if __name__ == "__main__":
unittest.main()
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2017-08-23 16:50
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Lekarstv',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=150, verbose_name='Name')),
                ('price', models.IntegerField(verbose_name='Price')),
                ('address', models.TextField(verbose_name='Pharmacy address')),
                ('photo', models.ImageField(blank=True, default='', upload_to='Lekarstv/images', verbose_name='Image')),
],
options={
                'verbose_name': 'Medicine',
                'verbose_name_plural': 'Medicines',
},
),
]
|
from resources import relu, learnFunc, dot
class HiddenBlock:
def __init__(self, weights, bias):
self.weights = weights
self.bias = bias
def feedForward(self, hidden_inputs):
output = [
relu(
dot(hidden_inputs, weights) + self.bias
)
for weights in self.weights]
return output
def train(self, hidden_inputs, hidden_errors):
error = sum(hidden_errors) / len(hidden_errors)
predictions = self.feedForward(hidden_inputs)
prevErrors = []
for y in range(len(self.weights)):
for x in range(len(self.weights[0])):
prevError = error*relu(predictions[y], deriv=True)*self.weights[y][x]
prevErrors.append(prevError)
for y in range(len(self.weights)):
for x in range(len(self.weights[0])):
update = error*relu(predictions[y], deriv=True)*hidden_inputs[x]
learn_rate = learnFunc(update)
self.weights[y][x] -= learn_rate*update
biasUpdate = 0
for x in range(len(self.weights)):
biasUpdate += error*relu(predictions[x], deriv=True)/len(predictions)
learn_rate = learnFunc(biasUpdate)
self.bias -= learn_rate*biasUpdate
return prevErrors
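# Illustrative usage sketch (added here, not part of the original module); it
# assumes the `relu`, `dot` and `learnFunc` helpers imported from `resources`
# behave like a standard ReLU, dot product and learning-rate schedule.
if __name__ == "__main__":
    block = HiddenBlock(weights=[[0.1, -0.2], [0.3, 0.4]], bias=0.05)
    outputs = block.feedForward([1.0, 2.0])              # one activation per weight row
    prev_errors = block.train([1.0, 2.0], [0.1, -0.05])  # errors for the previous layer
    print(outputs, prev_errors)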
|
# pep8: disable=E501
from __future__ import print_function
import collections
import os
import pandas
import shutil
import unittest
import pandas as pd
import sklearn.datasets as datasets
import tensorflow as tf
from mlflow import tensorflow, pyfunc
from mlflow import tracking
from mlflow.utils.file_utils import TempDir
class TestModelExport(unittest.TestCase):
def helper(self, feature_spec, tmp, estimator, df):
"""
        This function handles exporting, logging, loading back, and predicting on an estimator for
testing purposes.
"""
receiver_fn = tf.estimator.export.build_raw_serving_input_receiver_fn(feature_spec)
saved_estimator_path = tmp.path("model")
os.makedirs(saved_estimator_path)
# Saving TensorFlow model.
saved_estimator_path = estimator.export_savedmodel(saved_estimator_path,
receiver_fn).decode("utf-8")
# Logging the TensorFlow model just saved.
tensorflow.log_saved_model(saved_model_dir=saved_estimator_path,
signature_def_key="predict",
artifact_path=tmp.path("hello"))
# Loading the saved TensorFlow model as a pyfunc.
x = pyfunc.load_pyfunc(saved_estimator_path)
# Predicting on the dataset using the pyfunc.
return x.predict(df)
def test_log_saved_model(self):
# This tests model logging capabilities on the sklearn.iris dataset.
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features.
y = iris.target
trainingFeatures = {}
for i in range(0, 2):
# TensorFlow is fickle about feature names, so we remove offending characters
iris.feature_names[i] = iris.feature_names[i].replace(" ", "")
iris.feature_names[i] = iris.feature_names[i].replace("(", "")
iris.feature_names[i] = iris.feature_names[i].replace(")", "")
trainingFeatures[iris.feature_names[i]] = iris.data[:, i:i+1]
tf_feat_cols = []
feature_names = iris.feature_names[:2]
# Creating TensorFlow-specific numeric columns for input.
for col in iris.feature_names[:2]:
tf_feat_cols.append(tf.feature_column.numeric_column(col))
# Creating input training function.
input_train = tf.estimator.inputs.numpy_input_fn(trainingFeatures,
y,
shuffle=False,
batch_size=1)
# Creating Deep Neural Network Regressor.
estimator = tf.estimator.DNNRegressor(feature_columns=tf_feat_cols,
hidden_units=[1])
# Training and creating expected predictions on training dataset.
estimator.train(input_train, steps=10)
# Saving the estimator's prediction on the training data; assume the DNNRegressor
# produces a single output column named 'predictions'
pred_col = "predictions"
estimator_preds = [s[pred_col] for s in estimator.predict(input_train)]
estimator_preds_df = pd.DataFrame({pred_col: estimator_preds})
old_tracking_uri = tracking.get_tracking_uri()
# should_start_run tests whether or not calling log_model() automatically starts a run.
for should_start_run in [False, True]:
with TempDir(chdr=True, remove_on_exit=True) as tmp:
try:
# Creating dict of features names (str) to placeholders (tensors)
feature_spec = {}
for name in feature_names:
feature_spec[name] = tf.placeholder("float", name=name, shape=[150])
tracking.set_tracking_uri("test")
if should_start_run:
tracking.start_run()
pyfunc_preds_df = self.helper(feature_spec, tmp, estimator,
pandas.DataFrame(data=X, columns=feature_names))
# Asserting that the loaded model predictions are as expected.
assert estimator_preds_df.equals(pyfunc_preds_df)
finally:
# Restoring the old logging location.
tracking.end_run()
tracking.set_tracking_uri(old_tracking_uri)
def test_categorical_columns(self):
"""
This tests logging capabilities on datasets with categorical columns.
See https://github.com/tensorflow/tensorflow/blob/master/tensorflow/examples/get_started/\
regression/imports85.py
for reference code.
"""
with TempDir(chdr=False, remove_on_exit=True) as tmp:
path = os.path.abspath("tests/data/uci-autos-imports-85.data")
# Order is important for the csv-readers, so we use an OrderedDict here.
defaults = collections.OrderedDict([
("body-style", [""]),
("curb-weight", [0.0]),
("highway-mpg", [0.0]),
("price", [0.0])
])
types = collections.OrderedDict((key, type(value[0]))
for key, value in defaults.items())
df = pandas.read_csv(path, names=types.keys(), dtype=types, na_values="?")
df = df.dropna()
# Extract the label from the features dataframe.
y_train = df.pop("price")
# Creating the input training function required.
trainingFeatures = {}
for i in df:
trainingFeatures[i] = df[i].values
input_train = tf.estimator.inputs.numpy_input_fn(trainingFeatures,
y_train.values,
shuffle=False,
batch_size=1)
# Creating the feature columns required for the DNNRegressor.
body_style_vocab = ["hardtop", "wagon", "sedan", "hatchback", "convertible"]
body_style = tf.feature_column.categorical_column_with_vocabulary_list(
key="body-style", vocabulary_list=body_style_vocab)
feature_columns = [
tf.feature_column.numeric_column(key="curb-weight"),
tf.feature_column.numeric_column(key="highway-mpg"),
# Since this is a DNN model, convert categorical columns from sparse
# to dense.
# Wrap them in an `indicator_column` to create a
# one-hot vector from the input.
tf.feature_column.indicator_column(body_style)
]
# Build a DNNRegressor, with 2x20-unit hidden layers, with the feature columns
# defined above as input.
estimator = tf.estimator.DNNRegressor(
hidden_units=[20, 20], feature_columns=feature_columns)
# Training the estimator.
estimator.train(input_fn=input_train, steps=10)
# Saving the estimator's prediction on the training data; assume the DNNRegressor
# produces a single output column named 'predictions'
pred_col = "predictions"
estimator_preds = [s[pred_col] for s in estimator.predict(input_train)]
estimator_preds_df = pd.DataFrame({pred_col: estimator_preds})
# Setting the logging such that it is in the temp folder and deleted after the test.
old_tracking_dir = tracking.get_tracking_uri()
tracking_dir = os.path.abspath(tmp.path("mlruns"))
tracking.set_tracking_uri("file://%s" % tracking_dir)
tracking.start_run()
try:
# Creating dict of features names (str) to placeholders (tensors)
feature_spec = {}
feature_spec["body-style"] = tf.placeholder("string",
name="body-style",
shape=[None])
feature_spec["curb-weight"] = tf.placeholder("float",
name="curb-weight",
shape=[None])
feature_spec["highway-mpg"] = tf.placeholder("float",
name="highway-mpg",
shape=[None])
pyfunc_preds_df = self.helper(feature_spec, tmp, estimator, df)
# Asserting that the loaded model predictions are as expected. Allow for some
# imprecision as this is expected with TensorFlow.
pandas.testing.assert_frame_equal(
pyfunc_preds_df, estimator_preds_df, check_less_precise=6)
finally:
# Restoring the old logging location.
tracking.end_run()
tracking.set_tracking_uri(old_tracking_dir)
|
#!/usr/bin/env python3
# Copyright (c) 2018 The Refnet Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the behavior of RPC importprivkey on set and unset labels of
addresses.
It tests different cases in which an address is imported with importaddress
with or without a label and then its private key is imported with importprivkey
with and without a label.
"""
from test_framework.test_framework import RefnetTestFramework
from test_framework.wallet_util import test_address
class ImportWithLabel(RefnetTestFramework):
def set_test_params(self):
self.num_nodes = 2
self.setup_clean_chain = True
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def run_test(self):
"""Main test logic"""
self.log.info(
"Test importaddress with label and importprivkey without label."
)
self.log.info("Import a watch-only address with a label.")
address = self.nodes[0].getnewaddress()
label = "Test Label"
self.nodes[1].importaddress(address, label)
test_address(self.nodes[1],
address,
iswatchonly=True,
ismine=False,
label=label)
self.log.info(
"Import the watch-only address's private key without a "
"label and the address should keep its label."
)
priv_key = self.nodes[0].dumpprivkey(address)
self.nodes[1].importprivkey(priv_key)
test_address(self.nodes[1],
address,
label=label)
self.log.info(
"Test importaddress without label and importprivkey with label."
)
self.log.info("Import a watch-only address without a label.")
address2 = self.nodes[0].getnewaddress()
self.nodes[1].importaddress(address2)
test_address(self.nodes[1],
address2,
iswatchonly=True,
ismine=False,
label="")
self.log.info(
"Import the watch-only address's private key with a "
"label and the address should have its label updated."
)
priv_key2 = self.nodes[0].dumpprivkey(address2)
label2 = "Test Label 2"
self.nodes[1].importprivkey(priv_key2, label2)
test_address(self.nodes[1],
address2,
label=label2)
self.log.info("Test importaddress with label and importprivkey with label.")
self.log.info("Import a watch-only address with a label.")
address3 = self.nodes[0].getnewaddress()
label3_addr = "Test Label 3 for importaddress"
self.nodes[1].importaddress(address3, label3_addr)
test_address(self.nodes[1],
address3,
iswatchonly=True,
ismine=False,
label=label3_addr)
self.log.info(
"Import the watch-only address's private key with a "
"label and the address should have its label updated."
)
priv_key3 = self.nodes[0].dumpprivkey(address3)
label3_priv = "Test Label 3 for importprivkey"
self.nodes[1].importprivkey(priv_key3, label3_priv)
test_address(self.nodes[1],
address3,
label=label3_priv)
self.log.info(
"Test importprivkey won't label new dests with the same "
"label as others labeled dests for the same key."
)
self.log.info("Import a watch-only legacy address with a label.")
address4 = self.nodes[0].getnewaddress()
label4_addr = "Test Label 4 for importaddress"
self.nodes[1].importaddress(address4, label4_addr)
test_address(self.nodes[1],
address4,
iswatchonly=True,
ismine=False,
label=label4_addr,
embedded=None)
self.log.info(
"Import the watch-only address's private key without a "
"label and new destinations for the key should have an "
"empty label while the 'old' destination should keep "
"its label."
)
priv_key4 = self.nodes[0].dumpprivkey(address4)
self.nodes[1].importprivkey(priv_key4)
embedded_addr = self.nodes[1].getaddressinfo(address4)['embedded']['address']
test_address(self.nodes[1],
embedded_addr,
label="")
test_address(self.nodes[1],
address4,
label=label4_addr)
self.stop_nodes()
if __name__ == "__main__":
ImportWithLabel().main()
|
from matplotlib import cm, rcParams
import matplotlib.pyplot as plt
import numpy as np
import math
import random as rand
""" G(phi) function in Rinzel & Lewis' article (2003) under weak coupling """
""" This is under weak coupling theory, although one can note that gamma only serves to scale the function """
c = ['#aa3863', '#d97020', '#ef9f07', '#449775', '#3b7d86']
rcParams.update({'figure.autolayout': True})
def T(I):
return math.log(I/(I-1))
def G(phi, I, gamma):
if phi != 0 and phi != 1:
return gamma*(2/T(I))*(phi*math.sinh((1-phi)*T(I)) - (1-phi)*math.sinh(phi*T(I))) + gamma*(beta/(I*T(I)*T(I)))*(math.exp(phi*T(I)) - math.exp((1-phi)*T(I)))
else :
return 0
""" Varying Gamma """
gamma = [0.4, 0.3, 0.2, 0.1, 0.01]
beta = 0.1
I = 1.8
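# Quick sanity check (an added illustration, not in the original script): every
# term of G is proportional to gamma, so G(phi, I, gamma) scales linearly with
# gamma, e.g. doubling gamma doubles the value at any phase phi.
assert math.isclose(G(0.25, I, 0.4), 2 * G(0.25, I, 0.2))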
plt.figure(figsize=(8,5))
vector_phi = np.linspace(0,1,1000)
zero_line = np.zeros(len(vector_phi))
plt.plot(vector_phi, zero_line, color='black', linestyle='--')
k = 0
for g in gamma :
vector_G = []
for el in vector_phi:
vector_G.append(G(el, I, g))
vector_G = np.array(vector_G)
plt.plot(vector_phi, vector_G, label=f'$\gamma = {g}$', color = c[k])
k += 1
plt.xlabel('$\phi$', size=14)
plt.ylabel('$G(\phi)$', size=14)
plt.title(f'G function for $I={I}, \\beta={beta}$')
zero_crossings = np.where(np.diff(np.sign(vector_G-zero_line)))[0]
print(zero_crossings)
plt.legend(loc='upper left')
plt.savefig(f'G_function_range_gammas_I={I}.png', dpi=600)
plt.show()
plt.close()
""" Varying I """
"""
gamma = 1
beta = 0.2
I = [1.15, 1.2, 1.4]
plt.figure(figsize=(8,5))
vector_phi = np.linspace(0,1,1000)
zero_line = np.zeros(len(vector_phi))
plt.plot(vector_phi, zero_line, linestyle='--', color='k')
k = 0
for current in I :
vector_G = []
for el in vector_phi:
vector_G.append(G(el, current, gamma))
vector_G = np.array(vector_G)
plt.plot(vector_phi, vector_G, label=f'$I = {current}$', color = c[k])
k += 1
plt.xlabel('$\phi$', size=14)
plt.ylabel('$G(\phi)$', size=14)
zero_crossings = np.where(np.diff(np.sign(vector_G-zero_line)))[0]
print(zero_crossings)
plt.legend()
plt.show()
"""
|
# -*- coding: utf-8 -*-
from __future__ import division, print_function
__all__ = ["StarryBaseOp"]
import pkg_resources
from theano import gof
from ..build_utils import get_compile_args, get_cache_version
class StarryBaseOp(gof.COp):
__props__ = ()
func_file = None
func_name = None
def __init__(self):
super(StarryBaseOp, self).__init__(self.func_file, self.func_name)
def c_code_cache_version(self):
return get_cache_version()
def c_headers(self, compiler):
return ["theano_helpers.h"]
def c_header_dirs(self, compiler):
return [
pkg_resources.resource_filename(__name__, "include"),
pkg_resources.resource_filename(__name__, "starry/starry"),
pkg_resources.resource_filename(__name__,
"starry/lib/eigen_3.3.3"),
pkg_resources.resource_filename(__name__,
"starry/lib/boost_1_66_0"),
]
def c_compile_args(self, compiler):
return get_compile_args(compiler)
|
from tkinter import *
from tkinter.filedialog import askopenfilename
import time
Tk().withdraw() # we don't want a full GUI, so keep the root window from appearing
filename = askopenfilename() # show an "Open" dialog box and return the path to the selected file
print(filename)
time.sleep(1)
|
# Filename: analyst.py
"""Analyst is a tool to look up (and export selected) data and insights
from exported data from chats and channels in Telegram
using Python and PyQt5."""
import sys
import pandas as pd
from pathlib import Path
from PyQt5 import QtWidgets, QtCore
from PyQt5 import uic
from backend import (
converter,
handler,
)
__version__ = '0.1'
__author__ = 'Artyom Filippenko'
df = pd.DataFrame({'a': ['Mary', 'Jim', 'John'],
'b': [100, 200, 300],
'c': ['a', 'b', 'c']})
# VARS SECTION
# IMPORT LOCALE
IMPORT_WINDOW_TITLE = 'TelegramData Analyst - Import'
IMPORT_WINDOW_MSG = 'This software is designed for analysis of Telegram channels and chats.'
IMPORT_BROWSE_MSG = 'Open file'
IMPORT_PATHLINE_MSG = 'Please, add path to JSON file, exported from Telegram Application...'
IMPORT_BROWSE_BTN_NAME = 'Browse'
IMPORT_ANALYSE_BTN_NAME = 'Analyze'
IMPORT_PATH_MSG = 'File'
# ANALYST LOCALE
ANALYST_WINDOW_TITLE = 'TelegramData Analyst - Explorer'
ANALYST_STATUSBAR_PREFIX_MSG = 'Exploring data from json-file:'
ANALYST_WINDOW_MSG = 'Analyzing file'
ANALYST_RETURN_BTN_NAME = 'Return to import...'
ANALYST_EXPORT_BTN_NAME = 'Export results...'
# ANALYST LOCALE
#ALERT_WINDOW_TITLE = 'Alert!'
# UI path
IMPORT_UI_PATH = './frontend/import_data.ui'
MAIN_UI_PATH = './frontend/workspace.ui'
#ALERT_UI_PATH = './frontend/alert.ui'
class ImportWindow(QtWidgets.QDialog):
def __init__(self, parent=None):
super().__init__(parent)
self._build()
self.ui.show()
def _build(self):
self.ui = uic.loadUi(IMPORT_UI_PATH)
# Locale
self.ui.setWindowTitle(IMPORT_WINDOW_TITLE)
self.ui.import_description_message.setText(IMPORT_WINDOW_MSG)
self.ui.browse_files_btn.setText(IMPORT_BROWSE_BTN_NAME)
self.ui.analyse_file_btn.setText(IMPORT_ANALYSE_BTN_NAME)
self.ui.import_file_pathline.setText(IMPORT_PATHLINE_MSG)
# Loading UI logic
self.ui.browse_files_btn.clicked.connect(self._browse_files)
self.ui.analyse_file_btn.clicked.connect(self._open_analyst)
def _browse_files(self):
import_file = QtWidgets.QFileDialog.getOpenFileName(self, IMPORT_BROWSE_MSG,
'./', "Json file (*.json)")
self.ui.import_file_pathline.setText(import_file[0])
def _open_analyst(self):
if self.ui.import_file_pathline.text() == IMPORT_PATHLINE_MSG:
json_file_path = ''
else:
json_file_path = Path(self.ui.import_file_pathline.text())
self.analyst = AnalysisWindow(self)
self.analyst.import_json_file(json_file_path)
        self.analyst.update_table_view()
self.analyst.ui.statusbar.showMessage(ANALYST_STATUSBAR_PREFIX_MSG + ' ' + \
str(json_file_path))
self.ui.hide()
class AnalysisWindow(QtWidgets.QMainWindow):
def __init__(self, parent=None):
super().__init__(parent, QtCore.Qt.Window)
self._build()
self.ui.show()
#self.import_json_file()
#self.update_table_view()
def _build(self):
self.ui = uic.loadUi(MAIN_UI_PATH)
# Locale
self.ui.setWindowTitle(ANALYST_WINDOW_TITLE)
self.ui.return_btn.setText(ANALYST_RETURN_BTN_NAME)
self.ui.export_btn.setText(ANALYST_EXPORT_BTN_NAME)
# Loading UI logic
self.ui.return_btn.clicked.connect(self._return_to_import)
def _return_to_import(self):
self.ui.close()
self.parent().ui.show()
def import_json_file(self, json_file_path):
self._data = converter.convert_tg_json(json_file_path)
def update_table_view(self):
self.ui.test_msg.setText(str(df.columns))
self.model = handler.pandasModel(self._data)
self.ui.table_view.setModel(self.model)
self.ui.table_view.show()
def main():
app = QtWidgets.QApplication(sys.argv)
window = ImportWindow()
#window.show()
sys.exit(app.exec_())
if __name__ == '__main__':
main()
|
###############################################################
# pytest -v --capture=no tests/1_local/test_name.py
# pytest -v tests/1_local/test_name.py
# pytest -v --capture=no tests/1_local/test_name.py::Test_name::<METHODNAME>
###############################################################
import pytest
from cloudmesh.common.StopWatch import StopWatch
from cloudmesh.common3.host import Host
from cloudmesh.common.Printer import Printer
from cloudmesh.common3.Benchmark import Benchmark
from cloudmesh.common.util import HEADING
Benchmark.debug()
# multiping only works if you have root, so we can not use it
# from multiping import MultiPing
hosts = ['127.0.0.1',
'localhost',
'www.indiana.edu',
'www.pbs.org',
'www.github.com',
'www.redhat.com',
'www.openstack.org',
'www.bbc.com',
'www.ec2instances.info',
'aws.amazon.com']
@pytest.mark.incremental
class TestPing:
def ping(self, processors=1):
StopWatch.start(f"total p={processors} c=1")
r = Host.ping(hosts, processors=processors, count=1)
StopWatch.stop(f"total p={processors} c=1")
return r
def test_internal_ping(self):
HEADING()
StopWatch.start("total _ping")
for host in hosts:
location = {
'ip': host,
'count': 1,
}
StopWatch.start(f"ping {host}")
result = Host._ping(location)
StopWatch.stop(f"ping {host}")
StopWatch.stop("total _ping")
assert result['success']
def test_ping_processor(self):
HEADING()
print()
for processors in range(1, len(hosts)):
print("Processors:", processors)
results = self.ping(processors=processors)
print(Printer.write(results,
order=['host',
'success',
'max',
'min',
'stddev']
))
for result in results:
assert result['success']
#
# only works if you have root, so not suitable
#
# def test_multi_ping(self):
# ping = MultiPing(hosts)
# responses, no_responses = ping(hosts, timeout=2, retry=1)
def test_benchmark(self):
HEADING()
StopWatch.benchmark(csv=True, sysinfo=False)
|
import tensorflow as tf
from networks.network import Network
#define
n_classes = 21
_feat_stride = [16,]
anchor_scales = [8, 16, 32]
class VGGnet_train(Network):
def __init__(self, trainable=True):
self.inputs = []
self.data = tf.placeholder(tf.float32, shape=[None, None, None, 3])
#self.im_info = tf.placeholder(tf.float32, shape=[None, 3])
#self.gt_boxes = tf.placeholder(tf.float32, shape=[None, 5])
self.keep_prob = tf.placeholder(tf.float32)
self.segmentation=tf.placeholder(tf.float32,shape=[None,900])
self.rois=tf.placeholder(tf.float32,shape=[None,5])
#self.mweights=tf.placeholder(tf.float32,shape=[None,2])
self.sweights=tf.placeholder(tf.bool,shape=[None])
self.labels=tf.placeholder(tf.int32,shape=[None])
self.layers = dict({'data':self.data, 'segmentation':self.segmentation, 'sweight':self.sweights, 'labels': self.labels, "rois": self.rois})
self.trainable = trainable
self.setup()
def setup(self):
(self.feed('data')
.conv(3, 3, 64, 1, 1, name='conv1_1', trainable=False)
.conv(3, 3, 64, 1, 1, name='conv1_2', trainable=False)
.max_pool(2, 2, 2, 2, padding='VALID', name='pool1')
.conv(3, 3, 128, 1, 1, name='conv2_1', trainable=False)
.conv(3, 3, 128, 1, 1, name='conv2_2', trainable=False)
.max_pool(2, 2, 2, 2, padding='VALID', name='pool2')
.conv(3, 3, 256, 1, 1, name='conv3_1')
.conv(3, 3, 256, 1, 1, name='conv3_2')
.conv(3, 3, 256, 1, 1, name='conv3_3')
.max_pool(2, 2, 2, 2, padding='VALID', name='pool3')
.conv(3, 3, 512, 1, 1, name='conv4_1')
.conv(3, 3, 512, 1, 1, name='conv4_2')
.conv(3, 3, 512, 1, 1, name='conv4_3'))
#=========ROIPOOLING=======
(self.feed('conv4_3','rois')
.roi_pool(7, 7, 1.0/16, name='pool_4')
.conv(3, 3, 512, 1, 1, name='conv5_1')
.conv(3, 3, 512, 1, 1, name='conv5_2')
.conv(3, 3, 512, 1, 1, name='conv5_3')
.max_pool(2, 2, 2, 2, padding='VALID', name='pool5'))
#========= RPN ============
# (self.feed('conv5_3')
# .conv(3,3,512,1,1,name='rpn_conv/3x3')
# .conv(1,1,len(anchor_scales)*3*2 ,1 , 1, padding='VALID', relu = False, name='rpn_cls_score'))#
# (self.feed('rpn_cls_score','gt_boxes','im_info','data')
# .anchor_target_layer(_feat_stride, anchor_scales, name = 'rpn-data' ))#
# # Loss of rpn_cls & rpn_boxes
# (self.feed('rpn_conv/3x3')
# .conv(1,1,len(anchor_scales)*3*4, 1, 1, padding='VALID', relu = False, name='rpn_bbox_pred'))
#========= RoI Proposal ============
# (self.feed('rpn_cls_score')
# .reshape_layer(2,name = 'rpn_cls_score_reshape')
# .softmax(name='rpn_cls_prob'))
#
# (self.feed('rpn_cls_prob')
# .reshape_layer(len(anchor_scales)*3*2,name = 'rpn_cls_prob_reshape'))
#
# (self.feed('rpn_cls_prob_reshape','rpn_bbox_pred','im_info')
# .proposal_layer(_feat_stride, anchor_scales, 'TRAIN',name = 'rpn_rois'))
#
# (self.feed('rpn_rois','gt_boxes')
# .proposal_target_layer(n_classes,name = 'roi-data'))
#========= RCNN ============
(self.feed('pool5')
.fc(1024, name='fc6')
.dropout(0.5, name='drop6')
.fc(1024, name='fc7')
.dropout(0.5, name='drop7')
.fc(n_classes, relu=False, name='cls_score')
.softmax(name='cls_prob'))
# (self.feed('drop7')
# .fc(n_classes*4, relu=False, name='bbox_pred'))
#==========segment network===
(self.feed('conv5_3')
.conv(1,1,512,1 , 1, padding='VALID', name='conv5_4')
.fc(512, name='fc8')
.fc(900, relu=False, name='seg_score'))
|
"""
KMP pattern matching algorithm.
Finds matching patterns in text in linear time.
Text: A longer string of length n. (n > m)
Pattern: Substring to be searched for of length m.
Works by precompiling the pattern string to create a LPS string array.
LPS: Longest Proper Prefix which is also a Suffix, i.e. the longest proper prefix of the pattern that is also a suffix of it.
Time Complexity: O(n+m)
Space Complexity: O(m)
"""
def compute_lps(pattern: str, m: int) -> list:
"""
Algorithm to compute LPS for given pattern.
"""
lps = [0] * m
i, j = 1, 0 # j = length of previous longest prefix-suffix
while i < m:
if pattern[i] == pattern[j]:
j += 1
lps[i] = j
i += 1
else:
# backtrack j. It cannot suddenly reduce to 0 as we might have a
# suffix - prefix pair ending at j
if j > 0:
j = lps[j - 1]
else:
i += 1
return lps
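# Illustrative example (added note, not part of the original module):
# compute_lps("ABABAC", 6) returns [0, 0, 1, 2, 3, 0]; for instance lps[4] == 3
# because the proper prefix "ABA" is also a suffix of "ABABA".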
def kmp(text: str, pattern: str) -> None:
n, m = len(text), len(pattern)
lps = compute_lps(pattern, m)
i, j = 0, 0
while i < n:
if text[i] == pattern[j]:
i += 1
j += 1
if j == m:
print("pattern", pattern, "found at location", i - j)
j = lps[j - 1]
elif i < n and pattern[j] != text[i]:
if j > 0:
j = lps[j - 1]
else:
i += 1
if __name__ == "__main__":
text = "ABABABCABABABCABABABCABABACABABAC"
pattern = "ABABAC"
kmp(text, pattern)
pattern = "AAACAAAAAC"
kmp(text, pattern)
|
class HTTPException(Exception):
"""
Exception which happens when HTTP status code is not 200 (OK).
"""
def __init__(self, code, url) -> None:
self.error = f"While requesting to {url}, request returned status {code}."
def __str__(self) -> str:
return self.error
class NoCatalogResult(Exception):
"""
Exception which happens when there is no product with given product id.
"""
def __init__(self, product_id) -> None:
self.error = f"There is no catalog result with id {product_id}."
def __str__(self) -> str:
return self.error
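# Illustrative usage sketch (added here, not part of the original module); the
# URL and product id below are made-up placeholders.
if __name__ == "__main__":
    try:
        raise HTTPException(404, "https://example.com/catalog")
    except HTTPException as error:
        print(error)  # While requesting to https://example.com/catalog, request returned status 404.
    try:
        raise NoCatalogResult("12345")
    except NoCatalogResult as error:
        print(error)  # There is no catalog result with id 12345.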
|
# Copyright 2013-2014 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Maximum # of documents to process before recording timestamp
# default = -1 (no maximum)
DEFAULT_BATCH_SIZE = -1
# Interval in seconds between doc manager flushes (i.e. auto commit)
# default = None (never auto commit)
DEFAULT_COMMIT_INTERVAL = None
# Maximum # of documents to send in a single bulk request through a
# DocManager.
DEFAULT_MAX_BULK = 1000
# The default MongoDB field that will serve as the unique key for the
# target system.
DEFAULT_UNIQUE_KEY = "_id"
# Default host and facility for logging to the syslog.
DEFAULT_SYSLOG_HOST = "localhost:512"
DEFAULT_SYSLOG_FACILITY = "user"
# ROTATING LOGFILE
# The type of interval
# (seconds, minutes, hours... c.f. logging.handlers.TimedRotatingFileHandler)
DEFAULT_LOGFILE_WHEN = "midnight"
# The rollover interval
DEFAULT_LOGFILE_INTERVAL = 1
# Number of log files to keep
DEFAULT_LOGFILE_BACKUPCOUNT = 7
|
"""
mcpython - a minecraft clone written in python licenced under the MIT-licence
(https://github.com/mcpython4-coding/core)
Contributors: uuk, xkcdjerry (inactive)
Based on the game of fogleman (https://github.com/fogleman/Minecraft), licenced under the MIT-licence
Original game "minecraft" by Mojang Studios (www.minecraft.net), licenced under the EULA
(https://account.mojang.com/documents/minecraft_eula)
Mod loader inspired by "Minecraft Forge" (https://github.com/MinecraftForge/MinecraftForge) and similar
This project is not official by mojang and does not relate to it.
"""
import asyncio
import mcpython.engine.ResourceLoader
import mcpython.util.texture
import PIL.Image
import pyglet
from mcpython.engine.rendering.RenderingLayerManager import MIDDLE_GROUND
from mcpython.util.annotation import onlyInClient
from pyglet.window import mouse
from .AbstractUIPart import AbstractUIPart
IMAGE = asyncio.get_event_loop().run_until_complete(
mcpython.engine.ResourceLoader.read_image(
"assets/minecraft/textures/gui/container/creative_inventory/tabs.png"
)
)
scroll_active = mcpython.util.texture.to_pyglet_image(
IMAGE.crop((233, 0, 243, 14)).resize((20, 28), PIL.Image.NEAREST)
)
scroll_inactive = mcpython.util.texture.to_pyglet_image(
IMAGE.crop((244, 0, 255, 14)).resize((20, 28), PIL.Image.NEAREST)
)
class UIScrollBar(AbstractUIPart):
"""
Class representing a scroll bar in a gui-state of the game
    The user is expected to work with the values returned by this system (via on_scroll)
"""
def __init__(self, position: tuple, scroll_distance: int, on_scroll=None):
super().__init__(position, (0, 0))
self.selected = False
self.bar_position = position
self.bar_sprite = pyglet.sprite.Sprite(scroll_active)
self.scroll_distance = scroll_distance
self.on_scroll = on_scroll
self.active = True
def move(self, delta: int):
x, y = self.bar_position
self.bar_position = x, max(
self.position[1], min(self.position[1] + self.scroll_distance, y + delta)
)
if self.on_scroll:
self.on_scroll(0, 0, 0, delta, 0, 0, self.get_status())
def bind_to_eventbus(self):
self.master[0].eventbus.subscribe("user:mouse:press", self.on_mouse_press)
self.master[0].eventbus.subscribe("user:mouse:release", self.on_mouse_release)
self.master[0].eventbus.subscribe("user:mouse:drag", self.on_mouse_drag)
self.master[0].eventbus.subscribe(
MIDDLE_GROUND.getRenderingEvent(), self.on_draw
)
def on_mouse_press(self, x, y, button, mod):
if not self.active:
return
if button != mouse.LEFT:
return
bx, by = self.bar_position
if 0 <= x - bx <= 20 and 0 <= y - by <= 28:
self.selected = True
def on_mouse_release(self, x, y, button, mod):
self.selected = False
def on_mouse_drag(self, x, y, dx, dy, button, mod):
if not self.active:
return
if button == mouse.LEFT and self.selected:
self.bar_position = (
self.position[0],
max(self.position[1], min(self.position[1] + self.scroll_distance, y)),
)
if self.on_scroll:
self.on_scroll(x, y, dx, dy, button, mod, self.get_status())
def on_draw(self):
if not self.active:
return
if self.bar_sprite.position != self.bar_position:
self.bar_sprite.position = self.bar_position
self.bar_sprite.draw()
def get_status(self) -> float:
"""
        Will return the status as a float between 0 and 1, where 0 is the lower end and 1 the upper end
"""
if not self.active:
return 0
return (self.bar_position[1] - self.position[1]) / self.scroll_distance
def set_status(self, status: float):
self.bar_position = (
self.bar_position[0],
self.position[1] + status * self.scroll_distance,
)
def set_size_respective(self, position: tuple, scroll_distance: int):
if not self.active:
return
status = self.get_status()
self.position = position
self.bar_position = (
self.position[0],
self.position[1] + status * scroll_distance,
)
self.scroll_distance = scroll_distance
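# Illustrative usage sketch (added here, not part of the original module); the
# callback signature mirrors the one used by move() and on_mouse_drag() above,
# and the positions/distances are made-up values.
def _print_scroll_status(x, y, dx, dy, button, mod, status):
    print(f"scroll bar is now at {status:.2f} (0 = bottom, 1 = top)")

# bar = UIScrollBar(position=(20, 40), scroll_distance=200, on_scroll=_print_scroll_status)
# bar.move(50)         # drag the handle 50 px upwards; fires _print_scroll_status
# bar.set_status(1.0)  # jump straight to the top of the scroll range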
|
#! /usr/bin/env python
# Copyright 2010, 2011 Martin C. Frith
import fileinput, itertools, optparse, os, signal, sys
def batches(lines):
for line in lines:
if line.startswith("# batch"):
yield line
else:
print line,
while True:
yield None
def lastMergeBatches(fileNames):
files = map(fileinput.input, fileNames)
b = map(batches, files)
for i in itertools.izip(*b):
j = filter(None, i)
if j: print j[0],
else: break
if __name__ == "__main__":
signal.signal(signal.SIGPIPE, signal.SIG_DFL) # avoid silly error message
usage = "%prog files"
description = "Read files of lastal output, merge corresponding batches, and write them."
op = optparse.OptionParser(usage=usage, description=description)
opts, args = op.parse_args()
if not args: op.error("please give me some file names")
try: lastMergeBatches(args)
except KeyboardInterrupt: pass # avoid silly error message
except Exception, e:
prog = os.path.basename(sys.argv[0])
sys.exit(prog + ": error: " + str(e))
|
# Copyright 2021, joshiayus Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of joshiayus Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import re
import os
import sys
import click
import pathlib
import logging
import subprocess
from urllib import (request, parse)
try:
from gettext import gettext as _ # pylint: disable=unused-import
except ImportError:
_ = lambda msg: msg
CONNECTION_LIMIT_EXCEED_EXCEPTION_MESSAGE = """Invalid connection limit %d.
LinkedIn does not allow sending more than 80 invitations per day from a
non-premium account.
Please be patient and make sure that the connection limit is between (0, 80]
and that you do not run the bot more than once a day, otherwise LinkedIn
may block your IP."""
LOG_DIR_PATH = pathlib.Path(__file__).resolve().parent.parent.parent / 'logs'
# This variable's value decides whether logging to stream is allowed in the
# entire project.
#
# Note: Do not update the value of this variable directly; call the
# `TurnOnLoggingToStream()` function instead, otherwise this module will not
# pick up the change.
LOGGING_TO_STREAM_ENABLED = False
# We want to create the log directory if it does not exist, otherwise the file
# handlers for loggers used in other modules will complain about its absence.
if not os.path.exists(LOG_DIR_PATH):
os.mkdir(LOG_DIR_PATH)
LOG_FORMAT_STR = '%(asctime)s:%(name)s:%(levelname)s:%(funcName)s\n%(message)s' # pylint: disable=line-too-long
INB_VERSION = '1.0.0'
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
file_handler = logging.FileHandler(LOG_DIR_PATH / __name__, mode='a')
file_handler.setFormatter(logging.Formatter(LOG_FORMAT_STR))
logger.addHandler(file_handler)
def TurnOnLoggingToStream() -> None:
global LOGGING_TO_STREAM_ENABLED
LOGGING_TO_STREAM_ENABLED = True
stream_handler = logging.StreamHandler(sys.stderr)
stream_handler.setFormatter(logging.Formatter(LOG_FORMAT_STR))
logger.addHandler(stream_handler)
_CHROME_BINARY_NOT_FOUND_MSG = _('Google Chrome binary is not present in path %s.')
_CHROME_BINARIES_NOT_FOUND_MSG = _(
'Google Chrome binary is not present in the following paths\n'
'%s')
_CHROME_DRIVER_BINARY = 'chromedriver'
_CHROME_DRIVER_ZIP_FILE = None
# Chromedriver that comes with the repository is only compatible with the Google
# Chrome version _GOOGLE_CHROME_COMPATIBLE_VERSION_WITH_INSTALLED_CHROMEDRIVER.
#
# This version must be changed with the installed 'chromedriver' version that
# comes with the repository.
_GOOGLE_CHROME_COMPATIBLE_VERSION_WITH_INSTALLED_CHROMEDRIVER = '96.0.4664.110'
def _ExtractChromeDriverZip(chromedriver_zip: str) -> None:
"""Utility routine to `unzip` the downloaded `chromedriver` archive present
at path `chromedriver_zip`.
This function will extract all the contents of `chromedriver` archive in the
same directory where the archive is installed.
Args:
chromedriver_zip: `Chromedriver` archive file path.
"""
import zipfile # pylint: disable=import-outside-toplevel
driver_dir = pathlib.PurePath(chromedriver_zip).parent
with zipfile.ZipFile(chromedriver_zip, 'r') as zip_f:
zip_f.extractall(driver_dir)
def _RetrieveChromeDriverZip(url: str, dest: str, verbose: bool = True) -> str:
"""Utility function to download `chromedriver` zip file at the specified URL.
  Utility function to download and store the `chromedriver` zip file in the
  destination `dest`. This function also sets the `_CHROME_DRIVER_ZIP_FILE`
  variable to the `chromedriver` zip file name at the specified URL, so that
  the archive file name can later be used to extract the `chromedriver`
  executable from it.
Args:
url: URL to download the file from.
dest: Destination where to place the file after downloading.
verbose: If `True` shows the downloading status.
Returns:
    Destination where the file is placed after downloading.
"""
u = request.urlopen(url)
scheme, netloc, path, query, fragment = parse.urlsplit(url) # pylint: disable=unused-variable
filename = os.path.basename(path)
if not filename:
filename = 'downloaded'
global _CHROME_DRIVER_ZIP_FILE
_CHROME_DRIVER_ZIP_FILE = filename
if dest:
filename = os.path.join(dest, filename)
with open(filename, 'wb') as f:
if verbose:
meta = u.info()
if hasattr(meta, 'getheaders'):
meta_func = meta.getheaders
else:
meta_func = meta.get_all
meta_length = meta_func('Content-Length')
file_size = None
if meta_length:
file_size = int(meta_length[0])
click.echo(_('Downloading: %s Bytes: %s') % (url, file_size))
file_size_dl = 0
block_size = 8192
while True:
buffer = u.read(block_size)
if not buffer:
break
file_size_dl += len(buffer)
f.write(buffer)
if verbose:
status = '{0:16}'.format(file_size_dl) # pylint: disable=consider-using-f-string
if file_size:
status += ' [{0:6.2f}%]'.format(file_size_dl * 100 / file_size) # pylint: disable=consider-using-f-string
status += chr(13)
click.echo(f'{status}\r', None, False)
if verbose:
click.echo('')
return filename
def _GetGoogleChromeBinaryVersion() -> str:
"""Returns the `Google Chrome` version the user is using in its system.
This function returns the `Google Chrome` version independent of the platform
the user is running. This function creates a child process using `subprocess`
module to talk to the shell and retrieve the `Google Chrome` version present
in the system.
This function checks the following locations where the `Google Chrome`
executable could be present in user's system.
* `Linux`
  On the `linux` platform this function checks whether the `google-chrome` or
  `google-chrome-stable` binary is present; if so, it spawns a child process
  that passes the `--version` flag to that binary in order to retrieve the
  version string.
  The child process calls for the `linux` platform look something like the
  following:
* If `google-chrome` is present.
```shell
google-chrome --version
```
* If `google-chrome` is not present.
```shell
google-chrome-stable --version
```
* `MacOS`
  On the `MacOS` platform this function creates a child process that passes
  the `--version` flag to the `Google Chrome` executable present at the path
  `/Applications/Google Chrome.app/Contents/MacOS/Google Chrome`.
  The child process call for the `MacOS` platform looks something like the
  following:
```shell
/Applications/Google Chrome.app/Contents/MacOS/Google Chrome --version
```
@TODO(joshiayush): Find alternative paths on `MacOS`.
* `Windows`
God forbid if you are on `Windows` because there is no tested version of
this function on `Windows` but so far what we've come up with is the
following:
This function will search for the `Google Chrome` executable in the
following paths:
```python
chrome_binary_path = (
'%ProgramFiles%\\Google\\Chrome\\Application\\chrome.exe',
'%ProgramFiles(x86)%\\Google\\Chrome\\Application\\chrome.exe',
'%LocalAppData%\\Google\\Chrome\\Application\\chrome.exe',
'C:\\Users\\USER\\AppData\\Local\\Google\\Chrome\\Application\\chrome.exe'
)
```
and will try to execute the following commands in its child process to
retrieve the `Google Chrome` version.
```shell
wmic datafile where name=${path} get Version /value
```
where path is the `element` of `chrome_binary_path` tuple on `Windows`.
Returns:
`Google Chrome` version.
"""
version_regex = r'[0-9]{2}.[0-9]{1}.[0-9]{4}.[0-9]{3}'
if sys.platform == 'linux':
chrome_binaries = ['google-chrome', 'google-chrome-stable']
chrome_binary_path = []
for binary in chrome_binaries:
try:
chrome_binary_path.append(
subprocess.check_output(['whereis', '-b',
binary]).decode('utf-8')[len(binary) +
1::].strip())
except subprocess.CalledProcessError as exc:
logger.error(('CalledProcessError: Exit code %d.'
'\n%s.'), exc.returncode, exc.output)
continue
    # Drop any binaries that `whereis` could not locate.
    chrome_binary_path = [path for path in chrome_binary_path if path != '']
for path in chrome_binary_path:
try:
version = subprocess.check_output([path, '--version']).decode('utf-8')
except subprocess.CalledProcessError:
logger.error(_CHROME_BINARY_NOT_FOUND_MSG, path)
continue
else:
version = re.search(version_regex, version)
return version.group(0)
raise FileNotFoundError(_CHROME_BINARIES_NOT_FOUND_MSG %
(', '.join(chrome_binary_path)))
elif sys.platform == 'darwin':
    chrome_binary_path = (
        r'/Applications/Google Chrome.app/Contents/MacOS/Google Chrome',)
for path in chrome_binary_path:
try:
version = subprocess.check_output([path, '--version']).decode('utf-8')
except subprocess.CalledProcessError:
logger.error(_CHROME_BINARY_NOT_FOUND_MSG, path)
continue
else:
version = re.search(version_regex, version)
return version.group(0)
raise FileNotFoundError(_CHROME_BINARIES_NOT_FOUND_MSG %
(', '.join(chrome_binary_path)))
elif sys.platform in ('win32', 'cygwin'):
chrome_binary_path = (
r'%ProgramFiles%\Google\Chrome\Application\chrome.exe',
r'%ProgramFiles(x86)%\Google\Chrome\Application\chrome.exe',
r'%LocalAppData%\Google\Chrome\Application\chrome.exe',
r'C:\Users\USER\AppData\Local\Google\Chrome\Application\chrome.exe')
for path in chrome_binary_path:
try:
version = subprocess.check_output([
'wmic', 'datafile', 'where', f'name={path}', 'get', 'Version',
'/value'
]).decode('utf-8')
except subprocess.CalledProcessError:
logger.error(_CHROME_BINARY_NOT_FOUND_MSG, path)
continue
else:
version = re.search(version_regex, version)
return version.group(0)
raise FileNotFoundError(_CHROME_BINARIES_NOT_FOUND_MSG %
(', '.join(chrome_binary_path)))
def _CheckIfChromeDriverIsCompatibleWithGoogleChromeInstalled() -> bool:
"""Checks if the `chromedriver` that comes with the `inb` repository is
compatible with the `Google Chrome` version the user is using in its system.
This function checks if the `Google Chrome` version the user is using in its
system matches against the `Google Chrome` version supported by `chromedriver`
that comes with the `inb` repository which is
`_GOOGLE_CHROME_COMPATIBLE_VERSION_WITH_INSTALLED_CHROMEDRIVER`.
Returns:
True if the `chromedriver` is compatible with the `Google Chrome` installed.
"""
google_chrome_version = _GetGoogleChromeBinaryVersion()
if google_chrome_version == _GOOGLE_CHROME_COMPATIBLE_VERSION_WITH_INSTALLED_CHROMEDRIVER: # pylint: disable=line-too-long
return True
return False
def _GetPlatformSpecificChromeDriverUrlForGoogleChromeMajor(major: str) -> str:
"""Returns the platform specific `chromedriver` version that is compatible
with the `Google Chrome` major given as `major`.
This function only supports `Google Chrome` major that is present in the
following list of `chromedriver` releases:
```python
(
'95.0.4638.69',
'96.0.4664.45',
'97.0.4692.36',
)
```
`Google Chrome` version against a major that is not present in the above list
will not receive a compatible version of `chromedriver` through this function.
Args:
major: `Google Chrome` major.
Returns:
    Platform-specific `chromedriver` file URL that is compatible with the
    `Google Chrome` release having the given `major` version.
"""
chromedriver_storage_googleapis = 'https://chromedriver.storage.googleapis.com' # pylint: disable=line-too-long
for release in (
'95.0.4638.69',
'96.0.4664.45',
'97.0.4692.36',
):
if release.startswith(major):
if sys.platform == 'linux':
return f'{chromedriver_storage_googleapis}/{release}/{_CHROME_DRIVER_BINARY}_linux64.zip' # pylint: disable=line-too-long
elif sys.platform == 'darwin':
return f'{chromedriver_storage_googleapis}/{release}/{_CHROME_DRIVER_BINARY}_mac64.zip' # pylint: disable=line-too-long
elif sys.platform in ('win32', 'cygwin'):
return f'{chromedriver_storage_googleapis}/{release}/{_CHROME_DRIVER_BINARY}_win32.zip' # pylint: disable=line-too-long
def _GetPlatformSpecificChromeDriverCompatibleVersionUrl(
google_chrome_version: str) -> str:
"""Returns the platform specific `chromedriver` version URL that is
compatible with the `Google Chrome` version given as `google_chrome_version`.
  This function extracts the `major` version from the `google_chrome_version`
  string and passes it to
  `_GetPlatformSpecificChromeDriverUrlForGoogleChromeMajor()` to obtain a
  compatible `chromedriver` version URL.
Args:
google_chrome_version: `Google Chrome` version.
Returns:
`Chromedriver` version URL that is compatible with the `Google Chrome`
version given as `google_chrome_version`.
"""
major_regex = re.compile(r'^[0-9]{2}')
google_chrome_major = re.search(major_regex, google_chrome_version).group(0)
return _GetPlatformSpecificChromeDriverUrlForGoogleChromeMajor(
google_chrome_major)
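# Hedged sketch (illustrative only): how the two helpers above compose. The
# version string is this module's compatible-version constant; the resulting
# URL depends on sys.platform and on the release table above.
def _ExampleCompatibleChromeDriverUrl() -> str:
  return _GetPlatformSpecificChromeDriverCompatibleVersionUrl(
      _GOOGLE_CHROME_COMPATIBLE_VERSION_WITH_INSTALLED_CHROMEDRIVER)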
def _InstallGoogleChromeCompatibleChromeDriver() -> None:
"""Installs `Google Chrome` compatible `chromedriver`.
This function installs a `Google Chrome` compatible `chromedriver` version.
Because user's can have different versions of `Google Chrome` installed in
their system so we need to handle the case where the `chromedriver` that
comes with the `inb` repository is not compatible with the `Google Chrome`
version they are using on their system.
To handle the above case we install the compatible version of `chromedriver`
from the `googleapis` by calling the function
`_GetPlatformSpecificChromeDriverCompatibleVersionUrl()` to return the URL
for `chromedriver` and then later using that URL with function
`_RetrieveChromeDriverZip()` to install `chromedriver` from `googleapis`.
Once the `chromedriver` is installed we know that it is in a form of zip so
we need to extract it and we do so by calling the function
`_ExtractChromeDriverZip()` with the zip file path.
"""
  _RetrieveChromeDriverZip(
      _GetPlatformSpecificChromeDriverCompatibleVersionUrl(
          _GetGoogleChromeBinaryVersion()),
      _GetInstalledChromeDriverDirectoryPath(),
      verbose=LOGGING_TO_STREAM_ENABLED)
_ExtractChromeDriverZip(
os.path.join(_GetInstalledChromeDriverDirectoryPath(),
_CHROME_DRIVER_ZIP_FILE))
def _GetInstalledChromeDriverDirectoryPath() -> str:
"""Returns the absolute filesystem path to the directory where `chromedriver`
that comes with the `inb` repository is installed.
Returns:
    Absolute filesystem path to the `chromedriver` directory.
"""
dir_path = os.path.dirname(os.path.abspath(__file__))
last_inb_indx = dir_path.rfind('inb')
return os.path.join(dir_path[:last_inb_indx:], 'driver')
def ChromeDriverAbsolutePath() -> str:
"""Returns the absolute filesystem path to the `chromedriver` installed inside
the `driver` directory.
This function checks if the `chromedriver` that comes with the `inb`
repository is compatible with the `Google Chrome` installed in the user's
system; if yes it returns the absolute filesystem path to the `chromedriver`
installed inside the `driver` directory.
  If the `chromedriver` is not compatible with the `Google Chrome` version
  installed on the user's system, this function tries to install a compatible
  `chromedriver` inside the `driver` directory and, if successful, returns the
  absolute filesystem path to the `chromedriver`.
Returns:
Absolute path to `chromedriver`.
"""
if _CheckIfChromeDriverIsCompatibleWithGoogleChromeInstalled():
return os.path.join(_GetInstalledChromeDriverDirectoryPath(),
_CHROME_DRIVER_BINARY)
_InstallGoogleChromeCompatibleChromeDriver()
return os.path.join(_GetInstalledChromeDriverDirectoryPath(),
_CHROME_DRIVER_BINARY)
def GetLinkedInUrl() -> str:
"""Returns URL to LinkedIn."""
return 'https://www.linkedin.com'
def GetLinkedInLoginPageUrl() -> str:
"""Returns URL to LinkedIn's login page."""
return GetLinkedInUrl() + '/login/'
def GetLinkedInMyNetworkPageUrl() -> str:
"""Returns URL to LinkedIn's `MyNetwork` page."""
return GetLinkedInUrl() + '/mynetwork/'
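# Hedged usage sketch (illustrative, not part of the original module): how a
# caller could resolve a compatible chromedriver and the LinkedIn login URL
# using the helpers above.
def _ExampleResolveDriverAndLoginUrl() -> tuple:
  TurnOnLoggingToStream()
  driver_path = ChromeDriverAbsolutePath()
  logger.info('Using chromedriver at %s', driver_path)
  return driver_path, GetLinkedInLoginPageUrl()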
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.ads.googleads.v7.enums.types import response_content_type as gage_response_content_type
from google.ads.googleads.v7.resources.types import feed_item as gagr_feed_item
from google.protobuf import field_mask_pb2 # type: ignore
from google.rpc import status_pb2 # type: ignore
__protobuf__ = proto.module(
package='google.ads.googleads.v7.services',
marshal='google.ads.googleads.v7',
manifest={
'GetFeedItemRequest',
'MutateFeedItemsRequest',
'FeedItemOperation',
'MutateFeedItemsResponse',
'MutateFeedItemResult',
},
)
class GetFeedItemRequest(proto.Message):
r"""Request message for
[FeedItemService.GetFeedItem][google.ads.googleads.v7.services.FeedItemService.GetFeedItem].
Attributes:
resource_name (str):
Required. The resource name of the feed item
to fetch.
"""
resource_name = proto.Field(
proto.STRING,
number=1,
)
class MutateFeedItemsRequest(proto.Message):
r"""Request message for
[FeedItemService.MutateFeedItems][google.ads.googleads.v7.services.FeedItemService.MutateFeedItems].
Attributes:
customer_id (str):
Required. The ID of the customer whose feed
items are being modified.
operations (Sequence[google.ads.googleads.v7.services.types.FeedItemOperation]):
Required. The list of operations to perform
on individual feed items.
partial_failure (bool):
If true, successful operations will be
carried out and invalid operations will return
errors. If false, all operations will be carried
out in one transaction if and only if they are
all valid. Default is false.
validate_only (bool):
If true, the request is validated but not
executed. Only errors are returned, not results.
response_content_type (google.ads.googleads.v7.enums.types.ResponseContentTypeEnum.ResponseContentType):
The response content type setting. Determines
whether the mutable resource or just the
resource name should be returned post mutation.
"""
customer_id = proto.Field(
proto.STRING,
number=1,
)
operations = proto.RepeatedField(
proto.MESSAGE,
number=2,
message='FeedItemOperation',
)
partial_failure = proto.Field(
proto.BOOL,
number=3,
)
validate_only = proto.Field(
proto.BOOL,
number=4,
)
response_content_type = proto.Field(
proto.ENUM,
number=5,
enum=gage_response_content_type.ResponseContentTypeEnum.ResponseContentType,
)
class FeedItemOperation(proto.Message):
r"""A single operation (create, update, remove) on an feed item.
Attributes:
update_mask (google.protobuf.field_mask_pb2.FieldMask):
FieldMask that determines which resource
fields are modified in an update.
create (google.ads.googleads.v7.resources.types.FeedItem):
Create operation: No resource name is
expected for the new feed item.
update (google.ads.googleads.v7.resources.types.FeedItem):
Update operation: The feed item is expected
to have a valid resource name.
remove (str):
Remove operation: A resource name for the removed feed item
is expected, in this format:
``customers/{customer_id}/feedItems/{feed_id}~{feed_item_id}``
"""
update_mask = proto.Field(
proto.MESSAGE,
number=4,
message=field_mask_pb2.FieldMask,
)
create = proto.Field(
proto.MESSAGE,
number=1,
oneof='operation',
message=gagr_feed_item.FeedItem,
)
update = proto.Field(
proto.MESSAGE,
number=2,
oneof='operation',
message=gagr_feed_item.FeedItem,
)
remove = proto.Field(
proto.STRING,
number=3,
oneof='operation',
)
class MutateFeedItemsResponse(proto.Message):
r"""Response message for an feed item mutate.
Attributes:
partial_failure_error (google.rpc.status_pb2.Status):
Errors that pertain to operation failures in the partial
failure mode. Returned only when partial_failure = true and
all errors occur inside the operations. If any errors occur
outside the operations (e.g. auth errors), we return an RPC
level error.
results (Sequence[google.ads.googleads.v7.services.types.MutateFeedItemResult]):
All results for the mutate.
"""
partial_failure_error = proto.Field(
proto.MESSAGE,
number=3,
message=status_pb2.Status,
)
results = proto.RepeatedField(
proto.MESSAGE,
number=2,
message='MutateFeedItemResult',
)
class MutateFeedItemResult(proto.Message):
r"""The result for the feed item mutate.
Attributes:
resource_name (str):
Returned for successful operations.
feed_item (google.ads.googleads.v7.resources.types.FeedItem):
The mutated feed item with only mutable fields after mutate.
The field will only be returned when response_content_type
is set to "MUTABLE_RESOURCE".
"""
resource_name = proto.Field(
proto.STRING,
number=1,
)
feed_item = proto.Field(
proto.MESSAGE,
number=2,
message=gagr_feed_item.FeedItem,
)
__all__ = tuple(sorted(__protobuf__.manifest))
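# Hedged sketch (illustrative only): building a remove operation for the
# MutateFeedItems request defined above. The customer ID and resource name are
# placeholders; proto-plus messages accept keyword construction like this.
def _example_remove_feed_item_request(customer_id, feed_item_resource_name):
    operation = FeedItemOperation(remove=feed_item_resource_name)
    return MutateFeedItemsRequest(
        customer_id=customer_id,
        operations=[operation],
        partial_failure=False,
    )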
|
"""distutils.command.config
Implements the Distutils 'config' command, a (mostly) empty command class
that exists mainly to be sub-classed by specific module distributions and
applications. The idea is that while every "config" command is different,
at least they're all named the same, and users always see "config" in the
list of standard commands. Also, this is a good place to put common
configure-like tasks: "try to compile this C code", or "figure out where
this header file lives".
"""
import os, re
from distutils.core import Command
from distutils.errors import DistutilsExecError
from distutils.sysconfig import customize_compiler
from distutils import log
LANG_EXT = {"c": ".c", "c++": ".cxx"}
class config(Command):
description = "prepare to build"
user_options = [
("compiler=", None, "specify the compiler type"),
("cc=", None, "specify the compiler executable"),
("include-dirs=", "I", "list of directories to search for header files"),
("define=", "D", "C preprocessor macros to define"),
("undef=", "U", "C preprocessor macros to undefine"),
("libraries=", "l", "external C libraries to link with"),
("library-dirs=", "L", "directories to search for external C libraries"),
("noisy", None, "show every action (compile, link, run, ...) taken"),
(
"dump-source",
None,
"dump generated source files before attempting to compile them",
),
]
# The three standard command methods: since the "config" command
# does nothing by default, these are empty.
def initialize_options(self):
self.compiler = None
self.cc = None
self.include_dirs = None
self.libraries = None
self.library_dirs = None
# maximal output for now
self.noisy = 1
self.dump_source = 1
# list of temporary files generated along-the-way that we have
# to clean at some point
self.temp_files = []
def finalize_options(self):
if self.include_dirs is None:
self.include_dirs = self.distribution.include_dirs or []
elif isinstance(self.include_dirs, str):
self.include_dirs = self.include_dirs.split(os.pathsep)
if self.libraries is None:
self.libraries = []
elif isinstance(self.libraries, str):
self.libraries = [self.libraries]
if self.library_dirs is None:
self.library_dirs = []
elif isinstance(self.library_dirs, str):
self.library_dirs = self.library_dirs.split(os.pathsep)
def run(self):
pass
# Utility methods for actual "config" commands. The interfaces are
# loosely based on Autoconf macros of similar names. Sub-classes
# may use these freely.
def _check_compiler(self):
"""Check that 'self.compiler' really is a CCompiler object;
if not, make it one.
"""
# We do this late, and only on-demand, because this is an expensive
# import.
from distutils.ccompiler import CCompiler, new_compiler
if not isinstance(self.compiler, CCompiler):
self.compiler = new_compiler(
compiler=self.compiler, dry_run=self.dry_run, force=1
)
customize_compiler(self.compiler)
if self.include_dirs:
self.compiler.set_include_dirs(self.include_dirs)
if self.libraries:
self.compiler.set_libraries(self.libraries)
if self.library_dirs:
self.compiler.set_library_dirs(self.library_dirs)
def _gen_temp_sourcefile(self, body, headers, lang):
filename = "_configtest" + LANG_EXT[lang]
with open(filename, "w") as file:
if headers:
for header in headers:
file.write("#include <%s>\n" % header)
file.write("\n")
file.write(body)
if body[-1] != "\n":
file.write("\n")
return filename
def _preprocess(self, body, headers, include_dirs, lang):
src = self._gen_temp_sourcefile(body, headers, lang)
out = "_configtest.i"
self.temp_files.extend([src, out])
self.compiler.preprocess(src, out, include_dirs=include_dirs)
return (src, out)
def _compile(self, body, headers, include_dirs, lang):
src = self._gen_temp_sourcefile(body, headers, lang)
if self.dump_source:
dump_file(src, "compiling '%s':" % src)
(obj,) = self.compiler.object_filenames([src])
self.temp_files.extend([src, obj])
self.compiler.compile([src], include_dirs=include_dirs)
return (src, obj)
def _link(self, body, headers, include_dirs, libraries, library_dirs, lang):
(src, obj) = self._compile(body, headers, include_dirs, lang)
prog = os.path.splitext(os.path.basename(src))[0]
self.compiler.link_executable(
[obj],
prog,
libraries=libraries,
library_dirs=library_dirs,
target_lang=lang,
)
if self.compiler.exe_extension is not None:
prog = prog + self.compiler.exe_extension
self.temp_files.append(prog)
return (src, obj, prog)
def _clean(self, *filenames):
if not filenames:
filenames = self.temp_files
self.temp_files = []
log.info("removing: %s", " ".join(filenames))
for filename in filenames:
try:
os.remove(filename)
except OSError:
pass
# XXX these ignore the dry-run flag: what to do, what to do? even if
# you want a dry-run build, you still need some sort of configuration
# info. My inclination is to make it up to the real config command to
# consult 'dry_run', and assume a default (minimal) configuration if
# true. The problem with trying to do it here is that you'd have to
# return either true or false from all the 'try' methods, neither of
# which is correct.
# XXX need access to the header search path and maybe default macros.
def try_cpp(self, body=None, headers=None, include_dirs=None, lang="c"):
"""Construct a source file from 'body' (a string containing lines
of C/C++ code) and 'headers' (a list of header files to include)
and run it through the preprocessor. Return true if the
preprocessor succeeded, false if there were any errors.
('body' probably isn't of much use, but what the heck.)
"""
from distutils.ccompiler import CompileError
self._check_compiler()
ok = True
try:
self._preprocess(body, headers, include_dirs, lang)
except CompileError:
ok = False
self._clean()
return ok
def search_cpp(self, pattern, body=None, headers=None, include_dirs=None, lang="c"):
"""Construct a source file (just like 'try_cpp()'), run it through
the preprocessor, and return true if any line of the output matches
'pattern'. 'pattern' should either be a compiled regex object or a
string containing a regex. If both 'body' and 'headers' are None,
preprocesses an empty file -- which can be useful to determine the
symbols the preprocessor and compiler set by default.
"""
self._check_compiler()
src, out = self._preprocess(body, headers, include_dirs, lang)
if isinstance(pattern, str):
pattern = re.compile(pattern)
with open(out) as file:
match = False
while True:
line = file.readline()
if line == "":
break
if pattern.search(line):
match = True
break
self._clean()
return match
def try_compile(self, body, headers=None, include_dirs=None, lang="c"):
"""Try to compile a source file built from 'body' and 'headers'.
Return true on success, false otherwise.
"""
from distutils.ccompiler import CompileError
self._check_compiler()
try:
self._compile(body, headers, include_dirs, lang)
ok = True
except CompileError:
ok = False
log.info(ok and "success!" or "failure.")
self._clean()
return ok
def try_link(
self,
body,
headers=None,
include_dirs=None,
libraries=None,
library_dirs=None,
lang="c",
):
"""Try to compile and link a source file, built from 'body' and
'headers', to executable form. Return true on success, false
otherwise.
"""
from distutils.ccompiler import CompileError, LinkError
self._check_compiler()
try:
self._link(body, headers, include_dirs, libraries, library_dirs, lang)
ok = True
except (CompileError, LinkError):
ok = False
log.info(ok and "success!" or "failure.")
self._clean()
return ok
def try_run(
self,
body,
headers=None,
include_dirs=None,
libraries=None,
library_dirs=None,
lang="c",
):
"""Try to compile, link to an executable, and run a program
built from 'body' and 'headers'. Return true on success, false
otherwise.
"""
from distutils.ccompiler import CompileError, LinkError
self._check_compiler()
try:
src, obj, exe = self._link(
body, headers, include_dirs, libraries, library_dirs, lang
)
self.spawn([exe])
ok = True
except (CompileError, LinkError, DistutilsExecError):
ok = False
log.info(ok and "success!" or "failure.")
self._clean()
return ok
# -- High-level methods --------------------------------------------
# (these are the ones that are actually likely to be useful
# when implementing a real-world config command!)
def check_func(
self,
func,
headers=None,
include_dirs=None,
libraries=None,
library_dirs=None,
decl=0,
call=0,
):
"""Determine if function 'func' is available by constructing a
source file that refers to 'func', and compiles and links it.
If everything succeeds, returns true; otherwise returns false.
The constructed source file starts out by including the header
files listed in 'headers'. If 'decl' is true, it then declares
'func' (as "int func()"); you probably shouldn't supply 'headers'
and set 'decl' true in the same call, or you might get errors about
        conflicting declarations of 'func'. Finally, the constructed
'main()' function either references 'func' or (if 'call' is true)
calls it. 'libraries' and 'library_dirs' are used when
linking.
"""
self._check_compiler()
body = []
if decl:
body.append("int %s ();" % func)
body.append("int main () {")
if call:
body.append(" %s();" % func)
else:
body.append(" %s;" % func)
body.append("}")
body = "\n".join(body) + "\n"
return self.try_link(body, headers, include_dirs, libraries, library_dirs)
def check_lib(
self,
library,
library_dirs=None,
headers=None,
include_dirs=None,
other_libraries=[],
):
"""Determine if 'library' is available to be linked against,
without actually checking that any particular symbols are provided
by it. 'headers' will be used in constructing the source file to
be compiled, but the only effect of this is to check if all the
header files listed are available. Any libraries listed in
'other_libraries' will be included in the link, in case 'library'
has symbols that depend on other libraries.
"""
self._check_compiler()
return self.try_link(
"int main (void) { }",
headers,
include_dirs,
[library] + other_libraries,
library_dirs,
)
def check_header(self, header, include_dirs=None, library_dirs=None, lang="c"):
"""Determine if the system header file named by 'header_file'
exists and can be found by the preprocessor; return true if so,
false otherwise.
"""
return self.try_cpp(
body="/* No body */", headers=[header], include_dirs=include_dirs
)
def dump_file(filename, head=None):
"""Dumps a file content into log.info.
If head is not None, will be dumped before the file content.
"""
if head is None:
log.info("%s", filename)
else:
log.info(head)
file = open(filename)
try:
log.info(file.read())
finally:
file.close()
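# Hedged sketch (illustrative only): a minimal subclass of the config command
# above, using its high-level helpers to probe for a header and a function.
class example_config(config):
    def run(self):
        if self.check_header("stdio.h"):
            log.info("stdio.h found")
        if self.check_func("printf", headers=["stdio.h"], call=1):
            log.info("printf is callable")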
|
import _plotly_utils.basevalidators
class HoverinfosrcValidator(_plotly_utils.basevalidators.SrcValidator):
def __init__(self, plotly_name="hoverinfosrc", parent_name="splom", **kwargs):
super(HoverinfosrcValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "none"),
**kwargs,
)
|
from PIL import Image
from datetime import datetime
import sys
import base64
from io import BytesIO
import platform
import urllib.parse
IMG_FOLDER = ''
if platform.system() == 'Linux':
IMG_FOLDER = 'images/'
elif platform.system() == 'Windows':
IMG_FOLDER = '.\\images\\'
def get_base64_image(filename: str = '.\\images\\face_dither.png') -> str:
try:
encoded_image = b''
image_format = ''
with Image.open(filename) as image:
image_format = image.format
# print(f'Format is: {image_format}')
# print(f'Mode is: {image.mode}')
buffer = BytesIO()
image.save(buffer, image.format)
image_bytes = buffer.getvalue()
encoded_image = base64.b64encode(image_bytes)
# ****** Below is simply for testing if the image ******
# data stored in the file is correct or not.
# ------------------------------------------------------
# image_buffer = BytesIO(base64.b64decode(encoded_image))
# with Image.open(image_buffer) as fil_image:
# new_filename = 'Robert' + datetime.now().strftime('_%Y%m%d_%H%M%S') \
# + '.' + image_format.lower()
# fil_image.save(IMG_FOLDER + new_filename, image_format)
# ------------------------------------------------------
print(f'The Base64 image = {urllib.parse.quote(encoded_image.decode())}')
return encoded_image.decode()
except Exception as ex:
print(f'No image found: {ex}')
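# Hedged companion sketch (illustrative): decode a string produced by
# get_base64_image() back into a Pillow Image, mirroring the commented-out
# round-trip test above.
def decode_base64_image(encoded: str) -> Image.Image:
    return Image.open(BytesIO(base64.b64decode(encoded)))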
if __name__ == '__main__':
if len(sys.argv) == 2:
# print(f'The param = {sys.argv[1]}')
get_base64_image(sys.argv[1])
else:
get_base64_image()
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# -------------------------------------------------------------------
# Date: 2021/01/06
# Author: Shaun
# File description:
#   Automatically fetch the machine status and update the database; used with
#   a bash script registered in the system scheduler via crontab -e
# -------------------------------------------------------------------
import socket
# import pymysql
import os
import time
from datetime import datetime
# cnc_config = [('cnc27', "192.168.3.27"), ('cnc28', "192.168.3.28"), ('cnc29', "192.168.3.29"), ('cnc43', "192.168.3.43"),
# ('cnc44', "192.168.3.44"), ('cnc45', "192.168.3.45"), ('cnc46', "192.168.3.46")]
# cnc_config = [('cnc27', "192.168.3.27"), ('cnc28', "192.168.3.28"), ('cnc29', "192.168.3.29"), ('cnc46', "192.168.3.46")]
cnc_config = [('cnc46', "192.168.3.46")]
def get_from_brother(ip='127.0.0.1', port=10000):
client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
client.settimeout(10)
try:
client.connect((ip, port))
        # Get the workpiece count
        # instruct = '%CLOD WKCNTR ' + os.linesep + '00%'
instruct = '%CLOD WKCNTR 00\r\n%'
# instruct = '%CLOD PRD3 00\r\n%'
# instruct = '%CIOCREF GRN 00\r\n%'
client.send(instruct.encode())
# lines = client.recv(3096).decode().split(os.linesep)
lines = client.recv(1500).decode()
# arr= [line.strip() for line in lines]
# n=0
# for e in arr:
# v1=e.split(',')
# if n>1:
# # v1[1]=datetime.fromtimestamp(int(v1[1]) / 1e3)
# v1[1]=datetime.fromtimestamp(int(v1[1]))
# # date=v1[1]
# n+=1
# print(v1)
# print(lines)
# lines = client.recv(1024).decode()
print(lines)
lines = lines.split(os.linesep)
        lines = [line for line in lines if line.startswith('A01')]  # keep only lines starting with 'A01'
        fields = lines[0].split(',')  # split into fields; the third field is the target [workpiece count]
        parts = int(fields[2].strip())
        print('Workpiece count:', int(fields[2].strip()), '\n')
        # Get the machine status
# instruct = '%CLOD WKCNTR 00\r\n%'
instruct = '%CLOD PRD3 00\r\n%'
client.sendall(instruct.encode())
flag = True
data=''
while flag:
lines = client.recv(1500).decode()
# print('len:',len(lines),lines)
data+=lines
if lines[-1]=='%':
flag = False
log=data.split('\n')
# print(data,'len:',len(data))
for i in range(10):
print(log[i])
return parts
except Exception as e:
print(ip, e)
return -1
finally:
client.close()
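# Hedged sketch (illustrative only): the response-parsing step used in
# get_from_brother() pulled out as a standalone helper. It assumes the same
# 'A01,...' line format where the third comma-separated field is the
# workpiece count.
def parse_workpiece_count(response: str) -> int:
    a01_lines = [line for line in response.split(os.linesep) if line.startswith('A01')]
    fields = a01_lines[0].split(',')
    return int(fields[2].strip())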
# def save_db(name='J44', qty=-1):
#     try:
#         conn = pymysql.Connect(user='root', password='1234', database='dademes', charset='utf8')
#         cus = conn.cursor()
#         if qty == -1:
#             cus.execute('update kbequipment set running=%s where name=%s', ('Off', name))
#         else:
#             cus.execute('update kbequipment set running=%s, status=%s where name=%s', ('Normal', qty, name))
#         conn.commit()
#         cus.close()
#         conn.close()
#     except Exception as e:
#         print('Machine name=%s failed to save data, %s' % (name, e))
if __name__ == '__main__':
try:
for cnc_name, ip in cnc_config:
            print('Reading machine: name=%s, ip=%s' % (cnc_name, ip))
qty = get_from_brother(ip=ip)
print(qty)
# save_db(qty=qty, name=cnc_name)
except Exception as e:
print('__main__', e)
finally:
        print('CNC data read complete... reading again in 30 seconds...')
# time.sleep(10)
|
"""
Package defining various dynamic forward models as well as convenience methods to generate the
right hand sides (RHS) of the related partial differential equations.
Currently, the following forward models are implemented:
#. An advection equation for images
#. An advection equation for maps
#. The EPDiff-equation parameterized using the vector-valued momentum for images
#. The EPDiff-equation parameterized using the vector-valued momentum for maps
#. The EPDiff-equation parameterized using the scalar-valued momentum for images
#. The EPDiff-equation parameterized using the scalar-valued momentum for maps
The images are expected to be tensors of dimension: BxCxXxYxZ (or BxCxX in 1D and BxCxXxY in 2D),
where B is the batch-size, C the number of channels, and X, Y, and Z are the spatial coordinate indices.
Furthermore, the following RHSs are provided:
#. Image advection
#. Map advection
#. Scalar conservation law
#. EPDiff
"""
from __future__ import print_function
from __future__ import absolute_import
from builtins import range
from builtins import object
from abc import ABCMeta, abstractmethod
import numpy as np
from . import finite_differences_multi_channel as fdm
from . import utils
from .data_wrapper import MyTensor
from future.utils import with_metaclass
import torch.nn as nn
import torch
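# Hedged, self-contained sketch (not part of this module): the 1D image
# advection right-hand side -I_x * v computed with a simple central difference
# and replicated boundaries, to illustrate the quantity that
# RHSLibrary.rhs_advect_image_multiNC() below returns for dim == 1. The real
# implementation uses the multi-channel finite-difference helpers imported above.
def _toy_rhs_advect_image_1d(I, v, dx):
    # replicate the boundary values (Neumann-like behaviour)
    I_pad = torch.cat([I[..., :1], I, I[..., -1:]], dim=-1)
    # central difference approximation of dI/dx
    dIdx = (I_pad[..., 2:] - I_pad[..., :-2]) / (2.0 * dx)
    return -dIdx * v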
class RHSLibrary(object):
"""
Convenience class to quickly generate various right hand sides (RHSs) of popular partial differential
equations. In this way new forward models can be written with minimal code duplication.
"""
def __init__(self, spacing, use_neumann_BC_for_map=False):
"""
Constructor
:param spacing: Spacing for the images. This will be an array with 1, 2, or 3 entries in 1D, 2D, and 3D respectively.
"""
self.spacing = spacing
"""spatial spacing"""
self.spacing_min = np.min(spacing)
""" min of the spacing"""
self.spacing_ratio = spacing/self.spacing_min
self.fdt_ne = fdm.FD_torch_multi_channel(spacing,mode='neumann_zero')
"""torch finite differencing support neumann zero"""
self.fdt_le = fdm.FD_torch_multi_channel( spacing, mode='linear')
"""torch finite differencing support linear extrapolation"""
self.fdt_di = fdm.FD_torch_multi_channel(spacing, mode='dirichlet_zero')
"""torch finite differencing support dirichlet zero"""
self.dim = len(self.spacing)
"""spatial dimension"""
self.use_neumann_BC_for_map = use_neumann_BC_for_map
"""If True uses zero Neumann boundary conditions also for evolutions of the map, if False uses linear extrapolation"""
def rhs_advect_image_multiNC(self,I,v):
'''
Advects a batch of images which can be multi-channel. Expected image format here, is
BxCxXxYxZ, where B is the number of images (batch size), C, the number of channels
per image and X, Y, Z are the spatial coordinates (X only in 1D; X,Y only in 2D)
:math:`-\\nabla I^Tv`
:param I: Image batch BxCIxXxYxZ
:param v: Velocity fields (this will be one velocity field per image) BxCxXxYxZ
:return: Returns the RHS of the advection equations involved BxCxXxYxZ
'''
rhs_ret= self._rhs_advect_image_multiN(I, v )
return rhs_ret
def _rhs_advect_image_multiN(self,I,v):
"""
:param I: One-channel input image: Bx1xXxYxZ
:param v: velocity field BxCxXxYxZ
:return: Returns the RHS of the advection equation for one channel BxXxYxZ
"""
if self.dim == 1:
rhs_ret = -self.fdt_ne.dXc(I) * v[:,0:1]
elif self.dim == 2:
rhs_ret = -self.fdt_ne.dXc(I) * v[:,0:1] -self.fdt_ne.dYc(I)*v[:,1:2]
elif self.dim == 3:
rhs_ret = -self.fdt_ne.dXc(I) * v[:,0:1] -self.fdt_ne.dYc(I)*v[:,1:2]-self.fdt_ne.dZc(I)*v[:,2:3]
else:
raise ValueError('Only supported up to dimension 3')
return rhs_ret
def rhs_scalar_conservation_multiNC(self, I, v):
"""
Scalar conservation law for a batch of images which can be multi-channel. Expected image format here, is
BxCxXxYxZ, where B is the number of images (batch size), C, the number of channels
per image and X, Y, Z are the spatial coordinates (X only in 1D; X,Y only in 2D)
:math:`-div(Iv)`
:param I: Image batch BxCIxXxYxZ
:param v: Velocity fields (this will be one velocity field per image) BxCxXxYxZ
:return: Returns the RHS of the scalar conservation law equations involved BxCxXxYxZ
"""
rhs_ret=self._rhs_scalar_conservation_multiN(I, v)
return rhs_ret
def _rhs_scalar_conservation_multiN(self, I, v):
"""
:param I: One-channel input image: Bx1xXxYxZ
:param v: velocity field BxCxXxYxZ
:return: Returns the RHS of the scalar-conservation law equation for one channel BxXxYxZ
"""
if self.dim==1:
rhs_ret = -self.fdt_ne.dXc(I*v[:,0:1])
elif self.dim==2:
rhs_ret = -self.fdt_ne.dXc(I*v[:,0:1]) -self.fdt_ne.dYc(I*v[:,1:2])
elif self.dim==3:
rhs_ret = -self.fdt_ne.dXc(I* v[:,0:1]) -self.fdt_ne.dYc(I*v[:,1:2])-self.fdt_ne.dZc(I*v[:,2:3])
else:
raise ValueError('Only supported up to dimension 3')
return rhs_ret
def rhs_lagrangian_evolve_map_multiNC(self, phi, v):
"""
Evolves a set of N maps (for N images). Expected format here, is
BxCxXxYxZ, where B is the number of images/maps (batch size), C, the number of channels
per (here the spatial dimension for the map coordinate functions),
and X, Y, Z are the spatial coordinates (X only in 1D; X,Y only in 2D).
        This is used to evolve the map going from source to target image. Requires interpolation,
        so it should, if at all possible, not be used as part of an optimization.
        The idea behind computing the inverse map is that the map is defined in the source space
        and records where each point moves to (whereas the target-space map records where each
        point came from). In this situation we only need to sample the velocity at that location
        and accumulate it over the time steps; since the advection function moves the image (or the
        map) by v, and v is shared across coordinates, it is safe to compute the inverse this way.
:math:`v\circ\phi`
:param phi: map batch BxCxXxYxZ
:param v: Velocity fields (this will be one velocity field per map) BxCxXxYxZ
:return: Returns the RHS of the evolution equations involved BxCxXxYxZ
:param phi:
:param v:
:return:
"""
rhs_ret = utils.compute_warped_image_multiNC(v, phi, spacing=self.spacing, spline_order=1,zero_boundary=False)
return rhs_ret
def rhs_advect_map_multiNC(self, phi, v):
'''
Advects a set of N maps (for N images). Expected format here, is
BxCxXxYxZ, where B is the number of images/maps (batch size), C, the number of channels
per (here the spatial dimension for the map coordinate functions),
and X, Y, Z are the spatial coordinates (X only in 1D; X,Y only in 2D)
:math:`-D\\phi v`
:param phi: map batch BxCxXxYxZ
:param v: Velocity fields (this will be one velocity field per map) BxCxXxYxZ
:return: Returns the RHS of the advection equations involved BxCxXxYxZ
'''
sz = phi.size()
rhs_ret = self._rhs_advect_map_call(phi, v)
return rhs_ret
def _rhs_advect_map_call(self,phi,v):
"""
:param phi: map batch BxCxXxYxZ
:param v: Velocity fields (this will be one velocity field per map) BxCxXxYxZ
:return rhsphi: Returns the RHS of the advection equations involved BxCxXxYxZ
"""
        fdc = self.fdt_le  # use linear extrapolation boundary conditions (interpolation)
if self.dim==1:
dxc_phi = -fdc.dXc(phi)
rhsphi = v[:, 0:1] * dxc_phi
elif self.dim==2:
dxc_phi = -fdc.dXc(phi)
dyc_phi = -fdc.dYc(phi)
rhsphi = v[:, 0:1] * dxc_phi + v[:, 1:2] * dyc_phi
elif self.dim==3:
dxc_phi = -fdc.dXc(phi)
dyc_phi = -fdc.dYc(phi)
dzc_phi = -fdc.dZc(phi)
rhsphi = v[:,0:1]*dxc_phi + v[:,1:2]*dyc_phi + v[:,2:3]*dzc_phi
else:
raise ValueError('Only supported up to dimension 3')
return rhsphi
def rhs_epdiff_multiNC(self, m, v):
'''
Computes the right hand side of the EPDiff equation for of N momenta (for N images).
Expected format here, is BxCxXxYxZ, where B is the number of momenta (batch size), C,
the number of channels per (here the spatial dimension for the momenta),
and X, Y, Z are the spatial coordinates (X only in 1D; X,Y only in 2D)
a new version, where batch is no longer calculated separately
:math:`-(div(m_1v),...,div(m_dv))^T-(Dv)^Tm`
:param m: momenta batch BxCXxYxZ
:param v: Velocity fields (this will be one velocity field per momentum) BxCXxYxZ
:return: Returns the RHS of the EPDiff equations involved BxCXxYxZ
'''
sz = m.size()
rhs_ret = MyTensor(sz).zero_()
rhs_ret = self._rhs_epdiff_call(m, v, rhs_ret)
return rhs_ret
def _rhs_epdiff_call(self, m, v,rhsm):
"""
:param m: momenta batch BxCxXxYxZ
:param v: Velocity fields (this will be one velocity field per momentum) BxCxXxYxZ
:return rhsm: Returns the RHS of the EPDiff equations involved BxCxXxYxZ
"""
# if self.use_neumann_BC_for_map:
# fdc = self.fdt_ne # use zero Neumann boundary conditions
# else:
# fdc = self.fdt_le # do linear extrapolation
fdc = self.fdt_ne
#fdc = self.fdt_le
if self.dim == 1:
dxc_mv0 = -fdc.dXc(m*v[:,0:1])
dxc_v = -fdc.dXc(v)
dxc_v_multi_m = dxc_v * m
rhsm[:]= dxc_mv0 + dxc_v_multi_m
elif self.dim == 2:
# (m_1,...,m_d)^T_t = -(div(m_1v),...,div(m_dv))^T-(Dv)^Tm (EPDiff equation)
dxc_mv0 = -fdc.dXc(m*v[:,0:1])
dyc_mv1 = -fdc.dYc(m*v[:,1:2])
dc_mv_sum = dxc_mv0 + dyc_mv1
dxc_v = -fdc.dXc(v)
dyc_v = -fdc.dYc(v)
dxc_v_multi_m = dxc_v * m
dyc_v_multi_m = dyc_v * m
dxc_v_multi_m_sum = torch.sum(dxc_v_multi_m, 1)
dyc_v_multi_m_sum = torch.sum(dyc_v_multi_m, 1)
rhsm[:,0, :, :] = dc_mv_sum[:,0] + dxc_v_multi_m_sum
rhsm[:,1, :, :] = dc_mv_sum[:,1] + dyc_v_multi_m_sum
elif self.dim == 3:
dxc_mv0 = -fdc.dXc(m*v[:,0:1])
dyc_mv1 = -fdc.dYc(m*v[:,1:2])
dzc_mv2 = -fdc.dZc(m*v[:,2:3])
dc_mv_sum = dxc_mv0 + dyc_mv1 + dzc_mv2
dxc_v = -fdc.dXc(v)
dyc_v = -fdc.dYc(v)
dzc_v = -fdc.dZc(v)
dxc_v_multi_m = dxc_v*m
dyc_v_multi_m = dyc_v*m
dzc_v_multi_m = dzc_v*m
dxc_v_multi_m_sum = torch.sum(dxc_v_multi_m,1)
dyc_v_multi_m_sum = torch.sum(dyc_v_multi_m,1)
dzc_v_multi_m_sum = torch.sum(dzc_v_multi_m,1)
rhsm[:, 0] = dc_mv_sum[:,0] + dxc_v_multi_m_sum
rhsm[:, 1] = dc_mv_sum[:,1] + dyc_v_multi_m_sum
rhsm[:, 2] = dc_mv_sum[:,2] + dzc_v_multi_m_sum
else:
            raise ValueError('Only supported up to dimension 3')
return rhsm
def rhs_adapt_epdiff_wkw_multiNC(self, m, v,w, sm_wm,smoother):
'''
Computes the right hand side of the EPDiff equation for of N momenta (for N images).
Expected format here, is BxCxXxYxZ, where B is the number of momenta (batch size), C,
the number of channels per (here the spatial dimension for the momenta),
and X, Y, Z are the spatial coordinates (X only in 1D; X,Y only in 2D)
a new version, where batch is no longer calculated separately
:math:`-(div(m_1v),...,div(m_dv))^T-(Dv)^Tm`
:param m: momenta batch BxCXxYxZ
:param v: Velocity fields (this will be one velocity field per momentum) BxCXxYxZ
:return: Returns the RHS of the EPDiff equations involved BxCXxYxZ
'''
sz = m.size()
rhs_ret = MyTensor(sz).zero_()
rhs_ret = self._rhs_adapt_epdiff_wkw_call(m, v,w,sm_wm,smoother, rhs_ret)
return rhs_ret
def _rhs_adapt_epdiff_wkw_call(self, m, v,w,sm_wm, smoother, rhsm):
"""
:param m: momenta batch BxCxXxYxZ
:param sm_wm: smoothed(wm) batch x K x dim x X x Y x ...
:param w: smoothed(wm) batch x K x X x Y x ...
:param v: Velocity fields (this will be one velocity field per momentum) BxCxXxYxZ
:return rhsm: Returns the RHS of the EPDiff equations involved BxCxXxYxZ
"""
# if self.use_neumann_BC_for_map:
# fdc = self.fdt_ne # use zero Neumann boundary conditions
# else:
# fdc = self.fdt_le # do linear extrapolation
fdc = self.fdt_ne
rhs = self._rhs_epdiff_call(m,v,rhsm)
ret_var = torch.empty_like(rhs)
# ret_var, rhs should batch x dim x X x Yx ..
dim = m.shape[1]
sz = [m.shape[0]]+[1]+list(m.shape[1:]) # batchx1xdimx X x Y
m = m.view(*sz)
m_sm_wm = m* sm_wm
m_sm_wm = m_sm_wm.sum(dim=2)
sm_m_sm_wm = smoother.smooth(m_sm_wm) # batchx K x X xY...
dxc_w = fdc.dXc(w)
dc_w_list = [dxc_w]
if dim == 2 or dim == 3:
dyc_w = fdc.dYc(w)
dc_w_list.append(dyc_w)
if dim == 3:
dzc_w = fdc.dZc(w) # batch x K x X xY ...
dc_w_list.append(dzc_w)
for i in range(dim):
ret_var[:, i] = rhs[:, i] + (sm_m_sm_wm* dc_w_list[i]).sum(1)
return ret_var
class ForwardModel(with_metaclass(ABCMeta, object)):
"""
Abstract forward model class. Should never be instantiated.
Derived classes require the definition of f(self,t,x,u,pars) and u(self,t,pars).
These functions will be used for integration: x'(t) = f(t,x(t),u(t))
"""
def __init__(self, sz, spacing, params=None):
'''
Constructor of abstract forward model class
:param sz: size of images
:param spacing: numpy array for spacing in x,y,z directions
'''
self.dim = spacing.size # spatial dimension of the problem
"""spatial dimension"""
self.spacing = spacing
"""spatial spacing"""
self.sz = sz
"""image size (BxCxXxYxZ)"""
self.params = params
"""ParameterDict instance holding parameters"""
self.rhs = RHSLibrary(self.spacing)
"""rhs library support"""
if self.dim>3 or self.dim<1:
raise ValueError('Forward models are currently only supported in dimensions 1 to 3')
self.debug_mode_on =False
@abstractmethod
def f(self,t,x,u,pars,variables_from_optimizer=None):
"""
Function to be integrated
:param t: time
:param x: state
:param u: input
:param pars: optional parameters
:param variables_from_optimizer: variables that can be passed from the optimizer
:return: the function value, should return a list (to support easy concatenations of states)
"""
pass
def u(self,t,pars,variables_from_optimizer=None):
"""
External input
:param t: time
:param pars: parameters
:param variables_from_optimizer: variables that can be passed from the optimizer
:return: the external input
"""
return []
class AdvectMap(ForwardModel):
"""
Forward model to advect an n-D map using a transport equation: :math:`\\Phi_t + D\\Phi v = 0`.
v is treated as an external argument and \Phi is the state
"""
def __init__(self, sz, spacing, params=None,compute_inverse_map=False):
super(AdvectMap,self).__init__(sz,spacing,params)
self.compute_inverse_map = compute_inverse_map
"""If True then computes the inverse map on the fly for a map-based solution"""
def u(self,t, pars, variables_from_optimizer=None):
"""
External input, to hold the velocity field
:param t: time (ignored; not time-dependent)
:param pars: assumes an n-D velocity field is passed as the only input argument
:param variables_from_optimizer: variables that can be passed from the optimizer
:return: Simply returns this velocity field
"""
return pars['v']
def f(self,t, x, u, pars=None, variables_from_optimizer=None):
"""
Function to be integrated, i.e., right hand side of transport equation:
:math:`-D\\phi v`
:param t: time (ignored; not time-dependent)
:param x: state, here the map, \Phi, itself (assumes 3D-5D array; [nrI,0,:,:] x-coors; [nrI,1,:,:] y-coors; ...
:param u: external input, will be the velocity field here
:param pars: ignored (does not expect any additional inputs)
:param variables_from_optimizer: variables that can be passed from the optimizer
:return: right hand side [phi]
"""
if self.compute_inverse_map:
return [self.rhs.rhs_advect_map_multiNC(x[0], u),self.rhs.rhs_lagrangian_evolve_map_multiNC(x[1], u)]
else:
return [self.rhs.rhs_advect_map_multiNC(x[0],u)]
class AdvectImage(ForwardModel):
"""
Forward model to advect an image using a transport equation: :math:`I_t + \\nabla I^Tv = 0`.
v is treated as an external argument and I is the state
"""
def __init__(self, sz, spacing, params=None):
super(AdvectImage, self).__init__(sz, spacing,params)
def u(self,t, pars, variables_from_optimizer=None):
"""
External input, to hold the velocity field
:param t: time (ignored; not time-dependent)
:param pars: assumes an n-D velocity field is passed as the only input argument
:param variables_from_optimizer: variables that can be passed from the optimizer
:return: Simply returns this velocity field
"""
return pars['v']
def f(self,t, x, u, pars=None, variables_from_optimizer=None):
"""
Function to be integrated, i.e., right hand side of transport equation: :math:`-\\nabla I^T v`
:param t: time (ignored; not time-dependent)
:param x: state, here the image, I, itself (supports multiple images and channels)
:param u: external input, will be the velocity field here
:param pars: ignored (does not expect any additional inputs)
:param variables_from_optimizer: variables that can be passed from the optimizer
:return: right hand side [I]
"""
return [self.rhs.rhs_advect_image_multiNC(x[0],u)]
class EPDiffImage(ForwardModel):
"""
Forward model for the EPdiff equation. State is the momentum, m, and the image I:
:math:`(m_1,...,m_d)^T_t = -(div(m_1v),...,div(m_dv))^T-(Dv)^Tm`
:math:`v=Km`
:math:`I_t+\\nabla I^Tv=0`
"""
def __init__(self, sz, spacing, smoother, params=None):
super(EPDiffImage, self).__init__(sz, spacing,params)
self.smoother = smoother
def f(self,t, x, u, pars=None, variables_from_optimizer=None):
"""
Function to be integrated, i.e., right hand side of the EPDiff equation:
:math:`-(div(m_1v),...,div(m_dv))^T-(Dv)^Tm`
:math:`-\\nabla I^Tv`
:param t: time (ignored; not time-dependent)
:param x: state, here the vector momentum, m, and the image, I
:param u: ignored, no external input
:param pars: ignored (does not expect any additional inputs)
:param variables_from_optimizer: variables that can be passed from the optimizer
:return: right hand side [m,I]
"""
# assume x[0] is m and x[1] is I for the state
m = x[0]
I = x[1]
v = self.smoother.smooth(m,None,utils.combine_dict(pars,{'I': I}),variables_from_optimizer)
# print('max(|v|) = ' + str( v.abs().max() ))
return [self.rhs.rhs_epdiff_multiNC(m,v), self.rhs.rhs_advect_image_multiNC(I,v)]
class EPDiffMap(ForwardModel):
"""
Forward model for the EPDiff equation. State is the momentum, m, and the transform, :math:`\\phi`
(mapping the source image to the target image).
:math:`(m_1,...,m_d)^T_t = -(div(m_1v),...,div(m_dv))^T-(Dv)^Tm`
:math:`v=Km`
:math:`\\phi_t+D\\phi v=0`
"""
def __init__(self, sz, spacing, smoother, params=None,compute_inverse_map=False):
super(EPDiffMap, self).__init__(sz,spacing,params)
self.compute_inverse_map = compute_inverse_map
"""If True then computes the inverse map on the fly for a map-based solution"""
self.smoother = smoother
self.use_net = True if self.params['smoother']['type'] == 'adaptiveNet' else False
def debugging(self,input,t):
x = utils.checkNan(input)
if np.sum(x):
print("find nan at {} step".format(t))
print("flag m: {}, ".format(x[0]))
print("flag v: {},".format(x[1]))
print("flag phi: {},".format(x[2]))
print("flag new_m: {},".format(x[3]))
print("flag new_phi: {},".format(x[4]))
raise ValueError("nan error")
def f(self,t, x, u, pars=None, variables_from_optimizer=None):
"""
Function to be integrated, i.e., right hand side of the EPDiff equation:
:math:`-(div(m_1v),...,div(m_dv))^T-(Dv)^Tm'
:math:`-D\\phi v`
:param t: time (ignored; not time-dependent)
:param x: state, here the image, vector momentum, m, and the map, :math:`\\phi`
:param u: ignored, no external input
:param pars: ignored (does not expect any additional inputs)
:param variables_from_optimizer: variables that can be passed from the optimizer
:return: right hand side [m,phi]
"""
# assume x[0] is m and x[1] is phi for the state
m = x[0]
m = m.clamp(max=1., min=-1.)
phi = x[1]
if self.compute_inverse_map:
phi_inv = x[2]
if not self.use_net:
v = self.smoother.smooth(m,None,utils.combine_dict(pars,{'phi':phi}),variables_from_optimizer)
else:
v = self.smoother.adaptive_smooth(m, phi, using_map=True)
# print('max(|v|) = ' + str( v.abs().max() ))
if self.compute_inverse_map:
ret_val= [self.rhs.rhs_epdiff_multiNC(m,v),
self.rhs.rhs_advect_map_multiNC(phi,v),
self.rhs.rhs_lagrangian_evolve_map_multiNC(phi_inv,v)]
else:
new_m = self.rhs.rhs_epdiff_multiNC(m,v)
new_phi = self.rhs.rhs_advect_map_multiNC(phi,v)
ret_val= [new_m, new_phi]
return ret_val
class EPDiffAdaptMap(ForwardModel):
"""
Forward model for the EPDiff equation. State is the momentum, m, and the transform, :math:`\\phi`
(mapping the source image to the target image).
:math:`(m_1,...,m_d)^T_t = -(div(m_1v),...,div(m_dv))^T-(Dv)^Tm`
:math:`v=Km`
:math:`\\phi_t+D\\phi v=0`
"""
def __init__(self, sz, spacing, smoother, params=None, compute_inverse_map=False, update_sm_by_advect= True, update_sm_with_interpolation=True,compute_on_initial_map=True):
super(EPDiffAdaptMap, self).__init__(sz, spacing, params)
from . import module_parameters as pars
from . import smoother_factory as sf
self.compute_inverse_map = compute_inverse_map
"""If True then computes the inverse map on the fly for a map-based solution"""
self.smoother = smoother
self.update_sm_by_advect = update_sm_by_advect
self.use_the_first_step_penalty = True
self.update_sm_with_interpolation = update_sm_with_interpolation
self.compute_on_initial_map=compute_on_initial_map
self.update_sm_weight=None
self.velocity_mask = None
self.debug_mode_on = False
s_m_params = pars.ParameterDict()
s_m_params['smoother']['type'] = 'gaussian'
s_m_params['smoother']['gaussian_std'] =self.params['smoother']['deep_smoother']['deep_network_local_weight_smoothing']
self.embedded_smoother = sf.SmootherFactory(sz[2:], spacing).create_smoother(
s_m_params)
""" if only take the first step penalty as the total penalty, otherwise accumluate the penalty"""
def debug_nan(self, input, t,name=''):
x = utils.checkNan([input])
if np.sum(x):
# print(input[0])
print("find nan at {} step, {} with number {}".format(t,name,x[0]))
raise ValueError("nan error")
def init_zero_sm_weight(self,sm_weight):
self.update_sm_weight = torch.zeros_like(sm_weight).detach()
def init_velocity_mask(self,velocity_mask):
self.velocity_mask = velocity_mask
def debug_distrib(self,var,name):
var = var.detach().cpu().numpy()
density,_= np.histogram(var,[-100,-10,-1,0,1,10,100],density=True)
print("{} distri:{}".format(name,density))
def f(self, t, x, u, pars=None, variables_from_optimizer=None):
"""
Function to be integrated, i.e., right hand side of the EPDiff equation:
        :math:`-(div(m_1v),...,div(m_dv))^T-(Dv)^Tm`
:math:`-D\\phi v`
:param t: time (ignored; not time-dependent)
:param x: state, here the image, vector momentum, m, and the map, :math:`\\phi`
:param u: ignored, no external input
:param pars: ignored (does not expect any additional inputs)
:param variables_from_optimizer: variables that can be passed from the optimizer
:return: right hand side [m,phi]
"""
# assume x[0] is m and x[1] is phi for the state
m = x[0]
m=m.clamp(max=1., min=-1.)
phi = x[1]
return_val_name = []
sm_weight = None
if self.update_sm_by_advect:
if not self.update_sm_with_interpolation:
sm_weight_pre = x[2]
sm_weight = self.embedded_smoother.smooth(sm_weight_pre)
v, extra_ret = self.smoother.smooth(m, None, {'w':sm_weight},multi_output=True)
if self.velocity_mask is not None:
v = v* self.velocity_mask
new_phi = self.rhs.rhs_advect_map_multiNC(phi, v)
new_sm_weight_pre = self.rhs.rhs_advect_map_multiNC(sm_weight_pre, v)
new_m = self.rhs.rhs_adapt_epdiff_wkw_multiNC(m, v, new_sm_weight_pre, extra_ret,
self.embedded_smoother)
ret_val = [new_m, new_phi,new_sm_weight_pre]
return_val_name =['new_m','new_phi','new_sm_weight']
else:
if self.compute_on_initial_map:
sm_weight = x[2]
sm_phi = x[3]
new_sm_weight = utils.compute_warped_image_multiNC(sm_weight, sm_phi, self.spacing, 1,
zero_boundary=False)
pre_weight = sm_weight
new_sm_weight = self.embedded_smoother.smooth(new_sm_weight)
#print('t{},m min, mean,max {} {} {}'.format(t,m.min().item(),m.mean().item(),m.max().item()))
v,extra_ret = self.smoother.smooth(m,None,{'w': new_sm_weight},multi_output=True)
if self.velocity_mask is not None:
v = v * self.velocity_mask
new_m = self.rhs.rhs_adapt_epdiff_wkw_multiNC(m,v,pre_weight,extra_ret,self.embedded_smoother)
new_phi = self.rhs.rhs_advect_map_multiNC(phi, v)
new_sm_phi = self.rhs.rhs_advect_map_multiNC(sm_phi, v)
new_sm_weight = self.update_sm_weight.detach()
ret_val = [new_m, new_phi,new_sm_weight,new_sm_phi]
return_val_name = ['new_m', 'new_phi', 'new_sm_weight','new_sm_phi']
                else:  # TODO: note that this is the branch currently in use
sm_weight = x[2]
new_sm_weight = utils.compute_warped_image_multiNC(sm_weight, phi, self.spacing, 1,
zero_boundary=False)
pre_weight = sm_weight
new_sm_weight = self.embedded_smoother.smooth(new_sm_weight)
v, extra_ret = self.smoother.smooth(m, None,{'w':new_sm_weight}, multi_output=True)
if self.velocity_mask is not None:
v = v * self.velocity_mask
new_m = self.rhs.rhs_adapt_epdiff_wkw_multiNC(m,v,pre_weight,extra_ret,self.embedded_smoother)
new_phi = self.rhs.rhs_advect_map_multiNC(phi, v)
new_sm_weight = self.update_sm_weight.detach()
ret_val = [new_m, new_phi, new_sm_weight]
return_val_name = ['new_m', 'new_phi', 'new_sm_weight']
else:
if not t==0:
if self.use_the_first_step_penalty:
self.smoother.disable_penalty_computation()
else:
self.smoother.enable_accumulated_penalty()
I = utils.compute_warped_image_multiNC(pars['I0'], phi, self.spacing, 1,zero_boundary=True)
pars['I'] = I.detach() # TODO check whether I should be detached here
v = self.smoother.smooth(m, None, pars, variables_from_optimizer)
if self.velocity_mask is not None:
v = v * self.velocity_mask
new_m = self.rhs.rhs_epdiff_multiNC(m, v)
new_phi = self.rhs.rhs_advect_map_multiNC(phi, v)
ret_val = [new_m, new_phi]
return_val_name =['new_m','new_phi']
if self.debug_mode_on:
toshows = [m, v,phi]+ret_val if sm_weight is None else [m, v,phi]+ret_val +[sm_weight]
name = ['m', 'v','phi']+return_val_name if sm_weight is None else ['m', 'v','phi']+return_val_name +['sm_weight']
for i, toshow in enumerate(toshows):
print('t{},{} min, mean,max {} {} {}'.format(t, name[i], toshow.min().item(), toshow.mean().item(),
toshow.max().item()))
self.debug_distrib(toshow, name[i])
self.debug_nan(toshow,t,name[i])
return ret_val
# print('max(|v|) = ' + str( v.abs().max() ))
class EPDiffScalarMomentum(ForwardModel):
"""
Base class for scalar momentum EPDiff solutions. Defines a smoother that can be commonly used.
"""
def __init__(self, sz, spacing, smoother, params):
super(EPDiffScalarMomentum,self).__init__(sz,spacing,params)
self.smoother = smoother
class EPDiffScalarMomentumImage(EPDiffScalarMomentum):
"""
    Forward model for the scalar momentum EPDiff equation. State is the scalar momentum, lam, and the image I
:math:`(m_1,...,m_d)^T_t = -(div(m_1v),...,div(m_dv))^T-(Dv)^Tm`
:math:`v=Km`
    :math:`m=\\lambda\\nabla I`
:math:`I_t+\\nabla I^Tv=0`
:math:`\\lambda_t + div(\\lambda v)=0`
"""
def __init__(self, sz, spacing, smoother, params=None):
super(EPDiffScalarMomentumImage, self).__init__(sz, spacing, smoother, params)
def f(self, t, x, u, pars=None, variables_from_optimizer=None):
"""
Function to be integrated, i.e., right hand side of the EPDiff equation:
:math:`-(div(m_1v),...,div(m_dv))^T-(Dv)^Tm`
:math:`-\\nabla I^Tv`
        :math:`-div(\\lambda v)`
:param t: time (ignored; not time-dependent)
:param x: state, here the scalar momentum, lam, and the image, I, itself
:param u: no external input
:param pars: ignored (does not expect any additional inputs)
:param variables_from_optimizer: variables that can be passed from the optimizer
:return: right hand side [lam,I]
"""
# assume x[0] is \lambda and x[1] is I for the state
lam = x[0]
I = x[1]
# now compute the momentum
m = utils.compute_vector_momentum_from_scalar_momentum_multiNC(lam, I, self.sz, self.spacing)
v = self.smoother.smooth(m,None,utils.combine_dict(pars,{'I':I}),variables_from_optimizer)
# advection for I, scalar-conservation law for lam
return [self.rhs.rhs_scalar_conservation_multiNC(lam, v), self.rhs.rhs_advect_image_multiNC(I, v)]
class EPDiffScalarMomentumMap(EPDiffScalarMomentum):
"""
Forward model for the scalar momentum EPDiff equation. State is the scalar momentum, lam, the image, I, and the transform, phi.
:math:`(m_1,...,m_d)^T_t = -(div(m_1v),...,div(m_dv))^T-(Dv)^Tm`
:math:`v=Km`
:math:`m=\\lambda\\nabla I`
:math:`I_t+\\nabla I^Tv=0`
:math:`\\lambda_t + div(\\lambda v)=0`
:math:`\\Phi_t+D\\Phi v=0`
"""
def __init__(self, sz, spacing, smoother, params=None, compute_inverse_map=False):
super(EPDiffScalarMomentumMap, self).__init__(sz,spacing, smoother, params)
self.compute_inverse_map = compute_inverse_map
"""If True then computes the inverse map on the fly for a map-based solution"""
def f(self,t, x, u, pars=None, variables_from_optimizer=None):
"""
Function to be integrated, i.e., right hand side of the EPDiff equation:
:math:`-(div(m_1v),...,div(m_dv))^T-(Dv)^Tm`
:math:`-\\nabla I^Tv`
:math:`-div(\\lambda v)`
:math:`-D\\Phi v`
:param t: time (ignored; not time-dependent)
:param x: state, here the scalar momentum, lam, the image, I, and the transform, :math:`\\phi`
:param u: ignored, no external input
:param pars: ignored (does not expect any additional inputs)
:param variables_from_optimizer: variables that can be passed from the optimizer
:return: right hand side [lam,I,phi]
"""
# assume x[0] is lam and x[1] is I and x[2] is phi for the state
lam = x[0]
I = x[1]
phi = x[2]
if self.compute_inverse_map:
phi_inv = x[3]
# now compute the momentum
m = utils.compute_vector_momentum_from_scalar_momentum_multiNC(lam, I, self.sz, self.spacing)
# todo: replace this by phi again
#v = self.smoother.smooth(m,None,[phi,True],variables_from_optimizer)
v = self.smoother.smooth(m,None,utils.combine_dict(pars,{'I':I}),variables_from_optimizer)
if self.compute_inverse_map:
ret_val = [self.rhs.rhs_scalar_conservation_multiNC(lam,v),
self.rhs.rhs_advect_image_multiNC(I,v),
self.rhs.rhs_advect_map_multiNC(phi,v),
self.rhs.rhs_lagrangian_evolve_map_multiNC(phi_inv,v)]
else:
ret_val = [self.rhs.rhs_scalar_conservation_multiNC(lam,v),
self.rhs.rhs_advect_image_multiNC(I,v),
self.rhs.rhs_advect_map_multiNC(phi,v)]
return ret_val
|
from collections.abc import Mapping
from datetime import datetime
import logging
from typing import Optional
from stackdriver_log_formatter.serializer import DefaultFunc, dumps
class StackdriverLogFormatter(logging.Formatter):
"""Log formatter suitable for Stackdriver Logging.
    This formatter prints each log record as a single-line JSON object with appropriate fields.
    For detailed information about each field, refer to Stackdriver's API documentation [1]_
and fluent-plugin-google-cloud source [2]_.
References
----------
.. [1]: https://cloud.google.com/logging/docs/reference/v2/rest/v2/LogEntry
.. [2]: https://github.com/GoogleCloudPlatform/fluent-plugin-google-cloud
Example
-------
>>> # setup
>>> logging.basicConfig(level=logging.INFO, stream=sys.stdout)
>>> logging.root.handlers[0].setFormatter(StackdriverLogFormatter())
>>> # logging
>>> logger = logging.getLogger(__name__)
>>> logger.info('Hello world')
>>> # With custom fields (shown in 'jsonPayload' in Stackdriver)
    >>> logger.info('bla bla bla', {'customField': 123})
    >>> logger.info('bla bla bla: %(customField)s', {'customField': 123})
>>> # With exception
>>> try:
... 1 / 0
... except Exception:
    ...     logger.exception('Oops, an error occurred!')
"""
DATE_FORMAT = '%Y-%m-%dT%H:%M:%S.%fZ'
    def __init__(self, *, default: Optional[DefaultFunc] = None):
"""Initialize formatter.
Keyword Arguments
-----------------
default: function or None, optional
A function called to serialize non-standard objects.
It should return a json serializable version of the object or raise a TypeError.
"""
        super().__init__()  # initialize the base Formatter state
        self.default = default
def formatTime(self, record: logging.LogRecord, datefmt: Optional[str]=None) -> str:
"""Return the creation time of the specified LogRecord as formatted text.
        The format is always ISO 8601 in UTC ('Z'-suffixed), so the `datefmt` argument is ignored.
        We use `datetime` rather than the `time` module so that subseconds can be included.
"""
return datetime.utcfromtimestamp(record.created).strftime(self.DATE_FORMAT)
def usesTime(self) -> bool:
"""Check if the format uses the creation time of the record.
This is always true.
"""
return True
def format(self, record: logging.LogRecord) -> str:
"""Format the specified record as text.
        This will be a single-line JSON object with appropriate fields.
"""
record.message = record.getMessage()
record.asctime = self.formatTime(record)
if record.exc_info and not record.exc_text:
record.exc_text = self.formatException(record.exc_info)
log_obj = {
'severity': record.levelname,
'time': record.asctime,
'message': record.message,
'logger': record.name,
'module': record.module,
'logging.googleapis.com/sourceLocation': {
'file': record.pathname,
'line': record.lineno,
'function': record.funcName,
},
'process': {
'name': record.processName,
'id': record.process,
},
'thread': {
'name': record.threadName,
'id': record.thread,
},
}
if record.exc_info:
log_obj['exceptionType'] = type(record.exc_info[1]).__name__
if record.exc_text:
log_obj['stackTrace'] = record.exc_text
if record.stack_info:
log_obj['stackInfo'] = self.formatStack(record.stack_info)
if isinstance(record.args, Mapping):
for k, v in record.args.items():
if k in log_obj or k in ('exceptionType', 'stackTrace', 'stackInfo'):
continue
log_obj.setdefault(k, v)
return dumps(log_obj, default=self.default)
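# A hedged usage sketch (not part of the original module): the `default` hook
# lets the formatter serialize objects the json module cannot handle, e.g. sets.
# It assumes the accompanying `dumps` forwards `default` to the JSON encoder.
if __name__ == '__main__':
    import sys

    def _serialize_fallback(obj):
        # Fall back to a sorted list for sets; anything else raises TypeError.
        if isinstance(obj, set):
            return sorted(obj)
        raise TypeError('{} is not JSON serializable'.format(type(obj).__name__))

    logging.basicConfig(level=logging.INFO, stream=sys.stdout)
    logging.root.handlers[0].setFormatter(StackdriverLogFormatter(default=_serialize_fallback))
    logging.getLogger(__name__).info('tags: %(tags)s', {'tags': {'b', 'a'}})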
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This script is used to synthesize generated parts of this library."""
import logging
from pathlib import Path
import subprocess
import synthtool as s
from synthtool.languages import php
from synthtool import _tracked_paths
logging.basicConfig(level=logging.DEBUG)
src = Path(f"../{php.STAGING_DIR}/Redis").resolve()
dest = Path().resolve()
# Added so that we can pass copy_excludes in the owlbot_main() call
_tracked_paths.add(src)
php.owlbot_main(
src=src,
dest=dest,
copy_excludes=[
src / "*/src/V1/CloudRedisClient.php",
src / "*/src/V1beta1/CloudRedisClient.php"
]
)
# document and utilize apiEndpoint instead of serviceAddress
s.replace(
"**/Gapic/*GapicClient.php",
r"'serviceAddress' =>",
r"'apiEndpoint' =>")
s.replace(
"**/Gapic/*GapicClient.php",
r"@type string \$serviceAddress\n\s+\*\s+The address",
r"""@type string $serviceAddress
* **Deprecated**. This option will be removed in a future major release. Please
* utilize the `$apiEndpoint` option instead.
* @type string $apiEndpoint
* The address""")
s.replace(
"**/Gapic/*GapicClient.php",
r"\$transportConfig, and any \$serviceAddress",
r"$transportConfig, and any `$apiEndpoint`")
# V1 is GA, so remove @experimental tags
s.replace(
'src/V1/**/*Client.php',
r'^(\s+\*\n)?\s+\*\s@experimental\n',
'')
# Change the wording for the deprecation warning.
s.replace(
'src/*/*_*.php',
r'will be removed in the next major release',
'will be removed in a future release')
# Fix class references in gapic samples
for version in ['V1', 'V1beta1']:
pathExpr = 'src/' + version + '/Gapic/CloudRedisGapicClient.php'
types = {
'new CloudRedisClient': r'new Google\\Cloud\\Redis\\'+ version + r'\\CloudRedisClient',
'new Instance': r'new Google\\Cloud\\Redis\\' + version + r'\\Instance',
'= Tier::': r'= Google\\Cloud\\Redis\\' + version + r'\\Instance\\Tier::',
'new FieldMask': r'new Google\\Protobuf\\FieldMask',
'new InputConfig': r'new Google\\Cloud\\Redis\\' + version + r'\\InputConfig',
'new OutputConfig': r'new Google\\Cloud\\Redis\\' + version + r'\\OutputConfig',
'= DataProtectionMode': r'= Google\\Cloud\\Redis\\' + version + r'\\FailoverInstanceRequest\\DataProtectionMode::'
}
for search, replace in types.items():
s.replace(
pathExpr,
search,
replace
)
### [START] protoc backwards compatibility fixes
# roll back to private properties.
s.replace(
"src/**/V*/**/*.php",
r"Generated from protobuf field ([^\n]{0,})\n\s{5}\*/\n\s{4}protected \$",
r"""Generated from protobuf field \1
*/
private $""")
# prevent proto messages from being marked final
s.replace(
"src/**/V*/**/*.php",
r"final class",
r"class")
# Replace "Unwrapped" with "Value" for method names.
s.replace(
"src/**/V*/**/*.php",
r"public function ([s|g]\w{3,})Unwrapped",
r"public function \1Value"
)
### [END] protoc backwards compatibility fixes
# fix relative cloud.google.com links
s.replace(
"src/**/V*/**/*.php",
r"(.{0,})\]\((/.{0,})\)",
r"\1](https://cloud.google.com\2)"
)
|
# import the OpenCV library
import cv2
# capture video from a file path
cap = cv2.VideoCapture('cars.mp4')
# use the trained Haar cascade classifier for cars
car_cascade = cv2.CascadeClassifier('haarcascade_cars.xml')
# read until the video is completed
while True:
    # capture frame by frame
    ret, frame = cap.read()
    if not ret:
        # no frame returned: end of the video (or a read error)
        break
    # convert the frame to grayscale
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    # detect cars in the frame (scale factor 1.1, at least 3 neighbouring detections)
    cars = car_cascade.detectMultiScale(gray, 1.1, 3)
    # draw a rectangle around each detected car
    for (x, y, w, h) in cars:
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
        crop_img = frame[y:y + h, x:x + w]  # cropped car region (unused in this demo)
    cv2.imshow('video', frame)
    # press Q on the keyboard to exit
    if cv2.waitKey(25) & 0xFF == ord('q'):
        break
# release the video-capture object
cap.release()
# close all OpenCV windows
cv2.destroyAllWindows()
|
"""
A WebSub Hub is an implementation that handles subscription requests and distributes
the content to subscribers when the corresponding topic URL has been updated. Hubs
MUST support subscription requests with a secret and deliver
[authenticated requests](https://www.w3.org/TR/websub/#authenticated-content-distribution)
when requested. Hubs MUST deliver the full contents of the topic URL in the request, and
MAY reduce the payload to a diff if the content type supports it. The conformance
criteria are described in Conformance Classes above.
"""
class Hub:
...
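# A minimal, framework-agnostic sketch of the authenticated content
# distribution step described in the docstring above. The helper names
# (sign_payload, build_delivery_headers) are hypothetical illustrations, not
# part of any published Hub API; per the spec, a hub that received a secret at
# subscription time signs each delivery with an X-Hub-Signature header so the
# subscriber can verify the payload.
import hashlib
import hmac
from typing import Dict, Optional


def sign_payload(secret: str, payload: bytes, algorithm: str = "sha256") -> str:
    """Return an X-Hub-Signature value ("<algorithm>=<hexdigest>") for one delivery."""
    digest = hmac.new(secret.encode("utf-8"), payload, getattr(hashlib, algorithm)).hexdigest()
    return "{}={}".format(algorithm, digest)


def build_delivery_headers(secret: Optional[str], payload: bytes,
                           content_type: str = "application/octet-stream") -> Dict[str, str]:
    """Headers a hub would attach when delivering the full topic contents."""
    headers = {"Content-Type": content_type}
    if secret:  # a secret is optional, but hubs MUST honour one when it was provided
        headers["X-Hub-Signature"] = sign_payload(secret, payload)
    return headers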
|
#!/usr/bin/env python
import os
import os.path
import re
import sys
import time
import json
import random
from subprocess import check_call, call, check_output, Popen, PIPE, CalledProcessError
netmask = ('127.0.0.1', '127.0.0.0')
rpc_param = {
'target_ip': '127.0.0.1',
'port': 3260,
'initiator_name': 'ANY',
'netmask': netmask,
'lun_total': 3,
'malloc_bdev_size': 64,
'malloc_block_size': 512,
'queue_depth': 64,
'target_name': 'Target3',
'alias_name': 'Target3_alias',
'chap_disable': 1,
'chap_mutal': 0,
'chap_required': 0,
'chap_auth_group': 0,
'trace_flag': 'rpc'
}
class RpcException(Exception):
def __init__(self, retval, *args):
super(RpcException, self).__init__(*args)
self.retval = retval
class spdk_rpc(object):
def __init__(self, rpc_py):
self.rpc_py = rpc_py
def __getattr__(self, name):
def call(*args):
cmd = "python {} {}".format(self.rpc_py, name)
for arg in args:
cmd += " {}".format(arg)
return check_output(cmd, shell=True)
return call
def verify(expr, retcode, msg):
if not expr:
raise RpcException(retcode, msg)
def verify_trace_flag_rpc_methods(rpc_py, rpc_param):
rpc = spdk_rpc(rpc_py)
output = rpc.get_trace_flags()
jsonvalue = json.loads(output)
verify(not jsonvalue[rpc_param['trace_flag']], 1,
"get_trace_flags returned {}, expected false".format(jsonvalue))
rpc.set_trace_flag(rpc_param['trace_flag'])
output = rpc.get_trace_flags()
jsonvalue = json.loads(output)
verify(jsonvalue[rpc_param['trace_flag']], 1,
"get_trace_flags returned {}, expected true".format(jsonvalue))
rpc.clear_trace_flag(rpc_param['trace_flag'])
output = rpc.get_trace_flags()
jsonvalue = json.loads(output)
verify(not jsonvalue[rpc_param['trace_flag']], 1,
"get_trace_flags returned {}, expected false".format(jsonvalue))
print "verify_trace_flag_rpc_methods passed"
def verify_iscsi_connection_rpc_methods(rpc_py):
rpc = spdk_rpc(rpc_py)
output = rpc.get_iscsi_connections()
jsonvalue = json.loads(output)
verify(not jsonvalue, 1,
"get_iscsi_connections returned {}, expected empty".format(jsonvalue))
portal_tag = '1'
initiator_tag = '1'
rpc.construct_malloc_bdev(rpc_param['malloc_bdev_size'], rpc_param['malloc_block_size'])
rpc.add_portal_group(portal_tag, "{}:{}".format(rpc_param['target_ip'], str(rpc_param['port'])))
rpc.add_initiator_group(initiator_tag, rpc_param['initiator_name'], rpc_param['netmask'][0])
lun_mapping = "Malloc" + str(rpc_param['lun_total']) + ":0"
net_mapping = portal_tag + ":" + initiator_tag
rpc.construct_target_node(rpc_param['target_name'], rpc_param['alias_name'], lun_mapping, net_mapping, rpc_param['queue_depth'],
rpc_param['chap_disable'], rpc_param['chap_mutal'], rpc_param['chap_required'], rpc_param['chap_auth_group'])
check_output('iscsiadm -m discovery -t st -p {}'.format(rpc_param['target_ip']), shell=True)
check_output('iscsiadm -m node --login', shell=True)
name = json.loads(rpc.get_target_nodes())[0]['name']
output = rpc.get_iscsi_connections()
jsonvalues = json.loads(output)
verify(jsonvalues[0]['target_node_name'] == rpc_param['target_name'], 1,
"target node name vaule is {}, expected {}".format(jsonvalues[0]['target_node_name'], rpc_param['target_name']))
verify(jsonvalues[0]['id'] == 0, 1,
"device id value is {}, expected 0".format(jsonvalues[0]['id']))
verify(jsonvalues[0]['initiator_addr'] == rpc_param['target_ip'], 1,
"initiator address values is {}, expected {}".format(jsonvalues[0]['initiator_addr'], rpc_param['target_ip']))
verify(jsonvalues[0]['target_addr'] == rpc_param['target_ip'], 1,
"target address values is {}, expected {}".format(jsonvalues[0]['target_addr'], rpc_param['target_ip']))
check_output('iscsiadm -m node --logout', shell=True)
check_output('iscsiadm -m node -o delete', shell=True)
rpc.delete_initiator_group(initiator_tag)
rpc.delete_portal_group(portal_tag)
rpc.delete_target_node(name)
output = rpc.get_iscsi_connections()
jsonvalues = json.loads(output)
verify(not jsonvalues, 1,
"get_iscsi_connections returned {}, expected empty".format(jsonvalues))
print "verify_iscsi_connection_rpc_methods passed"
def verify_scsi_devices_rpc_methods(rpc_py):
rpc = spdk_rpc(rpc_py)
output = rpc.get_scsi_devices()
jsonvalue = json.loads(output)
verify(not jsonvalue, 1,
"get_scsi_devices returned {}, expected empty".format(jsonvalue))
portal_tag = '1'
initiator_tag = '1'
rpc.construct_malloc_bdev(rpc_param['malloc_bdev_size'], rpc_param['malloc_block_size'])
rpc.add_portal_group(portal_tag, "{}:{}".format(rpc_param['target_ip'], str(rpc_param['port'])))
rpc.add_initiator_group(initiator_tag, rpc_param['initiator_name'], rpc_param['netmask'][0])
lun_mapping = "Malloc" + str(rpc_param['lun_total']) + ":0"
net_mapping = portal_tag + ":" + initiator_tag
rpc.construct_target_node(rpc_param['target_name'], rpc_param['alias_name'], lun_mapping, net_mapping, rpc_param['queue_depth'],
rpc_param['chap_disable'], rpc_param['chap_mutal'], rpc_param['chap_required'], rpc_param['chap_auth_group'])
check_output('iscsiadm -m discovery -t st -p {}'.format(rpc_param['target_ip']), shell=True)
check_output('iscsiadm -m node --login', shell=True)
name = json.loads(rpc.get_target_nodes())[0]['name']
output = rpc.get_scsi_devices()
jsonvalues = json.loads(output)
verify(jsonvalues[0]['device_name'] == rpc_param['target_name'], 1,
"device name vaule is {}, expected {}".format(jsonvalues[0]['device_name'], rpc_param['target_name']))
verify(jsonvalues[0]['id'] == 0, 1,
"device id value is {}, expected 0".format(jsonvalues[0]['id']))
check_output('iscsiadm -m node --logout', shell=True)
check_output('iscsiadm -m node -o delete', shell=True)
rpc.delete_initiator_group(initiator_tag)
rpc.delete_portal_group(portal_tag)
rpc.delete_target_node(name)
output = rpc.get_scsi_devices()
jsonvalues = json.loads(output)
verify(not jsonvalues, 1,
"get_scsi_devices returned {}, expected empty".format(jsonvalues))
print "verify_scsi_devices_rpc_methods passed"
def verify_luns_rpc_methods(rpc_py, rpc_param):
rpc = spdk_rpc(rpc_py)
output = rpc.get_luns()
jsonvalue = json.loads(output)
verify(not jsonvalue, 1,
"get_luns returned {}, expected empty".format(jsonvalue))
for i in range(1, rpc_param['lun_total'] + 1):
rpc.construct_malloc_bdev(rpc_param['malloc_bdev_size'], rpc_param['malloc_block_size'])
output = rpc.get_luns()
jsonvalue = json.loads(output)
verify(not jsonvalue, 1,
"get_luns returned {}, expected empty".format(jsonvalue))
print "verify_luns_rpc_methods passed"
def verify_portal_groups_rpc_methods(rpc_py, rpc_param):
rpc = spdk_rpc(rpc_py)
output = rpc.get_portal_groups()
jsonvalues = json.loads(output)
verify(not jsonvalues, 1,
"get_portal_groups returned {} groups, expected empty".format(jsonvalues))
lo_ip = ('127.0.0.1', '127.0.0.6')
nics = json.loads(rpc.get_interfaces())
for x in nics:
if x["ifc_index"] == 'lo':
rpc.add_ip_address(x["ifc_index"], lo_ip[1])
for idx, value in enumerate(lo_ip):
# The portal group tag must start at 1
tag = idx + 1
rpc.add_portal_group(tag, "{}:{}".format(value, rpc_param['port']))
output = rpc.get_portal_groups()
jsonvalues = json.loads(output)
verify(len(jsonvalues) == tag, 1,
"get_portal_groups returned {} groups, expected {}".format(len(jsonvalues), tag))
tag_list = []
for idx, value in enumerate(jsonvalues):
verify(value['portals'][0]['host'] == lo_ip[idx], 1,
"host value is {}, expected {}".format(value['portals'][0]['host'], rpc_param['target_ip']))
verify(value['portals'][0]['port'] == str(rpc_param['port']), 1,
"port value is {}, expected {}".format(value['portals'][0]['port'], str(rpc_param['port'])))
tag_list.append(value['tag'])
verify(value['tag'] == idx + 1, 1,
"tag value is {}, expected {}".format(value['tag'], idx + 1))
for idx, value in enumerate(tag_list):
rpc.delete_portal_group(value)
output = rpc.get_portal_groups()
jsonvalues = json.loads(output)
verify(len(jsonvalues) == (len(tag_list) - (idx + 1)), 1,
"get_portal_group returned {} groups, expected {}".format(len(jsonvalues), (len(tag_list) - (idx + 1))))
if not jsonvalues:
break
for jidx, jvalue in enumerate(jsonvalues):
verify(jvalue['portals'][0]['host'] == lo_ip[idx + jidx + 1], 1,
"host value is {}, expected {}".format(jvalue['portals'][0]['host'], lo_ip[idx + jidx + 1]))
verify(jvalue['portals'][0]['port'] == str(rpc_param['port']), 1,
"port value is {}, expected {}".format(jvalue['portals'][0]['port'], str(rpc_param['port'])))
verify(jvalue['tag'] != value or jvalue['tag'] == tag_list[idx + jidx + 1], 1,
"tag value is {}, expected {} and not {}".format(jvalue['tag'], tag_list[idx + jidx + 1], value))
for x in nics:
if x["ifc_index"] == 'lo':
rpc.delete_ip_address(x["ifc_index"], lo_ip[1])
print "verify_portal_groups_rpc_methods passed"
def verify_initiator_groups_rpc_methods(rpc_py, rpc_param):
rpc = spdk_rpc(rpc_py)
output = rpc.get_initiator_groups()
jsonvalues = json.loads(output)
verify(not jsonvalues, 1,
"get_initiator_groups returned {}, expected empty".format(jsonvalues))
for idx, value in enumerate(rpc_param['netmask']):
# The initiator group tag must start at 1
tag = idx + 1
rpc.add_initiator_group(tag, rpc_param['initiator_name'], value)
output = rpc.get_initiator_groups()
jsonvalues = json.loads(output)
verify(len(jsonvalues) == tag, 1,
"get_initiator_groups returned {} groups, expected {}".format(len(jsonvalues), tag))
tag_list = []
for idx, value in enumerate(jsonvalues):
verify(value['initiators'][0] == rpc_param['initiator_name'], 1,
"initiator value is {}, expected {}".format(value['initiators'][0], rpc_param['initiator_name']))
tag_list.append(value['tag'])
verify(value['tag'] == idx + 1, 1,
"tag value is {}, expected {}".format(value['tag'], idx + 1))
verify(value['netmasks'][0] == rpc_param['netmask'][idx], 1,
"netmasks value is {}, expected {}".format(value['netmasks'][0], rpc_param['netmask'][idx]))
for idx, value in enumerate(tag_list):
rpc.delete_initiator_group(value)
output = rpc.get_initiator_groups()
jsonvalues = json.loads(output)
verify(len(jsonvalues) == (len(tag_list) - (idx + 1)), 1,
"get_initiator_groups returned {} groups, expected {}".format(len(jsonvalues), (len(tag_list) - (idx + 1))))
if not jsonvalues:
break
for jidx, jvalue in enumerate(jsonvalues):
verify(jvalue['initiators'][0] == rpc_param['initiator_name'], 1,
"initiator value is {}, expected {}".format(jvalue['initiators'][0], rpc_param['initiator_name']))
verify(jvalue['tag'] != value or jvalue['tag'] == tag_list[idx + jidx + 1], 1,
"tag value is {}, expected {} and not {}".format(jvalue['tag'], tag_list[idx + jidx + 1], value))
verify(jvalue['netmasks'][0] == rpc_param['netmask'][idx + jidx + 1], 1,
"netmasks value is {}, expected {}".format(jvalue['netmasks'][0], rpc_param['netmask'][idx + jidx + 1]))
print "verify_initiator_groups_rpc_method passed."
def verify_target_nodes_rpc_methods(rpc_py, rpc_param):
rpc = spdk_rpc(rpc_py)
portal_tag = '1'
initiator_tag = '1'
output = rpc.get_target_nodes()
jsonvalues = json.loads(output)
verify(not jsonvalues, 1,
"get_target_nodes returned {}, expected empty".format(jsonvalues))
rpc.construct_malloc_bdev(rpc_param['malloc_bdev_size'], rpc_param['malloc_block_size'])
rpc.add_portal_group(portal_tag, "{}:{}".format(rpc_param['target_ip'], str(rpc_param['port'])))
rpc.add_initiator_group(initiator_tag, rpc_param['initiator_name'], rpc_param['netmask'][0])
lun_mapping = "Malloc" + str(rpc_param['lun_total']) + ":0"
net_mapping = portal_tag + ":" + initiator_tag
rpc.construct_target_node(rpc_param['target_name'], rpc_param['alias_name'], lun_mapping, net_mapping, rpc_param['queue_depth'],
rpc_param['chap_disable'], rpc_param['chap_mutal'], rpc_param['chap_required'], rpc_param['chap_auth_group'])
output = rpc.get_target_nodes()
jsonvalues = json.loads(output)
verify(len(jsonvalues) == 1, 1,
"get_target_nodes returned {} nodes, expected 1".format(len(jsonvalues)))
verify(jsonvalues[0]['luns'][0]['name'] == "Malloc" + str(rpc_param['lun_total']), 1,
"lun_name value is {}, expected Malloc{}".format(jsonvalues[0]['luns'][0]['name'], str(rpc_param['lun_total'])))
name = jsonvalues[0]['name']
verify(name == "iqn.2016-06.io.spdk:" + rpc_param['target_name'], 1,
"target name value is {}, expected {}".format(name, "iqn.2016-06.io.spdk:" + rpc_param['target_name']))
verify(jsonvalues[0]['alias_name'] == rpc_param['alias_name'], 1,
"target alias_name value is {}, expected {}".format(jsonvalues[0]['alias_name'], rpc_param['alias_name']))
verify(jsonvalues[0]['luns'][0]['id'] == 0, 1,
"lun id value is {}, expected 0".format(jsonvalues[0]['luns'][0]['id']))
verify(jsonvalues[0]['pg_ig_maps'][0]['ig_tag'] == int(initiator_tag), 1,
"initiator group tag value is {}, expected {}".format(jsonvalues[0]['pg_ig_maps'][0]['ig_tag'], initiator_tag))
verify(jsonvalues[0]['queue_depth'] == rpc_param['queue_depth'], 1,
"queue depth value is {}, expected {}".format(jsonvalues[0]['queue_depth'], rpc_param['queue_depth']))
verify(jsonvalues[0]['pg_ig_maps'][0]['pg_tag'] == int(portal_tag), 1,
"portal group tag value is {}, expected {}".format(jsonvalues[0]['pg_ig_maps'][0]['pg_tag'], portal_tag))
verify(jsonvalues[0]['chap_disabled'] == rpc_param['chap_disable'], 1,
"chap disable value is {}, expected {}".format(jsonvalues[0]['chap_disabled'], rpc_param['chap_disable']))
verify(jsonvalues[0]['chap_mutual'] == rpc_param['chap_mutal'], 1,
"chap mutual value is {}, expected {}".format(jsonvalues[0]['chap_mutual'], rpc_param['chap_mutal']))
verify(jsonvalues[0]['chap_required'] == rpc_param['chap_required'], 1,
"chap required value is {}, expected {}".format(jsonvalues[0]['chap_required'], rpc_param['chap_required']))
verify(jsonvalues[0]['chap_auth_group'] == rpc_param['chap_auth_group'], 1,
"chap auth group value is {}, expected {}".format(jsonvalues[0]['chap_auth_group'], rpc_param['chap_auth_group']))
rpc.delete_target_node(name)
output = rpc.get_target_nodes()
jsonvalues = json.loads(output)
verify(not jsonvalues, 1,
"get_target_nodes returned {}, expected empty".format(jsonvalues))
rpc.construct_target_node(rpc_param['target_name'], rpc_param['alias_name'], lun_mapping, net_mapping, rpc_param['queue_depth'],
rpc_param['chap_disable'], rpc_param['chap_mutal'], rpc_param['chap_required'], rpc_param['chap_auth_group'])
rpc.delete_portal_group(portal_tag)
rpc.delete_initiator_group(initiator_tag)
rpc.delete_target_node(name)
output = rpc.get_target_nodes()
jsonvalues = json.loads(output)
if not jsonvalues:
print "This issue will be fixed later."
print "verify_target_nodes_rpc_methods passed."
def verify_get_interfaces(rpc_py):
rpc = spdk_rpc(rpc_py)
nics = json.loads(rpc.get_interfaces())
nics_names = set(x["name"].encode('ascii', 'ignore') for x in nics)
# parse ip link show to verify the get_interfaces result
ifcfg_nics = set(re.findall("\S+:\s(\S+):\s<.*", check_output(["ip", "link", "show"])))
verify(nics_names == ifcfg_nics, 1, "get_interfaces returned {}".format(nics))
print "verify_get_interfaces passed."
def help_get_interface_ip_list(rpc_py, nic_name):
rpc = spdk_rpc(rpc_py)
nics = json.loads(rpc.get_interfaces())
nic = filter(lambda x: x["name"] == nic_name, nics)
verify(len(nic) != 0, 1,
"Nic name: {} is not found in {}".format(nic_name, [x["name"] for x in nics]))
return nic[0]["ip_addr"]
def verify_add_delete_ip_address(rpc_py):
rpc = spdk_rpc(rpc_py)
nics = json.loads(rpc.get_interfaces())
# add ip on up to first 2 nics
for x in nics[:2]:
faked_ip = "123.123.{}.{}".format(random.randint(1, 254), random.randint(1, 254))
rpc.add_ip_address(x["ifc_index"], faked_ip)
verify(faked_ip in help_get_interface_ip_list(rpc_py, x["name"]), 1,
"add ip {} to nic {} failed.".format(faked_ip, x["name"]))
try:
check_call(["ping", "-c", "1", "-W", "1", faked_ip])
except:
verify(False, 1,
"ping ip {} for {} was failed(adding was successful)".format
(faked_ip, x["name"]))
rpc.delete_ip_address(x["ifc_index"], faked_ip)
verify(faked_ip not in help_get_interface_ip_list(rpc_py, x["name"]), 1,
"delete ip {} from nic {} failed.(adding and ping were successful)".format
(faked_ip, x["name"]))
        # ping should fail and raise a CalledProcessError exception
try:
check_call(["ping", "-c", "1", "-W", "1", faked_ip])
except CalledProcessError as _:
pass
except Exception as e:
verify(False, 1,
"Unexpected exception was caught {}(adding/ping/delete were successful)".format
(str(e)))
else:
verify(False, 1,
"ip {} for {} could be pinged after delete ip(adding/ping/delete were successful)".format
(faked_ip, x["name"]))
print "verify_add_delete_ip_address passed."
def verify_add_nvme_bdev_rpc_methods(rpc_py):
rpc = spdk_rpc(rpc_py)
test_pass = 0
output = check_output(["lspci", "-mm", "-nn"])
addrs = re.findall('^([0-9]{2}:[0-9]{2}.[0-9]) "Non-Volatile memory controller \[0108\]".*-p02', output, re.MULTILINE)
for addr in addrs:
ctrlr_address = "-b Nvme0 -t pcie -a 0000:{}".format(addr)
rpc.construct_nvme_bdev(ctrlr_address)
print "add nvme device passed first time"
test_pass = 0
try:
rpc.construct_nvme_bdev(ctrlr_address)
except Exception as e:
print "add nvme device passed second time"
test_pass = 1
pass
else:
pass
        verify(test_pass == 1, 1, "adding the nvme device a second time should have failed")
print "verify_add_nvme_bdev_rpc_methods passed."
if __name__ == "__main__":
rpc_py = sys.argv[1]
try:
verify_trace_flag_rpc_methods(rpc_py, rpc_param)
verify_get_interfaces(rpc_py)
verify_add_delete_ip_address(rpc_py)
verify_luns_rpc_methods(rpc_py, rpc_param)
verify_portal_groups_rpc_methods(rpc_py, rpc_param)
verify_initiator_groups_rpc_methods(rpc_py, rpc_param)
verify_target_nodes_rpc_methods(rpc_py, rpc_param)
verify_scsi_devices_rpc_methods(rpc_py)
verify_iscsi_connection_rpc_methods(rpc_py)
verify_add_nvme_bdev_rpc_methods(rpc_py)
except RpcException as e:
print "{}. Exiting with status {}".format(e.message, e.retval)
raise e
except Exception as e:
raise e
sys.exit(0)
|
# Copyright (c) 2019-2021 Micro Focus or one of its affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function, division, absolute_import
import mock
from .base import VerticaPythonUnitTestCase
from aiovertica.messages import NoticeResponse
from aiovertica.errors import QueryError
class NoticeTestCase(VerticaPythonUnitTestCase):
SAMPLE_DATA = {b'S': 'FATAL',
b'H': 'This is a test hint',
b'L': '9999',
b'M': 'Failure is on purpose'}
@mock.patch.object(NoticeResponse, '_unpack_data')
def test_error_message(self, mock_unpack_data):
mock_unpack_data.return_value = NoticeTestCase.SAMPLE_DATA
notice = NoticeResponse(b'ignored-due-to-mock')
self.assertEqual(
notice.error_message(),
'Severity: FATAL, Message: Failure is on purpose, Hint: This is a test hint, Line: 9999'
)
@mock.patch.object(NoticeResponse, '_unpack_data')
def test_attribute_properties(self, mock_unpack_data):
mock_unpack_data.return_value = NoticeTestCase.SAMPLE_DATA
notice = NoticeResponse(b'ignored-due-to-mock')
self.assertEqual(notice.severity, 'FATAL')
self.assertEqual(notice.hint, 'This is a test hint')
# yes, line is still a string.
self.assertEqual(notice.line, '9999')
self.assertEqual(notice.message, 'Failure is on purpose')
self.assertIsNone(notice.detail)
self.assertIsNone(notice.sqlstate)
@mock.patch.object(NoticeResponse, '_unpack_data')
def test_labeled_values(self, mock_unpack_data):
mock_unpack_data.return_value = NoticeTestCase.SAMPLE_DATA
notice = NoticeResponse(b'ignored-due-to-mock')
self.assertEqual(notice.values, {
'Severity': 'FATAL',
'Hint': 'This is a test hint',
'Line': '9999',
'Message': 'Failure is on purpose'})
@mock.patch.object(NoticeResponse, '_unpack_data')
def test_query_error(self, mock_unpack_data):
mock_unpack_data.return_value = NoticeTestCase.SAMPLE_DATA
notice = NoticeResponse(b'ignored-due-to-mock')
query_error = QueryError(notice, 'Select Fake();')
self.assertEqual(query_error.severity, 'FATAL')
self.assertEqual(query_error.hint, 'This is a test hint')
self.assertEqual(query_error.line, '9999')
self.assertEqual(query_error.message, 'Failure is on purpose')
self.assertIsNone(query_error.detail)
self.assertIsNone(query_error.sqlstate)
self.assertEqual(
str(query_error),
'Severity: FATAL, Message: Failure is on purpose, Hint: This is a test hint, Line: 9999, SQL: \'Select Fake();\'')
|
import operator
class SegmentTree(object):
def __init__(self, capacity, operation, neutral_element):
"""Build a Segment Tree data structure.
https://en.wikipedia.org/wiki/Segment_tree
Can be used as regular array, but with two
important differences:
        a) setting an item's value is slightly slower.
It is O(lg capacity) instead of O(1).
b) user has access to an efficient ( O(log segment size) )
`reduce` operation which reduces `operation` over
a contiguous subsequence of items in the array.
        Parameters
        ----------
capacity: int
Total size of the array - must be a power of two.
operation: lambda obj, obj -> obj
            an operation for combining elements (e.g. sum, max)
must form a mathematical group together with the set of
possible values for array elements (i.e. be associative)
neutral_element: obj
            neutral element for the operation above, e.g. float('-inf')
for max and 0 for sum.
"""
assert capacity > 0 and capacity & (capacity - 1) == 0, "capacity must be positive and a power of 2."
self._capacity = capacity
self._value = [neutral_element for _ in range(2 * capacity)]
self._operation = operation
def _reduce_helper(self, start, end, node, node_start, node_end):
if start == node_start and end == node_end:
return self._value[node]
mid = (node_start + node_end) // 2
if end <= mid:
return self._reduce_helper(start, end, 2 * node, node_start, mid)
else:
if mid + 1 <= start:
return self._reduce_helper(start, end, 2 * node + 1, mid + 1, node_end)
else:
return self._operation(
self._reduce_helper(start, mid, 2 * node, node_start, mid),
self._reduce_helper(mid + 1, end, 2 * node + 1, mid + 1, node_end)
)
def reduce(self, start=0, end=None):
"""Returns result of applying `self.operation`
to a contiguous subsequence of the array.
self.operation(arr[start], operation(arr[start+1], operation(... arr[end])))
Parameters
----------
start: int
beginning of the subsequence
end: int
            end of the subsequence
Returns
-------
reduced: obj
result of reducing self.operation over the specified range of array elements.
"""
if end is None:
end = self._capacity
if end < 0:
end += self._capacity
end -= 1
return self._reduce_helper(start, end, 1, 0, self._capacity - 1)
def __setitem__(self, idx, val):
# index of the leaf
idx += self._capacity
self._value[idx] = val
idx //= 2
while idx >= 1:
self._value[idx] = self._operation(
self._value[2 * idx],
self._value[2 * idx + 1]
)
idx //= 2
def __getitem__(self, idx):
assert 0 <= idx < self._capacity
return self._value[self._capacity + idx]
class SumSegmentTree(SegmentTree):
def __init__(self, capacity):
super(SumSegmentTree, self).__init__(
capacity=capacity,
operation=operator.add,
neutral_element=0.0
)
def sum(self, start=0, end=None):
"""Returns arr[start] + ... + arr[end]"""
return super(SumSegmentTree, self).reduce(start, end)
def find_prefixsum_idx(self, prefixsum):
"""Find the highest index `i` in the array such that
        sum(arr[0] + arr[1] + ... + arr[i - 1]) <= prefixsum
        If array values are probabilities, this function
        allows sampling indexes according to the discrete
        probability distribution efficiently.
Parameters
----------
        prefixsum: float
            upper bound on the sum of the array prefix
Returns
-------
idx: int
highest index satisfying the prefixsum constraint
"""
assert 0 <= prefixsum <= self.sum() + 1e-5
idx = 1
while idx < self._capacity: # while non-leaf
if self._value[2 * idx] > prefixsum:
idx = 2 * idx
else:
prefixsum -= self._value[2 * idx]
idx = 2 * idx + 1
return idx - self._capacity
class MinSegmentTree(SegmentTree):
def __init__(self, capacity):
super(MinSegmentTree, self).__init__(
capacity=capacity,
operation=min,
neutral_element=float('inf')
)
def min(self, start=0, end=None):
"""Returns min(arr[start], ..., arr[end])"""
return super(MinSegmentTree, self).reduce(start, end)
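# A short usage sketch (not part of the original module): build a SumSegmentTree
# over four probabilities and use find_prefixsum_idx for proportional sampling;
# note that the capacity must be a power of two.
if __name__ == "__main__":
    tree = SumSegmentTree(capacity=4)
    for i, p in enumerate([0.1, 0.2, 0.3, 0.4]):
        tree[i] = p
    assert abs(tree.sum() - 1.0) < 1e-9
    assert tree.find_prefixsum_idx(0.25) == 1  # 0.1 < 0.25 <= 0.1 + 0.2

    min_tree = MinSegmentTree(capacity=4)
    for i, v in enumerate([3.0, 1.0, 4.0, 1.5]):
        min_tree[i] = v
    assert min_tree.min() == 1.0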
|
# coding=utf-8
r"""
This code was generated by
\ / _ _ _| _ _
| (_)\/(_)(_|\/| |(/_ v1.0.0
/ /
"""
from tests import IntegrationTestCase
from tests.holodeck import Request
from twilio.base.exceptions import TwilioException
from twilio.http.response import Response
class BucketTestCase(IntegrationTestCase):
def test_create_request(self):
self.holodeck.mock(Response(500, ''))
with self.assertRaises(TwilioException):
self.client.verify.v2.services("VAXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.rate_limits("RKXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.buckets.create(max=1, interval=1)
values = {'Max': 1, 'Interval': 1, }
self.holodeck.assert_has_request(Request(
'post',
'https://verify.twilio.com/v2/Services/VAXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/RateLimits/RKXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/Buckets',
data=values,
))
def test_create_bucket_response(self):
self.holodeck.mock(Response(
201,
'''
{
"sid": "BLaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"rate_limit_sid": "RKaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"service_sid": "VAaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"account_sid": "ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"max": 5,
"interval": 60,
"date_created": "2015-07-30T20:00:00Z",
"date_updated": "2015-07-30T20:00:00Z",
"url": "https://verify.twilio.com/v2/Services/VAaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/RateLimits/RKaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Buckets/BLaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
}
'''
))
actual = self.client.verify.v2.services("VAXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.rate_limits("RKXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.buckets.create(max=1, interval=1)
self.assertIsNotNone(actual)
def test_update_request(self):
self.holodeck.mock(Response(500, ''))
with self.assertRaises(TwilioException):
self.client.verify.v2.services("VAXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.rate_limits("RKXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.buckets("BLXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").update()
self.holodeck.assert_has_request(Request(
'post',
'https://verify.twilio.com/v2/Services/VAXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/RateLimits/RKXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/Buckets/BLXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX',
))
def test_update_bucket_response(self):
self.holodeck.mock(Response(
200,
'''
{
"sid": "BLaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"rate_limit_sid": "RKaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"service_sid": "VAaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"account_sid": "ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"max": 5,
"interval": 60,
"date_created": "2015-07-30T20:00:00Z",
"date_updated": "2015-07-30T20:00:00Z",
"url": "https://verify.twilio.com/v2/Services/VAaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/RateLimits/RKaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Buckets/BLaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
}
'''
))
actual = self.client.verify.v2.services("VAXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.rate_limits("RKXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.buckets("BLXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").update()
self.assertIsNotNone(actual)
def test_fetch_request(self):
self.holodeck.mock(Response(500, ''))
with self.assertRaises(TwilioException):
self.client.verify.v2.services("VAXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.rate_limits("RKXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.buckets("BLXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").fetch()
self.holodeck.assert_has_request(Request(
'get',
'https://verify.twilio.com/v2/Services/VAXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/RateLimits/RKXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/Buckets/BLXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX',
))
def test_fetch_bucket_response(self):
self.holodeck.mock(Response(
200,
'''
{
"sid": "BLaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"rate_limit_sid": "RKaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"service_sid": "VAaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"account_sid": "ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"max": 5,
"interval": 60,
"date_created": "2015-07-30T20:00:00Z",
"date_updated": "2015-07-30T20:00:00Z",
"url": "https://verify.twilio.com/v2/Services/VAaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/RateLimits/RKaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Buckets/BLaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
}
'''
))
actual = self.client.verify.v2.services("VAXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.rate_limits("RKXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.buckets("BLXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").fetch()
self.assertIsNotNone(actual)
def test_list_request(self):
self.holodeck.mock(Response(500, ''))
with self.assertRaises(TwilioException):
self.client.verify.v2.services("VAXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.rate_limits("RKXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.buckets.list()
self.holodeck.assert_has_request(Request(
'get',
'https://verify.twilio.com/v2/Services/VAXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/RateLimits/RKXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/Buckets',
))
def test_read_empty_response(self):
self.holodeck.mock(Response(
200,
'''
{
"buckets": [],
"meta": {
"page": 0,
"page_size": 50,
"first_page_url": "https://verify.twilio.com/v2/Services/VAaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/RateLimits/RKaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Buckets?PageSize=50&Page=0",
"previous_page_url": null,
"url": "https://verify.twilio.com/v2/Services/VAaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/RateLimits/RKaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Buckets?PageSize=50&Page=0",
"next_page_url": null,
"key": "buckets"
}
}
'''
))
actual = self.client.verify.v2.services("VAXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.rate_limits("RKXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.buckets.list()
self.assertIsNotNone(actual)
def test_read_full_response(self):
self.holodeck.mock(Response(
200,
'''
{
"buckets": [
{
"sid": "BLaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"rate_limit_sid": "RKaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"service_sid": "VAaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"account_sid": "ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"max": 5,
"interval": 60,
"date_created": "2015-07-30T20:00:00Z",
"date_updated": "2015-07-30T20:00:00Z",
"url": "https://verify.twilio.com/v2/Services/VAaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/RateLimits/RKaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Buckets/BLaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
}
],
"meta": {
"page": 0,
"page_size": 50,
"first_page_url": "https://verify.twilio.com/v2/Services/VAaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/RateLimits/RKaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Buckets?PageSize=50&Page=0",
"previous_page_url": null,
"url": "https://verify.twilio.com/v2/Services/VAaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/RateLimits/RKaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Buckets?PageSize=50&Page=0",
"next_page_url": null,
"key": "buckets"
}
}
'''
))
actual = self.client.verify.v2.services("VAXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.rate_limits("RKXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.buckets.list()
self.assertIsNotNone(actual)
def test_delete_request(self):
self.holodeck.mock(Response(500, ''))
with self.assertRaises(TwilioException):
self.client.verify.v2.services("VAXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.rate_limits("RKXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.buckets("BLXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").delete()
self.holodeck.assert_has_request(Request(
'delete',
'https://verify.twilio.com/v2/Services/VAXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/RateLimits/RKXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/Buckets/BLXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX',
))
def test_delete_response(self):
self.holodeck.mock(Response(
204,
None,
))
actual = self.client.verify.v2.services("VAXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.rate_limits("RKXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.buckets("BLXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").delete()
self.assertTrue(actual)
|
import numpy as np
# This class generates new candidate arrays by crossing the top rows of one parent in the list with the bottom rows of the next parent
class Crossover:
@staticmethod
def crossover(best):
row_begin_index = 0
row_half = 2
cross_list = []
for i in range(len(best) - 1):
first_part1 = best[i][row_begin_index:row_half, :]
first_part2 = best[i + 1][row_half:, :]
cross_list.append(np.concatenate((first_part1, first_part2)))
second_part1 = best[i][row_half:, :]
second_part2 = best[i + 1][row_begin_index:row_half, :]
cross_list.append(np.concatenate((second_part2, second_part1)))
return cross_list
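# Hypothetical usage sketch (not part of the original module): two 4x3 parents
# produce two offspring, each mixing the top two rows of one parent with the
# bottom two rows of the other.
if __name__ == "__main__":
    parent_a = np.arange(12).reshape(4, 3)
    parent_b = np.arange(12, 24).reshape(4, 3)
    offspring = Crossover.crossover([parent_a, parent_b])
    assert len(offspring) == 2
    assert all(child.shape == (4, 3) for child in offspring)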
|
from typing import Dict, Any
from telegram import Update, ParseMode
from telegram.ext import CallbackContext
from config.constants import (
USER_DATA_V1_SETTINGS_CAMPUS,
USER_DATA_V1_SETTINGS_ONLINE,
USER_DATA_V1_INTRA_LOGIN,
USER_DATA_V1_INTRA_CAMPUS,
USER_DATA_V1_SETTINGS_ACTIVE,
USER_DATA_V1_AUTHORIZED,
USER_DATA_V1_TELEGRAM_USERNAME,
USER_DATA_V1_MATCH_WITH,
)
from config.env import ADMIN_IDS
from utils.lang import COMMAND_DENIED_NOT_AUTHORIZED
def info(data: Dict[str, Any], is_admin_request: bool = False) -> str:
fields = [
USER_DATA_V1_INTRA_LOGIN,
USER_DATA_V1_INTRA_CAMPUS,
USER_DATA_V1_SETTINGS_CAMPUS,
USER_DATA_V1_SETTINGS_ONLINE,
USER_DATA_V1_SETTINGS_ACTIVE,
USER_DATA_V1_TELEGRAM_USERNAME,
]
if is_admin_request:
fields.append(USER_DATA_V1_MATCH_WITH)
return '\n'.join(['{}: {}'.format(x, data.get(x, '???')) for x in fields])
def info_other(upd: Update, ctx: CallbackContext) -> None:
param = ctx.args[0]
user = None
for uid, udata in ctx.dispatcher.user_data.items():
if USER_DATA_V1_INTRA_LOGIN not in udata:
continue
if udata[USER_DATA_V1_INTRA_LOGIN] == param:
user = udata
break
if str(uid) == param:
user = udata
break
if not user:
ctx.bot.send_message(upd.effective_user.id, text='{} not found'.format(param))
return
message = info(user, is_admin_request=True)
ctx.bot.send_message(
upd.effective_user.id,
text='```\ntelegram.id: {}\n{}\n```'.format(
uid,
message
),
parse_mode=ParseMode.MARKDOWN
)
def info_self(upd: Update, ctx: CallbackContext) -> None:
message = info(ctx.user_data)
ctx.bot.send_message(upd.effective_user.id, text='```\n{}\n```'.format(message), parse_mode=ParseMode.MARKDOWN)
def handler_command_info(upd: Update, ctx: CallbackContext) -> None:
if not ctx.user_data.get(USER_DATA_V1_AUTHORIZED, False):
ctx.bot.send_message(upd.effective_user.id, text=COMMAND_DENIED_NOT_AUTHORIZED)
return
if ctx.args and upd.effective_user.id in ADMIN_IDS:
return info_other(upd, ctx)
return info_self(upd, ctx)
|
import logging
import os
import re
import sys
from typing import Any, Dict
import PySimpleGUI as sg # type: ignore
from PySimpleGUI.PySimpleGUI import Column # type: ignore
from .utils.encryption import encrypt_password, generate_key
logger = logging.getLogger(__name__)
def login_gui() -> Dict[str, Any]:
sg.theme('DarkTeal12')
def collapse(layout: list, key: str, visible: bool) -> Column:
"""
Helper function to hide and un-hide layouts
"""
return sg.pin(sg.Column(layout, key=key, visible=visible))
def main() -> Dict[str, Any]:
"""
Main GUI function
"""
new_user_section = [
[sg.Text('Username'), sg.Input(key='_USERNAME_', tooltip='What is your myASNB account username?')],
[sg.Text('Password'), sg.Input(key='_PASSWORD_', password_char="*", tooltip='What is your myASNB account password?')],
[sg.Text('Investment Amount (RM)'), sg.Input(key='_INVESTMENT_AMOUNT_', tooltip='How much do you want to invest?', change_submits=True, do_not_clear=True)],
]
layout = [
[sg.Text('myASNB Unit Holder Login', font='Helvetica 20', justification='center')],
[sg.Checkbox('Login as new user', enable_events=True, key='_CHECKBOX_KEY_', tooltip='Tick to login.')],
[collapse(new_user_section, '_SECTION_KEY_', False)],
[sg.OK('Start', tooltip='Start the bot (Press: ENTER)', size=(10, 1), bind_return_key=True, focus=True), sg.Cancel('Quit', tooltip='Goodbye.', size=(5, 1))],
]
window = sg.Window(
'Six Percent',
layout,
auto_size_text=False,
default_element_size=(25, 1),
text_justification='l',
return_keyboard_events=True,
grab_anywhere=False,
)
user_credentials_template = dict(username='', password='', investment_amount='')
user_credentials = user_credentials_template.copy()
section_toggle = False
while True:
event, values = window.read()
if event == '_CHECKBOX_KEY_':
section_toggle = not section_toggle
window['_SECTION_KEY_'].update(visible=section_toggle)
elif event == '_INVESTMENT_AMOUNT_':
window.FindElement(event).Update(re.sub("[^0-9]", "", values[event]))
user_credentials = {
**user_credentials,
'username': values['_USERNAME_'],
'password': values['_PASSWORD_'],
'investment_amount': values['_INVESTMENT_AMOUNT_'],
}
if event in (sg.WIN_CLOSED, 'Quit'):
logger.info('Exiting program gracefully')
window.close()
sys.exit()
elif event == 'Start':
break
window.close()
if not os.path.isfile('secret.key'):
generate_key()
# Encrypts user password before storing it
if user_credentials['password']:
user_credentials['password'] = encrypt_password(user_credentials['password'])
return dict() if user_credentials == user_credentials_template else user_credentials
user_info = main()
return user_info
if __name__ == '__main__':
logger.info(login_gui())
|
from random import shuffle
class Deck(object):
CARD_VALUES = ['A', '2', '3', '4', '5', '6', '7', '8', '9', '10', 'J', 'Q', 'K']
CARD_SUITS = ['H', 'D', 'S', 'C']
@staticmethod
def get_shuffled_deck():
deck = Deck()
deck.shuffle()
return deck
def __init__(self):
self.cards = []
for cardSuit in self.__class__.CARD_SUITS:
for cardValue in self.__class__.CARD_VALUES:
self.cards.append(Card(cardValue, cardSuit))
def shuffle(self):
shuffle(self.cards)
def draw(self):
return self.cards.pop()
class Card(object):
def __init__(self, value, suit):
self.value = value
self.suit = suit
def get_value(self):
return self.value
def get_suit(self):
return self.suit
def __str__(self):
return self.value + self.suit
def __eq__(self, other):
try:
return self.get_value() + self.get_suit() == other.get_value() + other.get_suit()
except AttributeError:
return False
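# Brief usage sketch (illustrative only, not part of the original module):
# draw two cards from a shuffled 52-card deck.
if __name__ == '__main__':
    deck = Deck.get_shuffled_deck()
    hand = [deck.draw(), deck.draw()]
    print(', '.join(str(card) for card in hand))
    assert len(deck.cards) == 50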
|
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Model script to test TF-TensorRT integration."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from unittest import SkipTest # pylint: disable=g-importing-member
from tensorflow.compiler.tf2tensorrt.wrap_py_utils import get_linked_tensorrt_version
from tensorflow.python.compiler.tensorrt.test import tf_trt_integration_test_base as trt_test
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
class TrtModeTestBase(trt_test.TfTrtIntegrationTestBase):
"""Test squeeze on batch dim and some unary operations in TF-TRT."""
def GraphFn(self, x1):
q = math_ops.abs(x1)
q = q + 1.0
q = q * 3.0
q = array_ops.squeeze(q, 0)
q = math_ops.abs(q)
q = q + 5.0
return array_ops.identity(q, name="output_0")
def GetParams(self):
"""The input has 1 as a first dimension, which is removed by the squeeze.
op in the graph.
In explicit batch mode, TensorRT can convert the whole graph. In this mode
it is possible to manipulate the batch dimension using the squeeze op.
In implicit batch mode TensorRT cannot convert the whole graph. We are not
allowed to manipulate (squeeze) the first dimension in implicit batch mode.
Therefore the graph will be converted using multiple segments.
"""
return self.BuildParams(self.GraphFn, dtypes.float32, [[1, 12, 5]],
[[12, 5]])
def GetConversionParams(self, run_params, implicit_batch=False):
"""Return a TrtConversionParams for test."""
conversion_params = super(TrtModeTestBase,
self).GetConversionParams(run_params)
rewriter_config = self.GetTrtRewriterConfig(
run_params=run_params,
conversion_params=conversion_params,
use_implicit_batch=implicit_batch)
return conversion_params._replace(rewriter_config_template=rewriter_config)
@classmethod
def setUpClass(cls):
if cls is TrtModeTestBase:
raise SkipTest("TrtModeTestBase defines base class for other test.")
super(TrtModeTestBase, cls).setUpClass()
class ImplicitBatchTest(TrtModeTestBase):
def GetConversionParams(self, run_params):
"""Return a TrtConversionParams for test using implicit batch mdoe."""
return super(ImplicitBatchTest, self).GetConversionParams(run_params, True)
def ExpectedEnginesToBuild(self, run_params):
"""Check that the expected engine is built.
Args:
run_params: the run parameters.
Returns:
the expected engines to build.
The squeeze op is not converted by TensorRT in implicit batch mode.
Because of this we have two TRTEngineOps in the graph: one for the
subgraph before 'squeeze(q,0)', and another one for the rest of the ops
after the 'squeeze(q,0)'.
"""
return ["TRTEngineOp_0", "TRTEngineOp_1"]
class ExplicitBatchTest(TrtModeTestBase):
def GetParams(self):
"""We specify input/output masks with static (known) shapes."""
return self.BuildParamsWithMask(
self.GraphFn,
dtypes.float32, [[1, 12, 5]], [[12, 5]],
input_mask=[[True, True, True]],
output_mask=[[True, True]])
def GetConversionParams(self, run_params):
"""Return a TrtConversionParams for test that enables explicit batch."""
return super(ExplicitBatchTest, self).GetConversionParams(run_params, False)
def ExpectedEnginesToBuild(self, run_params):
"""Check that the expected engine is built.
Args:
run_params: the run parameters.
Returns:
the expected engines to build.
In explicit batch mode the whole graph is converted using a single engine.
"""
return ["TRTEngineOp_0"]
def ShouldRunTest(self, run_params):
# Only run for TRT 6 and above.
ver = get_linked_tensorrt_version()
return ver[0] >= 6 and (not run_params.use_calibration)
class DynamicShapesTest(TrtModeTestBase):
"""Test with dynamic input shapes.
DynamicShapesTest is different from ExplicitBatchTest in that it uses input
and output masks to change the input and output shapes to unknown shapes.
"""
def GetParams(self):
"""We specify input/output mask with dynamic (unknown) shapes."""
return self.BuildParamsWithMask(
self.GraphFn,
dtypes.float32, [[1, 12, 5]], [[12, 5]],
input_mask=[[False, False, False]],
output_mask=[[False, False]])
def GetConversionParams(self, run_params):
"""Return a TrtConversionParams for test that enables explicit batch."""
return super(DynamicShapesTest, self).GetConversionParams(run_params, False)
def ExpectedEnginesToBuild(self, run_params):
"""Return the expected engines to build."""
return ["TRTEngineOp_0"]
def ShouldRunTest(self, run_params):
# Only run for TRT 6 and above.
ver = get_linked_tensorrt_version()
return ver[0] >= 6 and (not run_params.use_calibration)
if __name__ == "__main__":
test.main()
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import List
from ax.modelbridge.array import ArrayModelBridge
from ax.models.numpy_base import NumpyModel
# pyre-fixme[13]: Attribute `model` is never initialized.
# pyre-fixme[13]: Attribute `outcomes` is never initialized.
# pyre-fixme[13]: Attribute `parameters` is never initialized.
class NumpyModelBridge(ArrayModelBridge):
"""A model bridge for using numpy array-based models.
This model bridge interfaces with NumpyModel.
Requires that all parameters have been transformed to RangeParameters
or FixedParameters with float type and no log scale.
"""
model: NumpyModel
outcomes: List[str]
parameters: List[str]
|
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import dataclasses
import json
import os
from dataclasses import dataclass, field
from enum import Enum
from typing import Any, Dict, List, Optional, Tuple
from .file_utils import cached_property, is_torch_available, is_torch_tpu_available, torch_required
from .trainer_utils import EvaluationStrategy
from .utils import logging
if is_torch_available():
import torch
if is_torch_tpu_available():
import torch_xla.core.xla_model as xm
logger = logging.get_logger(__name__)
def default_logdir() -> str:
"""
Same default as PyTorch
"""
import socket
from datetime import datetime
current_time = datetime.now().strftime("%b%d_%H-%M-%S")
return os.path.join("runs", current_time + "_" + socket.gethostname())
@dataclass
class TrainingArguments:
"""
TrainingArguments is the subset of the arguments we use in our example scripts **which relate to the training loop
itself**.
Using :class:`~transformers.HfArgumentParser` we can turn this class into argparse arguments to be able to specify
them on the command line.
Parameters:
output_dir (:obj:`str`):
The output directory where the model predictions and checkpoints will be written.
overwrite_output_dir (:obj:`bool`, `optional`, defaults to :obj:`False`):
If :obj:`True`, overwrite the content of the output directory. Use this to continue training if
:obj:`output_dir` points to a checkpoint directory.
do_train (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether to run training or not. This argument is not directly used by :class:`~transformers.Trainer`, it's
intended to be used by your training/evaluation scripts instead. See the `example scripts
<https://github.com/huggingface/transformers/tree/master/examples>`__ for more details.
do_eval (:obj:`bool`, `optional`):
Whether to run evaluation on the dev set or not. Will be set to :obj:`True` if :obj:`evaluation_strategy`
is different from :obj:`"no"`. This argument is not directly used by :class:`~transformers.Trainer`, it's
intended to be used by your training/evaluation scripts instead. See the `example scripts
<https://github.com/huggingface/transformers/tree/master/examples>`__ for more details.
do_predict (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether to run predictions on the test set or not. This argument is not directly used by
:class:`~transformers.Trainer`, it's intended to be used by your training/evaluation scripts instead. See
the `example scripts <https://github.com/huggingface/transformers/tree/master/examples>`__ for more
details.
evaluation_strategy (:obj:`str` or :class:`~transformers.trainer_utils.EvaluationStrategy`, `optional`, defaults to :obj:`"no"`):
The evaluation strategy to adopt during training. Possible values are:
* :obj:`"no"`: No evaluation is done during training.
* :obj:`"steps"`: Evaluation is done (and logged) every :obj:`eval_steps`.
* :obj:`"epoch"`: Evaluation is done at the end of each epoch.
prediction_loss_only (:obj:`bool`, `optional`, defaults to `False`):
When performing evaluation and predictions, only returns the loss.
per_device_train_batch_size (:obj:`int`, `optional`, defaults to 8):
The batch size per GPU/TPU core/CPU for training.
per_device_eval_batch_size (:obj:`int`, `optional`, defaults to 8):
The batch size per GPU/TPU core/CPU for evaluation.
gradient_accumulation_steps (:obj:`int`, `optional`, defaults to 1):
Number of update steps to accumulate the gradients for, before performing a backward/update pass.
.. warning::
When using gradient accumulation, one step is counted as one step with backward pass. Therefore,
logging, evaluation, save will be conducted every ``gradient_accumulation_steps * xxx_step`` training
examples.
eval_accumulation_steps (:obj:`int`, `optional`):
Number of prediction steps to accumulate the output tensors for, before moving the results to the CPU. If
left unset, the whole predictions are accumulated on GPU/TPU before being moved to the CPU (faster but
requires more memory).
learning_rate (:obj:`float`, `optional`, defaults to 5e-5):
The initial learning rate for Adam.
weight_decay (:obj:`float`, `optional`, defaults to 0):
The weight decay to apply (if not zero).
adam_beta1 (:obj:`float`, `optional`, defaults to 0.9):
The beta1 for the Adam optimizer.
adam_beta2 (:obj:`float`, `optional`, defaults to 0.999):
The beta2 for the Adam optimizer.
adam_epsilon (:obj:`float`, `optional`, defaults to 1e-8):
Epsilon for the Adam optimizer.
max_grad_norm (:obj:`float`, `optional`, defaults to 1.0):
Maximum gradient norm (for gradient clipping).
num_train_epochs (:obj:`float`, `optional`, defaults to 3.0):
Total number of training epochs to perform (if not an integer, will perform the decimal part percents of
the last epoch before stopping training).
max_steps (:obj:`int`, `optional`, defaults to -1):
If set to a positive number, the total number of training steps to perform. Overrides
:obj:`num_train_epochs`.
warmup_steps (:obj:`int`, `optional`, defaults to 0):
Number of steps used for a linear warmup from 0 to :obj:`learning_rate`.
logging_dir (:obj:`str`, `optional`):
Tensorboard log directory. Will default to `runs/**CURRENT_DATETIME_HOSTNAME**`.
logging_first_step (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether to log and evaluate the first :obj:`global_step` or not.
logging_steps (:obj:`int`, `optional`, defaults to 500):
Number of update steps between two logs.
save_steps (:obj:`int`, `optional`, defaults to 500):
Number of update steps before two checkpoint saves.
save_total_limit (:obj:`int`, `optional`):
If a value is passed, will limit the total amount of checkpoints. Deletes the older checkpoints in
:obj:`output_dir`.
no_cuda (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether to avoid using CUDA even when it is available.
seed (:obj:`int`, `optional`, defaults to 42):
Random seed for initialization.
fp16 (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether to use 16-bit (mixed) precision training (through NVIDIA apex) instead of 32-bit training.
fp16_opt_level (:obj:`str`, `optional`, defaults to 'O1'):
For :obj:`fp16` training, apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']. See details
on the `apex documentation <https://nvidia.github.io/apex/amp.html>`__.
local_rank (:obj:`int`, `optional`, defaults to -1):
During distributed training, the rank of the process.
tpu_num_cores (:obj:`int`, `optional`):
When training on TPU, the number of TPU cores (automatically passed by launcher script).
debug (:obj:`bool`, `optional`, defaults to :obj:`False`):
When training on TPU, whether to print debug metrics or not.
dataloader_drop_last (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether to drop the last incomplete batch (if the length of the dataset is not divisible by the batch size)
or not.
eval_steps (:obj:`int`, `optional`):
Number of update steps between two evaluations if :obj:`evaluation_strategy="steps"`. Will default to the
same value as :obj:`logging_steps` if not set.
dataloader_num_workers (:obj:`int`, `optional`, defaults to 0):
Number of subprocesses to use for data loading (PyTorch only). 0 means that the data will be loaded in the
main process.
past_index (:obj:`int`, `optional`, defaults to -1):
Some models like :doc:`TransformerXL <../model_doc/transformerxl>` or :doc:`XLNet <../model_doc/xlnet>` can
make use of the past hidden states for their predictions. If this argument is set to a positive int, the
``Trainer`` will use the corresponding output (usually index 2) as the past state and feed it to the model
at the next training step under the keyword argument ``mems``.
run_name (:obj:`str`, `optional`):
A descriptor for the run. Notably used for wandb logging.
disable_tqdm (:obj:`bool`, `optional`):
Whether or not to disable the tqdm progress bars. Will default to :obj:`True` if the logging level is set
to warn or lower (default), :obj:`False` otherwise.
remove_unused_columns (:obj:`bool`, `optional`, defaults to :obj:`True`):
If using `nlp.Dataset` datasets, whether or not to automatically remove the columns unused by the model
forward method.
(Note that this behavior is not implemented for :class:`~transformers.TFTrainer` yet.)
label_names (:obj:`List[str]`, `optional`):
The list of keys in your dictionary of inputs that correspond to the labels.
Will eventually default to :obj:`["labels"]` except if the model used is one of the
:obj:`XxxForQuestionAnswering` in which case it will default to :obj:`["start_positions",
"end_positions"]`.
load_best_model_at_end (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not to load the best model found during training at the end of training.
.. note::
When set to :obj:`True`, the parameters :obj:`save_steps` will be ignored and the model will be saved
after each evaluation.
metric_for_best_model (:obj:`str`, `optional`):
Use in conjunction with :obj:`load_best_model_at_end` to specify the metric to use to compare two different
models. Must be the name of a metric returned by the evaluation with or without the prefix :obj:`"eval_"`.
Will default to :obj:`"loss"` if unspecified and :obj:`load_best_model_at_end=True` (to use the evaluation
loss).
If you set this value, :obj:`greater_is_better` will default to :obj:`True`. Don't forget to set it to
:obj:`False` if your metric is better when lower.
greater_is_better (:obj:`bool`, `optional`):
Use in conjunction with :obj:`load_best_model_at_end` and :obj:`metric_for_best_model` to specify if better
models should have a greater metric or not. Will default to:
- :obj:`True` if :obj:`metric_for_best_model` is set to a value that isn't :obj:`"loss"` or
:obj:`"eval_loss"`.
- :obj:`False` if :obj:`metric_for_best_model` is not set, or set to :obj:`"loss"` or :obj:`"eval_loss"`.
model_parallel (:obj:`bool`, `optional`, defaults to :obj:`False`):
If there is more than one device, whether to use model parallelism to distribute the model's modules
across devices or not.
ignore_data_skip (:obj:`bool`, `optional`, defaults to :obj:`False`):
When resuming training, whether or not to skip the epochs and batches to get the data loading at the same
stage as in the previous training. If set to :obj:`True`, the training will begin faster (as that skipping
step can take a long time) but will not yield the same results as the interrupted training would have.
"""
output_dir: str = field(
metadata={"help": "The output directory where the model predictions and checkpoints will be written."}
)
overwrite_output_dir: bool = field(
default=False,
metadata={
"help": (
"Overwrite the content of the output directory."
"Use this to continue training if output_dir points to a checkpoint directory."
)
},
)
do_train: bool = field(default=False, metadata={"help": "Whether to run training."})
do_eval: bool = field(default=None, metadata={"help": "Whether to run eval on the dev set."})
do_predict: bool = field(default=False, metadata={"help": "Whether to run predictions on the test set."})
model_parallel: bool = field(
default=False,
metadata={
"help": (
"If there are more than one devices, whether to use model parallelism to distribute the "
"model's modules across devices."
)
},
)
evaluation_strategy: EvaluationStrategy = field(
default="no",
metadata={"help": "Run evaluation during training at each logging step."},
)
prediction_loss_only: bool = field(
default=False,
metadata={"help": "When performing evaluation and predictions, only returns the loss."},
)
per_device_train_batch_size: int = field(
default=8, metadata={"help": "Batch size per GPU/TPU core/CPU for training."}
)
per_device_eval_batch_size: int = field(
default=8, metadata={"help": "Batch size per GPU/TPU core/CPU for evaluation."}
)
per_gpu_train_batch_size: Optional[int] = field(
default=None,
metadata={
"help": "Deprecated, the use of `--per_device_train_batch_size` is preferred. "
"Batch size per GPU/TPU core/CPU for training."
},
)
per_gpu_eval_batch_size: Optional[int] = field(
default=None,
metadata={
"help": "Deprecated, the use of `--per_device_eval_batch_size` is preferred."
"Batch size per GPU/TPU core/CPU for evaluation."
},
)
gradient_accumulation_steps: int = field(
default=1,
metadata={"help": "Number of updates steps to accumulate before performing a backward/update pass."},
)
eval_accumulation_steps: Optional[int] = field(
default=None,
metadata={"help": "Number of predictions steps to accumulate before moving the tensors to the CPU."},
)
learning_rate: float = field(default=5e-5, metadata={"help": "The initial learning rate for Adam."})
weight_decay: float = field(default=0.0, metadata={"help": "Weight decay if we apply some."})
adam_beta1: float = field(default=0.9, metadata={"help": "Beta1 for Adam optimizer"})
adam_beta2: float = field(default=0.999, metadata={"help": "Beta2 for Adam optimizer"})
adam_epsilon: float = field(default=1e-8, metadata={"help": "Epsilon for Adam optimizer."})
max_grad_norm: float = field(default=1.0, metadata={"help": "Max gradient norm."})
num_train_epochs: float = field(default=3.0, metadata={"help": "Total number of training epochs to perform."})
max_steps: int = field(
default=-1,
metadata={"help": "If > 0: set total number of training steps to perform. Override num_train_epochs."},
)
warmup_steps: int = field(default=0, metadata={"help": "Linear warmup over warmup_steps."})
logging_dir: Optional[str] = field(default_factory=default_logdir, metadata={"help": "Tensorboard log dir."})
logging_first_step: bool = field(default=False, metadata={"help": "Log the first global_step"})
logging_steps: int = field(default=500, metadata={"help": "Log every X update steps."})
save_steps: int = field(default=500, metadata={"help": "Save checkpoint every X update steps."})
save_total_limit: Optional[int] = field(
default=None,
metadata={
"help": (
"Limit the total amount of checkpoints."
"Deletes the older checkpoints in the output_dir. Default is unlimited checkpoints"
)
},
)
no_cuda: bool = field(default=False, metadata={"help": "Do not use CUDA even when it is available"})
seed: int = field(default=42, metadata={"help": "random seed for initialization"})
fp16: bool = field(
default=False,
metadata={"help": "Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit"},
)
fp16_opt_level: str = field(
default="O1",
metadata={
"help": (
"For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
"See details at https://nvidia.github.io/apex/amp.html"
)
},
)
local_rank: int = field(default=-1, metadata={"help": "For distributed training: local_rank"})
tpu_num_cores: Optional[int] = field(
default=None, metadata={"help": "TPU: Number of TPU cores (automatically passed by launcher script)"}
)
tpu_metrics_debug: bool = field(
default=False,
metadata={"help": "Deprecated, the use of `--debug` is preferred. TPU: Whether to print debug metrics"},
)
debug: bool = field(default=False, metadata={"help": "Whether to print debug metrics on TPU"})
dataloader_drop_last: bool = field(
default=False, metadata={"help": "Drop the last incomplete batch if it is not divisible by the batch size."}
)
eval_steps: int = field(default=None, metadata={"help": "Run an evaluation every X steps."})
dataloader_num_workers: int = field(
default=0,
metadata={
"help": "Number of subprocesses to use for data loading (PyTorch only). 0 means that the data will be loaded in the main process."
},
)
past_index: int = field(
default=-1,
metadata={"help": "If >=0, uses the corresponding part of the output as the past state for next step."},
)
run_name: Optional[str] = field(
default=None, metadata={"help": "An optional descriptor for the run. Notably used for wandb logging."}
)
disable_tqdm: Optional[bool] = field(
default=None, metadata={"help": "Whether or not to disable the tqdm progress bars."}
)
remove_unused_columns: Optional[bool] = field(
default=True, metadata={"help": "Remove columns not required by the model when using an nlp.Dataset."}
)
label_names: Optional[List[str]] = field(
default=None, metadata={"help": "The list of keys in your dictionary of inputs that correspond to the labels."}
)
load_best_model_at_end: Optional[bool] = field(
default=False,
metadata={"help": "Whether or not to load the best model found during training at the end of training."},
)
metric_for_best_model: Optional[str] = field(
default=None, metadata={"help": "The metric to use to compare two different models."}
)
greater_is_better: Optional[bool] = field(
default=None, metadata={"help": "Whether the `metric_for_best_model` should be maximized or not."}
)
ignore_data_skip: bool = field(
default=False,
metadata={
"help": "When resuming training, whether or not to skip the first epochs and batches to get to the same training data."
},
)
def __post_init__(self):
if self.disable_tqdm is None:
self.disable_tqdm = logger.getEffectiveLevel() > logging.WARN
self.evaluation_strategy = EvaluationStrategy(self.evaluation_strategy)
if self.do_eval is False and self.evaluation_strategy != EvaluationStrategy.NO:
self.do_eval = True
if self.eval_steps is None:
self.eval_steps = self.logging_steps
if self.load_best_model_at_end and self.metric_for_best_model is None:
self.metric_for_best_model = "loss"
if self.greater_is_better is None and self.metric_for_best_model is not None:
self.greater_is_better = self.metric_for_best_model not in ["loss", "eval_loss"]
if self.run_name is None:
self.run_name = self.output_dir
if is_torch_available() and self.device.type != "cuda" and self.fp16:
raise ValueError("AMP (`--fp16`) can only be used on CUDA devices.")
@property
def train_batch_size(self) -> int:
"""
The actual batch size for training (may differ from :obj:`per_gpu_train_batch_size` in distributed training).
"""
if self.per_gpu_train_batch_size:
logger.warning(
"Using deprecated `--per_gpu_train_batch_size` argument which will be removed in a future "
"version. Using `--per_device_train_batch_size` is preferred."
)
per_device_batch_size = self.per_gpu_train_batch_size or self.per_device_train_batch_size
if not self.model_parallel:
train_batch_size = per_device_batch_size * max(1, self.n_gpu)
else:
train_batch_size = per_device_batch_size
return train_batch_size
@property
def eval_batch_size(self) -> int:
"""
The actual batch size for evaluation (may differ from :obj:`per_gpu_eval_batch_size` in distributed training).
"""
if self.per_gpu_eval_batch_size:
logger.warning(
"Using deprecated `--per_gpu_eval_batch_size` argument which will be removed in a future "
"version. Using `--per_device_eval_batch_size` is preferred."
)
per_device_batch_size = self.per_gpu_eval_batch_size or self.per_device_eval_batch_size
if not self.model_parallel:
eval_batch_size = per_device_batch_size * max(1, self.n_gpu)
else:
eval_batch_size = per_device_batch_size
return eval_batch_size
@cached_property
@torch_required
def _setup_devices(self) -> Tuple["torch.device", int]:
logger.info("PyTorch: setting up devices")
if self.no_cuda:
device = torch.device("cpu")
n_gpu = 0
elif is_torch_tpu_available():
device = xm.xla_device()
n_gpu = 0
elif self.local_rank == -1:
# if n_gpu is > 1 we'll use nn.DataParallel.
# If you only want to use a specific subset of GPUs use `CUDA_VISIBLE_DEVICES=0`
# Explicitly set CUDA to the first (index 0) CUDA device, otherwise `set_device` will
# trigger an error that a device index is missing. Index 0 takes into account the
# GPUs available in the environment, so `CUDA_VISIBLE_DEVICES=1,2` with `cuda:0`
# will use the first GPU in that env, i.e. GPU#1
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
n_gpu = torch.cuda.device_count()
else:
# Here, we'll use torch.distributed.
# Initializes the distributed backend which will take care of synchronizing nodes/GPUs
torch.distributed.init_process_group(backend="nccl")
device = torch.device("cuda", self.local_rank)
n_gpu = 1
if device.type == "cuda":
torch.cuda.set_device(device)
return device, n_gpu
@property
@torch_required
def device(self) -> "torch.device":
"""
The device used by this process.
"""
return self._setup_devices[0]
@property
@torch_required
def n_gpu(self):
"""
The number of GPUs used by this process.
Note:
This will only be greater than one when you have multiple GPUs available but are not using distributed
training. For distributed training, it will always be 1.
"""
return self._setup_devices[1]
@property
@torch_required
def parallel_mode(self):
"""
The current mode used for parallelism if multiple GPUs/TPU cores are available. One of:
- :obj:`ParallelMode.NOT_PARALLEL`: no parallelism (CPU or one GPU).
- :obj:`ParallelMode.NOT_DISTRIBUTED`: several GPUs in one single process (uses :obj:`torch.nn.DataParallel`).
- :obj:`ParallelMode.DISTRIBUTED`: several GPUs, each having its own process (uses
:obj:`torch.nn.DistributedDataParallel`).
- :obj:`ParallelMode.TPU`: several TPU cores.
"""
if is_torch_tpu_available():
return ParallelMode.TPU
elif self.local_rank != -1:
return ParallelMode.DISTRIBUTED
elif self.n_gpu > 1:
return ParallelMode.NOT_DISTRIBUTED
else:
return ParallelMode.NOT_PARALLEL
def to_dict(self):
"""
Serializes this instance while replacing `Enum` members by their values (for JSON serialization support).
"""
d = dataclasses.asdict(self)
for k, v in d.items():
if isinstance(v, Enum):
d[k] = v.value
return d
def to_json_string(self):
"""
Serializes this instance to a JSON string.
"""
return json.dumps(self.to_dict(), indent=2)
def to_sanitized_dict(self) -> Dict[str, Any]:
"""
Sanitized serialization to use with TensorBoard's hparams
"""
d = self.to_dict()
d = {**d, **{"train_batch_size": self.train_batch_size, "eval_batch_size": self.eval_batch_size}}
valid_types = [bool, int, float, str]
if is_torch_available():
valid_types.append(torch.Tensor)
return {k: v if type(v) in valid_types else str(v) for k, v in d.items()}
class ParallelMode(Enum):
NOT_PARALLEL = "not_parallel"
NOT_DISTRIBUTED = "not_distributed"
DISTRIBUTED = "distributed"
TPU = "tpu"
|
# coding: utf-8
"""
Influx API Service
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
OpenAPI spec version: 0.1.0
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
class VariableAssignment(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'type': 'str',
'id': 'Identifier',
'init': 'Expression'
}
attribute_map = {
'type': 'type',
'id': 'id',
'init': 'init'
}
def __init__(self, type=None, id=None, init=None): # noqa: E501
"""VariableAssignment - a model defined in OpenAPI""" # noqa: E501
self._type = None
self._id = None
self._init = None
self.discriminator = None
if type is not None:
self.type = type
if id is not None:
self.id = id
if init is not None:
self.init = init
@property
def type(self):
"""Gets the type of this VariableAssignment. # noqa: E501
type of AST node # noqa: E501
:return: The type of this VariableAssignment. # noqa: E501
:rtype: str
"""
return self._type
@type.setter
def type(self, type):
"""Sets the type of this VariableAssignment.
type of AST node # noqa: E501
:param type: The type of this VariableAssignment. # noqa: E501
:type: str
"""
self._type = type
@property
def id(self):
"""Gets the id of this VariableAssignment. # noqa: E501
:return: The id of this VariableAssignment. # noqa: E501
:rtype: Identifier
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this VariableAssignment.
:param id: The id of this VariableAssignment. # noqa: E501
:type: Identifier
"""
self._id = id
@property
def init(self):
"""Gets the init of this VariableAssignment. # noqa: E501
:return: The init of this VariableAssignment. # noqa: E501
:rtype: Expression
"""
return self._init
@init.setter
def init(self, init):
"""Sets the init of this VariableAssignment.
:param init: The init of this VariableAssignment. # noqa: E501
:type: Expression
"""
self._init = init
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, VariableAssignment):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
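# Usage sketch (added for illustration; not generated code): the Identifier and
# Expression types referenced in openapi_types live elsewhere in the generated
# client, so a plain value is used here.
if __name__ == '__main__':
    assignment = VariableAssignment(type='VariableAssignment')
    print(assignment.to_dict())   # {'type': 'VariableAssignment', 'id': None, 'init': None}
    print(assignment == VariableAssignment(type='VariableAssignment'))   # True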
|
pkgname = "zlib"
version = "1.2.11"
revision = 0
build_style = "configure"
short_desc = "Compression/decompression Library"
maintainer = "q66 <q66@chimera-linux.org>"
license = "Zlib"
homepage = "http://www.zlib.net"
distfiles = [f"{homepage}/{pkgname}-{version}.tar.gz"]
checksum = ["c3e5e9fdd5004dcb542feda5ee4f0ff0744628baf8ed2dd5d66f8ca1197cb1a1"]
options = ["bootstrap"]
def do_configure(self):
self.do(self.chroot_cwd / "configure", [
"--prefix=/usr", "--shared"
])
@subpackage("zlib-devel")
def _devel(self):
self.depends = [f"zlib={version}-r{revision}"]
self.short_desc = short_desc + " - development files"
return [
"usr/include",
"usr/lib/pkgconfig",
"usr/lib/*.a",
"usr/lib/*.so",
"usr/share",
]
|
from nba_py import shotchart
from nba_py.player import get_player
def test():
pid = get_player('Kevin', 'Durant')
assert shotchart.ShotChart(pid)
|
from __future__ import unicode_literals
import unittest
from django.test import TestCase
from django.core.urlresolvers import reverse
from django.contrib.auth import get_user_model
from django.contrib.auth.models import Group, Permission
from django.utils import six
from wagtail.tests.utils import WagtailTestUtils
from wagtail.wagtailcore import hooks
from wagtail.wagtailusers.models import UserProfile
from wagtail.wagtailcore.models import Page, GroupPagePermission
class TestUserIndexView(TestCase, WagtailTestUtils):
def setUp(self):
# create a user that should be visible in the listing
self.test_user = get_user_model().objects.create_user(username='testuser', email='testuser@email.com', password='password')
self.login()
def get(self, params={}):
return self.client.get(reverse('wagtailusers_users:index'), params)
def test_simple(self):
response = self.get()
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'wagtailusers/users/index.html')
self.assertContains(response, 'testuser')
def test_allows_negative_ids(self):
# see https://github.com/torchbox/wagtail/issues/565
get_user_model().objects.create_user('guardian', 'guardian@example.com', 'gu@rd14n', id=-1)
response = self.get()
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'testuser')
self.assertContains(response, 'guardian')
def test_search(self):
response = self.get({'q': "Hello"})
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['query_string'], "Hello")
def test_pagination(self):
pages = ['0', '1', '-1', '9999', 'Not a page']
for page in pages:
response = self.get({'p': page})
self.assertEqual(response.status_code, 200)
class TestUserCreateView(TestCase, WagtailTestUtils):
def setUp(self):
self.login()
def get(self, params={}):
return self.client.get(reverse('wagtailusers_users:add'), params)
def post(self, post_data={}):
return self.client.post(reverse('wagtailusers_users:add'), post_data)
def test_simple(self):
response = self.get()
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'wagtailusers/users/create.html')
def test_create(self):
response = self.post({
'username': "testuser",
'email': "test@user.com",
'first_name': "Test",
'last_name': "User",
'password1': "password",
'password2': "password",
})
# Should redirect back to index
self.assertRedirects(response, reverse('wagtailusers_users:index'))
# Check that the user was created
users = get_user_model().objects.filter(username='testuser')
self.assertEqual(users.count(), 1)
self.assertEqual(users.first().email, 'test@user.com')
class TestUserEditView(TestCase, WagtailTestUtils):
def setUp(self):
# Create a user to edit
self.test_user = get_user_model().objects.create_user(username='testuser', email='testuser@email.com', password='password')
# Login
self.login()
def get(self, params={}, user_id=None):
return self.client.get(reverse('wagtailusers_users:edit', args=(user_id or self.test_user.id, )), params)
def post(self, post_data={}, user_id=None):
return self.client.post(reverse('wagtailusers_users:edit', args=(user_id or self.test_user.id, )), post_data)
def test_simple(self):
response = self.get()
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'wagtailusers/users/edit.html')
def test_nonexistant_redirect(self):
self.assertEqual(self.get(user_id=100000).status_code, 404)
def test_edit(self):
response = self.post({
'username': "testuser",
'email': "test@user.com",
'first_name': "Edited",
'last_name': "User",
'password1': "password",
'password2': "password",
})
# Should redirect back to index
self.assertRedirects(response, reverse('wagtailusers_users:index'))
# Check that the user was edited
user = get_user_model().objects.get(id=self.test_user.id)
self.assertEqual(user.first_name, 'Edited')
def test_edit_validation_error(self):
# Leave "username" field blank. This should give a validation error
response = self.post({
'username': "",
'email': "test@user.com",
'first_name': "Teset",
'last_name': "User",
'password1': "password",
'password2': "password",
})
# Should not redirect to index
self.assertEqual(response.status_code, 200)
class TestUserProfileCreation(TestCase, WagtailTestUtils):
def setUp(self):
# Create a user
self.test_user = get_user_model().objects.create_user(username='testuser', email='testuser@email.com', password='password')
def test_user_created_without_profile(self):
self.assertEqual(UserProfile.objects.filter(user=self.test_user).count(), 0)
with self.assertRaises(UserProfile.DoesNotExist):
self.test_user.userprofile
def test_user_profile_created_when_method_called(self):
self.assertIsInstance(UserProfile.get_for_user(self.test_user), UserProfile)
# and get it from the db too
self.assertEqual(UserProfile.objects.filter(user=self.test_user).count(), 1)
class TestGroupIndexView(TestCase, WagtailTestUtils):
def setUp(self):
self.login()
def get(self, params={}):
return self.client.get(reverse('wagtailusers_groups:index'), params)
def test_simple(self):
response = self.get()
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'wagtailusers/groups/index.html')
def test_search(self):
response = self.get({'q': "Hello"})
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['query_string'], "Hello")
def test_pagination(self):
pages = ['0', '1', '-1', '9999', 'Not a page']
for page in pages:
response = self.get({'p': page})
self.assertEqual(response.status_code, 200)
class TestGroupCreateView(TestCase, WagtailTestUtils):
def setUp(self):
self.login()
def get(self, params={}):
return self.client.get(reverse('wagtailusers_groups:add'), params)
def post(self, post_data={}):
post_defaults = {
'page_permissions-TOTAL_FORMS': ['0'],
'page_permissions-MAX_NUM_FORMS': ['1000'],
'page_permissions-INITIAL_FORMS': ['0'],
}
for k, v in six.iteritems(post_defaults):
post_data[k] = post_data.get(k, v)
return self.client.post(reverse('wagtailusers_groups:add'), post_data)
def test_simple(self):
response = self.get()
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'wagtailusers/groups/create.html')
def test_create_group(self):
response = self.post({'name': "test group"})
# Should redirect back to index
self.assertRedirects(response, reverse('wagtailusers_groups:index'))
# Check that the user was created
groups = Group.objects.filter(name='test group')
self.assertEqual(groups.count(), 1)
def test_group_create_adding_permissions(self):
response = self.post({
'name': "test group",
'page_permissions-0-id': [''],
'page_permissions-0-page': ['1'],
'page_permissions-0-permission_type': ['publish'],
'page_permissions-1-id': [''],
'page_permissions-1-page': ['1'],
'page_permissions-1-permission_type': ['edit'],
'page_permissions-TOTAL_FORMS': ['2'],
})
self.assertRedirects(response, reverse('wagtailusers_groups:index'))
# The test group now exists, with two page permissions
new_group = Group.objects.get(name='test group')
self.assertEqual(new_group.page_permissions.all().count(), 2)
@unittest.expectedFailure
def test_duplicate_page_permissions_error(self):
# Try to submit duplicate page permission entries
response = self.post({
'name': "test group",
'page_permissions-0-id': [''],
'page_permissions-0-page': ['1'],
'page_permissions-0-permission_type': ['publish'],
'page_permissions-1-id': [''],
'page_permissions-1-page': ['1'],
'page_permissions-1-permission_type': ['publish'],
'page_permissions-TOTAL_FORMS': ['2'],
})
self.assertEqual(response.status_code, 200)
# the second form should have errors
self.assertEqual(bool(response.context['formset'].errors[0]), False)
self.assertEqual(bool(response.context['formset'].errors[1]), True)
class TestGroupEditView(TestCase, WagtailTestUtils):
def setUp(self):
# Create a group to edit
self.test_group = Group.objects.create(name='test group')
self.root_page = Page.objects.get(id=1)
self.root_add_permission = GroupPagePermission.objects.create(page=self.root_page,
permission_type='add',
group=self.test_group)
# Get the hook-registered permissions, and add one to this group
self.registered_permissions = Permission.objects.none()
for fn in hooks.get_hooks('register_permissions'):
self.registered_permissions = self.registered_permissions | fn()
self.existing_permission = self.registered_permissions.order_by('pk')[0]
self.another_permission = self.registered_permissions.order_by('pk')[1]
self.test_group.permissions.add(self.existing_permission)
# Login
self.login()
def get(self, params={}, group_id=None):
return self.client.get(reverse('wagtailusers_groups:edit', args=(group_id or self.test_group.id, )), params)
def post(self, post_data={}, group_id=None):
post_defaults = {
'name': 'test group',
'permissions': [self.existing_permission.id],
'page_permissions-TOTAL_FORMS': ['1'],
'page_permissions-MAX_NUM_FORMS': ['1000'],
'page_permissions-INITIAL_FORMS': ['1'], # as we have one page permission already
'page_permissions-0-id': [self.root_add_permission.id],
'page_permissions-0-page': [self.root_add_permission.page.id],
'page_permissions-0-permission_type': [self.root_add_permission.permission_type]
}
for k, v in six.iteritems(post_defaults):
post_data[k] = post_data.get(k, v)
return self.client.post(reverse('wagtailusers_groups:edit', args=(group_id or self.test_group.id, )), post_data)
def add_non_registered_perm(self):
# Some groups may have django permissions assigned that are not
# hook-registered as part of the wagtail interface. We need to ensure
# that these permissions are not overwritten by our views.
# Tests that use this method are testing the aforementioned
# functionality.
self.non_registered_perms = Permission.objects.exclude(id__in=self.registered_permissions)
self.non_registered_perm = self.non_registered_perms[0]
self.test_group.permissions.add(self.non_registered_perm)
def test_simple(self):
response = self.get()
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'wagtailusers/groups/edit.html')
def test_nonexistant_group_redirect(self):
self.assertEqual(self.get(group_id=100000).status_code, 404)
def test_group_edit(self):
response = self.post({'name': "test group edited"})
# Should redirect back to index
self.assertRedirects(response, reverse('wagtailusers_groups:index'))
# Check that the group was edited
group = Group.objects.get(id=self.test_group.id)
self.assertEqual(group.name, 'test group edited')
def test_group_edit_validation_error(self):
# Leave "name" field blank. This should give a validation error
response = self.post({'name': ""})
# Should not redirect to index
self.assertEqual(response.status_code, 200)
def test_group_edit_adding_page_permissions(self):
# The test group has one page permission to begin with
self.assertEqual(self.test_group.page_permissions.count(), 1)
response = self.post({
'page_permissions-1-id': [''],
'page_permissions-1-page': ['1'],
'page_permissions-1-permission_type': ['publish'],
'page_permissions-2-id': [''],
'page_permissions-2-page': ['1'],
'page_permissions-2-permission_type': ['edit'],
'page_permissions-TOTAL_FORMS': ['3'],
})
self.assertRedirects(response, reverse('wagtailusers_groups:index'))
# The test group now has three page permissions
self.assertEqual(self.test_group.page_permissions.count(), 3)
def test_group_edit_deleting_page_permissions(self):
# The test group has one page permissions to begin with
self.assertEqual(self.test_group.page_permissions.count(), 1)
response = self.post({
'page_permissions-0-DELETE': ['1'],
})
self.assertRedirects(response, reverse('wagtailusers_groups:index'))
# The test group now has zero page permissions
self.assertEqual(self.test_group.page_permissions.count(), 0)
def test_group_edit_loads_with_page_permissions_shown(self):
# The test group has one page permission to begin with
self.assertEqual(self.test_group.page_permissions.count(), 1)
response = self.get()
self.assertEqual(response.context['formset'].management_form['INITIAL_FORMS'].value(), 1)
self.assertEqual(response.context['formset'].forms[0].instance, self.root_add_permission)
root_edit_perm = GroupPagePermission.objects.create(page=self.root_page,
permission_type='edit',
group=self.test_group)
# The test group now has two page permissions
self.assertEqual(self.test_group.page_permissions.count(), 2)
# Reload the page and check the form instances
response = self.get()
self.assertEqual(response.context['formset'].management_form['INITIAL_FORMS'].value(), 2)
self.assertEqual(response.context['formset'].forms[0].instance, self.root_add_permission)
self.assertEqual(response.context['formset'].forms[1].instance, root_edit_perm)
def test_duplicate_page_permissions_error(self):
# Try to submit duplicate page permission entries
response = self.post({
'page_permissions-1-id': [''],
'page_permissions-1-page': [self.root_add_permission.page.id],
'page_permissions-1-permission_type': [self.root_add_permission.permission_type],
'page_permissions-TOTAL_FORMS': ['2'],
})
self.assertEqual(response.status_code, 200)
# the second form should have errors
self.assertEqual(bool(response.context['formset'].errors[0]), False)
self.assertEqual(bool(response.context['formset'].errors[1]), True)
def test_group_add_registered_django_permissions(self):
# The test group has one django permission to begin with
self.assertEqual(self.test_group.permissions.count(), 1)
response = self.post({
'permissions': [self.existing_permission.id, self.another_permission.id]
})
self.assertRedirects(response, reverse('wagtailusers_groups:index'))
self.assertEqual(self.test_group.permissions.count(), 2)
def test_group_form_includes_non_registered_permissions_in_initial_data(self):
self.add_non_registered_perm()
original_permissions = self.test_group.permissions.all()
self.assertEqual(original_permissions.count(), 2)
response = self.get()
# See that the form is set up with the correct initial data
self.assertEqual(response.context['form'].initial.get('permissions'), list(original_permissions.values_list('id', flat=True)))
def test_group_retains_non_registered_permissions_when_editing(self):
self.add_non_registered_perm()
original_permissions = list(self.test_group.permissions.all()) # list() to force evaluation
# submit the form with no changes (only submitting the existing
# permission, as in the self.post function definition)
self.post()
# See that the group has the same permissions as before
self.assertEqual(list(self.test_group.permissions.all()), original_permissions)
self.assertEqual(self.test_group.permissions.count(), 2)
def test_group_retains_non_registered_permissions_when_adding(self):
self.add_non_registered_perm()
# Add a second registered permission
self.post({
'permissions': [self.existing_permission.id, self.another_permission.id]
})
# See that there are now three permissions in total
self.assertEqual(self.test_group.permissions.count(), 3)
# ...including the non-registered one
self.assertIn(self.non_registered_perm, self.test_group.permissions.all())
def test_group_retains_non_registered_permissions_when_deleting(self):
self.add_non_registered_perm()
# Delete all registered permissions
self.post({'permissions': []})
# See that the non-registered permission is still there
self.assertEqual(self.test_group.permissions.count(), 1)
self.assertEqual(self.test_group.permissions.all()[0], self.non_registered_perm)
|