#
# This source file is part of the EdgeDB open source project.
#
# Copyright 2018-present MagicStack Inc. and the EdgeDB authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import edgedb
from edb.testbase import server as tb
class TestEdgeQLUserDDL(tb.DDLTestCase):
INTERNAL_TESTMODE = False
async def test_edgeql_userddl_01(self):
# testing anytype polymorphism
with self.assertRaisesRegex(
edgedb.InvalidFunctionDefinitionError,
r'cannot create.*test::func_01.*'
r'generic types are not supported in '
r'user-defined functions'):
await self.con.execute('''
CREATE FUNCTION test::func_01(
a: anytype
) -> bool
USING EdgeQL $$
SELECT a IS float32
$$;
''')
async def test_edgeql_userddl_02(self):
        # testing polymorphism with anyreal, which is an actual abstract scalar type
with self.assertRaisesRegex(
edgedb.InvalidFunctionDefinitionError,
r'cannot create.*test::func_02.*'
r'generic types are not supported in '
r'user-defined functions'):
await self.con.execute('''
CREATE FUNCTION test::func_02(
a: anyreal
) -> bool
USING EdgeQL $$
SELECT a IS float32
$$;
''')
async def test_edgeql_userddl_03(self):
# testing anytype as return type
with self.assertRaisesRegex(
edgedb.InvalidFunctionDefinitionError,
r'cannot create.*test::func_03.*'
r'generic types are not supported in '
r'user-defined functions'):
await self.con.execute('''
CREATE FUNCTION test::func_03(
a: str
) -> anytype
USING EdgeQL $$
SELECT a
$$;
''')
async def test_edgeql_userddl_04(self):
        # testing anyscalar as return type
with self.assertRaisesRegex(
edgedb.InvalidFunctionDefinitionError,
r'cannot create.*test::func_04.*'
r'generic types are not supported in '
r'user-defined functions'):
await self.con.execute('''
CREATE FUNCTION test::func_04(
a: str
) -> anyscalar
USING EdgeQL $$
SELECT a
$$;
''')
async def test_edgeql_userddl_05(self):
with self.assertRaisesRegex(
edgedb.InvalidFunctionDefinitionError,
r'cannot create.*test::func_05.*'
r'USING SQL FUNCTION.*not supported in '
r'user-defined functions'):
await self.con.execute('''
CREATE FUNCTION test::func_05(
a: str
) -> str
USING SQL FUNCTION 'lower';
''')
async def test_edgeql_userddl_06(self):
with self.assertRaisesRegex(
edgedb.InvalidFunctionDefinitionError,
r'cannot create.*test::func_06.*'
r'USING SQL.*not supported in '
r'user-defined functions'):
await self.con.execute('''
CREATE FUNCTION test::func_06(
a: str
) -> str
USING SQL $$ SELECT "a" $$;
''')
async def test_edgeql_userddl_07(self):
with self.assertRaisesRegex(
edgedb.UnsupportedFeatureError,
r'user-defined operators are not supported'):
await self.con.execute('''
CREATE INFIX OPERATOR
std::`+` (l: std::str, r: std::str) -> std::str
USING SQL OPERATOR r'||';
''')
async def test_edgeql_userddl_08(self):
with self.assertRaisesRegex(
edgedb.UnsupportedFeatureError,
r'user-defined casts are not supported'):
await self.con.execute('''
CREATE CAST FROM std::int64 TO std::duration {
USING SQL CAST;
ALLOW ASSIGNMENT;
};
''')
async def test_edgeql_userddl_09(self):
with self.assertRaisesRegex(
edgedb.SchemaDefinitionError,
r'cannot create.*module std is read-only'):
await self.con.execute('''
CREATE FUNCTION std::func_09(
a: str
) -> str
USING EdgeQL $$
SELECT a
$$;
''')
async def test_edgeql_userddl_10(self):
with self.assertRaisesRegex(
edgedb.SchemaDefinitionError,
r'cannot create.*module math is read-only'):
await self.con.execute('''
CREATE FUNCTION math::func_10(
a: str
) -> str
USING EdgeQL $$
SELECT a
$$;
''')
async def test_edgeql_userddl_11(self):
with self.assertRaisesRegex(
edgedb.SchemaDefinitionError,
r'cannot create.*module std is read-only'):
await self.con.execute('''
CREATE TYPE std::Foo_11;
''')
async def test_edgeql_userddl_12(self):
with self.assertRaisesRegex(
edgedb.SchemaDefinitionError,
r'cannot create.*module math is read-only'):
await self.con.execute('''
CREATE TYPE math::Foo_11;
''')
async def test_edgeql_userddl_13(self):
with self.assertRaisesRegex(
edgedb.SchemaDefinitionError,
r'cannot delete.*module std is read-only'):
await self.con.execute('''
DROP TYPE std::Object;
''')
async def test_edgeql_userddl_14(self):
with self.assertRaisesRegex(
edgedb.SchemaDefinitionError,
r'cannot delete.*module stdgraphql is read-only'):
await self.con.execute('''
DROP TYPE stdgraphql::Query;
''')
async def test_edgeql_userddl_15(self):
with self.assertRaisesRegex(
edgedb.SchemaDefinitionError,
r'cannot alter.*module std is read-only'):
await self.con.execute('''
ALTER TYPE std::Object {
CREATE PROPERTY foo_15 -> std::str;
};
''')
async def test_edgeql_userddl_16(self):
with self.assertRaisesRegex(
edgedb.SchemaDefinitionError,
r'cannot alter.*module stdgraphql is read-only'):
await self.con.execute('''
ALTER TYPE stdgraphql::Query {
CREATE PROPERTY foo_15 -> std::str;
};
''')
async def test_edgeql_userddl_17(self):
with self.assertRaisesRegex(
edgedb.SchemaDefinitionError,
r'cannot delete.*module std is read-only'):
await self.con.execute('''
DROP MODULE std;
''')
async def test_edgeql_userddl_18(self):
with self.assertRaisesRegex(
edgedb.SchemaDefinitionError,
r'cannot delete.*module math is read-only'):
await self.con.execute('''
DROP MODULE math;
''')
async def test_edgeql_userddl_19(self):
with self.assertRaisesRegex(
edgedb.UnsupportedFeatureError,
r'cannot create.*test::func_19.*'
r'SET OF parameters in user-defined EdgeQL '
r'functions are not supported'):
await self.con.execute('''
CREATE FUNCTION test::func_19(
a: SET OF str
) -> bool
USING EdgeQL $$
SELECT EXISTS a
$$;
''')
async def test_edgeql_userddl_20(self):
await self.con.execute('''
CREATE FUNCTION test::func_20(
a: str
) -> SET OF str
USING EdgeQL $$
SELECT {a, 'a'}
$$;
''')
await self.assert_query_result(
r'''
SELECT test::func_20('q');
''',
{'q', 'a'},
)
await self.assert_query_result(
r'''
SELECT count(test::func_20({'q', 'w'}));
''',
{4},
)
async def test_edgeql_userddl_21(self):
with self.assertRaisesRegex(
edgedb.SchemaDefinitionError,
r"'force_return_cast' is not a valid field"):
await self.con.execute('''
CREATE FUNCTION test::func(
a: str
) -> bool
{
USING EdgeQL $$
SELECT True;
$$;
SET force_return_cast := true;
};
''')
async def test_edgeql_userddl_22(self):
await self.con.execute('''
CREATE ABSTRACT CONSTRAINT test::uppercase {
CREATE ANNOTATION title := "Upper case constraint";
USING (str_upper(__subject__) = __subject__);
SET errmessage := "{__subject__} is not in upper case";
};
CREATE SCALAR TYPE test::upper_str EXTENDING str {
CREATE CONSTRAINT test::uppercase
};
''')
await self.assert_query_result(
r'''
SELECT <test::upper_str>'123_HELLO';
''',
{'123_HELLO'},
)
|
# coding: utf-8
import copy
from timeit import default_timer as timer
import numpy as np
# Main function
##########
class Pair:
def __init__(self, lhs, rhs, parent):
self.lhs = lhs
self.rhs = rhs
self.parent = parent
def __str__(self):
return "["+str(self.lhs)+","+str(self.rhs)+"]"
class RegularNumber:
def __init__(self, value, parent):
self.value = value
self.parent = parent
def __str__(self):
return str(self.value)
def increase_depth(n, d_map):
if isinstance(n, RegularNumber):
return None
else:
if d_map[n] == 4:
return n
d_map[n.lhs] = d_map[n] + 1
d_map[n.rhs] = d_map[n] + 1
ret = increase_depth(n.lhs, d_map)
if ret != None:
return ret
ret = increase_depth(n.rhs, d_map)
if ret != None:
return ret
def find_deep_pair(root):
d_map = {}
d_map[root] = 0
pair = increase_depth(root, d_map)
return pair
def look_for_ten(n, n_map):
if isinstance(n, RegularNumber) and n.value >= 10:
return n
elif isinstance(n, RegularNumber):
return None
else:
ret = look_for_ten(n.lhs, n_map)
if ret != None:
return ret
ret = look_for_ten(n.rhs, n_map)
if ret != None:
return ret
def find_big_number(root):
n_map = {}
n_map[root] = 0
pair = look_for_ten(root, n_map)
return pair
def reduce(pair, DBG=True):
cont = True
while cont:
while cont:
cont = False
# If any pair is nested inside four pairs, the leftmost such pair explodes.
l_to_r = build_left_to_right(pair)
# find first pair that has depth >=4
to_explode = find_deep_pair(pair)
# explode
if to_explode != None:
explode(to_explode, l_to_r)
cont = True
cont = False
# If any regular number is 10 or greater, the leftmost such regular number splits
# find first reg num >= 10
bigger_than_ten = find_big_number(pair)
# split
if bigger_than_ten != None:
split(bigger_than_ten)
cont = True
def explore(n, l_to_r):
if isinstance(n, RegularNumber):
l_to_r.append(n)
else:
explore(n.lhs, l_to_r)
explore(n.rhs, l_to_r)
def build_left_to_right(root):
l_to_r = []
explore(root, l_to_r)
return l_to_r
def find_reg_num_to_the_left(regnum, l_to_r):
l = len(l_to_r)
for i in range(l):
if l_to_r[i] == regnum and i > 0:
return l_to_r[i-1]
return None
def find_reg_num_to_the_right(regnum, l_to_r):
l = len(l_to_r)
for i in range(l):
if l_to_r[i] == regnum and i < l-1:
return l_to_r[i+1]
return None
def explode(pair, l_to_r):
# To explode a pair, the pair's left value is added to the first regular number
# to the left of the exploding pair (if any), and the pair's right value is added
# to the first regular number to the right of the exploding pair (if any). Exploding pairs
# will always consist of two regular numbers. Then, the entire exploding pair is replaced
# with the regular number 0.
    regnum_left = find_reg_num_to_the_left(pair.lhs, l_to_r)
    regnum_right = find_reg_num_to_the_right(pair.rhs, l_to_r)
if regnum_left != None:
regnum_left.value += pair.lhs.value
if regnum_right != None:
regnum_right.value += pair.rhs.value
if pair.parent.lhs == pair:
pair.parent.lhs = RegularNumber(0, pair.parent)
else:
pair.parent.rhs = RegularNumber(0, pair.parent)
def split(regnum):
# To split a regular number, replace it with a pair; the left element of the pair
# should be the regular number divided by two and rounded down, while the right
# element of the pair should be the regular number divided by two and rounded up.
# For example, 10 becomes [5,5], 11 becomes [5,6], 12 becomes [6,6], and so on.
newpair = Pair(None, None, None)
newpair.lhs = RegularNumber(regnum.value//2, newpair)
newpair.rhs = RegularNumber(
(regnum.value//2) + (regnum.value % 2), newpair)
if regnum.parent.lhs == regnum:
regnum.parent.lhs = newpair
newpair.parent = regnum.parent
else:
regnum.parent.rhs = newpair
newpair.parent = regnum.parent
def sf_add(lhsf, rhsf, DBG=True):
ret = Pair(lhsf, rhsf, None)
lhsf.parent = ret
rhsf.parent = ret
reduce(ret, DBG)
return ret
def parse_sf(lll, DBG=True):
idx = 0
l = len(lll)
root = Pair(None, None, None)
idx += 1
cur = root
while idx < l:
c = lll[idx]
if c == '[':
node = Pair(None, None, cur)
if cur.lhs == None:
cur.lhs = node
else:
cur.rhs = node
cur = node
elif c == ',':
cur = cur.parent
elif c == ']':
cur = cur.parent
else:
num = RegularNumber(int(c), cur)
if cur.lhs == None:
cur.lhs = num
else:
cur.rhs = num
cur = num
idx += 1
if DBG:
print(str(root))
return root
def magnitude(n):
if isinstance(n, RegularNumber):
return n.value
else:
return 3*magnitude(n.lhs)+2*magnitude(n.rhs)
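# Quick worked example of the magnitude rule above (plain arithmetic):
# [9,1] -> 3*9 + 2*1 = 29, [1,9] -> 3*1 + 2*9 = 21,
# so [[9,1],[1,9]] -> 3*29 + 2*21 = 129.
assert magnitude(parse_sf("[9,1]", DBG=False)) == 29
assert magnitude(parse_sf("[[9,1],[1,9]]", DBG=False)) == 129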
def boom_part1(input_val, DBG=True):
    sum_sf = parse_sf(input_val[0], DBG)
for lll in input_val[1:]:
to_add = parse_sf(lll, DBG)
new_sum_sf = sf_add(sum_sf, to_add, DBG)
if DBG:
print("= ", str(new_sum_sf))
sum_sf = new_sum_sf
return str(sum_sf)
def boom_part2(input_val, DBG=True):
all_fishes = []
sum_sf = parse_sf(input_val[0], DBG)
for lll in input_val:
all_fishes.append(parse_sf(lll, DBG))
l = len(all_fishes)
max_val = 0
for i in range(l):
for j in range(l):
if i != j:
max_val = max(max_val, magnitude(
sf_add(copy.deepcopy(all_fishes[i]), copy.deepcopy(all_fishes[j]))))
return max_val
# Testing and timing
##########
def print_time(t_start, t_end):
s = t_end-t_start
print(int(s*1000), "ms = ", int(s), "s = ", int(s/60), "min")
RED_FG = '\x1b[91m'
GREEN_FG = '\x1b[92m'
YELLOW_FG = '\x1b[93m'
DEFAULT_FG = '\x1b[39m'
def output_test(cc, t_start, t_end, result, expected):
result = str(result)
expected = str(expected)
flag = (result == expected)
sflag = ""
if flag == True:
sflag = GREEN_FG+str(flag)+DEFAULT_FG
else:
sflag = RED_FG+str(flag)+DEFAULT_FG
if(expected == "None"):
print("*** "+str(cc) + " *** -> Result = "+str(result))
else:
print("*** "+str(cc) + " *** -> Result = "+str(result) +
" -> success = " + sflag + " -> expected " + expected)
print_time(t_start, t_end)
return flag
def test_part1(cc=None, expected=None, DBG=False):
t_start = timer()
result = boom_part1(cc, DBG)
t_end = timer()
return output_test(cc, t_start, t_end, result, expected)
def test_part2(cc=None, expected=None, DBG=False):
t_start = timer()
result = boom_part2(cc, DBG)
t_end = timer()
return output_test(cc, t_start, t_end, result, expected)
# Test cases
##########
# tests explode
root = parse_sf('[[[[[9,8],1],2],3],4]')
l_to_r = build_left_to_right(root)
to_explode = find_deep_pair(root)
explode(to_explode, l_to_r)
print(str(root)) # [[[[0,9],2],3],4]
root = parse_sf('[7,[6,[5,[4,[3,2]]]]]')
l_to_r = build_left_to_right(root)
to_explode = find_deep_pair(root)
explode(to_explode, l_to_r)
print(str(root)) # [7,[6,[5,[7,0]]]]
root = parse_sf('[[6,[5,[4,[3,2]]]],1]')
l_to_r = build_left_to_right(root)
to_explode = find_deep_pair(root)
explode(to_explode, l_to_r)
print(str(root)) # [[6,[5,[7,0]]],3]
root = parse_sf('[[3,[2,[1,[7,3]]]],[6,[5,[4,[3,2]]]]]')
l_to_r = build_left_to_right(root)
to_explode = find_deep_pair(root)
explode(to_explode, l_to_r)
print(str(root)) # [[3,[2,[8,0]]],[9,[5,[4,[3,2]]]]]
root = parse_sf('[[3,[2,[8,0]]],[9,[5,[4,[3,2]]]]]')
l_to_r = build_left_to_right(root)
to_explode = find_deep_pair(root)
explode(to_explode, l_to_r)
print(str(root)) # [[3,[2,[8,0]]],[9,[5,[7,0]]]]
# tests sums
tt1 = """[[[[4,3],4],4],[7,[[8,4],9]]]
[1,1]"""
tt1 = tt1.splitlines()
test_part1(tt1, "[[[[0,7],4],[[7,8],[6,0]]],[8,1]]", True)
tt1 = """[1,1]
[2,2]
[3,3]
[4,4]"""
tt1 = tt1.splitlines()
test_part1(tt1, "[[[[1,1],[2,2]],[3,3]],[4,4]]", True)
tt1 = """[1,1]
[2,2]
[3,3]
[4,4]
[5,5]"""
tt1 = tt1.splitlines()
test_part1(tt1, "[[[[3,0],[5,3]],[4,4]],[5,5]]", True)
tt1 = """[1,1]
[2,2]
[3,3]
[4,4]
[5,5]
[6,6]"""
tt1 = tt1.splitlines()
test_part1(tt1, "[[[[5,0],[7,4]],[5,5]],[6,6]]", True)
tt1 = """[[[0,[4,5]],[0,0]],[[[4,5],[2,6]],[9,5]]]
[7,[[[3,7],[4,3]],[[6,3],[8,8]]]]
[[2,[[0,8],[3,4]]],[[[6,7],1],[7,[1,6]]]]
[[[[2,4],7],[6,[0,5]]],[[[6,8],[2,8]],[[2,1],[4,5]]]]
[7,[5,[[3,8],[1,4]]]]
[[2,[2,2]],[8,[8,1]]]
[2,9]
[1,[[[9,3],9],[[9,0],[0,7]]]]
[[[5,[7,4]],7],1]
[[[[4,2],2],6],[8,7]]"""
tt1 = tt1.splitlines()
test_part1(tt1, "[[[[8,7],[7,7]],[[8,6],[7,7]]],[[[0,7],[6,6]],[8,7]]]", True)
# Test magnitudes
mag = magnitude(parse_sf("[[1,2],[[3,4],5]]"))
print(mag, mag == 143)
mag = magnitude(parse_sf("[[[[0,7],4],[[7,8],[6,0]]],[8,1]]"))
print(mag, mag == 1384)
mag = magnitude(parse_sf("[[[[1,1],[2,2]],[3,3]],[4,4]]"))
print(mag, mag == 445)
mag = magnitude(parse_sf("[[[[3,0],[5,3]],[4,4]],[5,5]]"))
print(mag, mag == 791)
mag = magnitude(parse_sf("[[[[5,0],[7,4]],[5,5]],[6,6]]"))
print(mag, mag == 1137)
mag = magnitude(
parse_sf("[[[[8,7],[7,7]],[[8,6],[7,7]]],[[[0,7],[6,6]],[8,7]]]"))
print(mag, mag == 3488)
tt1 = """[[[0,[5,8]],[[1,7],[9,6]]],[[4,[1,2]],[[1,4],2]]]
[[[5,[2,8]],4],[5,[[9,9],0]]]
[6,[[[6,2],[5,6]],[[7,6],[4,7]]]]
[[[6,[0,7]],[0,9]],[4,[9,[9,0]]]]
[[[7,[6,4]],[3,[1,3]]],[[[5,5],1],9]]
[[6,[[7,3],[3,2]]],[[[3,8],[5,7]],4]]
[[[[5,4],[7,7]],8],[[8,3],8]]
[[9,3],[[9,9],[6,[4,9]]]]
[[2,[[7,7],7]],[[5,8],[[9,3],[0,2]]]]
[[[[5,2],5],[8,[3,7]]],[[5,[7,5]],[4,4]]]"""
tt1 = tt1.splitlines()
test_part1(
tt1, "[[[[6,6],[7,6]],[[7,7],[7,0]]],[[[7,7],[7,7]],[[7,8],[9,9]]]]", True)
mag = magnitude(
parse_sf("[[[[6,6],[7,6]],[[7,7],[7,0]]],[[[7,7],[7,7]],[[7,8],[9,9]]]]"))
print(mag, mag == 4140)
# test part 2
tt1 = """[[[0,[5,8]],[[1,7],[9,6]]],[[4,[1,2]],[[1,4],2]]]
[[[5,[2,8]],4],[5,[[9,9],0]]]
[6,[[[6,2],[5,6]],[[7,6],[4,7]]]]
[[[6,[0,7]],[0,9]],[4,[9,[9,0]]]]
[[[7,[6,4]],[3,[1,3]]],[[[5,5],1],9]]
[[6,[[7,3],[3,2]]],[[[3,8],[5,7]],4]]
[[[[5,4],[7,7]],8],[[8,3],8]]
[[9,3],[[9,9],[6,[4,9]]]]
[[2,[[7,7],7]],[[5,8],[[9,3],[0,2]]]]
[[[[5,2],5],[8,[3,7]]],[[5,[7,5]],[4,4]]]"""
tt1 = tt1.splitlines()
test_part2(tt1, 3993, True)
# Real data
##########
INPUT_FILE = "input-d18.txt"
with open(INPUT_FILE, "r") as f:
    contents = f.read()
puzzle_input = contents.splitlines()
# part 1
t_start = timer()
ret = boom_part1(puzzle_input, DBG=False)
t_end = timer()
print_time(t_start, t_end)
print(ret)
print(magnitude(parse_sf(ret)))
# part 2
t_start = timer()
ret = boom_part2(puzzle_input, DBG=False)
t_end = timer()
print_time(t_start, t_end)
print(ret)
# PART 1 OK = 4137
# PART 2 OK = 4573
|
import os
import sys
import json
import yaml
import mkdocs
import logging
from mkdocs.plugins import BasePlugin
from mkdocs.utils import warning_filter
from jinja2 import Template
from pathlib import Path
from itertools import chain
log = logging.getLogger(__name__)
log.addFilter(warning_filter)
CONFIG_KEYS = ["site_name", "site_author", "site_url", "repo_url", "repo_name"]
if sys.version_info[0] >= 3:
str_type = str
else:
str_type = mkdocs.utils.string_types
class MarkdownExtraDataPlugin(BasePlugin):
"""
Inject certain config variables into the markdown
"""
config_scheme = (
("data", mkdocs.config.config_options.Type(str_type, default=None)),
)
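    # Illustrative mkdocs.yml wiring (the plugin entry-point name is an
    # assumption; it is declared in the package's setup, not in this file):
    #   plugins:
    #     - markdownextradata:
    #         data: _data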
def __add_data__(self, config, namespace, data):
# creates the namespace and adds the data there
namespace = ["extra"] + namespace.split(os.sep)
holder = config
while len(namespace) > 1:
if not namespace[0] in holder:
holder[namespace[0]] = {}
holder = holder[namespace[0]]
del namespace[0]
holder[namespace[0]] = data
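    # Illustrative sketch (hypothetical file name): a data file found at
    # _data/team/members.yaml is handed to __add_data__ with the namespace
    # "team/members" (path relative to the data folder, extension stripped),
    # so its parsed contents end up at config["extra"]["team"]["members"].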
def on_pre_build(self, config):
# Loads all data from the supplied data directories
# or, otherwise a _data directory next to mkdocs.yml and/or inside the docs_dir.
# Does nothing if the dir does not exist.
# assume an empty list if not defined
        data_source_folders = self.config.get("data") or []
# cast as a list if is defined but is a string
if isinstance(data_source_folders, str):
data_source_folders = data_source_folders.split(',')
        # if we have no value, look in the default folders
# and assume a _data folder, add to list of folders to check
if not data_source_folders:
for datadir in [
os.path.dirname(config["config_file_path"]),
config["docs_dir"],
]:
ds_folder = os.path.join(datadir, "_data")
if os.path.exists(ds_folder):
data_source_folders.append(ds_folder)
if not data_source_folders:
return
        # iterate over the list of folders and look for data files
for ds_folder in data_source_folders:
if os.path.exists(ds_folder):
path = Path(ds_folder)
for filename in chain(
path.glob("**/*.yaml"),
path.glob("**/*.yml"),
path.glob("**/*.json"),
):
namespace = os.path.splitext(os.path.relpath(filename, ds_folder))[0]
# add data into dict based on its path as a namespace
self.__add_data__(
config,
namespace,
(
yaml.load(filename.read_bytes(), Loader=yaml.FullLoader)
if filename.suffix in [".yml", ".yaml"]
else json.loads(filename.read_bytes())
),
)
def on_page_read_source(self, page, config, **kwargs):
context = {key: config.get(key) for key in CONFIG_KEYS if key in config}
context.update(config.get("extra", {}))
try:
with open(page.file.abs_src_path, 'r', encoding='utf-8-sig', errors='strict') as f:
md_template = Template(f.read())
                return md_template.render(**context)
except OSError:
            log.error('File not found: {}'.format(page.file.src_path))
raise
except ValueError:
            log.error('Encoding error reading file: {}'.format(page.file.src_path))
raise
|
from flask import Flask
from flask_graphql import GraphQLView
from models import db_session
from schema import schema, Department
app = Flask(__name__)
app.debug = True
@app.route('/')
def index():
return '<p> Hello World!</p>'
app.add_url_rule(
'/graphql',
view_func=GraphQLView.as_view(
'graphql',
schema=schema,
graphiql=True # for having the GraphiQL interface
)
)
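# Illustrative usage (assumes Flask's default development server on port 5000):
# the GraphiQL IDE is served at http://localhost:5000/graphql, and queries can
# also be POSTed as JSON, e.g. a generic introspection query:
#   curl -X POST http://localhost:5000/graphql \
#        -H 'Content-Type: application/json' \
#        -d '{"query": "{ __schema { queryType { name } } }"}'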
@app.teardown_appcontext
def shutdown_session(exception=None):
db_session.remove()
if __name__ == '__main__':
app.run()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###########################################################################
#
# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
# Copyright (c) 2005-2010 Thierry Benita - atReal <contact@atreal.net>
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
###########################################################################
"""Zope External Editor Helper Application
http://plone.org/products/zope-externaleditor-client"""
APP_NAME = 'zopeedit'
import sys
import os
# get the path of zopeedit.py
try:
# try to get the python file path
system_path=os.path.split(__file__)[0]
except NameError:
system_path = os.path.realpath( os.path.dirname( sys.argv[0] ) )
# Open the VERSION file for reading.
if os.path.exists(os.path.join(system_path,'docs/VERSION.txt')):
f=open(os.path.join(system_path,'docs/VERSION.txt'), 'r')
elif os.path.exists(os.path.join(system_path,'../../docs/VERSION.txt')):
# zopeedit is not properly installed : try uninstalled path
f=open(os.path.join(system_path,'../../docs/VERSION.txt'), 'r')
elif os.path.exists(os.path.join(system_path,'collective/zopeedit/docs/VERSION.txt')):
f=open(os.path.join(system_path,'collective/zopeedit/docs/VERSION.txt'), 'r')
else:
f = None
if f is not None:
__version__ = f.readline()[:-1]
f.close()
else:
__version__ = "0"
# Where am i ?
# The windows version is used with py2exe and a python 2.x (actually 2.6)
# So the possibilities are:
# - under windows with a python 2.x
# - under Linux/unix with python 2.x (>= 2.6 is assumed) or python 3.x
from sys import platform, version_info
py3 = version_info[0] == 3
win32 = sys.platform == 'win32'
osx = sys.platform == 'darwin'
linux = sys.platform == 'linux2'
# Windows specifics
if win32:
from os import startfile
# import pywin32 stuff first so it never looks into system32
import pythoncom, pywintypes
# prevent warnings from being turned into errors by py2exe
import warnings
warnings.filterwarnings('ignore')
# Mac OSX specifics
if osx:
# Launch Services binding
# find the application needed for a file and open the file into it
from LaunchServices import LSOpenFSRef
import re
import subprocess
from subprocess import Popen, call
import time
import rfc822
import traceback
import logging
import urllib
import shutil
import glob
from time import sleep
from tempfile import mktemp
from ConfigParser import ConfigParser
from httplib import HTTPConnection, HTTPSConnection,FakeSocket
import socket
import base64
from urlparse import urlparse
from hashlib import md5, sha1
from urllib2 import parse_http_list, parse_keqv_list, getproxies
import ssl
import locale
import gettext
## gettext start init
# Retrieve full path
local_path = os.path.join( system_path, 'locales' )
LOG_LEVELS = {'debug': logging.DEBUG,
'info': logging.INFO,
'warning': logging.WARNING,
'error': logging.ERROR,
'critical': logging.CRITICAL}
logger = logging.getLogger('zopeedit')
log_file = None
# Retrieve locale from system
lc, encoding = locale.getdefaultlocale()
if lc is None:
lc="en_EN"
encoding="UTF-8"
# Should work without that but it seems to be here most of time
gettext.bindtextdomain( APP_NAME, local_path )
gettext.textdomain( APP_NAME )
# Initialization of translations
lang = gettext.translation( APP_NAME, local_path,
languages = [ lc ], fallback = True)
# fallback = True avoids raising an IOError exception
# when APP_NAME is not found.
_ = lang.lgettext
#__builtins__._ = _
## gettext end init
class Configuration:
def __init__(self, path):
# Create/read config file on instantiation
self.path = path
if not os.path.exists(path):
f = open(path, 'w')
f.write(default_configuration)
f.close()
self.config = ConfigParser()
self.config.readfp(open(path))
logger.info("init at: %s" % time.asctime(time.localtime()) )
logger.info("local_path: %r" % local_path)
def save(self):
"""Save config options to disk"""
self.config.write(open(self.path, 'w'))
logger.info("save at: %s" % time.asctime(time.localtime()) )
def set(self, section, option, value):
self.config.set(section, option, value)
def __getattr__(self, name):
# Delegate to the ConfigParser instance
return getattr(self.config, name)
def getAllOptions(self, meta_type, content_type, title, extension, host_domain):
"""Return a dict of all applicable options for the
given meta_type, content_type and host_domain
"""
opt = {}
sep = content_type.find('/')
general_type = '%s/*' % content_type[:sep]
# Divide up the domains segments and create a
# list of domains from the bottom up
host_domain = host_domain.split('.')
domains = []
for i in range(len(host_domain)):
domains.append('domain:%s' % '.'.join(host_domain[i:]))
domains.reverse()
sections = ['general']
sections.extend(domains)
sections.append('meta-type:%s' % meta_type)
sections.append('general-type:%s' % general_type)
sections.append('content-type:%s' % content_type)
sections.append('title:%s' % title)
for section in sections:
if self.config.has_section(section):
for option in self.config.options(section):
opt[option] = self.config.get(section, option)
logger.debug("option %s: %s" %( option, opt[option]))
# No extension and there is an extension in the metadata
if opt.get('extension') is None and extension is not None:
opt['extension'] = extension
return opt
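    # Illustrative sketch (hypothetical option values): given a config file with
    #   [general]
    #   editor = gedit
    #   [content-type:text/html]
    #   extension = .html
    # getAllOptions('File', 'text/html', 'page', None, 'www.example.com')
    # returns {'editor': 'gedit', 'extension': '.html'}; options from later
    # (more specific) sections override those from earlier ones.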
class NullResponse:
""" Fake Response in case of http error
"""
def getheader(self, n, d = None):
return d
def read(self):
return '(No Response From Server)'
class ExternalEditor:
""" ExternalEditor is the main class of zopeedit.
It is in charge of making the link between the client editor
and the server file object.
There are 2 main actions :
- launch('filename') : starts the edition process
- editConfig() : allows the end user to edit a local options file
"""
def __init__(self, input_file = ''):
""" arguments :
- 'input_file' is the main file received from the server.
"""
self.networkerror = False
self.input_file = input_file
# Setup logging.
global log_file
log_file = mktemp(suffix = '-zopeedit-log.txt')
# print log_file
log_filehandler = logging.FileHandler(log_file)
log_formatter = logging.Formatter(
'%(asctime)s %(levelname)s %(message)s')
log_filehandler.setFormatter(log_formatter)
logger.addHandler(log_filehandler)
logger.setLevel(logging.DEBUG)
logger.info(_(
"\n"
"|-----------------------------------------------------------|\n"
"| |\n"
"| ZopeEdit version %s |\n"
"| |\n"
"| This file is a log file. |\n"
"| |\n"
"| Please save it and send it to your administrator. |\n"
"| |\n"
"|-----------------------------------------------------------|\n"
"| This version is maintained by atReal contact@atreal.net |\n"
"|-----------------------------------------------------------|\n"
"\n\n\n\n"
)% __version__ )
logger.info('Opening %r.', self.input_file)
# If there is no filename, don't try to use it !
if self.input_file == '':
self.metadata = {}
self.host=''
self.loadConfig()
return
try:
# Open the input file and read the metadata headers
in_f = open(self.input_file, 'rb')
m = rfc822.Message(in_f)
self.metadata = m.dict.copy()
# Special care for Dexterity Item content type, which
            # is encapsulated as its own rfc2822 message by plone.rfc822
if self.metadata["meta_type"] == "Dexterity Item":
import email, email.header, StringIO
msg = email.message_from_string(in_f.read())
self.dexterity = dict(msg.items())
self.metadata["title"] = self.dexterity.get(
"title", self.metadata.get("title", ""))
self.metadata["content_type"] = self.dexterity.get(
"Content-Type", self.metadata.get("content_type", "text/plain"))
in_f = StringIO.StringIO()
in_f.write(msg.get_payload(decode=True))
in_f.seek(0)
logger.debug("metadata: %s" % repr(self.metadata))
# Parse the incoming url
scheme, self.host, self.path = urlparse(self.metadata['url'])[:3]
# Keep the full url for proxy
self.url = self.metadata['url']
self.ssl = scheme == 'https'
            # initialize configuration based on the config file and default values
self.loadConfig()
# Get last-modified
last_modified = None
if self.metadata.has_key("last-modified"):
last_modified = self.metadata['last-modified']
self.last_modified = http_date_to_datetime(last_modified)
logger.debug('last_modified: %s' % str(self.last_modified))
# Retrieve original title
self.title = self.metadata["title"].decode(self.server_charset).\
encode(self.client_charset,'ignore')
# Write the body of the input file to a separate file
if self.long_file_name:
sep = self.options.get('file_name_separator', ',')
content_file = urllib.unquote('-%s%s' % (self.host,
self.path))
content_file = content_file.replace(
'/', sep).replace(':',sep).replace(' ','_')
else:
content_file = '-' + urllib.unquote(
self.path.split('/')[-1]).replace(' ','_')
extension = self.options.get('extension')
if extension and not content_file.endswith(extension):
content_file = content_file + extension
if self.options.has_key('temp_dir'):
while 1:
temp = os.path.expanduser(self.options['temp_dir'])
temp = os.tempnam(temp)
content_file = '%s%s' % (temp, content_file)
if not os.path.exists(content_file):
break
else:
content_file = mktemp(content_file,'rw')
logger.debug('Destination filename will be: %r.', content_file)
body_f = open(content_file, 'wb')
shutil.copyfileobj(in_f, body_f)
self.content_file = content_file
self.saved = False
body_f.close()
in_f.close()
# cleanup the input file if the clean_up option is active
if self.clean_up:
try:
logger.debug('Cleaning up %r.', self.input_file)
os.chmod(self.input_file, 0777)
os.remove(self.input_file)
except OSError:
logger.exception('Failed to clean up %r.',
self.input_file)
pass # Sometimes we aren't allowed to delete it
# See if ssl is available
if self.ssl:
try:
from socket import ssl
except ImportError:
fatalError('SSL support is not available on this \
system.\n'
'Make sure openssl is installed '
'and reinstall Python.')
self.lock_token = None
self.did_lock = False
except:
# for security, always delete the input file even if
# a fatal error occurs, unless explicitly stated otherwise
# in the config file
if getattr(self, 'clean_up', 1):
try:
exc, exc_data = sys.exc_info()[:2]
os.remove(self.input_file)
except OSError:
# Sometimes we aren't allowed to delete it
raise exc, exc_data
raise
def __del__(self):
logger.info("ZopeEdit ends at: %s" %
time.asctime(time.localtime()) )
def loadConfig(self):
""" Read the configuration file and set default values """
config_path = self.getConfigPath()
self.config = Configuration(config_path)
# Get all configuration options
self.options = self.config.getAllOptions(
self.metadata.get('meta_type', ''),
self.metadata.get('content_type',''),
self.metadata.get('title',''),
self.metadata.get('extension'),
self.host)
logger.info("loadConfig: all options : %r" % self.options)
# Log level
logger.setLevel(LOG_LEVELS[self.options.get('log_level',
'info')])
# Get autolauncher in case of an unknown file
self.autolauncher = self.options.get('autolauncher',
'gnome-open;kde-open;xdg-open')
logger.debug("loadConfig: autolauncher: %r" % self.autolauncher)
# Get default editors, in case none is found
if win32:
self.defaulteditors = self.options.get('defaulteditors',
'notepad')
else:
self.defaulteditors = self.options.get('defaulteditors',
'gedit;kedit;gvim;vim;emacs;nano')
logger.debug("loadConfig: defaulteditors: %s" % self.defaulteditors)
# Get autoproxy option : do we want to configure proxy from system ?
self.autoproxy = self.options.get('autoproxy','')
logger.debug("loadConfig: autoproxy: %r" % self.autoproxy)
# Get proxy from options
self.proxy = self.options.get('proxy','')
proxies = getproxies()
logger.debug("loadConfig: system proxies : %r" % proxies)
if self.proxy == '' and self.autoproxy:
if proxies.has_key('http') :
self.proxy = proxies["http"]
if self.proxy.startswith('http://'):
self.proxy = self.proxy[7:]
if self.proxy.find('/') > -1:
self.proxy = self.proxy[:self.proxy.find('/')]
logger.debug("loadConfig: Proxy set to : %s" % self.proxy)
# Lock file name for editors that create a lock file
self.lock_file_schemes = self.options.get(
'lock_file_schemes',
'.~lock.%s#;~%s.lock;.%s.swp').split(';')
logger.debug("loadConfig: lock_files_schemes: %s" % self.lock_file_schemes)
# Proxy user and pass
self.proxy_user = self.options.get('proxy_user', '')
logger.debug("loadConfig: proxy_user: %s" % self.proxy_user)
self.proxy_pass = self.options.get('proxy_pass', '')
logger.debug("loadConfig: proxy_pass: %s" % self.proxy_pass)
# Create a new version when the file is closed ?
self.version_control = int(self.options.get('version_control', 0 ))
logger.debug("loadConfig: version_control: %s" % self.version_control)
self.version_command = self.options.get('version_command',
'/saveasnewversion')
self.version_command += '?versioncomment=ZopeEdit%%20%s' % \
__version__
logger.debug("loadConfig: version_command: %s" % self.version_command)
# Should we keep the log file?
self.keep_log = int(self.options.get('keep_log', 1))
logger.debug("loadConfig: keep_log: %s" % self.keep_log)
# Should we always borrow the lock when it does exist ?
self.use_locks = int(self.options.get('use_locks', 1))
logger.debug("loadConfig: use_locks: %s" % self.use_locks)
self.always_borrow_locks = int(self.options.get(
'always_borrow_locks', 0))
logger.debug("loadConfig: always_borrow_locks: %s" % self.always_borrow_locks)
        # Should we inform the user about lock issues and allow them to edit the file ?
self.manage_locks = int(self.options.get('manage_locks',1))
logger.debug("loadConfig: manage_locks: %s" % self.manage_locks)
self.lock_timeout = self.options.get('lock_timeout',
'86400')
logger.debug("loadConfig: lock_timeout: %s" % self.lock_timeout)
# Should we clean-up temporary files ?
self.clean_up = int(self.options.get('cleanup_files', 1))
logger.debug("loadConfig: cleanup_files: %s" % self.clean_up)
self.save_interval = float(self.options.get('save_interval',2))
logger.debug("loadConfig: save_interval: %s" % self.save_interval)
self.max_is_alive_counter = int(self.options.get(
'max_isalive_counter', 5) )
logger.debug("loadConfig: max_isalive_counter: %s" % self.max_is_alive_counter)
# Server charset
self.server_charset = self.options.get('server_charset',
'utf-8')
logger.debug("loadConfig: server_charset: %s" % self.server_charset)
# Client charset
self.client_charset = encoding
logger.debug("loadConfig: client_charset: %s" % self.client_charset)
# Do we use long file name ? If not we use a generated file name.
self.long_file_name = int(self.options.get('long_file_name', 0))
logger.debug("loadConfig: long_filename: %s" % self.long_file_name)
# Editors for the current content type
self.editor = self.options.get('editor')
if self.editor is not None:
self.editor = self.findAvailableEditor(self.editor)
logger.debug("loadConfig: editor: %s" % self.editor)
def findAvailableEditor(self, editors_list):
""" Find an available editor (Linux only)
"""
if not linux:
return editors_list
editors = editors_list.split(';')
for editor in editors:
for path in os.environ["PATH"].split(":"):
if editor in os.listdir(path):
return editor
# no editor found
return None
def getConfigPath(self, force_local_config = False):
""" Retrieve the configuration path
It may be local if there is a user configuration file
or global for all users
"""
if win32:
# Get Application Data
app_data = os.environ['APPDATA']
# Create application folder in Application Data if it isn't here
if not os.path.isdir(os.path.expanduser(os.path.join(app_data,
'collective.zopeedit'))):
os.makedirs(os.path.expanduser(os.path.join(app_data,
'collective.zopeedit')))
# Check the AppData first and then the program dir
config_path = os.path.expanduser(os.path.join(app_data ,
'collective.zopeedit','ZopeEdit.ini'))
# sys.path[0] might be library.zip!!!!
app_dir = sys.path[0]
if app_dir.lower().endswith('library.zip'):
app_dir = os.path.dirname(app_dir)
global_config = os.path.join(app_dir or '', 'ZopeEdit.ini')
if not force_local_config and not os.path.exists(config_path):
logger.info('getConfigPath: Config file %r does not exist. '
'Using global configuration file: %r.',
config_path, global_config)
# Don't check for the existence of the global
# config file. It will be created anyway.
config_path = global_config
elif osx:
config_path = os.path.expanduser('~/ZopeEdit.ini')
else:
# make config file using freedesktop config folders
if not os.path.isdir(os.path.expanduser('~/.config/collective.zopeedit')):
os.makedirs(os.path.expanduser('~/.config/collective.zopeedit'))
config_path = os.path.expanduser('~/.config/collective.zopeedit/ZopeEdit.ini')
logger.info('getConfigPath: Using user configuration file: %r.',
config_path)
return config_path
def cleanContentFile(self, tried_cleanup = False):
if self.clean_up and hasattr(self, 'content_file'):
# for security we always delete the files by default
try:
os.remove(self.content_file)
logger.info("Content File cleaned up %r at %s" % (
self.content_file,
time.asctime(time.localtime())))
return True
except OSError:
if tried_cleanup :
logger.exception("Failed to clean up %r at %s" % (
self.content_file,
time.asctime(time.localtime())))
# Issue logged, but it's already the second try.
# So continue.
return False
else:
logger.debug("Failed to clean up %r at %s ;\
retry in 10 sec" % (
self.content_file,
time.asctime(time.localtime())))
                    # Some editors close first and then save the file;
                    # this may take a few seconds
time.sleep(10)
# This is the first try. It may be an editor issue.
# Let's retry later.
return self.cleanContentFile(tried_cleanup = True)
def getEditorCommand(self):
""" Return the editor command
"""
editor = self.editor
if win32 and editor is None:
from _winreg import HKEY_CLASSES_ROOT, OpenKeyEx, \
QueryValueEx, EnumKey
from win32api import FindExecutable, ExpandEnvironmentStrings
# Find editor application based on mime type and extension
content_type = self.metadata.get('content_type')
extension = self.options.get('extension')
logger.debug('Have content type: %r, extension: %r',
content_type, extension)
if content_type:
# Search registry for the extension by MIME type
try:
key = 'MIME\\Database\\Content Type\\%s' % content_type
key = OpenKeyEx(HKEY_CLASSES_ROOT, key)
extension, nil = QueryValueEx(key, 'Extension')
logger.debug('Registry has extension %r for '
'content type %r',
extension, content_type)
except EnvironmentError:
pass
if extension is None and self.metadata.has_key('url'):
url = self.metadata['url']
dot = url.rfind('.')
if dot != -1 and dot > url.rfind('/'):
extension = url[dot:]
logger.debug('Extracted extension from url: %r',
extension)
classname = editor = None
if extension is not None:
try:
key = OpenKeyEx(HKEY_CLASSES_ROOT, extension)
classname, nil = QueryValueEx(key, None)
logger.debug('ClassName for extension %r is: %r',
extension, classname)
except EnvironmentError:
classname = None
if classname is not None:
try:
# Look for Edit action in registry
key = OpenKeyEx(HKEY_CLASSES_ROOT,
classname+'\\Shell\\Edit\\Command')
editor, nil = QueryValueEx(key, None)
logger.debug('Edit action for %r is: %r',
classname, editor)
except EnvironmentError:
pass
if classname is not None and editor is None:
logger.debug('Could not find Edit action for %r. '
'Brute-force enumeration.', classname)
# Enumerate the actions looking for one
# starting with 'Edit'
try:
key = OpenKeyEx(HKEY_CLASSES_ROOT,
classname+'\\Shell')
index = 0
while 1:
try:
subkey = EnumKey(key, index)
index += 1
if str(subkey).lower().startswith('edit'):
subkey = OpenKeyEx(key, subkey +
'\\Command')
editor, nil = QueryValueEx(subkey,
None)
if editor is None:
continue
logger.debug('Found action %r for %r. '
'Command will be: %r',
subkey, classname, editor)
except EnvironmentError:
break
except EnvironmentError:
pass
if classname is not None and editor is None:
try:
# Look for Open action in registry
key = OpenKeyEx(HKEY_CLASSES_ROOT,
classname+'\\Shell\\Open\\Command')
editor, nil = QueryValueEx(key, None)
logger.debug('Open action for %r has command: %r. ',
classname, editor)
except EnvironmentError:
pass
if editor is None:
try:
nil, editor = FindExecutable(self.content_file, '')
logger.debug('Executable for %r is: %r. ',
self.content_file, editor)
except pywintypes.error:
pass
# Don't use IE as an "editor"
if editor is not None and editor.find('\\iexplore.exe') != -1:
logger.debug('Found iexplore.exe. Skipping.')
editor = None
if editor is not None:
return ExpandEnvironmentStrings(editor)
elif editor is None and osx:
# we will use the system in order to find the editor
pass
elif editor is None:
# linux
logger.debug("getEditorCommand: editor is None and linux")
logger.debug("getEditorCommand: self.autolauncher = %s" % self.autolauncher)
editor = self.findAvailableEditor(self.autolauncher)
logger.debug("getEditorCommand: editor is : %s" % editor)
return editor
def launch(self):
""" Launch external editor
"""
# Do we have an input file ?
if self.input_file == '':
fatalError(_("No input file. \n"
"ZopeEdit will close."), exit = 0)
self.last_mtime = os.path.getmtime(self.content_file)
self.initial_mtime = self.last_mtime
self.last_saved_mtime = self.last_mtime
self.dirty_file = False
command = self.getEditorCommand()
# lock before opening the file in the editor
if not self.lock():
self.networkerror = True
msg = _("%s\n"
"Unable to lock the file on the server.\n"
"This may be a network or proxy issue.\n"
"Your log file will be opened\n"
"Please save it and send it to your administrator."
) % self.title
errorDialog(msg)
logger.error("launch: lock failed. Exit.")
self.editFile(log_file,detach=True,default=True)
sys.exit()
# Extract the executable name from the command
if command and win32:
if command.find('\\') != -1:
bin = re.search(r'\\([^\.\\]+)\.exe', command.lower())
if bin is not None:
bin = bin.group(1)
else:
bin = command.lower().strip()
else:
bin = command
logger.info('launch: Command %r, will use %r', command, bin)
if bin is not None:
# Try to load the plugin for this editor
try:
logger.debug("launch: bin is not None - try to load a plugin : %s" % bin)
module = 'Plugins.%s' % bin
Plugin = __import__(module, globals(), locals(),
('EditorProcess',))
self.editor = Plugin.EditorProcess(self.content_file)
logger.info('launch: Launching Plugin %r with: %r',
Plugin, self.content_file)
except (ImportError, AttributeError):
logger.debug("launch: Error while to load the plugin ; set bin to None")
bin = None
if bin is None:
logger.info("launch: No plugin found ; using standard editor process")
# Use the standard EditorProcess class for this editor
if win32:
file_insert = '%1'
else:
file_insert = '$1'
if command.find(file_insert) > -1:
command = command.replace(file_insert, self.content_file)
else:
command = '%s %s' % (command, self.content_file)
logger.info('launch: Launching EditorProcess with: %r', command)
self.editor = EditorProcess(command,
self.content_file,
self.max_is_alive_counter,
self.lock_file_schemes)
logger.info("launch: Editor launched successfully")
launch_success = self.editor.isAlive()
if not launch_success:
fatalError( _("Unable to edit your file.\n\n"
"%s") % command)
file_monitor_exit_state = self.monitorFile()
unlock_success = self.unlock()
if not unlock_success:
logger.error("launch: not unlock_success. Flag networkerror")
self.networkerror = True
        # Check if a file has been modified but not saved back to Zope
# Clean content file
if self.dirty_file:
logger.exception("launch: Some modifications are NOT saved "
"we'll re-open file and logs")
self.clean_up = False
self.keep_log = True
elif ( not unlock_success ) and self.clean_up:
logger.exception("launch: Unlock failed and we have to clean up files")
self.clean_up = False
self.keep_log = True
if self.networkerror or self.dirty_file:
if self.dirty_file:
# Reopen file when there is an issue...
errorDialog(_("Network error :\n"
"Your working copy will be re-opened,\n"
"\n"
"SAVE YOUR WORK ON YOUR DESKTOP.\n"
"\n"
"A log file will be opened\n"
"Please save it and send it to your administrator."))
self.editor.startEditor()
else:
errorDialog(_("Network error : your file is still locked.\n"
"\n"
"A log file will be opened\n"
"Please save it and send it to your administrator."))
self.editFile(log_file,detach=True,default=True)
sys.exit(0)
        # Inform the user of what has been done when the editing session
        # finished without issue
if file_monitor_exit_state == "closed modified" or \
file_monitor_exit_state == "manual close modified":
msg = _("%(title)s\n\n"
"File : %(content_file)s\n\n"
"Saved at : %(time)s\n\n"
"Edition completed") % {
'title': self.title,
'content_file': self.content_file,
'time': time.ctime(self.last_saved_mtime )}
messageDialog(msg)
elif file_monitor_exit_state == "closed not modified" or \
file_monitor_exit_state == "manual close not modified":
msg = _("%(title)s\n\n"
"Edition completed") % { 'title': self.title, }
messageDialog(msg)
self.cleanContentFile()
def monitorFile(self):
""" Check if the file is edited and if it is modified.
If it's modified save it back.
If it is not any more edited exit with an information on what happened.
-> was saved back
-> was automatically detected
-> was manually controled by the user
"""
final_loop = False
isAlive_detected = False
returnChain = ""
while 1:
if not final_loop:
self.editor.wait(self.save_interval)
mtime = os.path.getmtime(self.content_file)
if mtime != self.last_mtime:
logger.debug("monitorFile: File is dirty : changes detected !")
self.dirty_file = True
launch_success = True
if self.versionControl():
logger.info("monitorFile: New version created successfully")
else:
logger.debug("monitorFile: No new version created")
self.saved = self.putChanges()
self.last_mtime = mtime
if self.saved:
self.last_saved_mtime = mtime
self.dirty_file = False
if not self.editor.isAlive():
if final_loop:
# exit from monitorFile
logger.info("monitorFile: Final loop done; break")
return returnChain
else:
                    # Check whether a file hasn't been saved before closing
if mtime != self.last_saved_mtime:
self.dirty_file = True
launch_success = True
self.saved = self.putChanges()
self.last_mtime = mtime
if self.saved:
self.last_saved_mtime = mtime
self.dirty_file = False
# Go through the loop one final time for good measure.
# Our editor's isAlive method may itself *block* during
# a save operation (seen in COM calls, which seem to
# respond asynchronously until they don't) and
# subsequently return false, but the editor may have
# actually saved the file to disk while the call
# blocked. We want to catch any changes that happened
# during a blocking isAlive call.
if isAlive_detected :
if self.last_saved_mtime != self.initial_mtime:
logger.debug("monitorFile: closed modified")
returnChain = "closed modified"
else:
logger.debug("monitorFile: closed not modified")
returnChain = "closed not modified"
else:
if self.last_saved_mtime != self.initial_mtime:
msg = _("%(title)s\n\n"
"File : %(content_file)s\n"
"Saved at : %(time)s\n\n"
"Did you close your file ?"
) % {
'title': self.title,
'content_file': self.content_file,
'time': time.ctime(
self.last_saved_mtime )}
if not askYesNo(msg) :
logger.debug("monitorFile: manual continue modified")
continue
else :
logger.debug("monitorFile: manual closed modified")
returnChain = "manual close modified"
else:
msg = _("%(title)s :\n\n"
"Did you close your file ?") % {
'title': self.title, }
if not askYesNo(msg) :
logger.debug("monitorFile: manual continue not modified")
continue
else :
logger.debug("monitorFile: manual close not monified")
returnChain = "manual close not modified"
final_loop = True
logger.info("monitorFile: Final loop")
else:
isAlive_detected = True
def putChanges(self):
"""Save changes to the file back to Zope"""
logger.info("putChanges at: %s" % time.asctime(time.localtime()) )
f = open(self.content_file, 'rb')
body = f.read()
logger.info("Document is %s bytes long" % len(body) )
f.close()
headers = {'Content-Type':
self.metadata.get('content_type', 'text/plain')}
if self.lock_token is not None:
headers['If'] = '<%s> (<%s>)' % (self.path, self.lock_token)
# Special care for Dexterity Item content type, which
        # is encapsulated as its own rfc2822 message by plone.rfc822
if self.metadata["meta_type"] == "Dexterity Item":
import email
msg = email.message.Message()
for key in self.dexterity:
# Including the id in the PUT message causes dexterity to
# rename the item, resulting in a lock error.
if key == 'id':
continue
msg.add_header(key, self.dexterity[key])
msg.set_payload(body)
email.encoders.encode_base64(msg)
body = str(msg)
response = self.zopeRequest('PUT', headers, body)
# Don't keep the body around longer than we need to
del body
if response.status / 100 != 2:
# Something went wrong
if self.manage_locks and \
askRetryAfterError(response,_("Network error\n"
"\n"
"Could not save the file to server.\n"
"\n"
"Error detail :\n")):
return self.putChanges()
else:
logger.error("Could not save to Zope\n"
"Error during HTTP PUT")
return False
logger.info("File successfully saved back to the intranet")
return True
def lock(self):
"""Apply a webdav lock to the object in Zope
usecases :
- use_locks "1" and manage_locks "1"
1 - no existing lock
lock the file
if error : ask user if retry or not
if no retry and error : return False
2 - existing lock
if always_borrow_locks "yes"
borrow the lock and return True
                else ask the user whether to borrow it or not
if not : exit with error
if yes : borrow the lock
- use_locks "yes" and manage_locks "no"
1 - no existing lock
lock the file
if error : exit with error
2 - existing lock
exit with error
- use_locks "no"
don't do anything and exit with no error
"""
logger.debug("lock: lock at: %s" % time.asctime(time.localtime()) )
if not self.use_locks:
logger.debug("lock: don't use locks")
return True
if self.metadata.get('lock-token'):
# A lock token came down with the data, so the object is
# already locked
if not self.manage_locks:
logger.critical("lock: object already locked : "
"lock tocken not empty\n "
"user doesn't manage locks, so... "
"exit")
msg = _("%s\n"
"This object is already locked."
) %(self.title)
errorDialog(msg)
sys.exit()
# See if we can borrow the lock
if self.always_borrow_locks or self.metadata.get('borrow_lock'):
self.lock_token = 'opaquelocktoken:%s' \
% self.metadata['lock-token']
else:
msg = _("%s\n"
"This object is already locked by you "
"in another session.\n"
"Do you want to borrow this lock and continue ?"
) %(self.title)
if askYesNo(msg):
self.lock_token = 'opaquelocktoken:%s' \
% self.metadata['lock-token']
else:
logger.critical("lock: File locked and user doesn't want "
"to borrow the lock. Exit.")
sys.exit()
if self.lock_token is not None:
logger.warning("lock: Existing lock borrowed.")
return True
# Create a new lock
dav_lock_response = self.DAVLock()
if dav_lock_response / 100 == 2:
logger.info("lock: OK")
self.did_lock = True
return True
# There was an error. Retry ?
while self.manage_locks and not self.did_lock :
dav_lock_response = self.DAVLock()
if dav_lock_response / 100 == 2:
logger.info("lock: OK")
self.did_lock = True
return True
if dav_lock_response == 423:
logger.warning("lock: object locked by someone else... "
"EXIT !")
msg = _("%s\n"
"Object already locked") %(self.title)
errorDialog(msg)
                sys.exit()
else:
logger.error("lock: failed to lock object: "
"response status %s" % dav_lock_response )
msg = _("%(title)s\n"
"Unable to get a lock on the server"
"(return value %(dav_lock_response)s)"
) %{'title': self.title,
'dav_lock_response': dav_lock_response}
msg += '\n'
msg += _("Do you want to retry ?")
if askRetryCancel(msg):
logger.info("lock: Retry lock")
continue
else:
logger.critical("lock: Unable to lock the file ; return False")
logger.error("lock failed. Return False.")
return False
def DAVLock(self):
"""Do effectively lock the object"""
logger.debug("DAVLock at: %s" % time.asctime(time.localtime()) )
headers = {'Content-Type':'text/xml; charset="utf-8"',
'Timeout': self.lock_timeout,
'Depth':'0',
}
body = ('<?xml version="1.0" encoding="utf-8"?>\n'
'<d:lockinfo xmlns:d="DAV:">\n'
' <d:lockscope><d:exclusive/></d:lockscope>\n'
' <d:locktype><d:write/></d:locktype>\n'
' <d:depth>infinity</d:depth>\n'
' <d:owner>\n'
' <d:href>Zope External Editor</d:href>\n'
' </d:owner>\n'
'</d:lockinfo>'
)
response = self.zopeRequest('LOCK', headers, body)
logger.debug("DAVLock response:%r" % response.status)
dav_lock_response = response.status
if dav_lock_response / 100 == 2:
logger.info("Lock success.")
# We got our lock, extract the lock token and return it
reply = response.read()
token_start = reply.find('>opaquelocktoken:')
token_end = reply.find('<', token_start)
if token_start > 0 and token_end > 0:
self.lock_token = reply[token_start+1:token_end]
return dav_lock_response
def versionControl(self):
""" If version_control is enabled, ZopeEdit will try to
automatically create a new version of the file.
The version is created only if the file is modified,
just before the first save.
"""
if not self.version_control:
logger.debug("versionControl: version_control is False : %s" \
% self.version_control)
return False
if self.saved:
logger.debug("versionControl: don't create a version if "
"already saved")
return False
response=self.zopeRequest('GET',
command='%s' % self.version_command)
logger.debug("versionControl : "
"return code of new version is %s" % response.status)
if response.status == 302:
return True
else:
logger.warning("Creation of version failed : "
"response status %s" % response.status)
return False
def unlock(self, interactive = True):
"""Remove webdav lock from edited zope object"""
if not self.use_locks:
logger.debug("unlock: use_locks is False "
"return True.")
return True
if ( not self.did_lock ) and self.lock_token is None :
return True # nothing to do
response = self.DAVunlock()
status = int(response.status)
logger.debug("response : %s status : %s status/100: %s"% (
response, status, status / 100))
while status / 100 != 2:
#unlock failed
logger.error("Unlock failed at: %s did_lock=%s status=%s" % (
time.asctime(time.localtime()),
self.did_lock, status ))
if askRetryAfterError(response,
_("Network error\n"
"\n"
"Unable to unlock the file on server.\n")):
status = self.DAVunlock().status
continue
else :
return False
logger.info("Unlock successfully. did_lock = %s" % self.did_lock )
self.did_lock = False
return True
def DAVunlock(self):
logger.debug("DAVunlock at: %s" % time.asctime(time.localtime()) )
headers = {'Lock-Token':self.lock_token}
response = self.zopeRequest('UNLOCK', headers)
logger.debug("DAVunlock response:%r" % response)
return response
    def _get_authorization(self, host, method, selector, cookie, use_ssl,
                           old_response):
        # get the challenge
        if use_ssl is True:
            ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1_2)
            h = HTTPSConnection(host, context=ctx)
else:
h = HTTPConnection(host)
if cookie is not None:
headers = {'Cookie': cookie}
else:
headers = {}
h.request('HEAD', selector, headers=headers)
r = h.getresponse()
if r.status != 401:
return None
auth_header = r.getheader('www-authenticate').strip()
if auth_header is None \
or not auth_header.lower().startswith('digest'):
return None
# XXX undocumented functions
chal = parse_keqv_list(parse_http_list(auth_header[7:]))
#get the user/password
if self.identity is not None:
username, password = self.identity
else:
# XXX undocumented functions
username = parse_keqv_list(parse_http_list(old_response[7:])
)['username']
password = askPassword(chal['realm'], username)
self.identity = (username, password)
#compute the authorization
algorithm = chal.get('algorithm', 'MD5')
if algorithm == 'MD5':
H = lambda x: md5(x).hexdigest()
elif algorithm == 'SHA':
H = lambda x: sha1(x).hexdigest()
# XXX MD5-sess not implemented
KD = lambda s, d: H("%s:%s" % (s, d))
nonce = chal['nonce']
        res = ('Digest username="%s", realm="%s", nonce="%s", '
               'algorithm="%s", '
               'uri="%s"' % (username, chal['realm'], nonce,
                             algorithm, selector))
if 'opaque' in chal:
res += ', opaque="%s"' % chal['opaque']
A1 = '%s:%s:%s' % (username, chal['realm'], password)
A2 = '%s:%s' % (method, selector)
if 'qop' in chal:
# XXX auth-int not implemented
qop = chal['qop']
nc = '00000001'
cnonce = '12345678'
res += ', qop="%s", nc="%s", cnonce="%s"' % (qop, nc, cnonce)
response = KD( H(A1), '%s:%s:%s:%s:%s' % (nonce,
nc,
cnonce,
qop,
H(A2)))
else:
response = KD( H(A1), '%s:%s' % (nonce, H(A2)) )
res += ', response="%s"' % response
return res
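    # Note on the digest computation above (RFC 2617): with qop="auth" the
    # response is H(H(A1):nonce:nc:cnonce:qop:H(A2)), otherwise the older
    # H(H(A1):nonce:H(A2)) form is used, where A1 = "user:realm:password" and
    # A2 = "method:uri"; H is MD5 or SHA-1 depending on the challenge.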
def zopeRequest(self, method, headers={}, body='', command=''):
"""Send a request back to Zope"""
logger.debug("zopeRequest: method: %r, headers: %r, command: %r" % (method, headers, command))
if self.proxy == '':
logger.debug("zopeRequest: no proxy definition in config file : "
"self.proxy is empty")
host = self.host
url = self.path
logger.debug("zopeRequest: host:%s and url path:%r "
"retrieved from system" % (host, url) )
else:
host = self.proxy
url = self.url
logger.debug("zopeRequest: proxy defined in config file : "
"host:%s url:%s"% (host,url) )
url += command
logger.debug("zopeRequest: url = %s" % url)
logger.debug("zopeRequest: method = %s" % method)
logger.debug("zopeRequest: command = %s" % command)
try:
if self.ssl and self.proxy:
logger.debug("zopeRequest: ssl and proxy")
# XXX
#setup basic authentication
proxy_host, proxy_port = self.proxy.split(':')
proxy_port=int(proxy_port)
logger.debug("zopeRequest: proxy_host; %r, proxy_port: %r" % (proxy_host, proxy_port))
taburl = url.split('/')
if len(taburl[2].split(':')) == 2:
port=int(taburl[2].split(':')[1])
host=taburl[2].split(':')[0]
else:
if taburl[0] == 'https:':
port = 443
else:
port=80
host=taburl[2]
proxy_authorization = ''
if self.proxy_user and self.proxy_passwd:
logger.debug("zopeRequest: proxy_user: %r, proxy_passwd: XXX" % self.proxy_user)
                    user_pass = base64.encodestring(self.proxy_user + ':'
                                                    + self.proxy_passwd).strip()
                    proxy_authorization = 'Proxy-authorization: Basic ' \
                                          + user_pass + '\r\n'
proxy_connect = 'CONNECT %s:%s HTTP/1.0\r\n'%(host, port)
logger.debug("zopeRequest: proxy_connect: %r" % proxy_connect)
user_agent = 'User-Agent: Zope External Editor %s\r\n' \
% __version__
proxy_pieces = proxy_connect+proxy_authorization \
+user_agent+'\r\n'
#now connect, very simple recv and error checking
logger.debug("zopeRequest: initialyze proxy socket")
proxy = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
logger.debug("zopeRequest: connect to proxy")
proxy.connect((proxy_host,proxy_port))
logger.debug("zopeRequest: send auth pieces to proxy (%r)" % proxy_pieces)
proxy.sendall(proxy_pieces)
logger.debug("zopeRequest: receive response fron proxy")
response = proxy.recv(8192)
status = response.split()[1]
                if status != str(200):
                    raise Exception('Proxy CONNECT failed: status=%s' % status)
#trivial setup for ssl socket
logger.debug("zopeRequest: wrap proxy to ssl")
sock = ssl.wrap_socket(proxy)
#initalize httplib and replace with your socket
logger.debug("zopeRequest: initialyze HTTP connection")
hc = HTTPConnection(proxy_host,proxy_port)
hc.set_debuglevel(9)
hc.sock = sock
logger.debug("zopeRequest: putrequest method: %r, url: %r" % (method, url))
hc.putrequest(method, url)
hc.putheader('User-Agent', 'Zope External Editor/%s' \
% __version__)
#hc.putheader('Connection', 'close')
for header, value in headers.items():
hc.putheader(header, value)
hc.putheader("Content-Length", str(len(body)))
if self.metadata.get('auth',
'').lower().startswith('basic'):
hc.putheader("Authorization", self.metadata['auth'])
if self.metadata.get('cookie'):
hc.putheader("Cookie", self.metadata['cookie'])
hc.endheaders()
hc.send(body)
response = hc.getresponse()
logger.debug("zopeRequest: response: %r" % response)
return response
if self.ssl and not self.proxy:
logger.debug("zopeRequest: ssl and no proxy")
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1_2)
h = HTTPSConnection(host, context=ctx)
else :
logger.debug("zopeRequest: no ssl and no proxy")
h = HTTPConnection(host)
h.putrequest(method, url)
h.putheader('User-Agent', 'Zope External Editor/%s' % \
__version__)
for header, value in headers.items():
h.putheader(header, value)
h.putheader("Content-Length", str(len(body)))
# authentification
auth_header = self.metadata.get('auth','')
if auth_header.lower().startswith('basic'):
h.putheader("Authorization", self.metadata['auth'])
if auth_header.lower().startswith('digest'):
authorization = self._get_authorization(host, method, url,
self.metadata.get('cookie'),
False, auth_header)
if authorization is not None:
h.putheader("Authorization", authorization)
#cookie
if self.metadata.get('cookie'):
h.putheader("Cookie", self.metadata['cookie'])
h.endheaders()
h.send(body)
response = h.getresponse()
logger.debug("zopeRequest: response: %r" % response.status)
return response
except:
# On error return a null response with error info
logger.error("zopeRequest: we got an exception !")
response = NullResponse()
response.reason = sys.exc_info()[1]
try:
response.status, response.reason = response.reason
except ValueError:
response.status = 0
if response.reason == 'EOF occurred in violation of protocol':
# Ignore this protocol error as a workaround for
# broken ssl server implementations
response.status = 200
return response
def editConfig(self):
logger.info('Edit local configuration')
user_config = self.getConfigPath(force_local_config=True)
# Read the configuration file
if win32:
# Check the home dir first and then the program dir
# sys.path[0] might be library.zip!!!!
app_dir = sys.path[0]
if app_dir.lower().endswith('library.zip'):
app_dir = os.path.dirname(app_dir)
global_config = os.path.join(app_dir or '', 'ZopeEdit.ini')
create_config_file = False
if not os.path.exists(user_config):
logger.info('Local configuration file %r does not exist. '
'Global configuration file is : %r.',
user_config, global_config)
create_config_file = True
else:
if askYesNo(_("Reset configuration file ?")):
create_config_file = True
logger.info("Replace the configuration file "
"with the default one.")
if create_config_file:
input_config_file = open(global_config, 'r')
output_config_file = open(user_config, 'w')
for l in input_config_file.readlines():
output_config_file.write( l )
input_config_file.close()
output_config_file.close()
else:
if askYesNo(_("Do you want to replace your "
"configuration file \n"
"with the default one ?")):
logger.info("Replace the configuration file with "
"the default one.")
output_config = open(user_config, 'w')
output_config.write(default_configuration)
output_config.close()
self.editFile(user_config,default=True)
def editFile(self, file, detach = False, default = False):
# launch default editor with the user configuration file
if default:
editor = self.findAvailableEditor(self.defaulteditors)
else:
editor = self.getEditorCommand()
        if not editor:
            if osx:
                LSOpenFSRef(file, None)
            else:
                logger.critical("editFile: No editor found. "
                                "File editing failed.")
            return
logger.info("editFile: Edit file %s with editor %s" % (
file, editor))
p = Popen("%s %s" % (editor, file), shell = True)
if linux:
if detach:
p.poll()
else:
sts = os.waitpid(p.pid, 0)[1]
logger.debug("sts : %s" % sts)
if p.pid == 0:
logger.debug("editFile: error with the detected editor ; "
"try with a default one as last option")
editor = self.findAvailableEditor(self.defaulteditors)
logger.info("editFile: Edit file %s with editor %s" % (
file, editor))
logger.debug("editFile: launching editor in a shell environment : %s %s" %
(editor, file) )
p = Popen("%s %s" % (editor, file), shell = True)
if linux:
if detach:
p.poll()
else:
sts = os.waitpid(p.pid, 0)[1]
logger.debug("sts : %s" % sts)
title = 'Zope External Editor'
def askRetryAfterError(response, operation, message=''):
"""Dumps response data"""
if not message \
and response.getheader('Bobo-Exception-Type') is not None:
message = '%s: %s' % (response.getheader('Bobo-Exception-Type'),
response.getheader('Bobo-Exception-Value'))
return askRetryCancel('%s\n\"%d %s - %s\"' % (operation, response.status,
response.reason, message))
class EditorProcess:
def __init__(self, command, contentfile,
max_is_alive_counter, lock_file_schemes):
"""Launch editor process"""
# Prepare the command arguments, we use this regex to
# split on whitespace and properly handle quoting
self.command = command
self.contentfile = contentfile
self.max_is_alive_counter = max_is_alive_counter
self.lock_file_schemes = lock_file_schemes
self.arg_re = r"""\s*([^'"]\S+)\s+|\s*"([^"]+)"\s*|\s*'([^']+)'\s*"""
        self.is_alive_by_file = None  # do we check file or pid ?
self.is_alive_counter = 0 # number of isAlive Cycles
self.starting = True # still in a starting cycle
if win32:
self.methods = {
1: self.isFileLockedByLockFile,
2: self.isFileOpenWin32,
3: self.isPidUpWin32
}
self.nb_methods = 3
elif osx:
self.methods = {
1: self.isFileLockedByLockFile,
2: self.isFileOpen
}
self.nb_methods = 2
else:
self.methods = {
1: self.isFileLockedByLockFile,
2: self.isFileOpen,
3: self.isPidUp
}
self.nb_methods = 3
self.lock_detected = False
self.selected_method = False
if win32:
self.startEditorWin32()
elif osx:
self.startEditorOsx()
else:
self.startEditor()
def startEditorWin32(self):
try:
logger.debug('CreateProcess: %r', self.command)
self.handle, nil, nil, nil = CreateProcess(None,
self.command, None,
None, 1, 0, None,
None, STARTUPINFO())
except pywintypes.error, e:
fatalError('Error launching editor process\n'
'(%s):\n%s' % (self.command, e[2]))
def startEditorOsx(self):
res = LSOpenFSRef(self.contentfile,None)
def startEditor(self):
args = re.split(self.arg_re, self.command.strip())
args = filter(None, args) # Remove empty elements
logger.debug("starting editor %r" % args)
self.pid = Popen(args).pid
logger.debug("Pid is %s" % self.pid)
def wait(self, timeout):
"""Wait for editor to exit or until timeout"""
sleep(timeout)
def isFileOpenWin32(self):
try:
fileOpen = file(self.contentfile, 'a')
except IOError, e:
if e.args[0] == 13:
logger.debug("Document is writeLocked by command")
self.cmdLocksWrite = True
return True
            else:
                logger.error("%s %s " % (e.__class__.__name__, str(e)))
                return False
        fileOpen.close()
logger.info("File is not open : Editor is closed")
return False
def isPidUpWin32(self):
if GetExitCodeProcess(self.handle) == 259:
logger.info("Pid is up : Editor is still running")
return True
logger.info("Pid is not up : Editor exited")
return False
def isFileOpen(self):
"""Test if File is locked (filesystem)"""
logger.debug("test if the file edited is locked by filesystem")
command = '/bin/fuser'
if not os.path.exists(command):
command = '/usr/bin/fuser'
process = Popen([command , self.command.split(' ')[-1]],
stdout=subprocess.PIPE)
process.wait()
fileOpenWith = process.stdout.read()
return fileOpenWith != ''
def isPidUp(self):
"""Test PID"""
logger.debug("test if PID is up")
try:
exit_pid, exit_status = os.waitpid(self.pid, os.WNOHANG)
except OSError:
return False
return exit_pid != self.pid
def isFileLockedByLockFile(self):
"""Test Lock File (extra file)"""
if win32:
file_separator = "\\"
else:
file_separator = "/"
original_filepath = self.contentfile.split(file_separator)
logger.debug("log file schemes : %s" % self.lock_file_schemes)
for i in self.lock_file_schemes:
filepath = original_filepath[:]
if i == '':
continue
filepath[-1] = i % filepath[-1]
filename = file_separator.join(filepath)
logger.debug("Test: lock file : %s" % filename)
if glob.glob(filename):
self.lock_file_schemes = [i]
return True
return False
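    # Illustrative example (not in the original code): for a content file named
    # "report.odt" and the scheme ".~lock.%s#", the loop above probes for
    # ".~lock.report.odt#", the lock file LibreOffice keeps while the document
    # is open; ".%s.swp" likewise matches vim swap files such as ".report.odt.swp".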
def isAlive(self):
"""Returns true if the editor process is still alive
is_alive_by_file stores whether we check file or pid
file check has priority"""
if self.starting:
logger.info("isAlive : still starting. Counter : %s" % \
self.is_alive_counter)
if self.is_alive_counter < self.max_is_alive_counter :
self.is_alive_counter += 1
else:
self.starting = False
for i in range(1, self.nb_methods + 1):
if self.methods[i]():
logger.debug("isAlive: True( %s : %s)"%
(i, self.methods[i].__doc__))
if i != self.selected_method:
logger.info("DETECTION METHOD CHANGE : "
"Level %s - %s" %(
i,
self.methods[i].__doc__))
self.selected_method = i
self.nb_methods = i
self.lock_detected = True
return True
logger.info("isAlive : no edition detected.")
if self.starting and not self.lock_detected:
logger.debug("isAlive : still in the startup process :"
"continue.")
return True
return False
## Platform specific declarations ##
if win32:
    import Plugins  # Assert dependency
from win32ui import MessageBox
from win32process import CreateProcess, GetExitCodeProcess, STARTUPINFO
from win32event import WaitForSingleObject
from win32con import MB_OK, MB_OKCANCEL, MB_YESNO, MB_RETRYCANCEL, \
MB_SYSTEMMODAL, MB_ICONERROR, MB_ICONQUESTION, \
MB_ICONEXCLAMATION
def errorDialog(message):
MessageBox(message, title, MB_OK + MB_ICONERROR + MB_SYSTEMMODAL)
def messageDialog(message):
MessageBox(message, title, MB_OK + MB_ICONEXCLAMATION + MB_SYSTEMMODAL)
def askRetryCancel(message):
return MessageBox(message, title,
MB_OK + MB_RETRYCANCEL + MB_ICONEXCLAMATION
+ MB_SYSTEMMODAL) == 4
def askYesNo(message):
return MessageBox(message, title,
MB_OK + MB_YESNO + MB_ICONQUESTION +
MB_SYSTEMMODAL) == 6
def askPassword(realm, username):
import pywin.dialogs.login
title = _("Please enter your password")
userid, password = pywin.dialogs.login.GetLogin(title, username)
return password
else: # Posix platform
from time import sleep
import re
def has_tk():
"""Sets up a suitable tk root window if one has not
already been setup. Returns true if tk is happy,
false if tk throws an error (like its not available)"""
# create a hidden root window to make Tk happy
        if 'tk_root' not in globals():
try:
global tk_root
from Tkinter import Tk
tk_root = Tk()
tk_root.withdraw()
return True
except:
return False
return True
def tk_flush():
tk_root.update()
def errorDialog(message):
"""Error dialog box"""
if has_tk():
from tkMessageBox import showerror
showerror(title, message)
tk_flush()
else:
print message
def messageDialog(message):
"""Error dialog box"""
if has_tk():
from tkMessageBox import showinfo
showinfo(title, message)
tk_flush()
else:
print message
def askRetryCancel(message):
if has_tk():
from tkMessageBox import askretrycancel
r = askretrycancel(title, message)
tk_flush()
return r
def askYesNo(message):
if has_tk():
from tkMessageBox import askyesno
r = askyesno(title, message)
tk_flush()
return r
def askPassword(realm, username):
if has_tk():
from tkSimpleDialog import askstring
r = askstring(title, "Please enter the password for '%s' in '%s'" %
(username, realm), show = '*')
tk_flush()
return r
def fatalError(message, exit = 1):
"""Show error message and exit"""
global log_file
msg = _("""FATAL ERROR: %s
ZopeEdit will close.""") % message
errorDialog(msg)
# Write out debug info to a temp file
if log_file is None:
log_file = mktemp(suffix = '-zopeedit-traceback.txt')
debug_f = open( log_file, 'a+b')
try:
# Copy the log_file before it goes away on a fatalError.
traceback.print_exc(file = debug_f)
finally:
debug_f.close()
if exit:
sys.exit(0)
def messageScrolledText(text):
if has_tk():
from ScrolledText import ScrolledText
myText=ScrolledText(tk_root, width=80, wrap="word")
myText.pack()
myText.insert('end',"".join(text))
tk_root.wm_deiconify()
tk_flush()
tk_root.protocol( "WM_DELETE_WINDOW", sys.exit )
tk_root.mainloop()
else:
print text
default_configuration = """
#######################################################################
# #
# Zope External Editor helper application configuration #
# #
# maintained by atReal contact@atreal.fr #
#######################################################################
# #
# Remove '#' to make an option active #
# #
#######################################################################
[general]
# General configuration options
version = %s
""" % __version__
default_configuration += """
# Create a new version when the file is closed ?
#version_control = 0
# Temporary file cleanup. Set to false for debugging or
# to waste disk space. Note: setting this to false is a
# security risk to the zope server
#cleanup_files = 1
#keep_log = 1
# Use WebDAV locking to prevent concurrent editing by
# different users. Disable for single user use or for
# better performance
# set use_locks = 0 if you use a proxy that does not allow WebDAV LOCKs
#use_locks = 1
# If you wish to inform the user about locks issues
# set manage_locks = 1
# This will allow the user to borrow a lock or edit a locked file
# without informing the administrator
#manage_locks = 1
# To suppress warnings about borrowing locks on objects
# locked by you before you began editing you can
# set this flag. This is useful for applications that
# use server-side locking, like CMFStaging
#always_borrow_locks = 0
# Duration of file Lock : 1 day = 86400 seconds
# If this option is removed, fall back on 'infinite' zope default
# Default 'infinite' value is about 12 minutes
#lock_timeout = 86400
# Proxy address
#proxy = http://www.myproxy.com:8080
# Proxy user and password ( optional )
#proxy_user = 'username'
#proxy_pass = 'password'
# Automatic proxy configuration from system
# does nothing if proxy is configured
# Default value is "disabled" : 0
#autoproxy = 1
# Max isAlive counter
# This is used in order to wait for the editor to actually lock the file
# This is the number of 'probing' cycles
# default value is 5 cycles of save_interval
#max_isalive_counter = 5
# Automatic save interval, in seconds. Set to zero for
# no auto save (save to Zope only on exit).
#save_interval = 5
# log level : default is 'info'.
# It can be set to debug, info, warning, error or critical.
#log_level = debug
# If your server is not using utf-8
#server_charset = utf-8
# If your client charset is not iso-8859-1
# client_charset = iso-8859-1
# Lock File Scheme
# These are schemes that are used in order to detect "lock" files
# %s is the edited file's name (add a ';' between each scheme):
#lock_file_schemes = .~lock.%s#;~%s.lock;.%s.swp
"""
if linux:
default_configuration += """
# Uncomment and specify an editor value to override the editor
# specified in the environment. You can add several editors separated by ';'
# Default editor
#editor = gedit;kwrite;gvim;emacs;nano;vi
# Environment auto-launcher
# based on the association between mime type and applications
#autolaunchers = gnome-open;kde-open;xdg-open
# Specific settings by content-type or meta-type. Specific
# settings override general options above. Content-type settings
# override meta-type settings for the same option.
[meta-type:DTML Document]
extension=.dtml
[meta-type:DTML Method]
extension=.dtml
[meta-type:Script (Python)]
extension=.py
[meta-type:Page Template]
extension=.pt
[meta-type:Z SQL Method]
extension=.sql
[content-type:text/plain]
extension=.txt
[content-type:text/html]
extension=.html
[content-type:text/xml]
extension=.xml
[content-type:text/css]
extension=.css
[content-type:text/javascript]
extension=.js
[general-type:image/*]
editor=gimp -n
[content-type:application/x-xcf]
editor=gimp -n
[content-type:application/vnd.oasis.opendocument.text]
extension=.odt
editor=
[content-type:application/vnd.sun.xml.writer]
extension=.sxw
editor=
[content-type:application/vnd.sun.xml.calc]
extension=.sxc
editor=
[content-type:application/vnd.oasis.opendocument.spreadsheet]
extension=.ods
editor=
[content-type:application/vnd.oasis.opendocument.presentation]
extension=.odp
editor=
[content-type:application/msword]
extension=.doc
editor=
[content-type:application/vnd.ms-excel]
extension=.xls
editor=
[content-type:application/vnd.ms-powerpoint]
extension=.ppt
editor=
[content-type:application/x-freemind]
extension=.mm
editor=freemind
[content-type:text/xml]
extension=.planner
editor=planner
"""
def main():
""" call zopeedit as a lib
"""
args = sys.argv
input_file=''
if '--version' in args or '-v' in args:
credits = ('Zope External Editor %s\n'
'By atReal\n'
'http://www.atreal.net') % __version__
messageDialog(credits)
sys.exit(0)
if '--help' in args or '-h' in args:
        # Open the README file for reading.
try:
f=open(os.path.join(system_path,'docs','README.txt'), 'r')
except IOError:
# zopeedit is not properly installed : try uninstalled path
f=open(os.path.join(system_path,'..','..','README.txt'), 'r')
README = f.readlines()
f.close()
messageScrolledText(README)
sys.exit(0)
if len(sys.argv)>=2:
input_file = sys.argv[1]
try:
ExternalEditor(input_file).launch()
except (KeyboardInterrupt, SystemExit):
pass
except:
fatalError(sys.exc_info()[1])
else:
ExternalEditor().editConfig()
if __name__ == '__main__':
""" command line call
"""
main()
|
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import os
from analyze_perf import group_perf_by_events, filter_events_after_timestamp, \
classify_events_by_stages, get_percentage
from profiling_stages import draw_profiling_plot
x_labels = ['OPQ16,IVF262144\nnprobe=1', \
'OPQ16,IVF262144\nnprobe=2', \
'OPQ16,IVF262144\nnprobe=4', \
'OPQ16,IVF262144\nnprobe=8', \
'OPQ16,IVF262144\nnprobe=16', \
'OPQ16,IVF262144\nnprobe=32', \
'OPQ16,IVF262144\nnprobe=64', \
'OPQ16,IVF262144\nnprobe=128']
file_prefixes = [ \
'perf.out_SIFT1000M_OPQ16,IVF262144,PQ16_K_100_nprobe_1_qbs_10000', \
'perf.out_SIFT1000M_OPQ16,IVF262144,PQ16_K_100_nprobe_2_qbs_10000', \
'perf.out_SIFT1000M_OPQ16,IVF262144,PQ16_K_100_nprobe_4_qbs_10000', \
'perf.out_SIFT1000M_OPQ16,IVF262144,PQ16_K_100_nprobe_8_qbs_10000', \
'perf.out_SIFT1000M_OPQ16,IVF262144,PQ16_K_100_nprobe_16_qbs_10000', \
'perf.out_SIFT1000M_OPQ16,IVF262144,PQ16_K_100_nprobe_32_qbs_10000', \
'perf.out_SIFT1000M_OPQ16,IVF262144,PQ16_K_100_nprobe_64_qbs_10000', \
'perf.out_SIFT1000M_OPQ16,IVF262144,PQ16_K_100_nprobe_128_qbs_10000']
assert len(x_labels) == len(file_prefixes)
path_prefixes = []
for p in file_prefixes:
path_prefixes.append(os.path.join('../result_experiment_4_nprobe', p))
# time range of the search function, according to the search log, e.g.,
# time_bias_start = 135.656
# time_bias_end = 200.659
time_ranges = [ # pair of (time_bias_start, time_bias_end)
# ==== nprobe=1 ====
(28.157, 36.409),
# ==== nprobe=2 ====
(28.017, 35.706),
# ==== nprobe=4 ====
(27.268, 35.276),
# ==== nprobe=8 ====
(28.237, 37.730),
# ==== nprobe=16 ====
(27.252, 38.686),
# ==== nprobe=32 ====
(27.234, 43.001),
# ==== nprobe=64 ====
(27.344, 52.246),
# ==== nprobe=128 ====
(27.443, 69.042)]
# Stage 1: OPQ
# Stage 2: vector quantizer
# Stage 3: select centroids
# Stage 4: construct distance LUT
# Stage 5: PQ code scan
# Stage 6: collect topK results
profile_perc_array = []
# example_profile_array = [
# # 100M, 1
# [8.606278140845747, 0.11607633274229297, 3.3378707089447355, 78.57136070072978, 9.368414116737446], \
# # 100M, 10
# [32.7008185883583, 0.5164703077320218, 4.674772663594282, 33.70847203114799, 28.399466409167403]
# ]
for i in range(len(path_prefixes)):
print("Processing {}".format(path_prefixes[i]))
all_events = group_perf_by_events(path_prefixes[i])
time_bias_start, time_bias_end = time_ranges[i][0], time_ranges[i][1]
filtered_events = filter_events_after_timestamp(all_events, time_bias_start, time_bias_end)
t_1_4, t_5, t_6, t_other = classify_events_by_stages(filtered_events, track_non_faiss_func=False, remove_unrecognized_faiss_function=False)
p_1_4, p_5, p_6, p_other = get_percentage(t_1_4, t_5, t_6, t_other)
profile_perc_array.append([p_1_4, p_5, p_6, p_other])
y_stage_1_4 = [r[0] for r in profile_perc_array]
y_stage_5 = [r[1] for r in profile_perc_array]
y_stage_6 = [r[2] for r in profile_perc_array]
y_other = [r[3] for r in profile_perc_array]
draw_profiling_plot(x_labels, y_stage_1_4, y_stage_5, y_stage_6, y_other, 'cpu_profile_experiment_4_nprobe_SIFT1000M', x_tick_rotation=45)
|
def concat_multiples(num, multiples):
return int("".join([str(num*multiple) for multiple in range(1,multiples+1)]))
def is_pandigital(num):
return sorted([int(digit) for digit in str(num)]) == list(range(1,10))
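# Worked example (illustrative, not part of the original solution):
# concat_multiples(192, 3) concatenates 192, 384 and 576 into 192384576,
# whose digits are exactly 1..9, so is_pandigital reports True.
assert concat_multiples(192, 3) == 192384576
assert is_pandigital(192384576)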
def solve_p038():
    # retrieve only 9-digit concatenations of the multiples (1, 2, ..., n)
n6 = [concat_multiples(num, 6) for num in [3]]
n5 = [concat_multiples(num, 5) for num in range(5,10)]
n4 = [concat_multiples(num, 4) for num in range(25,33)]
n3 = [concat_multiples(num, 3) for num in range(100,333)]
n2 = [concat_multiples(num, 2) for num in range(5000,9999)]
all_concats = set(n2 + n3 + n4 + n5 + n6)
return max([num for num in all_concats if is_pandigital(num)])
if __name__ == '__main__':
print((solve_p038()))
|
import models
import json
import re
import constants.userConstants as UserConstants
from enums import UserEnums
from databaseService.bookDatabaseService import BookDatabaseService
def validate_and_convert_new_user_request_object(aa: dict, bb: models.User):
for field in UserConstants.USER_MANDATORY_FIELDS:
if field not in aa.keys():
return f"Required field {field} is missing"
return bb.from_json(json.dumps(aa)) # returns a dictionary
def convert_update_request_for_persistence(user_request, user_object):
try:
user_object.last_name = user_request.get(
'last_name') if user_request.get('last_name') else user_object.last_name
user_object.first_name = user_request.get(
'first_name') if user_request.get('first_name') else user_object.first_name
user_object.date_of_birth = user_request.get(
'date_of_birth') if user_request.get('date_of_birth') else user_object.date_of_birth
user_object.phone_number = user_request.get(
'phone_number') if user_request.get('phone_number') else user_object.phone_number
user_object.email = user_request.get(
'email') if user_request.get('email') else user_object.email
user_object.username = user_request.get(
'username') if user_request.get('username') else user_object.username
user_object.alt_username = user_request.get(
'email').rsplit('@')[0] if user_request.get('email') else user_object.alt_username
return user_object
except Exception as e:
return e
def convert_email_update_request_for_persistence(user_request, user_object):
user_object.email = user_request.get('newEmail')
return user_object
def convert_user_dto_to_public_response_dto(user):
try:
response_dto = dict()
response_dto.setdefault('id', str(user.get('id')))
response_dto.setdefault('first_name', user.get('first_name'))
response_dto.setdefault('last_name', user.get('last_name') if user.get('last_name') else "")
response_dto.setdefault('date_of_birth', str(user.get('date_of_birth')))
response_dto.setdefault('email', user.get('email'))
response_dto.setdefault('username', user.get('username'))
return response_dto
except Exception as e:
print("DEBUG: Exception occurred in _USER_DTO_PUBLIC - {}".format(e))
return "There was some error."
def convert_request_to_user_update_dto(request_dto, user_identity):
try:
response_user = clone_dto(user_identity)
        for field in UserConstants.USER_FIELDS_FOR_DETAILS_UPDATE:
            if request_dto.get(field) is not None:
                response_user[field] = request_dto[field]
return response_user
except Exception as e:
return "Error: {}".format(e)
def clone_dto(user):
_cloned_response = {}
for field in user.keys():
_cloned_response.setdefault(field, user.get(field))
return _cloned_response
def is_length_valid_for_id_in_request(mongo_id) -> bool:
    # a MongoDB ObjectId string is 24 hex characters (12 bytes);
    # flag any id whose length differs
    return len(mongo_id) != 12 * 2
def validate_min_length(value, limit):
if len(value) < int(limit):
return False
return True
def verify_username_length(curr, new):
if len(curr) < UserEnums.MIN_USER_NAME_LENGTH.value or len(new) < UserEnums.MIN_USER_NAME_LENGTH.value:
return [{'error': 'Invalid username length. Minimum username length should be 4.'}, 404]
return False
def verify_email_length(curr, new):
if len(curr) < UserEnums.MIN_EMAIL_LENGTH.value or len(new) < UserEnums.MIN_EMAIL_LENGTH.value:
return [
{
'error':
'Invalid email length. Minimum email length should be 6. Please check your email and try again.'
}, 404
]
return False
def get_user_favourite_books(user):
book_service = BookDatabaseService()
book_bucket = list()
for book in user.fav_books:
book_bucket.append(
(
book_service.find_active_book_by_id(book.id)
)
)
return book_bucket
|
# -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'j F Y'
TIME_FORMAT = 'G:i'
DATETIME_FORMAT = 'j F Y، ساعت G:i'
YEAR_MONTH_FORMAT = 'F Y'
MONTH_DAY_FORMAT = 'j F'
SHORT_DATE_FORMAT = 'Y/n/j'
SHORT_DATETIME_FORMAT = 'Y/n/j، G:i'
# FIRST_DAY_OF_WEEK =
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
# DATE_INPUT_FORMATS =
# TIME_INPUT_FORMATS =
# DATETIME_INPUT_FORMATS =
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '.'
# NUMBER_GROUPING =
|
from car_ctrl import servo
import time
#max angle turns right
#0 turns left
def test_servo_rotation():
s = servo()
print(vars(s))
print("max_angle: " +str(s.max_angle))
print("slope: " +str(s.slope))
for i in range(0,3):
s.steer(s.max_angle)
print("turning left")
time.sleep(0.5)
for i in range(0,3):
s.steer(0)
time.sleep(0.5)
print("turning right")
for i in range(0,3):
s.steer(s.max_angle)
time.sleep(0.5)
print("Return to center")
s.kill_servo()
test_servo_rotation()
|
import os
class Config:
SQLALCHEMY_DATABASE_URI = os.environ.get('DATABASE_URL')
SQLALCHEMY_TRACK_MODIFICATIONS = True
SECRET_KEY = os.environ.get('SECRET_KEY')
UPLOADED_PHOTOS_DEST = 'app/static/photos'
MAIL_SERVER = 'smtp.gmail.com'
    MAIL_PORT = 465  # Gmail SMTP over SSL
MAIL_USE_TLS = False
MAIL_USE_SSL = True
MAIL_USERNAME = os.environ.get("MAIL_USERNAME")
MAIL_PASSWORD = os.environ.get("MAIL_PASSWORD")
class ProdConfig(Config):
SQLALCHEMY_DATABASE_URI = os.environ.get("DATABASE_URL")
class DevConfig(Config):
DEBUG = True
config_options = {
'development': DevConfig,
'production': ProdConfig
}
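# Minimal usage sketch (assumed, not part of this module): a Flask application
# factory would typically pick one of these classes by name, e.g.
#
#     from flask import Flask
#
#     def create_app(config_name='development'):
#         app = Flask(__name__)
#         app.config.from_object(config_options[config_name])
#         return app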
|
# Dependencies
from splinter import Browser
from bs4 import BeautifulSoup
from webdriver_manager.chrome import ChromeDriverManager
import time
import pandas as pd
from pprint import pprint
from urllib.parse import urlsplit
import pymongo
# Initialize PyMongo to work with MongoDBs
conn = 'mongodb://localhost:27017'
client = pymongo.MongoClient(conn)
# Define database and collection
db = client.mars_db
collection = db.items
def init_browser():
# capture path to chrome driver
executable_path = {'executable_path': ChromeDriverManager().install()}
return Browser('chrome', **executable_path, headless=False)
def scrape_info():
browser = init_browser()
mars_info = {}
url = 'https://mars.nasa.gov/news/'
browser.visit(url)
html = browser.html
soup = BeautifulSoup(html, 'html.parser')
# scrape latest news headline and para
news_title=soup.find('ul', class_='item_list').\
find('li', class_='slide').\
find('div', class_= 'content_title').text
news_para=soup.find("div", class_='article_teaser_body').text
mars_info['news_title'] = news_title
mars_info['news_para'] = news_para
# Featured image
featured_image = "https://www.nasa.gov/image-feature/jpl/perseverance-s-first-full-color-look-at-mars"
browser.visit(featured_image)
base_url = "{0.scheme}://{0.netloc}/".format(urlsplit(featured_image))
# click on featured image using xpath
xpath = '//*[@id="468477"]/div[2]/div[2]/a/img'
results = browser.find_by_xpath(xpath)
img = results[0]
img.click()
time.sleep(1)
#get image url using BeautifulSoup
html_image = browser.html
soup = BeautifulSoup(html_image, "html.parser")
featured_img_url = soup.find('img')['src']
mars_info['featured_img_url'] = featured_img_url
# Mars Facts
url_facts = "https://space-facts.com/mars/"
table = pd.read_html(url_facts)
table[0]
df_mars_facts = table[0]
df_mars_facts.columns = ["Parameter", "Values"]
fact_table = df_mars_facts.set_index(["Parameter"])
mars_html_table = fact_table.to_html()
mars_html_table = mars_html_table.replace("\n", "")
mars_info['mars_facts_table'] = mars_html_table
# Mars Hemisphere
hemisphere_url = "https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars"
browser.visit(hemisphere_url)
#Get base url
hemisphere_base_url = "{0.scheme}://{0.netloc}/".format(urlsplit(hemisphere_url))
# list of xpaths for mars hemispheres
xpaths = ['//*[@id="product-section"]/div[2]/div[1]/a/img', '//*[@id="product-section"]/div[2]/div[2]/a/img', '//*[@id="product-section"]/div[2]/div[3]/a/img', '//*[@id="product-section"]/div[2]/div[4]/a/img']
hemisphere_img_urls = []
for xpath in xpaths:
hemisphere_url = "https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars"
browser.visit(hemisphere_url)
results = browser.find_by_xpath(xpath)
img = results[0]
img.click()
time.sleep(1)
#get image url using BeautifulSoup
html_image = browser.html
soup = BeautifulSoup(html_image, "html.parser")
img_url = soup.find("img", class_='wide-image')["src"]
time.sleep(1)
img_url = hemisphere_base_url + img_url
title = soup.find("h2",class_="title").text
hemisphere_img_urls.append({'title': title, 'image_url':img_url})
mars_info['hemisphere_img_urls'] = hemisphere_img_urls
browser.quit()
# collection.insert_one(mars_info)
return mars_info
|
from pydriller import RepositoryMining
import iocsv
import csv
repos = iocsv.read_csv_repos_fil("data_filtered.csv")
out = open('project_bot.csv', 'w')
w_out = csv.writer(out)
for commit in RepositoryMining(path_to_repo=repos, only_modifications_with_file_types= ['.yml']).traverse_commits():
files = []
for mod in commit.modifications:
if mod.filename == "stale.yml":
file = [commit.project_name, mod.change_type.name, commit.in_main_branch, commit.hash, commit.msg, commit.author.name, commit.committer.name, commit.merge,
commit.author_date.strftime("%Y-%m-%d %H:%M:%S"), mod.source_code, mod.diff, mod.added, mod.removed]
files.append(file)
break
    if files:
        w_out.writerows(files)
out.flush()
out.close()
|
from fastapi import APIRouter
from gladia_api_utils.submodules import TaskRouter
router = APIRouter()
inputs = [
{
"type": "text",
"name": "text",
"default": "Лорем ипсум долор сит амет",
"tooltip": "Insert the text to transliterate here",
},
{
"type": "text",
"name": "language",
"default": "ru",
"tooltip": "Insert the language code here",
},
]
output = {
"name": "transliterated_text",
"type": "str",
"example": "transliterated_text"
}
TaskRouter(router=router, input=inputs, output=output, default_model="transliterate")
|
"""
flask_flatpages_pandoc
~~~~~~~~~~~~~~~~~~~~~~
Flask-FlatPages-Pandoc is an HTML renderer for Flask-FlatPages that uses
pandoc as backend.
:copyright: (c) 2014 Fabian Hirschmann <fabian@hirschmann.email>
:license: MIT, see LICENSE.txt for more details.
With some changes by @apas:
- Invoke pandoc via pypandoc instead of subprocess
- Indentation changes
- Support of Pandoc 2.0 by @ThoseGrapefruits
- Support of Python 3 by @frstp64
License: MIT
"""
import pkg_resources
import pypandoc
from flask import render_template_string, Markup
try:
__version__ = pkg_resources.require("Flask-FlatPages-Pandoc")[0]
except pkg_resources.DistributionNotFound:
__version__ = "0.0-dev"
class FlatPagesPandoc(object):
"""
Class that, when applied to a :class:`flask.Flask` instance,
sets up an HTML renderer using pandoc.
"""
def __init__(self, source_format, app=None, pandoc_args=[],
pre_render=False):
"""
Initializes Flask-FlatPages-Pandoc.
:param source_format: the source file format; directly passed
to pandoc.
:type source_format: string
:param app: your application. Can be omitted if you call
:meth:`init_app` later.
:type app: :class:`flask.Flask`
:param pandoc_args: extra arguments passed to pandoc
:type pandoc_args: sequence
:param pre_render: pre-render the page as :class:`flask.Markup`
:type pre_render: boolean
"""
self.source_format = source_format
self.pandoc_args = pandoc_args
self.pre_render = pre_render
if app:
self.init_app(app)
def init_app(self, app):
"""
Used to initialize an application. This is useful when passing
an app later.
:param app: your application
:type app: :class:`flask.Flask`
"""
self.app = app
# The following lambda expression works around Flask-FlatPage's
# reflection magic.
self.app.config["FLATPAGES_HTML_RENDERER"] = lambda t: self.renderer(t)
def renderer(self, text):
"""
Renders a flat page to HTML.
:param text: the text of the flat page
:type text: string
"""
#if type(text) == str:
# text = str(text, self.app.config["FLATPAGES_ENCODING"])
if self.pre_render:
text = render_template_string(Markup(text))
extra_args = [
"--filter=pandoc-crossref",
"--filter=pandoc-citeproc",
"--filter=pandoc-sidenote",
"--standalone",
"--mathml",
"--base-header-level=2",
"--highlight-style", "pygments",
"--bibliography=pages/all.bib",
"--csl=pages/lncs.csl",
"-Mreference-section-title=References",
"-Mlink-citations=true"
]
pandocver = int(pypandoc.get_pandoc_version()[0])
if pandocver < 2:
extra_args.append("-S")
format_str = "markdown+raw_tex+yaml_metadata_block"
else:
format_str = "markdown+raw_tex+smart+yaml_metadata_block+header_attributes"
output = pypandoc.convert_text(
text.encode("utf8"),
'html',
format = format_str,
extra_args=extra_args
)
return output
|
import os
from WMCore.Configuration import Configuration
from CRABClient.UserUtilities import config, getUsernameFromCRIC
config = Configuration()
config.section_("General")
config.General.requestName = '2018_ST_tW'
config.General.transferOutputs = True
config.General.transferLogs = True
config.section_("JobType")
config.JobType.allowUndistributedCMSSW = True
config.JobType.pluginName = 'Analysis'
config.JobType.psetName = 'PSet.py'
config.JobType.maxMemoryMB = 2000
config.JobType.maxJobRuntimeMin = 1315
config.JobType.numCores = 1
config.JobType.scriptExe = 'crab_script_2018_ST_tW.sh'
config.JobType.inputFiles = ['crab_script_2018_ST_tW.py',
os.path.join(os.environ['CMSSW_BASE'],'src/PhysicsTools/NanoAODTools/scripts/haddnano.py'),
]
config.JobType.outputFiles = ['hist.root']
config.JobType.sendPythonFolder = True
config.section_("Data")
config.Data.inputDataset = '/ST_tW_top_5f_inclusiveDecays_TuneCP5_13TeV-powheg-pythia8/RunIIAutumn18NanoAODv7-Nano02Apr2020_102X_upgrade2018_realistic_v21_ext1-v1/NANOAODSIM'
config.Data.inputDBS = 'global'
config.Data.splitting = 'FileBased'
if config.Data.splitting == 'FileBased':
config.Data.unitsPerJob = 1
# config.Data.totalUnits = $TOTAL_UNITS
# config.Data.userInputFiles = []
config.Data.outLFNDirBase = '/store/user/{user}/Fri13'.format(user=getUsernameFromCRIC())
config.Data.publication = True
config.Data.outputDatasetTag = 'Fri13'
config.section_("Site")
config.Site.storageSite = 'T2_CH_CERN'
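# Typical workflow (assumed, not part of this configuration): the task is
# submitted with the CRAB3 command-line client, e.g.
#
#     crab submit -c <this_config>.py
#     crab status -d crab_projects/crab_2018_ST_tW
#
# where the project directory name is illustrative and depends on the workArea.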
|
from .GogsClient import GogsClient
from js9 import j
JSConfigBaseFactory = j.tools.configmanager.base_class_configs
class GogsFactory(JSConfigBaseFactory):
def __init__(self):
self.__jslocation__ = "j.clients.gogs"
self.__imports__ = "requests,psycopg2"
JSConfigBaseFactory.__init__(self, GogsClient)
|
import os
import time
from multiprocessing.dummy import Pool
import pytest
from helpers.cluster import ClickHouseCluster
from helpers.test_tools import assert_eq_with_retry
cluster = ClickHouseCluster(__file__)
node1 = cluster.add_instance("node1", main_configs=["configs/wide_parts_only.xml"])
@pytest.fixture(scope="module")
def started_cluster():
try:
cluster.start()
yield cluster
finally:
cluster.shutdown()
def check_hardlinks(table, part_path, column_file, count):
column_path = os.path.join(
"/var/lib/clickhouse/data/default", table, part_path, column_file
)
script = """
export INODE=`ls -i {column_path} | awk '{{print $1}}'`
export COUNT=`find /var/lib/clickhouse -inum $INODE | wc -l`
test $COUNT = {count}
""".format(
column_path=column_path, count=count
)
node1.exec_in_container(["bash", "-c", script])
def check_exists(table, part_path, column_file):
column_path = os.path.join(
"/var/lib/clickhouse/data/default", table, part_path, column_file
)
node1.exec_in_container(["bash", "-c", "test -f {}".format(column_path)])
def test_update_mutation(started_cluster):
node1.query(
"CREATE TABLE table_for_update(key UInt64, value1 UInt64, value2 String) ENGINE MergeTree() ORDER BY tuple()"
)
node1.query(
"INSERT INTO table_for_update SELECT number, number, toString(number) from numbers(100)"
)
assert int(node1.query("SELECT sum(value1) FROM table_for_update").strip()) == sum(
range(100)
)
node1.query(
"ALTER TABLE table_for_update UPDATE value1 = value1 * value1 WHERE 1",
settings={"mutations_sync": "2"},
)
assert int(node1.query("SELECT sum(value1) FROM table_for_update").strip()) == sum(
i * i for i in range(100)
)
check_hardlinks("table_for_update", "all_1_1_0_2", "key.bin", 2)
check_hardlinks("table_for_update", "all_1_1_0_2", "value2.bin", 2)
check_hardlinks("table_for_update", "all_1_1_0_2", "value1.bin", 1)
node1.query(
"ALTER TABLE table_for_update UPDATE key=key, value1=value1, value2=value2 WHERE 1",
settings={"mutations_sync": "2"},
)
assert int(node1.query("SELECT sum(value1) FROM table_for_update").strip()) == sum(
i * i for i in range(100)
)
check_hardlinks("table_for_update", "all_1_1_0_3", "key.bin", 1)
check_hardlinks("table_for_update", "all_1_1_0_3", "value1.bin", 1)
check_hardlinks("table_for_update", "all_1_1_0_3", "value2.bin", 1)
def test_modify_mutation(started_cluster):
node1.query(
"CREATE TABLE table_for_modify(key UInt64, value1 UInt64, value2 String) ENGINE MergeTree() ORDER BY tuple()"
)
node1.query(
"INSERT INTO table_for_modify SELECT number, number, toString(number) from numbers(100)"
)
assert int(node1.query("SELECT sum(value1) FROM table_for_modify").strip()) == sum(
range(100)
)
node1.query(
"ALTER TABLE table_for_modify MODIFY COLUMN value2 UInt64",
settings={"mutations_sync": "2"},
)
assert int(node1.query("SELECT sum(value2) FROM table_for_modify").strip()) == sum(
range(100)
)
check_hardlinks("table_for_modify", "all_1_1_0_2", "key.bin", 2)
check_hardlinks("table_for_modify", "all_1_1_0_2", "value1.bin", 2)
check_hardlinks("table_for_modify", "all_1_1_0_2", "value2.bin", 1)
def test_drop_mutation(started_cluster):
node1.query(
"CREATE TABLE table_for_drop(key UInt64, value1 UInt64, value2 String) ENGINE MergeTree() ORDER BY tuple()"
)
node1.query(
"INSERT INTO table_for_drop SELECT number, number, toString(number) from numbers(100)"
)
assert int(node1.query("SELECT sum(value1) FROM table_for_drop").strip()) == sum(
range(100)
)
node1.query(
"ALTER TABLE table_for_drop DROP COLUMN value2",
settings={"mutations_sync": "2"},
)
check_hardlinks("table_for_drop", "all_1_1_0_2", "key.bin", 2)
check_hardlinks("table_for_drop", "all_1_1_0_2", "value1.bin", 2)
with pytest.raises(Exception):
check_exists("table_for_drop", "all_1_1_0_2", "value2.bin")
with pytest.raises(Exception):
check_exists("table_for_drop", "all_1_1_0_2", "value2.mrk")
def test_delete_and_drop_mutation(started_cluster):
node1.query(
"CREATE TABLE table_for_delete_and_drop(key UInt64, value1 UInt64, value2 String) ENGINE MergeTree() ORDER BY tuple()"
)
node1.query(
"INSERT INTO table_for_delete_and_drop SELECT number, number, toString(number) from numbers(100)"
)
assert int(
node1.query("SELECT sum(value1) FROM table_for_delete_and_drop").strip()
) == sum(range(100))
node1.query("SYSTEM STOP MERGES")
def mutate():
node1.query(
"ALTER TABLE table_for_delete_and_drop DELETE WHERE key % 2 == 0, DROP COLUMN value2"
)
p = Pool(2)
p.apply_async(mutate)
for _ in range(1, 100):
result = node1.query(
"SELECT COUNT() FROM system.mutations WHERE table = 'table_for_delete_and_drop' and is_done=0"
)
try:
if int(result.strip()) == 2:
break
except:
print("Result", result)
pass
time.sleep(0.5)
node1.query("SYSTEM START MERGES")
assert_eq_with_retry(
node1,
"SELECT COUNT() FROM table_for_delete_and_drop",
str(sum(1 for i in range(100) if i % 2 != 0)),
)
check_hardlinks("table_for_delete_and_drop", "all_1_1_0_3", "key.bin", 1)
check_hardlinks("table_for_delete_and_drop", "all_1_1_0_3", "value1.bin", 1)
with pytest.raises(Exception):
check_exists("table_for_delete_and_drop", "all_1_1_0_3", "value2.bin")
with pytest.raises(Exception):
check_exists("table_for_delete_and_drop", "all_1_1_0_3", "value2.mrk")
|
from common.methods import set_progress
from resourcehandlers.aws.models import AWSHandler
def run(job, resource, **kwargs):
set_progress("Connecting to AWS s3 cloud")
aws = AWSHandler.objects.get(id=resource.aws_rh_id)
wrapper = aws.get_api_wrapper()
set_progress("This resource belongs to {}".format(aws))
file = "{{ file }}"
key_name = "{{ name }}"
s3 = wrapper.get_boto3_resource(
aws.serviceaccount,
aws.servicepasswd,
None,
service_name='s3'
)
try:
set_progress('uploading file from "{}"'.format(file))
s3.Bucket(resource.s3_bucket_name).upload_file(file, key_name)
except Exception as e:
return "FAILURE", str(e), ""
return "SUCCESS", "The file has been successfully uploaded to '{}' bucket".format(resource.s3_bucket_name), ""
|
#File: tank.py
#Author: Mariana Avalos
#Date: 22/02/2019
#Description: Python code that makes a 3D tank
import maya.cmds as c
import math as math
# 8 tires
tireTranslation = [3, -3]
tireRadius = 1.25
for j in range(len(tireTranslation)):
for i in range(4):
name = 'c' + str(i + (j * 4) + 1)
c.polyCylinder(r = tireRadius, sx = 20, sy = 1, n = 'c' + str(i + (j * 4) + 1))
c.setAttr(name + '.rotateZ', 90)
c.setAttr(name + '.scaleY', 0.8)
c.setAttr(name + '.translateZ', i * (tireRadius * 2) - (tireRadius * 3))
c.setAttr(name + '.translateX', tireTranslation[j])
# body made with the coolest for
body = 'body'
c.polyCube(sx = 4, sy = 2, sz = 1, d = 5.25, h = 3, w = 4, n = body)
c.setAttr(body + '.translateY', 0.5)
bodyRadius = 0.5
zFactor = [1, -1]
for j in range(len(zFactor)):
for i in range(0, 15):
rads = (360.0 / 8)*(3.1416 / 180)
x = -1 * bodyRadius * math.cos(rads * (i % 5))
z = zFactor[j] * bodyRadius * math.sin(rads * (i % 5))
c.polyMoveVertex(body + '.vtx[' + str(i + 15 * j) + ']', tx = x)
c.polyMoveVertex(body + '.vtx[' + str(i + 15 * j) + ']', tz = z)
if i in (5, 6, 7, 8, 9):
c.polyMoveVertex(body + '.vtx[' + str(i + 15 * j) + ']', tz = 3 * zFactor[j])
# head of tank
head = 'head'
headRadius = 0.5
c.polyCube(sx = 4, sy = 1, sz = 1, d = 3, h = 1.0, w = 4, n = head)
c.setAttr(head + '.translateY', 2.6)
c.setAttr(head + '.translateZ', -1)
for i in range(10, 20):
rads = (360.0 / 8)*(3.1416 / 180)
z = -1 * headRadius * math.sin(rads * (i % 5))
c.polyMoveVertex(head + '.vtx[' + str(i) + ']', tz = z)
if i in (10, 11, 12, 13, 14):
c.polyMoveVertex(head + '.vtx[' + str(i) + ']', tz = 1)
# axis under head
axis = 'axis'
c.polyCylinder(r = 1.5, sx = 20, sy = 1, h = 0.5, n = axis)
c.setAttr(axis + '.translateY', 2)
c.setAttr(axis + '.translateZ', -1.1)
# gun making: even parts are length 2 and odd parts are length 0.5
heights = [2, 0.5]
t = 1
gunRadius = 0.25
for i in range(0, 4):
name = 'gun' + str(i)
c.polyCylinder(r = gunRadius, sx = 8, sy = 1, h = heights[i % 2], n = name)
c.setAttr(name + '.translateY', 2.6)
c.setAttr(name + '.translateZ', t)
c.setAttr(name + '.rotateX', 90)
# translating: my height / 2 + next height / 2
t += heights[i % 2] / 2 + heights[(i + 1) % 2] / 2
gunRadius += 0.1
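# Worked example of the gun placement above (illustrative): t starts at 1 and
# grows by heights[i % 2] / 2 + heights[(i + 1) % 2] / 2 = 1.25 on every pass,
# so the four segments are centred at z = 1, 2.25, 3.5 and 4.75, with radii
# 0.25, 0.35, 0.45 and 0.55.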
|
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
# pylint: disable=no-self-use
from concurrent import futures
from io import (BytesIO, IOBase, SEEK_CUR, SEEK_END, SEEK_SET, UnsupportedOperation)
from threading import Lock
from itertools import islice
from math import ceil
import six
from azure.core.tracing.common import with_current_context
from . import encode_base64, url_quote
from .request_handlers import get_length
from .response_handlers import return_response_headers
from .encryption import get_blob_encryptor_and_padder
_LARGE_BLOB_UPLOAD_MAX_READ_BUFFER_SIZE = 4 * 1024 * 1024
_ERROR_VALUE_SHOULD_BE_SEEKABLE_STREAM = "{0} should be a seekable file-like/io.IOBase type stream object."
def _parallel_uploads(executor, uploader, pending, running):
range_ids = []
while True:
# Wait for some download to finish before adding a new one
done, running = futures.wait(running, return_when=futures.FIRST_COMPLETED)
range_ids.extend([chunk.result() for chunk in done])
try:
for _ in range(0, len(done)):
next_chunk = next(pending)
running.add(executor.submit(with_current_context(uploader), next_chunk))
except StopIteration:
break
# Wait for the remaining uploads to finish
done, _running = futures.wait(running)
range_ids.extend([chunk.result() for chunk in done])
return range_ids
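# Note on the helper above (descriptive comment, added): _parallel_uploads keeps
# at most max_concurrency futures in flight. Each time futures.wait returns with
# FIRST_COMPLETED it collects the finished chunk ids and submits the same number
# of new chunks from 'pending', so memory use stays bounded while the executor
# stays busy.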
def upload_data_chunks(
service=None,
uploader_class=None,
total_size=None,
chunk_size=None,
max_concurrency=None,
stream=None,
validate_content=None,
encryption_options=None,
progress_hook=None,
**kwargs):
if encryption_options:
encryptor, padder = get_blob_encryptor_and_padder(
encryption_options.get('cek'),
encryption_options.get('vector'),
uploader_class is not PageBlobChunkUploader)
kwargs['encryptor'] = encryptor
kwargs['padder'] = padder
parallel = max_concurrency > 1
if parallel and 'modified_access_conditions' in kwargs:
# Access conditions do not work with parallelism
kwargs['modified_access_conditions'] = None
uploader = uploader_class(
service=service,
total_size=total_size,
chunk_size=chunk_size,
stream=stream,
parallel=parallel,
validate_content=validate_content,
progress_hook=progress_hook,
**kwargs)
if parallel:
with futures.ThreadPoolExecutor(max_concurrency) as executor:
upload_tasks = uploader.get_chunk_streams()
running_futures = [
executor.submit(with_current_context(uploader.process_chunk), u)
for u in islice(upload_tasks, 0, max_concurrency)
]
range_ids = _parallel_uploads(executor, uploader.process_chunk, upload_tasks, running_futures)
else:
range_ids = [uploader.process_chunk(result) for result in uploader.get_chunk_streams()]
if any(range_ids):
return [r[1] for r in sorted(range_ids, key=lambda r: r[0])]
return uploader.response_headers
def upload_substream_blocks(
service=None,
uploader_class=None,
total_size=None,
chunk_size=None,
max_concurrency=None,
stream=None,
progress_hook=None,
**kwargs):
parallel = max_concurrency > 1
if parallel and 'modified_access_conditions' in kwargs:
# Access conditions do not work with parallelism
kwargs['modified_access_conditions'] = None
uploader = uploader_class(
service=service,
total_size=total_size,
chunk_size=chunk_size,
stream=stream,
parallel=parallel,
progress_hook=progress_hook,
**kwargs)
if parallel:
with futures.ThreadPoolExecutor(max_concurrency) as executor:
upload_tasks = uploader.get_substream_blocks()
running_futures = [
executor.submit(with_current_context(uploader.process_substream_block), u)
for u in islice(upload_tasks, 0, max_concurrency)
]
range_ids = _parallel_uploads(executor, uploader.process_substream_block, upload_tasks, running_futures)
else:
range_ids = [uploader.process_substream_block(b) for b in uploader.get_substream_blocks()]
if any(range_ids):
return sorted(range_ids)
return []
class _ChunkUploader(object): # pylint: disable=too-many-instance-attributes
def __init__(
self, service,
total_size,
chunk_size,
stream,
parallel,
encryptor=None,
padder=None,
progress_hook=None,
**kwargs):
self.service = service
self.total_size = total_size
self.chunk_size = chunk_size
self.stream = stream
self.parallel = parallel
# Stream management
self.stream_start = stream.tell() if parallel else None
self.stream_lock = Lock() if parallel else None
# Progress feedback
self.progress_total = 0
self.progress_lock = Lock() if parallel else None
self.progress_hook = progress_hook
# Encryption
self.encryptor = encryptor
self.padder = padder
self.response_headers = None
self.etag = None
self.last_modified = None
self.request_options = kwargs
def get_chunk_streams(self):
index = 0
while True:
data = b""
read_size = self.chunk_size
# Buffer until we either reach the end of the stream or get a whole chunk.
while True:
if self.total_size:
read_size = min(self.chunk_size - len(data), self.total_size - (index + len(data)))
temp = self.stream.read(read_size)
if not isinstance(temp, six.binary_type):
raise TypeError("Blob data should be of type bytes.")
data += temp or b""
# We have read an empty string and so are at the end
# of the buffer or we have read a full chunk.
if temp == b"" or len(data) == self.chunk_size:
break
if len(data) == self.chunk_size:
if self.padder:
data = self.padder.update(data)
if self.encryptor:
data = self.encryptor.update(data)
yield index, data
else:
if self.padder:
data = self.padder.update(data) + self.padder.finalize()
if self.encryptor:
data = self.encryptor.update(data) + self.encryptor.finalize()
if data:
yield index, data
break
index += len(data)
def process_chunk(self, chunk_data):
chunk_bytes = chunk_data[1]
chunk_offset = chunk_data[0]
return self._upload_chunk_with_progress(chunk_offset, chunk_bytes)
def _update_progress(self, length):
if self.progress_lock is not None:
with self.progress_lock:
self.progress_total += length
else:
self.progress_total += length
if self.progress_hook:
self.progress_hook(self.progress_total, self.total_size)
def _upload_chunk(self, chunk_offset, chunk_data):
raise NotImplementedError("Must be implemented by child class.")
def _upload_chunk_with_progress(self, chunk_offset, chunk_data):
range_id = self._upload_chunk(chunk_offset, chunk_data)
self._update_progress(len(chunk_data))
return range_id
def get_substream_blocks(self):
assert self.chunk_size is not None
lock = self.stream_lock
blob_length = self.total_size
if blob_length is None:
blob_length = get_length(self.stream)
if blob_length is None:
raise ValueError("Unable to determine content length of upload data.")
blocks = int(ceil(blob_length / (self.chunk_size * 1.0)))
last_block_size = self.chunk_size if blob_length % self.chunk_size == 0 else blob_length % self.chunk_size
for i in range(blocks):
index = i * self.chunk_size
length = last_block_size if i == blocks - 1 else self.chunk_size
yield index, SubStream(self.stream, index, length, lock)
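    # Illustrative arithmetic (not in the original): with total_size = 10 MiB
    # and chunk_size = 4 MiB, blocks = ceil(10 / 4) = 3 and
    # last_block_size = 10 % 4 = 2 MiB, so the generator above yields
    # sub-streams at offsets 0, 4 MiB and 8 MiB with lengths 4, 4 and 2 MiB.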
def process_substream_block(self, block_data):
return self._upload_substream_block_with_progress(block_data[0], block_data[1])
def _upload_substream_block(self, index, block_stream):
raise NotImplementedError("Must be implemented by child class.")
def _upload_substream_block_with_progress(self, index, block_stream):
range_id = self._upload_substream_block(index, block_stream)
self._update_progress(len(block_stream))
return range_id
def set_response_properties(self, resp):
self.etag = resp.etag
self.last_modified = resp.last_modified
class BlockBlobChunkUploader(_ChunkUploader):
def __init__(self, *args, **kwargs):
kwargs.pop("modified_access_conditions", None)
super(BlockBlobChunkUploader, self).__init__(*args, **kwargs)
self.current_length = None
def _upload_chunk(self, chunk_offset, chunk_data):
# TODO: This is incorrect, but works with recording.
index = '{0:032d}'.format(chunk_offset)
block_id = encode_base64(url_quote(encode_base64(index)))
self.service.stage_block(
block_id,
len(chunk_data),
chunk_data,
data_stream_total=self.total_size,
upload_stream_current=self.progress_total,
**self.request_options
)
return index, block_id
def _upload_substream_block(self, index, block_stream):
try:
block_id = 'BlockId{}'.format("%05d" % (index/self.chunk_size))
self.service.stage_block(
block_id,
len(block_stream),
block_stream,
data_stream_total=self.total_size,
upload_stream_current=self.progress_total,
**self.request_options
)
finally:
block_stream.close()
return block_id
class PageBlobChunkUploader(_ChunkUploader): # pylint: disable=abstract-method
def _is_chunk_empty(self, chunk_data):
# read until non-zero byte is encountered
# if reached the end without returning, then chunk_data is all 0's
return not any(bytearray(chunk_data))
def _upload_chunk(self, chunk_offset, chunk_data):
# avoid uploading the empty pages
if not self._is_chunk_empty(chunk_data):
chunk_end = chunk_offset + len(chunk_data) - 1
content_range = "bytes={0}-{1}".format(chunk_offset, chunk_end)
computed_md5 = None
self.response_headers = self.service.upload_pages(
body=chunk_data,
content_length=len(chunk_data),
transactional_content_md5=computed_md5,
range=content_range,
cls=return_response_headers,
data_stream_total=self.total_size,
upload_stream_current=self.progress_total,
**self.request_options
)
if not self.parallel and self.request_options.get('modified_access_conditions'):
self.request_options['modified_access_conditions'].if_match = self.response_headers['etag']
def _upload_substream_block(self, index, block_stream):
pass
class AppendBlobChunkUploader(_ChunkUploader): # pylint: disable=abstract-method
def __init__(self, *args, **kwargs):
super(AppendBlobChunkUploader, self).__init__(*args, **kwargs)
self.current_length = None
def _upload_chunk(self, chunk_offset, chunk_data):
if self.current_length is None:
self.response_headers = self.service.append_block(
body=chunk_data,
content_length=len(chunk_data),
cls=return_response_headers,
data_stream_total=self.total_size,
upload_stream_current=self.progress_total,
**self.request_options
)
self.current_length = int(self.response_headers["blob_append_offset"])
else:
self.request_options['append_position_access_conditions'].append_position = \
self.current_length + chunk_offset
self.response_headers = self.service.append_block(
body=chunk_data,
content_length=len(chunk_data),
cls=return_response_headers,
data_stream_total=self.total_size,
upload_stream_current=self.progress_total,
**self.request_options
)
def _upload_substream_block(self, index, block_stream):
pass
class DataLakeFileChunkUploader(_ChunkUploader): # pylint: disable=abstract-method
def _upload_chunk(self, chunk_offset, chunk_data):
# avoid uploading the empty pages
self.response_headers = self.service.append_data(
body=chunk_data,
position=chunk_offset,
content_length=len(chunk_data),
cls=return_response_headers,
data_stream_total=self.total_size,
upload_stream_current=self.progress_total,
**self.request_options
)
if not self.parallel and self.request_options.get('modified_access_conditions'):
self.request_options['modified_access_conditions'].if_match = self.response_headers['etag']
def _upload_substream_block(self, index, block_stream):
try:
self.service.append_data(
body=block_stream,
position=index,
content_length=len(block_stream),
cls=return_response_headers,
data_stream_total=self.total_size,
upload_stream_current=self.progress_total,
**self.request_options
)
finally:
block_stream.close()
class FileChunkUploader(_ChunkUploader): # pylint: disable=abstract-method
def _upload_chunk(self, chunk_offset, chunk_data):
length = len(chunk_data)
chunk_end = chunk_offset + length - 1
response = self.service.upload_range(
chunk_data,
chunk_offset,
length,
data_stream_total=self.total_size,
upload_stream_current=self.progress_total,
**self.request_options
)
return 'bytes={0}-{1}'.format(chunk_offset, chunk_end), response
# TODO: Implement this method.
def _upload_substream_block(self, index, block_stream):
pass
class SubStream(IOBase):
def __init__(self, wrapped_stream, stream_begin_index, length, lockObj):
# Python 2.7: file-like objects created with open() typically support seek(), but are not
# derivations of io.IOBase and thus do not implement seekable().
# Python > 3.0: file-like objects created with open() are derived from io.IOBase.
try:
# only the main thread runs this, so there's no need to grab the lock
wrapped_stream.seek(0, SEEK_CUR)
except:
raise ValueError("Wrapped stream must support seek().")
self._lock = lockObj
self._wrapped_stream = wrapped_stream
self._position = 0
self._stream_begin_index = stream_begin_index
self._length = length
self._buffer = BytesIO()
# we must avoid buffering more than necessary, and also not use up too much memory
# so the max buffer size is capped at 4MB
self._max_buffer_size = (
length if length < _LARGE_BLOB_UPLOAD_MAX_READ_BUFFER_SIZE else _LARGE_BLOB_UPLOAD_MAX_READ_BUFFER_SIZE
)
self._current_buffer_start = 0
self._current_buffer_size = 0
super(SubStream, self).__init__()
def __len__(self):
return self._length
def close(self):
if self._buffer:
self._buffer.close()
self._wrapped_stream = None
IOBase.close(self)
def fileno(self):
return self._wrapped_stream.fileno()
def flush(self):
pass
def read(self, size=None):
if self.closed: # pylint: disable=using-constant-test
raise ValueError("Stream is closed.")
if size is None:
size = self._length - self._position
# adjust if out of bounds
if size + self._position >= self._length:
size = self._length - self._position
# return fast
if size == 0 or self._buffer.closed:
return b""
# attempt first read from the read buffer and update position
read_buffer = self._buffer.read(size)
bytes_read = len(read_buffer)
bytes_remaining = size - bytes_read
self._position += bytes_read
# repopulate the read buffer from the underlying stream to fulfill the request
# ensure the seek and read operations are done atomically (only if a lock is provided)
if bytes_remaining > 0:
with self._buffer:
# either read in the max buffer size specified on the class
# or read in just enough data for the current block/sub stream
current_max_buffer_size = min(self._max_buffer_size, self._length - self._position)
# lock is only defined if max_concurrency > 1 (parallel uploads)
if self._lock:
with self._lock:
# reposition the underlying stream to match the start of the data to read
absolute_position = self._stream_begin_index + self._position
self._wrapped_stream.seek(absolute_position, SEEK_SET)
# If we can't seek to the right location, our read will be corrupted so fail fast.
if self._wrapped_stream.tell() != absolute_position:
raise IOError("Stream failed to seek to the desired location.")
buffer_from_stream = self._wrapped_stream.read(current_max_buffer_size)
else:
absolute_position = self._stream_begin_index + self._position
# It's possible that there's a connection problem during data transfer,
# so when we retry we don't want to read from the current position of the wrapped stream;
# instead we should seek to where we want to read from.
if self._wrapped_stream.tell() != absolute_position:
self._wrapped_stream.seek(absolute_position, SEEK_SET)
buffer_from_stream = self._wrapped_stream.read(current_max_buffer_size)
if buffer_from_stream:
# update the buffer with new data from the wrapped stream
# we need to note down the start position and size of the buffer, in case seek is performed later
self._buffer = BytesIO(buffer_from_stream)
self._current_buffer_start = self._position
self._current_buffer_size = len(buffer_from_stream)
# read the remaining bytes from the new buffer and update position
second_read_buffer = self._buffer.read(bytes_remaining)
read_buffer += second_read_buffer
self._position += len(second_read_buffer)
return read_buffer
def readable(self):
return True
def readinto(self, b):
raise UnsupportedOperation
def seek(self, offset, whence=0):
if whence is SEEK_SET:
start_index = 0
elif whence is SEEK_CUR:
start_index = self._position
elif whence is SEEK_END:
start_index = self._length
offset = -offset
else:
raise ValueError("Invalid argument for the 'whence' parameter.")
pos = start_index + offset
if pos > self._length:
pos = self._length
elif pos < 0:
pos = 0
# check if buffer is still valid
# if not, drop buffer
if pos < self._current_buffer_start or pos >= self._current_buffer_start + self._current_buffer_size:
self._buffer.close()
self._buffer = BytesIO()
else: # if yes seek to correct position
delta = pos - self._current_buffer_start
self._buffer.seek(delta, SEEK_SET)
self._position = pos
return pos
def seekable(self):
return True
def tell(self):
return self._position
def write(self, b):
raise UnsupportedOperation
def writelines(self, lines):
raise UnsupportedOperation
def writable(self):
return False
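# Minimal usage sketch for SubStream (illustrative only, not part of the SDK):
# expose one window of a larger file as an independent, seekable stream. The
# lock is only needed when several SubStreams share the wrapped stream during
# parallel uploads; a single-threaded caller may pass None.
#
#   import threading
#   with open("large.bin", "rb") as fh:
#       window = SubStream(fh, stream_begin_index=0,
#                          length=4 * 1024 * 1024, lockObj=threading.Lock())
#       first_kb = window.read(1024)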
class IterStreamer(object):
"""
File-like streaming iterator.
"""
def __init__(self, generator, encoding="UTF-8"):
self.generator = generator
self.iterator = iter(generator)
self.leftover = b""
self.encoding = encoding
def __len__(self):
return self.generator.__len__()
def __iter__(self):
return self.iterator
def seekable(self):
return False
def __next__(self):
return next(self.iterator)
next = __next__ # Python 2 compatibility.
def tell(self, *args, **kwargs):
raise UnsupportedOperation("Data generator does not support tell.")
def seek(self, *args, **kwargs):
raise UnsupportedOperation("Data generator is unseekable.")
def read(self, size):
data = self.leftover
count = len(self.leftover)
try:
while count < size:
chunk = self.__next__()
if isinstance(chunk, six.text_type):
chunk = chunk.encode(self.encoding)
data += chunk
count += len(chunk)
# This means count < size and what's leftover will be returned in this call.
except StopIteration:
self.leftover = b""
if count >= size:
self.leftover = data[size:]
return data[:size]
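if __name__ == "__main__":
    # Hypothetical smoke test (not part of the SDK): adapt a byte-chunk
    # generator into a read()-able object and pull a fixed number of bytes.
    _chunks = (b"hello " for _ in range(3))
    _stream = IterStreamer(_chunks)
    assert _stream.read(11) == b"hello hello"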
|
import sys
import utils
import torch
from datasets import VisualDialogDataset
import torchvision.transforms as transforms
def build_dataset(mode, args, shared_dictionary=None, with_options=True):
normalize = transforms.Normalize(mean=[0.4711, 0.4475, 0.4080], std=[0.1223, 0.1221, 0.1450]) #visdial
transform = transforms.Compose([ transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
normalize])
dataset = VisualDialogDataset(mode, args, with_options, transform)
dataset.load_dictionary(shared_dictionary)
dataset.load_data()
return dataset
def get_dataloader(mode, args, shared_dictionary=None, with_options=True):
loader = torch.utils.data.DataLoader(
build_dataset(mode, args, shared_dictionary, with_options),
batch_size=args.batch_size, shuffle=False,
num_workers=args.workers, pin_memory=False)
nelements = len(loader.dataset)
return loader
def get_mask(human_set_mask, human_set_only=True):
if human_set_only:
if torch.sum(human_set_mask) == 0:
return None
else:
return human_set_mask
else:
return torch.ones_like(human_set_mask)
def get_flat_features(loader, args, human_set_only=False):
print('flattening {:} features...'.format(loader.dataset.mode))
if human_set_only:
return get_flat_human_features(loader, args)
else:
return get_flat_full_features(loader, args)
def get_flat_human_features(loader, args):
avg_fn = loader.dataset.get_avg_embedding
E = loader.dataset.dictionary.emb_size
questions, answers = [], []
for i, batch in enumerate(loader):
sys.stdout.write('\r{}/{} --> {:3.1f}%'.format(str(i+1), str(len(loader)), (i+1)/float(len(loader))*100))
sys.stdout.flush()
mask = get_mask(batch['in_human_set'])
if isinstance(mask, torch.Tensor):
bsz = mask.sum()
batch = utils.send_to_device(batch, args.gpu)
human_scores = batch['answer_options_scores'][mask].view(bsz,-1,100)
cluster_mask = (human_scores > 0)
cluster_mask.scatter_(2, batch['gtidxs'][mask].view(bsz,-1, 1), 1)
cluster_sizes = cluster_mask.sum(dim=2).view(bsz)
emb_question = avg_fn(batch['questions_ids'][mask].view(bsz,-1,args.S), batch['questions_length'][mask].view(bsz,-1)).cpu()
emb_answer_set = avg_fn(batch['answer_options_ids'][mask].view(-1,100,args.S), batch['answer_options_length'][mask].view(-1,100))
emb_answer_set = emb_answer_set.view(bsz,-1,100,E)
emb_cluster_set = emb_answer_set[cluster_mask].cpu()
batch_idx, counter = 0, 1
acc_cluster_sizes = torch.cumsum(cluster_sizes, dim=0)
for emb_answer in emb_cluster_set:
questions.append(emb_question[batch_idx])
answers.append(emb_answer)
if counter == acc_cluster_sizes[batch_idx]:
batch_idx += 1
counter += 1
sys.stdout.write("\n")
questions = torch.stack(questions)
answers = torch.stack(answers)
return [ answers.view(-1, E), questions.view(-1, E)]
def get_flat_full_features(loader, args):
avg_fn = loader.dataset.get_avg_embedding
E = loader.dataset.dictionary.emb_size
questions = torch.FloatTensor(loader.dataset.N, args.D, E)
answers = torch.FloatTensor(loader.dataset.N, args.D, E)
for i, batch in enumerate(loader):
sys.stdout.write('\r{}/{} --> {:3.1f}%'.format(str(i+1), str(len(loader)), (i+1)/float(len(loader))*100))
sys.stdout.flush()
batch = utils.send_to_device(batch, args.gpu)
bsz = batch['questions_ids'].size(0)
questions[i*loader.batch_size:i*loader.batch_size+bsz] = avg_fn(batch['questions_ids'], batch['questions_length']).cpu()
answers[i*loader.batch_size:i*loader.batch_size+bsz] = avg_fn(batch['answers_ids'], batch['answers_length']).cpu()
sys.stdout.write("\n")
return [ answers.view(-1, E), questions.view(-1, E)]
|
import json
import inspect
import sublime
from .miscellaneous_utils import command_kind_type
kind_mapping = {
"window": command_kind_type("window"),
"text": command_kind_type("text"),
"application": command_kind_type("application"),
"find": command_kind_type("find")
}
def core_commands_doc_panel(window, docs):
""" For core commands, since they are impemented in ST core, they can't be
navigated to, unlike plugin based commands that have an associated python file.
The JSON files have enough information to store the docs however, so we simply
present that informaion in a panel.
Args:
window (sublime.Window): The window object for which the panel has to be
created.
docs (List): This is a list of 2 items. The first one is the command name
and the second one is the command metadata.
Returns:
None
"""
doc_panel = window.create_output_panel("CommandsBrowser")
doc_panel.set_read_only(False)
final_doc_string = ""
description_string = f"""
Name of the command: {docs[0]}
Description: {docs[1]["doc_string"]}
"""
final_doc_string += inspect.cleandoc(description_string.strip()) + "\n" * 2
final_doc_string += "Arguments:" + "\n" * 2
if docs[1].get("args") is not None:
max_arg_length = max([len(doc["name"]) for doc in docs[1]["args"]])
max_length = max([(len(doc["name"]) + len(doc["type"]) + 4) for doc in docs[1]["args"]])
for doc in docs[1]["args"]:
length_1 = abs(max_arg_length - len(doc["name"]))
length_2 = abs(max_length - (len(doc["name"]) + len(doc["type"]) + length_1 + 4))
doc_string = doc["doc_string"] if doc["doc_string"] is not None else "No available description."
initial_string = f"""
{doc["name"]}{"":^{length_1}} ({doc["type"]}){"":^{length_2}} - {doc_string}
"""
final_doc_string += initial_string.strip() + "\n"
else:
final_doc_string += "No known args exist for this command."
doc_panel.run_command("insert", { "characters": final_doc_string })
doc_panel.settings().set("syntax", "Packages/CommandsBrowser/resources/CommandsBrowser.sublime-syntax")
doc_panel.settings().set("gutter", False)
doc_panel.set_read_only(True)
window.run_command("show_panel", {
"panel": "output.CommandsBrowser",
})
doc_panel.run_command("scroll_to_bof")
def get_core_commands_data(application = "st"):
""" Given the application type, generates a list of items representing
command data that can be returned from a CommandInputHandler.list_items
method.
Args:
application (str): The application for which the commands need to be
retrieved. Valid values are 'st' (Sublime Text) or 'sm' (Sublime Merge).
Returns:
final_dict (Dict): The final dictionary of commands and their docs.
"""
json_file_names = [a for a in sublime.find_resources("*.json") if a.startswith(f"Packages/CommandsBrowser/{application}_commands_metadata")]
final_dict = {}
for file_name in json_file_names:
data = json.loads(sublime.load_resource(file_name))
if data is not None:
final_dict.update(data)
return final_dict
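# Shape of the metadata these helpers consume, as inferred from the code above
# (an illustration, not a guaranteed schema of the bundled JSON files):
#
#   {
#       "insert": {
#           "doc_string": "Inserts the given characters into the view.",
#           "args": [
#               {"name": "characters", "type": "str", "doc_string": "Text to insert."}
#           ]
#       }
#   }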
|
# ⋆ ˚。⋆୨୧˚ v a p o r w a v e b o t ˚୨୧⋆。˚ ⋆
# Simple Telegram bot that converts standard unicode chars to full-width ones
# Unicode full-width characters mean that every character has the width of a Chinese character.
# Full-width forms run from 0xFF01 to 0xFFE5.
# Japanese hiragana characters run from 0x3040 to 0x309F.
# ⋆ ˚。⋆୨୧˚ ⋆ ˚。⋆୨୧˚ ⋆ ˚。⋆୨୧˚ ⋆ ˚。⋆୨୧˚ ⋆ ˚。⋆୨୧˚ ⋆ ˚。⋆୨୧˚ ⋆ ˚。⋆୨୧˚ ⋆ ˚。⋆୨୧˚ ⋆ ˚。⋆୨୧˚ ⋆ ˚。⋆୨୧˚ ⋆ ˚。⋆୨୧˚ ⋆ ˚。⋆୨୧˚
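# A sketch of the mapping described above (illustrative only; the bot's real
# conversion lives in the `utils` module): printable ASCII 0x21-0x7E shifts by
# 0xFEE0 into the full-width block, and a plain space becomes U+3000.
#
#   def fullwidth(text):
#       return "".join(
#           "\u3000" if ch == " "
#           else chr(ord(ch) + 0xFEE0) if 0x21 <= ord(ch) <= 0x7E
#           else ch
#           for ch in text
#       )
#
#   fullwidth("vaporwave")  # -> "ｖａｐｏｒｗａｖｅ"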
import os
from uuid import uuid4
from telegram import (
Update,
ParseMode,
InlineQueryResultArticle,
InputTextMessageContent,
)
from telegram.ext import (
Updater,
CommandHandler,
MessageHandler,
Filters,
CallbackContext,
InlineQueryHandler,
)
import logging
import random
import utils
import config
import threading
import userutils
# initialize lists with characters
def main():
# enable logging
try:
os.mkdir(config.FILES_PATH)
except:
print("directory already exists")
logging.basicConfig(
level=logging.INFO,
format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
filename=config.FILES_PATH + "vaporwave-bot.log",
filemode="a+",
)
logging.info("VPRWV BOT STARTED")
userutils.init_cache()
ucheck = threading.Thread(target=userutils.usercheck, daemon=True)
ucheck.start()
updater = Updater(config.BOT_TOKEN)
dispatcher = updater.dispatcher
dispatcher.add_handler(CommandHandler("start", start))
dispatcher.add_handler(CommandHandler("help", help))
dispatcher.add_handler(CommandHandler("privacy", privacy_message))
dispatcher.add_handler(InlineQueryHandler(inline_vaporize_query))
updater.start_polling()
updater.idle()
def start(update: Update, context: CallbackContext):
try:
log = (
"User started bot. id : "
+ str(update.message.from_user.id)
+ " - username: "
+ update.message.from_user.username
)
logging.info(log)
except:
logging.exception("exception start method", exc_info=True)
update.message.reply_text(utils.start_msg, parse_mode=ParseMode.MARKDOWN)
def help(update, context):
update.message.reply_text(utils.help_msg, parse_mode=ParseMode.MARKDOWN)
def privacy_message(update, context):
update.message.reply_text(utils.privacy_msg, parse_mode=ParseMode.MARKDOWN)
def inline_vaporize_query(update: Update, context: CallbackContext):
query = update.inline_query.query
try:
userutils.queue.put(update.inline_query.from_user.username)
except:
logging.exception("Exception!", exc_info=True)
if query == "":
return
ans = [utils.hiramize(query), utils.emojize(query), utils.sparkleize(query)]
results = [
InlineQueryResultArticle(
id=str(uuid4()),
input_message_content=InputTextMessageContent(x),
title=x,
description=random.choice(utils.sparkles),
)
for x in ans
]
update.inline_query.answer(results, cache_time=utils.inline_cache_time)
if __name__ == "__main__":
main()
|
import torch
from torch.autograd import Variable
from torchvision import models
import cv2
import sys
import numpy as np
import os
import math
import torch.nn.functional as F
idx_to_class = {0 : 'aeroplane', 1 : 'bicycle', 2 : 'bird', 3 : 'boat', 4 : 'bottle', 5 : 'bus', 6 : 'car', 7 : 'cat',
8 : 'chair', 9 : 'cow', 10 : 'table', 11 : 'dog', 12 : 'horse', 13 : 'motorbike', 14 : 'person',
15 : 'plant', 16 : 'sheep', 17 : 'sofa', 18 : 'train', 19 : 'monitor'}
def tv_norm(input, tv_beta, diagonal=False, sum=False):
# print(input.shape)
img = input[0, :]
if sum:
row_grad = torch.sum(torch.abs((img[:-1 , :] - img[1 :, :])).pow(tv_beta))
col_grad = torch.sum(torch.abs((img[: , :-1] - img[: , 1 :])).pow(tv_beta))
else:
row_grad = torch.mean(torch.abs((img[:-1, :] - img[1:, :])).pow(tv_beta))
col_grad = torch.mean(torch.abs((img[:, :-1] - img[:, 1:])).pow(tv_beta))
if diagonal:
diag = 0
if sum:
diag += torch.sum(torch.abs((img[:-1, :-1] - img[1:, 1:])).pow(tv_beta))
diag += torch.sum(torch.abs((img[1:, :-1] - img[:-1, 1:])).pow(tv_beta))
diag += torch.sum(torch.abs((img[:-1, 1:] - img[1:, :-1])).pow(tv_beta))
diag += torch.sum(torch.abs((img[1:, 1:] - img[:-1, :-1])).pow(tv_beta))
else:
diag += torch.mean(torch.abs((img[:-1, :-1] - img[1:, 1:])).pow(tv_beta))
diag += torch.mean(torch.abs((img[1:, :-1] - img[:-1, 1:])).pow(tv_beta))
diag += torch.mean(torch.abs((img[:-1, 1:] - img[1:, :-1])).pow(tv_beta))
diag += torch.mean(torch.abs((img[1:, 1:] - img[:-1, :-1])).pow(tv_beta))
return row_grad + col_grad + diag
return row_grad + col_grad
def numpy_to_torch(img, requires_grad = True, cuda_device=None):
use_cuda = torch.cuda.is_available()
if len(img.shape) < 3:
output = np.float32([img])
else:
output = np.expand_dims(img, axis=1)
# output = np.transpose(img, (2, 0, 1))
output = torch.from_numpy(output)
if use_cuda:
if cuda_device is None:
output = output.cuda()
else:
output = output.cuda(cuda_device)
# output = output.repeat(3, 1, 1)
v = Variable(output, requires_grad = requires_grad)
# v = v.repeat(3, 1, 1)
return v
color_dicts = [
[0.6, 0, 0.05],
[0.03, 0.19, 0.42],
[0, 0.27, 0.11],
[0.24, 0, 0.49],
[0.5, 0.25, 0.02],
[1, 0.5, 0],
[0.2, 0.2, 0.2],
[1, 0.1, 0.6],
[0.8, 0.8, 0]
]
def save_pred(image, boxes, save_path, image_id):
image[0] += 102.9801
image[1] += 115.9465
image[2] += 122.7717
image = image.data.cpu().numpy().transpose(1, 2, 0).astype('uint8')
for coord_idx, coords in enumerate(boxes):
image = cv2.UMat(image).get()
color = color_dicts[coord_idx%len(color_dicts)]
color = [int(c*255.0) for c in color]
color = color[::-1]
image = cv2.rectangle(image, (int(coords[0]), int(coords[1])),
(int(coords[2]), int(coords[3])), color, 5)
save_name = '%s/%s/box_prediction.jpg' % (save_path, image_id)
cv2.imwrite(save_name, image)
def save_mask(mask, masked_img=None, proposal=None, original_coord=None, perturbed_coord=None, iteration=None, proposal_idx=None, image_id=None, class_name=None, save_path_root=None, single_p_idx=None):
if not (masked_img is None):
masked_img[0] += 102.9801
masked_img[1] += 115.9465
masked_img[2] += 122.7717
masked_img = masked_img.data.cpu().numpy().transpose(1, 2, 0).astype('uint8')
mask = (255*mask.data.cpu().numpy().transpose(1, 2, 0)).astype('uint8')
color = [(255, 0, 0), (0, 255, 0), (0, 0, 255)] # BGR - blue: proposal, green: unperturbed (original), red: perturbed
if (proposal is not None) and (original_coord is not None) and (perturbed_coord is None):
for coord_idx, coords in enumerate([proposal, original_coord]):
coords = coords.detach().data.cpu().numpy()
masked_img = cv2.UMat(masked_img).get()
masked_img = cv2.rectangle(masked_img, (int(coords[0]), int(coords[1])),
(int(coords[2]), int(coords[3])), color[coord_idx], 5)
if not((proposal is None) or (original_coord is None) or (perturbed_coord is None)):
for coord_idx, coords in enumerate([proposal, original_coord, perturbed_coord]):
coords = coords.detach().data.cpu().numpy()
masked_img = cv2.UMat(masked_img).get()
masked_img = cv2.rectangle(masked_img, (int(coords[0]), int(coords[1])),
(int(coords[2]), int(coords[3])), color[coord_idx], 5)
if not (masked_img is None):
masked_img = cv2.resize(masked_img, None, fx=0.5, fy=0.5)
mask = cv2.resize(mask, (masked_img.shape[1], masked_img.shape[0]))
if single_p_idx is None:
save_path = '%s/%s/pidx_%04d_%s/' % (save_path_root, image_id, proposal_idx, class_name)
else:
save_path = '%s/%s/pidx_%04d_%s/' % (save_path_root, image_id, proposal_idx, class_name)
if not os.path.exists(save_path):
os.makedirs(save_path)
if single_p_idx is None:
if not (masked_img is None):
cv2.imwrite('%s/iter_%04d.jpg' % (save_path, iteration), masked_img)
cv2.imwrite('%s/iter_%04d_mask.jpg' % (save_path, iteration), mask)
else:
if not (masked_img is None):
cv2.imwrite('%s/pidx_%04d_img.jpg' % (save_path, single_p_idx), masked_img)
cv2.imwrite('%s/pidx_%04d_mask.jpg' % (save_path, single_p_idx), mask)
def get_max_iou(source, targets):
# target: multiple boxes
maxIoU = 0
for target in targets.bbox:
bb1, bb2 = {}, {}
bb1['x1'], bb1['x2'] = int(source[0]), int(source[2])
bb1['y1'], bb1['y2'] = int(source[1]), int(source[3])
bb2['x1'], bb2['x2'] = int(target[0]), int(target[2])
bb2['y1'], bb2['y2'] = int(target[1]), int(target[3])
# determine the coordinates of the intersection rectangle
x_left = max(bb1['x1'], bb2['x1'])
y_top = max(bb1['y1'], bb2['y1'])
x_right = min(bb1['x2'], bb2['x2'])
y_bottom = min(bb1['y2'], bb2['y2'])
if not(x_right < x_left or y_bottom < y_top):
# The intersection of two axis-aligned bounding boxes is always an
# axis-aligned bounding box
intersection_area = (x_right - x_left) * (y_bottom - y_top)
# compute the area of both AABBs
bb1_area = (bb1['x2'] - bb1['x1']) * (bb1['y2'] - bb1['y1'])
bb2_area = (bb2['x2'] - bb2['x1']) * (bb2['y2'] - bb2['y1'])
# compute the intersection over union by taking the intersection
# area and dividing it by the sum of prediction + ground-truth
# areas - the intersection area
iou = intersection_area / float(bb1_area + bb2_area - intersection_area)
assert iou >= 0.0
assert iou <= 1.0
if maxIoU < iou:
maxIoU = iou
return maxIoU
def get_single_iou(source, target):
# target: a single box
bb1, bb2 = {}, {}
bb1['x1'], bb1['x2'] = int(source[0]), int(source[2])
bb1['y1'], bb1['y2'] = int(source[1]), int(source[3])
bb2['x1'], bb2['x2'] = int(target[0]), int(target[2])
bb2['y1'], bb2['y2'] = int(target[1]), int(target[3])
# determine the coordinates of the intersection rectangle
x_left = max(bb1['x1'], bb2['x1'])
y_top = max(bb1['y1'], bb2['y1'])
x_right = min(bb1['x2'], bb2['x2'])
y_bottom = min(bb1['y2'], bb2['y2'])
if x_right < x_left or y_bottom < y_top:
return 0.0
# The intersection of two axis-aligned bounding boxes is always an
# axis-aligned bounding box
intersection_area = (x_right - x_left) * (y_bottom - y_top)
# compute the area of both AABBs
bb1_area = (bb1['x2'] - bb1['x1']) * (bb1['y2'] - bb1['y1'])
bb2_area = (bb2['x2'] - bb2['x1']) * (bb2['y2'] - bb2['y1'])
# compute the intersection over union by taking the intersection
# area and dividing it by the sum of prediction + ground-truth
# areas - the intersection area
iou = intersection_area / float(bb1_area + bb2_area - intersection_area)
return iou
def selected_positives(ious, pred_classes, displacements, proposal_iter):
ious, pred_classes, displacements = np.array(ious), np.array(pred_classes), np.array(displacements)
top_ious = np.argsort(-ious)
top_displacement = np.argsort(-displacements)
# include top 30%
positive_idxs = list(top_ious[:int(proposal_iter * 0.3)])
for d in top_displacement:
if ious[d] > 0.8:
positive_idxs.append(d)
return positive_idxs[:proposal_iter]
def imsmooth(tensor,
sigma,
stride=1,
padding=0,
padding_mode='constant',
padding_value=0):
"From TorchRay (https://github.com/facebookresearch/TorchRay)"
assert sigma >= 0
width = math.ceil(4 * sigma)
SQRT_TWO_DOUBLE = torch.tensor(math.sqrt(2), dtype=torch.float32)
SQRT_TWO_SINGLE = SQRT_TWO_DOUBLE.to(torch.float32)
EPSILON_SINGLE = torch.tensor(1.19209290E-07, dtype=torch.float32)
filt = (torch.arange(-width,
width + 1,
dtype=torch.float32,
device=tensor.device) /
(SQRT_TWO_SINGLE * sigma + EPSILON_SINGLE))
filt = torch.exp(-filt * filt)
filt /= torch.sum(filt)
num_channels = tensor.shape[1]
width = width + padding
if padding_mode == 'constant' and padding_value == 0:
other_padding = width
x = tensor
else:
# pad: (before, after) pairs starting from last dimension backward
x = F.pad(tensor,
(width, width, width, width),
mode=padding_mode,
value=padding_value)
other_padding = 0
padding = 0
x = F.conv2d(x,
filt.reshape((1, 1, -1, 1)).expand(num_channels, -1, -1, -1),
padding=(other_padding, padding),
stride=(stride, 1),
groups=num_channels)
x = F.conv2d(x,
filt.reshape((1, 1, 1, -1)).expand(num_channels, -1, -1, -1),
padding=(padding, other_padding),
stride=(1, stride),
groups=num_channels)
return x
class MaskGenerator:
r"""Mask generator.
The class takes as input the mask parameters and returns
as output a mask.
Args:
shape (tuple of int): output shape.
step (int): parameterization step in pixels.
sigma (float): kernel size.
clamp (bool, optional): whether to clamp the mask to [0,1]. Defaults to True.
pooling_method (str, optional): `'softmax'` (default), `'sum'`, `'sigmoid'`.
Attributes:
shape (tuple): the same as the specified :attr:`shape` parameter.
shape_in (tuple): spatial size of the parameter tensor.
shape_out (tuple): spatial size of the output mask including margin.
"""
def __init__(self, shape, step, sigma, clamp=True, pooling_method='softmax'):
self.shape = shape
self.step = step
self.sigma = sigma
self.coldness = 20
self.clamp = clamp
self.pooling_method = pooling_method
assert int(step) == step
# self.kernel = lambda z: (z < 1).float()
self.kernel = lambda z: torch.exp(-2 * ((z - .5).clamp(min=0)**2))
self.margin = self.sigma
# self.margin = 0
self.padding = 1 + math.ceil((self.margin + sigma) / step)
self.radius = 1 + math.ceil(sigma / step)
self.shape_in = [math.ceil(z / step) for z in self.shape]
self.shape_mid = [
z + 2 * self.padding - (2 * self.radius + 1) + 1
for z in self.shape_in
]
self.shape_up = [self.step * z for z in self.shape_mid]
self.shape_out = [z - step + 1 for z in self.shape_up]
self.weight = torch.zeros((
1,
(2 * self.radius + 1)**2,
self.shape_out[0],
self.shape_out[1]
))
step_inv = [
torch.tensor(zm, dtype=torch.float32) /
torch.tensor(zo, dtype=torch.float32)
for zm, zo in zip(self.shape_mid, self.shape_up)
]
for ky in range(2 * self.radius + 1):
for kx in range(2 * self.radius + 1):
uy, ux = torch.meshgrid(
torch.arange(self.shape_out[0], dtype=torch.float32),
torch.arange(self.shape_out[1], dtype=torch.float32)
)
iy = torch.floor(step_inv[0] * uy) + ky - self.padding
ix = torch.floor(step_inv[1] * ux) + kx - self.padding
delta = torch.sqrt(
(uy - (self.margin + self.step * iy))**2 +
(ux - (self.margin + self.step * ix))**2
)
k = ky * (2 * self.radius + 1) + kx
self.weight[0, k] = self.kernel(delta / sigma)
def generate(self, mask_in):
r"""Generate a mask.
The function takes as input a parameter tensor :math:`\bar m` for
:math:`K` masks, which is a :math:`K\times 1\times H_i\times W_i`
tensor where `H_i\times W_i` are given by :attr:`shape_in`.
Args:
mask_in (:class:`torch.Tensor`): mask parameters.
Returns:
tuple: a pair of mask, cropped and full. The cropped mask is a
:class:`torch.Tensor` with the same spatial shape :attr:`shape`
as specified upon creating this object. The second mask is the same,
but with an additional margin and shape :attr:`shape_out`.
"""
mask = F.unfold(mask_in,
(2 * self.radius + 1,) * 2,
padding=(self.padding,) * 2)
mask = mask.reshape(
len(mask_in), -1, self.shape_mid[0], self.shape_mid[1])
mask = F.interpolate(mask, size=self.shape_up, mode='nearest')
mask = F.pad(mask, (0, -self.step + 1, 0, -self.step + 1))
mask = self.weight * mask
if self.pooling_method == 'sigmoid':
if self.coldness == float('+Inf'):
mask = (mask.sum(dim=1, keepdim=True) - 5 > 0).float()
else:
mask = torch.sigmoid(
self.coldness * mask.sum(dim=1, keepdim=True) - 3
)
elif self.pooling_method == 'softmax':
if self.coldness == float('+Inf'):
mask = mask.max(dim=1, keepdim=True)[0]
else:
mask = (
mask * F.softmax(self.coldness * mask, dim=1)
).sum(dim=1, keepdim=True)
elif self.pooling_method == 'sum':
mask = mask.sum(dim=1, keepdim=True)
else:
assert False, f"Unknown pooling method {self.pooling_method}"
m = round(self.margin)
if self.clamp:
mask = mask.clamp(min=0, max=1)
cropped = mask[:, :, m:m + self.shape[0], m:m + self.shape[1]]
return cropped, mask
def to(self, dev):
"""Switch to another device.
Args:
dev: PyTorch device.
Returns:
MaskGenerator: self.
"""
self.weight = self.weight.to(dev)
return self
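if __name__ == "__main__":
    # Illustrative smoke test; the parameter values are assumptions, not the
    # settings used by the surrounding project. Build a 224x224 mask from an
    # all-zero parameter tensor and inspect the output shapes.
    _generator = MaskGenerator(shape=(224, 224), step=8, sigma=16.0)
    _param = torch.zeros(1, 1, *_generator.shape_in)
    _cropped, _full = _generator.generate(_param)
    print(_cropped.shape, _full.shape)  # cropped matches `shape`; full keeps the margin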
|
import pytest
from wecs.core import Entity, System, Component, World
from wecs.core import and_filter
# Absolute basics
@pytest.fixture
def world():
return World()
@pytest.fixture
def entity(world):
return world.create_entity()
# Null stuff
@Component()
class NullComponent:
pass
@pytest.fixture
def null_component():
return NullComponent()
@pytest.fixture
def null_entity(world, null_component):
entity = world.create_entity(null_component)
world._flush_component_updates()
return entity
class NullSystem(System):
entity_filters = {
"null": and_filter([NullComponent])
}
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.entries = []
self.exits = []
self.updates = []
def enter_filters(self, filters, entity):
self.entries.append((filters, entity))
def exit_filters(self, filters, entity):
self.exits.append((filters, entity))
def update(self, entities_by_filter):
self.updates.append(entities_by_filter)
@pytest.fixture
def null_system():
return NullSystem()
@pytest.fixture
def null_system_world(world, null_system):
world.add_system(null_system, 0)
return world
|
import pygame
from pygame.sprite import Sprite
class Ship(Sprite):
"""Parent ship class for Blast."""
def __init__(self, blast_settings, screen):
"""Init ship and starting position."""
super(Ship, self).__init__()
self.screen = screen
self.blast_settings = blast_settings
self.image = pygame.image.load('../images/player.png')
self.rect = self.image.get_rect()
self.screen_rect = screen.get_rect()
self.rect.centerx = self.screen_rect.centerx
self.rect.centery = self.screen_rect.centery
self.rect.bottom = self.screen_rect.bottom
self.center = float(self.rect.centerx)
self.vertical = float(self.rect.centery)
# Movement flags
self.moving_right = False
self.moving_left = False
self.moving_up = False
self.moving_down = False
def update(self):
"""Update the ship's pos based on the movement flags."""
if self.moving_right and self.rect.right < self.screen_rect.right:
self.center += self.blast_settings.ship_speed_factor
if self.moving_left and self.rect.left > 0:
self.center -= self.blast_settings.ship_speed_factor
if self.moving_up and self.rect.top > 0:
self.vertical -= self.blast_settings.ship_speed_factor
if self.moving_down and self.rect.bottom < self.screen_rect.bottom:
self.vertical += self.blast_settings.ship_speed_factor
self.rect.centerx = self.center
self.rect.centery = self.vertical
def blitme(self):
"""Draw ship at current location."""
self.screen.blit(self.image, self.rect)
def center_ship(self):
"""Center ship on screen"""
self.center = self.screen_rect.centerx
# FIXME: Arbitrary "magic number" to get ship to bottom
self.vertical = self.screen_rect.bottom - 25
|
n1 = int(input('What is your age? '))
n2 = int(input('What is your age? '))
n3 = int(input('What is your age? '))
n4 = int(input('What is your age? '))
n5 = int(input('What is your age? '))
n6 = int(input('What is your age? '))
n7 = int(input('What is your age? '))
# The original `if n1 > n2,n3,n4,n5,n6,n7` is not valid Python; comparing n1
# against max() of the other ages checks whether the first age is the greatest.
# (Original trailing note, translated from Portuguese: "teacher, I didn't understand this either.")
if n1 > max(n2, n3, n4, n5, n6, n7):
    print('The first age entered is the greatest.')
|
__version__ = '3.0-dev'
from .facets import Facet, GlobalTermsFacet, RangeFilter, TermsFacet, YearHistogram
from .mapping import (
DEFAULT_ANALYZER, Indexable, ModelIndex, RawMultiString, RawString, build_mapping, deep_field_factory,
document_field, document_from_model)
from .registry import app_documents, documents, model_documents, register
from .utils import delete, index, search
from .views import Column, SeekerView
default_app_config = 'seeker.apps.SeekerConfig'
|
import json
from Qt import QtGui
from Qt import QtWidgets
def set_palette_from_dict(dct):
"""Set palette to current QApplication based on given dictionary"""
groups = ["Disabled", "Active", "Inactive", "Normal"]
roles = [
"AlternateBase",
"Background",
"Base",
"Button",
"ButtonText",
"BrightText",
"Dark",
"Foreground",
"Highlight",
"HighlightedText",
"Light",
"Link",
"LinkVisited",
"Mid",
"Midlight",
"Shadow",
"ToolTipBase",
"ToolTipText",
"Text",
"Window",
"WindowText",
]
palette = QtGui.QPalette()
for role in roles:
try:
for group in groups:
color = QtGui.QColor(dct["%s:%s" % (role, group)])
qGrp = getattr(QtGui.QPalette, group)
qRl = getattr(QtGui.QPalette, role)
palette.setColor(qGrp, qRl, color)
except: # noqa
print("Could not use: " + str(palette))
try:
QtWidgets.QApplication.setPalette(palette)
except: # noqa
print("Could not set palette: " + str(palette))
def set_style():
"""Set style"""
available_styles = QtWidgets.QStyleFactory.keys()
if "Fusion" in available_styles:
QtWidgets.QApplication.setStyle("Fusion")
elif "Plastique" in available_styles:
QtWidgets.QApplication.setStyle("Plastique")
def set_maya_tweaks():
"""Apply Maya-specific styling"""
base_palette = QtWidgets.QApplication.palette()
# Set custom colors
LIGHT_COLOR = QtGui.QColor(100, 100, 100)
MID_COLOR = QtGui.QColor(68, 68, 68)
# Create a new palette
tab_palette = QtGui.QPalette(base_palette)
tab_palette.setBrush(QtGui.QPalette.Window, QtGui.QBrush(LIGHT_COLOR))
tab_palette.setBrush(QtGui.QPalette.Button, QtGui.QBrush(MID_COLOR))
# Define the widgets that needs tweaking
widget_palettes = {}
widget_palettes["QTabBar"] = tab_palette
widget_palettes["QTabWidget"] = tab_palette
# Set the new tweaked palette
for name, palette in widget_palettes.items():
QtWidgets.QApplication.setPalette(palette, name)
def read_json(filepath):
"""Read given JSON filepath into dictionary"""
with open(filepath, "r") as data_file:
data = json.load(data_file)
return data
def set_maya_palette_with_tweaks(palette_filepath):
"""Apply styling to current QApplication"""
data = read_json(palette_filepath)
set_palette_from_dict(data)
set_style()
set_maya_tweaks()
|
#
# 2020 ExpertSystem
#
'''Script for generating predictions for the coinform250 dataset
using the acred predictor
See https://github.com/co-inform/Datasets
See also scripts/fetch-data.sh, which should download the input json file
and place it in the `data/evaluation/` folder.
'''
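# Example invocation (the script name, paths, and endpoint URL below are
# illustrative assumptions, not values mandated by the project):
#
#   python predict_coinform250.py \
#       -inputJson data/evaluation/coinform250.json \
#       -outDir data/evaluation/acred-predictions \
#       -credpred_url http://localhost:8070/api/v1/tweets/accuracy-review \
#       -batchSize 5 -req_timeout 90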
import argparse
import time
import json
import os
import os.path as osp
import requests
import traceback
import pandas as pd
def ensure_req_tweet_content(req):
for t in req['tweets']:
c = t['content']
if c is None:
t['content'] = ''
print('Fixed null content')
def acred_as_coinfo_label(credreview, thresh=0.4):
assert thresh >= 0.0
assert thresh <= 1.0
conf = credreview['reviewRating']['confidence']
if conf <= thresh:
return 'not_verifiable'
val = credreview['reviewRating']['ratingValue']
if val >= 0.5:
return 'credible'
if val >= 0.25:
return 'mostly_credible'
if val >= -0.25:
return 'credible_uncertain'
if val >= -0.5:
return 'credible_uncertain'
return 'not_credible'
def exec_req(i, req, args):
print('\n\nExecuting request %s' % (i))
ensure_req_tweet_content(req)
req['reviewFormat'] = 'schema.org'
start = time.time()
resp = requests.post(args.credpred_url, json=req,
verify=False,
timeout=args.req_timeout)
result = []
if resp.ok:
respd = resp.json()
result = [{
'tweet_id': request['tweet_id'],
'ratingValue': r['reviewRating']['ratingValue'],
'confidence': r['reviewRating']['confidence'],
'label': acred_as_coinfo_label(r)
} for request, r in zip(req['tweets'], respd)]
resp_f = 'coinform250_%s.json' % i
with open('%s/%s' % (args.outDir, resp_f), 'w') as outf:
json.dump(respd, outf)
else:
print("Failed: %s %s" % (str(resp), resp.text))
print('Processed in %ss.' % (time.time() - start))
return result
def as_acred_requests(tweets, batchSize=5):
batch = []
for i, t in enumerate(tweets):
batch.append({
'content': t['full_text'],
'tweet_id': t['id'],
'url': 'https://twitter.com/x/status/%s' % (t['id'])})
if len(batch) == batchSize:
yield {'tweets': batch,
'source': 'coinform250.json',
'batch_id': '%s-%s' % (i-batchSize, i)}
batch = []
if len(batch) > 0:
yield {'tweets': batch,
'source': 'coinform250.json',
'batch_id': '%s-%s' % (len(tweets) - len(batch), len(tweets))}
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='Generate tweet credibility predictions for a dir with requests',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument(
'-inputJson',
help='Path to the coinform250.json file',
required=True)
parser.add_argument(
'-batchSize', type=int, default=5,
help='Number of tweets to send per request to acred endpoint')
parser.add_argument(
'-outDir',
help='Path to a local dir where the CredibilityReviews will be stored',
required=True)
parser.add_argument(
'-credpred_url',
help='URL of the acred endpoint for the tweet credibility')
parser.add_argument(
'-credpred_id',
help='ID of the generation task')
parser.add_argument(
'-req_timeout',
type=int, default=90,
help='Seconds to wait for a response')
args = parser.parse_args()
all_start = time.time()
assert osp.isdir(osp.join(args.outDir))
assert osp.isfile(args.inputJson)
tweets = []
with open(args.inputJson) as jsonl_file:
tweets = [json.loads(line) for line in jsonl_file]
assert len(tweets) > 0, '%s' % (len(tweets))
print('Reviewing credibility of %s tweets using batchSize %s' % (len(tweets), args.batchSize))
preds = []
for i, req in enumerate(as_acred_requests(tweets, args.batchSize)):
try:
preds.extend(exec_req(i, req, args))
except Exception as e:
print('Error executing request %s %s %s' % (i, req, str(e)))
print(traceback.format_exc())
pd.DataFrame(preds).to_csv('%s/%s.csv' % (args.outDir, 'predictions'), index=False)
print('Finished in %.3fs' % (time.time() - all_start))
|
#!/usr/bin/env python
import argparse
from LhcVaspTools.BasicUtils import readDataFromJson
from LhcVaspTools.OamExts import EnergyBandsWithOam
def parseArgv() -> argparse.Namespace:
parser: argparse.ArgumentParser = argparse.ArgumentParser(
description="This script is used to plot bands")
parser.add_argument("input_file_name", metavar='INPUT_FILE_NAME',
nargs="?", type=str, help="input hdf5 file.")
parser.add_argument("-o", "--output-file", nargs="?", type=str,
dest="output_file_name", required=True, help="output figure file.")
parser.add_argument('-c', '--component', nargs="?", type=str,
choices=['Lx', 'Ly', 'Lz'], dest='component', required=True,
help='the OAM component to plot.')
parser.add_argument('-bf', '--band-indices-file', nargs='?', type=str,
dest='band_indices_file_name', help='band indices file name.')
parser.add_argument('-xl', '--xlim', nargs=2, type=float,
dest='xlim', help='xlim for the bands plot.')
parser.add_argument('-yl', '--ylim', nargs=2, type=float,
dest='ylim', default=[-2., 1.], help='ylim for the bands plot.')
options: argparse.Namespace = parser.parse_args()
return options
def main() -> int:
options: argparse.Namespace = parseArgv()
input_file_name: str = options.input_file_name
output_file_name: str = options.output_file_name
component: str = options.component
band_indices_file_name: str = options.band_indices_file_name
xlim: list = options.xlim
ylim: list = options.ylim
if band_indices_file_name is None:
band_indices: list = None
else:
band_indices: list = readDataFromJson(band_indices_file_name)
energy_bands_with_oam: EnergyBandsWithOam = EnergyBandsWithOam()
energy_bands_with_oam.readFile(input_file_name)
energy_bands_with_oam.plotFigure(output_file_name, component,
xlim=xlim, ylim=ylim, band_indices=band_indices)
return 0
if __name__ == "__main__":
main()
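# Example invocation (the script and file names are illustrative assumptions):
#
#   python plot_bands_with_oam.py bands_oam.h5 -o bands_Lz.png -c Lz -yl -2.0 1.0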
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import print_function
from typing import Any, Callable, Dict, Iterable, List, Mapping, Tuple, TypeVar
from mock import patch, MagicMock
from django.http import HttpResponse
from django.test import TestCase, override_settings
from zerver.lib.test_helpers import (
queries_captured, simulated_empty_cache,
simulated_queue_client, tornado_redirected_to_list, ZulipTestCase,
most_recent_message, make_client
)
from zerver.lib.test_runner import slow
from zerver.models import UserProfile, Recipient, \
Realm, RealmAlias, UserActivity, \
get_user_profile_by_email, get_realm, \
get_client, get_stream, Message, get_unique_open_realm, \
completely_open
from zerver.lib.avatar import get_avatar_url
from zerver.lib.initial_password import initial_password
from zerver.lib.email_mirror import create_missed_message_address
from zerver.lib.actions import \
get_emails_from_user_ids, do_deactivate_user, do_reactivate_user, \
do_change_is_admin, extract_recipients, \
do_set_realm_name, do_deactivate_realm, \
do_add_subscription, do_remove_subscription, do_make_stream_private
from zerver.lib.notifications import handle_missedmessage_emails
from zerver.lib.session_user import get_session_dict_user
from zerver.middleware import is_slow_query
from zerver.worker import queue_processors
from django.conf import settings
from django.core import mail
from six import text_type
from six.moves import range
import os
import re
import sys
import time
import ujson
import random
def bail(msg):
# type: (str) -> None
print('\nERROR: %s\n' % (msg,))
sys.exit(1)
try:
settings.TEST_SUITE
except:
bail('Test suite only runs correctly with --settings=zproject.test_settings')
# Even though we don't use pygments directly in this file, we need
# this import.
try:
import pygments
except ImportError:
bail('The Pygments library is required to run the backend test suite.')
K = TypeVar('K')
V = TypeVar('V')
def find_dict(lst, k, v):
# type: (Iterable[Dict[K, V]], K, V) -> Dict[K, V]
for dct in lst:
if dct[k] == v:
return dct
raise Exception('Cannot find element in list where key %s == %s' % (k, v))
# same as in test_uploads.py
TEST_AVATAR_DIR = os.path.join(os.path.dirname(__file__), 'images')
class SlowQueryTest(TestCase):
def test_is_slow_query(self):
# type: () -> None
self.assertFalse(is_slow_query(1.1, '/some/random/url'))
self.assertTrue(is_slow_query(2, '/some/random/url'))
self.assertTrue(is_slow_query(5.1, '/activity'))
self.assertFalse(is_slow_query(2, '/activity'))
self.assertFalse(is_slow_query(2, '/json/report_error'))
self.assertFalse(is_slow_query(2, '/api/v1/deployments/report_error'))
self.assertFalse(is_slow_query(2, '/realm_activity/whatever'))
self.assertFalse(is_slow_query(2, '/user_activity/whatever'))
self.assertFalse(is_slow_query(9, '/accounts/webathena_kerberos_login/'))
self.assertTrue(is_slow_query(11, '/accounts/webathena_kerberos_login/'))
class ModelTest(TestCase):
def test_miscellaneous_things(self):
# type: () -> None
'''
This is a kitchen sink test that is designed simply to get
test coverage up to 100% for models.py.
'''
client = make_client('some_client')
self.assertEqual(str(client), u'<Client: some_client>')
class RealmTest(ZulipTestCase):
def assert_user_profile_cache_gets_new_name(self, email, new_realm_name):
# type: (text_type, text_type) -> None
user_profile = get_user_profile_by_email(email)
self.assertEqual(user_profile.realm.name, new_realm_name)
def test_do_set_realm_name_caching(self):
# type: () -> None
"""The main complicated thing about setting realm names is fighting the
cache, and we start by populating the cache for Hamlet, and we end
by checking the cache to ensure that the new value is there."""
get_user_profile_by_email('hamlet@zulip.com')
realm = get_realm('zulip.com')
new_name = 'Zed You Elle Eye Pea'
do_set_realm_name(realm, new_name)
self.assertEqual(get_realm(realm.domain).name, new_name)
self.assert_user_profile_cache_gets_new_name('hamlet@zulip.com', new_name)
def test_do_set_realm_name_events(self):
# type: () -> None
realm = get_realm('zulip.com')
new_name = 'Puliz'
events = [] # type: List[Dict[str, Any]]
with tornado_redirected_to_list(events):
do_set_realm_name(realm, new_name)
event = events[0]['event']
self.assertEqual(event, dict(
type = 'realm',
op = 'update',
property = 'name',
value = new_name,
))
def test_update_realm_api(self):
# type: () -> None
new_name = 'Zulip: Worldwide Exporter of APIs'
email = 'cordelia@zulip.com'
self.login(email)
user_profile = get_user_profile_by_email(email)
do_change_is_admin(user_profile, True)
def set_up_db(attr, value):
# type: (str, Any) -> None
realm = get_realm('zulip.com')
setattr(realm, attr, value)
realm.save()
def update_with_api(**kwarg):
# type: (**Any) -> Realm
params = {k: ujson.dumps(v) for k, v in kwarg.items()}
result = self.client_patch('/json/realm', params)
self.assert_json_success(result)
return get_realm('zulip.com') # refresh data
# name
realm = update_with_api(name=new_name)
self.assertEqual(realm.name, new_name)
# restricted
set_up_db('restricted_to_domain', False)
realm = update_with_api(restricted_to_domain=True)
self.assertEqual(realm.restricted_to_domain, True)
realm = update_with_api(restricted_to_domain=False)
self.assertEqual(realm.restricted_to_domain, False)
# invite_required
set_up_db('invite_required', False)
realm = update_with_api(invite_required=True)
self.assertEqual(realm.invite_required, True)
realm = update_with_api(invite_required=False)
self.assertEqual(realm.invite_required, False)
# invite_by_admins_only
set_up_db('invite_by_admins_only', False)
realm = update_with_api(invite_by_admins_only=True)
self.assertEqual(realm.invite_by_admins_only, True)
realm = update_with_api(invite_by_admins_only=False)
self.assertEqual(realm.invite_by_admins_only, False)
# create_stream_by_admins_only
set_up_db('create_stream_by_admins_only', False)
realm = update_with_api(create_stream_by_admins_only=True)
self.assertEqual(realm.create_stream_by_admins_only, True)
realm = update_with_api(create_stream_by_admins_only=False)
self.assertEqual(realm.create_stream_by_admins_only, False)
# allow_message_editing
set_up_db('allow_message_editing', False)
set_up_db('message_content_edit_limit_seconds', 0)
realm = update_with_api(allow_message_editing=True,
message_content_edit_limit_seconds=100)
self.assertEqual(realm.allow_message_editing, True)
self.assertEqual(realm.message_content_edit_limit_seconds, 100)
realm = update_with_api(allow_message_editing=False)
self.assertEqual(realm.allow_message_editing, False)
self.assertEqual(realm.message_content_edit_limit_seconds, 100)
realm = update_with_api(message_content_edit_limit_seconds=200)
self.assertEqual(realm.allow_message_editing, False)
self.assertEqual(realm.message_content_edit_limit_seconds, 200)
def test_admin_restrictions_for_changing_realm_name(self):
# type: () -> None
new_name = 'Mice will play while the cat is away'
email = 'othello@zulip.com'
self.login(email)
user_profile = get_user_profile_by_email(email)
do_change_is_admin(user_profile, False)
req = dict(name=ujson.dumps(new_name))
result = self.client_patch('/json/realm', req)
self.assert_json_error(result, 'Must be a realm administrator')
def test_do_deactivate_realm(self):
# type: () -> None
"""The main complicated thing about deactivating realm names is
updating the cache, and we start by populating the cache for
Hamlet, and we end by checking the cache to ensure that his
realm appears to be deactivated. You can make this test fail
by disabling cache.flush_realm()."""
get_user_profile_by_email('hamlet@zulip.com')
realm = get_realm('zulip.com')
do_deactivate_realm(realm)
user = get_user_profile_by_email('hamlet@zulip.com')
self.assertTrue(user.realm.deactivated)
def test_do_set_realm_default_language(self):
# type: () -> None
new_lang = "de"
realm = get_realm('zulip.com')
self.assertNotEqual(realm.default_language, new_lang)
# we need an admin user.
email = 'iago@zulip.com'
self.login(email)
req = dict(default_language=ujson.dumps(new_lang))
result = self.client_patch('/json/realm', req)
self.assert_json_success(result)
realm = get_realm('zulip.com')
self.assertEqual(realm.default_language, new_lang)
# Test setting zh_CN; zh_HANS is stored in the db instead of zh_CN
chinese = "zh_CN"
simplified_chinese = "zh_HANS"
req = dict(default_language=ujson.dumps(chinese))
result = self.client_patch('/json/realm', req)
self.assert_json_success(result)
realm = get_realm('zulip.com')
self.assertEqual(realm.default_language, simplified_chinese)
# Test to make sure that when an invalid language is passed
# as the default realm language, the correct validation error is
# raised and the invalid language is not saved in the db
invalid_lang = "invalid_lang"
req = dict(default_language=ujson.dumps(invalid_lang))
result = self.client_patch('/json/realm', req)
self.assert_json_error(result, "Invalid language '%s'" % (invalid_lang,))
realm = get_realm('zulip.com')
self.assertNotEqual(realm.default_language, invalid_lang)
class PermissionTest(ZulipTestCase):
def test_get_admin_users(self):
# type: () -> None
user_profile = get_user_profile_by_email('hamlet@zulip.com')
do_change_is_admin(user_profile, False)
admin_users = user_profile.realm.get_admin_users()
self.assertFalse(user_profile in admin_users)
do_change_is_admin(user_profile, True)
admin_users = user_profile.realm.get_admin_users()
self.assertTrue(user_profile in admin_users)
def test_updating_non_existent_user(self):
# type: () -> None
self.login('hamlet@zulip.com')
admin = get_user_profile_by_email('hamlet@zulip.com')
do_change_is_admin(admin, True)
result = self.client_patch('/json/users/nonexistentuser@zulip.com', {})
self.assert_json_error(result, 'No such user')
def test_admin_api(self):
# type: () -> None
self.login('hamlet@zulip.com')
admin = get_user_profile_by_email('hamlet@zulip.com')
user = get_user_profile_by_email('othello@zulip.com')
realm = admin.realm
do_change_is_admin(admin, True)
# Make sure we see is_admin flag in /json/users
result = self.client_get('/json/users')
self.assert_json_success(result)
members = ujson.loads(result.content)['members']
hamlet = find_dict(members, 'email', 'hamlet@zulip.com')
self.assertTrue(hamlet['is_admin'])
othello = find_dict(members, 'email', 'othello@zulip.com')
self.assertFalse(othello['is_admin'])
# Giveth
req = dict(is_admin=ujson.dumps(True))
events = [] # type: List[Dict[str, Any]]
with tornado_redirected_to_list(events):
result = self.client_patch('/json/users/othello@zulip.com', req)
self.assert_json_success(result)
admin_users = realm.get_admin_users()
self.assertTrue(user in admin_users)
person = events[0]['event']['person']
self.assertEqual(person['email'], 'othello@zulip.com')
self.assertEqual(person['is_admin'], True)
# Taketh away
req = dict(is_admin=ujson.dumps(False))
events = []
with tornado_redirected_to_list(events):
result = self.client_patch('/json/users/othello@zulip.com', req)
self.assert_json_success(result)
admin_users = realm.get_admin_users()
self.assertFalse(user in admin_users)
person = events[0]['event']['person']
self.assertEqual(person['email'], 'othello@zulip.com')
self.assertEqual(person['is_admin'], False)
# Make sure only admins can patch other user's info.
self.login('othello@zulip.com')
result = self.client_patch('/json/users/hamlet@zulip.com', req)
self.assert_json_error(result, 'Insufficient permission')
class ZephyrTest(ZulipTestCase):
def test_webathena_kerberos_login(self):
# type: () -> None
email = 'hamlet@zulip.com'
self.login(email)
def post(**kwargs):
# type: (**Any) -> HttpResponse
params = {k: ujson.dumps(v) for k, v in kwargs.items()}
return self.client_post('/accounts/webathena_kerberos_login/', params)
result = post()
self.assert_json_error(result, 'Could not find Kerberos credential')
result = post(cred='whatever')
self.assert_json_error(result, 'Webathena login not enabled')
email = 'starnine@mit.edu'
self.login(email)
def ccache_mock(**kwargs):
# type: (**Any) -> Any
return patch('zerver.views.zephyr.make_ccache', **kwargs)
def ssh_mock(**kwargs):
# type: (**Any) -> Any
return patch('zerver.views.zephyr.subprocess.check_call', **kwargs)
def mirror_mock():
# type: () -> Any
return self.settings(PERSONAL_ZMIRROR_SERVER='server')
def logging_mock():
# type: () -> Any
return patch('logging.exception')
cred = dict(cname=dict(nameString=['starnine']))
with ccache_mock(side_effect=KeyError('foo')):
result = post(cred=cred)
self.assert_json_error(result, 'Invalid Kerberos cache')
with \
ccache_mock(return_value=b'1234'), \
ssh_mock(side_effect=KeyError('foo')), \
logging_mock() as log:
result = post(cred=cred)
self.assert_json_error(result, 'We were unable to setup mirroring for you')
log.assert_called_with("Error updating the user's ccache")
with ccache_mock(return_value=b'1234'), mirror_mock(), ssh_mock() as ssh:
result = post(cred=cred)
self.assert_json_success(result)
ssh.assert_called_with([
'ssh',
'server',
'--',
'/home/zulip/zulip/bots/process_ccache',
'starnine',
get_user_profile_by_email(email).api_key,
'MTIzNA=='])
class AdminCreateUserTest(ZulipTestCase):
def test_create_user_backend(self):
# type: () -> None
# This test should give us complete coverage on
# create_user_backend. It mostly exercises error
# conditions, and it also does a basic test of the success
# path.
admin_email = 'hamlet@zulip.com'
self.login(admin_email)
admin = get_user_profile_by_email(admin_email)
do_change_is_admin(admin, True)
result = self.client_put("/json/users", dict())
self.assert_json_error(result, "Missing 'email' argument")
result = self.client_put("/json/users", dict(
email='romeo@not-zulip.com',
)
)
self.assert_json_error(result, "Missing 'password' argument")
result = self.client_put("/json/users", dict(
email='romeo@not-zulip.com',
password='xxxx',
)
)
self.assert_json_error(result, "Missing 'full_name' argument")
result = self.client_put("/json/users", dict(
email='romeo@not-zulip.com',
password='xxxx',
full_name='Romeo Montague',
)
)
self.assert_json_error(result, "Missing 'short_name' argument")
result = self.client_put("/json/users", dict(
email='broken',
password='xxxx',
full_name='Romeo Montague',
short_name='Romeo',
)
)
self.assert_json_error(result, "Bad name or username")
result = self.client_put("/json/users", dict(
email='romeo@not-zulip.com',
password='xxxx',
full_name='Romeo Montague',
short_name='Romeo',
)
)
self.assert_json_error(result,
"Email 'romeo@not-zulip.com' does not belong to domain 'zulip.com'")
RealmAlias.objects.create(realm=get_realm('zulip.com'), domain='zulip.net')
# HAPPY PATH STARTS HERE
valid_params = dict(
email='romeo@zulip.net',
password='xxxx',
full_name='Romeo Montague',
short_name='Romeo',
)
result = self.client_put("/json/users", valid_params)
self.assert_json_success(result)
new_user = get_user_profile_by_email('romeo@zulip.net')
self.assertEqual(new_user.full_name, 'Romeo Montague')
self.assertEqual(new_user.short_name, 'Romeo')
# One more error condition to test--we can't create
# the same user twice.
result = self.client_put("/json/users", valid_params)
self.assert_json_error(result,
"Email 'romeo@zulip.net' already in use")
class WorkerTest(TestCase):
class FakeClient(object):
def __init__(self):
# type: () -> None
self.consumers = {} # type: Dict[str, Callable]
self.queue = [] # type: List[Tuple[str, Dict[str, Any]]]
def register_json_consumer(self, queue_name, callback):
# type: (str, Callable) -> None
self.consumers[queue_name] = callback
def start_consuming(self):
# type: () -> None
for queue_name, data in self.queue:
callback = self.consumers[queue_name]
callback(data)
def test_UserActivityWorker(self):
# type: () -> None
fake_client = self.FakeClient()
user = get_user_profile_by_email('hamlet@zulip.com')
UserActivity.objects.filter(
user_profile = user.id,
client = get_client('ios')
).delete()
data = dict(
user_profile_id = user.id,
client = 'ios',
time = time.time(),
query = 'send_message'
)
fake_client.queue.append(('user_activity', data))
with simulated_queue_client(lambda: fake_client):
worker = queue_processors.UserActivityWorker()
worker.setup()
worker.start()
activity_records = UserActivity.objects.filter(
user_profile = user.id,
client = get_client('ios')
)
self.assertEqual(len(activity_records), 1)
self.assertEqual(activity_records[0].count, 1)
def test_error_handling(self):
# type: () -> None
processed = []
@queue_processors.assign_queue('unreliable_worker')
class UnreliableWorker(queue_processors.QueueProcessingWorker):
def consume(self, data):
# type: (Mapping[str, Any]) -> None
if data["type"] == 'unexpected behaviour':
raise Exception('Worker task not performing as expected!')
processed.append(data["type"])
def _log_problem(self):
# type: () -> None
# keep the tests quiet
pass
fake_client = self.FakeClient()
for msg in ['good', 'fine', 'unexpected behaviour', 'back to normal']:
fake_client.queue.append(('unreliable_worker', {'type': msg}))
fn = os.path.join(settings.QUEUE_ERROR_DIR, 'unreliable_worker.errors')
try:
os.remove(fn)
except OSError:
pass
with simulated_queue_client(lambda: fake_client):
worker = UnreliableWorker()
worker.setup()
worker.start()
self.assertEqual(processed, ['good', 'fine', 'back to normal'])
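# Failed events land in QUEUE_ERROR_DIR as tab-separated lines whose
# second column is the JSON-encoded event.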
line = open(fn).readline().strip()
event = ujson.loads(line.split('\t')[1])
self.assertEqual(event["type"], 'unexpected behaviour')
def test_worker_noname(self):
# type: () -> None
class TestWorker(queue_processors.QueueProcessingWorker):
def __init__(self):
# type: () -> None
super(TestWorker, self).__init__()
def consume(self, data):
# type: (Mapping[str, Any]) -> None
pass
with self.assertRaises(queue_processors.WorkerDeclarationException):
TestWorker()
def test_worker_noconsume(self):
# type: () -> None
@queue_processors.assign_queue('test_worker')
class TestWorker(queue_processors.QueueProcessingWorker):
def __init__(self):
# type: () -> None
super(TestWorker, self).__init__()
with self.assertRaises(queue_processors.WorkerDeclarationException):
worker = TestWorker()
worker.consume({})
class DocPageTest(ZulipTestCase):
def _test(self, url, expected_content):
# type: (str, str) -> None
result = self.client_get(url)
self.assertEqual(result.status_code, 200)
self.assertIn(expected_content, str(result.content))
def test_doc_endpoints(self):
# type: () -> None
self._test('/api/', 'We hear you like APIs')
self._test('/api/endpoints/', 'pre-built API bindings for Python')
self._test('/apps/', 'Appsolutely')
self._test('/features/', 'Talk about multiple topics at once')
self._test('/hello/', 'workplace chat that actually improves your productivity')
self._test('/integrations/', 'require creating a Zulip bot')
self._test('/login/', '(Normal users)')
self._test('/register/', 'get started')
result = self.client_get('/new-user/')
self.assertEqual(result.status_code, 301)
self.assertIn('hello', result['Location'])
result = self.client_get('/robots.txt')
self.assertEqual(result.status_code, 301)
self.assertIn('static/robots.txt', result['Location'])
result = self.client_get('/static/robots.txt')
self.assertEqual(result.status_code, 200)
self.assertIn(
'Disallow: /',
''.join(str(x) for x in list(result.streaming_content))
)
class UserProfileTest(TestCase):
def test_get_emails_from_user_ids(self):
# type: () -> None
hamlet = get_user_profile_by_email('hamlet@zulip.com')
othello = get_user_profile_by_email('othello@zulip.com')
dct = get_emails_from_user_ids([hamlet.id, othello.id])
self.assertEqual(dct[hamlet.id], 'hamlet@zulip.com')
self.assertEqual(dct[othello.id], 'othello@zulip.com')
class UserChangesTest(ZulipTestCase):
def test_update_api_key(self):
# type: () -> None
email = "hamlet@zulip.com"
self.login(email)
user = get_user_profile_by_email(email)
old_api_key = user.api_key
result = self.client_post('/json/users/me/api_key/regenerate')
self.assert_json_success(result)
new_api_key = ujson.loads(result.content)['api_key']
self.assertNotEqual(old_api_key, new_api_key)
user = get_user_profile_by_email(email)
self.assertEqual(new_api_key, user.api_key)
class ActivateTest(ZulipTestCase):
def test_basics(self):
# type: () -> None
user = get_user_profile_by_email('hamlet@zulip.com')
do_deactivate_user(user)
self.assertFalse(user.is_active)
do_reactivate_user(user)
self.assertTrue(user.is_active)
def test_api(self):
# type: () -> None
admin = get_user_profile_by_email('othello@zulip.com')
do_change_is_admin(admin, True)
self.login('othello@zulip.com')
user = get_user_profile_by_email('hamlet@zulip.com')
self.assertTrue(user.is_active)
result = self.client_delete('/json/users/hamlet@zulip.com')
self.assert_json_success(result)
user = get_user_profile_by_email('hamlet@zulip.com')
self.assertFalse(user.is_active)
result = self.client_post('/json/users/hamlet@zulip.com/reactivate')
self.assert_json_success(result)
user = get_user_profile_by_email('hamlet@zulip.com')
self.assertTrue(user.is_active)
def test_api_with_nonexistent_user(self):
# type: () -> None
admin = get_user_profile_by_email('othello@zulip.com')
do_change_is_admin(admin, True)
self.login('othello@zulip.com')
# Can not deactivate a user with the bot api
result = self.client_delete('/json/bots/hamlet@zulip.com')
self.assert_json_error(result, 'No such bot')
# Can not deactivate a nonexistent user.
result = self.client_delete('/json/users/nonexistent@zulip.com')
self.assert_json_error(result, 'No such user')
# Can not reactivate a nonexistent user.
result = self.client_post('/json/users/nonexistent@zulip.com/reactivate')
self.assert_json_error(result, 'No such user')
def test_api_with_insufficient_permissions(self):
# type: () -> None
non_admin = get_user_profile_by_email('othello@zulip.com')
do_change_is_admin(non_admin, False)
self.login('othello@zulip.com')
# Can not deactivate a user with the users api
result = self.client_delete('/json/users/hamlet@zulip.com')
self.assert_json_error(result, 'Insufficient permission')
# Can not reactivate a user
result = self.client_post('/json/users/hamlet@zulip.com/reactivate')
self.assert_json_error(result, 'Insufficient permission')
class BotTest(ZulipTestCase):
def assert_num_bots_equal(self, count):
# type: (int) -> None
result = self.client_get("/json/bots")
self.assert_json_success(result)
json = ujson.loads(result.content)
self.assertEqual(count, len(json['bots']))
def create_bot(self, **extras):
# type: (**Any) -> Dict[str, Any]
bot_info = {
'full_name': 'The Bot of Hamlet',
'short_name': 'hambot',
}
bot_info.update(extras)
result = self.client_post("/json/bots", bot_info)
self.assert_json_success(result)
return ujson.loads(result.content)
def deactivate_bot(self):
# type: () -> None
result = self.client_delete("/json/bots/hambot-bot@zulip.com")
self.assert_json_success(result)
def test_add_bot_with_bad_username(self):
# type: () -> None
self.login("hamlet@zulip.com")
self.assert_num_bots_equal(0)
bot_info = dict(
full_name='',
short_name='',
)
result = self.client_post("/json/bots", bot_info)
self.assert_json_error(result, 'Bad name or username')
self.assert_num_bots_equal(0)
def test_add_bot(self):
# type: () -> None
self.login("hamlet@zulip.com")
self.assert_num_bots_equal(0)
events = [] # type: List[Dict[str, Any]]
with tornado_redirected_to_list(events):
result = self.create_bot()
self.assert_num_bots_equal(1)
event = [e for e in events if e['event']['type'] == 'realm_bot'][0]
self.assertEqual(
dict(
type='realm_bot',
op='add',
bot=dict(email='hambot-bot@zulip.com',
full_name='The Bot of Hamlet',
api_key=result['api_key'],
avatar_url=result['avatar_url'],
default_sending_stream=None,
default_events_register_stream=None,
default_all_public_streams=False,
owner='hamlet@zulip.com',
)
),
event['event']
)
users_result = self.client_get('/json/users')
members = ujson.loads(users_result.content)['members']
bots = [m for m in members if m['email'] == 'hambot-bot@zulip.com']
self.assertEqual(len(bots), 1)
bot = bots[0]
self.assertEqual(bot['bot_owner'], 'hamlet@zulip.com')
def test_add_bot_with_username_in_use(self):
# type: () -> None
self.login("hamlet@zulip.com")
self.assert_num_bots_equal(0)
result = self.create_bot()
self.assert_num_bots_equal(1)
bot_info = dict(
full_name='Duplicate',
short_name='hambot',
)
result = self.client_post("/json/bots", bot_info)
self.assert_json_error(result, 'Username already in use')
def test_add_bot_with_user_avatar(self):
# type: () -> None
self.login("hamlet@zulip.com")
self.assert_num_bots_equal(0)
with open(os.path.join(TEST_AVATAR_DIR, 'img.png'), 'rb') as fp:
self.create_bot(file=fp)
self.assert_num_bots_equal(1)
profile = get_user_profile_by_email('hambot-bot@zulip.com')
self.assertEqual(profile.avatar_source, UserProfile.AVATAR_FROM_USER)
# TODO: check img.png was uploaded properly
def test_add_bot_with_too_many_files(self):
# type: () -> None
self.login("hamlet@zulip.com")
self.assert_num_bots_equal(0)
with open(os.path.join(TEST_AVATAR_DIR, 'img.png'), 'rb') as fp1, \
open(os.path.join(TEST_AVATAR_DIR, 'img.gif'), 'rb') as fp2:
bot_info = dict(
full_name='whatever',
short_name='whatever',
file1=fp1,
file2=fp2,
)
result = self.client_post("/json/bots", bot_info)
self.assert_json_error(result, 'You may only upload one file at a time')
self.assert_num_bots_equal(0)
def test_add_bot_with_default_sending_stream(self):
# type: () -> None
self.login("hamlet@zulip.com")
self.assert_num_bots_equal(0)
result = self.create_bot(default_sending_stream='Denmark')
self.assert_num_bots_equal(1)
self.assertEqual(result['default_sending_stream'], 'Denmark')
profile = get_user_profile_by_email('hambot-bot@zulip.com')
self.assertEqual(profile.default_sending_stream.name, 'Denmark')
def test_add_bot_with_default_sending_stream_not_subscribed(self):
# type: () -> None
self.login("hamlet@zulip.com")
self.assert_num_bots_equal(0)
result = self.create_bot(default_sending_stream='Rome')
self.assert_num_bots_equal(1)
self.assertEqual(result['default_sending_stream'], 'Rome')
profile = get_user_profile_by_email('hambot-bot@zulip.com')
self.assertEqual(profile.default_sending_stream.name, 'Rome')
def test_bot_add_subscription(self):
# type: () -> None
"""
Calling POST /json/users/me/subscriptions should successfully add
streams, and a stream to the
list of subscriptions and confirm the right number of events
are generated.
When 'principals' has a bot, no notification message event or invitation email
is sent when add_subscriptions_backend is called in the above api call.
"""
self.login("hamlet@zulip.com")
# Normal user i.e. not a bot.
request_data = {
'principals': '["iago@zulip.com"]'
}
events = [] # type: List[Dict[str, Any]]
with tornado_redirected_to_list(events):
result = self.common_subscribe_to_streams("hamlet@zulip.com", ['Rome'], request_data)
self.assert_json_success(result)
msg_event = [e for e in events if e['event']['type'] == 'message']
self.assert_length(msg_event, 1, exact=True) # Notification message event is sent.
# Create a bot.
self.assert_num_bots_equal(0)
result = self.create_bot()
self.assert_num_bots_equal(1)
# A bot
bot_request_data = {
'principals': '["hambot-bot@zulip.com"]'
}
events_bot = [] # type: List[Dict[str, Any]]
with tornado_redirected_to_list(events_bot):
result = self.common_subscribe_to_streams("hamlet@zulip.com", ['Rome'], bot_request_data)
self.assert_json_success(result)
# No notification message event or invitation email is sent because of bot.
msg_event = [e for e in events_bot if e['event']['type'] == 'message']
self.assert_length(msg_event, 0, exact=True)
self.assertEqual(len(events_bot), len(events) - 1)
# Test runner automatically redirects all sent email to a dummy 'outbox'.
self.assertEqual(len(mail.outbox), 0)
def test_add_bot_with_default_sending_stream_private_allowed(self):
# type: () -> None
self.login("hamlet@zulip.com")
user_profile = get_user_profile_by_email("hamlet@zulip.com")
stream = get_stream("Denmark", user_profile.realm)
do_add_subscription(user_profile, stream)
do_make_stream_private(user_profile.realm, "Denmark")
self.assert_num_bots_equal(0)
events = [] # type: List[Dict[str, Any]]
with tornado_redirected_to_list(events):
result = self.create_bot(default_sending_stream='Denmark')
self.assert_num_bots_equal(1)
self.assertEqual(result['default_sending_stream'], 'Denmark')
profile = get_user_profile_by_email('hambot-bot@zulip.com')
self.assertEqual(profile.default_sending_stream.name, 'Denmark')
event = [e for e in events if e['event']['type'] == 'realm_bot'][0]
self.assertEqual(
dict(
type='realm_bot',
op='add',
bot=dict(email='hambot-bot@zulip.com',
full_name='The Bot of Hamlet',
api_key=result['api_key'],
avatar_url=result['avatar_url'],
default_sending_stream='Denmark',
default_events_register_stream=None,
default_all_public_streams=False,
owner='hamlet@zulip.com',
)
),
event['event']
)
self.assertEqual(event['users'], (user_profile.id,))
def test_add_bot_with_default_sending_stream_private_denied(self):
# type: () -> None
self.login("hamlet@zulip.com")
user_profile = get_user_profile_by_email("hamlet@zulip.com")
stream = get_stream("Denmark", user_profile.realm)
do_remove_subscription(user_profile, stream)
do_make_stream_private(user_profile.realm, "Denmark")
bot_info = {
'full_name': 'The Bot of Hamlet',
'short_name': 'hambot',
'default_sending_stream': 'Denmark',
}
result = self.client_post("/json/bots", bot_info)
self.assert_json_error(result, 'Insufficient permission')
def test_add_bot_with_default_events_register_stream(self):
# type: () -> None
self.login("hamlet@zulip.com")
self.assert_num_bots_equal(0)
result = self.create_bot(default_events_register_stream='Denmark')
self.assert_num_bots_equal(1)
self.assertEqual(result['default_events_register_stream'], 'Denmark')
profile = get_user_profile_by_email('hambot-bot@zulip.com')
self.assertEqual(profile.default_events_register_stream.name, 'Denmark')
def test_add_bot_with_default_events_register_stream_private_allowed(self):
# type: () -> None
self.login("hamlet@zulip.com")
user_profile = get_user_profile_by_email("hamlet@zulip.com")
stream = get_stream("Denmark", user_profile.realm)
do_add_subscription(user_profile, stream)
do_make_stream_private(user_profile.realm, "Denmark")
self.assert_num_bots_equal(0)
events = [] # type: List[Dict[str, Any]]
with tornado_redirected_to_list(events):
result = self.create_bot(default_events_register_stream='Denmark')
self.assert_num_bots_equal(1)
self.assertEqual(result['default_events_register_stream'], 'Denmark')
profile = get_user_profile_by_email('hambot-bot@zulip.com')
self.assertEqual(profile.default_events_register_stream.name, 'Denmark')
event = [e for e in events if e['event']['type'] == 'realm_bot'][0]
self.assertEqual(
dict(
type='realm_bot',
op='add',
bot=dict(email='hambot-bot@zulip.com',
full_name='The Bot of Hamlet',
api_key=result['api_key'],
avatar_url=result['avatar_url'],
default_sending_stream=None,
default_events_register_stream='Denmark',
default_all_public_streams=False,
owner='hamlet@zulip.com',
)
),
event['event']
)
self.assertEqual(event['users'], (user_profile.id,))
def test_add_bot_with_default_events_register_stream_private_denied(self):
# type: () -> None
self.login("hamlet@zulip.com")
user_profile = get_user_profile_by_email("hamlet@zulip.com")
stream = get_stream("Denmark", user_profile.realm)
do_remove_subscription(user_profile, stream)
do_make_stream_private(user_profile.realm, "Denmark")
self.assert_num_bots_equal(0)
bot_info = {
'full_name': 'The Bot of Hamlet',
'short_name': 'hambot',
'default_events_register_stream': 'Denmark',
}
result = self.client_post("/json/bots", bot_info)
self.assert_json_error(result, 'Insufficient permission')
def test_add_bot_with_default_all_public_streams(self):
# type: () -> None
self.login("hamlet@zulip.com")
self.assert_num_bots_equal(0)
result = self.create_bot(default_all_public_streams=ujson.dumps(True))
self.assert_num_bots_equal(1)
self.assertTrue(result['default_all_public_streams'])
profile = get_user_profile_by_email('hambot-bot@zulip.com')
self.assertEqual(profile.default_all_public_streams, True)
def test_deactivate_bot(self):
# type: () -> None
self.login("hamlet@zulip.com")
self.assert_num_bots_equal(0)
self.create_bot()
self.assert_num_bots_equal(1)
self.deactivate_bot()
# You can deactivate the same bot twice.
self.deactivate_bot()
self.assert_num_bots_equal(0)
def test_deactivate_bogus_bot(self):
# type: () -> None
"""Deleting a bogus bot will succeed silently."""
self.login("hamlet@zulip.com")
self.assert_num_bots_equal(0)
self.create_bot()
self.assert_num_bots_equal(1)
result = self.client_delete("/json/bots/bogus-bot@zulip.com")
self.assert_json_error(result, 'No such bot')
self.assert_num_bots_equal(1)
def test_bot_deactivation_attacks(self):
# type: () -> None
"""You cannot deactivate somebody else's bot."""
self.login("hamlet@zulip.com")
self.assert_num_bots_equal(0)
self.create_bot()
self.assert_num_bots_equal(1)
# Have Othello try to deactivate both Hamlet and
# Hamlet's bot.
self.login("othello@zulip.com")
# Can not deactivate a user as a bot
result = self.client_delete("/json/bots/hamlet@zulip.com")
self.assert_json_error(result, 'No such bot')
result = self.client_delete("/json/bots/hambot-bot@zulip.com")
self.assert_json_error(result, 'Insufficient permission')
# But we don't actually deactivate the other person's bot.
self.login("hamlet@zulip.com")
self.assert_num_bots_equal(1)
# Can not deactivate a bot as a user
result = self.client_delete("/json/users/hambot-bot@zulip.com")
self.assert_json_error(result, 'No such user')
self.assert_num_bots_equal(1)
def test_bot_permissions(self):
# type: () -> None
self.login("hamlet@zulip.com")
self.assert_num_bots_equal(0)
self.create_bot()
self.assert_num_bots_equal(1)
# Have Othello try to mess with Hamlet's bots.
self.login("othello@zulip.com")
result = self.client_post("/json/bots/hambot-bot@zulip.com/api_key/regenerate")
self.assert_json_error(result, 'Insufficient permission')
bot_info = {
'full_name': 'Fred',
}
result = self.client_patch("/json/bots/hambot-bot@zulip.com", bot_info)
self.assert_json_error(result, 'Insufficient permission')
def get_bot(self):
# type: () -> Dict[str, Any]
result = self.client_get("/json/bots")
bots = ujson.loads(result.content)['bots']
return bots[0]
def test_update_api_key(self):
# type: () -> None
self.login("hamlet@zulip.com")
self.create_bot()
bot = self.get_bot()
old_api_key = bot['api_key']
result = self.client_post('/json/bots/hambot-bot@zulip.com/api_key/regenerate')
self.assert_json_success(result)
new_api_key = ujson.loads(result.content)['api_key']
self.assertNotEqual(old_api_key, new_api_key)
bot = self.get_bot()
self.assertEqual(new_api_key, bot['api_key'])
def test_update_api_key_for_invalid_user(self):
# type: () -> None
self.login("hamlet@zulip.com")
result = self.client_post('/json/bots/nonexistentuser@zulip.com/api_key/regenerate')
self.assert_json_error(result, 'No such user')
def test_patch_bot_full_name(self):
# type: () -> None
self.login("hamlet@zulip.com")
bot_info = {
'full_name': 'The Bot of Hamlet',
'short_name': 'hambot',
}
result = self.client_post("/json/bots", bot_info)
self.assert_json_success(result)
bot_info = {
'full_name': 'Fred',
}
result = self.client_patch("/json/bots/hambot-bot@zulip.com", bot_info)
self.assert_json_success(result)
full_name = ujson.loads(result.content)['full_name']
self.assertEqual('Fred', full_name)
bot = self.get_bot()
self.assertEqual('Fred', bot['full_name'])
def test_patch_bot_avatar(self):
# type: () -> None
self.login("hamlet@zulip.com")
bot_info = {
'full_name': 'The Bot of Hamlet',
'short_name': 'hambot',
}
result = self.client_post("/json/bots", bot_info)
self.assert_json_success(result)
profile = get_user_profile_by_email('hambot-bot@zulip.com')
self.assertEqual(profile.avatar_source, UserProfile.AVATAR_FROM_GRAVATAR)
# Try error case first (too many files):
with open(os.path.join(TEST_AVATAR_DIR, 'img.png'), 'rb') as fp1, \
open(os.path.join(TEST_AVATAR_DIR, 'img.gif'), 'rb') as fp2:
result = self.client_patch_multipart(
'/json/bots/hambot-bot@zulip.com',
dict(file1=fp1, file2=fp2))
self.assert_json_error(result, 'You may only upload one file at a time')
# HAPPY PATH
with open(os.path.join(TEST_AVATAR_DIR, 'img.png'), 'rb') as fp:
result = self.client_patch_multipart(
'/json/bots/hambot-bot@zulip.com',
dict(file=fp))
self.assert_json_success(result)
profile = get_user_profile_by_email('hambot-bot@zulip.com')
self.assertEqual(profile.avatar_source, UserProfile.AVATAR_FROM_USER)
# TODO: check img.png was uploaded properly
def test_patch_bot_to_stream(self):
# type: () -> None
self.login("hamlet@zulip.com")
bot_info = {
'full_name': 'The Bot of Hamlet',
'short_name': 'hambot',
}
result = self.client_post("/json/bots", bot_info)
self.assert_json_success(result)
bot_info = {
'default_sending_stream': 'Denmark',
}
result = self.client_patch("/json/bots/hambot-bot@zulip.com", bot_info)
self.assert_json_success(result)
default_sending_stream = ujson.loads(result.content)['default_sending_stream']
self.assertEqual('Denmark', default_sending_stream)
bot = self.get_bot()
self.assertEqual('Denmark', bot['default_sending_stream'])
def test_patch_bot_to_stream_not_subscribed(self):
# type: () -> None
self.login("hamlet@zulip.com")
bot_info = {
'full_name': 'The Bot of Hamlet',
'short_name': 'hambot',
}
result = self.client_post("/json/bots", bot_info)
self.assert_json_success(result)
bot_info = {
'default_sending_stream': 'Rome',
}
result = self.client_patch("/json/bots/hambot-bot@zulip.com", bot_info)
self.assert_json_success(result)
default_sending_stream = ujson.loads(result.content)['default_sending_stream']
self.assertEqual('Rome', default_sending_stream)
bot = self.get_bot()
self.assertEqual('Rome', bot['default_sending_stream'])
def test_patch_bot_to_stream_none(self):
# type: () -> None
self.login("hamlet@zulip.com")
bot_info = {
'full_name': 'The Bot of Hamlet',
'short_name': 'hambot',
}
result = self.client_post("/json/bots", bot_info)
self.assert_json_success(result)
bot_info = {
'default_sending_stream': '',
}
result = self.client_patch("/json/bots/hambot-bot@zulip.com", bot_info)
self.assert_json_success(result)
default_sending_stream = ujson.loads(result.content)['default_sending_stream']
self.assertEqual(None, default_sending_stream)
bot = self.get_bot()
self.assertEqual(None, bot['default_sending_stream'])
def test_patch_bot_to_stream_private_allowed(self):
# type: () -> None
self.login("hamlet@zulip.com")
user_profile = get_user_profile_by_email("hamlet@zulip.com")
stream = get_stream("Denmark", user_profile.realm)
do_add_subscription(user_profile, stream)
do_make_stream_private(user_profile.realm, "Denmark")
bot_info = {
'full_name': 'The Bot of Hamlet',
'short_name': 'hambot',
}
result = self.client_post("/json/bots", bot_info)
self.assert_json_success(result)
bot_info = {
'default_sending_stream': 'Denmark',
}
result = self.client_patch("/json/bots/hambot-bot@zulip.com", bot_info)
self.assert_json_success(result)
default_sending_stream = ujson.loads(result.content)['default_sending_stream']
self.assertEqual('Denmark', default_sending_stream)
bot = self.get_bot()
self.assertEqual('Denmark', bot['default_sending_stream'])
def test_patch_bot_to_stream_private_denied(self):
# type: () -> None
self.login("hamlet@zulip.com")
user_profile = get_user_profile_by_email("hamlet@zulip.com")
stream = get_stream("Denmark", user_profile.realm)
do_remove_subscription(user_profile, stream)
do_make_stream_private(user_profile.realm, "Denmark")
bot_info = {
'full_name': 'The Bot of Hamlet',
'short_name': 'hambot',
}
result = self.client_post("/json/bots", bot_info)
self.assert_json_success(result)
bot_info = {
'default_sending_stream': 'Denmark',
}
result = self.client_patch("/json/bots/hambot-bot@zulip.com", bot_info)
self.assert_json_error(result, 'Insufficient permission')
def test_patch_bot_to_stream_not_found(self):
# type: () -> None
self.login("hamlet@zulip.com")
bot_info = {
'full_name': 'The Bot of Hamlet',
'short_name': 'hambot',
}
result = self.client_post("/json/bots", bot_info)
self.assert_json_success(result)
bot_info = {
'default_sending_stream': 'missing',
}
result = self.client_patch("/json/bots/hambot-bot@zulip.com", bot_info)
self.assert_json_error(result, 'No such stream \'missing\'')
def test_patch_bot_events_register_stream(self):
# type: () -> None
self.login("hamlet@zulip.com")
bot_info = {
'full_name': 'The Bot of Hamlet',
'short_name': 'hambot',
}
result = self.client_post("/json/bots", bot_info)
self.assert_json_success(result)
bot_info = {
'default_events_register_stream': 'Denmark',
}
result = self.client_patch("/json/bots/hambot-bot@zulip.com", bot_info)
self.assert_json_success(result)
default_events_register_stream = ujson.loads(result.content)['default_events_register_stream']
self.assertEqual('Denmark', default_events_register_stream)
bot = self.get_bot()
self.assertEqual('Denmark', bot['default_events_register_stream'])
def test_patch_bot_events_register_stream_allowed(self):
# type: () -> None
self.login("hamlet@zulip.com")
user_profile = get_user_profile_by_email("hamlet@zulip.com")
stream = get_stream("Denmark", user_profile.realm)
do_add_subscription(user_profile, stream)
do_make_stream_private(user_profile.realm, "Denmark")
bot_info = {
'full_name': 'The Bot of Hamlet',
'short_name': 'hambot',
}
result = self.client_post("/json/bots", bot_info)
self.assert_json_success(result)
bot_info = {
'default_events_register_stream': 'Denmark',
}
result = self.client_patch("/json/bots/hambot-bot@zulip.com", bot_info)
self.assert_json_success(result)
default_events_register_stream = ujson.loads(result.content)['default_events_register_stream']
self.assertEqual('Denmark', default_events_register_stream)
bot = self.get_bot()
self.assertEqual('Denmark', bot['default_events_register_stream'])
def test_patch_bot_events_register_stream_denied(self):
# type: () -> None
self.login("hamlet@zulip.com")
user_profile = get_user_profile_by_email("hamlet@zulip.com")
stream = get_stream("Denmark", user_profile.realm)
do_remove_subscription(user_profile, stream)
do_make_stream_private(user_profile.realm, "Denmark")
bot_info = {
'full_name': 'The Bot of Hamlet',
'short_name': 'hambot',
}
result = self.client_post("/json/bots", bot_info)
self.assert_json_success(result)
bot_info = {
'default_events_register_stream': 'Denmark',
}
result = self.client_patch("/json/bots/hambot-bot@zulip.com", bot_info)
self.assert_json_error(result, 'Insufficient permission')
def test_patch_bot_events_register_stream_none(self):
# type: () -> None
self.login("hamlet@zulip.com")
bot_info = {
'full_name': 'The Bot of Hamlet',
'short_name': 'hambot',
}
result = self.client_post("/json/bots", bot_info)
self.assert_json_success(result)
bot_info = {
'default_events_register_stream': '',
}
result = self.client_patch("/json/bots/hambot-bot@zulip.com", bot_info)
self.assert_json_success(result)
default_events_register_stream = ujson.loads(result.content)['default_events_register_stream']
self.assertEqual(None, default_events_register_stream)
bot = self.get_bot()
self.assertEqual(None, bot['default_events_register_stream'])
def test_patch_bot_events_register_stream_not_found(self):
# type: () -> None
self.login("hamlet@zulip.com")
bot_info = {
'full_name': 'The Bot of Hamlet',
'short_name': 'hambot',
}
result = self.client_post("/json/bots", bot_info)
self.assert_json_success(result)
bot_info = {
'default_events_register_stream': 'missing',
}
result = self.client_patch("/json/bots/hambot-bot@zulip.com", bot_info)
self.assert_json_error(result, 'No such stream \'missing\'')
def test_patch_bot_default_all_public_streams_true(self):
# type: () -> None
self.login("hamlet@zulip.com")
bot_info = {
'full_name': 'The Bot of Hamlet',
'short_name': 'hambot',
}
result = self.client_post("/json/bots", bot_info)
self.assert_json_success(result)
bot_info = {
'default_all_public_streams': ujson.dumps(True),
}
result = self.client_patch("/json/bots/hambot-bot@zulip.com", bot_info)
self.assert_json_success(result)
default_events_register_stream = ujson.loads(result.content)['default_all_public_streams']
self.assertEqual(default_events_register_stream, True)
bot = self.get_bot()
self.assertEqual(bot['default_all_public_streams'], True)
def test_patch_bot_default_all_public_streams_false(self):
# type: () -> None
self.login("hamlet@zulip.com")
bot_info = {
'full_name': 'The Bot of Hamlet',
'short_name': 'hambot',
}
result = self.client_post("/json/bots", bot_info)
self.assert_json_success(result)
bot_info = {
'default_all_public_streams': ujson.dumps(False),
}
result = self.client_patch("/json/bots/hambot-bot@zulip.com", bot_info)
self.assert_json_success(result)
default_events_register_stream = ujson.loads(result.content)['default_all_public_streams']
self.assertEqual(default_events_register_stream, False)
bot = self.get_bot()
self.assertEqual(bot['default_all_public_streams'], False)
def test_patch_bot_via_post(self):
# type: () -> None
self.login("hamlet@zulip.com")
bot_info = {
'full_name': 'The Bot of Hamlet',
'short_name': 'hambot',
}
result = self.client_post("/json/bots", bot_info)
self.assert_json_success(result)
bot_info = {
'full_name': 'Fred',
'method': 'PATCH'
}
result = self.client_post("/json/bots/hambot-bot@zulip.com", bot_info)
self.assert_json_success(result)
full_name = ujson.loads(result.content)['full_name']
self.assertEqual('Fred', full_name)
bot = self.get_bot()
self.assertEqual('Fred', bot['full_name'])
def test_patch_bogus_bot(self):
# type: () -> None
"""Deleting a bogus bot will succeed silently."""
self.login("hamlet@zulip.com")
self.create_bot()
bot_info = {
'full_name': 'Fred',
}
result = self.client_patch("/json/bots/nonexistent-bot@zulip.com", bot_info)
self.assert_json_error(result, 'No such user')
self.assert_num_bots_equal(1)
class ChangeSettingsTest(ZulipTestCase):
def check_well_formed_change_settings_response(self, result):
# type: (Dict[str, Any]) -> None
self.assertIn("full_name", result)
def check_for_toggle_param(self, pattern, param):
# type: (str, str) -> None
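# Posts True and then False to the given endpoint for `param`, checking
# that the refetched UserProfile reflects each value in turn.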
self.login("hamlet@zulip.com")
user_profile = get_user_profile_by_email("hamlet@zulip.com")
json_result = self.client_post(pattern,
{param: ujson.dumps(True)})
self.assert_json_success(json_result)
# refetch user_profile object to correctly handle caching
user_profile = get_user_profile_by_email("hamlet@zulip.com")
self.assertEqual(getattr(user_profile, param), True)
json_result = self.client_post(pattern,
{param: ujson.dumps(False)})
self.assert_json_success(json_result)
# refetch user_profile object to correctly handle caching
user_profile = get_user_profile_by_email("hamlet@zulip.com")
self.assertEqual(getattr(user_profile, param), False)
def test_successful_change_settings(self):
# type: () -> None
"""
A call to /json/settings/change with valid parameters changes the user's
settings correctly and returns correct values.
"""
self.login("hamlet@zulip.com")
json_result = self.client_post("/json/settings/change",
dict(
full_name='Foo Bar',
old_password=initial_password('hamlet@zulip.com'),
new_password='foobar1',
confirm_password='foobar1',
)
)
self.assert_json_success(json_result)
result = ujson.loads(json_result.content)
self.check_well_formed_change_settings_response(result)
self.assertEqual(get_user_profile_by_email("hamlet@zulip.com").
full_name, "Foo Bar")
self.client_post('/accounts/logout/')
self.login("hamlet@zulip.com", "foobar1")
user_profile = get_user_profile_by_email('hamlet@zulip.com')
self.assertEqual(get_session_dict_user(self.client.session), user_profile.id)
def test_illegal_name_changes(self):
# type: () -> None
email = 'hamlet@zulip.com'
self.login(email)
user = get_user_profile_by_email(email)
full_name = user.full_name
with self.settings(NAME_CHANGES_DISABLED=True):
json_result = self.client_post("/json/settings/change",
dict(full_name='Foo Bar'))
# We actually fail silently here, since this only happens if
# somebody is trying to game our API, and there's no reason to
# give them the courtesy of an error reason.
self.assert_json_success(json_result)
user = get_user_profile_by_email(email)
self.assertEqual(user.full_name, full_name)
# Now try a too-long name
json_result = self.client_post("/json/settings/change",
dict(full_name='x' * 1000))
self.assert_json_error(json_result, 'Name too long!')
# This is basically a don't-explode test.
def test_notify_settings(self):
# type: () -> None
self.check_for_toggle_param("/json/notify_settings/change", "enable_desktop_notifications")
self.check_for_toggle_param("/json/notify_settings/change", "enable_stream_desktop_notifications")
self.check_for_toggle_param("/json/notify_settings/change", "enable_stream_sounds")
self.check_for_toggle_param("/json/notify_settings/change", "enable_sounds")
self.check_for_toggle_param("/json/notify_settings/change", "enable_offline_email_notifications")
self.check_for_toggle_param("/json/notify_settings/change", "enable_offline_push_notifications")
self.check_for_toggle_param("/json/notify_settings/change", "enable_digest_emails")
def test_ui_settings(self):
# type: () -> None
self.check_for_toggle_param("/json/ui_settings/change", "autoscroll_forever")
self.check_for_toggle_param("/json/ui_settings/change", "default_desktop_notifications")
def test_toggling_left_side_userlist(self):
# type: () -> None
self.check_for_toggle_param("/json/left_side_userlist", "left_side_userlist")
def test_time_setting(self):
# type: () -> None
self.check_for_toggle_param("/json/time_setting", "twenty_four_hour_time")
def test_enter_sends_setting(self):
# type: () -> None
self.check_for_toggle_param('/json/users/me/enter-sends', "enter_sends")
def test_mismatching_passwords(self):
# type: () -> None
"""
new_password and confirm_password must match
"""
self.login("hamlet@zulip.com")
result = self.client_post("/json/settings/change",
dict(
new_password="mismatched_password",
confirm_password="not_the_same",
)
)
self.assert_json_error(result,
"New password must match confirmation password!")
def test_wrong_old_password(self):
# type: () -> None
"""
Changing the password requires the correct old password.
"""
self.login("hamlet@zulip.com")
result = self.client_post("/json/settings/change",
dict(
old_password='bad_password',
new_password="ignored",
confirm_password="ignored",
)
)
self.assert_json_error(result, "Wrong password!")
def test_changing_nothing_returns_error(self):
# type: () -> None
"""
We need to supply at least one non-empty parameter
to this API, or it should fail. (Eventually, we should
probably use a patch interface for these changes.)
"""
self.login("hamlet@zulip.com")
result = self.client_post("/json/settings/change",
dict(
old_password='ignored',
)
)
self.assert_json_error(result, "No new data supplied")
def test_change_default_language(self):
# type: () -> None
"""
Test changing the default language of the user.
"""
email = "hamlet@zulip.com"
self.login(email)
german = "de"
data = dict(default_language=ujson.dumps(german))
result = self.client_post("/json/language_setting", data)
self.assert_json_success(result)
user_profile = get_user_profile_by_email(email)
self.assertEqual(user_profile.default_language, german)
# Test to make sure invalid languages are not accepted
# and saved in the db.
invalid_lang = "invalid_lang"
data = dict(default_language=ujson.dumps(invalid_lang))
result = self.client_post("/json/language_setting", data)
self.assert_json_error(result, "Invalid language '%s'" % (invalid_lang,))
user_profile = get_user_profile_by_email(email)
self.assertNotEqual(user_profile.default_language, invalid_lang)
class GetProfileTest(ZulipTestCase):
def common_update_pointer(self, email, pointer):
# type: (text_type, int) -> None
self.login(email)
result = self.client_put("/json/users/me/pointer", {"pointer": pointer})
self.assert_json_success(result)
def common_get_profile(self, email):
# type: (str) -> Dict[text_type, Any]
user_profile = get_user_profile_by_email(email)
self.send_message(email, "Verona", Recipient.STREAM, "hello")
result = self.client_get("/api/v1/users/me", **self.api_auth(email))
max_id = most_recent_message(user_profile).id
self.assert_json_success(result)
json = ujson.loads(result.content)
self.assertIn("client_id", json)
self.assertIn("max_message_id", json)
self.assertIn("pointer", json)
self.assertEqual(json["max_message_id"], max_id)
return json
def test_get_pointer(self):
# type: () -> None
email = "hamlet@zulip.com"
self.login(email)
result = self.client_get("/json/users/me/pointer")
self.assert_json_success(result)
json = ujson.loads(result.content)
self.assertIn("pointer", json)
def test_cache_behavior(self):
# type: () -> None
with queries_captured() as queries:
with simulated_empty_cache() as cache_queries:
user_profile = get_user_profile_by_email('hamlet@zulip.com')
self.assert_length(queries, 1)
self.assert_length(cache_queries, 1, exact=True)
self.assertEqual(user_profile.email, 'hamlet@zulip.com')
def test_api_get_empty_profile(self):
# type: () -> None
"""
Ensure GET /users/me returns successfully and includes a max message id
"""
json = self.common_get_profile("othello@zulip.com")
self.assertEqual(json["pointer"], -1)
def test_profile_with_pointer(self):
# type: () -> None
"""
Ensure GET /users/me returns a proper pointer id after the pointer is updated
"""
id1 = self.send_message("othello@zulip.com", "Verona", Recipient.STREAM)
id2 = self.send_message("othello@zulip.com", "Verona", Recipient.STREAM)
json = self.common_get_profile("hamlet@zulip.com")
self.common_update_pointer("hamlet@zulip.com", id2)
json = self.common_get_profile("hamlet@zulip.com")
self.assertEqual(json["pointer"], id2)
self.common_update_pointer("hamlet@zulip.com", id1)
json = self.common_get_profile("hamlet@zulip.com")
self.assertEqual(json["pointer"], id2) # pointer does not move backwards
result = self.client_put("/json/users/me/pointer", {"pointer": 99999999})
self.assert_json_error(result, "Invalid message ID")
def test_get_all_profiles_avatar_urls(self):
# type: () -> None
user_profile = get_user_profile_by_email('hamlet@zulip.com')
result = self.client_get("/api/v1/users", **self.api_auth('hamlet@zulip.com'))
self.assert_json_success(result)
json = ujson.loads(result.content)
for user in json['members']:
if user['email'] == 'hamlet@zulip.com':
self.assertEqual(
user['avatar_url'],
get_avatar_url(user_profile.avatar_source, user_profile.email),
)
class HomeTest(ZulipTestCase):
@slow('big method')
def test_home(self):
# type: () -> None
# Keep this list sorted!!!
html_bits = [
'Compose your message here...',
'Exclude messages with topic',
'Get started',
'Keyboard shortcuts',
'Loading...',
'Manage Streams',
'Narrow by topic',
'Next message',
'SHARE THE LOVE',
'Search streams',
'Welcome to Zulip',
'pygments.css',
'var page_params',
]
# Keep this list sorted!!!
expected_keys = [
"alert_words",
"autoscroll_forever",
"avatar_url",
"bot_list",
"can_create_streams",
"cross_realm_user_emails",
"debug_mode",
"default_desktop_notifications",
"default_language",
"default_language_name",
"desktop_notifications_enabled",
"development_environment",
"domain",
"email",
"email_dict",
"enable_digest_emails",
"enable_offline_email_notifications",
"enable_offline_push_notifications",
"enter_sends",
"event_queue_id",
"first_in_realm",
"fullname",
"furthest_read_time",
"has_mobile_devices",
"have_initial_messages",
"initial_pointer",
"initial_presences",
"initial_servertime",
"is_admin",
"is_zephyr_mirror_realm",
"language_list",
"language_list_dbl_col",
"last_event_id",
"left_side_userlist",
"login_page",
"mandatory_topics",
"max_message_id",
"maxfilesize",
"muted_topics",
"name_changes_disabled",
"narrow",
"narrow_stream",
"needs_tutorial",
"neversubbed_info",
"notifications_stream",
"password_auth_enabled",
"people_list",
"poll_timeout",
"presence_disabled",
"product_name",
"prompt_for_invites",
"realm_allow_message_editing",
"realm_create_stream_by_admins_only",
"realm_default_language",
"realm_default_streams",
"realm_emoji",
"realm_filters",
"realm_invite_by_admins_only",
"realm_invite_required",
"realm_message_content_edit_limit_seconds",
"realm_name",
"realm_restricted_to_domain",
"realm_uri",
"referrals",
"save_stacktraces",
"server_generation",
"server_uri",
"share_the_love",
"show_digest_email",
"sounds_enabled",
"stream_desktop_notifications_enabled",
"stream_sounds_enabled",
"subbed_info",
"test_suite",
"twenty_four_hour_time",
"unread_count",
"unsubbed_info",
"user_id",
"zulip_version",
]
email = "hamlet@zulip.com"
# Verify fails if logged-out
result = self.client_get('/')
self.assertEqual(result.status_code, 302)
# Verify succeeds once logged-in
self.login(email)
result = self._get_home_page(stream='Denmark')
html = result.content.decode('utf-8')
for html_bit in html_bits:
if html_bit not in html:
self.fail('%s not in result' % (html_bit,))
page_params = self._get_page_params(result)
actual_keys = sorted([str(k) for k in page_params.keys()])
self.assertEqual(actual_keys, expected_keys)
# TODO: Inspect the page_params data further.
# print(ujson.dumps(page_params, indent=2))
def _get_home_page(self, **kwargs):
# type: (**Any) -> HttpResponse
with \
patch('zerver.lib.actions.request_event_queue', return_value=42), \
patch('zerver.lib.actions.get_user_events', return_value=[]):
result = self.client_get('/', dict(**kwargs))
return result
def _get_page_params(self, result):
# type: (HttpResponse) -> Dict[str, Any]
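# Scrape the `var page_params = {...};` assignment out of the rendered
# home page HTML and decode the JSON literal.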
html = result.content.decode('utf-8')
lines = html.split('\n')
page_params_line = [l for l in lines if l.startswith('var page_params')][0]
page_params_json = page_params_line.split(' = ')[1].rstrip(';')
page_params = ujson.loads(page_params_json)
return page_params
def _sanity_check(self, result):
# type: (HttpResponse) -> None
'''
Use this for tests that are geared toward specific edge cases, but
which still want the home page to load properly.
'''
html = result.content.decode('utf-8')
if 'Compose your message' not in html:
self.fail('Home page probably did not load.')
def test_terms_of_service(self):
# type: () -> None
email = 'hamlet@zulip.com'
self.login(email)
with \
self.settings(TERMS_OF_SERVICE='whatever'), \
self.settings(TOS_VERSION='99.99'):
result = self.client_get('/', dict(stream='Denmark'))
html = result.content.decode('utf-8')
self.assertIn('There is a new terms of service', html)
def test_bad_narrow(self):
# type: () -> None
email = 'hamlet@zulip.com'
self.login(email)
with patch('logging.exception') as mock:
result = self._get_home_page(stream='Invalid Stream')
mock.assert_called_once_with('Narrow parsing')
self._sanity_check(result)
def test_bad_pointer(self):
# type: () -> None
email = 'hamlet@zulip.com'
user_profile = get_user_profile_by_email(email)
user_profile.pointer = 999999
user_profile.save()
self.login(email)
with patch('logging.warning') as mock:
result = self._get_home_page()
mock.assert_called_once_with('hamlet@zulip.com has invalid pointer 999999')
self._sanity_check(result)
def test_topic_narrow(self):
# type: () -> None
email = 'hamlet@zulip.com'
self.login(email)
result = self._get_home_page(stream='Denmark', topic='lunch')
self._sanity_check(result)
html = result.content.decode('utf-8')
self.assertIn('lunch', html)
def test_notifications_stream(self):
# type: () -> None
email = 'hamlet@zulip.com'
realm = get_realm('zulip.com')
realm.notifications_stream = get_stream('Denmark', realm)
realm.save()
self.login(email)
result = self._get_home_page()
page_params = self._get_page_params(result)
self.assertEqual(page_params['notifications_stream'], 'Denmark')
def test_new_stream(self):
# type: () -> None
email = 'hamlet@zulip.com'
stream_name = 'New stream'
self.subscribe_to_stream(email, stream_name)
self.login(email)
result = self._get_home_page(stream=stream_name)
page_params = self._get_page_params(result)
self.assertEqual(page_params['narrow_stream'], stream_name)
self.assertEqual(page_params['narrow'], [dict(operator='stream', operand=stream_name)])
self.assertEqual(page_params['initial_pointer'], -1)
self.assertEqual(page_params['max_message_id'], -1)
self.assertEqual(page_params['have_initial_messages'], False)
def test_invites_by_admins_only(self):
# type: () -> None
email = 'hamlet@zulip.com'
user_profile = get_user_profile_by_email(email)
realm = user_profile.realm
realm.invite_by_admins_only = True
realm.save()
self.login(email)
self.assertFalse(user_profile.is_realm_admin)
result = self._get_home_page()
html = result.content.decode('utf-8')
self.assertNotIn('Invite more users', html)
user_profile.is_realm_admin = True
user_profile.save()
result = self._get_home_page()
html = result.content.decode('utf-8')
self.assertIn('Invite more users', html)
class MutedTopicsTests(ZulipTestCase):
def test_json_set(self):
# type: () -> None
email = 'hamlet@zulip.com'
self.login(email)
url = '/json/set_muted_topics'
data = {'muted_topics': '[["stream", "topic"]]'}
result = self.client_post(url, data)
self.assert_json_success(result)
user = get_user_profile_by_email(email)
self.assertEqual(ujson.loads(user.muted_topics), [["stream", "topic"]])
url = '/json/set_muted_topics'
data = {'muted_topics': '[["stream2", "topic2"]]'}
result = self.client_post(url, data)
self.assert_json_success(result)
user = get_user_profile_by_email(email)
self.assertEqual(ujson.loads(user.muted_topics), [["stream2", "topic2"]])
class ExtractedRecipientsTest(TestCase):
def test_extract_recipients(self):
# type: () -> None
# JSON list w/dups, empties, and trailing whitespace
s = ujson.dumps([' alice@zulip.com ', ' bob@zulip.com ', ' ', 'bob@zulip.com'])
self.assertEqual(sorted(extract_recipients(s)), ['alice@zulip.com', 'bob@zulip.com'])
# simple string with one name
s = 'alice@zulip.com '
self.assertEqual(extract_recipients(s), ['alice@zulip.com'])
# JSON-encoded string
s = '"alice@zulip.com"'
self.assertEqual(extract_recipients(s), ['alice@zulip.com'])
# bare comma-delimited string
s = 'bob@zulip.com, alice@zulip.com'
self.assertEqual(sorted(extract_recipients(s)), ['alice@zulip.com', 'bob@zulip.com'])
# JSON-encoded, comma-delimited string
s = '"bob@zulip.com,alice@zulip.com"'
self.assertEqual(sorted(extract_recipients(s)), ['alice@zulip.com', 'bob@zulip.com'])
# TODO: This class currently only tests the default-off
# SEND_MISSED_MESSAGE_EMAILS_AS_USER=True case. We should refactor it
# to test both cases (the False case being the most important).
class TestMissedMessages(ZulipTestCase):
def normalize_string(self, s):
# type: (text_type) -> text_type
s = s.strip()
return re.sub(r'\s+', ' ', s)
@override_settings(SEND_MISSED_MESSAGE_EMAILS_AS_USER=True)
@patch('zerver.lib.email_mirror.generate_random_token')
def test_extra_context_in_missed_stream_messages(self, mock_random_token):
# type: (MagicMock) -> None
tokens = [str(random.getrandbits(32)) for _ in range(30)]
mock_random_token.side_effect = tokens
self.send_message("othello@zulip.com", "Denmark", Recipient.STREAM, '0')
self.send_message("othello@zulip.com", "Denmark", Recipient.STREAM, '1')
self.send_message("othello@zulip.com", "Denmark", Recipient.STREAM, '2')
self.send_message("othello@zulip.com", "Denmark", Recipient.STREAM, '3')
self.send_message("othello@zulip.com", "Denmark", Recipient.STREAM, '4')
self.send_message("othello@zulip.com", "Denmark", Recipient.STREAM, '5')
self.send_message("othello@zulip.com", "Denmark", Recipient.STREAM, '6')
self.send_message("othello@zulip.com", "Denmark", Recipient.STREAM, '7')
self.send_message("othello@zulip.com", "Denmark", Recipient.STREAM, '8')
self.send_message("othello@zulip.com", "Denmark", Recipient.STREAM, '9')
self.send_message("othello@zulip.com", "Denmark", Recipient.STREAM, '10')
self.send_message("othello@zulip.com", "Denmark", Recipient.STREAM, '11', subject='test2')
msg_id = self.send_message("othello@zulip.com", "denmark", Recipient.STREAM, '@**hamlet**')
othello = get_user_profile_by_email('othello@zulip.com')
hamlet = get_user_profile_by_email('hamlet@zulip.com')
handle_missedmessage_emails(hamlet.id, [{'message_id': msg_id}])
msg = mail.outbox[0]
reply_to_addresses = [settings.EMAIL_GATEWAY_PATTERN % (u'mm' + t)
for t in tokens]
sender = 'Zulip <{}>'.format(settings.NOREPLY_EMAIL_ADDRESS)
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(msg.from_email, '"%s" <%s>' % (othello.full_name, othello.email))
self.assertIn(msg.extra_headers['Reply-To'], reply_to_addresses)
self.assertEqual(msg.extra_headers['Sender'], sender)
self.assertIn(
'Denmark > test Othello, the Moor of Venice 1 2 3 4 5 6 7 8 9 10 @**hamlet**',
self.normalize_string(mail.outbox[0].body),
)
@override_settings(SEND_MISSED_MESSAGE_EMAILS_AS_USER=True)
@patch('zerver.lib.email_mirror.generate_random_token')
def test_extra_context_in_personal_missed_stream_messages(self, mock_random_token):
# type: (MagicMock) -> None
tokens = [str(random.getrandbits(32)) for _ in range(30)]
mock_random_token.side_effect = tokens
msg_id = self.send_message("othello@zulip.com", "hamlet@zulip.com",
Recipient.PERSONAL,
'Extremely personal message!')
othello = get_user_profile_by_email('othello@zulip.com')
hamlet = get_user_profile_by_email('hamlet@zulip.com')
handle_missedmessage_emails(hamlet.id, [{'message_id': msg_id}])
msg = mail.outbox[0]
reply_to_addresses = [settings.EMAIL_GATEWAY_PATTERN % (u'mm' + t)
for t in tokens]
sender = 'Zulip <{}>'.format(settings.NOREPLY_EMAIL_ADDRESS)
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(msg.from_email, '"%s" <%s>' % (othello.full_name, othello.email))
self.assertIn(msg.extra_headers['Reply-To'], reply_to_addresses)
self.assertEqual(msg.extra_headers['Sender'], sender)
self.assertIn('You and Othello, the Moor of Venice Extremely personal message!',
self.normalize_string(msg.body))
@override_settings(SEND_MISSED_MESSAGE_EMAILS_AS_USER=True)
@patch('zerver.lib.email_mirror.generate_random_token')
def test_extra_context_in_huddle_missed_stream_messages(self, mock_random_token):
# type: (MagicMock) -> None
tokens = [str(random.getrandbits(32)) for _ in range(30)]
mock_random_token.side_effect = tokens
msg_id = self.send_message("othello@zulip.com",
["hamlet@zulip.com", "iago@zulip.com"],
Recipient.PERSONAL,
'Group personal message!')
othello = get_user_profile_by_email('othello@zulip.com')
hamlet = get_user_profile_by_email('hamlet@zulip.com')
handle_missedmessage_emails(hamlet.id, [{'message_id': msg_id}])
msg = mail.outbox[0]
reply_to_addresses = [settings.EMAIL_GATEWAY_PATTERN % (u'mm' + t)
for t in tokens]
sender = 'Zulip <{}>'.format(settings.NOREPLY_EMAIL_ADDRESS)
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(msg.from_email, '"%s" <%s>' % (othello.full_name, othello.email))
self.assertIn(msg.extra_headers['Reply-To'], reply_to_addresses)
self.assertEqual(msg.extra_headers['Sender'], sender)
body = ('You and Iago, Othello, the Moor of Venice Othello,'
' the Moor of Venice Group personal message')
self.assertIn(body, self.normalize_string(msg.body))
class TestOpenRealms(ZulipTestCase):
def test_open_realm_logic(self):
# type: () -> None
mit_realm = get_realm("mit.edu")
self.assertEqual(get_unique_open_realm(), None)
mit_realm.restricted_to_domain = False
mit_realm.save()
self.assertTrue(completely_open(mit_realm.domain))
self.assertEqual(get_unique_open_realm(), None)
with self.settings(SYSTEM_ONLY_REALMS={"zulip.com"}):
self.assertEqual(get_unique_open_realm(), mit_realm)
mit_realm.restricted_to_domain = True
mit_realm.save()
|
from .sentence_dataloader import *
|
import time
from inky_fork import InkyPHAT, InkyWHAT
from PIL import Image
from PIL import ImageDraw
from PIL import ImageFont
from datetime import datetime
from time import gmtime, strftime
inky_display = InkyWHAT("black_fast")
font = ImageFont.truetype("Nunito-ExtraLight.ttf", 130)
i = 10
while True:
image = Image.new("P", (inky_display.WIDTH, inky_display.HEIGHT))
draw = ImageDraw.Draw(image)
draw.rectangle((0, 0, inky_display.WIDTH, inky_display.HEIGHT), fill=inky_display.WHITE)  # clear the full canvas before drawing the new count
draw.text((100, 100), str(i), inky_display.BLACK, font)
inky_display.set_image(image)
inky_display.show()
i = i + 1
time.sleep(1)
|
while True:
n = int(input("Quer ver a tabuada de qual valor? "))
if n < 0:
print('-=' * 19)
print('MULTIPLICATION TABLE PROGRAM ENDED, COME BACK ANYTIME')
break
print('-=' * 19)
for c in range(1, 11):
print(f'{n} x {c:2} = {n*c:2}')
|
#!/usr/bin/python
#
# Copyright 2016 Red Hat | Ansible
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = '''
module: docker_network
short_description: Manage Docker networks
description:
- Create/remove Docker networks and connect containers to them.
- Performs largely the same function as the "docker network" CLI subcommand.
options:
name:
description:
- Name of the network to operate on.
type: str
required: yes
aliases:
- network_name
connected:
description:
- List of container names or container IDs to connect to a network.
- Please note that the module only makes sure that these containers are connected to the network,
but does not care about connection options. If you rely on specific IP addresses etc., use the
M(community.general.docker_container) module to ensure your containers are correctly connected to this network.
type: list
elements: str
aliases:
- containers
driver:
description:
- Specify the type of network. Docker provides bridge and overlay drivers, but 3rd party drivers can also be used.
type: str
default: bridge
driver_options:
description:
- Dictionary of network settings. Consult docker docs for valid options and values.
type: dict
force:
description:
- With state C(absent) forces disconnecting all containers from the
network prior to deleting the network. With state C(present) will
disconnect all containers, delete the network and re-create the
network.
- This option is required if you have changed the IPAM or driver options
and want an existing network to be updated to use the new options.
type: bool
default: no
appends:
description:
- By default the connected list is canonical, meaning containers not on the list are removed from the network.
- Use I(appends) to leave existing containers connected.
type: bool
default: no
aliases:
- incremental
enable_ipv6:
description:
- Enable IPv6 networking.
type: bool
ipam_driver:
description:
- Specify an IPAM driver.
type: str
ipam_driver_options:
description:
- Dictionary of IPAM driver options.
type: dict
ipam_options:
description:
- Dictionary of IPAM options.
- Deprecated in 2.8, will be removed in community.general 2.0.0. Use parameter I(ipam_config) instead. In Docker 1.10.0, IPAM
options were introduced (see L(here,https://github.com/moby/moby/pull/17316)). This module parameter addresses
the IPAM config not the newly introduced IPAM options. For the IPAM options, see the I(ipam_driver_options)
parameter.
type: dict
suboptions:
subnet:
description:
- IP subnet in CIDR notation.
type: str
iprange:
description:
- IP address range in CIDR notation.
type: str
gateway:
description:
- IP gateway address.
type: str
aux_addresses:
description:
- Auxiliary IP addresses used by Network driver, as a mapping from hostname to IP.
type: dict
ipam_config:
description:
- List of IPAM config blocks. Consult
L(Docker docs,https://docs.docker.com/compose/compose-file/compose-file-v2/#ipam) for valid options and values.
Note that I(iprange) is spelled differently here (we use the notation from the Docker SDK for Python).
type: list
elements: dict
suboptions:
subnet:
description:
- IP subnet in CIDR notation.
type: str
iprange:
description:
- IP address range in CIDR notation.
type: str
gateway:
description:
- IP gateway address.
type: str
aux_addresses:
description:
- Auxiliary IP addresses used by Network driver, as a mapping from hostname to IP.
type: dict
state:
description:
- C(absent) deletes the network. If a network has connected containers, it
cannot be deleted. Use the I(force) option to disconnect all containers
and delete the network.
- C(present) creates the network, if it does not already exist with the
specified parameters, and connects the list of containers provided via
the connected parameter. Containers not on the list will be disconnected.
An empty list will leave no containers connected to the network. Use the
I(appends) option to leave existing containers connected. Use the I(force)
options to force re-creation of the network.
type: str
default: present
choices:
- absent
- present
internal:
description:
- Restrict external access to the network.
type: bool
labels:
description:
- Dictionary of labels.
type: dict
scope:
description:
- Specify the network's scope.
type: str
choices:
- local
- global
- swarm
attachable:
description:
- If enabled, and the network is in the global scope, non-service containers on worker nodes will be able to connect to the network.
type: bool
extends_documentation_fragment:
- community.general.docker
- community.general.docker.docker_py_1_documentation
notes:
- When network options are changed, the module disconnects all containers from the network, deletes the network, and re-creates the network.
It does not try to reconnect containers, except the ones listed in I(connected), and even for these, it does not consider specific
connection options like fixed IP addresses or MAC addresses. If you need more control over how the containers are connected to the
network, loop over your containers with the M(community.general.docker_container) module to make sure they are connected properly.
- The module does not support Docker Swarm, i.e. it will not try to disconnect or reconnect services. If services are connected to the
network, deleting the network will fail. When network options are changed, the network has to be deleted and recreated, so this will
fail as well.
author:
- "Ben Keith (@keitwb)"
- "Chris Houseknecht (@chouseknecht)"
- "Dave Bendit (@DBendit)"
requirements:
- "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 1.10.0 (use L(docker-py,https://pypi.org/project/docker-py/) for Python 2.6)"
- "The docker server >= 1.10.0"
'''
EXAMPLES = '''
- name: Create a network
community.general.docker_network:
name: network_one
- name: Remove all but selected list of containers
community.general.docker_network:
name: network_one
connected:
- container_a
- container_b
- container_c
- name: Remove a single container
community.general.docker_network:
name: network_one
connected: "{{ fulllist|difference(['container_a']) }}"
- name: Add a container to a network, leaving existing containers connected
community.general.docker_network:
name: network_one
connected:
- container_a
appends: yes
- name: Create a network with driver options
community.general.docker_network:
name: network_two
driver_options:
com.docker.network.bridge.name: net2
- name: Create a network with custom IPAM config
community.general.docker_network:
name: network_three
ipam_config:
- subnet: 172.3.27.0/24
gateway: 172.3.27.2
iprange: 172.3.27.0/26
aux_addresses:
host1: 172.3.27.3
host2: 172.3.27.4
- name: Create a network with labels
community.general.docker_network:
name: network_four
labels:
key1: value1
key2: value2
- name: Create a network with IPv6 IPAM config
community.general.docker_network:
name: network_ipv6_one
enable_ipv6: yes
ipam_config:
- subnet: fdd1:ac8c:0557:7ce1::/64
- name: Create a network with IPv6 and custom IPv4 IPAM config
community.general.docker_network:
name: network_ipv6_two
enable_ipv6: yes
ipam_config:
- subnet: 172.4.27.0/24
- subnet: fdd1:ac8c:0557:7ce2::/64
- name: Delete a network, disconnecting all containers
community.general.docker_network:
name: network_one
state: absent
force: yes
'''
RETURN = '''
network:
description:
- Network inspection results for the affected network.
- Note that facts are part of the registered vars since Ansible 2.8. For compatibility reasons, the facts
are also accessible directly as C(docker_network). Note that the returned fact will be removed in community.general 2.0.0.
returned: success
type: dict
sample: {}
'''
import re
import traceback
from distutils.version import LooseVersion
from ansible_collections.community.general.plugins.module_utils.docker.common import (
AnsibleDockerClient,
DockerBaseClass,
docker_version,
DifferenceTracker,
clean_dict_booleans_for_docker_api,
RequestException,
)
try:
from docker import utils
from docker.errors import DockerException
if LooseVersion(docker_version) >= LooseVersion('2.0.0'):
from docker.types import IPAMPool, IPAMConfig
except Exception:
# missing Docker SDK for Python handled in ansible.module_utils.docker.common
pass
class TaskParameters(DockerBaseClass):
def __init__(self, client):
super(TaskParameters, self).__init__()
self.client = client
self.name = None
self.connected = None
self.driver = None
self.driver_options = None
self.ipam_driver = None
self.ipam_driver_options = None
self.ipam_options = None
self.ipam_config = None
self.appends = None
self.force = None
self.internal = None
self.labels = None
self.debug = None
self.enable_ipv6 = None
self.scope = None
self.attachable = None
for key, value in client.module.params.items():
setattr(self, key, value)
def container_names_in_network(network):
return [c['Name'] for c in network['Containers'].values()] if network['Containers'] else []
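# Illustrative behaviour (editor's note, not part of the original module): given
# inspection output such as {'Containers': {'abc123': {'Name': 'container_a'}}}
# this helper returns ['container_a']; an empty or None 'Containers' value yields [].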
CIDR_IPV4 = re.compile(r'^([0-9]{1,3}\.){3}[0-9]{1,3}/([0-9]|[1-2][0-9]|3[0-2])$')
CIDR_IPV6 = re.compile(r'^[0-9a-fA-F:]+/([0-9]|[1-9][0-9]|1[0-2][0-9])$')
def validate_cidr(cidr):
"""Validate CIDR. Return IP version of a CIDR string on success.
:param cidr: Valid CIDR
:type cidr: str
:return: ``ipv4`` or ``ipv6``
:rtype: str
:raises ValueError: If ``cidr`` is not a valid CIDR
"""
if CIDR_IPV4.match(cidr):
return 'ipv4'
elif CIDR_IPV6.match(cidr):
return 'ipv6'
raise ValueError('"{0}" is not a valid CIDR'.format(cidr))
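# Illustrative usage (editor's sketch, not part of the original module). The helper
# returns the IP version string for a well-formed CIDR and raises ValueError otherwise:
#
#     >>> validate_cidr('172.3.27.0/24')
#     'ipv4'
#     >>> validate_cidr('fdd1:ac8c:0557:7ce1::/64')
#     'ipv6'
#     >>> validate_cidr('192.168.0.0')  # missing prefix length
#     Traceback (most recent call last):
#         ...
#     ValueError: "192.168.0.0" is not a valid CIDR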
def normalize_ipam_config_key(key):
"""Normalizes IPAM config keys returned by Docker API to match Ansible keys.
:param key: Docker API key
:type key: str
:return Ansible module key
:rtype str
"""
special_cases = {
'AuxiliaryAddresses': 'aux_addresses'
}
return special_cases.get(key, key.lower())
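# Illustrative mapping (editor's sketch): Docker API keys are simply lowercased unless
# they need special handling, e.g.
#     normalize_ipam_config_key('Subnet')             -> 'subnet'
#     normalize_ipam_config_key('IPRange')            -> 'iprange'
#     normalize_ipam_config_key('AuxiliaryAddresses') -> 'aux_addresses'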
def dicts_are_essentially_equal(a, b):
"""Make sure that a is a subset of b, where None entries of a are ignored."""
for k, v in a.items():
if v is None:
continue
if b.get(k) != v:
return False
return True
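# Illustrative behaviour (editor's sketch): None entries in the first dict are ignored,
# so a module-side IPAM block with unspecified keys still matches the active config:
#     dicts_are_essentially_equal({'subnet': '172.3.27.0/24', 'gateway': None},
#                                 {'subnet': '172.3.27.0/24', 'gateway': '172.3.27.2'})  -> True
#     dicts_are_essentially_equal({'subnet': '10.0.0.0/24'},
#                                 {'subnet': '172.3.27.0/24'})                           -> False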
class DockerNetworkManager(object):
def __init__(self, client):
self.client = client
self.parameters = TaskParameters(client)
self.check_mode = self.client.check_mode
self.results = {
u'changed': False,
u'actions': []
}
self.diff = self.client.module._diff
self.diff_tracker = DifferenceTracker()
self.diff_result = dict()
self.existing_network = self.get_existing_network()
if not self.parameters.connected and self.existing_network:
self.parameters.connected = container_names_in_network(self.existing_network)
if (self.parameters.ipam_options['subnet'] or self.parameters.ipam_options['iprange'] or
self.parameters.ipam_options['gateway'] or self.parameters.ipam_options['aux_addresses']):
self.parameters.ipam_config = [self.parameters.ipam_options]
if self.parameters.ipam_config:
try:
for ipam_config in self.parameters.ipam_config:
validate_cidr(ipam_config['subnet'])
except ValueError as e:
self.client.fail(str(e))
if self.parameters.driver_options:
self.parameters.driver_options = clean_dict_booleans_for_docker_api(self.parameters.driver_options)
state = self.parameters.state
if state == 'present':
self.present()
elif state == 'absent':
self.absent()
if self.diff or self.check_mode or self.parameters.debug:
if self.diff:
self.diff_result['before'], self.diff_result['after'] = self.diff_tracker.get_before_after()
self.results['diff'] = self.diff_result
def get_existing_network(self):
return self.client.get_network(name=self.parameters.name)
def has_different_config(self, net):
'''
Evaluates an existing network and returns a tuple containing a boolean
indicating if the configuration is different and a list of differences.
:param net: the inspection output for an existing network
:return: (bool, list)
'''
differences = DifferenceTracker()
if self.parameters.driver and self.parameters.driver != net['Driver']:
differences.add('driver',
parameter=self.parameters.driver,
active=net['Driver'])
if self.parameters.driver_options:
if not net.get('Options'):
differences.add('driver_options',
parameter=self.parameters.driver_options,
active=net.get('Options'))
else:
for key, value in self.parameters.driver_options.items():
if not (key in net['Options']) or value != net['Options'][key]:
differences.add('driver_options.%s' % key,
parameter=value,
active=net['Options'].get(key))
if self.parameters.ipam_driver:
if not net.get('IPAM') or net['IPAM']['Driver'] != self.parameters.ipam_driver:
differences.add('ipam_driver',
parameter=self.parameters.ipam_driver,
active=net.get('IPAM'))
if self.parameters.ipam_driver_options is not None:
ipam_driver_options = net['IPAM'].get('Options') or {}
if ipam_driver_options != self.parameters.ipam_driver_options:
differences.add('ipam_driver_options',
parameter=self.parameters.ipam_driver_options,
active=ipam_driver_options)
if self.parameters.ipam_config is not None and self.parameters.ipam_config:
if not net.get('IPAM') or not net['IPAM']['Config']:
differences.add('ipam_config',
parameter=self.parameters.ipam_config,
active=net.get('IPAM', {}).get('Config'))
else:
# Put network's IPAM config into the same format as module's IPAM config
net_ipam_configs = []
for net_ipam_config in net['IPAM']['Config']:
config = dict()
for k, v in net_ipam_config.items():
config[normalize_ipam_config_key(k)] = v
net_ipam_configs.append(config)
# Compare lists of dicts as sets of dicts
for idx, ipam_config in enumerate(self.parameters.ipam_config):
net_config = dict()
for net_ipam_config in net_ipam_configs:
if dicts_are_essentially_equal(ipam_config, net_ipam_config):
net_config = net_ipam_config
break
for key, value in ipam_config.items():
if value is None:
# due to recursive argument_spec, all keys are always present
# (but have default value None if not specified)
continue
if value != net_config.get(key):
differences.add('ipam_config[%s].%s' % (idx, key),
parameter=value,
active=net_config.get(key))
if self.parameters.enable_ipv6 is not None and self.parameters.enable_ipv6 != net.get('EnableIPv6', False):
differences.add('enable_ipv6',
parameter=self.parameters.enable_ipv6,
active=net.get('EnableIPv6', False))
if self.parameters.internal is not None and self.parameters.internal != net.get('Internal', False):
differences.add('internal',
parameter=self.parameters.internal,
active=net.get('Internal'))
if self.parameters.scope is not None and self.parameters.scope != net.get('Scope'):
differences.add('scope',
parameter=self.parameters.scope,
active=net.get('Scope'))
if self.parameters.attachable is not None and self.parameters.attachable != net.get('Attachable', False):
differences.add('attachable',
parameter=self.parameters.attachable,
active=net.get('Attachable'))
if self.parameters.labels:
if not net.get('Labels'):
differences.add('labels',
parameter=self.parameters.labels,
active=net.get('Labels'))
else:
for key, value in self.parameters.labels.items():
if not (key in net['Labels']) or value != net['Labels'][key]:
differences.add('labels.%s' % key,
parameter=value,
active=net['Labels'].get(key))
return not differences.empty, differences
def create_network(self):
if not self.existing_network:
params = dict(
driver=self.parameters.driver,
options=self.parameters.driver_options,
)
ipam_pools = []
if self.parameters.ipam_config:
for ipam_pool in self.parameters.ipam_config:
if LooseVersion(docker_version) >= LooseVersion('2.0.0'):
ipam_pools.append(IPAMPool(**ipam_pool))
else:
ipam_pools.append(utils.create_ipam_pool(**ipam_pool))
if self.parameters.ipam_driver or self.parameters.ipam_driver_options or ipam_pools:
# Only add ipam parameter if a driver was specified or if IPAM parameters
# were specified. Leaving this parameter away can significantly speed up
# creation; on my machine creation with this option needs ~15 seconds,
# and without just a few seconds.
if LooseVersion(docker_version) >= LooseVersion('2.0.0'):
params['ipam'] = IPAMConfig(driver=self.parameters.ipam_driver,
pool_configs=ipam_pools,
options=self.parameters.ipam_driver_options)
else:
params['ipam'] = utils.create_ipam_config(driver=self.parameters.ipam_driver,
pool_configs=ipam_pools)
if self.parameters.enable_ipv6 is not None:
params['enable_ipv6'] = self.parameters.enable_ipv6
if self.parameters.internal is not None:
params['internal'] = self.parameters.internal
if self.parameters.scope is not None:
params['scope'] = self.parameters.scope
if self.parameters.attachable is not None:
params['attachable'] = self.parameters.attachable
if self.parameters.labels:
params['labels'] = self.parameters.labels
if not self.check_mode:
resp = self.client.create_network(self.parameters.name, **params)
self.client.report_warnings(resp, ['Warning'])
self.existing_network = self.client.get_network(network_id=resp['Id'])
self.results['actions'].append("Created network %s with driver %s" % (self.parameters.name, self.parameters.driver))
self.results['changed'] = True
def remove_network(self):
if self.existing_network:
self.disconnect_all_containers()
if not self.check_mode:
self.client.remove_network(self.parameters.name)
self.results['actions'].append("Removed network %s" % (self.parameters.name,))
self.results['changed'] = True
def is_container_connected(self, container_name):
if not self.existing_network:
return False
return container_name in container_names_in_network(self.existing_network)
def connect_containers(self):
for name in self.parameters.connected:
if not self.is_container_connected(name):
if not self.check_mode:
self.client.connect_container_to_network(name, self.parameters.name)
self.results['actions'].append("Connected container %s" % (name,))
self.results['changed'] = True
self.diff_tracker.add('connected.{0}'.format(name),
parameter=True,
active=False)
def disconnect_missing(self):
if not self.existing_network:
return
containers = self.existing_network['Containers']
if not containers:
return
for c in containers.values():
name = c['Name']
if name not in self.parameters.connected:
self.disconnect_container(name)
def disconnect_all_containers(self):
containers = self.client.get_network(name=self.parameters.name)['Containers']
if not containers:
return
for cont in containers.values():
self.disconnect_container(cont['Name'])
def disconnect_container(self, container_name):
if not self.check_mode:
self.client.disconnect_container_from_network(container_name, self.parameters.name)
self.results['actions'].append("Disconnected container %s" % (container_name,))
self.results['changed'] = True
self.diff_tracker.add('connected.{0}'.format(container_name),
parameter=False,
active=True)
def present(self):
different = False
differences = DifferenceTracker()
if self.existing_network:
different, differences = self.has_different_config(self.existing_network)
self.diff_tracker.add('exists', parameter=True, active=self.existing_network is not None)
if self.parameters.force or different:
self.remove_network()
self.existing_network = None
self.create_network()
self.connect_containers()
if not self.parameters.appends:
self.disconnect_missing()
if self.diff or self.check_mode or self.parameters.debug:
self.diff_result['differences'] = differences.get_legacy_docker_diffs()
self.diff_tracker.merge(differences)
if not self.check_mode and not self.parameters.debug:
self.results.pop('actions')
network_facts = self.get_existing_network()
self.results['ansible_facts'] = {u'docker_network': network_facts}
self.results['network'] = network_facts
def absent(self):
self.diff_tracker.add('exists', parameter=False, active=self.existing_network is not None)
self.remove_network()
def main():
argument_spec = dict(
name=dict(type='str', required=True, aliases=['network_name']),
connected=dict(type='list', default=[], elements='str', aliases=['containers']),
state=dict(type='str', default='present', choices=['present', 'absent']),
driver=dict(type='str', default='bridge'),
driver_options=dict(type='dict', default={}),
force=dict(type='bool', default=False),
appends=dict(type='bool', default=False, aliases=['incremental']),
ipam_driver=dict(type='str'),
ipam_driver_options=dict(type='dict'),
ipam_options=dict(type='dict', default={}, options=dict(
subnet=dict(type='str'),
iprange=dict(type='str'),
gateway=dict(type='str'),
aux_addresses=dict(type='dict'),
), removed_in_version='2.0.0', removed_from_collection='community.general'), # was Ansible 2.12
ipam_config=dict(type='list', elements='dict', options=dict(
subnet=dict(type='str'),
iprange=dict(type='str'),
gateway=dict(type='str'),
aux_addresses=dict(type='dict'),
)),
enable_ipv6=dict(type='bool'),
internal=dict(type='bool'),
labels=dict(type='dict', default={}),
debug=dict(type='bool', default=False),
scope=dict(type='str', choices=['local', 'global', 'swarm']),
attachable=dict(type='bool'),
)
mutually_exclusive = [
('ipam_config', 'ipam_options')
]
option_minimal_versions = dict(
scope=dict(docker_py_version='2.6.0', docker_api_version='1.30'),
attachable=dict(docker_py_version='2.0.0', docker_api_version='1.26'),
labels=dict(docker_api_version='1.23'),
ipam_driver_options=dict(docker_py_version='2.0.0'),
)
client = AnsibleDockerClient(
argument_spec=argument_spec,
mutually_exclusive=mutually_exclusive,
supports_check_mode=True,
min_docker_version='1.10.0',
min_docker_api_version='1.22',
# "The docker server >= 1.10.0"
option_minimal_versions=option_minimal_versions,
)
try:
cm = DockerNetworkManager(client)
client.module.exit_json(**cm.results)
except DockerException as e:
client.fail('An unexpected docker error occurred: {0}'.format(e), exception=traceback.format_exc())
except RequestException as e:
client.fail('An unexpected requests error occurred when docker-py tried to talk to the docker daemon: {0}'.format(e), exception=traceback.format_exc())
if __name__ == '__main__':
main()
|
#!/usr/bin/env python3
import asyncio
async def main():
print('hello')
await asyncio.sleep(1)
print('world')
if __name__ == "__main__":
asyncio.run(main())
|
"""Translation constants."""
import pathlib
PROJECT_ID = "130246255a974bd3b5e8a1.51616605"
DOCKER_IMAGE = "b8329d20280263cad04f65b843e54b9e8e6909a348a678eac959550b5ef5c75f"
INTEGRATIONS_DIR = pathlib.Path("homeassistant/components")
|
import os
from flask import Flask, send_from_directory
app = Flask(__name__, static_folder='client/build')
@app.route('/', defaults={'path': ''})
@app.route('/<path:path>')
def serve(path):
if path == "":
return send_from_directory('client/build', 'index.html')
elif os.path.exists("client/build/" + path):
return send_from_directory('client/build', path)
else:
return send_from_directory('client/build', 'index.html')
if __name__ == '__main__':
app.run(use_reloader=True, port=3000, threaded=True)
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
__all__ = [
'GetInterfaceEndpointResult',
'AwaitableGetInterfaceEndpointResult',
'get_interface_endpoint',
]
@pulumi.output_type
class GetInterfaceEndpointResult:
"""
Interface endpoint resource.
"""
def __init__(__self__, endpoint_service=None, etag=None, fqdn=None, location=None, name=None, network_interfaces=None, owner=None, provisioning_state=None, subnet=None, tags=None, type=None):
if endpoint_service and not isinstance(endpoint_service, dict):
raise TypeError("Expected argument 'endpoint_service' to be a dict")
pulumi.set(__self__, "endpoint_service", endpoint_service)
if etag and not isinstance(etag, str):
raise TypeError("Expected argument 'etag' to be a str")
pulumi.set(__self__, "etag", etag)
if fqdn and not isinstance(fqdn, str):
raise TypeError("Expected argument 'fqdn' to be a str")
pulumi.set(__self__, "fqdn", fqdn)
if location and not isinstance(location, str):
raise TypeError("Expected argument 'location' to be a str")
pulumi.set(__self__, "location", location)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if network_interfaces and not isinstance(network_interfaces, list):
raise TypeError("Expected argument 'network_interfaces' to be a list")
pulumi.set(__self__, "network_interfaces", network_interfaces)
if owner and not isinstance(owner, str):
raise TypeError("Expected argument 'owner' to be a str")
pulumi.set(__self__, "owner", owner)
if provisioning_state and not isinstance(provisioning_state, str):
raise TypeError("Expected argument 'provisioning_state' to be a str")
pulumi.set(__self__, "provisioning_state", provisioning_state)
if subnet and not isinstance(subnet, dict):
raise TypeError("Expected argument 'subnet' to be a dict")
pulumi.set(__self__, "subnet", subnet)
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", tags)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter(name="endpointService")
def endpoint_service(self) -> Optional['outputs.EndpointServiceResponse']:
"""
A reference to the service being brought into the virtual network.
"""
return pulumi.get(self, "endpoint_service")
@property
@pulumi.getter
def etag(self) -> Optional[str]:
"""
Gets a unique read-only string that changes whenever the resource is updated.
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter
def fqdn(self) -> Optional[str]:
"""
A first-party service's FQDN that is mapped to the private IP allocated via this interface endpoint.
"""
return pulumi.get(self, "fqdn")
@property
@pulumi.getter
def location(self) -> Optional[str]:
"""
Resource location.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> str:
"""
Resource name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="networkInterfaces")
def network_interfaces(self) -> Sequence['outputs.NetworkInterfaceResponse']:
"""
Gets an array of references to the network interfaces created for this interface endpoint.
"""
return pulumi.get(self, "network_interfaces")
@property
@pulumi.getter
def owner(self) -> str:
"""
A read-only property that identifies who created this interface endpoint.
"""
return pulumi.get(self, "owner")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> str:
"""
The provisioning state of the interface endpoint. Possible values are: 'Updating', 'Deleting', and 'Failed'.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter
def subnet(self) -> Optional['outputs.SubnetResponse']:
"""
The ID of the subnet from which the private IP will be allocated.
"""
return pulumi.get(self, "subnet")
@property
@pulumi.getter
def tags(self) -> Optional[Mapping[str, str]]:
"""
Resource tags.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> str:
"""
Resource type.
"""
return pulumi.get(self, "type")
class AwaitableGetInterfaceEndpointResult(GetInterfaceEndpointResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetInterfaceEndpointResult(
endpoint_service=self.endpoint_service,
etag=self.etag,
fqdn=self.fqdn,
location=self.location,
name=self.name,
network_interfaces=self.network_interfaces,
owner=self.owner,
provisioning_state=self.provisioning_state,
subnet=self.subnet,
tags=self.tags,
type=self.type)
def get_interface_endpoint(expand: Optional[str] = None,
interface_endpoint_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetInterfaceEndpointResult:
"""
Use this data source to access information about an existing resource.
:param str expand: Expands referenced resources.
:param str interface_endpoint_name: The name of the interface endpoint.
:param str resource_group_name: The name of the resource group.
"""
__args__ = dict()
__args__['expand'] = expand
__args__['interfaceEndpointName'] = interface_endpoint_name
__args__['resourceGroupName'] = resource_group_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-nextgen:network/v20181001:getInterfaceEndpoint', __args__, opts=opts, typ=GetInterfaceEndpointResult).value
return AwaitableGetInterfaceEndpointResult(
endpoint_service=__ret__.endpoint_service,
etag=__ret__.etag,
fqdn=__ret__.fqdn,
location=__ret__.location,
name=__ret__.name,
network_interfaces=__ret__.network_interfaces,
owner=__ret__.owner,
provisioning_state=__ret__.provisioning_state,
subnet=__ret__.subnet,
tags=__ret__.tags,
type=__ret__.type)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from pyteaser import SummarizeUrl
from scipy import spatial
import re, math
from collections import Counter
#http://stackoverflow.com/questions/15173225/how-to-calculate-cosine-similarity-given-2-sentence-strings-python
WORD = re.compile(r'\w+')
def get_cosine(vec1, vec2):
intersection = set(vec1.keys()) & set(vec2.keys())
numerator = sum([vec1[x] * vec2[x] for x in intersection])
sum1 = sum([vec1[x]**2 for x in vec1.keys()])
sum2 = sum([vec2[x]**2 for x in vec2.keys()])
denominator = math.sqrt(sum1) * math.sqrt(sum2)
if not denominator:
return 0.0
else:
return float(numerator) / denominator
def text_to_vector(text):
words = WORD.findall(text)
return Counter(words)
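# Worked example (editor's sketch, not part of the original script): texts are turned
# into word-count vectors and compared; identical texts give cosine 1.0 and texts with
# no shared words give 0.0.
#     v_a = text_to_vector('the cat sat on the mat')   # Counter({'the': 2, 'cat': 1, ...})
#     v_b = text_to_vector('the cat sat')
#     get_cosine(v_a, v_b)                             # 4 / sqrt(8 * 3) ~= 0.82
#     get_cosine(v_a, text_to_vector('dogs bark'))     # 0.0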
url = 'http://www.svt.se/kultur/bjorn-granath-har-avlidit'
summaries = SummarizeUrl(url)
sums = " ".join(summaries)
print sums.replace('\n', '')
url2 = 'https://www.svd.se/bjorn-granath-ar-dod/om/kultur:scen'
summaries2 = SummarizeUrl(url2)
sums2 = " ".join(summaries2)
print sums2.replace('\n', '')
url3 = 'https://www.dn.se/kultur-noje/bjorn-granath-ar-dod/'
summaries3 = SummarizeUrl(url3)
sums3 = " ".join(summaries3)
print sums3.replace('\n', '')
vector1 = text_to_vector(sums)
vector2 = text_to_vector(sums2)
vector3 = text_to_vector(sums3)
print 'Cosine:', get_cosine(vector1, vector2)
print 'Cosine:', get_cosine(vector1, vector3)
print 'Cosine:', get_cosine(vector2, vector3)
#result = 1 - spatial.distance.cosine(sums, sums)
#print result
|
# Copyright 2013 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from cinderclient.tests import utils
from cinderclient.tests.v2 import fakes
cs = fakes.FakeClient()
class SnapshotActionsTest(utils.TestCase):
def test_update_snapshot_status(self):
s = cs.volume_snapshots.get('1234')
cs.volume_snapshots.update_snapshot_status(s,
{'status': 'available'})
cs.assert_called('POST', '/snapshots/1234/action')
def test_update_snapshot_status_with_progress(self):
s = cs.volume_snapshots.get('1234')
cs.volume_snapshots.update_snapshot_status(s,
{'status': 'available',
'progress': '73%'})
cs.assert_called('POST', '/snapshots/1234/action')
|
# Copyright (C) 2020 THL A29 Limited, a Tencent company.
# All rights reserved.
# Licensed under the BSD 3-Clause License (the "License"); you may
# not use this file except in compliance with the License. You may
# obtain a copy of the License at
# https://opensource.org/licenses/BSD-3-Clause
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" basis,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
# See the AUTHORS file for names of contributors.
import unittest
import torch
from transformers.modeling_bert import BertModel, BertConfig
import numpy
import turbo_transformers
import sys
import os
sys.path.append(os.path.dirname(__file__))
import test_helper
class TestBertModel(unittest.TestCase):
def init_data(self, use_cuda) -> None:
torch.set_grad_enabled(False)
torch.set_num_threads(4)
turbo_transformers.set_num_threads(4)
self.test_device = torch.device('cuda:0') if use_cuda else \
torch.device('cpu:0')
self.cfg = BertConfig()
self.torch_model = BertModel(self.cfg)
self.torch_model.eval()
if torch.cuda.is_available():
self.torch_model.to(self.test_device)
self.turbo_model = turbo_transformers.BertModel.from_torch(
self.torch_model, self.test_device, "turbo", use_memory_opt=True)
def check_torch_and_turbo(self,
use_cuda,
batch_size,
seq_len,
use_memory_opt=True):
self.init_data(use_cuda)
num_iter = 1
device_name = "GPU" if use_cuda else "CPU"
input_ids = torch.randint(low=0,
high=self.cfg.vocab_size - 1,
size=(batch_size, seq_len),
dtype=torch.long,
device=self.test_device)
torch_model = lambda: self.torch_model(input_ids)
torch_result, torch_qps, torch_time = \
test_helper.run_model(torch_model, use_cuda, num_iter)
print(f'BertModel PyTorch({device_name}) QPS {torch_qps}')
turbo_model = (lambda: self.turbo_model(input_ids))
if use_memory_opt:
turbo_transformers.bert_opt_mem_allocate_api(
input_ids.size()[0], # batch
input_ids.size()[1], # seq_len
self.cfg.num_attention_heads,
self.cfg.hidden_size,
self.cfg.num_hidden_layers,
"GPU" if 'cuda' in input_ids.device.type else "CPU")
with turbo_transformers.pref_guard("bert_perf") as perf:
turbo_result, turbo_qps, turbo_time = \
test_helper.run_model(turbo_model, use_cuda, num_iter)
print(f'BertModel TurboTransformer({device_name}) QPS {turbo_qps}')
# set the allocator back to naive, otherwise it will affect
# the other inference processes.
if use_memory_opt:
turbo_transformers.reset_allocator_schema("naive")
print(f"batch {batch_size} seq_len {seq_len}")
print(torch.max(torch_result[0].cpu() - turbo_result[0].cpu()))
self.assertTrue(
numpy.allclose(torch_result[0].cpu(),
turbo_result[0].cpu(),
atol=1e-2,
rtol=1e-3))
def test_bert_model_helper(self, use_memory_opt=False):
if use_memory_opt:
turbo_transformers.reset_allocator_schema("model-aware")
for batch_size in [1, 4, 20]:
for seq_len in [50, 4, 16]:
if torch.cuda.is_available() and \
turbo_transformers.config.is_compiled_with_cuda():
self.check_torch_and_turbo(use_cuda=True,
batch_size=batch_size,
seq_len=seq_len,
use_memory_opt=use_memory_opt)
self.check_torch_and_turbo(use_cuda=False,
batch_size=batch_size,
seq_len=seq_len,
use_memory_opt=use_memory_opt)
if use_memory_opt:
turbo_transformers.reset_allocator_schema("naive")
def test_bert_model(self, use_memory_opt=False):
self.test_bert_model_helper(True)
self.test_bert_model_helper(False)
if __name__ == '__main__':
unittest.main()
|
#!/usr/bin/env python
# Copyright (c) 2014, Palo Alto Networks
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
# Author: David Spears <dspears@paloaltonetworks.com>
"""
panorama-commit-push.py
==========
This script performs a Panorama commit and pushes the configuration to devices in a specific
device group, which is passed in as an argument at execution.
**Usage**::
panorama-commit-push.py [-h] [-v] [-q] hostname username password devicegroup
**Examples**:
Commit to a Panorama at 13.129.150.75 that has a modified devicegroup named GWLB:
$ python panorama-commit-push.py 13.129.150.75 username password GWLB
Instructions for installing the PAN-OS-SDK are located here:
https://pandevice.readthedocs.io/en/latest/getting-started.html
"""
__author__ = "djspears"
import argparse
from panos import panorama
def main():
# Get command line arguments
parser = argparse.ArgumentParser(
description="Commit and Push an updated Panorama device group configuration"
)
parser.add_argument(
"-v", "--verbose", action="count", help="Verbose (-vv for extra verbose)"
)
parser.add_argument("-q", "--quiet", action="store_true", help="No output")
# Palo Alto Networks related arguments
fw_group = parser.add_argument_group("Palo Alto Networks Device")
fw_group.add_argument("hostname", help="Hostname of Panorama")
fw_group.add_argument("username", help="Username for Panorama")
fw_group.add_argument("password", help="Password for Panorama")
fw_group.add_argument("devicegroup", help="DeviceGroup for Panorama")
args = parser.parse_args()
# Connects to Panorama.
pano = panorama.Panorama(args.hostname, args.username, args.password,) # Create a panorama object
# Performs the commit and device group push
print("Performing commit...")
pano.commit(sync_all=True,sync=True)
print("Done")
print("Performing device push...")
pano.commit_all(sync=True,sync_all=True,cmd="<commit-all><shared-policy><device-group><entry name='%s'/></device-group></shared-policy></commit-all>"%(args.devicegroup))
print("Done")
# Call the main() function to begin the program if not
# loaded as a module.
if __name__ == "__main__":
main()
|
import random
import string
from django.conf import settings
SHORTCODE_MIN = getattr(settings, "SHORTCODE_MIN", 5)
def code_generator(size=SHORTCODE_MIN, chars=string.ascii_lowercase + string.digits + string.ascii_uppercase):
return ''.join(random.choice(chars) for _ in range(size))
def create_shortcode(instance, size=SHORTCODE_MIN):
new_code = code_generator(size=size)
Klass = instance.__class__
qs_exists = Klass.objects.filter(shortcode=new_code).exists()
if qs_exists:
return create_shortcode(instance, size=size)
return new_code
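# Illustrative usage (editor's sketch): assumes a Django model with a `shortcode` field,
# typically populated from a pre_save signal; the receiver and field names below are
# hypothetical, not part of this module.
#
#     def shortcode_pre_save_receiver(sender, instance, *args, **kwargs):
#         if not instance.shortcode:
#             instance.shortcode = create_shortcode(instance)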
|
preprocess_output = r"D:\Codes\Wos_IE\result\content_dic.json"
abbreviate_dictionary_output = r"D:\Codes\Wos_IE\result\abbreviate_words.json"
|
# coding: utf-8
from itertools import combinations
import matplotlib.pyplot as plt
from sklearn.datasets import load_iris
from sklearn.cross_validation import KFold
data = load_iris()
for k, v in data.items():
print k
print v, '\n'
featureNames = data['feature_names']
features = data['data']
targetNames = data['target_names']
targets = data['target']
plt.figure(figsize=(15, 10))
styles = {0: 'r>', 1: 'go', 2: 'bx'}
for f1, f2 in combinations(range(len(featureNames)), 2):
plt.subplot(230+f2 if f1==0 else 231+f1+f2)
plt.grid()
plt.xlabel(featureNames[f1])
plt.ylabel(featureNames[f2])
for t in range(len(targetNames)):
plt.scatter(features[targets==t, f1], features[targets==t, f2], marker=styles[t][1], c=styles[t][0])
# plt.show()
labels = targetNames[targets]
plen = features[:, 2]
is_setosa = (labels == 'setosa')
print plen[is_setosa].max()
print plen[~is_setosa].min()
def is_setosa_test(features):
return features[2] < 2.5
x0 = features[:, 2].min() * .8
x1 = features[:, 2].max() * 1.2
y0 = features[:, 3].min() * .8
y1 = features[:, 3].max() * 1.2
plt.figure()
plt.grid()
plt.xlabel(featureNames[2])
plt.ylabel(featureNames[3])
plt.fill_between([x0, 2.5], [y0, y0], [y1, y1], color=(1, .9, .9))
plt.fill_between([2.5, x1], [y0, y0], [y1, y1], color=(.9, .9, 1))
plt.plot([2.5, 2.5], [y0, y1], 'k--', lw=3)
for t in range(len(targetNames)):
plt.scatter(features[targets==t, 2], features[targets==t, 3], marker=styles[t][1], c=styles[t][0])
plt.xlim(x0, x1)
plt.ylim(y0, y1)
# plt.show()
features = features[~is_setosa]
labels = labels[~is_setosa]
is_virginica = (labels == 'virginica')
def fit_model(features, labels):
bestThres = 0
bestAcc = -1
bestF = 0
rev = False
for f in range(features.shape[1]):
for t in features[:, f]:
pred = (features[:, f] > t)
acc = (pred == labels).mean()
if acc > bestAcc or 1 - acc > bestAcc:
bestThres = t
bestAcc = max(acc, 1 - acc)
bestF = f
rev = bestAcc == 1 - acc
return bestThres, bestF, rev
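# Editor's note (sketch, not in the original script): fit_model returns a tuple
# (threshold, feature_index, reversed_flag); predictions take the form
# features[:, feature_index] > threshold, or <= threshold when reversed_flag is True
# (see predict() further below). On this data the search typically settles on petal
# width with a threshold around 1.6, matching the decision line drawn in the plot below.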
model = fit_model(features, is_virginica)
print model[0], featureNames[model[1]], model[2]
x0 = features[:, 2].min() * .8
x1 = features[:, 2].max() * 1.2
y0 = features[:, 3].min() * .8
y1 = features[:, 3].max() * 1.2
targets = targets[~is_setosa]
plt.figure()
plt.grid()
plt.xlabel(featureNames[2])
plt.ylabel(featureNames[3])
plt.fill_between([x0, x1], [1.6, 1.6], [y0, y0], color=(1, .9, .9))
plt.fill_between([x0, x1], [1.6, 1.6], [y1, y1], color=(.9, .9, 1))
plt.plot([x0, x1], [1.6, 1.6], 'k--', lw=3)
for t in range(len(targetNames)):
plt.scatter(features[targets==t, 2], features[targets==t, 3], marker=styles[t][1], c=styles[t][0])
plt.xlim(x0, x1)
plt.ylim(y0, y1)
# plt.show()
def predict(model, features):
t, f, rev = model
return features[:, f] > t if not rev else features[:, f] <= t
def accuracy(features, labels, model):
return (predict(model, features) == labels).mean()
for train, test in KFold(labels.shape[0], 5, True):
model = fit_model(features[train], is_virginica[train])
print 'train acc:', accuracy(features[train], is_virginica[train], model),
print 'test acc:', accuracy(features[test], is_virginica[test], model)
|
"""
Copyright (c) 2018-2020 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from PIL import Image
import numpy as np
from .format_converter import BaseFormatConverter, ConverterReturn
from ..config import PathField, StringField, BoolField
from ..representation import ClassificationAnnotation
class ClutteredMNISTConverter(BaseFormatConverter):
__provider__ = 'cluttered_mnist'
@classmethod
def parameters(cls):
params = super().parameters()
params.update({
'data_file': PathField(),
'split': StringField(optional=True, default='test', choices=['train', 'valid', 'test']),
'convert_images': BoolField(optional=True, default=True),
'images_dir': PathField(is_directory=True, optional=True)
})
return params
def configure(self):
self.data_file = self.get_value_from_config('data_file')
self.split = self.get_value_from_config('split')
self.convert_images = self.get_value_from_config('convert_images')
self.images_dir = self.get_value_from_config('images_dir') or self.data_file.parent / 'converted_images'
if self.convert_images and not self.images_dir.exists():
self.images_dir.mkdir(parents=True)
def convert(self, check_content=False, progress_callback=None, progress_interval=100, **kwargs):
data = np.load(str(self.data_file))
x_values = data['x_{}'.format(self.split)]
y_values = data['y_{}'.format(self.split)]
annotations = []
for idx, y in enumerate(y_values):
identifier = '{}_{}.png'.format(self.split, idx)
y_label = np.argmax(y)
if self.convert_images:
x = x_values[idx].reshape((60, 60)) * 255
image = Image.fromarray(x)
image = image.convert("L")
image.save(str(self.images_dir / identifier))
annotations.append(ClassificationAnnotation(identifier, y_label))
return ConverterReturn(annotations, None, None)
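# Illustrative parameter set (editor's sketch; the key names mirror parameters() above,
# while the file names and the surrounding configuration syntax are hypothetical):
#
#     converter: cluttered_mnist
#     data_file: <path to the cluttered MNIST .npz archive>
#     split: test
#     convert_images: True
#     images_dir: <output directory for the generated PNG images>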
|
"""empty message
Revision ID: ee248674f637
Revises: ebf728dc4d0d
Create Date: 2017-05-31 15:07:32.715000
"""
# revision identifiers, used by Alembic.
revision = 'ee248674f637'
down_revision = 'ebf728dc4d0d'
from alembic import op
import sqlalchemy as sa
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
pass
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
pass
# ### end Alembic commands ###
|
# ----------------------------------------------------------------------------
# Copyright (c) 2016-2020, empress development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
import warnings
from skbio import TreeNode
import numpy as np
from bp import BP, from_skbio_treenode
class TreeFormatWarning(Warning):
pass
class Tree:
"""
Attributes
----------
length
leafcount
height
depth
Notes
-----
`length` refers to the branch length of a node to its parent.
`leafcount` is the number of tips within a subtree. `height` refers
to the longest path from root to the deepest leaf in that subtree.
`depth` is the number of nodes found in the longest path.
"""
def __init__(self, bp_tree):
""" Constructs a Dendrogram object for visualization.
Parameters
----------
bp_tree: bp.BP
BP tree object
Returns
-------
"""
self.bp_tree = bp_tree
self.B = self.bp_tree.B
self.leafcounts = np.zeros(self.B.size, int)
self.depths = np.zeros(self.B.size, np.double)
self.heights = np.zeros(self.B.size, np.double)
self.yr = np.zeros(self.B.size, np.double)
self.xr = np.zeros(self.B.size, np.double)
self.highest_child_yr = np.zeros(self.B.size, float)
self.lowest_child_yr = np.zeros(self.B.size, float)
self.clangle = np.zeros(self.B.size, np.double)
self.clradius = np.zeros(self.B.size, np.double)
self.xc0 = np.zeros(self.B.size, np.double)
self.yc0 = np.zeros(self.B.size, np.double)
self.xc1 = np.zeros(self.B.size, np.double)
self.yc1 = np.zeros(self.B.size, np.double)
self.highest_child_clangle = np.zeros(self.B.size, float)
self.lowest_child_clangle = np.zeros(self.B.size, float)
self.arcx0 = np.zeros(self.B.size, np.double)
self.arcy0 = np.zeros(self.B.size, np.double)
self.arcx1 = np.zeros(self.B.size, np.double)
self.arcy1 = np.zeros(self.B.size, np.double)
self.x1 = np.zeros(self.B.size, np.double)
self.y1 = np.zeros(self.B.size, np.double)
self.x2 = np.zeros(self.B.size, np.double)
self.y2 = np.zeros(self.B.size, np.double)
self.angle = np.zeros(self.B.size, np.double)
self.childRem = -1
@classmethod
def from_tree(cls, tree, use_lengths=True):
""" Creates an Tree object from a skbio tree.
Parameters
----------
tree : skbio.TreeNode
Input skbio tree
use_lengths: Boolean
Specify if the branch length should be incorporated into
the geometry calculations for visualization.
Returns
-------
Tree: bp.BP
"""
bp_tree = from_skbio_treenode(tree)
if sum(bp_tree.B) <= 1:
raise ValueError("Tree must contain at least 2 nodes.")
# While traversing the tree, record tip / internal node names
# (Nodes without names are ignored, since we'll assign those later
# using tools.fill_missing_node_names())
tip_names = []
internal_node_names = []
max_branch_length = 0
for i in range(sum(bp_tree.B)):
node_idx = bp_tree.postorderselect(i)
name = bp_tree.name(node_idx)
length = bp_tree.length(node_idx)
if name is not None:
# NOTE: This should eventually be taken out when
# fill_missing_node_names() is refactored. However, for now,
# this makes sure that users can't accidentally break things by
# naming nodes identical to our default names for missing nodes
if name.startswith("EmpressNode"):
raise ValueError(
'Node names can\'t start with "EmpressNode".'
)
if isleaf(bp_tree, node_idx):
tip_names.append(name)
else:
internal_node_names.append(name)
if length is None:
raise ValueError(
"Non-root branches of the tree must have lengths."
)
if length < 0:
raise ValueError(
"Non-root branches of the tree must have nonnegative "
"lengths."
)
max_branch_length = max(length, max_branch_length)
# We didn't consider the root node in the above traversal since we
# don't care about its length. However, we do care about its name,
# so we add the root's name to internal_node_names.
if max_branch_length == 0:
raise ValueError(
"At least one non-root branch of the tree must have a "
"positive length."
)
unique_tip_name_set = set(tip_names)
if len(unique_tip_name_set) != len(tip_names):
raise ValueError("Tip names in the tree must be unique.")
unique_internal_node_name_set = set(internal_node_names)
if len(unique_tip_name_set & unique_internal_node_name_set) > 0:
raise ValueError(
"Tip names in the tree cannot overlap with internal node "
"names."
)
if len(unique_internal_node_name_set) != len(internal_node_names):
warnings.warn(
"Internal node names in the tree are not unique.",
TreeFormatWarning
)
bp_tree = Tree(bp_tree)
bp_tree.update_geometry(use_lengths)
return bp_tree
def postorder(self, include_self=True):
e = sum(self.B) if include_self else sum(self.B) - 1
for i in range(e):
node_idx = self.bp_tree.postorderselect(i)
yield node_idx
def preorder(self, include_self=True):
s = 0 if include_self else 1
for i in range(s, sum(self.B)):
node_idx = self.bp_tree.preorderselect(i)
yield node_idx
def bp_tree_tips(self):
""" Extracts tip names in the tree, ignoring unnamed tips.
Parameters
----------
bp_tree : bp.BP
Input BP tree
Returns
-------
tips : list of strings
list of tip names in the tree
"""
tips = []
# Iterate through all opening and closing parentheses and extract tip names
for i in range(self.B.size):
pos_name = self.bp_tree.name(i)
# Check if this is a leaf node with a label
if self.isleaf(i) and (pos_name is not None):
tips.append(pos_name)
return tips
def bp_tree_non_tips(self):
""" Extracts internal node names in the tree, ignoring unnamed nodes.
Parameters
----------
bp_tree : bp.BP
Input BP tree
Returns
-------
non_tips : list of strings
list of internal node names in the tree
"""
non_tips = []
for i in range(self.B.size):
pos_name = self.bp_tree.name(i)
# Check if this is an opening parenthesis, is not a leaf, and
# has a node label
if self.B[i] and not self.isleaf(i) and pos_name is not None:
non_tips.append(pos_name)
return non_tips
def update_geometry(self, use_lengths, depth=None):
"""Calculate tree node attributes such as height and depth.
Parameters
----------
use_lengths: bool
Specify if the branch length should be incorporated into
the geometry calculations for visualization.
depth: int
The number of nodes in the longest path from root to leaf.
This is agnostic to scale and orientation.
"""
new_heights = np.zeros(self.B.size, dtype=np.double)
new_leaf_count = np.zeros(self.B.size, dtype=int)
new_depths = np.zeros(self.B.size, dtype=np.double)
for node_idx in self.postorder():
length = self.bp_tree.length(node_idx)
if length is None or not use_lengths:
if not use_lengths:
if self.isleaf(node_idx):
length = 5
else:
length = 1
else:
length = 0
new_depths[node_idx] = (depth or 0) + length
if self.isleaf(node_idx):
new_heights[node_idx] = length
new_leaf_count[node_idx] = 1
else:
idx = self.bp_tree.fchild(node_idx)
height = 0
leafcount = 0
while idx:
height = max(height, new_heights[idx])
leafcount += new_leaf_count[idx]
idx = self.bp_tree.nsibling(idx)
height += length
new_heights[node_idx] = height
new_leaf_count[node_idx] = leafcount
self.leafcounts = new_leaf_count
self.heights = new_heights
self.depths = new_depths
def coords(self, height, width):
""" Computes the coordinates of nodes to be rendered in plot.
This runs multiple layout algorithms and saves all of the resulting
coordinates for each node, so that layout algorithms can be rapidly
toggled between in the JS interface.
Also adds on .highest_child_yr and .lowest_child_yr attributes to
internal nodes so that vertical bars for these nodes can be drawn in
the rectangular layout.
Parameters
----------
height : int
The height of the canvas.
width : int
The width of the canvas.
Returns
-------
dict:
Mapping between layout and the coordinate suffix.
str:
Name of the default layout.
"""
layout_to_coordsuffix = {}
layout_algs = (
self.layout_unrooted,
self.layout_rectangular,
self.layout_circular,
)
# We set the default layout to whatever the first layout in
# layout_algs is, but this behavior is of course modifiable
default_layout = None
for alg in layout_algs:
name, suffix = alg(width, height)
layout_to_coordsuffix[name] = suffix
self.alter_coordinates_relative_to_root(suffix)
if name == "Circular":
self.alter_coordinates_relative_to_root("c0")
if default_layout is None:
default_layout = name
# Determine highest and lowest child y-position for internal nodes in
# the rectangular layout; used to draw vertical lines for these nodes.
#
# NOTE / TODO: This will have the effect of drawing vertical lines even
# for nodes with only 1 child -- in this case lowest_child_yr ==
# highest_child_yr for this node, so all of the stuff drawn in WebGL
# for this vertical line shouldn't show up. I don't think this should
# cause any problems, but it may be worth detecting these cases and not
# drawing vertical lines for them in the future.
for node_idx in self.preorder():
if not self.isleaf(node_idx):
# wow, child does not look like a word any more
self.highest_child_yr[node_idx] = float("-inf")
self.lowest_child_yr[node_idx] = float("inf")
for c_idx in self.children(node_idx):
if self.yr[c_idx] > self.highest_child_yr[node_idx]:
self.highest_child_yr[node_idx] = self.yr[c_idx]
if self.yr[c_idx] < self.lowest_child_yr[node_idx]:
self.lowest_child_yr[node_idx] = self.yr[c_idx]
return layout_to_coordsuffix, default_layout
def alter_coordinates_relative_to_root(self, suffix):
""" Subtracts the root node's x- and y- coords from all nodes' coords.
This was previously done within coords(), but I moved it here so that
this logic can be used after arbitrary layout computations.
Parameters
----------
suffix : str
The suffix of the x- and y-coordinates to adjust.
For example, this is "2" for the unrooted layout since coordinates
are stored in the x2 and y2 attributes for every node; and it's "r"
for the rectangular layout since the coordinate attributes are now
xr and yr.
"""
xname = "x" + suffix
yname = "y" + suffix
centersX = getattr(self, xname)
centersY = getattr(self, yname)
centerX = centersX[0]
centerY = centersY[0]
for node_idx in self.postorder():
# This code might look sort of intimidating, but it's really just
# another way to write out:
# node.x2 = node.x2 - centerX
# node.y2 = node.y2 - centerY
# ...when we don't know what "x2" or "y2" will be named beforehand.
centersX[node_idx] = centersX[node_idx] - centerX
centersY[node_idx] = centersY[node_idx] - centerY
setattr(self, xname, centersX)
setattr(self, yname, centersY)
def isleaf(self, i):
""" Checks if node at position i belongs to a leaf node or not
Parameters
----------
bp_tree : bp.BP
Input BP tree
i : int
The query node index
Returns
-------
bool
True if this is a leaf node, False otherwise
"""
return self.B[i] and (not self.B[i + 1])
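# Illustrative note (editor's sketch): in the balanced-parentheses encoding a leaf is an
# opening parenthesis immediately followed by a closing one, i.e. the bit pattern "10".
# For the newick tree ((a,b)c); the array B is [1, 1, 1, 0, 1, 0, 0, 0], so isleaf()
# is True only at indices 2 and 4 (the tips a and b).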
def children(self, i):
children = []
child = self.bp_tree.fchild(i)
while child > 0:
children.append(child)
child = self.bp_tree.nsibling(child)
return children
def layout_rectangular(self, width, height):
""" Rectangular layout.
In this sort of layout, each tip has a distinct y-position, and parent
y-positions are centered over their descendant tips' positions.
x-positions are computed based on nodes' branch lengths.
Following this algorithm, nodes' rectangular layout coordinates are
accessible at [node].xr and [node].yr.
For a simple tree, this layout should look something like:
__
___|
___| |__
| |___
| ___
|___|
|___
Parameters
----------
width : float
width of the canvas
height : float
height of the canvas
References
----------
https://rachel53461.wordpress.com/2014/04/20/algorithm-for-drawing-trees/
Clear explanation of Reingold-Tilford that I used a lot
https://github.com/qiime/Topiary-Explorer/blob/master/src/topiaryexplorer/TreeVis.java
Derived from the "Rectangular" layout algorithm code.
"""
# NOTE: This doesn't draw a horizontal line leading to the root "node"
# of the graph. See https://github.com/biocore/empress/issues/141 for
# context.
max_width = 0
max_height = 0
prev_y = 0
for node_idx in self.postorder():
if self.isleaf(node_idx):
self.yr[node_idx] = prev_y
prev_y += 1
if self.yr[node_idx] > max_height:
max_height = self.yr[node_idx]
else:
# Center internal nodes above their children
# We could also center them above their tips, but (IMO) this
# looks better ;)
children = self.children(node_idx)
self.yr[node_idx] = sum([self.yr[c_idx] for
c_idx in children]) / len(children)
for node_idx in self.preorder(include_self=False):
self.xr[node_idx] = self.xr[self.bp_tree.parent(node_idx)] + \
self.bp_tree.length(node_idx)
if self.xr[node_idx] > max_width:
max_width = self.xr[node_idx]
# We don't check if max_width == 0 here, because we check when
# constructing an Empress tree that it has at least one positive
# branch length and no negative branch lengths. (And if this is the
# case, then max_width must be > 0.)
x_scaling_factor = width / max_width
if max_height > 0:
# Having a max_height of 0 could actually happen, in the funky case
# where the entire tree is a straight line (e.g. A -> B -> C). In
# this case our "rectangular layout" drawing places all nodes on
# the same y-coordinate (0), resulting in max_height = 0.
# ... So, that's why we only do y-scaling if this *isn't* the case.
y_scaling_factor = height / max_height
else:
# Since this will be multiplied by 0 for every node, we can set
# this to any real number and get the intended "effect" of keeping
# every node's y-coordinate at 0.
y_scaling_factor = 1
for node_idx in self.preorder():
self.xr[node_idx] *= x_scaling_factor
self.yr[node_idx] *= y_scaling_factor
# Now we have the layout! In the JS we'll need to draw each internal
# node as a vertical line ranging from its lowest child y-position to
# its highest child y-position, and then draw horizontal lines from
# this line to all of its child nodes (where the length of the
# horizontal line is proportional to the node length in question).
return "Rectangular", "r"
def layout_circular(self, width, height):
""" Circular layout version of the rectangular layout.
Works analogously to the rectangular layout:
-Each tip is assigned a unique angle from the "center"/root of
the tree (out of the range [0, 2pi] in radians), and internal
nodes are set to an angle equal to the average of their
children's. This mirrors the assignment of y-coordinates for
the rectangular layout.
-All nodes are then assigned a radius equal to the sum of their
branch lengths descending from the root (but not including
the root's branch length, if provided -- the root is represented
as just a single point in the center of the layout). This mirrors
the assignment of x-coordinates for the rectangular layout.
-Lastly, we'll draw arcs for every internal node (except for the
root) connecting the "start points" of the child nodes of that
node with the minimum and maximum angle. (These points should
occur at the radius equal to the "end" of the given internal
node.)
We don't draw this arc for the root node because we don't draw
the root the same way we do the other nodes in the tree:
the root is represented as just a single point at the center
of the layout. Due to this, there isn't a way to draw an arc
from the root, since the root's "end" is at the same point as
its beginning (so the arc wouldn't be visible).
Following this algorithm, nodes' circular layout coordinates are
accessible at [node].xc and [node].yc. Angles will also be available
at [node].clangle, and radii will be available at [node].clradius; and
for non-root internal nodes, arc start and end coordinates will be
available at [node].arcx0, [node].arcy0, [node].arcx1, & [node].arcy1.
Parameters
----------
width : float
width of the canvas
height : float
height of the canvas
References
----------
https://github.com/qiime/Topiary-Explorer/blob/master/src/topiaryexplorer/TreeVis.java
Description above + the implementation of this algorithm
derived from the Polar layout algorithm code.
"""
anglepernode = (2 * np.pi) / self.leafcounts[0]
prev_clangle = 0
for node_idx in self.postorder():
if self.isleaf(node_idx):
self.clangle[node_idx] = prev_clangle
prev_clangle += anglepernode
else:
# Center internal nodes at an angle above their children
children = self.children(node_idx)
child_clangle_sum = sum([self.clangle[c_idx] for c_idx
in children])
self.clangle[node_idx] = child_clangle_sum / len(children)
max_clradius = 0
for node_idx in self.preorder(include_self=False):
self.clradius[node_idx] = self.clradius[self.bp_tree.parent(node_idx)] + \
self.bp_tree.length(node_idx)
if self.clradius[node_idx] > max_clradius:
max_clradius = self.clradius[node_idx]
# Now that we have the polar coordinates of the nodes, convert these
# coordinates to normal x/y coordinates.
# NOTE that non-root nodes will actually have two x/y coordinates we
# need to keep track of: one for the "end" of the node's line, and
# another for the "start" of the node's line. The latter of these is
# needed because the node's line begins at the parent node's radius but
# the child node's angle, if that makes sense -- and since converting
# from polar to x/y and back is annoying, it's easiest to just compute
# this in python.
max_x = max_y = float("-inf")
min_x = min_y = float("inf")
for node_idx in self.postorder():
self.xc1[node_idx] = self.clradius[node_idx] * \
np.cos(self.clangle[node_idx])
self.yc1[node_idx] = self.clradius[node_idx] * \
np.sin(self.clangle[node_idx])
if self.isleaf(node_idx):
# NOTE that the root has a clradius of 0 (since it's just
# represented as a point at the center of the layout). We don't
# even bother drawing the root in the Empress JS code, but for
# the purposes of alter_coordinates_relative_to_root() we need
# to explicitly position the root at (0, 0).
self.xc0[node_idx] = 0
self.yc0[node_idx] = 0
else:
self.xc0[node_idx] = self.clradius[
self.bp_tree.parent(node_idx)] *\
np.cos(self.clangle[node_idx])
self.yc0[node_idx] = self.clradius[
self.bp_tree.parent(node_idx)] *\
np.sin(self.clangle[node_idx])
# NOTE: We don't bother testing the xc0 / yc0 coordinates as
# "extrema" because they should always be further "within" the
# tree than the xc1 / yc1 coordinates.
# TODO: verify that the "tree is a line" case doesn't mess this up.
if self.xc1[node_idx] > max_x:
max_x = self.xc1[node_idx]
if self.yc1[node_idx] > max_y:
max_y = self.yc1[node_idx]
if self.xc1[node_idx] < min_x:
min_x = self.xc1[node_idx]
if self.yc1[node_idx] < min_y:
min_y = self.yc1[node_idx]
# TODO: raise error if the maximum and minimum are same for x or y.
# may happen if the tree is a straight line.
# set scaling factors
# normalize the coordinate based on the largest dimension
width_scale = width / (max_x - min_x)
height_scale = height / (max_y - min_y)
scale_factor = width_scale if width_scale > height_scale else \
height_scale
x_scaling_factor = scale_factor
y_scaling_factor = scale_factor
for node_idx in self.preorder():
self.xc0[node_idx] *= x_scaling_factor
self.yc0[node_idx] *= y_scaling_factor
self.xc1[node_idx] *= x_scaling_factor
self.yc1[node_idx] *= y_scaling_factor
if not self.isleaf(node_idx) and (node_idx != 0):
self.highest_child_clangle[node_idx] = float("-inf")
self.lowest_child_clangle[node_idx] = float("inf")
for c_idx in self.children(node_idx):
if self.clangle[c_idx] >\
self.highest_child_clangle[node_idx]:
self.highest_child_clangle[node_idx] =\
self.clangle[c_idx]
if self.clangle[c_idx] < \
self.lowest_child_clangle[node_idx]:
self.lowest_child_clangle[node_idx] =\
self.clangle[c_idx]
# Figure out "arc" endpoints for the circular layout
# NOTE: As with the "vertical lines" for internal nodes in the
# rectangular layout, these arcs will be drawn for nodes with
# only one child. Here, this case would mean that the
# highest_child_clangle would equal the lowest_child_clangle,
# so arcx0 would equal arcx1 and arcy0 would equal arcy1. So
# nothing should show up (but it may be worth addressing this
# in the future).
self.arcx0[node_idx] = self.clradius[node_idx] * \
np.cos(
self.highest_child_clangle[node_idx])
self.arcy0[node_idx] = self.clradius[node_idx] * \
np.sin(
self.highest_child_clangle[node_idx])
self.arcx1[node_idx] = self.clradius[node_idx] * \
np.cos(
self.lowest_child_clangle[node_idx])
self.arcy1[node_idx] = self.clradius[node_idx] * \
np.sin(
self.lowest_child_clangle[node_idx])
self.arcx0[node_idx] *= x_scaling_factor
self.arcy0[node_idx] *= y_scaling_factor
self.arcx1[node_idx] *= x_scaling_factor
self.arcy1[node_idx] *= y_scaling_factor
return "Circular", "c1"
def layout_unrooted(self, width, height):
""" Find best scaling factor for fitting the tree in the figure.
This method will find the best orientation and scaling possible to
fit the tree within the dimensions specified by width and height, using
an unrooted layout algorithm.
Following this algorithm, nodes' unrooted layout coordinates are
accessible at [node].x2 and [node].y2.
Parameters
----------
width : float
width of the canvas
height : float
height of the canvas
Returns
-------
tuple of (str, str)
the layout name ("Unrooted") and the suffix of the coordinate attributes ("2").
Notes
-----
"""
# Recall that 360 degrees is equal to (2 * pi) radians.
# You can think of this variable as "the maximum angle we can 'give' to
# each leaf of the tree".
angle = (2 * np.pi) / self.leafcounts[0]
best_scale = 0
for i in range(60):
direction = i / 60.0 * np.pi
(max_x, min_x, max_y, min_y) = self.update_unrooted_coords(
1.0, 0, 0, direction, angle)
x_diff = max_x - min_x
width_min = 0
if x_diff != 0:
width_min = float(width) / x_diff
y_diff = max_y - min_y
height_min = 0
if y_diff != 0:
height_min = float(height) / y_diff
scale = min(width_min, height_min)
scale *= 0.95 # extra margin for labels
if scale >= best_scale:
best_scale = scale
mid_x = width / 2 - ((max_x + min_x) / 2) * scale
mid_y = height / 2 - ((max_y + min_y) / 2) * scale
best_args = (scale, mid_x, mid_y, direction, angle)
self.update_unrooted_coords(*best_args)
return "Unrooted", "2"
def update_unrooted_coords(self, s, x1, y1, a, da):
""" Update x, y coordinates of tree nodes in canvas.
This function will update the x1, y1, x2, y2, and angle attributes
for all of the nodes within the tree. Note that (once the unrooted
layout has finished) all that is really used are the x2 and y2
attributes.
In a server-based version of Empress, this could be applied when
the tree becomes modified (i.e. pruning or collapsing) and the
resulting coordinates would be modified to reflect the changes
to the tree structure. (In practice, we just run this once on the
Python side of things in order to precompute the layout.)
Parameters
----------
s : float
scaling
x1 : float
x midpoint
y1 : float
y midpoint
a : float
angle (radians)
da : float
angle resolution (radians)
Returns
-------
tuple of float
(max_x, min_x, max_y, min_y) over the updated node coordinates.
"""
max_x = float('-inf')
min_x = float('inf')
max_y = float('-inf')
min_y = float('inf')
# Calculate the root's coordinates/angle.
# Constant angle algorithm. Should add maximum daylight step.
x2 = x1 + self.bp_tree.length(0) * s * np.sin(a)
y2 = y1 + self.bp_tree.length(0) * s * np.cos(a)
(self.x1[0], self.y1[0], self.x2[0], self.y2[0], self.angle[0]) = \
(x1, y1, x2, y2, a)
node_indices = [node_idx for node_idx in
self.postorder(include_self=False)]
node_indices.reverse()
# for node in self.preorder(include_self=False):
for node_idx in node_indices:
x1 = self.x2[self.bp_tree.parent(node_idx)]
y1 = self.y2[self.bp_tree.parent(node_idx)]
# Start from the parent's angle
a = self.angle[self.bp_tree.parent(node_idx)]
# Shift to the start of the parent's angular span
a = a - self.leafcounts[self.bp_tree.parent(node_idx)] * da / 2
# Advance past earlier siblings to find this node's slot within the span
for sib_idx in self.children(self.bp_tree.parent(node_idx)):
if sib_idx != node_idx:
a += self.leafcounts[sib_idx] * da
else:
a += (self.leafcounts[node_idx] * da) / 2
break
# Constant angle algorithm. Should add maximum daylight step.
x2 = x1 + self.bp_tree.length(node_idx) * s * np.sin(a)
y2 = y1 + self.bp_tree.length(node_idx) * s * np.cos(a)
(self.x1[node_idx], self.y1[node_idx], self.x2[node_idx],
self.y2[node_idx], self.angle[node_idx]) = (x1, y1, x2, y2, a)
max_x, min_x = max(max_x, x2), min(min_x, x2)
max_y, min_y = max(max_y, y2), min(min_y, y2)
return (max_x, min_x, max_y, min_y)
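# The following is an illustrative, standalone sketch (not part of the
# original layout code) of the constant-angle placement performed in
# update_unrooted_coords above: a child's endpoint is offset from its
# parent's endpoint by its scaled branch length along the direction given
# by its angle. All argument names and values are hypothetical.
def _example_place_child_unrooted(parent_x, parent_y, branch_length, scale,
                                  angle):
    import numpy as np
    child_x = parent_x + branch_length * scale * np.sin(angle)
    child_y = parent_y + branch_length * scale * np.cos(angle)
    return child_x, child_y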
def isleaf(bp_tree, i):
""" Checks whether the node whose opening parenthesis is at position i is a leaf
Parameters
----------
bp_tree : bp.BP
Input BP tree
i : int
The query node index
Returns
-------
bool
True if this is a leaf node, False otherwise
"""
return bp_tree.B[i] and (not bp_tree.B[i + 1])
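# Illustrative, standalone sketch (not part of the original module) of the
# polar -> Cartesian conversion used in layout_circular above: a non-root
# node's line starts at its parent's radius but at the node's own angle,
# and ends at the node's own radius and angle. Argument names are
# hypothetical.
def _example_circular_line_endpoints(parent_radius, node_radius, node_angle):
    import numpy as np
    x0 = parent_radius * np.cos(node_angle)
    y0 = parent_radius * np.sin(node_angle)
    x1 = node_radius * np.cos(node_angle)
    y1 = node_radius * np.sin(node_angle)
    return (x0, y0), (x1, y1)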
|
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utility functions for building models."""
from __future__ import print_function
import collections
import os
import time
import numpy as np
import six
import tensorflow as tf
from tensorflow.python.ops import lookup_ops
from .utils import iterator_utils
from .utils import misc_utils as utils
from .utils import vocab_utils
__all__ = [
"get_initializer", "get_device_str", "create_train_model",
"create_eval_model", "create_infer_model",
"create_emb_for_encoder_and_decoder", "create_rnn_cell", "gradient_clip",
"create_or_load_model", "load_model", "avg_checkpoints",
"compute_perplexity"
]
# If a vocab size is greater than this value, put the embedding on cpu instead
VOCAB_SIZE_THRESHOLD_CPU = 50000
def get_initializer(init_op, seed=None, init_weight=None):
"""Create an initializer. init_weight is only for uniform."""
if init_op == "uniform":
assert init_weight
return tf.random_uniform_initializer(
-init_weight, init_weight, seed=seed)
elif init_op == "glorot_normal":
return tf.keras.initializers.glorot_normal(
seed=seed)
elif init_op == "glorot_uniform":
return tf.keras.initializers.glorot_uniform(
seed=seed)
else:
raise ValueError("Unknown init_op %s" % init_op)
def get_device_str(device_id, num_gpus):
"""Return a device string for multi-GPU setup."""
if num_gpus == 0:
return "/cpu:0"
device_str_output = "/gpu:%d" % (device_id % num_gpus)
return device_str_output
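# Minimal illustration (not part of the original module) of the round-robin
# placement get_device_str implements: with 2 GPUs, layers 0..3 alternate
# between /gpu:0 and /gpu:1.
_example_layer_devices = [get_device_str(i, num_gpus=2) for i in range(4)]
# -> ['/gpu:0', '/gpu:1', '/gpu:0', '/gpu:1']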
class ExtraArgs(collections.namedtuple(
"ExtraArgs", ("single_cell_fn", "model_device_fn",
"attention_mechanism_fn", "encoder_emb_lookup_fn"))):
pass
class TrainModel(
collections.namedtuple("TrainModel", ("graph", "model", "iterator",
"skip_count_placeholder"))):
pass
def create_train_model(
model_creator, hparams, scope=None, num_workers=1, jobid=0,
extra_args=None):
"""Create train graph, model, and iterator."""
src_file = "%s.%s" % (hparams.train_prefix, hparams.src)
tgt_file = "%s.%s" % (hparams.train_prefix, hparams.tgt)
src_vocab_file = hparams.src_vocab_file
tgt_vocab_file = hparams.tgt_vocab_file
graph = tf.Graph()
with graph.as_default(), tf.container(scope or "train"):
src_vocab_table, tgt_vocab_table = vocab_utils.create_vocab_tables(
src_vocab_file, tgt_vocab_file, hparams.share_vocab)
src_dataset = tf.data.TextLineDataset(tf.gfile.Glob(src_file))
tgt_dataset = tf.data.TextLineDataset(tf.gfile.Glob(tgt_file))
skip_count_placeholder = tf.placeholder(shape=(), dtype=tf.int64)
iterator = iterator_utils.get_iterator(
src_dataset,
tgt_dataset,
src_vocab_table,
tgt_vocab_table,
batch_size=hparams.batch_size,
sos=hparams.sos,
eos=hparams.eos,
random_seed=hparams.random_seed,
num_buckets=hparams.num_buckets,
src_max_len=hparams.src_max_len,
tgt_max_len=hparams.tgt_max_len,
skip_count=skip_count_placeholder,
num_shards=num_workers,
shard_index=jobid,
use_char_encode=hparams.use_char_encode)
# Note: One can set model_device_fn to
# `tf.train.replica_device_setter(ps_tasks)` for distributed training.
model_device_fn = None
if extra_args: model_device_fn = extra_args.model_device_fn
with tf.device(model_device_fn):
model = model_creator(
hparams,
iterator=iterator,
mode=tf.contrib.learn.ModeKeys.TRAIN,
source_vocab_table=src_vocab_table,
target_vocab_table=tgt_vocab_table,
scope=scope,
extra_args=extra_args)
return TrainModel(
graph=graph,
model=model,
iterator=iterator,
skip_count_placeholder=skip_count_placeholder)
class EvalModel(
collections.namedtuple("EvalModel",
("graph", "model", "src_file_placeholder",
"tgt_file_placeholder", "iterator"))):
pass
def create_eval_model(model_creator, hparams, scope=None, extra_args=None):
"""Create eval graph, model, src/tgt file holders, and iterator."""
src_vocab_file = hparams.src_vocab_file
tgt_vocab_file = hparams.tgt_vocab_file
graph = tf.Graph()
with graph.as_default(), tf.container(scope or "eval"):
src_vocab_table, tgt_vocab_table = vocab_utils.create_vocab_tables(
src_vocab_file, tgt_vocab_file, hparams.share_vocab)
reverse_tgt_vocab_table = lookup_ops.index_to_string_table_from_file(
tgt_vocab_file, default_value=vocab_utils.UNK)
src_file_placeholder = tf.placeholder(shape=(), dtype=tf.string)
tgt_file_placeholder = tf.placeholder(shape=(), dtype=tf.string)
src_dataset = tf.data.TextLineDataset(src_file_placeholder)
tgt_dataset = tf.data.TextLineDataset(tgt_file_placeholder)
iterator = iterator_utils.get_iterator(
src_dataset,
tgt_dataset,
src_vocab_table,
tgt_vocab_table,
hparams.batch_size,
sos=hparams.sos,
eos=hparams.eos,
random_seed=hparams.random_seed,
num_buckets=hparams.num_buckets,
src_max_len=hparams.src_max_len_infer,
tgt_max_len=hparams.tgt_max_len_infer,
use_char_encode=hparams.use_char_encode)
model = model_creator(
hparams,
iterator=iterator,
mode=tf.contrib.learn.ModeKeys.EVAL,
source_vocab_table=src_vocab_table,
target_vocab_table=tgt_vocab_table,
reverse_target_vocab_table=reverse_tgt_vocab_table,
scope=scope,
extra_args=extra_args)
return EvalModel(
graph=graph,
model=model,
src_file_placeholder=src_file_placeholder,
tgt_file_placeholder=tgt_file_placeholder,
iterator=iterator)
class InferModel(
collections.namedtuple("InferModel",
("graph", "model", "src_placeholder",
"batch_size_placeholder", "iterator"))):
pass
def create_infer_model(model_creator, hparams, scope=None, extra_args=None):
"""Create inference model."""
graph = tf.Graph()
src_vocab_file = hparams.src_vocab_file
tgt_vocab_file = hparams.tgt_vocab_file
with graph.as_default(), tf.container(scope or "infer"):
src_vocab_table, tgt_vocab_table = vocab_utils.create_vocab_tables(
src_vocab_file, tgt_vocab_file, hparams.share_vocab)
reverse_tgt_vocab_table = lookup_ops.index_to_string_table_from_file(
tgt_vocab_file, default_value=vocab_utils.UNK)
src_placeholder = tf.placeholder(shape=[None], dtype=tf.string)
batch_size_placeholder = tf.placeholder(shape=[], dtype=tf.int64)
src_dataset = tf.data.Dataset.from_tensor_slices(
src_placeholder)
iterator = iterator_utils.get_infer_iterator(
src_dataset,
src_vocab_table,
batch_size=batch_size_placeholder,
eos=hparams.eos,
src_max_len=hparams.src_max_len_infer,
use_char_encode=hparams.use_char_encode)
model = model_creator(
hparams,
iterator=iterator,
mode=tf.contrib.learn.ModeKeys.INFER,
source_vocab_table=src_vocab_table,
target_vocab_table=tgt_vocab_table,
reverse_target_vocab_table=reverse_tgt_vocab_table,
scope=scope,
extra_args=extra_args)
return InferModel(
graph=graph,
model=model,
src_placeholder=src_placeholder,
batch_size_placeholder=batch_size_placeholder,
iterator=iterator)
def _get_embed_device(vocab_size):
"""Decide on which device to place an embed matrix given its vocab size."""
if vocab_size > VOCAB_SIZE_THRESHOLD_CPU:
return "/cpu:0"
else:
return "/gpu:0"
def _create_pretrained_emb_from_txt(
vocab_file, embed_file, num_trainable_tokens=3, dtype=tf.float32,
scope=None):
"""Load a pretrained embedding from embed_file and return an embedding matrix.
Args:
embed_file: Path to a GloVe-formatted embedding txt file.
num_trainable_tokens: Make the first n tokens in the vocab file trainable
variables. Default is 3, which covers "<unk>", "<s>" and "</s>".
"""
vocab, _ = vocab_utils.load_vocab(vocab_file)
trainable_tokens = vocab[:num_trainable_tokens]
utils.print_out("# Using pretrained embedding: %s." % embed_file)
utils.print_out(" with trainable tokens: ")
emb_dict, emb_size = vocab_utils.load_embed_txt(embed_file)
for token in trainable_tokens:
utils.print_out(" %s" % token)
if token not in emb_dict:
emb_dict[token] = [0.0] * emb_size
emb_mat = np.array(
[emb_dict[token] for token in vocab], dtype=dtype.as_numpy_dtype())
emb_mat = tf.constant(emb_mat)
emb_mat_const = tf.slice(emb_mat, [num_trainable_tokens, 0], [-1, -1])
with tf.variable_scope(scope or "pretrain_embeddings", dtype=dtype) as scope:
with tf.device(_get_embed_device(num_trainable_tokens)):
emb_mat_var = tf.get_variable(
"emb_mat_var", [num_trainable_tokens, emb_size])
return tf.concat([emb_mat_var, emb_mat_const], 0)
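# A hedged numpy-only sketch of the split performed above: the first
# num_trainable_tokens rows of the embedding matrix become a trainable
# variable, the remaining rows stay constant, and the full matrix is the
# row-wise concatenation of the two parts. This helper is illustrative and
# not used elsewhere in the module.
def _example_split_pretrained_embedding(emb_mat, num_trainable_tokens=3):
    trainable_part = emb_mat[:num_trainable_tokens]  # becomes a tf.Variable above
    constant_part = emb_mat[num_trainable_tokens:]   # stays a tf constant above
    return np.concatenate([trainable_part, constant_part], axis=0)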
def _create_or_load_embed(embed_name, vocab_file, embed_file,
vocab_size, embed_size, dtype):
"""Create a new or load an existing embedding matrix."""
if vocab_file and embed_file:
embedding = _create_pretrained_emb_from_txt(vocab_file, embed_file)
else:
with tf.device(_get_embed_device(vocab_size)):
embedding = tf.get_variable(
embed_name, [vocab_size, embed_size], dtype)
return embedding
def create_emb_for_encoder_and_decoder(share_vocab,
src_vocab_size,
tgt_vocab_size,
src_embed_size,
tgt_embed_size,
dtype=tf.float32,
num_enc_partitions=0,
num_dec_partitions=0,
src_vocab_file=None,
tgt_vocab_file=None,
src_embed_file=None,
tgt_embed_file=None,
use_char_encode=False,
scope=None):
"""Create embedding matrix for both encoder and decoder.
Args:
share_vocab: A boolean. Whether to share embedding matrix for both
encoder and decoder.
src_vocab_size: An integer. The source vocab size.
tgt_vocab_size: An integer. The target vocab size.
src_embed_size: An integer. The embedding dimension for the encoder's
embedding.
tgt_embed_size: An integer. The embedding dimension for the decoder's
embedding.
dtype: dtype of the embedding matrix. Default to float32.
num_enc_partitions: number of partitions used for the encoder's embedding
vars.
num_dec_partitions: number of partitions used for the decoder's embedding
vars.
scope: VariableScope for the created subgraph. Default to "embeddings".
Returns:
embedding_encoder: Encoder's embedding matrix.
embedding_decoder: Decoder's embedding matrix.
Raises:
ValueError: if share_vocab is set but source and target have different
vocab sizes.
"""
if num_enc_partitions <= 1:
enc_partitioner = None
else:
# Note: num_partitions > 1 is required for distributed training because
# embedding_lookup tries to colocate a single-partition embedding variable
# with its lookup ops, which may cause embedding variables to be placed on
# worker jobs.
enc_partitioner = tf.fixed_size_partitioner(num_enc_partitions)
if num_dec_partitions <= 1:
dec_partitioner = None
else:
# Note: num_partitions > 1 is required for distributed training because
# embedding_lookup tries to colocate a single-partition embedding variable
# with its lookup ops, which may cause embedding variables to be placed on
# worker jobs.
dec_partitioner = tf.fixed_size_partitioner(num_dec_partitions)
if src_embed_file and enc_partitioner:
raise ValueError(
"Can't set num_enc_partitions > 1 when using pretrained encoder "
"embedding")
if tgt_embed_file and dec_partitioner:
raise ValueError(
"Can't set num_dec_partitions > 1 when using pretrained decoder "
"embedding")
with tf.variable_scope(
scope or "embeddings", dtype=dtype, partitioner=enc_partitioner) as scope:
# Share embedding
if share_vocab:
if src_vocab_size != tgt_vocab_size:
raise ValueError("Share embedding but different src/tgt vocab sizes"
" %d vs. %d" % (src_vocab_size, tgt_vocab_size))
assert src_embed_size == tgt_embed_size
utils.print_out("# Use the same embedding for source and target")
vocab_file = src_vocab_file or tgt_vocab_file
embed_file = src_embed_file or tgt_embed_file
embedding_encoder = _create_or_load_embed(
"embedding_share", vocab_file, embed_file,
src_vocab_size, src_embed_size, dtype)
embedding_decoder = embedding_encoder
else:
if not use_char_encode:
with tf.variable_scope("encoder", partitioner=enc_partitioner):
embedding_encoder = _create_or_load_embed(
"embedding_encoder", src_vocab_file, src_embed_file,
src_vocab_size, src_embed_size, dtype)
else:
embedding_encoder = None
with tf.variable_scope("decoder", partitioner=dec_partitioner):
embedding_decoder = _create_or_load_embed(
"embedding_decoder", tgt_vocab_file, tgt_embed_file,
tgt_vocab_size, tgt_embed_size, dtype)
return embedding_encoder, embedding_decoder
def _single_cell(unit_type, num_units, forget_bias, dropout, mode,
residual_connection=False, device_str=None, residual_fn=None):
"""Create an instance of a single RNN cell."""
# dropout (= 1 - keep_prob) is set to 0 during eval and infer
dropout = dropout if mode == tf.contrib.learn.ModeKeys.TRAIN else 0.0
# Cell Type
if unit_type == "lstm":
utils.print_out(" LSTM, forget_bias=%g" % forget_bias, new_line=False)
single_cell = tf.contrib.rnn.BasicLSTMCell(
num_units,
forget_bias=forget_bias)
elif unit_type == "gru":
utils.print_out(" GRU", new_line=False)
single_cell = tf.contrib.rnn.GRUCell(num_units)
elif unit_type == "layer_norm_lstm":
utils.print_out(" Layer Normalized LSTM, forget_bias=%g" % forget_bias,
new_line=False)
single_cell = tf.contrib.rnn.LayerNormBasicLSTMCell(
num_units,
forget_bias=forget_bias,
layer_norm=True)
elif unit_type == "nas":
utils.print_out(" NASCell", new_line=False)
single_cell = tf.contrib.rnn.NASCell(num_units)
else:
raise ValueError("Unknown unit type %s!" % unit_type)
# Dropout (= 1 - keep_prob)
if dropout > 0.0:
single_cell = tf.contrib.rnn.DropoutWrapper(
cell=single_cell, input_keep_prob=(1.0 - dropout))
utils.print_out(" %s, dropout=%g " % (type(single_cell).__name__, dropout),
new_line=False)
# Residual
if residual_connection:
single_cell = tf.contrib.rnn.ResidualWrapper(
single_cell, residual_fn=residual_fn)
utils.print_out(" %s" % type(single_cell).__name__, new_line=False)
# Device Wrapper
if device_str:
single_cell = tf.contrib.rnn.DeviceWrapper(single_cell, device_str)
utils.print_out(" %s, device=%s" %
(type(single_cell).__name__, device_str), new_line=False)
return single_cell
def _cell_list(unit_type, num_units, num_layers, num_residual_layers,
forget_bias, dropout, mode, num_gpus, base_gpu=0,
single_cell_fn=None, residual_fn=None):
"""Create a list of RNN cells."""
if not single_cell_fn:
single_cell_fn = _single_cell
# Multi-GPU
cell_list = []
for i in range(num_layers):
utils.print_out(" cell %d" % i, new_line=False)
single_cell = single_cell_fn(
unit_type=unit_type,
num_units=num_units,
forget_bias=forget_bias,
dropout=dropout,
mode=mode,
residual_connection=(i >= num_layers - num_residual_layers),
device_str=get_device_str(i + base_gpu, num_gpus),
residual_fn=residual_fn
)
utils.print_out("")
cell_list.append(single_cell)
return cell_list
def create_rnn_cell(unit_type, num_units, num_layers, num_residual_layers,
forget_bias, dropout, mode, num_gpus, base_gpu=0,
single_cell_fn=None):
"""Create multi-layer RNN cell.
Args:
unit_type: string representing the unit type, e.g. "lstm".
num_units: the depth of each unit.
num_layers: number of cells.
num_residual_layers: Number of residual layers from top to bottom. For
example, if `num_layers=4` and `num_residual_layers=2`, the last 2 RNN
cells in the returned list will be wrapped with `ResidualWrapper`.
forget_bias: the initial forget bias of the RNNCell(s).
dropout: floating point value between 0.0 and 1.0:
the probability of dropout. This is ignored if `mode != TRAIN`.
mode: either tf.contrib.learn.ModeKeys.TRAIN/EVAL/INFER
num_gpus: The number of gpus to use when performing round-robin
placement of layers.
base_gpu: The gpu device id to use for the first RNN cell in the
returned list. The i-th RNN cell will use `(base_gpu + i) % num_gpus`
as its device id.
single_cell_fn: allow for adding customized cell.
When not specified, we default to model_helper._single_cell
Returns:
An `RNNCell` instance.
"""
cell_list = _cell_list(unit_type=unit_type,
num_units=num_units,
num_layers=num_layers,
num_residual_layers=num_residual_layers,
forget_bias=forget_bias,
dropout=dropout,
mode=mode,
num_gpus=num_gpus,
base_gpu=base_gpu,
single_cell_fn=single_cell_fn)
if len(cell_list) == 1: # Single layer.
return cell_list[0]
else: # Multi layers
return tf.contrib.rnn.MultiRNNCell(cell_list)
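# Hedged usage sketch (not part of the original API): build a 4-layer LSTM
# encoder cell where the top 2 layers are residual, spread round-robin over
# 2 GPUs. The hyperparameter values are illustrative only.
def _example_build_encoder_cell():
    return create_rnn_cell(
        unit_type="lstm",
        num_units=512,
        num_layers=4,
        num_residual_layers=2,
        forget_bias=1.0,
        dropout=0.2,
        mode=tf.contrib.learn.ModeKeys.TRAIN,
        num_gpus=2)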
def gradient_clip(gradients, max_gradient_norm):
"""Clipping gradients of a model."""
clipped_gradients, gradient_norm = tf.clip_by_global_norm(
gradients, max_gradient_norm)
gradient_norm_summary = [tf.summary.scalar("grad_norm", gradient_norm)]
gradient_norm_summary.append(
tf.summary.scalar("clipped_gradient", tf.global_norm(clipped_gradients)))
return clipped_gradients, gradient_norm_summary, gradient_norm
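# Hedged sketch (illustrative names, not part of the original module) of how
# gradient_clip is typically combined with an optimizer: compute gradients,
# clip them by global norm, then apply the clipped gradients.
def _example_clip_and_apply(loss, params, learning_rate, max_gradient_norm=5.0):
    gradients = tf.gradients(loss, params)
    clipped_grads, grad_norm_summary, grad_norm = gradient_clip(
        gradients, max_gradient_norm=max_gradient_norm)
    opt = tf.train.GradientDescentOptimizer(learning_rate)
    update_op = opt.apply_gradients(list(zip(clipped_grads, params)))
    return update_op, grad_norm_summary, grad_norm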
def print_variables_in_ckpt(ckpt_path):
"""Print a list of variables in a checkpoint together with their shapes."""
utils.print_out("# Variables in ckpt %s" % ckpt_path)
reader = tf.train.NewCheckpointReader(ckpt_path)
variable_map = reader.get_variable_to_shape_map()
for key in sorted(variable_map.keys()):
utils.print_out(" %s: %s" % (key, variable_map[key]))
def load_model(model, ckpt_path, session, name):
"""Load model from a checkpoint."""
start_time = time.time()
try:
model.saver.restore(session, ckpt_path)
except tf.errors.NotFoundError as e:
utils.print_out("Can't load checkpoint")
print_variables_in_ckpt(ckpt_path)
utils.print_out("%s" % str(e))
session.run(tf.tables_initializer())
utils.print_out(
" loaded %s model parameters from %s, time %.2fs" %
(name, ckpt_path, time.time() - start_time))
return model
def avg_checkpoints(model_dir, num_last_checkpoints, global_step,
global_step_name):
"""Average the last N checkpoints in the model_dir."""
checkpoint_state = tf.train.get_checkpoint_state(model_dir)
if not checkpoint_state:
utils.print_out("# No checkpoint file found in directory: %s" % model_dir)
return None
# Checkpoints are ordered from oldest to newest.
checkpoints = (
checkpoint_state.all_model_checkpoint_paths[-num_last_checkpoints:])
if len(checkpoints) < num_last_checkpoints:
utils.print_out(
"# Skipping averaging checkpoints because not enough checkpoints are "
"available."
)
return None
avg_model_dir = os.path.join(model_dir, "avg_checkpoints")
if not tf.gfile.Exists(avg_model_dir):
utils.print_out(
"# Creating new directory %s for saving averaged checkpoints." %
avg_model_dir)
tf.gfile.MakeDirs(avg_model_dir)
utils.print_out("# Reading and averaging variables in checkpoints:")
var_list = tf.contrib.framework.list_variables(checkpoints[0])
var_values, var_dtypes = {}, {}
for (name, shape) in var_list:
if name != global_step_name:
var_values[name] = np.zeros(shape)
for checkpoint in checkpoints:
utils.print_out(" %s" % checkpoint)
reader = tf.contrib.framework.load_checkpoint(checkpoint)
for name in var_values:
tensor = reader.get_tensor(name)
var_dtypes[name] = tensor.dtype
var_values[name] += tensor
for name in var_values:
var_values[name] /= len(checkpoints)
# Build a graph with the same variables as in the checkpoints, and save the
# averaged variables into the avg_model_dir.
with tf.Graph().as_default():
tf_vars = [
tf.get_variable(v, shape=var_values[v].shape, dtype=var_dtypes[v])
for v in var_values
]
placeholders = [tf.placeholder(v.dtype, shape=v.shape) for v in tf_vars]
assign_ops = [tf.assign(v, p) for (v, p) in zip(tf_vars, placeholders)]
global_step_var = tf.Variable(
global_step, name=global_step_name, trainable=False)
saver = tf.train.Saver(tf.all_variables())
with tf.Session() as sess:
sess.run(tf.initialize_all_variables())
for p, assign_op, (name, value) in zip(placeholders, assign_ops,
six.iteritems(var_values)):
sess.run(assign_op, {p: value})
# Use the built saver to save the averaged checkpoint. Only keep 1
# checkpoint and the best checkpoint will be moved to avg_best_metric_dir.
saver.save(
sess,
os.path.join(avg_model_dir, "translate.ckpt"))
return avg_model_dir
def create_or_load_model(model, model_dir, session, name):
"""Create translation model and initialize or load parameters in session."""
latest_ckpt = tf.train.latest_checkpoint(model_dir)
if latest_ckpt:
model = load_model(model, latest_ckpt, session, name)
else:
start_time = time.time()
session.run(tf.global_variables_initializer())
session.run(tf.tables_initializer())
utils.print_out(" created %s model with fresh parameters, time %.2fs" %
(name, time.time() - start_time))
global_step = model.global_step.eval(session=session)
return model, global_step
def compute_perplexity(model, sess, name):
"""Compute perplexity of the output of the model.
Args:
model: model to compute perplexity for.
sess: tensorflow session to use.
name: name of the batch.
Returns:
The perplexity of the eval outputs.
"""
total_loss = 0
total_predict_count = 0
start_time = time.time()
while True:
try:
output_tuple = model.eval(sess)
total_loss += output_tuple.eval_loss * output_tuple.batch_size
total_predict_count += output_tuple.predict_count
except tf.errors.OutOfRangeError:
break
perplexity = utils.safe_exp(total_loss / total_predict_count)
utils.print_time(" eval %s: perplexity %.2f" % (name, perplexity),
start_time)
return perplexity
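# Minimal sketch of the perplexity arithmetic used above, independent of the
# TF model: perplexity = exp(total_loss / total_predict_count), where
# total_loss is the summed cross-entropy (in nats) over all predicted tokens.
# The helper and its example values are illustrative only.
def _example_perplexity(total_loss, total_predict_count):
    import math
    return math.exp(total_loss / total_predict_count)
# e.g. _example_perplexity(6931.5, 10000) is roughly 2.0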
|
import numpy as np
from tqdm import tqdm, trange
from script.data_handler.Base.BaseDataset import BaseDataset
from script.model.sklearn_like_model.BaseModel import BaseModel
from script.model.sklearn_like_model.Mixin import UnsupervisedMetricCallback
from script.model.sklearn_like_model.NetModule.BaseNetModule import BaseNetModule
from script.model.sklearn_like_model.NetModule.FusionNetStructure import FusionNetModule
from script.model.sklearn_like_model.NetModule.PlaceHolderModule import PlaceHolderModule
from script.model.sklearn_like_model.NetModule.TFDynamicLearningRate import TFDynamicLearningRate
from script.util.Stacker import Stacker
from script.util.tensor_ops import *
class pre_train_Unet(BaseModel):
def __init__(
self,
verbose=10,
learning_rate=0.01,
beta1=0.9,
batch_size=100,
stage=4,
n_classes=2,
capacity=64,
depth=1,
dropout_rate=0.5,
**kwargs
):
BaseModel.__init__(self, verbose, **kwargs)
self.batch_size = batch_size
self.learning_rate = learning_rate
self.beta1 = beta1
self.dropout_rate = dropout_rate
self.capacity = capacity
self.stage = stage
self.n_classes = n_classes
self.depth = depth
def _build_input_shapes(self, shapes):
self.x_ph_module = PlaceHolderModule(shapes['x'], tf.float32, name='x')
ret = {}
ret.update(self.x_ph_module.shape_dict)
return ret
def _build_main_graph(self):
self.Xs = self.x_ph_module.build().placeholder
self.net_module = FusionNetModule(
self.Xs, capacity=self.capacity, depth=self.depth, level=self.stage,
n_classes=self.n_classes, dropout_rate=self.dropout_rate
).build()
self.decode = self.net_module.decode
self.recon_module = reconModule(
self.decode, self.capacity
)
self.recon_module.build()
self._recon = self.recon_module.recon
self._recon = self.decode
self.vars = self.net_module.vars
self.vars += self.recon_module.vars
def _build_loss_ops(self):
self.loss = tf.squared_difference(self.Xs, self._recon, name='loss')
self.loss_mean = tf.reduce_mean(self.loss, name='loss_mean')
def _build_train_ops(self):
self.drl = TFDynamicLearningRate(self.learning_rate)
self.drl.build()
self.train_op = tf.train.AdamOptimizer(
self.drl.learning_rate, self.beta1
).minimize(
loss=self.loss_mean, var_list=self.vars
)
def _train_iter(self, dataset, batch_size):
# self.net_module.set_train(self.sess)
x = dataset.next_batch(self.batch_size)
_ = self.sess.run(self.train_op, {self.Xs: x})
# self.net_module.set_predict(self.sess)
def train_AE(
self, x, epoch=1, batch_size=None, dataset_callback=None,
epoch_pbar=True, iter_pbar=True, epoch_callbacks=None,
):
if not self.is_built:
raise RuntimeError(f'{self} not built')
batch_size = getattr(self, 'batch_size') if batch_size is None else batch_size
dataset = dataset_callback if dataset_callback else BaseDataset(x=x)
metric = None
epoch_pbar = tqdm([i for i in range(1, epoch + 1)]) if epoch_pbar else None
for _ in range(1, epoch + 1):
dataset.shuffle()
iter_pbar = trange if iter_pbar else range
for _ in iter_pbar(int(dataset.size / batch_size)):
self._train_iter(dataset, batch_size)
self.sess.run(self.op_inc_global_epoch)
global_epoch = self.sess.run(self.global_epoch)
if epoch_pbar: epoch_pbar.update(1)
metric = getattr(self, 'metric', None)(x)
if not np.isfinite(metric):  # catches NaN and +/-Inf (training diverged)
tqdm.write(f'train fail, e = {global_epoch}, metric = {metric}')
break
results = []
if epoch_callbacks:
for callback in epoch_callbacks:
result = callback(self, dataset, metric, global_epoch)
results += [result]
break_epoch = False
for result in results:
if result and getattr(result, 'break_epoch', False):
break_epoch = True
if break_epoch: break
if epoch_pbar: epoch_pbar.close()
if dataset_callback: del dataset
return metric
def metric(self, x):
if not getattr(self, '_metric_callback', None):
self._metric_callback = UnsupervisedMetricCallback(
self, self.loss_mean, self.Xs,
)
return self._metric_callback(x)
def update_learning_rate(self, lr):
self.learning_rate = lr
if self.sess is not None:
self.drl.update(self.sess, self.learning_rate)
class reconModule(BaseNetModule):
def __init__(self, x, capacity=None, reuse=False, name=None, verbose=0):
super().__init__(capacity, reuse, name, verbose)
self.x = x
def build(self):
with tf.variable_scope(self.name):
stacker = Stacker(self.x)
stacker.conv2d(1, CONV_FILTER_3311)
self.recon = stacker.sigmoid()
return self
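# Hedged numpy-only sketch (not used by the classes above) of the
# reconstruction objective built in _build_loss_ops: the element-wise squared
# difference between the input and its reconstruction, reduced to a scalar
# mean, which is what loss_mean reports.
def _example_recon_loss(x, recon):
    return np.mean(np.square(x - recon))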
|
"""
# Train a new model starting from pre-trained weights
python3 training.py --dataset=/path/to/dataset --weight=/path/to/pretrained/weight.h5
# Resume training a model
python3 training.py --dataset=/path/to/dataset --continue_train=/path/to/latest/weights.h5
"""
import logging
import warnings
import os
logging.getLogger("tensorflow").setLevel(logging.ERROR)
warnings.filterwarnings('ignore')
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
from PIL import ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True
import sys
import json
import datetime
import numpy as np
import skimage.draw
import cv2
import matplotlib.pyplot as plt
import imgaug
# Root directory of the project
ROOT_DIR = os.getcwd()
from mrcnn.config import Config
from mrcnn import model as modellib, utils
from mrcnn import parse_args
import dataset
############################################################
# Args Configurations
############################################################
args = parse_args.parse_args()
# config parameter
pretrained_weight = os.path.join(ROOT_DIR, args.weight)
dataset_path = os.path.join(ROOT_DIR, args.dataset)
logs = os.path.join(ROOT_DIR, "logs")
if args.continue_train == "None":
continue_train = args.continue_train
else:
continue_train = os.path.join(ROOT_DIR, args.continue_train)
############################################################
# Configurations
############################################################
class CustomConfig(Config):
NAME = "custom_dataset"
IMAGES_PER_GPU = 1
IMAGE_MAX_DIM = 512
NUM_CLASSES = 1 + 4
STEPS_PER_EPOCH = 750
VALIDATION_STEPS = 250
DETECTION_MIN_CONFIDENCE = 0.9
LEARNING_RATE = 0.001
DETECTION_NMS_THRESHOLD = 0.2
TRAIN_ROIS_PER_IMAGE = 200
MAX_GT_INSTANCES = 50
DETECTION_MAX_INSTANCES = 50
############################################################
# Training
############################################################
def train(model):
# Training set.
dataset_train = dataset.CustomDataset()
dataset_train.load_custom(dataset_path, "train")
dataset_train.prepare()
print("Images: {}\nClasses: {}".format(len(dataset_train.image_ids), dataset_train.class_names))
# Validation set
dataset_val = dataset.CustomDataset()
dataset_val.load_custom(dataset_path, "val")
dataset_val.prepare()
print("Images: {}\nClasses: {}".format(len(dataset_val.image_ids), dataset_val.class_names))
augmentation = imgaug.augmenters.Sometimes(0.5, [
imgaug.augmenters.Fliplr(0.5),
imgaug.augmenters.Flipud(0.5)])
model_inference = modellib.MaskRCNN(mode="inference", config=config, model_dir=logs)
# Calculate COCO mAP every 5 epochs, limited to the first 1000 images
mAP_callback = modellib.MeanAveragePrecisionCallback(model, model_inference, dataset_val,
calculate_at_every_X_epoch=5, dataset_limit=1000, verbose=1)
# Training - Stage 1
print("Training network heads")
model.train(dataset_train, dataset_val,
learning_rate=config.LEARNING_RATE,
epochs=20,
layers='heads',
custom_callbacks=[mAP_callback],
augmentation=augmentation)
# print("Fine tune Resnet stage 4 and up")
# model.train(dataset_train, dataset_val,
# learning_rate=config.LEARNING_RATE,
# epochs=60,
# layers='4+',
# custom_callbacks=[mAP_callback],
# augmentation=augmentation)
# print("Fine tune Resnet stage 3 and up")
# model.train(dataset_train, dataset_val,
# learning_rate=config.LEARNING_RATE/10,
# epochs=90,
# layers='3+',
# custom_callbacks=[mAP_callback],
# augmentation=augmentation)
# print("Fine tune all layers")
# model.train(dataset_train, dataset_val,
# learning_rate=config.LEARNING_RATE/100,
# epochs=100,
# layers='all',
# custom_callbacks=[mAP_callback])
# # augmentation=augmentation)
############################################################
# Main
############################################################
if __name__ == '__main__':
print("Pre-trained weight: ", pretrained_weight)
print("Dataset: ", dataset_path)
print("Logs: ", logs)
print("Continue Train: ", continue_train)
# Configurations
config = CustomConfig()
config.display()
# Create model
model = modellib.MaskRCNN(mode="training", config=config,
model_dir=logs)
if continue_train.lower() == "none":
weights_path = pretrained_weight
else:
weights_path = continue_train
# Load weights
print("Loading weights ", weights_path)
if continue_train == "None":
# Exclude the last layers because they require a matching
# number of classes
model.load_weights(weights_path, by_name=True, exclude=[
"mrcnn_class_logits", "mrcnn_bbox_fc",
"mrcnn_bbox", "mrcnn_mask"])
else:
model.load_weights(weights_path, by_name=True)
train(model)
|
# (C) Datadog, Inc. 2018-present
# All rights reserved
# Licensed under Simplified BSD License (see LICENSE)
import time
from collections import defaultdict
import requests
from six import iteritems, itervalues
from six.moves.urllib.parse import urljoin, urlparse
from datadog_checks.base import AgentCheck, is_affirmative, to_string
from .config import from_instance
from .metrics import (
CLUSTER_PENDING_TASKS,
health_stats_for_version,
index_stats_for_version,
node_system_stats_for_version,
pshard_stats_for_version,
slm_stats_for_version,
stats_for_version,
)
class AuthenticationError(requests.exceptions.HTTPError):
"""Authentication Error, unable to reach server"""
class ESCheck(AgentCheck):
HTTP_CONFIG_REMAPPER = {
'aws_service': {'name': 'aws_service', 'default': 'es'},
'ssl_verify': {'name': 'tls_verify'},
'ssl_cert': {'name': 'tls_cert'},
'ssl_key': {'name': 'tls_private_key'},
}
SERVICE_CHECK_CONNECT_NAME = 'elasticsearch.can_connect'
SERVICE_CHECK_CLUSTER_STATUS = 'elasticsearch.cluster_health'
SOURCE_TYPE_NAME = 'elasticsearch'
def __init__(self, name, init_config, instances):
super(ESCheck, self).__init__(name, init_config, instances)
# Host status needs to persist across all checks
self.cluster_status = {}
if self.instance.get('auth_type') == 'aws' and self.instance.get('url'):
self.HTTP_CONFIG_REMAPPER = self.HTTP_CONFIG_REMAPPER.copy()
self.HTTP_CONFIG_REMAPPER['aws_host'] = {
'name': 'aws_host',
'default': urlparse(self.instance['url']).hostname,
}
self._config = from_instance(self.instance)
def check(self, _):
admin_forwarder = self._config.admin_forwarder
jvm_rate = self.instance.get('gc_collectors_as_rate', False)
base_tags = list(self._config.tags)
service_check_tags = list(self._config.service_check_tags)
# Check ES version for this instance and define parameters
# (URLs and metrics) accordingly
try:
version = self._get_es_version()
except AuthenticationError:
self.log.exception("The ElasticSearch credentials are incorrect")
raise
health_url, stats_url, pshard_stats_url, pending_tasks_url, slm_url = self._get_urls(version)
stats_metrics = stats_for_version(version, jvm_rate)
if self._config.cluster_stats:
# Include Node System metrics
stats_metrics.update(node_system_stats_for_version(version))
pshard_stats_metrics = pshard_stats_for_version(version)
# Load stats data.
# This must happen before other URL processing as the cluster name
# is retrieved here, and added to the tag list.
stats_url = self._join_url(stats_url, admin_forwarder)
stats_data = self._get_data(stats_url)
if stats_data.get('cluster_name'):
# retrieve the cluster name from the data, and append it to the
# master tag list.
cluster_tags = ["elastic_cluster:{}".format(stats_data['cluster_name'])]
if not is_affirmative(self.instance.get('disable_legacy_cluster_tag', False)):
cluster_tags.append("cluster_name:{}".format(stats_data['cluster_name']))
base_tags.extend(cluster_tags)
service_check_tags.extend(cluster_tags)
self._process_stats_data(stats_data, stats_metrics, base_tags)
# Load cluster-wise data
# Note: this is a cluster-wide query and might time out.
if self._config.pshard_stats:
send_sc = bubble_ex = not self._config.pshard_graceful_to
pshard_stats_url = self._join_url(pshard_stats_url, admin_forwarder)
try:
pshard_stats_data = self._get_data(pshard_stats_url, send_sc=send_sc)
self._process_pshard_stats_data(pshard_stats_data, pshard_stats_metrics, base_tags)
except requests.ReadTimeout as e:
if bubble_ex:
raise
self.log.warning("Timed out reading pshard-stats from servers (%s) - stats will be missing", e)
# Get Snapshot Lifecycle Management (SLM) policies
if slm_url is not None:
slm_url = self._join_url(slm_url, admin_forwarder)
policy_data = self._get_data(slm_url)
self._process_policy_data(policy_data, version, base_tags)
# Load the health data.
health_url = self._join_url(health_url, admin_forwarder)
health_data = self._get_data(health_url)
self._process_health_data(health_data, version, base_tags, service_check_tags)
if self._config.pending_task_stats:
# Load the pending_tasks data.
pending_tasks_url = self._join_url(pending_tasks_url, admin_forwarder)
pending_tasks_data = self._get_data(pending_tasks_url)
self._process_pending_tasks_data(pending_tasks_data, base_tags)
if self._config.index_stats and version >= [1, 0, 0]:
try:
self._get_index_metrics(admin_forwarder, version, base_tags)
except requests.ReadTimeout as e:
self.log.warning("Timed out reading index stats from servers (%s) - stats will be missing", e)
# If we're here we did not have any ES conn issues
self.service_check(self.SERVICE_CHECK_CONNECT_NAME, AgentCheck.OK, tags=self._config.service_check_tags)
def _get_es_version(self):
"""
Get the running version of elasticsearch.
"""
try:
data = self._get_data(self._config.url, send_sc=False)
raw_version = data['version']['number']
self.set_metadata('version', raw_version)
# pre-release versions of elasticsearch are suffixed with -rcX etc.
# peel that off so that the map below doesn't error out
raw_version = raw_version.split('-')[0]
version = [int(p) for p in raw_version.split('.')[0:3]]
except AuthenticationError:
raise
except Exception as e:
self.warning("Error while trying to get Elasticsearch version from %s %s", self._config.url, e)
version = [1, 0, 0]
self.log.debug("Elasticsearch version is %s", version)
return version
def _join_url(self, url, admin_forwarder=False):
"""
overrides `urlparse.urljoin` since it removes the base URL path
https://docs.python.org/2/library/urlparse.html#urlparse.urljoin
"""
if admin_forwarder:
return self._config.url + url
else:
return urljoin(self._config.url, url)
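# Hedged illustration (standard library only, illustrative URL) of why plain
# urljoin is bypassed when admin_forwarder is set: urljoin drops the path
# component of the base URL, so a forwarder prefix such as "/es" would be
# lost. This helper is not part of the original check.
def _example_join_url_behavior():
    base = "http://localhost:9200/es"
    assert urljoin(base, "/_cluster/health") == \
        "http://localhost:9200/_cluster/health"
    assert base + "/_cluster/health" == \
        "http://localhost:9200/es/_cluster/health"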
def _get_index_metrics(self, admin_forwarder, version, base_tags):
cat_url = '/_cat/indices?format=json&bytes=b'
index_url = self._join_url(cat_url, admin_forwarder)
index_resp = self._get_data(index_url)
index_stats_metrics = index_stats_for_version(version)
health_stat = {'green': 0, 'yellow': 1, 'red': 2}
reversed_health_stat = {'red': 0, 'yellow': 1, 'green': 2}
for idx in index_resp:
tags = base_tags + ['index_name:' + idx['index']]
# we need to remap metric names because the ones from elastic
# contain dots and that would confuse `_process_metric()` (sic)
index_data = {
'docs_count': idx.get('docs.count'),
'docs_deleted': idx.get('docs.deleted'),
'primary_shards': idx.get('pri'),
'replica_shards': idx.get('rep'),
'primary_store_size': idx.get('pri.store.size'),
'store_size': idx.get('store.size'),
'health': idx.get('health'),
}
# Convert the health status value
if index_data['health'] is not None:
status = index_data['health'].lower()
index_data['health'] = health_stat[status]
index_data['health_reverse'] = reversed_health_stat[status]
# Ensure that index_data does not contain None values
for key, value in list(iteritems(index_data)):
if value is None:
del index_data[key]
self.log.warning("The index %s has no metric data for %s", idx['index'], key)
for metric in index_stats_metrics:
# metric description
desc = index_stats_metrics[metric]
self._process_metric(index_data, metric, *desc, tags=tags)
def _get_urls(self, version):
"""
Compute the URLs we need to hit depending on the running ES version
"""
pshard_stats_url = "/_stats"
health_url = "/_cluster/health"
slm_url = None
if version >= [0, 90, 10]:
pending_tasks_url = "/_cluster/pending_tasks"
stats_url = "/_nodes/stats" if self._config.cluster_stats else "/_nodes/_local/stats"
if version < [5, 0, 0]:
# version 5 errors out if the `all` parameter is set
stats_url += "?all=true"
if version >= [7, 4, 0] and self._config.slm_stats:
slm_url = "/_slm/policy"
else:
# legacy
pending_tasks_url = None
stats_url = (
"/_cluster/nodes/stats?all=true"
if self._config.cluster_stats
else "/_cluster/nodes/_local/stats?all=true"
)
return health_url, stats_url, pshard_stats_url, pending_tasks_url, slm_url
def _get_data(self, url, send_sc=True):
"""
Hit a given URL and return the parsed json
"""
resp = None
try:
resp = self.http.get(url)
resp.raise_for_status()
except Exception as e:
# this means we've hit a particular kind of auth error that means the config is broken
if resp and resp.status_code == 400:
raise AuthenticationError("The ElasticSearch credentials are incorrect")
if send_sc:
self.service_check(
self.SERVICE_CHECK_CONNECT_NAME,
AgentCheck.CRITICAL,
message="Error {} when hitting {}".format(e, url),
tags=self._config.service_check_tags,
)
raise
self.log.debug("request to url %s returned: %s", url, resp)
return resp.json()
def _process_pending_tasks_data(self, data, base_tags):
p_tasks = defaultdict(int)
average_time_in_queue = 0
for task in data.get('tasks', []):
p_tasks[task.get('priority')] += 1
average_time_in_queue += task.get('time_in_queue_millis', 0)
total = sum(itervalues(p_tasks))
node_data = {
'pending_task_total': total,
'pending_tasks_priority_high': p_tasks['high'],
'pending_tasks_priority_urgent': p_tasks['urgent'],
# if total is 0 default to 1
'pending_tasks_time_in_queue': average_time_in_queue // (total or 1),
}
for metric in CLUSTER_PENDING_TASKS:
# metric description
desc = CLUSTER_PENDING_TASKS[metric]
self._process_metric(node_data, metric, *desc, tags=base_tags)
def _process_stats_data(self, data, stats_metrics, base_tags):
for node_data in itervalues(data.get('nodes', {})):
metric_hostname = None
metrics_tags = list(base_tags)
# Resolve the node's name
node_name = node_data.get('name')
if node_name:
metrics_tags.append('node_name:{}'.format(node_name))
# Resolve the node's hostname
if self._config.node_name_as_host:
if node_name:
metric_hostname = node_name
elif self._config.cluster_stats:
for k in ['hostname', 'host']:
if k in node_data:
metric_hostname = node_data[k]
break
for metric, desc in iteritems(stats_metrics):
self._process_metric(node_data, metric, *desc, tags=metrics_tags, hostname=metric_hostname)
def _process_pshard_stats_data(self, data, pshard_stats_metrics, base_tags):
for metric, desc in iteritems(pshard_stats_metrics):
self._process_metric(data, metric, *desc, tags=base_tags)
def _process_metric(self, data, metric, xtype, path, xform=None, tags=None, hostname=None):
"""
data: dictionary containing all the stats
metric: datadog metric
xtype: "gauge" or "rate"; determines how the value is submitted
path: corresponding path in data, flattened, e.g. thread_pool.bulk.queue
xform: a lambda to apply to the numerical value
"""
value = data
# Traverse the nested dictionaries
for key in path.split('.'):
if value is not None:
value = value.get(key)
else:
break
if value is not None:
if xform:
value = xform(value)
if xtype == "gauge":
self.gauge(metric, value, tags=tags, hostname=hostname)
else:
self.rate(metric, value, tags=tags, hostname=hostname)
else:
self.log.debug("Metric not found: %s -> %s", path, metric)
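# Hedged standalone sketch of the traversal _process_metric performs: walk a
# nested dict following a dotted path, giving None as soon as a key is
# missing. The sample data is illustrative only.
def _example_dotted_path_lookup():
    data = {"thread_pool": {"bulk": {"queue": 4}}}
    value = data
    for key in "thread_pool.bulk.queue".split('.'):
        value = value.get(key) if value is not None else None
    return value  # -> 4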
def _process_health_data(self, data, version, base_tags, service_check_tags):
cluster_status = data.get('status')
if not self.cluster_status.get(self._config.url):
self.cluster_status[self._config.url] = cluster_status
if cluster_status in ["yellow", "red"]:
event = self._create_event(cluster_status, tags=base_tags)
self.event(event)
if cluster_status != self.cluster_status.get(self._config.url):
self.cluster_status[self._config.url] = cluster_status
event = self._create_event(cluster_status, tags=base_tags)
self.event(event)
cluster_health_metrics = health_stats_for_version(version)
for metric, desc in iteritems(cluster_health_metrics):
self._process_metric(data, metric, *desc, tags=base_tags)
# Process the service check
if cluster_status == 'green':
status = AgentCheck.OK
data['tag'] = "OK"
elif cluster_status == 'yellow':
status = AgentCheck.WARNING
data['tag'] = "WARN"
else:
status = AgentCheck.CRITICAL
data['tag'] = "ALERT"
msg = (
"{tag} on cluster \"{cluster_name}\" "
"| active_shards={active_shards} "
"| initializing_shards={initializing_shards} "
"| relocating_shards={relocating_shards} "
"| unassigned_shards={unassigned_shards} "
"| timed_out={timed_out}".format(
tag=data.get('tag'),
cluster_name=data.get('cluster_name'),
active_shards=data.get('active_shards'),
initializing_shards=data.get('initializing_shards'),
relocating_shards=data.get('relocating_shards'),
unassigned_shards=data.get('unassigned_shards'),
timed_out=data.get('timed_out'),
)
)
self.service_check(self.SERVICE_CHECK_CLUSTER_STATUS, status, message=msg, tags=service_check_tags)
def _process_policy_data(self, data, version, base_tags):
for policy, policy_data in iteritems(data):
repo = policy_data.get('policy', {}).get('repository', 'unknown')
tags = base_tags + ['policy:{}'.format(policy), 'repository:{}'.format(repo)]
slm_stats = slm_stats_for_version(version)
for metric, desc in iteritems(slm_stats):
self._process_metric(policy_data, metric, *desc, tags=tags)
def _create_event(self, status, tags=None):
hostname = to_string(self.hostname)
if status == "red":
alert_type = "error"
msg_title = "{} is {}".format(hostname, status)
elif status == "yellow":
alert_type = "warning"
msg_title = "{} is {}".format(hostname, status)
else:
# then it should be green
alert_type = "success"
msg_title = "{} recovered as {}".format(hostname, status)
msg = "ElasticSearch: {} just reported as {}".format(hostname, status)
return {
'timestamp': int(time.time()),
'event_type': 'elasticsearch',
'host': hostname,
'msg_text': msg,
'msg_title': msg_title,
'alert_type': alert_type,
'source_type_name': "elasticsearch",
'event_object': hostname,
'tags': tags,
}
|
" Userman: UI modules. "
import tornado.web
from . import constants
class Icon(tornado.web.UIModule):
"HTML for an icon, optionally labelled with a title."
template = """<img src="{url}" class="icon" alt="{alt}" title="{title}">"""
def render(self, name, title=None, label=False):
if not isinstance(name, basestring):
name = name[constants.DB_DOCTYPE]
Name = name.capitalize()
value = self.template.format(url=self.handler.static_url(name + '.png'),
alt=Name,
title=title or Name)
if label:
value += ' ' + (title or Name)
return value
class Doc(tornado.web.UIModule):
"HTML for a linkified document."
iconfilename = None
keyfield = '_id'
template = """<a href="{url}">""" \
"""<img src="{src}" class="icon" alt="{title}" title="{title}">""" \
""" {title}</a>"""
def render(self, doc, title=None):
self.doc = doc
return self.template.format(
url=self.handler.reverse_url(self.__class__.__name__.lower(),
doc[self.keyfield]),
src=self.handler.static_url(self.iconfilename),
title=title or self.get_title())
def get_title(self):
try:
return self.doc['name']
except KeyError:
return self.doc['_id']
class User(Doc):
"HTML for a linkified user document."
keyfield = 'email'
@property
def iconfilename(self):
if self.doc['role'] == constants.ADMIN:
return 'admin.png'
else:
return 'user.png'
def get_title(self):
return self.doc['email']
class Team(Doc):
"HTML for a linkified team document."
iconfilename = 'team.png'
keyfield = 'name'
class Service(Doc):
"HTML for a linkified service document."
iconfilename = 'service.png'
keyfield = 'name'
class Submit(tornado.web.UIModule):
"HTML for a submit button with an icon, optionally with a different title."
def render(self, name, title=None, onclick=None):
if onclick:
result = """<button type="submit" onclick="{0}">""".format(onclick)
else:
result = """<button type="submit">"""
Name = name.capitalize()
result += """<img src="{url}" alt="{name}" title="{name}">""".format(
url=self.handler.static_url(name + '.png'),
name=Name)
result += ' ' + (title or Name)
result += '</button>'
return result
class Access(Icon):
"HTML for access flag: 'public' or 'private'."
def render(self, item, label=False):
name = item.get('public') and 'public' or 'private'
return super(Access, self).render(name, label=label)
|
NLOT = CouplingOrder(name = 'NLOT', # ggS triangle nlo couplings
expansion_order = 1,
hierarchy = 2)
NLOTHL = CouplingOrder(name = 'NLOTHL', # ggS triangle nlo couplings for HL
expansion_order = 1,
hierarchy = 2)
NLOTHH = CouplingOrder(name = 'NLOTHH', # ggS triangle nlo couplings for HH
expansion_order = 1,
hierarchy = 2)
NLOTHA = CouplingOrder(name = 'NLOTHA', # ggS triangle nlo couplings for HA
expansion_order = 1,
hierarchy = 2)
NLOB = CouplingOrder(name = 'NLOB', # ggSS box nlo couplings
expansion_order = 1,
hierarchy = 2)
NLOZ = CouplingOrder(name = 'NLOZ', # ggZ nlo couplings
expansion_order = 1,
hierarchy = 2)
NLOEW = CouplingOrder(name = 'NLOEW', # gagaS nlo couplings
expansion_order = 1,
hierarchy = 2)
NLOEWHL = CouplingOrder(name = 'NLOEWHL', # gagaS nlo couplings for HL
expansion_order = 1,
hierarchy = 2)
NLOEWHH = CouplingOrder(name = 'NLOEWHH', # gagaS nlo couplings for HH
expansion_order = 1,
hierarchy = 2)
NLOEWHA = CouplingOrder(name = 'NLOEWHA', # gagaS nlo couplings for HA
expansion_order = 1,
hierarchy = 2)
|
# Copyright 2016, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Service-side implementation of gRPC Python."""
import collections
import enum
import logging
import threading
import time
import grpc
from grpc import _common
from grpc._cython import cygrpc
from grpc.framework.foundation import callable_util
_SHUTDOWN_TAG = 'shutdown'
_REQUEST_CALL_TAG = 'request_call'
_RECEIVE_CLOSE_ON_SERVER_TOKEN = 'receive_close_on_server'
_SEND_INITIAL_METADATA_TOKEN = 'send_initial_metadata'
_RECEIVE_MESSAGE_TOKEN = 'receive_message'
_SEND_MESSAGE_TOKEN = 'send_message'
_SEND_INITIAL_METADATA_AND_SEND_MESSAGE_TOKEN = (
'send_initial_metadata * send_message')
_SEND_STATUS_FROM_SERVER_TOKEN = 'send_status_from_server'
_SEND_INITIAL_METADATA_AND_SEND_STATUS_FROM_SERVER_TOKEN = (
'send_initial_metadata * send_status_from_server')
_OPEN = 'open'
_CLOSED = 'closed'
_CANCELLED = 'cancelled'
_EMPTY_FLAGS = 0
_EMPTY_METADATA = cygrpc.Metadata(())
_UNEXPECTED_EXIT_SERVER_GRACE = 1.0
def _serialized_request(request_event):
return request_event.batch_operations[0].received_message.bytes()
def _application_code(code):
cygrpc_code = _common.STATUS_CODE_TO_CYGRPC_STATUS_CODE.get(code)
return cygrpc.StatusCode.unknown if cygrpc_code is None else cygrpc_code
def _completion_code(state):
if state.code is None:
return cygrpc.StatusCode.ok
else:
return _application_code(state.code)
def _abortion_code(state, code):
if state.code is None:
return code
else:
return _application_code(state.code)
def _details(state):
return b'' if state.details is None else state.details
class _HandlerCallDetails(
collections.namedtuple(
'_HandlerCallDetails', ('method', 'invocation_metadata',)),
grpc.HandlerCallDetails):
pass
class _RPCState(object):
def __init__(self):
self.condition = threading.Condition()
self.due = set()
self.request = None
self.client = _OPEN
self.initial_metadata_allowed = True
self.disable_next_compression = False
self.trailing_metadata = None
self.code = None
self.details = None
self.statused = False
self.rpc_errors = []
self.callbacks = []
def _raise_rpc_error(state):
rpc_error = grpc.RpcError()
state.rpc_errors.append(rpc_error)
raise rpc_error
def _possibly_finish_call(state, token):
state.due.remove(token)
if (state.client is _CANCELLED or state.statused) and not state.due:
callbacks = state.callbacks
state.callbacks = None
return state, callbacks
else:
return None, ()
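# Hedged standalone sketch (test-style, not part of the public surface) of
# the bookkeeping above: callbacks are only released once the RPC has
# terminated (client cancelled or status sent) and every outstanding
# operation token has completed.
def _example_possibly_finish_call():
    class _FakeState(object):
        def __init__(self):
            self.due = {_SEND_MESSAGE_TOKEN, _SEND_STATUS_FROM_SERVER_TOKEN}
            self.client = _OPEN
            self.statused = True
            self.callbacks = []
    state = _FakeState()
    # One token still outstanding: nothing is released yet.
    assert _possibly_finish_call(state, _SEND_MESSAGE_TOKEN) == (None, ())
    # Last token completes: the state and its callbacks are handed back.
    assert _possibly_finish_call(
        state, _SEND_STATUS_FROM_SERVER_TOKEN) == (state, [])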
def _send_status_from_server(state, token):
def send_status_from_server(unused_send_status_from_server_event):
with state.condition:
return _possibly_finish_call(state, token)
return send_status_from_server
def _abort(state, call, code, details):
if state.client is not _CANCELLED:
effective_code = _abortion_code(state, code)
effective_details = details if state.details is None else state.details
if state.initial_metadata_allowed:
operations = (
cygrpc.operation_send_initial_metadata(
_EMPTY_METADATA, _EMPTY_FLAGS),
cygrpc.operation_send_status_from_server(
_common.cygrpc_metadata(state.trailing_metadata), effective_code,
effective_details, _EMPTY_FLAGS),
)
token = _SEND_INITIAL_METADATA_AND_SEND_STATUS_FROM_SERVER_TOKEN
else:
operations = (
cygrpc.operation_send_status_from_server(
_common.cygrpc_metadata(state.trailing_metadata), effective_code,
effective_details, _EMPTY_FLAGS),
)
token = _SEND_STATUS_FROM_SERVER_TOKEN
call.start_server_batch(
cygrpc.Operations(operations),
_send_status_from_server(state, token))
state.statused = True
state.due.add(token)
def _receive_close_on_server(state):
def receive_close_on_server(receive_close_on_server_event):
with state.condition:
if receive_close_on_server_event.batch_operations[0].received_cancelled:
state.client = _CANCELLED
elif state.client is _OPEN:
state.client = _CLOSED
state.condition.notify_all()
return _possibly_finish_call(state, _RECEIVE_CLOSE_ON_SERVER_TOKEN)
return receive_close_on_server
def _receive_message(state, call, request_deserializer):
def receive_message(receive_message_event):
serialized_request = _serialized_request(receive_message_event)
if serialized_request is None:
with state.condition:
if state.client is _OPEN:
state.client = _CLOSED
state.condition.notify_all()
return _possibly_finish_call(state, _RECEIVE_MESSAGE_TOKEN)
else:
request = _common.deserialize(serialized_request, request_deserializer)
with state.condition:
if request is None:
_abort(
state, call, cygrpc.StatusCode.internal,
b'Exception deserializing request!')
else:
state.request = request
state.condition.notify_all()
return _possibly_finish_call(state, _RECEIVE_MESSAGE_TOKEN)
return receive_message
def _send_initial_metadata(state):
def send_initial_metadata(unused_send_initial_metadata_event):
with state.condition:
return _possibly_finish_call(state, _SEND_INITIAL_METADATA_TOKEN)
return send_initial_metadata
def _send_message(state, token):
def send_message(unused_send_message_event):
with state.condition:
state.condition.notify_all()
return _possibly_finish_call(state, token)
return send_message
class _Context(grpc.ServicerContext):
def __init__(self, rpc_event, state, request_deserializer):
self._rpc_event = rpc_event
self._state = state
self._request_deserializer = request_deserializer
def is_active(self):
with self._state.condition:
return self._state.client is not _CANCELLED and not self._state.statused
def time_remaining(self):
return max(self._rpc_event.request_call_details.deadline - time.time(), 0)
def cancel(self):
self._rpc_event.operation_call.cancel()
def add_callback(self, callback):
with self._state.condition:
if self._state.callbacks is None:
return False
else:
self._state.callbacks.append(callback)
return True
def disable_next_message_compression(self):
with self._state.condition:
self._state.disable_next_compression = True
def invocation_metadata(self):
return _common.application_metadata(self._rpc_event.request_metadata)
def peer(self):
return _common.decode(self._rpc_event.operation_call.peer())
def send_initial_metadata(self, initial_metadata):
with self._state.condition:
if self._state.client is _CANCELLED:
_raise_rpc_error(self._state)
else:
if self._state.initial_metadata_allowed:
operation = cygrpc.operation_send_initial_metadata(
_common.cygrpc_metadata(initial_metadata), _EMPTY_FLAGS)
self._rpc_event.operation_call.start_server_batch(
cygrpc.Operations((operation,)),
_send_initial_metadata(self._state))
self._state.initial_metadata_allowed = False
self._state.due.add(_SEND_INITIAL_METADATA_TOKEN)
else:
raise ValueError('Initial metadata no longer allowed!')
def set_trailing_metadata(self, trailing_metadata):
with self._state.condition:
self._state.trailing_metadata = _common.cygrpc_metadata(
trailing_metadata)
def set_code(self, code):
with self._state.condition:
self._state.code = code
def set_details(self, details):
with self._state.condition:
self._state.details = _common.encode(details)
class _RequestIterator(object):
def __init__(self, state, call, request_deserializer):
self._state = state
self._call = call
self._request_deserializer = request_deserializer
def _raise_or_start_receive_message(self):
if self._state.client is _CANCELLED:
_raise_rpc_error(self._state)
elif self._state.client is _CLOSED or self._state.statused:
raise StopIteration()
else:
self._call.start_server_batch(
cygrpc.Operations((cygrpc.operation_receive_message(_EMPTY_FLAGS),)),
_receive_message(self._state, self._call, self._request_deserializer))
self._state.due.add(_RECEIVE_MESSAGE_TOKEN)
def _look_for_request(self):
if self._state.client is _CANCELLED:
_raise_rpc_error(self._state)
elif (self._state.request is None and
_RECEIVE_MESSAGE_TOKEN not in self._state.due):
raise StopIteration()
else:
request = self._state.request
self._state.request = None
return request
def _next(self):
with self._state.condition:
self._raise_or_start_receive_message()
while True:
self._state.condition.wait()
request = self._look_for_request()
if request is not None:
return request
def __iter__(self):
return self
def __next__(self):
return self._next()
def next(self):
return self._next()
def _unary_request(rpc_event, state, request_deserializer):
def unary_request():
with state.condition:
if state.client is _CANCELLED or state.statused:
return None
else:
start_server_batch_result = rpc_event.operation_call.start_server_batch(
cygrpc.Operations(
(cygrpc.operation_receive_message(_EMPTY_FLAGS),)),
_receive_message(
state, rpc_event.operation_call, request_deserializer))
state.due.add(_RECEIVE_MESSAGE_TOKEN)
while True:
state.condition.wait()
if state.request is None:
if state.client is _CLOSED:
details = '"{}" requires exactly one request message.'.format(
rpc_event.request_call_details.method)
_abort(
state, rpc_event.operation_call,
cygrpc.StatusCode.unimplemented, _common.encode(details))
return None
elif state.client is _CANCELLED:
return None
else:
request = state.request
state.request = None
return request
return unary_request
def _call_behavior(rpc_event, state, behavior, argument, request_deserializer):
context = _Context(rpc_event, state, request_deserializer)
try:
return behavior(argument, context), True
except Exception as e: # pylint: disable=broad-except
with state.condition:
if e not in state.rpc_errors:
details = 'Exception calling application: {}'.format(e)
logging.exception(details)
_abort(state, rpc_event.operation_call,
cygrpc.StatusCode.unknown, _common.encode(details))
return None, False
def _take_response_from_response_iterator(rpc_event, state, response_iterator):
try:
return next(response_iterator), True
except StopIteration:
return None, True
except Exception as e: # pylint: disable=broad-except
with state.condition:
if e not in state.rpc_errors:
details = 'Exception iterating responses: {}'.format(e)
logging.exception(details)
_abort(state, rpc_event.operation_call,
cygrpc.StatusCode.unknown, _common.encode(details))
return None, False
def _serialize_response(rpc_event, state, response, response_serializer):
serialized_response = _common.serialize(response, response_serializer)
if serialized_response is None:
with state.condition:
_abort(
state, rpc_event.operation_call, cygrpc.StatusCode.internal,
b'Failed to serialize response!')
return None
else:
return serialized_response
def _send_response(rpc_event, state, serialized_response):
with state.condition:
if state.client is _CANCELLED or state.statused:
return False
else:
if state.initial_metadata_allowed:
operations = (
cygrpc.operation_send_initial_metadata(
_EMPTY_METADATA, _EMPTY_FLAGS),
cygrpc.operation_send_message(serialized_response, _EMPTY_FLAGS),
)
state.initial_metadata_allowed = False
token = _SEND_INITIAL_METADATA_AND_SEND_MESSAGE_TOKEN
else:
operations = (
cygrpc.operation_send_message(serialized_response, _EMPTY_FLAGS),
)
token = _SEND_MESSAGE_TOKEN
rpc_event.operation_call.start_server_batch(
cygrpc.Operations(operations), _send_message(state, token))
state.due.add(token)
while True:
state.condition.wait()
if token not in state.due:
return state.client is not _CANCELLED and not state.statused
def _status(rpc_event, state, serialized_response):
with state.condition:
if state.client is not _CANCELLED:
trailing_metadata = _common.cygrpc_metadata(state.trailing_metadata)
code = _completion_code(state)
details = _details(state)
operations = [
cygrpc.operation_send_status_from_server(
trailing_metadata, code, details, _EMPTY_FLAGS),
]
if state.initial_metadata_allowed:
operations.append(
cygrpc.operation_send_initial_metadata(
_EMPTY_METADATA, _EMPTY_FLAGS))
if serialized_response is not None:
operations.append(cygrpc.operation_send_message(
serialized_response, _EMPTY_FLAGS))
rpc_event.operation_call.start_server_batch(
cygrpc.Operations(operations),
_send_status_from_server(state, _SEND_STATUS_FROM_SERVER_TOKEN))
state.statused = True
state.due.add(_SEND_STATUS_FROM_SERVER_TOKEN)
def _unary_response_in_pool(
rpc_event, state, behavior, argument_thunk, request_deserializer,
response_serializer):
argument = argument_thunk()
if argument is not None:
response, proceed = _call_behavior(
rpc_event, state, behavior, argument, request_deserializer)
if proceed:
serialized_response = _serialize_response(
rpc_event, state, response, response_serializer)
if serialized_response is not None:
_status(rpc_event, state, serialized_response)
def _stream_response_in_pool(
rpc_event, state, behavior, argument_thunk, request_deserializer,
response_serializer):
argument = argument_thunk()
if argument is not None:
response_iterator, proceed = _call_behavior(
rpc_event, state, behavior, argument, request_deserializer)
if proceed:
while True:
response, proceed = _take_response_from_response_iterator(
rpc_event, state, response_iterator)
if proceed:
if response is None:
_status(rpc_event, state, None)
break
else:
serialized_response = _serialize_response(
rpc_event, state, response, response_serializer)
if serialized_response is not None:
proceed = _send_response(rpc_event, state, serialized_response)
if not proceed:
break
else:
break
else:
break
def _handle_unary_unary(rpc_event, state, method_handler, thread_pool):
unary_request = _unary_request(
rpc_event, state, method_handler.request_deserializer)
thread_pool.submit(
_unary_response_in_pool, rpc_event, state, method_handler.unary_unary,
unary_request, method_handler.request_deserializer,
method_handler.response_serializer)
def _handle_unary_stream(rpc_event, state, method_handler, thread_pool):
unary_request = _unary_request(
rpc_event, state, method_handler.request_deserializer)
thread_pool.submit(
_stream_response_in_pool, rpc_event, state, method_handler.unary_stream,
unary_request, method_handler.request_deserializer,
method_handler.response_serializer)
def _handle_stream_unary(rpc_event, state, method_handler, thread_pool):
request_iterator = _RequestIterator(
state, rpc_event.operation_call, method_handler.request_deserializer)
thread_pool.submit(
_unary_response_in_pool, rpc_event, state, method_handler.stream_unary,
lambda: request_iterator, method_handler.request_deserializer,
method_handler.response_serializer)
def _handle_stream_stream(rpc_event, state, method_handler, thread_pool):
request_iterator = _RequestIterator(
state, rpc_event.operation_call, method_handler.request_deserializer)
thread_pool.submit(
_stream_response_in_pool, rpc_event, state, method_handler.stream_stream,
lambda: request_iterator, method_handler.request_deserializer,
method_handler.response_serializer)
def _find_method_handler(rpc_event, generic_handlers):
for generic_handler in generic_handlers:
method_handler = generic_handler.service(
_HandlerCallDetails(
_common.decode(rpc_event.request_call_details.method),
rpc_event.request_metadata))
if method_handler is not None:
return method_handler
else:
return None
def _handle_unrecognized_method(rpc_event):
operations = (
cygrpc.operation_send_initial_metadata(_EMPTY_METADATA, _EMPTY_FLAGS),
cygrpc.operation_receive_close_on_server(_EMPTY_FLAGS),
cygrpc.operation_send_status_from_server(
_EMPTY_METADATA, cygrpc.StatusCode.unimplemented,
b'Method not found!', _EMPTY_FLAGS),
)
rpc_state = _RPCState()
  rpc_event.operation_call.start_server_batch(
      cygrpc.Operations(operations), lambda ignored_event: (rpc_state, (),))
return rpc_state
def _handle_with_method_handler(rpc_event, method_handler, thread_pool):
state = _RPCState()
with state.condition:
rpc_event.operation_call.start_server_batch(
cygrpc.Operations(
(cygrpc.operation_receive_close_on_server(_EMPTY_FLAGS),)),
_receive_close_on_server(state))
state.due.add(_RECEIVE_CLOSE_ON_SERVER_TOKEN)
if method_handler.request_streaming:
if method_handler.response_streaming:
_handle_stream_stream(rpc_event, state, method_handler, thread_pool)
else:
_handle_stream_unary(rpc_event, state, method_handler, thread_pool)
else:
if method_handler.response_streaming:
_handle_unary_stream(rpc_event, state, method_handler, thread_pool)
else:
_handle_unary_unary(rpc_event, state, method_handler, thread_pool)
return state
def _handle_call(rpc_event, generic_handlers, thread_pool):
if rpc_event.request_call_details.method is not None:
method_handler = _find_method_handler(rpc_event, generic_handlers)
if method_handler is None:
return _handle_unrecognized_method(rpc_event)
else:
return _handle_with_method_handler(rpc_event, method_handler, thread_pool)
else:
return None
@enum.unique
class _ServerStage(enum.Enum):
STOPPED = 'stopped'
STARTED = 'started'
GRACE = 'grace'
class _ServerState(object):
def __init__(self, completion_queue, server, generic_handlers, thread_pool):
self.lock = threading.Lock()
self.completion_queue = completion_queue
self.server = server
self.generic_handlers = list(generic_handlers)
self.thread_pool = thread_pool
self.stage = _ServerStage.STOPPED
self.shutdown_events = None
# TODO(https://github.com/grpc/grpc/issues/6597): eliminate these fields.
self.rpc_states = set()
self.due = set()
def _add_generic_handlers(state, generic_handlers):
with state.lock:
state.generic_handlers.extend(generic_handlers)
def _add_insecure_port(state, address):
with state.lock:
return state.server.add_http2_port(address)
def _add_secure_port(state, address, server_credentials):
with state.lock:
return state.server.add_http2_port(address, server_credentials._credentials)
def _request_call(state):
state.server.request_call(
state.completion_queue, state.completion_queue, _REQUEST_CALL_TAG)
state.due.add(_REQUEST_CALL_TAG)
# TODO(https://github.com/grpc/grpc/issues/6597): delete this function.
def _stop_serving(state):
if not state.rpc_states and not state.due:
for shutdown_event in state.shutdown_events:
shutdown_event.set()
state.stage = _ServerStage.STOPPED
return True
else:
return False
def _serve(state):
while True:
event = state.completion_queue.poll()
if event.tag is _SHUTDOWN_TAG:
with state.lock:
state.due.remove(_SHUTDOWN_TAG)
if _stop_serving(state):
return
elif event.tag is _REQUEST_CALL_TAG:
with state.lock:
state.due.remove(_REQUEST_CALL_TAG)
rpc_state = _handle_call(
event, state.generic_handlers, state.thread_pool)
if rpc_state is not None:
state.rpc_states.add(rpc_state)
if state.stage is _ServerStage.STARTED:
_request_call(state)
elif _stop_serving(state):
return
else:
rpc_state, callbacks = event.tag(event)
for callback in callbacks:
callable_util.call_logging_exceptions(
callback, 'Exception calling callback!')
if rpc_state is not None:
with state.lock:
state.rpc_states.remove(rpc_state)
if _stop_serving(state):
return
def _stop(state, grace):
with state.lock:
if state.stage is _ServerStage.STOPPED:
shutdown_event = threading.Event()
shutdown_event.set()
return shutdown_event
else:
if state.stage is _ServerStage.STARTED:
state.server.shutdown(state.completion_queue, _SHUTDOWN_TAG)
state.stage = _ServerStage.GRACE
state.shutdown_events = []
state.due.add(_SHUTDOWN_TAG)
shutdown_event = threading.Event()
state.shutdown_events.append(shutdown_event)
if grace is None:
state.server.cancel_all_calls()
# TODO(https://github.com/grpc/grpc/issues/6597): delete this loop.
for rpc_state in state.rpc_states:
with rpc_state.condition:
rpc_state.client = _CANCELLED
rpc_state.condition.notify_all()
else:
def cancel_all_calls_after_grace():
shutdown_event.wait(timeout=grace)
with state.lock:
state.server.cancel_all_calls()
# TODO(https://github.com/grpc/grpc/issues/6597): delete this loop.
for rpc_state in state.rpc_states:
with rpc_state.condition:
rpc_state.client = _CANCELLED
rpc_state.condition.notify_all()
thread = threading.Thread(target=cancel_all_calls_after_grace)
thread.start()
return shutdown_event
shutdown_event.wait()
return shutdown_event
def _start(state):
with state.lock:
if state.stage is not _ServerStage.STOPPED:
raise ValueError('Cannot start already-started server!')
state.server.start()
state.stage = _ServerStage.STARTED
_request_call(state)
def cleanup_server(timeout):
if timeout is None:
_stop(state, _UNEXPECTED_EXIT_SERVER_GRACE).wait()
else:
_stop(state, timeout).wait()
thread = _common.CleanupThread(
cleanup_server, target=_serve, args=(state,))
thread.start()
class Server(grpc.Server):
def __init__(self, thread_pool, generic_handlers):
completion_queue = cygrpc.CompletionQueue()
server = cygrpc.Server()
server.register_completion_queue(completion_queue)
self._state = _ServerState(
completion_queue, server, generic_handlers, thread_pool)
def add_generic_rpc_handlers(self, generic_rpc_handlers):
_add_generic_handlers(self._state, generic_rpc_handlers)
def add_insecure_port(self, address):
return _add_insecure_port(self._state, _common.encode(address))
def add_secure_port(self, address, server_credentials):
return _add_secure_port(self._state, _common.encode(address), server_credentials)
def start(self):
_start(self._state)
def stop(self, grace):
return _stop(self._state, grace)
def __del__(self):
_stop(self._state, None)
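# A minimal usage sketch (illustrative only, not part of the original module):
# it drives the Server class above directly, whereas applications normally go
# through the public grpc.server() factory. The pool size and port are
# arbitrary example values.
def _example_server_usage():
  from concurrent import futures
  pool = futures.ThreadPoolExecutor(max_workers=4)
  server = Server(pool, generic_handlers=())  # no handlers: every RPC ends UNIMPLEMENTED
  port = server.add_insecure_port('[::]:0')  # port 0 lets the OS pick a free port
  server.start()
  server.stop(None).wait()  # immediate, graceless shutdown
  return port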
|
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Libxevie(AutotoolsPackage, XorgPackage):
"""Xevie - X Event Interception Extension (XEvIE)."""
homepage = "https://cgit.freedesktop.org/xorg/lib/libXevie"
xorg_mirror_path = "lib/libXevie-1.0.3.tar.gz"
version('1.0.3', sha256='3759bb1f7fdade13ed99bfc05c0717bc42ce3f187e7da4eef80beddf5e461258')
depends_on('libx11')
depends_on('libxext')
depends_on('xproto')
depends_on('xextproto')
depends_on('evieext')
depends_on('pkgconfig', type='build')
depends_on('util-macros', type='build')
|
from django.contrib.auth import get_user_model
from django.urls import reverse
from django.test import TestCase
from rest_framework import status
from rest_framework.test import APIClient
from core.models import Ingredient
from recipe.serializers import IngredientSerializer
INGREDIENTS_URL = reverse('recipe:ingredient-list')
class PublicIngredientsApiTests(TestCase):
# Test the publicly available ingredients API
def setUp(self):
self.client = APIClient()
def test_login_required(self):
        # Test that login is required to access the endpoint
res = self.client.get(INGREDIENTS_URL)
self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)
class PrivateIngredientsApiTests(TestCase):
# Test the private ingredients API
def setUp(self):
self.client = APIClient()
self.user = get_user_model().objects.create_user(
'test@test.com',
'testpass'
)
self.client.force_authenticate(self.user)
def test_retrieve_ingredient_list(self):
# Test retrieving a list of ingredients
Ingredient.objects.create(user=self.user, name='Kale')
Ingredient.objects.create(user=self.user, name='Salt')
res = self.client.get(INGREDIENTS_URL)
ingredients = Ingredient.objects.all().order_by('-name')
serializer = IngredientSerializer(ingredients, many=True)
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertEqual(res.data, serializer.data)
def test_ingredients_limited_to_user(self):
        # Test that only ingredients for the authenticated user are returned
user2 = get_user_model().objects.create_user(
'other@test.com',
'testpass'
)
Ingredient.objects.create(user=user2, name='Vinegar')
ingredient = Ingredient.objects.create(user=self.user, name='Tumeric')
res = self.client.get(INGREDIENTS_URL)
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertEqual(len(res.data), 1)
self.assertEqual(res.data[0]['name'], ingredient.name)
def test_create_ingredient_successful(self):
        # Test creating a new ingredient
payload = {'name': 'Cabbage'}
self.client.post(INGREDIENTS_URL, payload)
exists = Ingredient.objects.filter(
user=self.user,
name=payload['name'],
).exists()
self.assertTrue(exists)
def test_create_ingredient_invalid(self):
        # Test that creating an invalid ingredient fails
payload = {'name': ''}
res = self.client.post(INGREDIENTS_URL, payload)
self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
#=============================================================================
# FileName: lickeeper.py
# Desc:
# Author: Jeyrce.Lu
# Email: jianxin.lu@woqutech.com
# HomePage: www.woqutech.com
# Version: 0.0.1
# LastChange: 2021/1/13 上午11:18
# History:
#=============================================================================
"""
|
from twisted.internet.protocol import Protocol
from gandyloo import message, parse
class MinesweeperClient(Protocol):
'''Represents a connection to a server using twisted's Protocol framework.
Created with an event sink, where parsed events (subclasses of
gandyloo.message.Response) are fired. Sink should have a method
self.response(resp).
'''
def __init__(self, event_sink):
self.buffer = ""
self.hello_received = False
self.size = None
self.event_sink = event_sink
def dataReceived(self, data):
self.buffer += data
if not self.hello_received:
try:
resp, self.buffer = parse.parse_start(self.buffer, first=True)
except parse.NotReadyError:
return # Haven't received enough data yet
self.hello_received = True
self.size = resp.size
self.event_sink.response(resp)
try:
while True:
resp, self.buffer = parse.parse_start(self.buffer, self.size)
self.event_sink.response(resp)
except parse.NotReadyError:
return
def command(self, command):
self.transport.write(command.render())
def clientConnectionLost(self, connection, reason):
self.event_sink.response(message.CloseResp(reason))
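# A minimal event-sink sketch (illustrative; PrintSink is not part of gandyloo).
# Any object exposing a response(resp) method can be handed to MinesweeperClient.
class PrintSink(object):
    '''Collects parsed responses and prints them as they arrive.'''
    def __init__(self):
        self.responses = []
    def response(self, resp):
        self.responses.append(resp)
        print(resp)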
|
import pytest
from datagears.core.network import Network
@pytest.fixture
def myfeature() -> Network:
"""Testing fixture for a feature."""
from datagears.core.network import Network
from datagears.features.dummy import my_out
network = Network("my-network", outputs=[my_out])
return network
@pytest.fixture
def store_feature() -> Network:
"""Testing fixture for a feature."""
from datagears.core.network import Network
from datagears.core.stores import FeatureStore
from datagears.features.dummy import my_out
network = Network("my-network", outputs=[my_out], feature_store=FeatureStore())
return network
|
from generators.neural_rendering import NeuralRenderer
import math
def next_upsample_step(curriculum, current_step):
    # Returns the training step at which the next upsample will occur
current_metadata = extract_metadata(curriculum, current_step)
current_size = current_metadata['img_size']
for curriculum_step in sorted([cs for cs in curriculum.keys() if type(cs) == int]):
if curriculum_step > current_step and curriculum[curriculum_step].get('img_size', 512) > current_size:
return curriculum_step
return float('Inf')
def last_upsample_step(curriculum, current_step):
    # Returns the training step at which the current stage started, i.e. the
    # step of the last upsample
current_metadata = extract_metadata(curriculum, current_step)
current_size = current_metadata['img_size']
for curriculum_step in sorted([cs for cs in curriculum.keys() if type(cs) == int]):
if curriculum_step <= current_step and curriculum[curriculum_step]['img_size'] == current_size:
return curriculum_step
return 0
def get_current_step(curriculum, epoch):
step = 0
for update_epoch in curriculum['update_epochs']:
if epoch >= update_epoch:
step += 1
return step
def extract_metadata(curriculum, current_step):
return_dict = {}
for curriculum_step in sorted([cs for cs in curriculum.keys() if type(cs) == int], reverse=True):
if curriculum_step <= current_step:
for key, value in curriculum[curriculum_step].items():
return_dict[key] = value
break
for key in [k for k in curriculum.keys() if type(k) != int]:
return_dict[key] = curriculum[key]
return return_dict
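# A small worked example (hypothetical curriculum, not one of the configs below):
# extract_metadata picks the latest integer stage <= current_step and then adds
# every non-integer (global) key.
_EXAMPLE_CURRICULUM = {
    0: {'img_size': 32, 'batch_size': 64},
    1000: {'img_size': 64, 'batch_size': 32},
    'fov': 12,
}
# extract_metadata(_EXAMPLE_CURRICULUM, 1500)
# -> {'img_size': 64, 'batch_size': 32, 'fov': 12}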
CelebA = {
0: {'batch_size': 24 * 2, 'num_steps': 12, 'img_size': 64, 'batch_split': 2, 'gen_lr': 6e-5, 'disc_lr': 2e-4},
int(200e3): {},
# 'dataset_path': '/home/ericryanchan/data/celeba/img_align_celeba/*.jpg',
'dataset_path': '/media/data2/sunjx/FENeRF/data/celebahq/data512x512/*.jpg',
'fov': 12,
'ray_start': 0.88,
'ray_end': 1.12,
'fade_steps': 10000,
'h_stddev': 0.3,
'v_stddev': 0.155,
'h_mean': math.pi*0.5,
'v_mean': math.pi*0.5,
'sample_dist': 'gaussian',
'topk_interval': 2000,
'topk_v': 0.6,
'betas': (0, 0.9),
'unique_lr': False,
'weight_decay': 0,
'r1_lambda': 0.2,
'latent_dim': 512,
'output_dim': 4,
'grad_clip': 10,
'model': 'SPATIALSIRENBASELINE',
# 'model': 'EmbeddingPiGAN128',
'generator': 'ImplicitGenerator3d',
'discriminator': 'CCSEncoderDiscriminator',
'dataset': 'CelebA',
'clamp_mode': 'relu',
'z_dist': 'gaussian',
'hierarchical_sample': True,
'z_lambda': 0,
'pos_lambda': 15,
'last_back': False,
'eval_last_back': True,
'fill_mode': 'eval_white_back',
'target_size': 128
}
CelebA_double_semantic = {
0: {'batch_size': 24, 'num_steps': 12, 'img_size': 32, 'batch_split': 6, 'gen_lr': 5e-5, 'disc_img_lr': 2e-4, 'disc_seg_lr': 1e-4},
int(10e3): {'batch_size': 12, 'num_steps': 12, 'img_size': 64, 'batch_split': 2, 'gen_lr':2e-5, 'disc_img_lr': 1e-4, 'disc_seg_lr': 5e-5},
int(50e3):{'batch_size': 4, 'num_steps': 24, 'img_size': 128, 'batch_split': 4, 'gen_lr': 5e-6, 'disc_img_lr': 5e-5, 'disc_seg_lr': 2e-5},
int(500e3): {},
# 'dataset_path': '/home/ericryanchan/data/celeba/img_align_celeba/*.jpg',
'dataset_path': 'data/celebahq_mask',
'background_mask': True,
'fov': 12,
'ray_start': 0.88,
'ray_end': 1.12,
'fade_steps': 10000,
'h_stddev': 0.3,
'v_stddev': 0.155,
'h_mean': math.pi*0.5,
'v_mean': math.pi*0.5,
'sample_dist': 'gaussian',
'topk_interval': 2000,
'topk_v': 0.6,
'betas': (0, 0.9),
'unique_lr': True,
'weight_decay': 0,
'r1_lambda': 0.2,
'latent_geo_dim': 256,
'latent_app_dim': 256,
'output_dim': 22,
'grad_clip': 10,
# 'model': 'SPATIALSIRENSEMANTICDISENTANGLE',
'model': 'SIRENBASELINESEMANTICDISENTANGLE',
'generator': 'DoubleImplicitGenerator3d',
'discriminator_img': 'CCSDoubleEncoderDiscriminator',
'discriminator_seg': 'CCSDoubleEncoderDiscriminator',
'dataset': 'CelebAMaskHQ_wo_background_seg_18',
'clamp_mode': 'relu',
'z_dist': 'gaussian',
'hierarchical_sample': True,
'z_geo_lambda': 0,
'z_app_lambda': 0,
'pos_lambda': 15,
'last_back': False,
'eval_last_back': False,
'd_seg_loss_lambda': 0.1,
'g_seg_loss_lambda': 0.1,
'softmax_label': False,
'target_size': 128,
'fill_mode': 'seg_padding_background'
}
CelebA_double_semantic_texture_embedding_256_dim_96 = {
0: {'batch_size': 24, 'num_steps': 24, 'img_size': 32, 'batch_split': 4, 'gen_lr': 6e-5, 'disc_img_lr': 2e-4, 'disc_seg_lr': 2e-4},
int(20e3): {'batch_size': 48, 'num_steps': 24, 'img_size': 64, 'batch_split': 4, 'gen_lr':6e-5, 'disc_img_lr': 2e-4, 'disc_seg_lr': 2e-4},
int(50e3):{'batch_size': 24, 'num_steps': 24, 'img_size': 128, 'batch_split': 4, 'gen_lr': 2e-5, 'disc_img_lr': 5e-5, 'disc_seg_lr': 2e-5},
int(500e3): {},
'dataset_path': 'data/celebahq_mask',
'background_mask': True,
'fov': 12,
'ray_start': 0.88,
'ray_end': 1.12,
'fade_steps': 10000,
'h_stddev': 0.3,
'v_stddev': 0.155,
'h_mean': math.pi*0.5,
'v_mean': math.pi*0.5,
'sample_dist': 'gaussian',
'topk_interval': 2000,
'topk_v': 0.6,
'betas': (0, 0.9),
'unique_lr': True,
'weight_decay': 0,
'r1_lambda': 0.2,
'latent_geo_dim': 256,
'latent_app_dim': 256,
'output_dim': 22,
'grad_clip': 10,
# 'model': 'SIRENBASELINESEMANTICDISENTANGLE',
'model': 'TextureEmbeddingPiGAN256SEMANTICDISENTANGLE_DIM_96',
'generator': 'DoubleImplicitGenerator3d',
'discriminator_img': 'CCSDoubleEncoderDiscriminator',
'discriminator_seg': 'CCSDoubleEncoderDiscriminator',
'dataset': 'CelebAMaskHQ_wo_background_seg_18',
'clamp_mode': 'relu',
'z_dist': 'gaussian',
'hierarchical_sample': True,
'z_geo_lambda': 0,
'z_app_lambda': 0,
'pos_lambda': 15,
'last_back': False,
'eval_last_back': False,
'd_seg_loss_lambda': 0.1,
'g_seg_loss_lambda': 0.1,
'softmax_label': False,
'target_size': 128,
'fill_mode': 'seg_padding_background'
}
|
from datetime import datetime, timedelta
import trading_calendars
ANNUAL_DAYS = 240
# Get public holidays data from Shanghai Stock Exchange
cn_calendar = trading_calendars.get_calendar('XSHG')
holidays = [x.to_pydatetime() for x in cn_calendar.precomputed_holidays]
# Filter future public holidays
start = datetime.today()
PUBLIC_HOLIDAYS = [x for x in holidays if x >= start]
def calculate_days_to_expiry(option_expiry: datetime) -> int:
""""""
current_dt = datetime.now().replace(hour=0, minute=0, second=0, microsecond=0)
days = 1
while current_dt <= option_expiry:
current_dt += timedelta(days=1)
# Ignore weekends
if current_dt.weekday() in [5, 6]:
continue
# Ignore public holidays
if current_dt in PUBLIC_HOLIDAYS:
continue
days += 1
return days
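# A minimal usage sketch (the expiry date below is an arbitrary example):
if __name__ == "__main__":
    example_expiry = datetime(2024, 3, 27)
    print(calculate_days_to_expiry(example_expiry))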
|
#!/usr/bin/env python3
import redis
import argparse
import hashlib
from getpass import getpass
r = redis.StrictRedis(host="localhost", port=6379)
parser = argparse.ArgumentParser()
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument('--add', action='store_true', help='Adds a service')
group.add_argument('--check', action='store_true', help='Retrieve and print service details')
group.add_argument('--delete', action='store_true', help='Delete a service entry')
group.add_argument('--update', action='store_true', help='Update a service entry')
group.add_argument('--list', action='store_true', help='List all users')
group.add_argument('--stats', action='store_true', help='Statistics for all users')
parser.add_argument('service', nargs='?', default=None, type=str, help='Service username')
args = parser.parse_args()
if not args.service and not (args.list or args.stats):
from sys import exit
parser.print_help()
exit(1)
def hash_key(x: str):
m = hashlib.blake2b()
m.update(x.encode("utf-8"))
return m.digest()
def exists(key: str):
existing = r.exists(f"service:{key}") and r.sismember('services', key)
if not existing:
print(f"{key} not found")
else:
print(f"{key} exists")
return existing
def existing_users():
return [l.decode("utf-8") for l in r.smembers('services')]
def interactive_add():
public = "N/A"
public_opts = ["Y", "N"]
while public not in public_opts:
public = input("Public information? (Y/N): ")
display = input("Display name: ")
website = input("Website: ")
api_key = None
while not api_key:
api_key = getpass("API Key (hidden, will be hashed): ")
options = {
"public": public,
"display": display,
"website": website,
"api_key": hash_key(api_key),
"precache": 0,
"ondemand": 0
}
return options
def interactive_update():
public = "N/A"
public_opts = ["Y", "N", ""]
while public not in public_opts:
public = input("Public information? (Y/N): ")
display = input("Display name: ")
website = input("Website: ")
api_key = getpass("API Key (hidden, will be hashed): ")
options = dict()
if public:
options["public"] = public
if display:
options["display"] = display
if website:
options["website"] = website
if api_key:
options["api_key"] = hash_key(api_key)
return options
def display(user):
options = r.hgetall(f"service:{user}")
options = {k.decode("utf-8"): v for k,v in options.items()}
options = {k: v if k=="api_key" else v.decode("utf-8") for k,v in options.items()}
print(options)
def add(user):
print("Creating new entry.")
options = interactive_add()
r.hmset(f"service:{user}", options)
r.sadd("services", user)
print(f"User {user} created:")
display(user)
def update(user):
print("Updating entry. Leave a field blank to skip.")
options = interactive_update()
if options:
r.hmset(f"service:{user}", options)
print(f"User {user} updated:")
else:
print(f"No changes to {user}:")
display(user)
def delete(user):
print("Deleting entry.")
r.delete(f"service:{user}")
r.srem('services', user)
user_exists = exists(user)
if user_exists:
print("Failure in deleting")
else:
print("Deleting successfull")
def statistics(users):
for user in users:
stats = r.hgetall(f"service:{user}")
stats = {k.decode("utf-8"): v for k,v in stats.items()}
stats = {k: v if k=="api_key" else v.decode("utf-8") for k,v in stats.items()}
print(user)
print(f"\t{'PUBLIC' if stats['public']=='Y' else 'PRIVATE'}\n"
f"\tprecache: {stats.get('precache') or 0}"
f"\tondemand: {stats.get('ondemand') or 0}"
)
def main():
if args.list:
print("Services in database:\n", existing_users())
elif args.stats:
statistics(existing_users())
else:
user = args.service
user_exists = exists(user)
if not user_exists:
if args.add:
add(user)
else:
print("Services in database:\n", existing_users())
else:
if args.check:
display(user)
elif args.delete:
delete(user)
elif args.update:
update(user)
else:
                raise NotImplementedError
if __name__ == '__main__':
main()
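# Example invocations (illustrative; the script name and "example-svc" are hypothetical):
#   python manage_services.py --add example-svc      # interactively add a service
#   python manage_services.py --check example-svc    # verify it exists and print details
#   python manage_services.py --stats                # usage statistics for all services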
|
# Copyright (c) Microsoft. All rights reserved.
# Licensed under the MIT license. See LICENSE.md file in the project root
# for full license information.
# ==============================================================================
import numpy as np
import sys
import os
import time
from cntk import Trainer, Axis, text_format_minibatch_source, StreamConfiguration
from cntk.device import cpu, set_default_device
from cntk.learner import sgd
from cntk.ops import input_variable, cross_entropy_with_softmax, combine, classification_error
abs_path = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.join(abs_path, "..", ".."))
from examples.common.nn import LSTMP_component_with_self_stabilization, embedding, linear_layer, select_last, print_training_progress
# Defines the LSTM model for classifying sequences
def LSTM_sequence_classifer_net(input, num_output_classes, embedding_dim, LSTM_dim, cell_dim):
embedding_function = embedding(input, embedding_dim)
LSTM_function = LSTMP_component_with_self_stabilization(
embedding_function.output, LSTM_dim, cell_dim)[0]
thought_vector = select_last(LSTM_function)
return linear_layer(thought_vector, num_output_classes)
# Creates and trains an LSTM sequence classification model
def train_sequence_classifier(debug_output=False):
input_dim = 2000
cell_dim = 25
hidden_dim = 25
embedding_dim = 50
num_output_classes = 5
# Input variables denoting the features and label data
features = input_variable(shape=input_dim, is_sparse=True)
label = input_variable(num_output_classes, dynamic_axes=[
Axis.default_batch_axis()])
# Instantiate the sequence classification model
classifier_output = LSTM_sequence_classifer_net(
features, num_output_classes, embedding_dim, hidden_dim, cell_dim)
ce = cross_entropy_with_softmax(classifier_output, label)
pe = classification_error(classifier_output, label)
rel_path = r"../../../../Tests/EndToEndTests/Text/SequenceClassification/Data/Train.ctf"
path = os.path.join(os.path.dirname(os.path.abspath(__file__)), rel_path)
feature_stream_name = 'features'
labels_stream_name = 'labels'
mb_source = text_format_minibatch_source(path, [
StreamConfiguration(feature_stream_name, input_dim, True, 'x'),
StreamConfiguration(labels_stream_name, num_output_classes, False, 'y')], 0)
features_si = mb_source[features]
labels_si = mb_source[label]
# Instantiate the trainer object to drive the model training
trainer = Trainer(classifier_output, ce, pe,
[sgd(classifier_output.parameters, lr=0.0005)])
# Get minibatches of sequences to train with and perform model training
minibatch_size = 200
training_progress_output_freq = 10
i = 0
if debug_output:
training_progress_output_freq = training_progress_output_freq/3
while True:
mb = mb_source.next_minibatch(minibatch_size)
if len(mb) == 0:
break
# Specify the mapping of input variables in the model to actual
# minibatch data to be trained with
arguments = {features: mb[features_si],
label: mb[labels_si]}
trainer.train_minibatch(arguments)
print_training_progress(trainer, i, training_progress_output_freq)
i += 1
import copy
evaluation_average = copy.copy(
trainer.previous_minibatch_evaluation_average)
loss_average = copy.copy(trainer.previous_minibatch_loss_average)
return evaluation_average, loss_average
if __name__ == '__main__':
# Specify the target device to be used for computing, if you do not want to
# use the best available one, e.g.
# set_default_device(cpu())
error, _ = train_sequence_classifier()
print("Error: %f" % error)
|
from django.views.generic import TemplateView
class HomeRequestView(TemplateView):
http_method_names = ['get', ]
template_name = "home.html"
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Note: used in the lecture notebook.
It does not need to be run directly.
"""
print("Este script não deve ser executado diretamente")
from ipywidgets import widgets, interact, interactive, FloatSlider, IntSlider
import numpy as np
import cv2
def make_widgets_mat(m, n):
"""
    Makes an m rows x n columns
    matrix of integer Jupyter widgets,
    all values initialized to zero
"""
list_elements = []
for i in range(m):
row = []
for j in range(n):
row.append(widgets.IntText(value=0))
list_elements.append(row)
rows = []
for row in list_elements:
rows.append(widgets.HBox(row))
widgets_mat = widgets.VBox(rows)
return list_elements, widgets_mat
def make_widgets_mat_from_data(data):
"""
    Creates a matrix of int widgets given 2D data
"""
n = len(data)
m = len(data[0])
    elements, mat = make_widgets_mat(n, m)
for i in range(n):
for j in range(m):
elements[i][j].value = data[i][j]
return elements, mat
def make_np_from_widgets_list(widgets_list):
"""
    Takes as input a list of lists of widgets and returns a numpy array of their values
"""
widgets = widgets_list
n = len(widgets)
m = len(widgets[0])
array = np.zeros((n,m), dtype=np.float32)
for i in range(n):
for j in range(m):
array[i][j] = widgets[i][j].value
return array
def convert_to_tuple(html_color):
colors = html_color.split("#")[1]
r = int(colors[0:2],16)
g = int(colors[2:4],16)
b = int(colors[4:],16)
return (r,g,b)
def to_1px(tpl):
img = np.zeros((1,1,3), dtype=np.uint8)
img[0,0,0] = tpl[0]
img[0,0,1] = tpl[1]
img[0,0,2] = tpl[2]
return img
def to_hsv(html_color):
tupla = convert_to_tuple(html_color)
hsv = cv2.cvtColor(to_1px(tupla), cv2.COLOR_RGB2HSV)
return hsv[0][0]
def ranges(value):
hsv = to_hsv(value)
hsv2 = np.copy(hsv)
hsv[0] = max(0, hsv[0]-10)
    hsv2[0] = min(180, hsv2[0] + 10)
hsv[1:] = 50
hsv2[1:] = 255
return hsv, hsv2
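def example_inrange_mask(bgr_image, html_color="#ff0000"):
    """
    Illustrative helper (not part of the original lecture code): builds a binary
    mask for the given HTML color using the HSV bounds computed by ranges().
    The default color is an arbitrary example.
    """
    lower, upper = ranges(html_color)
    hsv_image = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2HSV)
    return cv2.inRange(hsv_image, lower, upper)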
|
# -*- coding: utf-8 -*-
"""
Created on Mon Sep 28 11:12:44 2020
Files for this layout:
ftp://ftpe.rrc.texas.gov/sholed
ftp://ftpe.rrc.texas.gov/sholed/olf001l.ebc.gz
ftp://ftpe.rrc.texas.gov/sholed/olf003l.ebc.gz
ftp://ftpe.rrc.texas.gov/sholed/olf004l.ebc.gz
ftp://ftpe.rrc.texas.gov/sholed/olf005l.ebc.gz
ftp://ftpe.rrc.texas.gov/sholed/olf007l.ebc.gz
ftp://ftpe.rrc.texas.gov/sholed/olf008l.ebc.gz
ftp://ftpe.rrc.texas.gov/sholed/olf009l.ebc.gz
ftp://ftpe.rrc.texas.gov/sholed/olf010l.ebc.gz
ftp://ftpe.rrc.texas.gov/sholed/olf011l.ebc.gz
ftp://ftpe.rrc.texas.gov/sholed/olf013l.ebc.gz
ftp://ftpe.rrc.texas.gov/sholed/olf014l.ebc.gz
ftp://ftpe.rrc.texas.gov/sholed/ReadMe.txt
Layout Manual:
https://www.rrc.texas.gov/media/1273/ola013k.pdf
"""
OIL_FIELD_01 = [
('TYPE-REC',0,1,'pic_numeric'), ##PIC 9
('DIST',1,3,'pic_any'), ##PIC XXX
('FIELD',4,8,'pic_numeric'), ##PIC 9(8)
('OPR',12,6,'pic_numeric'), ##PIC 9(6)
('LEASE',18,5,'pic_numeric'), ##PIC 9(5)
('LEASE FILLER',23,2,'pic_numeric'), ##PIC 99
('OFFSHORE',25,1,'pic_numeric'), ##PIC 9
('F-NAME',26,32,'pic_any'), ##PIC X(32)
('COUNTY',58,18,'pic_numeric'), ##PIC 9(18)
('DISC-DATE',76,8,'pic_yyyymmdd'), ##PIC 9(8)
('F-DEPTH',84,5,'pic_numeric'), ##PIC 9(5)
('O-GRAV',89,3,'pic_numeric'), ##PIC 999
('F-TYPE',92,1,'pic_numeric'), ##PIC 9
('MULT-RES',93,1,'pic_numeric'), ##PIC 9
('F-LPB',94,1,'pic_numeric'), ##PIC 9
('F-XMT',95,1,'pic_numeric'), ##PIC 9
('PRT-AS-IS',96,1,'pic_numeric'), ##PIC 9
('YARD',97,1,'pic_numeric'), ##PIC 9
('T-CODES',98,12,'pic_numeric'), ##PIC 9(12)
('ALLOCATION',110,12,'pic_numeric'), ##PIC 9(12)
('RES-AMT',122,6,'pic_numeric'), ##PIC 9(6)
('F-GOR',128,6,'pic_numeric'), ##PIC 9(6)
('F-TOP',134,5,'pic_numeric'), ##PIC 9(5)
('F-NET',139,6,'pic_numeric'), ##PIC 9(6)
('UNET',145,3,'pic_numeric'), ##PIC 999
('TOL',148,4,'pic_numeric'), ##PIC 9999
('SPAC',152,8,'pic_numeric'), ##PIC 9(8)
('DIAG',160,4,'pic_numeric'), ##PIC 9999
('CUM-PROD',164,7,'pic_comp'), ##PIC S9(13) COMP-3
('CASING',171,21,'pic_any'), ##PIC X(21)
('COL-HEAD',192,1,'pic_any'), ##PIC X
('ALO-CODE',193,1,'pic_any'), ##PIC X
('F-RMK1',194,66,'pic_any'), ##PIC X(66)
('F-RMK2',260,66,'pic_any'), ##PIC X(66)
('PERM-NO',326,5,'pic_any'), ##PIC X(5)
('SP-FHC',331,1,'pic_numeric'), ##PIC 9
('AN-A',332,90,'pic_any'), ##PIC X(90)
('AN-B',422,35,'pic_any'), ##PIC X(35)
('F-OOIP',457,8,'pic_numeric'), ##PIC 9(08) ##('FILLER',465,7,'pic_numeric'), ##PIC 9(07) ##('FILLER',472,15,'pic_numeric'), ##PIC 9(15) ##('FILLER',487,13,'pic_numeric'), ##PIC 9(13)
('FM-DATE',500,6,'pic_yyyymm'), ##PIC 9(6)
('FM-PW',506,2,'pic_comp'), ##PIC S9(3) COMP-3
('FM-AC',508,4_4,'pic_comp'), ##PIC S999V9(4) COMP-3 ##('FILLER',512,4,'pic_numeric'), ##PIC 9(4)
('FM-OTHC',516,1,'pic_numeric'), ##PIC 9
('FM-CHG',517,1,'pic_numeric'), ##PIC 9
('FM-PROD-FACT',518,3_3,'pic_comp'), ##PIC S99V999 COMP-3
('FM-SPLIT-PROD-FACT',521,3_3,'pic_comp'), ##PIC S99V999 COMP-3
('FM-JOHN',524,1,'pic_numeric'), ##PIC 9
('FM-OTH',525,8_7,'pic_comp'), ##PIC S9(8)V9(7) COMP-3 ##('FILLER',533,15,'pic_any'), ##PIC X(15)
]
OIL_LEASE_03 = [
('LEASE-REC-TYPE-REC',0,1,'pic_numeric'), ##PIC 9
('LEASE-REC-DIST',1,3,'pic_any'), ##PIC XXX
('LEASE-REC-FIELD',4,8,'pic_numeric'), ##PIC 9(8)
('LEASE-REC-OPR',12,6,'pic_numeric'), ##PIC 9(6)
('LEASE-REC-LEASE',18,5,'pic_numeric'), ##PIC 9(5)
('LEASE-REC-FILLER',23,2,'pic_any'), ##PIC XX
('LEASE-REC-OFFSHORE',25,1,'pic_numeric'), ##PIC 9
('L-NAME',26,32,'pic_any'), ##PIC X(32)
('LSE-CO',58,6,'pic_numeric'), ##PIC 9(6)
('POGATH',64,5,'pic_any'), ##PIC X(5)
('PGGATH',69,5,'pic_any'), ##PIC X(5)
('OSPLIT',74,1,'pic_numeric'), ##PIC 9
('GSPLIT',75,1,'pic_numeric'), ##PIC 9
('OOGATH',76,5,'pic_any'), ##PIC X(5)
('OGGATH',81,5,'pic_any'), ##PIC X(5)
('OOPR',86,6,'pic_numeric'), ##PIC 9(6)
('BO-STATUS',92,4,'pic_comp'), ##PIC S9(7) COMP-3
('BG-STATUS',96,4,'pic_comp'), ##PIC S9(7) COMP-3
('MOVE-BAL',100,4,'pic_comp'), ##PIC S9(7) COMP-3
('PO-STATUS',104,4,'pic_comp'), ##PIC S9(7) COMP-3
('PG-STATUS',108,4,'pic_comp'), ##PIC S9(7) COMP-3
('SEC-REC',112,1,'pic_numeric'), ##PIC 9
('CERT',113,2,'pic_numeric'), ##PIC 99
('BATCH',115,1,'pic_any'), ##PIC X
('L-LPB',116,1,'pic_numeric'), ##PIC 9
('COMMINGLE-CD',117,1,'pic_numeric'), ##PIC 9
('COMMINGLE',118,4,'pic_numeric'), ##PIC 9999
('L-INFO',122,54,'pic_any'), ##PIC X(54)
('AD-BO-STATUS',176,4,'pic_comp'), ##PIC S9(7) COMP-3
('AD-BG-STATUS',180,4,'pic_comp'), ##PIC S9(7) COMP-3
('COMMINGLE-DATE',184,6,'pic_yyyymm'), ##PIC 9(6)
('L-RMCD',190,1,'pic_numeric'), ##PIC 9
('L-RMDT',191,6,'pic_yyyymm'), ##PIC 9(6)
('SEV-CD-13',197,1,'pic_numeric'), ##PIC 9
('SEV-CD-14',198,1,'pic_numeric'), ##PIC 9
('L-CAS-SI-LTR-DTE',199,6,'pic_yyyymm'), ##PIC 9(6)
('L-RED-RTE-DTE',205,6,'pic_yyyymm'), ##PIC 9(6)
('L-EXC-TST',211,1,'pic_numeric'), ##PIC 9
('L-RLTYCD',212,1,'pic_numeric'), ##PIC 9
('L-ONE-WELL-LEASE',213,1,'pic_any'), ##PIC X
('L-PANHANDLE-GOR-EXC',214,1,'pic_any'), ##PIC X(01)
('L-PANHANDLE-GOR-AMT',215,5_1,'pic_comp'), ##PIC 9(08)V9 COMP-3 ##('FILLER',220,4,'pic_numeric'), ##PIC 9(04)
('L-MONTH-DATE',224,6,'pic_yyyymm'), ##PIC 9(6)
('LM-SEV',230,1,'pic_numeric'), ##PIC 9
('LM-RETRO',231,1,'pic_numeric'), ##PIC 9
('LM-REC',232,1,'pic_numeric'), ##PIC 9
('LM-CHG',233,1,'pic_numeric'), ##PIC 9
('LM-ALLOW',234,4,'pic_comp'), ##PIC S9(7) COMP-3
('LM-PROD',238,4,'pic_comp'), ##PIC S9(7) COMP-3
('LM-FW',242,3,'pic_numeric'), ##PIC 999
('LM-OW',245,3,'pic_numeric'), ##PIC 999
('LM-PL',248,4,'pic_comp'), ##PIC S9(7) COMP-3
('LM-PLC',252,1,'pic_numeric'), ##PIC 9
('LM-OTH',253,4,'pic_comp'), ##PIC S9(7) COMP-3
('LM-OTHC',257,1,'pic_numeric'), ##PIC 9
('LM-STO',258,4,'pic_comp'), ##PIC S9(7) COMP-3
('LM-GL',262,5,'pic_comp'), ##PIC S9(7) COMP-3
('LM-GPROD',267,5,'pic_comp'), ##PIC S9(7) COMP-3
('LM-GLIFT',272,4,'pic_comp'), ##PIC S9(7) COMP-3
('LM-CSIL',276,1,'pic_numeric'), ##PIC 9
('LM-JOHN',277,1,'pic_numeric'), ##PIC 9
('LM-LTR-CODE',278,1,'pic_numeric'), ##PIC 9 ##('FILLER',279,13,'pic_numeric'), ##PIC 9(13) ##('FILLER',292,904,'pic_numeric'), ##PIC 9(13) ##('FILLER',1196,4,'pic_numeric') ##PIC 9(04)
]
OIL_MULTI_WELL_04 = [
('MULTI-W-REC-TYPE-REC',0,1,'pic_numeric'), ##PIC 9
('MULTI-W-REC-DIST',1,3,'pic_any'), ##PIC XXX
('MUTLI-W-REC-FIELD',4,8,'pic_numeric'), ##PIC 9(8)
('MULTI-W-REC-OPR',12,6,'pic_numeric'), ##PIC 9(6)
('MULTI-W-REC-LEASE',18,5,'pic_numeric'), ##PIC 9(5)
('MULTI-W-REC-FILLER',23,2,'pic_numeric'), ##PIC 99
('MULTI-W-REC-OFFSHORE',25,1,'pic_numeric'), ##PIC 9
('M-RECORD',26,6,'pic_any'), ##PIC X(6)
('TYPEW',32,1,'pic_any'), ##PIC X
('RESER',33,5,'pic_any'), ##PIC X(5)
('M-COUNTY',38,6,'pic_numeric'), ##PIC 9(6)
('M-TST-EFF',44,1,'pic_any'), ##PIC X
('M-PNTR-1ST',45,6,'pic_numeric'), ##PIC 9(6)
('CAP',51,1,'pic_numeric'), ##PIC 9
('PROD-WELL',52,6,'pic_numeric'), ##PIC 9(6)
('MARG-WELL',58,6,'pic_numeric'), ##PIC 9(6)
('M-DEPTH',64,1,'pic_numeric'), ##PIC 9
('M-PNTR-LST',65,6,'pic_numeric'), ##PIC 9(6)
('M-EXC-TEST',71,1,'pic_numeric'), ##PIC 9 ##('FILLER',72,6,'pic_numeric'), ##PIC 9(6)
('M-WATER',78,6,'pic_numeric'), ##PIC 9(6)
('M-REMARK',84,55,'pic_any'), ##PIC X(55)
('MM-PRCNT',139,3,'pic_comp'), ##PIC V999 ##('FILLER',142,11,'pic_numeric'), ##PIC 9(11) ##('FILLER',153,11,'pic_numeric'), ##PIC 9(11)
('M-MONTH-DATE',164,6,'pic_yyyymm'), ##PIC 9(6)
('MM-CHG',170,1,'pic_numeric'), ##PIC 9
('MM-NO',171,1,'pic_numeric'), ##PIC 9
('MM-ALLOW',172,4,'pic_comp'), ##PIC S9(7) COMP-3
('MM-ACODE',176,1,'pic_numeric'), ##PIC 9
('MM-TCODE',177,1,'pic_numeric'), ##PIC 9
('MM-LIMIT',178,5,'pic_comp'), ##PIC S9(9) COMP-3
('MM-ALLOW2',183,4,'pic_comp'), ##PIC S9(7) COMP-3
('MM-ACODE2',187,1,'pic_numeric'), ##PIC 9
('MM-TCODE2',188,1,'pic_numeric'), ##PIC 9
('MM-LIMIT2',189,5,'pic_comp'), ##PIC S9(9) COMP-3
('MM-DATE2',194,2,'pic_numeric'), ##PIC 99
('MM-ALLOW3',196,4,'pic_comp'), ##PIC S9(7) COMP-3
('MM-ACODE3',200,1,'pic_numeric'), ##PIC 9
('MM-TCODE3',201,1,'pic_numeric'), ##PIC 9
('MM-LIMIT3',202,5,'pic_comp'), ##PIC S9(9) COMP-3
('MM-DATE3',207,2,'pic_numeric'), ##PIC 99
('MM-FORM-LCK',209,1,'pic_numeric'), ##PIC 9
('MM-SPACE1',210,4,'pic_comp'), ##PIC S9(7) COMP-3
('MM-KODE2',214,1,'pic_numeric'), ##PIC 9
('MM-SPACE2',215,4,'pic_comp'), ##PIC S9(7) COMP-3
('MM-JOHN',219,1,'pic_numeric'), ##PIC 9 ##('FILLER',220,9,'pic_numeric'), ##PIC 9(09) ##('FILLER',229,9,'pic_numeric'), ##PIC 9(09)
]
OIL_WELL_05 = [
('WELL-REC-TYPE-REC',0,1,'pic_numeric'), ##PIC 9
('WELL-REC-DIST',1,3,'pic_any'), ##PIC XXX
('WELL-REC-FIELD',4,8,'pic_numeric'), ##PIC 9(8)
('WELL-REC-OPR',12,6,'pic_numeric'), ##PIC 9(6)
('WELL-REC-LEASE',18,5,'pic_numeric'), ##PIC 9(5)
('WELL-REC-FILLER',23,2,'pic_numeric'), ##PIC 99
('WELL-REC-OFFSHORE',25,1,'pic_numeric'), ##PIC 9
('WELL-NO',26,6,'pic_any'), ##PIC X(6)
('W-TYPE-WELL',32,1,'pic_any'), ##PIC X(1)
('W-UNIT-NO',33,1,'pic_any'), ##PIC X
('W-UNIT-VALUE',34,4,'pic_numeric'), ##PIC 9V999
('W-KEY',38,1,'pic_numeric'), ##PIC 9
('W-COUNTY',39,3,'pic_numeric'), ##PIC 999
('PUMP',42,1,'pic_numeric'), ##PIC 9
('W-SP',43,5,'pic_numeric'), ##PIC 9(5)
('W-NET',48,6,'pic_numeric'), ##PIC 9(6)
('W-DEPTH',54,5,'pic_numeric'), ##PIC 9(5)
('SAND',59,3,'pic_numeric'), ##PIC 9(3)
('FROZEN',62,5,'pic_numeric'), ##PIC 9(5)
('PERF',67,5,'pic_numeric'), ##PIC 9(5)
('W-DATE',72,8,'pic_yyyymmdd'), ##PIC 9(8)
('EX-14B-CD',80,1,'pic_any'), ##PIC X
('W-SUB-WELL',81,1,'pic_numeric'), ##PIC 9
('W-NO-PROD-CD',82,1,'pic_numeric'), ##PIC 9
('W-DELQ-FORM',83,1,'pic_numeric'), ##PIC 9
('W-TST-EFF',84,1,'pic_any'), ##PIC X
('W-EXC-TST',85,1,'pic_numeric'), ##PIC 9
('W-WATER',86,4,'pic_numeric'), ##PIC 9(4)
('EX-14B-DATE',90,6,'pic_yyyymm'), ##PIC 9(6)
('W-RMKS',96,15,'pic_any'), ##PIC X(15)
('BONUS-AMT',111,4,'pic_numeric'), ##PIC 9(4)
('FROZTSF',115,3,'pic_numeric'), ##PIC 999
('W-WLSD',118,1,'pic_numeric'), ##PIC 9
('W-TST-DT',119,8,'pic_yyyymmdd'), ##PIC 9(8)
('W-DTE-LST-UTL',127,6,'pic_yyyymm'), ##PIC 9(6)
('W-NEW-WB-EXC',133,1,'pic_any'), ##PIC X(01)
('W-NEW-WB-CONNECT-DATE',134,8,'pic_yyyymmdd'), ##PIC 9(8)
('W-14B2-TYPE-COVERAGE',142,1,'pic_any'), ##PIC X(01)
('W-14B2-APP-NO',143,6,'pic_numeric'), ##PIC 9(06) ##('FILLER',149,4,'pic_numeric'), ##PIC 9(04) ##('FILLER',153,18,'pic_numeric'), ##PIC 9(18) ##('FILLER',171,7,'pic_numeric'), ##PIC 9(07)
('W-MONTH-DATE',178,6,'pic_yyyymm'), ##PIC 9(6)
('WM-CHG',184,1,'pic_numeric'), ##PIC 9
('WM-NO',185,1,'pic_numeric'), ##PIC 9
('WM-ALLOW',186,3,'pic_comp'), ##PIC S9(5) COMP-3
('WM-ACODE',189,1,'pic_any'), ##PIC X
('WM-TCODE',190,1,'pic_any'), ##PIC X
('WM-LIMIT',191,4,'pic_comp'), ##PIC S9(7) COMP-3
('WM-ALLOW2',195,3,'pic_comp'), ##PIC S9(5) COMP-3
('WM-ACODE2',198,1,'pic_any'), ##PIC X
('WM-TCODE2',199,1,'pic_any'), ##PIC X
('WM-LIMIT2',200,4,'pic_comp'), ##PIC S9(7) COMP-3
('WM-DATE2',204,2,'pic_numeric'), ##PIC 99
('WM-ALLOW3',206,3,'pic_comp'), ##PIC S9(5) COMP-3
('WM-ACODE3',209,1,'pic_any'), ##PIC X
('WM-TCODE3',210,1,'pic_any'), ##PIC X
('WM-LIMIT3',211,4,'pic_comp'), ##PIC S9(7) COMP-3
('WM-DATE3',215,2,'pic_numeric'), ##PIC 99
('WM-FORM-LICK',217,1,'pic_numeric'), ##PIC 9
('WM-PGT',218,2,'pic_comp'), ##PIC S999 COMP-3
('WM-TSWA',220,1,'pic_numeric'), ##PIC 9
('WM-EGT',221,2,'pic_comp'), ##PIC S999 COMP-3
('WM-ESWA',223,1,'pic_numeric'), ##PIC 9
('WM-ACRE',224,3_2,'pic_comp'), ##PIC S999V99 COMP-3
('WM-POTE',227,3_2,'pic_comp'), ##PIC S9999V9 COMP-3
('WM-ACFT',230,3,'pic_comp'), ##PIC S9(5) COMP-3
('WM-GOR',233,3,'pic_comp'), ##PIC S9(5) COMP-3
('WM-OTRAN-CD',236,1,'pic_numeric'), ##PIC 9
('WM-POT',237,2,'pic_comp'), ##PIC S999 COMP-3
('WM-EOT',239,2,'pic_comp'), ##PIC S999 COMP-3
('WM-JOHN',241,1,'pic_numeric'), ##PIC 9
('WM-OOIP',242,6,'pic_numeric'), ##PIC 9(06) ##('FILLER',248,3,'pic_numeric'), ##PIC 9(03)
]
def oilProd_layout(startval):
layouts_map = {
'1' : {'name': 'OIL_FIELD', 'layout': OIL_FIELD_01},
'3' : {'name': 'OIL_LEASE', 'layout': OIL_LEASE_03},
'4' : {'name': 'OIL_MULTI_WELL', 'layout': OIL_MULTI_WELL_04},
'5' : {'name': 'OIL_WELL', 'layout': OIL_WELL_05},
}
try:
returnval = layouts_map[startval]
    except KeyError:
returnval = None
return returnval
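# A minimal usage sketch (the record text is illustrative, not real RRC data):
# the first character of each fixed-width record selects the matching layout.
def example_pick_layout(record_line):
    entry = oilProd_layout(record_line[0])
    if entry is None:
        return None
    return entry['name'], entry['layout']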
|
import cv2
import numpy as np
import matplotlib.pyplot as plt
import glob
import pickle
# read in all the images in the calibration folder
calib_images = glob.glob(".\camera_cal\*.jpg")
#define chess board parameters:
nx = 9
ny = 6
# Arrays to store image points and object points
imgpoints = []
objpoints = []
def get_points_chessboard(img, nx, ny):
"""
returns the obj and img points from one chessboard image
"""
    #Generate obj points based on the chessboard from (0,0) to (nx-1, ny-1)
objp = np.zeros((nx*ny,3), np.float32)
    #np.mgrid creates two nx-by-ny arrays which are then merged together using T (transpose) and reshape. Only the first 2 columns of objp are replaced
objp[:,:2] = np.mgrid[0:nx,0:ny].T.reshape(-1,2)
#convert Image to gray scale
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
#get chess board corners
ret, corners = cv2.findChessboardCorners(gray, (nx, ny), None)
return ret, objp, corners
def calc_cam_values(img, objpoints, imgpoints):
"""
    Calculates the camera matrix etc. using the function cv2.calibrateCamera
"""
    ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, img.shape[1::-1], None, None)  # image size as (width, height)
return ret, mtx, dist, rvecs, tvecs
#Iterate through images and extract their image points
for image_path in calib_images:
image = cv2.imread(image_path)
ret, objp, imgp = get_points_chessboard(image, nx, ny)
if ret == True:
imgpoints.append(imgp)
objpoints.append(objp)
else:
print("image is not usable: ", image_path)
ret, mtx, dist, rvecs, tvecs = calc_cam_values(image, objpoints, imgpoints)
#write cam values into a dict
cam_values = { "mtx": mtx, "dist": dist,"rvecs": rvecs,"tvecs": tvecs}
#Save cam values in a pickle
pickle.dump(cam_values, open("cam_values.p", "wb"))
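# A follow-up sketch (assumption: "cam_values.p" was produced by the loop above;
# the test image path is a hypothetical example):
def undistort_example(image_path="./camera_cal/calibration1.jpg"):
    with open("cam_values.p", "rb") as f:
        values = pickle.load(f)
    img = cv2.imread(image_path)
    return cv2.undistort(img, values["mtx"], values["dist"], None, values["mtx"])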
|
# coding: utf-8
"""
.. module: scieloopds
:synopsis: WSGI Application to provide SciELO Books in OPDS protocol.
.. moduleauthor:: Allison Vollmann <allisonvoll@gmail.com>
Example configuration (additional parameters):
.. note::
[app:main]
...
mongo_uri = mongodb://localhost:27017/scieloopds
scielo_uri = http://books.scielo.org/api/v1/
auto_sync = True
auto_sync_interval = 60
items_per_page = 20
"""
import os
import sys
import logging
from urlparse import urlparse
from datetime import datetime, timedelta
import pymongo
from pyramid.config import Configurator
from pyramid.events import NewRequest
from pyramid.settings import asbool
from .sync import main as do_sync
from .utils import get_db_connection
APP_PATH = os.path.abspath(os.path.dirname(__file__))
DEFAULT_SETTINGS = [
('mongo_uri', 'OPDS_MONGO_URI', str,
'mongodb://localhost:27017/scieloopds'),
('scielo_uri', 'OPDS_SCIELO_URI', str,
'http://books.scielo.org/api/v1'),
('auto_sync', 'OPDS_AUTO_SYNC', bool,
True),
('auto_sync_interval', 'OPDS_AUTO_SYNC_INTERVAL', int,
60*60*12),
('items_per_page', 'OPDS_ITEMS_PER_PAGE', int,
20),
]
def parse_settings(settings):
"""Analisa e retorna as configurações da app com base no arquivo .ini e env.
As variáveis de ambiente possuem precedência em relação aos valores
definidos no arquivo .ini.
"""
parsed = {}
cfg = list(DEFAULT_SETTINGS)
for name, envkey, convert, default in cfg:
value = os.environ.get(envkey, settings.get(name, default))
if convert is not None:
value = convert(value)
parsed[name] = value
return parsed
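# A small worked example (hypothetical values): with OPDS_ITEMS_PER_PAGE=50 set
# in the environment, parse_settings({'items_per_page': '20'})['items_per_page']
# evaluates to 50, because environment variables win over .ini values.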
def main(global_config, **settings):
""" This function returns a Pyramid WSGI application.
"""
config = Configurator(settings=parse_settings(settings))
config.add_static_view('static', 'static', cache_max_age=3600)
config.add_route('root', '/opds/')
config.add_route('new', '/opds/new')
config.add_route('alpha_catalog', '/opds/alpha')
config.add_route('alpha_filter', '/opds/alpha/{id}')
config.add_route('publisher_catalog', '/opds/publisher')
config.add_route('publisher_filter', '/opds/publisher/{id}')
config.add_subscriber(add_mongo_db, NewRequest)
config.add_subscriber(start_sync, NewRequest)
config.scan(ignore='scieloopds.tests')
config.add_renderer('opds', factory='scieloopds.renderers.opds_factory')
return config.make_wsgi_app()
def ensure_indexes(db):
db.book.ensure_index([('updated', pymongo.DESCENDING)])
db.book.ensure_index([('title_ascii', pymongo.ASCENDING)])
db.alpha.ensure_index([('title_ascii', pymongo.ASCENDING)])
db.publisher.ensure_index([('title_ascii', pymongo.ASCENDING)])
def add_mongo_db(event):
settings = event.request.registry.settings
db = get_db_connection(settings)
ensure_indexes(db)
event.request.db = db
def start_sync(event):
settings = event.request.registry.settings
if settings['auto_sync']:
db = event.request.db
interval = settings['auto_sync_interval']
try:
update = db.catalog.find_one()
if update:
last_update = update['updated']
next_update = last_update + timedelta(seconds=interval)
if next_update < datetime.now():
do_sync(settings)
else:
do_sync(settings)
except pymongo.errors.AutoReconnect as e:
logging.getLogger(__name__).error('MongoDB: %s' % e.message)
|
# -*- coding: utf-8 -*-
import pandas as pd
from futu.common import RspHandlerBase
from futu.quote.quote_query import *
class StockQuoteHandlerBase(RspHandlerBase):
"""
    Asynchronously handles pushed quotes for subscribed stocks.
.. code:: python
class StockQuoteTest(StockQuoteHandlerBase):
def on_recv_rsp(self, rsp_str):
ret_code, content = super(StockQuoteTest,self).on_recv_rsp(rsp_str)
if ret_code != RET_OK:
print("StockQuoteTest: error, msg: %s" % content)
return RET_ERROR, content
print("StockQuoteTest ", content) # StockQuoteTest自己的处理逻辑
return RET_OK, content
"""
@classmethod
def parse_rsp_pb(cls, rsp_pb):
ret_code, msg, quote_list = StockQuoteQuery.unpack_rsp(rsp_pb)
if ret_code != RET_OK:
return ret_code, msg
else:
return RET_OK, quote_list
def on_recv_rsp(self, rsp_pb):
"""
        This callback fires after a real-time quote push is received; override this method in a derived class.
        Note that the callback runs on a separate worker thread.
        :param rsp_pb: derived classes do not need to handle this parameter directly
        :return: see the return value of get_stock_quote
"""
ret_code, content = self.parse_rsp_pb(rsp_pb)
if ret_code != RET_OK:
return ret_code, content
else:
col_list = [
'code', 'data_date', 'data_time', 'last_price', 'open_price',
'high_price', 'low_price', 'prev_close_price', 'volume',
'turnover', 'turnover_rate', 'amplitude', 'suspension',
'listing_date', 'price_spread', 'dark_status', 'sec_status', 'strike_price',
'contract_size', 'open_interest', 'implied_volatility',
'premium', 'delta', 'gamma', 'vega', 'theta', 'rho',
'net_open_interest', 'expiry_date_distance', 'contract_nominal_value',
'owner_lot_multiplier', 'option_area_type', 'contract_multiplier',
'last_settle_price','position','position_change'
]
col_list.extend(row[0] for row in pb_field_map_PreAfterMarketData_pre)
col_list.extend(row[0] for row in pb_field_map_PreAfterMarketData_after)
quote_frame_table = pd.DataFrame(content, columns=col_list)
return RET_OK, quote_frame_table
class OrderBookHandlerBase(RspHandlerBase):
"""
    Asynchronously handles pushed real-time order book data.
.. code:: python
class OrderBookTest(OrderBookHandlerBase):
def on_recv_rsp(self, rsp_str):
ret_code, data = super(OrderBookTest,self).on_recv_rsp(rsp_str)
if ret_code != RET_OK:
print("OrderBookTest: error, msg: %s" % data)
return RET_ERROR, data
print("OrderBookTest ", data) # OrderBookTest自己的处理逻辑
return RET_OK, content
"""
@classmethod
def parse_rsp_pb(cls, rsp_pb):
ret_code, msg, order_book = OrderBookQuery.unpack_rsp(rsp_pb)
if ret_code != RET_OK:
return ret_code, msg
else:
return RET_OK, order_book
def on_recv_rsp(self, rsp_pb):
"""
        This callback is invoked when a real-time order book push is received;
        override this method in a derived class to handle the data.
        Note that the callback runs in a separate worker thread.
        :param rsp_pb: derived classes do not need to handle this parameter directly
        :return: see the return value of get_order_book
"""
ret_code, content = self.parse_rsp_pb(rsp_pb)
if ret_code == RET_OK:
self.on_recv_log(content)
return ret_code, content
class CurKlineHandlerBase(RspHandlerBase):
"""
    Asynchronously handles pushed candlestick (K-line) data.
.. code:: python
class CurKlineTest(CurKlineHandlerBase):
def on_recv_rsp(self, rsp_str):
ret_code, data = super(CurKlineTest,self).on_recv_rsp(rsp_str)
if ret_code != RET_OK:
print("CurKlineTest: error, msg: %s" % data)
return RET_ERROR, data
print("CurKlineTest ", data) # CurKlineTest自己的处理逻辑
return RET_OK, content
"""
@classmethod
def parse_rsp_pb(cls, rsp_pb):
ret_code, msg, kline_list = CurKlinePush.unpack_rsp(rsp_pb)
if ret_code != RET_OK:
return ret_code, msg
else:
return RET_OK, kline_list
def on_recv_rsp(self, rsp_pb):
"""
        This callback is invoked when a real-time K-line push is received;
        override this method in a derived class to handle the data.
        Note that the callback runs in a separate worker thread.
        :param rsp_pb: derived classes do not need to handle this parameter directly
        :return: see the return value of get_cur_kline
"""
ret_code, content = self.parse_rsp_pb(rsp_pb)
if ret_code != RET_OK:
return ret_code, content
else:
col_list = [
'code', 'time_key', 'open', 'close', 'high', 'low', 'volume',
'turnover', 'k_type', 'last_close'
]
kline_frame_table = pd.DataFrame(content, columns=col_list)
return RET_OK, kline_frame_table
class TickerHandlerBase(RspHandlerBase):
"""
    Asynchronously handles pushed tick-by-tick (ticker) data.
.. code:: python
class TickerTest(TickerHandlerBase):
def on_recv_rsp(self, rsp_str):
ret_code, data = super(TickerTest,self).on_recv_rsp(rsp_str)
if ret_code != RET_OK:
print("CurKlineTest: error, msg: %s" % data)
return RET_ERROR, data
print("TickerTest ", data) # TickerTest自己的处理逻辑
return RET_OK, content
"""
@classmethod
def parse_rsp_pb(cls, rsp_pb):
ret_code, msg, ticker_list = TickerQuery.unpack_rsp(rsp_pb)
if ret_code != RET_OK:
return ret_code, msg
else:
return RET_OK, ticker_list
def on_recv_rsp(self, rsp_pb):
"""
        This callback is invoked when a real-time tick-by-tick push is received;
        override this method in a derived class to handle the data.
        Note that the callback runs in a separate worker thread.
        :param rsp_pb: derived classes do not need to handle this parameter directly
        :return: see the return value of get_rt_ticker
"""
ret_code, content = self.parse_rsp_pb(rsp_pb)
if ret_code != RET_OK:
return ret_code, content
else:
self.on_recv_log(content)
col_list = [
'code', 'time', 'price', 'volume', 'turnover',
"ticker_direction", 'sequence', 'type', 'push_data_type',
]
ticker_frame_table = pd.DataFrame(content, columns=col_list)
return RET_OK, ticker_frame_table
class RTDataHandlerBase(RspHandlerBase):
"""
    Asynchronously handles pushed intraday time-share (RT) data.
.. code:: python
class RTDataTest(RTDataHandlerBase):
def on_recv_rsp(self, rsp_str):
ret_code, data = super(RTDataTest,self).on_recv_rsp(rsp_str)
if ret_code != RET_OK:
print("RTDataTest: error, msg: %s" % data)
return RET_ERROR, data
print("RTDataTest ", data) # RTDataTest自己的处理逻辑
return RET_OK, content
"""
@classmethod
def parse_rsp_pb(cls, rsp_pb):
ret_code, msg, rt_data_list = RtDataQuery.unpack_rsp(rsp_pb)
if ret_code != RET_OK:
return ret_code, msg
else:
return RET_OK, rt_data_list
def on_recv_rsp(self, rsp_pb):
"""
        This callback is invoked when a real-time time-share (RT) data push is received;
        override this method in a derived class to handle the data.
        Note that the callback runs in a separate worker thread.
        :param rsp_pb: derived classes do not need to handle this parameter directly
        :return: see the return value of get_rt_data
"""
ret_code, content = self.parse_rsp_pb(rsp_pb)
if ret_code != RET_OK:
return ret_code, content
else:
col_list = [
'code', 'time', 'is_blank', 'opened_mins', 'cur_price',
"last_close", 'avg_price', 'turnover', 'volume'
]
rt_data_table = pd.DataFrame(content, columns=col_list)
return RET_OK, rt_data_table
class BrokerHandlerBase(RspHandlerBase):
"""
    Asynchronously handles pushed broker queue data.
.. code:: python
class BrokerTest(BrokerHandlerBase):
def on_recv_rsp(self, rsp_str):
ret_code, data = super(BrokerTest,self).on_recv_rsp(rsp_str)
if ret_code != RET_OK:
print("BrokerTest: error, msg: %s" % data)
return RET_ERROR, data
print("BrokerTest ", data) # BrokerTest自己的处理逻辑
return RET_OK, content
"""
@classmethod
def parse_rsp_pb(cls, rsp_pb):
ret_code, msg, (stock_code, bid_content,
ask_content) = BrokerQueueQuery.unpack_rsp(rsp_pb)
if ret_code != RET_OK:
return ret_code, msg
else:
return RET_OK, (stock_code, bid_content, ask_content)
def on_recv_rsp(self, rsp_pb):
"""
        This callback is invoked when a real-time broker queue push is received;
        override this method in a derived class to handle the data.
        Note that the callback runs in a separate worker thread.
        :param rsp_pb: derived classes do not need to handle this parameter directly
        :return: on success, (RET_OK, stock_code, [bid_frame_table, ask_frame_table]);
                 see the return value of get_broker_queue_ for the meaning of the frame tables.
                 On failure, (RET_ERROR, ERR_MSG, None).
"""
ret_code, content = self.parse_rsp_pb(rsp_pb)
if ret_code != RET_OK:
return ret_code, content, None
else:
self.on_recv_log(content)
stock_code, bid_content, ask_content = content
bid_list = [
'code', 'bid_broker_id', 'bid_broker_name', 'bid_broker_pos'
]
ask_list = [
'code', 'ask_broker_id', 'ask_broker_name', 'ask_broker_pos'
]
bid_frame_table = pd.DataFrame(bid_content, columns=bid_list)
ask_frame_table = pd.DataFrame(ask_content, columns=ask_list)
return ret_code, stock_code, [bid_frame_table, ask_frame_table]
class KeepAliveHandlerBase(RspHandlerBase):
"""Base class for handling KeepAlive"""
@classmethod
def parse_rsp_pb(cls, rsp_pb):
ret_code, msg, alive_time = KeepAlive.unpack_rsp(rsp_pb)
if ret_code != RET_OK:
return ret_code, msg
else:
return RET_OK, alive_time
def on_recv_rsp(self, rsp_pb):
"""receive response callback function"""
ret_code, content = self.parse_rsp_pb(rsp_pb)
return ret_code, content
class SysNotifyHandlerBase(RspHandlerBase):
"""sys notify"""
@classmethod
def parse_rsp_pb(cls, rsp_pb):
ret_code, content = SysNotifyPush.unpack_rsp(rsp_pb)
return ret_code, content
def on_recv_rsp(self, rsp_pb):
"""receive response callback function"""
ret_code, content = self.parse_rsp_pb(rsp_pb)
return ret_code, content
class AsyncHandler_InitConnect(RspHandlerBase):
""" AsyncHandler_TrdSubAccPush"""
def __init__(self, notify_obj=None):
self._notify_obj = notify_obj
super(AsyncHandler_InitConnect, self).__init__()
def on_recv_rsp(self, rsp_pb):
"""receive response callback function"""
ret_code, msg, conn_info_map = InitConnect.unpack_rsp(rsp_pb)
if self._notify_obj is not None:
self._notify_obj.on_async_init_connect(
ret_code, msg, conn_info_map)
return ret_code, msg
#
# class OrderDetailHandlerBase(RspHandlerBase):
# def __init__(self):
# super(OrderDetailHandlerBase, self).__init__()
#
# def on_recv_rsp(self, rsp_pb):
# """receive response callback function"""
# ret_code, msg, data = OrderDetail.unpack_rsp(rsp_pb)
#
# if ret_code != RET_OK:
# return ret_code, msg
# else:
# return ret_code, data
|
from datetime import datetime
from io import StringIO
from unittest.mock import patch
from django.conf import settings
from django.core import mail
from django.core.management import call_command
from django.test import TestCase
from django.utils import timezone
from main import models
from main.management.commands import mail_exports, process_notifications
class EnqueueNotificationsTest(TestCase):
"""
    Test that the enqueue_notifications management command creates
    NotificationRecords for the blog_user's subscribers.
"""
def setUp(self):
self.user = models.User.objects.create(
username="alice", email="alice@wonderland.com", notifications_on=True
)
post_data = {
"title": "Old post",
"slug": "old-post",
"body": "Content sentence.",
"published_at": timezone.make_aware(datetime(2019, 1, 2)),
}
models.Post.objects.create(owner=self.user, **post_data)
post_data = {
"title": "Yesterday post",
"slug": "yesterday-post",
"body": "Content sentence.",
"published_at": timezone.make_aware(datetime(2020, 1, 1)),
}
models.Post.objects.create(owner=self.user, **post_data)
# as inactive, it should be ignored by the enqueue functionality
models.Notification.objects.create(
blog_user=self.user,
email="inactive@example.com",
is_active=False,
)
self.notification = models.Notification.objects.create(
blog_user=self.user, email="s@example.com"
)
def test_command(self):
output = StringIO()
with patch.object(timezone, "now", return_value=datetime(2020, 1, 2, 9, 00)):
call_command("enqueue_notifications", stdout=output)
# notification records
self.assertEqual(len(models.NotificationRecord.objects.all()), 1)
self.assertEqual(
models.NotificationRecord.objects.first().notification.email,
self.notification.email,
)
self.assertEqual(
models.NotificationRecord.objects.first().post.title, "Yesterday post"
)
self.assertIsNone(models.NotificationRecord.objects.first().sent_at)
# logging
self.assertIn("Enqueuing notifications started.", output.getvalue())
self.assertIn(
"Adding notification record for 'Yesterday post' to 's@example.com'",
output.getvalue(),
)
self.assertIn("Enqueuing complete for 'Yesterday post'", output.getvalue())
self.assertIn("Enqueuing finished.", output.getvalue())
def tearDown(self):
models.User.objects.all().delete()
models.Post.objects.all().delete()
class ProcessNotificationsTest(TestCase):
"""
    Test that process_notifications sends emails to the subscribers of the
    existing NotificationRecords.
"""
def setUp(self):
self.user = models.User.objects.create(
username="alice", email="alice@wonderland.com", notifications_on=True
)
post_data = {
"title": "Yesterday post",
"slug": "yesterday-post",
"body": "Content sentence.",
"published_at": timezone.make_aware(datetime(2020, 1, 1)),
}
self.post_yesterday = models.Post.objects.create(owner=self.user, **post_data)
post_data = {
"title": "Today post",
"slug": "today-post",
"body": "Content sentence.",
"published_at": timezone.make_aware(datetime(2020, 1, 2)),
}
self.post_today = models.Post.objects.create(owner=self.user, **post_data)
self.notification = models.Notification.objects.create(
blog_user=self.user, email="zf@sirodoht.com"
)
# notification records
self.notificationrecord_yesterday = models.NotificationRecord.objects.create(
notification=self.notification,
post=self.post_yesterday,
sent_at=None,
)
self.notificationrecord_today = models.NotificationRecord.objects.create(
notification=self.notification,
post=self.post_today,
sent_at=None,
)
def test_mail_backend(self):
connection = process_notifications.get_mail_connection()
self.assertEqual(connection.host, settings.EMAIL_HOST_BROADCASTS)
def test_command(self):
output = StringIO()
with patch.object(
timezone, "now", return_value=datetime(2020, 1, 2, 13, 00)
), patch.object(
# Django default test runner overrides SMTP EmailBackend with locmem,
# but because we re-import the SMTP backend in
# process_notifications.get_mail_connection, we need to mock it here too.
process_notifications,
"get_mail_connection",
return_value=mail.get_connection(
"django.core.mail.backends.locmem.EmailBackend"
),
):
call_command("process_notifications", stdout=output)
# notification records
records = models.NotificationRecord.objects.all()
self.assertEqual(len(records), 2)
# notification record for yesterday's post
self.assertEqual(
records.filter(sent_at__isnull=False).first().notification.email,
self.notificationrecord_today.notification.email,
)
self.assertEqual(
records.filter(sent_at__isnull=False).first().post.title, "Yesterday post"
)
# notification record for today's post
records = models.NotificationRecord.objects.all()
self.assertEqual(
records.filter(sent_at__isnull=True).first().notification.email,
self.notificationrecord_today.notification.email,
)
self.assertEqual(
records.filter(sent_at__isnull=True).first().post.title, "Today post"
)
# logging
self.assertIn("Processing notifications.", output.getvalue())
self.assertIn("Broadcast sent. Total 1 emails.", output.getvalue())
self.assertIn(
"Adding notification record for 'Yesterday post' to 'zf@sirodoht.com'",
output.getvalue(),
)
# email
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(mail.outbox[0].subject, "Yesterday post")
self.assertIn("To unsubscribe", mail.outbox[0].body)
# email headers
self.assertEqual(mail.outbox[0].to, [self.notification.email])
self.assertEqual(mail.outbox[0].reply_to, [self.user.email])
self.assertEqual(
mail.outbox[0].from_email,
f"{self.user.username} <{self.user.username}@{settings.EMAIL_FROM_HOST}>",
)
self.assertEqual(
mail.outbox[0].extra_headers["X-PM-Message-Stream"], "newsletters"
)
self.assertIn(
"/newsletter/unsubscribe/",
mail.outbox[0].extra_headers["List-Unsubscribe"],
)
self.assertEqual(
mail.outbox[0].extra_headers["List-Unsubscribe-Post"],
"List-Unsubscribe=One-Click",
)
def tearDown(self):
models.User.objects.all().delete()
models.Post.objects.all().delete()
class MailExportsTest(TestCase):
"""
    Test that mail_exports sends emails to users with `mail_export_on` enabled.
"""
def setUp(self):
self.user = models.User.objects.create(
username="alice", email="alice@wonderland.com", mail_export_on=True
)
post_data = {
"title": "A post",
"slug": "a-post",
"body": "Content sentence.",
"published_at": timezone.make_aware(datetime(2020, 1, 1)),
}
self.post_a = models.Post.objects.create(owner=self.user, **post_data)
post_data = {
"title": "Second post",
"slug": "second-post",
"body": "Content sentence two.",
"published_at": timezone.make_aware(datetime(2020, 1, 2)),
}
self.post_b = models.Post.objects.create(owner=self.user, **post_data)
def test_mail_backend(self):
connection = mail_exports.get_mail_connection()
self.assertEqual(connection.host, settings.EMAIL_HOST_BROADCASTS)
def test_command(self):
output = StringIO()
with patch.object(
timezone, "now", return_value=datetime(2020, 1, 3, 00, 00)
), patch.object(
# Django default test runner overrides SMTP EmailBackend with locmem,
# but because we re-import the SMTP backend in
            # mail_exports.get_mail_connection, we need to mock it here too.
mail_exports,
"get_mail_connection",
return_value=mail.get_connection(
"django.core.mail.backends.locmem.EmailBackend"
),
):
call_command("mail_exports", stdout=output)
# export records
records = models.ExportRecord.objects.all()
self.assertEqual(len(records), 1)
self.assertEqual(records[0].user, self.user)
self.assertIn("export-markdown-", records[0].name)
# logging
self.assertIn("Processing email exports.", output.getvalue())
self.assertIn(f"Processing user {self.user.username}.", output.getvalue())
self.assertIn(f"Export sent to {self.user.username}.", output.getvalue())
self.assertIn(
f"Logging export record for '{records[0].name}'.", output.getvalue()
)
self.assertIn("Emailing all exports complete.", output.getvalue())
# email
self.assertEqual(len(mail.outbox), 1)
self.assertIn("Mataroa export", mail.outbox[0].subject)
self.assertIn("Unsubscribe", mail.outbox[0].body)
# email headers
self.assertEqual(mail.outbox[0].to, [self.user.email])
self.assertEqual(
mail.outbox[0].from_email,
settings.DEFAULT_FROM_EMAIL,
)
self.assertEqual(mail.outbox[0].extra_headers["X-PM-Message-Stream"], "exports")
self.assertIn(
"/export/unsubscribe/",
mail.outbox[0].extra_headers["List-Unsubscribe"],
)
self.assertEqual(
mail.outbox[0].extra_headers["List-Unsubscribe-Post"],
"List-Unsubscribe=One-Click",
)
def tearDown(self):
models.User.objects.all().delete()
models.Post.objects.all().delete()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Partially based on AboutMessagePassing in the Ruby Koans
#
from runner.koan import *
class AboutAttributeAccess(Koan):
class TypicalObject:
pass
def test_calling_undefined_functions_normally_results_in_errors(self):
typical = self.TypicalObject()
with self.assertRaises(AttributeError): typical.foobar()
def test_calling_getattribute_causes_an_attribute_error(self):
typical = self.TypicalObject()
with self.assertRaises(AttributeError): typical.__getattribute__('foobar')
# THINK ABOUT IT:
#
# If the method __getattribute__() causes the AttributeError, then
# what would happen if we redefine __getattribute__()?
# ------------------------------------------------------------------
class CatchAllAttributeReads:
def __getattribute__(self, attr_name):
return "Someone called '" + attr_name + "' and it could not be found"
def test_all_attribute_reads_are_caught(self):
catcher = self.CatchAllAttributeReads()
self.assertRegex(catcher.foobar, "Someone called 'foobar' and it could not be found")
def test_intercepting_return_values_can_disrupt_the_call_chain(self):
catcher = self.CatchAllAttributeReads()
self.assertRegex(catcher.foobaz, "Someone called 'foobaz' and it could not be found") # This is fine
try:
catcher.foobaz(1)
except TypeError as ex:
err_msg = ex.args[0]
self.assertRegex(err_msg, "'str' object is not callable")
# foobaz returns a string. What happens to the '(1)' part?
# Try entering this into a python console to reproduce the issue:
#
# "foobaz"(1)
#
def test_changes_to_the_getattribute_implementation_affects_getattr_function(self):
catcher = self.CatchAllAttributeReads()
self.assertRegex(getattr(catcher, 'any_attribute'), "Someone called 'any_attribute' and it could not be found")
# ------------------------------------------------------------------
class WellBehavedFooCatcher:
def __getattribute__(self, attr_name):
if attr_name[:3] == "foo":
return "Foo to you too"
else:
return super().__getattribute__(attr_name)
def test_foo_attributes_are_caught(self):
catcher = self.WellBehavedFooCatcher()
self.assertEqual("Foo to you too", catcher.foo_bar)
self.assertEqual("Foo to you too", catcher.foo_baz)
def test_non_foo_messages_are_treated_normally(self):
catcher = self.WellBehavedFooCatcher()
with self.assertRaises(AttributeError): catcher.normal_undefined_attribute
# ------------------------------------------------------------------
global stack_depth
stack_depth = 0
class RecursiveCatcher:
def __init__(self):
global stack_depth
stack_depth = 0
self.no_of_getattribute_calls = 0
def __getattribute__(self, attr_name):
# We need something that is outside the scope of this class:
global stack_depth
stack_depth += 1
if stack_depth<=10: # to prevent a stack overflow
self.no_of_getattribute_calls += 1
# Oops! We just accessed an attribute (no_of_getattribute_calls)
# Guess what happens when self.no_of_getattribute_calls is
# accessed?
# Using 'object' directly because using super() here will also
# trigger a __getattribute__() call.
return object.__getattribute__(self, attr_name)
def my_method(self):
pass
def test_getattribute_is_a_bit_overzealous_sometimes(self):
catcher = self.RecursiveCatcher()
catcher.my_method()
global stack_depth
self.assertEqual(11, stack_depth)
# ------------------------------------------------------------------
class MinimalCatcher:
class DuffObject: pass
def __init__(self):
self.no_of_getattr_calls = 0
def __getattr__(self, attr_name):
self.no_of_getattr_calls += 1
return self.DuffObject
def my_method(self):
pass
def test_getattr_ignores_known_attributes(self):
catcher = self.MinimalCatcher()
catcher.my_method()
self.assertEqual(0, catcher.no_of_getattr_calls)
def test_getattr_only_catches_unknown_attributes(self):
catcher = self.MinimalCatcher()
catcher.purple_flamingos()
catcher.free_pie()
self.assertEqual("DuffObject",
type(catcher.give_me_duff_or_give_me_death()).__name__)
self.assertEqual(3, catcher.no_of_getattr_calls)
# ------------------------------------------------------------------
class PossessiveSetter(object):
def __setattr__(self, attr_name, value):
new_attr_name = attr_name
if attr_name[-5:] == 'comic':
new_attr_name = "my_" + new_attr_name
elif attr_name[-3:] == 'pie':
new_attr_name = "a_" + new_attr_name
object.__setattr__(self, new_attr_name, value)
def test_setattr_intercepts_attribute_assignments(self):
fanboy = self.PossessiveSetter()
fanboy.comic = 'The Laminator, issue #1'
fanboy.pie = 'blueberry'
self.assertEqual("blueberry", fanboy.a_pie)
#
# NOTE: Change the prefix to make this next assert pass
#
prefix = 'my'
self.assertEqual("The Laminator, issue #1", getattr(fanboy, prefix + '_comic'))
# ------------------------------------------------------------------
class ScarySetter:
def __init__(self):
self.num_of_coconuts = 9
self._num_of_private_coconuts = 2
def __setattr__(self, attr_name, value):
new_attr_name = attr_name
if attr_name[0] != '_':
new_attr_name = "altered_" + new_attr_name
object.__setattr__(self, new_attr_name, value)
def test_it_modifies_external_attribute_as_expected(self):
setter = self.ScarySetter()
setter.e = "mc hammer"
self.assertEqual("mc hammer", setter.altered_e)
def test_it_mangles_some_internal_attributes(self):
setter = self.ScarySetter()
        with self.assertRaises(AttributeError):
            setter.num_of_coconuts
        self.assertEqual(9, setter.altered_num_of_coconuts)
def test_in_this_case_private_attributes_remain_unmangled(self):
setter = self.ScarySetter()
self.assertEqual(2, setter._num_of_private_coconuts)
|
"""Here we import the different task submodules/ collections"""
from invoke import Collection, task
from tasks import docker, package, sphinx, test # pylint: disable=import-self
# pylint: disable=invalid-name
# as invoke only recognizes lower case
namespace = Collection()
namespace.add_collection(test)
namespace.add_collection(docker)
namespace.add_collection(package)
namespace.add_collection(sphinx)
|
from pygame import Rect
# noinspection PyPackageRequirements
from OpenGL import GL
from albow.openGL.GLViewport import GLViewport
class GLOrtho(GLViewport):
"""
GLOrtho provides an OpenGL drawing area with an orthographic projection.
Using a GLOrtho widget is the same as using a GLViewport, except that you do not need to
provide a `setup_projection()` method.
"""
    def __init__(self, rect: Rect = None, xmin=-1, xmax=1, ymin=-1, ymax=1, near=-1, far=1, **kwds):
"""
Creates a GLOrtho instance with the given initial values for its projection parameters.
Args:
rect: A pygame Rect
            xmin: Coordinate of the left vertical clipping plane.
            xmax: Coordinate of the right vertical clipping plane.
            ymin: Coordinate of the bottom horizontal clipping plane.
            ymax: Coordinate of the top horizontal clipping plane.
            near: Distance to the near depth clipping plane.
                The distance is negative if the plane is to be behind the viewer.
            far: Distance to the far depth clipping plane.
                The distance is negative if the plane is to be behind the viewer.
**kwds:
"""
#
# Python 3 update
#
# GLViewport.__init__(self, rect, **kwds)
super().__init__(rect, **kwds)
self.xmin = xmin
self.xmax = xmax
self.ymin = ymin
self.ymax = ymax
self.near = near
self.far = far
def setup_projection(self):
GL.glOrtho(self.xmin, self.xmax, self.ymin, self.ymax, self.near, self.far)
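# Illustrative sketch (not part of albow): a minimal GLOrtho subclass.  It
# assumes, per GLViewport's usual contract, that drawing is done in a
# gl_draw() override; the orthographic projection itself is inherited from
# GLOrtho, so no setup_projection() is needed here.
class _ExampleTriangle(GLOrtho):
    def gl_draw(self):
        # Draw a single triangle inside the default -1..1 orthographic box
        GL.glColor3f(1.0, 0.5, 0.0)
        GL.glBegin(GL.GL_TRIANGLES)
        GL.glVertex2f(-0.5, -0.5)
        GL.glVertex2f(0.5, -0.5)
        GL.glVertex2f(0.0, 0.5)
        GL.glEnd()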
|
import pytest
from mock import MagicMock, patch, PropertyMock
from pontoon.tags.models import Tag
from pontoon.tags.utils import (
TagsLatestTranslationsTool, TagsResourcesTool,
TagsStatsTool, TagsTool, TagTool)
from pontoon.tags.utils.base import Clonable
def test_util_tags_tool():
# test tags tool instantiation
tags_tool = TagsTool()
assert tags_tool.tag_class is TagTool
assert tags_tool.resources_class is TagsResourcesTool
assert tags_tool.translations_class is TagsLatestTranslationsTool
assert tags_tool.stats_class is TagsStatsTool
assert tags_tool.locales is None
assert tags_tool.projects is None
assert tags_tool.priority is None
assert tags_tool.slug is None
assert tags_tool.path is None
assert tags_tool.tag_manager == Tag.objects
@patch('pontoon.tags.utils.TagsTool.stats_class')
def test_util_tags_tool_stats(stats_mock, tag_init_kwargs):
# tests instantiation of tag.stats_tool with different args
tags_tool = TagsTool(**tag_init_kwargs)
stats_mock.return_value = 23
assert tags_tool.stat_tool == 23
assert stats_mock.call_args[1] == tag_init_kwargs
@pytest.mark.parametrize(
"kwargs",
[dict(
slug=None,
locales=None,
projects=None,
path=None),
dict(
slug=1,
locales=2,
projects=3,
path=4)])
@patch('pontoon.tags.utils.TagsTool.resources_class')
def test_util_tags_tool_resources(resources_mock, kwargs):
# tests instantiation of tag.resources_tool with different args
tags_tool = TagsTool(**kwargs)
resources_mock.return_value = 23
assert tags_tool.resource_tool == 23
assert resources_mock.call_args[1] == kwargs
@pytest.mark.parametrize(
"kwargs",
[dict(
slug=None,
locales=None,
projects=None),
dict(
slug=1,
locales=2,
projects=3)])
@patch('pontoon.tags.utils.TagsTool.translations_class')
def test_util_tags_tool_translations(trans_mock, kwargs):
# tests instantiation of tag.translations_tool with different args
tags_tool = TagsTool(**kwargs)
trans_mock.return_value = 23
assert tags_tool.translation_tool == 23
assert trans_mock.call_args[1] == kwargs
@patch('pontoon.tags.utils.TagsTool.tag_class')
@patch('pontoon.tags.utils.TagsTool.get_tags')
@patch('pontoon.tags.utils.TagsTool.__len__')
@patch('pontoon.tags.utils.TagsTool.__iter__')
def test_util_tags_tool_get(iter_mock, len_mock, tags_mock, class_mock):
# tests getting a TagTool from TagsTool
tags_tool = TagsTool()
class_mock.return_value = 23
len_mock.return_value = 7
iter_mock.return_value = iter([3, 17, 73])
# with no slug returns first result from iter(self)
assert tags_tool.get() == 3
assert not class_mock.called
assert not tags_mock.called
assert len_mock.called
assert iter_mock.called
len_mock.reset_mock()
iter_mock.reset_mock()
# calling with slug creates a TagTool instance
    # and doesn't call iter(self) at all
assert tags_tool.get(113) == 23
assert not len_mock.called
assert not iter_mock.called
assert (
list(class_mock.call_args)
== [(tags_tool, ), {}])
assert (
list(tags_mock.call_args)
== [(), {'slug': 113}])
def test_util_tags_tool_call_and_clone():
# tests cloning a TagsTool
tags_tool = TagsTool()
cloned = tags_tool()
assert cloned is not tags_tool
assert isinstance(tags_tool, Clonable)
assert isinstance(cloned, Clonable)
@patch('pontoon.tags.utils.TagsTool.__call__')
def test_util_tags_tool_getitem(call_mock):
# test that calling __getitem__ calls __call__ with slug
tags_tool = TagsTool()
slugs = ["foo", "bar"]
for slug in slugs:
tags_tool[slug]
assert call_mock.call_args_list[0][1] == dict(slug=slugs[0])
assert call_mock.call_args_list[1][1] == dict(slug=slugs[1])
@patch('pontoon.tags.utils.TagsTool.iter_tags')
@patch('pontoon.tags.utils.TagsTool.stat_tool',
new_callable=PropertyMock)
def test_util_tags_tool_iter(stats_mock, iter_mock):
    # tests that iterating the tool calls iter_tags with
    # the stats data
tags_tool = TagsTool()
stats_mock.configure_mock(
**{'return_value.data': [7, 23]})
iter_mock.return_value = iter([])
assert list(tags_tool) == []
assert stats_mock.called
assert (
list(iter_mock.call_args)
== [([7, 23],), {}])
@patch('pontoon.tags.utils.TagsTool.stat_tool',
new_callable=PropertyMock)
def test_util_tags_tool_len(stats_mock):
    # tests that len() returns the length
    # of the stats data
m_len = MagicMock()
m_len.__len__.return_value = 23
stats_mock.configure_mock(
**{'return_value.data': m_len})
tags_tool = TagsTool()
assert len(tags_tool) == 23
assert m_len.__len__.called
@patch('pontoon.tags.utils.TagsTool.translation_tool',
new_callable=PropertyMock)
@patch('pontoon.tags.utils.TagsTool.tag_class')
def test_util_tags_tool_iter_tags(tag_mock, trans_mock):
    # tests that iter_tags instantiates a TagTool with
# stat data and latest_translation data
trans_mock.configure_mock(
**{'return_value.data.get.return_value': 23})
tags_tool = TagsTool()
list(
tags_tool.iter_tags(
[dict(resource__tag=1, foo="bar"),
dict(resource__tag=2, foo="bar"),
dict(resource__tag=3, foo="bar")]))
# translation_tool.data.get() was called 3 times with tag pks
assert (
[x[0][0] for x in trans_mock.return_value.data.get.call_args_list]
== [1, 2, 3])
# TagTool was called 3 times with the tags tool as arg
assert (
[x[0][0] for x in tag_mock.call_args_list]
== [tags_tool] * 3)
# and stat + translation data as kwargs
assert (
[x[1] for x in tag_mock.call_args_list]
== [{'resource__tag': 1, 'latest_translation': 23, 'foo': 'bar'},
{'resource__tag': 2, 'latest_translation': 23, 'foo': 'bar'},
{'resource__tag': 3, 'latest_translation': 23, 'foo': 'bar'}])
@patch('pontoon.tags.utils.TagsTool.tag_manager',
new_callable=PropertyMock)
@patch('pontoon.tags.utils.tags.glob_to_regex')
def test_util_tags_tool_get_tags(glob_mock, tag_mock):
glob_mock.return_value = 17
filter_mock = MagicMock(
**{'filter.return_value': 23})
tag_mock.configure_mock(
**{'return_value.values.return_value': filter_mock})
tags_tool = TagsTool()
# no slug provided, returns `values`
assert tags_tool.get_tags() is filter_mock
assert not filter_mock.called
assert not glob_mock.called
assert (
list(tag_mock.return_value.values.call_args)
== [('pk', 'name', 'slug', 'priority', 'project'), {}])
tag_mock.reset_mock()
# slug provided, `values` is filtered
assert tags_tool.get_tags('FOO') == 23
assert (
list(filter_mock.filter.call_args)
== [(), {'slug__regex': 17}])
assert list(glob_mock.call_args) == [('FOO',), {}]
assert (
list(tag_mock.return_value.values.call_args)
== [('pk', 'name', 'slug', 'priority', 'project'), {}])
|
# Generated by Django 3.2.6 on 2021-08-26 16:31
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import uuid
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('contenttypes', '0002_remove_content_type_name'),
]
operations = [
migrations.CreateModel(
name='Vote',
fields=[
('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
('object_id', models.UUIDField()),
('value', models.IntegerField(choices=[(-1, 'down'), (0, 'neutral'), (1, 'up')])),
('content_type', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='contenttypes.contenttype')),
('owner', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
|
from __future__ import annotations
import os
import itertools
from typing import Any, Optional, TypeVar
import pygame
from gupb.controller import keyboard
from gupb.model import characters
from gupb.model import effects
from gupb.model import games
from gupb.model import tiles
from gupb.model import weapons
pygame.init()
Sprite = TypeVar('Sprite')
TILE_SIZE = 8
BLACK = pygame.Color('black')
def load_sprite(group: str, name: str, transparent: Optional[pygame.Color] = None) -> Sprite:
path = os.path.join('resources', 'images', group, f'{name}.png')
sprite = pygame.image.load(path).convert()
if transparent:
sprite.set_colorkey(transparent)
return sprite
class SpriteRepository:
def __init__(self) -> None:
self.sprites: dict[Any, Sprite] = {
tiles.Land: load_sprite('tiles', 'land'),
tiles.Sea: load_sprite('tiles', 'sea'),
tiles.Wall: load_sprite('tiles', 'wall'),
tiles.Menhir: load_sprite('tiles', 'menhir'),
weapons.Knife: load_sprite('weapons', 'knife', BLACK),
weapons.Sword: load_sprite('weapons', 'sword', BLACK),
weapons.Axe: load_sprite('weapons', 'axe', BLACK),
weapons.Bow: load_sprite('weapons', 'bow', BLACK),
weapons.Amulet: load_sprite('weapons', 'amulet', BLACK),
characters.Tabard.BLUE: load_sprite('characters', 'champion_blue', BLACK),
characters.Tabard.BROWN: load_sprite('characters', 'champion_brown', BLACK),
characters.Tabard.GREY: load_sprite('characters', 'champion_grey', BLACK),
characters.Tabard.RED: load_sprite('characters', 'champion_red', BLACK),
characters.Tabard.VIOLET: load_sprite('characters', 'champion_violet', BLACK),
characters.Tabard.WHITE: load_sprite('characters', 'champion_white', BLACK),
characters.Tabard.YELLOW: load_sprite('characters', 'champion_yellow', BLACK),
effects.Mist: load_sprite('effects', 'mist', BLACK),
effects.WeaponCut: load_sprite('effects', 'blood', BLACK),
}
self.rotation_values: dict[characters.Facing, int] = {
characters.Facing.RIGHT: 0,
characters.Facing.UP: 90,
characters.Facing.LEFT: 180,
characters.Facing.DOWN: 270,
}
self.champion_sprites: dict[tuple[characters.Tabard, characters.Facing], Sprite] = {
(tabard, facing): pygame.transform.rotate(self.sprites[tabard], self.rotation_values[facing])
for tabard, facing in itertools.product(
[
characters.Tabard.BLUE,
characters.Tabard.BROWN,
characters.Tabard.GREY,
characters.Tabard.RED,
characters.Tabard.VIOLET,
characters.Tabard.WHITE,
characters.Tabard.YELLOW,
],
[
characters.Facing.RIGHT,
characters.Facing.UP,
characters.Facing.LEFT,
characters.Facing.DOWN,
]
)
}
def match_sprite(self, element: Any) -> Sprite:
if isinstance(element, characters.Champion):
return self.champion_sprites[(element.tabard, element.facing)]
else:
return self.sprites[type(element)]
class Renderer:
def __init__(self, ms_per_time_unit: int = 1):
pygame.display.set_caption('GUPB')
self.screen = pygame.display.set_mode((100, 100))
self.sprite_repository = SpriteRepository()
self.clock = pygame.time.Clock()
self.time_passed = 0
self.ms_per_time_unit = ms_per_time_unit
def run(
self,
game: games.Game,
show_sight: Optional[characters.Champion] = None,
keyboard_controller: Optional[keyboard.KeyboardController] = None,
) -> None:
self.screen = self._resize_window(game)
time_to_cycle = self._time_to_cycle(game)
self.clock.tick()
while not game.finished:
self.time_passed += self.clock.tick()
if self.time_passed >= time_to_cycle:
self.time_passed -= time_to_cycle
game.cycle()
self._render(game, show_sight)
time_to_cycle = self._time_to_cycle(game)
for event in pygame.event.get():
if event.type == pygame.QUIT:
return
elif event.type == pygame.KEYDOWN and keyboard_controller:
keyboard_controller.register(event.key)
@staticmethod
def _resize_window(game: games.Game) -> pygame.Surface:
arena_x_size, arena_y_size = game.arena.size
window_size = TILE_SIZE * arena_x_size, TILE_SIZE * arena_y_size
return pygame.display.set_mode(window_size)
def _time_to_cycle(self, game: games.Game) -> int:
return self.ms_per_time_unit * game.current_state.value
def _render(self, game: games.Game, show_sight: Optional[characters.Champion]) -> None:
background = pygame.Surface(self.screen.get_size())
        background = background.convert()
self._render_arena(game, background)
if show_sight:
self._render_sight(game, show_sight, background)
self.screen.blit(background, (0, 0))
pygame.display.flip()
def _render_arena(self, game: games.Game, background: pygame.Surface) -> None:
for i, j in game.arena.terrain:
blit_destination = (i * TILE_SIZE, j * TILE_SIZE)
tile = game.arena.terrain[i, j]
tile_sprite = self.sprite_repository.match_sprite(tile)
background.blit(tile_sprite, blit_destination)
if tile.loot:
loot_sprite = self.sprite_repository.match_sprite(tile.loot)
background.blit(loot_sprite, blit_destination)
if tile.character:
character_sprite = self.sprite_repository.match_sprite(tile.character)
background.blit(character_sprite, blit_destination)
if tile.effects:
for effect in tile.effects:
effect_sprite = self.sprite_repository.match_sprite(effect)
background.blit(effect_sprite, blit_destination)
@staticmethod
def _render_sight(game: games.Game, show_sight: characters.Champion, background: pygame.Surface) -> None:
if show_sight in game.champions:
darken_percent = 0.5
dark = pygame.Surface((TILE_SIZE, TILE_SIZE), pygame.SRCALPHA)
dark.fill((0, 0, 0, darken_percent * 255))
visible = game.arena.visible_coords(show_sight)
for i, j in game.arena.terrain:
if (i, j) not in visible:
blit_destination = (i * TILE_SIZE, j * TILE_SIZE)
background.blit(dark, blit_destination)
|
# -*- coding: utf-8 -*-
#
# michael a.g. aïvázis
# orthologue
# (c) 1998-2019 all rights reserved
#
# externals
import collections
# superclass
from .AbstractMetaclass import AbstractMetaclass
# class declaration
class AttributeClassifier(AbstractMetaclass):
"""
A base metaclass that enables attribute categorization.
A common pattern in pyre is to define classes that contain special attributes whose purpose
is to collect declaration meta data and associate them with a class attribute. These
attributes are processed by metaclasses and are converted into appropriate behavior. For
example, components have properties, which are decorated descriptors that enable external
configuration of component state. Similarly, XML parsing happens with the aid of classes
that capture the syntax, semantics and processing behavior of tags by employing descriptors
to capture the layout of an XML document.
This class defines {pyre_harvest}, which scans the class attribute dictionary for instances
of the special class {descriptor}. It also overrides {__prepare__} to provide attribute
storage that records the order in which attributes were encountered in the class record.
"""
# data
pyre_reserved = set()
# meta methods
@classmethod
def __prepare__(cls, name, bases, **kwds):
"""
Build an attribute table that maintains a category index for attribute descriptors
"""
# use an ordered dictionary
return collections.OrderedDict()
# interface
@classmethod
def pyre_harvest(cls, attributes, descriptor):
"""
Examine {attributes}, looking for instances of {descriptor}
"""
# reserved names are excluded from harvesting
reserved = cls.pyre_reserved
# loop over the attributes
for name, attribute in attributes.items():
# if this is a descriptor that's not in the reserved list
if isinstance(attribute, descriptor) and name not in reserved:
# return it to the caller along with its name
yield name, attribute
# all done
return
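# Illustrative sketch (not part of pyre): a toy descriptor and a metaclass
# built on AttributeClassifier that uses {pyre_harvest} to collect descriptor
# declarations in the order they appear in a class body.  The names
# {_Measure}, {_Recorder} and {pyre_measures} are hypothetical.
class _Measure:
    """A stand-in descriptor used only for this illustration"""
class _Recorder(AttributeClassifier):
    """A toy metaclass that records {_Measure} declarations"""
    def __new__(cls, name, bases, attributes, **kwds):
        # build the class record as usual
        record = super().__new__(cls, name, bases, attributes, **kwds)
        # harvest the descriptors, preserving declaration order
        record.pyre_measures = tuple(cls.pyre_harvest(attributes, _Measure))
        # and hand the new class record back
        return record
# a class declared with {metaclass=_Recorder}, e.g.
#     class Sensor(metaclass=_Recorder):
#         temperature = _Measure()
#         pressure = _Measure()
# ends up with Sensor.pyre_measures holding the ('temperature', ...) and
# ('pressure', ...) pairs in declaration order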
# end of file
|
#!/usr/bin/env python
import sys
import string
from sklearn.feature_extraction import text
stops = set(text.ENGLISH_STOP_WORDS)
# get all lines from stdin
for line in sys.stdin:
# remove leading and trailing whitespace, lower case letters, remove punctuation
line = line.strip().lower().translate(None, string.punctuation)
# split the line into words; splits on any whitespace
words = line.split()
    # output tuples (word, 1) in tab-delimited format for every word that is
    # not in scikit-learn's English stop word list
for word in words:
if word not in stops:
print '%s\t%s' % (word, "1")
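# Illustrative companion sketch (not part of the original mapper): a Hadoop
# Streaming reducer for the (word, 1) pairs emitted above.  It assumes the
# streaming framework has already sorted the mapper output by key, so equal
# words arrive on consecutive lines.  The function is defined only for
# reference and is never called by this script.
def example_reduce(stream=sys.stdin, out=sys.stdout):
    current_word = None
    current_count = 0
    for kv in stream:
        # each input line is "word<TAB>count"
        word, _, count = kv.strip().partition('\t')
        try:
            count = int(count)
        except ValueError:
            continue  # skip malformed lines
        if word == current_word:
            current_count += count
        else:
            if current_word is not None:
                out.write('%s\t%d\n' % (current_word, current_count))
            current_word = word
            current_count = count
    if current_word is not None:
        out.write('%s\t%d\n' % (current_word, current_count))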
|
from ..base import MultiGridEnv
from .empty import EmptyMultiGrid, EmptyColorMultiGrid
from .doorkey import DoorKeyEnv
from .cluttered import ClutteredMultiGrid
from .goalcycle import ClutteredGoalCycleEnv
from .viz_test import VisibilityTestEnv
from .hallways import HallWaysMultiGrid
from .comm_game import CommunicationGameEnv
from ..agents import GridAgentInterface
from gym.envs.registration import register as gym_register
import sys
import inspect
import random
this_module = sys.modules[__name__]
registered_envs = []
def register_marl_env(
env_name,
env_class,
n_agents,
grid_size,
view_size,
view_tile_size=8,
view_offset=0,
agent_color=None,
env_kwargs={},
):
colors = ["red", "blue", "purple", "orange", "olive", "pink"]
assert n_agents <= len(colors)
class RegEnv(env_class):
def __new__(cls):
instance = super(env_class, RegEnv).__new__(env_class)
instance.__init__(
agents=[
GridAgentInterface(
color=c if agent_color is None else agent_color,
view_size=view_size,
                        view_tile_size=view_tile_size,
view_offset=view_offset,
)
for c in colors[:n_agents]
],
grid_size=grid_size,
**env_kwargs,
)
return instance
env_class_name = f"env_{len(registered_envs)}"
setattr(this_module, env_class_name, RegEnv)
registered_envs.append(env_name)
gym_register(env_name, entry_point=f"marlgrid.envs:{env_class_name}")
def env_from_config(env_config, randomize_seed=True):
possible_envs = {
k: v
for k, v in globals().items()
if inspect.isclass(v) and issubclass(v, MultiGridEnv)
}
env_class = possible_envs[env_config["env_class"]]
env_kwargs = {k: v for k, v in env_config.items() if k != "env_class"}
if randomize_seed:
env_kwargs["seed"] = env_kwargs.get("seed", 0) + random.randint(0, 1337 * 1337)
return env_class(**env_kwargs)
register_marl_env(
"MarlGrid-1AgentCluttered15x15-v0",
ClutteredMultiGrid,
n_agents=1,
grid_size=11,
view_size=5,
env_kwargs={"n_clutter": 30},
)
register_marl_env(
"MarlGrid-2AgentCluttered15x15-v0",
ClutteredMultiGrid,
n_agents=2,
grid_size=11,
view_size=5,
env_kwargs={"n_clutter": 30},
)
register_marl_env(
"MarlGrid-3AgentCluttered11x11-v0",
ClutteredMultiGrid,
n_agents=3,
grid_size=11,
view_size=7,
env_kwargs={"clutter_density": 0.15},
)
register_marl_env(
"MarlGrid-3AgentCluttered15x15-v0",
ClutteredMultiGrid,
n_agents=3,
grid_size=15,
view_size=7,
env_kwargs={"clutter_density": 0.15},
)
register_marl_env(
"MarlGrid-2AgentEmpty9x9-v0", EmptyMultiGrid, n_agents=2, grid_size=9, view_size=7
)
register_marl_env(
"MarlGrid-3AgentEmpty9x9-v0", EmptyMultiGrid, n_agents=3, grid_size=9, view_size=7
)
register_marl_env(
"MarlGrid-4AgentEmpty9x9-v0", EmptyMultiGrid, n_agents=4, grid_size=9, view_size=7
)
register_marl_env(
"Goalcycle-demo-solo-v0",
ClutteredGoalCycleEnv,
n_agents=1,
grid_size=13,
view_size=7,
view_tile_size=5,
view_offset=1,
env_kwargs={"clutter_density": 0.1, "n_bonus_tiles": 3},
)
register_marl_env(
"MarlGrid-2AgentComms15x15-v0",
HallWaysMultiGrid,
n_agents=2,
grid_size=15,
view_size=7,
env_kwargs={
"respawn": False,
"ghost_mode": False,
"reward_decay": False,
"goal_coordinates": [(1, 1), (1, 13), (13, 13), (13, 1)],
"goal_colors": ["blue", "red", "blue", "red"],
"max_steps": 250,
},
agent_color="green",
)
register_marl_env(
"MarlGrid-2AgentEmptyColor15x15-v0",
EmptyColorMultiGrid,
n_agents=2,
grid_size=15,
view_size=7,
env_kwargs={
"respawn": False,
"ghost_mode": False,
"reward_decay": False,
"goal_coordinates": [(7, 1), (8, 1), (8, 13), (7, 13)],
"goal_colors": ["blue", "red", "blue", "red"],
"max_steps": 250,
},
agent_color="green",
)
register_marl_env(
"MarlGrid-2AgentCommGame15x15-v0",
CommunicationGameEnv,
n_agents=2,
grid_size=15,
view_size=7,
env_kwargs={
"respawn": False,
"ghost_mode": False,
"reward_decay": False,
"block_coordinates": [(1, 1), (13, 1), (1, 13), (13, 13)],
"block_colors": ["blue", "red", "cyan", "pink"],
"comm_blocks_coordinates": [(7, 4), (7, 10)],
"max_steps": 250,
},
agent_color="green",
)
|
#!/usr/bin/env python
# -- Content-Encoding: UTF-8 --
"""
Tests the framework events.
:author: Thomas Calmant
"""
# Standard library
try:
import unittest2 as unittest
except ImportError:
import unittest
# Pelix
from pelix.framework import FrameworkFactory, Bundle, BundleException, \
BundleContext, BundleEvent, ServiceEvent
from pelix.services import SERVICE_EVENT_LISTENER_HOOK
# Tests
from tests import log_on, log_off
from tests.interfaces import IEchoService
# ------------------------------------------------------------------------------
__version_info__ = (1, 0, 1)
__version__ = ".".join(str(x) for x in __version_info__)
SERVICE_BUNDLE = "tests.framework.service_bundle"
SIMPLE_BUNDLE = "tests.framework.simple_bundle"
# ------------------------------------------------------------------------------
class BundleEventTest(unittest.TestCase):
"""
Pelix bundle event tests
"""
def setUp(self):
"""
Called before each test. Initiates a framework.
"""
self.framework = FrameworkFactory.get_framework()
self.framework.start()
self.test_bundle_name = SIMPLE_BUNDLE
self.bundle = None
self.received = []
def tearDown(self):
"""
Called after each test
"""
self.framework.stop()
FrameworkFactory.delete_framework()
def reset_state(self):
"""
Resets the flags
"""
del self.received[:]
def bundle_changed(self, event):
"""
Called by the framework when a bundle event is triggered
@param event: The BundleEvent
"""
assert isinstance(event, BundleEvent)
bundle = event.get_bundle()
kind = event.get_kind()
if self.bundle is not None \
and kind == BundleEvent.INSTALLED:
# Bundle is not yet locally known...
self.assertIs(self.bundle, bundle,
"Received an event for an other bundle.")
self.assertNotIn(kind, self.received, "Event received twice")
self.received.append(kind)
def testBundleEvents(self):
"""
Tests if the signals are correctly received
"""
context = self.framework.get_bundle_context()
assert isinstance(context, BundleContext)
# Register to events
self.assertTrue(context.add_bundle_listener(self),
"Can't register the bundle listener")
# Install the bundle
self.bundle = bundle = context.install_bundle(self.test_bundle_name)
assert isinstance(bundle, Bundle)
        # Assert the INSTALLED event has been received
self.assertEqual([BundleEvent.INSTALLED],
self.received, "Received {0}".format(self.received))
self.reset_state()
# Start the bundle
bundle.start()
# Assert the events have been received
self.assertEqual([BundleEvent.STARTING, BundleEvent.STARTED],
self.received, "Received {0}".format(self.received))
self.reset_state()
# Stop the bundle
bundle.stop()
# Assert the events have been received
self.assertEqual([BundleEvent.STOPPING, BundleEvent.STOPPING_PRECLEAN,
BundleEvent.STOPPED], self.received,
"Received {0}".format(self.received))
self.reset_state()
# Uninstall the bundle
bundle.uninstall()
# Assert the events have been received
self.assertEqual([BundleEvent.UNINSTALLED],
self.received, "Received {0}".format(self.received))
self.reset_state()
# Unregister from events
context.remove_bundle_listener(self)
# ------------------------------------------------------------------------------
class ServiceEventTest(unittest.TestCase):
"""
Pelix service event tests
"""
def setUp(self):
"""
Called before each test. Initiates a framework.
"""
self.framework = FrameworkFactory.get_framework()
self.framework.start()
self.test_bundle_name = SERVICE_BUNDLE
self.bundle = None
self.received = []
def tearDown(self):
"""
Called after each test
"""
self.framework.stop()
FrameworkFactory.delete_framework()
def reset_state(self):
"""
Resets the flags
"""
del self.received[:]
def service_changed(self, event):
"""
Called by the framework when a service event is triggered
@param event: The ServiceEvent
"""
assert isinstance(event, ServiceEvent)
ref = event.get_service_reference()
self.assertIsNotNone(ref, "Invalid service reference in the event")
kind = event.get_kind()
if kind == ServiceEvent.MODIFIED \
or kind == ServiceEvent.MODIFIED_ENDMATCH:
# Properties have been modified
self.assertNotEqual(ref.get_properties(),
event.get_previous_properties(),
"Modified event for unchanged properties")
self.assertNotIn(kind, self.received, "Event received twice")
self.received.append(kind)
def testDoubleListener(self):
"""
Tests double registration / unregistration
"""
context = self.framework.get_bundle_context()
assert isinstance(context, BundleContext)
# Double registration
self.assertTrue(context.add_service_listener(self),
"Can't register the service listener")
log_off()
self.assertFalse(context.add_service_listener(self),
"Service listener registered twice")
log_on()
# Double unregistration
self.assertTrue(context.remove_service_listener(self),
"Can't unregister the service listener")
log_off()
self.assertFalse(context.remove_service_listener(self),
"Service listener unregistered twice")
log_on()
def testInvalidFilterListener(self):
"""
Tests invalid filter listener registration
"""
context = self.framework.get_bundle_context()
assert isinstance(context, BundleContext)
log_off()
self.assertRaises(BundleException, context.add_service_listener, self,
"Invalid")
log_on()
self.assertFalse(context.remove_service_listener(self),
"Invalid filter was registered anyway")
def testServiceEventsNormal(self):
"""
Tests if the signals are correctly received
"""
context = self.framework.get_bundle_context()
assert isinstance(context, BundleContext)
# Register to events
self.assertTrue(context.add_service_listener(self),
"Can't register the service listener")
# Install the bundle
self.bundle = bundle = context.install_bundle(self.test_bundle_name)
assert isinstance(bundle, Bundle)
        # Assert that no service event has been received on install
self.assertEqual(
[], self.received, "Received {0}".format(self.received))
self.reset_state()
# Start the bundle
bundle.start()
# Assert the events have been received
self.assertEqual([ServiceEvent.REGISTERED],
self.received, "Received {0}".format(self.received))
self.reset_state()
# Stop the bundle
bundle.stop()
# Assert the events have been received
self.assertEqual([ServiceEvent.UNREGISTERING],
self.received, "Received {0}".format(self.received))
self.reset_state()
# Uninstall the bundle
bundle.uninstall()
# Assert the events have been received
self.assertEqual(
[], self.received, "Received {0}".format(self.received))
self.reset_state()
# Unregister from events
context.remove_service_listener(self)
def testServiceEventsNoStop(self):
"""
Tests if the signals are correctly received, even if the service is not
correctly removed
"""
context = self.framework.get_bundle_context()
assert isinstance(context, BundleContext)
# Register to events
self.assertTrue(context.add_service_listener(self),
"Can't register the service listener")
# Install the bundle
self.bundle = bundle = context.install_bundle(self.test_bundle_name)
assert isinstance(bundle, Bundle)
        # Assert that no service event has been received on install
self.assertEqual(
[], self.received, "Received {0}".format(self.received))
self.reset_state()
# Start the bundle
bundle.start()
# Assert the events have been received
self.assertEqual([ServiceEvent.REGISTERED],
self.received, "Received {0}".format(self.received))
self.reset_state()
# Uninstall the bundle, without unregistering the service
module_ = bundle.get_module()
module_.unregister = False
bundle.uninstall()
# Assert the events have been received
self.assertEqual([ServiceEvent.UNREGISTERING],
self.received, "Received {0}".format(self.received))
self.reset_state()
# Unregister from events
context.remove_service_listener(self)
def testServiceModified(self):
"""
Tests the service modified event
"""
context = self.framework.get_bundle_context()
assert isinstance(context, BundleContext)
# Register to events
self.assertTrue(context.add_service_listener(self, "(test=True)"),
"Can't register the service listener")
# Install the bundle
self.bundle = bundle = context.install_bundle(self.test_bundle_name)
assert isinstance(bundle, Bundle)
# Start the bundle
bundle.start()
# Assert the events have been received
self.assertEqual([ServiceEvent.REGISTERED],
self.received, "Received {0}".format(self.received))
self.reset_state()
# Get the service
ref = context.get_service_reference(IEchoService)
self.assertIsNotNone(ref, "ServiceReference not found")
svc = context.get_service(ref)
self.assertIsNotNone(ref, "Invalid service instance")
# Modify the service => Simple modification
svc.modify({"answer": 42})
self.assertEqual([ServiceEvent.MODIFIED],
self.received, "Received {0}".format(self.received))
self.reset_state()
# Set the same value => No event should be sent
svc.modify({"answer": 42})
self.assertEqual([], self.received,
"Received {0}".format(self.received))
self.reset_state()
# Modify the service => Ends the filter match
svc.modify({"test": False})
# Assert the events have been received
self.assertEqual([ServiceEvent.MODIFIED_ENDMATCH],
self.received, "Received {0}".format(self.received))
self.reset_state()
# Modify the service => the filter matches again
svc.modify({"test": True})
# Assert the events have been received
self.assertEqual([ServiceEvent.MODIFIED],
self.received, "Received {0}".format(self.received))
self.reset_state()
# Stop the bundle
bundle.stop()
# Assert the events have been received
self.assertEqual([ServiceEvent.UNREGISTERING],
self.received, "Received {0}".format(self.received))
self.reset_state()
# Uninstall the bundle
bundle.uninstall()
# Unregister from events
context.remove_service_listener(self)
# ------------------------------------------------------------------------------
class EventListenerHookTest(unittest.TestCase):
"""
Event Listener Hook tests
"""
def setUp(self):
"""
Called before each test. Initiates a framework.
"""
self.framework = FrameworkFactory.get_framework()
self.framework.start()
self.test_bundle_name = SERVICE_BUNDLE
self.bundle = None
self.received = []
def tearDown(self):
"""
Called after each test
"""
self.framework.stop()
self.framework.delete()
def test_normal_behaviour(self):
"""
Checks if event listener hooks are registered correctly
"""
# Test implementation
events = []
class Hook(object):
@staticmethod
def event(svc_event, listeners_dict):
events.append((svc_event, listeners_dict))
# Register the hook
ctx = self.framework.get_bundle_context()
reg = ctx.register_service(SERVICE_EVENT_LISTENER_HOOK, Hook(), {})
# Hooks shouldn't be aware of themselves
self.assertFalse(events)
# Register a dummy service
dummy_reg = ctx.register_service("dummy", object(), {})
# Pop information
event, listeners = events.pop(0)
# Check event
assert isinstance(event, ServiceEvent)
self.assertEqual(event.get_kind(), ServiceEvent.REGISTERED)
self.assertIs(event.get_service_reference(), dummy_reg.get_reference())
# No listeners are registered
self.assertFalse(listeners)
# Update the service
dummy_reg.set_properties({"hello": "world"})
# Pop information
event, listeners = events.pop(0)
# Check event
assert isinstance(event, ServiceEvent)
self.assertEqual(event.get_kind(), ServiceEvent.MODIFIED)
self.assertIs(event.get_service_reference(), dummy_reg.get_reference())
# Unregister the service
dummy_reg.unregister()
# Pop information
event, listeners = events.pop(0)
# Check event
assert isinstance(event, ServiceEvent)
self.assertEqual(event.get_kind(), ServiceEvent.UNREGISTERING)
self.assertIs(event.get_service_reference(), dummy_reg.get_reference())
# Unregister the hook
reg.unregister()
# Register a new service
ctx.register_service("dummy", object(), {})
# Hook must not be notified
self.assertFalse(events)
def test_hook(self):
"""
Tests the hook filtering behaviour
"""
# Add a bundle to have two contexts in the test
fw_ctx = self.framework.get_bundle_context()
bnd = fw_ctx.install_bundle("tests.dummy_1")
bnd.start()
bnd_ctx = bnd.get_bundle_context()
# Setup a hook
class Hook(object):
@staticmethod
def event(svc_event, listeners_dict):
to_remove = svc_event.get_service_reference() \
.get_property("to.remove")
info_to_remove = []
for listener_bc, listeners_info in listeners_dict.items():
# Check the dictionary content
for listener_info in listeners_info:
self.assertIs(listener_bc, listener_info.bundle_context)
self.assertIs(
listener_bc, listener_info.listener.context)
self.assertIs(
listener_bc, listener_info.get_bundle_context())
if listener_info.listener in to_remove:
info_to_remove.append(listener_info)
# Remove the requested listeners
for listener_info in info_to_remove:
listeners_dict[listener_info.bundle_context] \
.remove(listener_info)
fw_ctx.register_service(SERVICE_EVENT_LISTENER_HOOK, Hook(), {})
# Register multiple listeners
class Listener(object):
def __init__(self, bc):
self.context = bc
self.storage = []
bc.add_service_listener(self)
def service_changed(self, event):
self.storage.append(event)
listener_referee = Listener(fw_ctx)
listener_1 = Listener(fw_ctx)
listener_2 = Listener(bnd_ctx)
# Register a service that only the referee will get
reg = fw_ctx.register_service(
"dummy", object(), {"to.remove": [listener_1, listener_2]})
evt = listener_referee.storage.pop(0)
self.assertIs(evt.get_service_reference(), reg.get_reference())
self.assertEqual(evt.get_kind(), ServiceEvent.REGISTERED)
self.assertFalse(listener_1.storage)
self.assertFalse(listener_2.storage)
# Modify it so that listener_1 gets it
reg.set_properties({"to.remove": [listener_2]})
self.assertFalse(listener_2.storage)
evt = listener_referee.storage.pop(0)
self.assertIs(evt.get_service_reference(), reg.get_reference())
self.assertEqual(evt.get_kind(), ServiceEvent.MODIFIED)
evt1 = listener_1.storage.pop(0)
self.assertIs(evt1, evt)
# Modify it so that listener_2, but not listener_1 gets it
reg.set_properties({"to.remove": [listener_1]})
self.assertFalse(listener_1.storage)
evt = listener_referee.storage.pop(0)
self.assertIs(evt.get_service_reference(), reg.get_reference())
self.assertEqual(evt.get_kind(), ServiceEvent.MODIFIED)
evt2 = listener_2.storage.pop(0)
self.assertIs(evt2, evt)
# ------------------------------------------------------------------------------
if __name__ == "__main__":
# Set logging level
import logging
logging.basicConfig(level=logging.DEBUG)
unittest.main()
|
"""Core tests."""
from typing import Any
import pytest
from borsh_construct import (
F32,
F64,
I8,
I16,
I32,
I64,
I128,
U8,
U16,
U32,
U64,
U128,
Bool,
Vec,
CStruct,
TupleStruct,
Enum,
String,
Option,
HashMap,
HashSet,
Bytes,
)
from borsh_construct.core import (
NAMED_TUPLE_FIELD_ERROR,
TUPLE_DATA,
UNNAMED_SUBCON_ERROR,
NON_STR_NAME_ERROR,
UNDERSCORE_NAME_ERROR,
TUPLE_DATA_NAME_ERROR,
)
from construct import Construct, Float32l, Float64l, FormatField, FormatFieldError
ENUM = Enum(
"Unit",
"TupleVariant" / TupleStruct(U128, String, I64, Option(U16)),
"CStructVariant"
/ CStruct("u128_field" / U128, "string_field" / String, "vec_field" / Vec(U16)),
enum_name="Placeholder",
)
TYPE_INPUT_EXPECTED = (
(Bool, True, [1]),
(Bool, False, [0]),
(U8, 255, [255]),
(I8, -128, [128]),
(U16, 65535, [255, 255]),
(I16, -32768, [0, 128]),
(U32, 4294967295, [255, 255, 255, 255]),
(I32, -2147483648, [0, 0, 0, 128]),
(U64, 18446744073709551615, [255, 255, 255, 255, 255, 255, 255, 255]),
(I64, -9223372036854775808, [0, 0, 0, 0, 0, 0, 0, 128]),
(
U128,
340282366920938463463374607431768211455,
[
255,
255,
255,
255,
255,
255,
255,
255,
255,
255,
255,
255,
255,
255,
255,
255,
],
),
(
I128,
-170141183460469231731687303715884105728,
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 128],
),
(F32, 0.5, [0, 0, 0, 63]),
(F64, -0.5, [0, 0, 0, 0, 0, 0, 224, 191]),
(I16[3], [1, 2, 3], [1, 0, 2, 0, 3, 0]),
(Vec(I16), [1, 1], [2, 0, 0, 0, 1, 0, 1, 0]),
(
TupleStruct(U128, String, I64, Option(U16)),
[123, "hello", 1400, 13],
[
123,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
5,
0,
0,
0,
104,
101,
108,
108,
111,
120,
5,
0,
0,
0,
0,
0,
0,
1,
13,
0,
],
),
(
CStruct("u128_field" / U128, "string_field" / String, "vec_field" / Vec(U16)),
{"u128_field": 1033, "string_field": "hello", "vec_field": [1, 2, 3]},
[
9,
4,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
5,
0,
0,
0,
104,
101,
108,
108,
111,
3,
0,
0,
0,
1,
0,
2,
0,
3,
0,
],
),
(ENUM, ENUM.enum.Unit(), [0]),
(
ENUM,
ENUM.enum.TupleVariant([10, "hello", 13, 12]),
[
1,
10,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
5,
0,
0,
0,
104,
101,
108,
108,
111,
13,
0,
0,
0,
0,
0,
0,
0,
1,
12,
0,
],
),
(
ENUM,
ENUM.enum.CStructVariant(
u128_field=15,
string_field="hi",
vec_field=[3, 2, 1],
),
[
2,
15,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
2,
0,
0,
0,
104,
105,
3,
0,
0,
0,
3,
0,
2,
0,
1,
0,
],
),
(
HashMap(U8, ENUM),
{2: ENUM.enum.Unit(), 1: ENUM.enum.TupleVariant([11, "hello", 123, None])},
[
2,
0,
0,
0,
1,
1,
11,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
5,
0,
0,
0,
104,
101,
108,
108,
111,
123,
0,
0,
0,
0,
0,
0,
0,
0,
2,
0,
],
),
(HashSet(U8), {1, 2, 3}, [3, 0, 0, 0, 1, 2, 3]),
(Bytes, b"\x01\x02\x03", [3, 0, 0, 0, 1, 2, 3]),
(
String,
"🚀🚀🚀",
[12, 0, 0, 0, 240, 159, 154, 128, 240, 159, 154, 128, 240, 159, 154, 128],
),
)
@pytest.mark.parametrize("obj_type,obj_input,expected", TYPE_INPUT_EXPECTED)
def test_serde(obj_type: Construct, obj_input: Any, expected: Any) -> None:
"""Tests that inputs are serialized and deserialized as expected."""
serialized = obj_type.build(obj_input)
assert list(serialized) == expected
deserialized = obj_type.parse(serialized)
assert deserialized == obj_input
@pytest.mark.parametrize(
"nonan_type,construct_type",
[(F32, Float32l), (F64, Float64l)],
)
def test_nan_floats(nonan_type: FormatField, construct_type: FormatField) -> None:
"""Check that error is raised if you try to build or parse nan floats."""
nan = float("nan") # noqa: WPS456
with pytest.raises(FormatFieldError):
nonan_type.build(nan)
nan_serialized = construct_type.build(nan)
with pytest.raises(FormatFieldError):
nonan_type.parse(nan_serialized)
def test_named_tuple_struct_field_raises() -> None:
"""Check that error is raised if TupleStruct field is named."""
    with pytest.raises(ValueError) as excinfo:
        TupleStruct("foo" / U8)
    assert str(excinfo.value) == str(NAMED_TUPLE_FIELD_ERROR)
def test_unnamed_subcon_raises() -> None:
"""Check that error is raised when enum variant or CStruct field is unnamed."""
with pytest.raises(ValueError) as excinfo:
Enum("foo", TupleStruct(U8), enum_name="placeholder")
assert str(excinfo.value) == str(UNNAMED_SUBCON_ERROR)
def test_non_str_name_raises() -> None:
"""Check that error is raised when subcon name is not a string."""
with pytest.raises(ValueError) as excinfo:
CStruct(1 / U8) # type: ignore
assert str(excinfo.value) == str(NON_STR_NAME_ERROR)
def test_tuple_data_name_raises() -> None:
"""Check that error is raised when subcon name is not a string."""
with pytest.raises(ValueError) as excinfo:
CStruct(TUPLE_DATA / U8)
assert str(excinfo.value) == str(TUPLE_DATA_NAME_ERROR)
def test_underscore_name_raises() -> None:
"""Check that error is raised when subcon name starts with underscore."""
with pytest.raises(ValueError) as excinfo:
CStruct("_foo" / U8)
assert str(excinfo.value) == str(UNDERSCORE_NAME_ERROR)
def test_unrecognized_variant_type_raises() -> None:
"""Check that error is raised if variant type is not valid."""
with pytest.raises(ValueError) as excinfo:
Enum("foo" / U8, enum_name="placeholder")
assert "Unrecognized" in str(excinfo.value)
def test_duplicate_variant_name_raises() -> None:
"""Check error raised if two variants in same Enum have same name."""
with pytest.raises(ValueError) as excinfo:
Enum("foo", "foo", enum_name="placeholder")
assert "must be unique" in str(excinfo.value)
|
import sys
sys.path.append("../..")
from api.control.PID import PID
from api.control.sensor import sensor
from api.control.robot import robot
import posix_ipc as ipc
import time
import threading
import math
import numpy as np
graphq = ipc.MessageQueue('/graphQueue', ipc.O_CREAT)
mq = ipc.MessageQueue('/keyQueue', ipc.O_CREAT)
mq.block = False
lidar = sensor('lidar', '/pointQueue')
""" THREAD CLASS """
class sensor_thread(threading.Thread):
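    """Background thread that polls the named sensor every `delay` seconds until stopped."""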
    def __init__(self, name, delay, *args, **kwargs):
super(sensor_thread, self).__init__(*args, **kwargs)
self._stopper = threading.Event()
self.name = name
self.delay = delay
def stopit(self):
self._stopper.set()
def stopped(self):
        return self._stopper.is_set()
def run(self):
while True:
if self.stopped():
return
if self.name == 'cam':
cam.set_data()
if self.name == 'ir':
ir.set_data()
if self.name == 'lidar':
lidar.set_data()
time.sleep(self.delay)
def getPressed():
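    """Non-blocking read of the key queue. Messages are comma-separated as
    "start,min_power,max_power,kp,ki,kd"; returns (start, [min_power, max_power],
    [kp, ki, kd]) or None when no message is available."""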
try:
mes = mq.receive()
key = list((mes[0].decode()).split(","))
key = int(key[0]), list(map(int, key[1:3])), list(map(float, key[3:]))
return key
    except Exception:
        # Queue empty (non-blocking receive) or the message could not be parsed.
        return None
""" GLOBAL VARIABLE HERE """
SENSOR_TYPE = [('lidar', 0.0)]
ATTRIBUTE = 'data'
DELTA_ANGLE = 50
RIGHT_HAND_ANGLE = 90
HELPER_HAND_ANGLE = RIGHT_HAND_ANGLE + DELTA_ANGLE
FACE_ANGLE = 180
WALL_THRES = 1
WALL_DISTANCE = 60
WALL_LEFT_BOUND = WALL_DISTANCE - WALL_THRES
WALL_RIGHT_BOUND = WALL_DISTANCE + WALL_THRES
AVOIDER_POWER = 35
STOP = 0, 0, 0
class power:
value = 0, 0, 0
def set(self, x, y, turn):
        self.value = x, y, turn
def find_nearest(array, value):
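    """Return the index of the element of `array` closest to `value`."""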
array = np.asarray(array)
idx = (np.abs(array - value)).argmin()
return idx
def main():
start = 0
last_start = start
min_power = 20
max_power = 50
kp = 1
ki = 0
kd = 0
lidar_pid = PID(kp, ki, kd, WALL_DISTANCE)
workers = []
for name, delay in SENSOR_TYPE:
        print('[info] start thread :', name)
thread = sensor_thread(name, delay)
workers.append(thread)
thread.start()
try:
rc = robot('/serialWriteQueue')
time.sleep(5)
rc.connect()
time.sleep(0.5)
pwr = power()
while True:
key = getPressed()
if key:
print(key)
start, (min_power, max_power), (kp, ki, kd) = key
lidar_pid.setOutputLimits((-max_power, max_power))
                lidar_pid.setKValue(kp, ki, kd)
if start != last_start:
rx_distance = 0
graphq.send(",".join(map(str, [start, rx_distance, WALL_DISTANCE])))
last_start = start
if start:
point = lidar.data
print(type(point))
if type(point) is np.ndarray:
print("ye")
angles, ranges = point
right_hand = float(ranges[find_nearest(angles, RIGHT_HAND_ANGLE)])
helper_hand = float(ranges[find_nearest(angles, HELPER_HAND_ANGLE)])
face = float(ranges[find_nearest(angles, FACE_ANGLE)])
teta = math.radians(DELTA_ANGLE)
if face < 50:
print("50")
pwr.set(0, 0, AVOIDER_POWER)
elif right_hand > 0 and helper_hand > 0:
print("ye")
                        alpha = math.atan((right_hand * math.cos(teta) - helper_hand) / (right_hand * math.sin(teta)))
                        # math.atan already returns radians, so no further conversion is needed.
                        rx_distance = helper_hand * math.cos(alpha)
graphq.send(",".join(map(str, [start, rx_distance, WALL_DISTANCE])))
if rx_distance > WALL_RIGHT_BOUND or rx_distance < WALL_LEFT_BOUND:
out = lidar_pid.update(rx_distance)
if out < min_power and out > 0:
out = min_power
if out > -min_power and out < 0:
out = -min_power
print(rx_distance, out)
pwr.set(0, max_power, out)
else:
pwr.set(0, max_power, 0)
else:
pwr.set(*STOP)
else:
pwr.set(*STOP)
else:
pwr.set(*STOP)
rc.drive(*pwr.value)
time.sleep(0.001)
except KeyboardInterrupt:
print('[info] interrupt pressed')
print('[main] work finished')
for worker in workers:
worker.stopit()
time.sleep(3)
worker.join()
#lidar.cleanup()
#ir.cleanup()
#cam.cleanup()
#rc.disconnect()
print('[main] end')
if __name__ == '__main__':
    main()
|
import networkx as nx
import numpy as np
import matplotlib.pyplot as plt
import pickle
import math
from dynsimf.models.Model import Model
from dynsimf.models.Model import ModelConfiguration
from dynsimf.models.components.Memory import MemoryConfiguration
from dynsimf.models.components.Memory import MemoryConfigurationType
from dynsimf.models.components.conditions.Condition import ConditionType
from dynsimf.models.components.conditions.ThresholdCondition import ThresholdCondition
from dynsimf.models.components.conditions.CustomCondition import CustomCondition
from dynsimf.models.components.conditions.ThresholdCondition import ThresholdOperator
from dynsimf.models.components.conditions.ThresholdCondition import ThresholdConfiguration
if __name__ == "__main__":
# Network definition
g_list = pickle.load(open(r"C:/Users/Admin/MEGA/Uni/Master/Thesis/data/g_list.pkl", 'rb'))
X_list = pickle.load(open(r"C:/Users/Admin/MEGA/Uni/Master/Thesis/data/x_list.pkl", 'rb'))
school = 3
X = X_list[school]
n = len(X['sex'])
avg_initial_links = 5 # desired average degree in initial network
link_prop = avg_initial_links/n
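    # Each directed edge below is an independent Bernoulli(link_prop) draw, so the
    # expected out-degree of every node is approximately avg_initial_links.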
g = np.random.choice([0, 1], size=(n, n),
p=[1 - link_prop,
link_prop])
np.fill_diagonal(g, 0)
g = nx.convert_matrix.from_numpy_array(g, create_using=nx.DiGraph)
cfg = {
'adjacency_memory_config': \
MemoryConfiguration(MemoryConfigurationType.ADJACENCY, {
'memory_size': 0
}),
'edge_values_memory_config': \
MemoryConfiguration(MemoryConfigurationType.EDGE_VALUES, {
'memory_size': 0
})
}
model = Model(g, ModelConfiguration(cfg))
constants = {
'n': n,
'delta': 0.05,
'gamma': 0.65,
'c': 0.175,
'B1': 0.1,
'B2': 0.1,
'B3': 0.2,
'sigma': 0.035,
'alpha': 2,
'min_prop': 1000,
'X': X
}
def initial_utility():
utility = np.zeros((constants['n'], constants['n']))
race = list(constants['X']['race'])
sex = list(constants['X']['sex'])
grade = list(constants['X']['grade'])
for i in range(constants['n']):
for j in range(constants['n']):
weighted_diffs = [constants['B1']*abs(sex[i] - sex[j]),
constants['B2'] * (0 if grade[i] == grade[j] else 1),
constants['B3'] * (0 if race[i] == race[j] else 1)]
utility[i, j] = math.exp(-sum(weighted_diffs))
return utility
def initial_prop():
prop = np.zeros((constants['n'], constants['n']))
utility = initial_utility()
# Loop over the person and their peers
for i in range(constants['n']):
for j in range(constants['n']):
if i == j:
prop[i, j] = 0
else:
prop[i, j] = utility[i, j] + constants['min_prop']
# Normalize
prop[i, :] = prop[i, :] / np.sum(prop[i, :])
return prop
constants['probability'] = initial_prop()
constants['utility'] = initial_utility()
def nb_update():
adj = model.get_adjacency()
return {'Neighbors': np.sum(adj, axis=1)}
def node_utility(node, adj):
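        """Utility of `node` given adjacency `adj`: gains from direct ties, mutual ties
        (weighted by gamma) and indirect two-step ties (weighted by delta), minus a
        convex linking cost c * d_i ** alpha."""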
utility = constants['utility']
# degree, connection gain and cost calculations
d_i = adj[node].sum()
direct_u = np.sum(adj[node] * utility[node])
mutual_u = np.sum(adj[node] * adj.T[node] * utility[node])
# indirect connection gain
a = (adj.T.dot(adj[node, :]) * utility)[node]
a[node] = 0
indirect_u = np.sum(a)
return direct_u + constants['gamma'] * mutual_u + constants['delta'] * indirect_u - d_i ** constants['alpha'] * constants['c']
def network_update(nodes):
adj = model.get_adjacency()
order = nodes.copy()
eps = np.random.normal(scale=constants['sigma'], size=constants['n']*2)
np.random.shuffle(order)
changes = {}
P = constants['probability']
for node in order:
other_node = node
while other_node == node:
other_node = np.random.choice(nodes, p=P[node])
            existing_connection = bool(adj[node, other_node])
adj[node, other_node] = 0
U_without = node_utility(node, adj) + eps[node]
adj[node, other_node] = 1
U_with = node_utility(node, adj) + eps[-node]
if U_without > U_with and existing_connection:
changes[node] = {'remove': [other_node]}
elif U_without < U_with and not existing_connection:
changes[node] = {'add': [other_node]}
return {
'edge_change': changes
}
# Model definition
model.constants = constants
model.set_states(['Neighbors'])
model.add_update(nb_update)
model.set_edge_values(['utility'])
model.set_initial_edge_values({
'utility': initial_utility,
})
model.add_network_update(network_update, get_nodes=True)
output = model.simulate(500)
visualization_config = {
'plot_interval': 10,
'edge_values': 'utility',
'plot_variable': 'Neighbors',
'variable_limits': {
'Neighbors': [0, 55]
},
'color_scale': 'Reds',
'show_plot': False,
'repeat': True,
'plot_output': '../animations/school_segregation/school_' + str(school) + '.gif',
'plot_title': 'School segregation'
}
model.configure_visualization(visualization_config, output)
model.visualize('animation')
|
from functools import partial
from pyais.exceptions import UnknownMessageException
import typing
import bitarray
from pyais.constants import (
NavigationStatus,
ManeuverIndicator,
TransmitMode,
EpfdType,
ShipType,
StationType,
StationIntervals,
NavAid
)
from pyais.util import get_int, encode_bin_as_ascii6
def decode_msg_1(bit_arr: bitarray.bitarray) -> typing.Dict:
"""
AIS Vessel position report using SOTDMA (Self-Organizing Time Division Multiple Access)
Src: https://gpsd.gitlab.io/gpsd/AIVDM.html#_types_1_2_and_3_position_report_class_a
"""
get_int_from_data = partial(get_int, bit_arr)
return {
'type': get_int_from_data(0, 6),
'repeat': get_int_from_data(6, 8),
'mmsi': get_int_from_data(8, 38),
'status': NavigationStatus(get_int_from_data(38, 42)),
'turn': get_int_from_data(42, 50, signed=True),
'speed': get_int_from_data(50, 60) / 10.0,
'accuracy': bit_arr[60],
'lon': get_int_from_data(61, 89, signed=True) / 600000.0,
'lat': get_int_from_data(89, 116, signed=True) / 600000.0,
'course': get_int_from_data(116, 128) * 0.1,
'heading': get_int_from_data(128, 137),
'second': get_int_from_data(137, 143),
'maneuver': ManeuverIndicator(get_int_from_data(143, 145)),
'raim': bit_arr[148],
'radio': get_int_from_data(149, bit_arr.length()),
}
def decode_msg_2(bit_arr: bitarray.bitarray) -> typing.Dict:
"""AIS Vessel position report using SOTDMA (Self-Organizing Time Division Multiple Access)
Src: https://gpsd.gitlab.io/gpsd/AIVDM.html#_types_1_2_and_3_position_report_class_a
"""
return decode_msg_1(bit_arr)
def decode_msg_3(bit_arr: bitarray.bitarray) -> typing.Dict:
"""
AIS Vessel position report using ITDMA (Incremental Time Division Multiple Access)
Src: https://gpsd.gitlab.io/gpsd/AIVDM.html#_types_1_2_and_3_position_report_class_a
"""
return decode_msg_1(bit_arr)
def decode_msg_4(bit_arr: bitarray.bitarray) -> typing.Dict:
"""
AIS Vessel position report using SOTDMA (Self-Organizing Time Division Multiple Access)
Src: https://gpsd.gitlab.io/gpsd/AIVDM.html#_type_4_base_station_report
"""
get_int_from_data = partial(get_int, bit_arr)
return {
'type': get_int_from_data(0, 6),
'repeat': get_int_from_data(6, 8),
'mmsi': get_int_from_data(8, 38),
'year': get_int_from_data(38, 52),
'month': get_int_from_data(52, 56),
'day': get_int_from_data(56, 61),
'hour': get_int_from_data(61, 66),
'minute': get_int_from_data(66, 72),
'second': get_int_from_data(72, 78),
'accuracy': bit_arr[78],
'lon': get_int_from_data(79, 107, signed=True) / 600000.0,
'lat': get_int_from_data(107, 134, signed=True) / 600000.0,
'epfd': EpfdType(get_int_from_data(134, 138)),
'raim': bit_arr[148],
'radio': get_int_from_data(148, len(bit_arr)),
}
def decode_msg_5(bit_arr: bitarray.bitarray) -> typing.Dict:
"""
Static and Voyage Related Data
Src: https://gpsd.gitlab.io/gpsd/AIVDM.html#_type_5_static_and_voyage_related_data
"""
get_int_from_data = partial(get_int, bit_arr)
return {
'type': get_int_from_data(0, 6),
'repeat': get_int_from_data(6, 8),
'mmsi': get_int_from_data(8, 38),
'ais_version': get_int_from_data(38, 40),
'imo': get_int_from_data(40, 70),
'callsign': encode_bin_as_ascii6(bit_arr[70:112]),
'shipname': encode_bin_as_ascii6(bit_arr[112:232]),
'shiptype': ShipType(get_int_from_data(232, 240)),
'to_bow': get_int_from_data(240, 249),
'to_stern': get_int_from_data(249, 258),
'to_port': get_int_from_data(258, 264),
'to_starboard': get_int_from_data(264, 270),
'epfd': EpfdType(get_int_from_data(270, 274)),
'month': get_int_from_data(274, 278),
'day': get_int_from_data(278, 283),
'hour': get_int_from_data(283, 288),
'minute': get_int_from_data(288, 294),
'draught': get_int_from_data(294, 302) / 10.0,
'destination': encode_bin_as_ascii6(bit_arr[302:422]),
'dte': bit_arr[-2]
}
def decode_msg_6(bit_arr: bitarray.bitarray) -> typing.Dict:
"""
    Binary Addressed Message
    Src: https://gpsd.gitlab.io/gpsd/AIVDM.html#_type_6_binary_addressed_message
"""
get_int_from_data = partial(get_int, bit_arr)
return {
'type': get_int_from_data(0, 6),
'repeat': get_int_from_data(6, 8),
'mmsi': get_int_from_data(8, 38),
'seqno': get_int_from_data(38, 40),
'dest_mmsi': get_int_from_data(40, 70),
'retransmit': bit_arr[70],
'dac': get_int_from_data(72, 82),
'fid': get_int_from_data(82, 88),
'data': bit_arr[88:].to01()
}
def decode_msg_7(bit_arr: bitarray.bitarray) -> typing.Dict:
"""
Binary Acknowledge
Src: https://gpsd.gitlab.io/gpsd/AIVDM.html#_type_7_binary_acknowledge
"""
get_int_from_data = partial(get_int, bit_arr)
return {
'type': get_int_from_data(0, 6),
'repeat': get_int_from_data(6, 8),
'mmsi': get_int_from_data(8, 38),
'mmsi1': get_int_from_data(40, 70),
'mmsiseq1': get_int_from_data(70, 72),
'mmsi2': get_int_from_data(72, 102),
'mmsiseq2': get_int_from_data(102, 104),
'mmsi3': get_int_from_data(104, 134),
'mmsiseq3': get_int_from_data(134, 136),
'mmsi4': get_int_from_data(136, 166),
'mmsiseq4': get_int_from_data(166, 168)
}
def decode_msg_8(bit_arr: bitarray.bitarray) -> typing.Dict:
"""
    Binary Broadcast Message
Src: https://gpsd.gitlab.io/gpsd/AIVDM.html#_type_8_binary_broadcast_message
"""
get_int_from_data = partial(get_int, bit_arr)
return {
'type': get_int_from_data(0, 6),
'repeat': get_int_from_data(6, 8),
'mmsi': get_int_from_data(8, 38),
'dac': get_int_from_data(40, 50),
'fid': get_int_from_data(50, 56),
'data': bit_arr[56:].to01()
}
def decode_msg_9(bit_arr: bitarray.bitarray) -> typing.Dict:
"""
Standard SAR Aircraft Position Report
Src: https://gpsd.gitlab.io/gpsd/AIVDM.html#_type_9_standard_sar_aircraft_position_report
"""
get_int_from_data = partial(get_int, bit_arr)
return {
'type': get_int_from_data(0, 6),
'repeat': get_int_from_data(6, 8),
'mmsi': get_int_from_data(8, 38),
'alt': get_int_from_data(38, 50),
'speed': get_int_from_data(50, 60),
'accuracy': bit_arr[60],
'lon': get_int_from_data(61, 89, signed=True) / 600000.0,
'lat': get_int_from_data(89, 116, signed=True) / 600000.0,
'course': get_int_from_data(116, 128) * 0.1,
'second': get_int_from_data(128, 134),
'dte': bit_arr[142],
'assigned': bit_arr[146],
'raim': bit_arr[147],
'radio': get_int_from_data(148, 168)
}
def decode_msg_10(bit_arr: bitarray.bitarray) -> typing.Dict:
"""
UTC/Date Inquiry
Src: https://gpsd.gitlab.io/gpsd/AIVDM.html#_type_10_utc_date_inquiry
"""
get_int_from_data = partial(get_int, bit_arr)
return {
'type': get_int_from_data(0, 6),
'repeat': get_int_from_data(6, 8),
'mmsi': get_int_from_data(8, 38),
'dest_mmsi': get_int_from_data(40, 70)
}
def decode_msg_11(bit_arr: bitarray.bitarray) -> typing.Dict:
"""
UTC/Date Response
Src: https://gpsd.gitlab.io/gpsd/AIVDM.html#_type_11_utc_date_response
"""
return decode_msg_4(bit_arr)
def decode_msg_12(bit_arr: bitarray.bitarray) -> typing.Dict:
"""
Addressed Safety-Related Message
Src: https://gpsd.gitlab.io/gpsd/AIVDM.html#_type_12_addressed_safety_related_message
"""
get_int_from_data = partial(get_int, bit_arr)
return {
'type': get_int_from_data(0, 6),
'repeat': get_int_from_data(6, 8),
'mmsi': get_int_from_data(8, 38),
'seqno': get_int_from_data(38, 40),
'dest_mmsi': get_int_from_data(40, 70),
'retransmit': bit_arr[70],
'text': encode_bin_as_ascii6(bit_arr[72:])
}
def decode_msg_13(bit_arr: bitarray.bitarray) -> typing.Dict:
"""
Identical to type 7
"""
return decode_msg_7(bit_arr)
def decode_msg_14(bit_arr: bitarray.bitarray) -> typing.Dict:
"""
Safety-Related Broadcast Message
Src: https://gpsd.gitlab.io/gpsd/AIVDM.html#_type_14_safety_related_broadcast_message
"""
get_int_from_data = partial(get_int, bit_arr)
return {
'type': get_int_from_data(0, 6),
'repeat': get_int_from_data(6, 8),
'mmsi': get_int_from_data(8, 38),
'text': encode_bin_as_ascii6(bit_arr[40:])
}
def decode_msg_15(bit_arr: bitarray.bitarray) -> typing.Dict:
"""
Interrogation
Src: https://gpsd.gitlab.io/gpsd/AIVDM.html#_type_15_interrogation
"""
get_int_from_data = partial(get_int, bit_arr)
return {
'type': get_int_from_data(0, 6),
'repeat': get_int_from_data(6, 8),
'mmsi': get_int_from_data(8, 38),
'mmsi1': get_int_from_data(40, 70),
'type1_1': get_int_from_data(70, 76),
'offset1_1': get_int_from_data(76, 88),
'type1_2': get_int_from_data(90, 96),
'offset1_2': get_int_from_data(96, 108),
'mmsi2': get_int_from_data(110, 140),
'type2_1': get_int_from_data(140, 146),
'offset2_1': get_int_from_data(146, 157),
}
def decode_msg_16(bit_arr: bitarray.bitarray) -> typing.Dict:
"""
Assignment Mode Command
Src: https://gpsd.gitlab.io/gpsd/AIVDM.html#_type_16_assignment_mode_command
"""
get_int_from_data = partial(get_int, bit_arr)
return {
'type': get_int_from_data(0, 6),
'repeat': get_int_from_data(6, 8),
'mmsi': get_int_from_data(8, 38),
'mmsi1': get_int_from_data(40, 70),
'offset1': get_int_from_data(70, 82),
'increment1': get_int_from_data(82, 92),
'mmsi2': get_int_from_data(92, 122),
'offset2': get_int_from_data(122, 134),
'increment2': get_int_from_data(134, 144)
}
def decode_msg_17(bit_arr: bitarray.bitarray) -> typing.Dict:
"""
DGNSS Broadcast Binary Message
Src: https://gpsd.gitlab.io/gpsd/AIVDM.html#_type_17_dgnss_broadcast_binary_message
"""
get_int_from_data = partial(get_int, bit_arr)
return {
'type': get_int_from_data(0, 6),
'repeat': get_int_from_data(6, 8),
'mmsi': get_int_from_data(8, 38),
'lon': get_int_from_data(40, 58, signed=True),
'lat': get_int_from_data(58, 75, signed=True),
'data': get_int_from_data(80, 816)
}
def decode_msg_18(bit_arr: bitarray.bitarray) -> typing.Dict:
"""
Standard Class B CS Position Report
Src: https://gpsd.gitlab.io/gpsd/AIVDM.html#_type_18_standard_class_b_cs_position_report
"""
get_int_from_data = partial(get_int, bit_arr)
return {
'type': get_int_from_data(0, 6),
        'repeat': get_int_from_data(6, 8),
'mmsi': get_int_from_data(8, 38),
'speed': get_int_from_data(46, 56) * 0.1,
'accuracy': bit_arr[56],
'lon': get_int_from_data(57, 85, signed=True) / 600000.0,
'lat': get_int_from_data(85, 112, signed=True) / 600000.0,
'course': get_int_from_data(112, 124) * 0.1,
'heading': get_int_from_data(124, 133),
'second': get_int_from_data(133, 139),
'regional': get_int_from_data(139, 141),
'cs': bit_arr[141],
'display': bit_arr[142],
'dsc': bit_arr[143],
'band': bit_arr[144],
'msg22': bit_arr[145],
'assigned': bit_arr[146],
'raim': bit_arr[147],
'radio': get_int_from_data(148, len(bit_arr)),
}
def decode_msg_19(bit_arr: bitarray.bitarray) -> typing.Dict:
"""
Extended Class B CS Position Report
Src: https://gpsd.gitlab.io/gpsd/AIVDM.html#_type_19_extended_class_b_cs_position_report
"""
get_int_from_data = partial(get_int, bit_arr)
return {
'type': get_int_from_data(0, 6),
        'repeat': get_int_from_data(6, 8),
'mmsi': get_int_from_data(8, 38),
'speed': get_int_from_data(46, 56) * 0.1,
'accuracy': bit_arr[56],
'lon': get_int_from_data(57, 85, signed=True) / 600000.0,
'lat': get_int_from_data(85, 112, signed=True) / 600000.0,
'course': get_int_from_data(112, 124) * 0.1,
'heading': get_int_from_data(124, 133),
'second': get_int_from_data(133, 139),
'regional': get_int_from_data(139, 143),
'shipname': encode_bin_as_ascii6(bit_arr[143:263]),
'shiptype': ShipType(get_int_from_data(263, 271)),
'to_bow': get_int_from_data(271, 280),
'to_stern': get_int_from_data(280, 289),
'to_port': get_int_from_data(289, 295),
'to_starboard': get_int_from_data(295, 301),
'epfd': EpfdType(get_int_from_data(301, 305)),
'raim': bit_arr[305],
'dte': bit_arr[306],
'assigned': bit_arr[307],
}
def decode_msg_20(bit_arr: bitarray.bitarray) -> typing.Dict:
"""
Data Link Management Message
Src: https://gpsd.gitlab.io/gpsd/AIVDM.html#_type_20_data_link_management_message
"""
get_int_from_data = partial(get_int, bit_arr)
return {
'type': get_int_from_data(0, 6),
        'repeat': get_int_from_data(6, 8),
'mmsi': get_int_from_data(8, 38),
'offset1': get_int_from_data(40, 52),
'number1': get_int_from_data(52, 56),
'timeout1': get_int_from_data(56, 59),
'increment1': get_int_from_data(59, 70),
'offset2': get_int_from_data(70, 82),
'number2': get_int_from_data(82, 86),
'timeout2': get_int_from_data(86, 89),
'increment2': get_int_from_data(89, 100),
'offset3': get_int_from_data(100, 112),
'number3': get_int_from_data(112, 116),
'timeout3': get_int_from_data(116, 119),
        'increment3': get_int_from_data(119, 130),
'offset4': get_int_from_data(130, 142),
'number4': get_int_from_data(142, 146),
'timeout4': get_int_from_data(146, 149),
'increment4': get_int_from_data(149, 160),
}
def decode_msg_21(bit_arr: bitarray.bitarray) -> typing.Dict:
"""
Aid-to-Navigation Report
Src: https://gpsd.gitlab.io/gpsd/AIVDM.html#_type_21_aid_to_navigation_report
"""
get_int_from_data = partial(get_int, bit_arr)
return {
'type': get_int_from_data(0, 6),
        'repeat': get_int_from_data(6, 8),
'mmsi': get_int_from_data(8, 38),
'aid_type': NavAid(get_int_from_data(38, 43)),
'name': encode_bin_as_ascii6(bit_arr[43:163]),
'accuracy': bit_arr[163],
'lon': get_int_from_data(164, 192, signed=True) / 600000.0,
'lat': get_int_from_data(192, 219, signed=True) / 600000.0,
'to_bow': get_int_from_data(219, 228),
'to_stern': get_int_from_data(228, 237),
'to_port': get_int_from_data(237, 243),
'to_starboard': get_int_from_data(243, 249),
'epfd': EpfdType(get_int_from_data(249, 253)),
'second': get_int_from_data(253, 259),
'off_position': bit_arr[259],
'regional': get_int_from_data(260, 268),
'raim': bit_arr[268],
'virtual_aid': bit_arr[269],
'assigned': bit_arr[270],
'name_extension': encode_bin_as_ascii6(bit_arr[272:]),
}
def decode_msg_22(bit_arr: bitarray.bitarray) -> typing.Dict:
"""
Channel Management
Src: https://gpsd.gitlab.io/gpsd/AIVDM.html#_type_22_channel_management
"""
get_int_from_data = partial(get_int, bit_arr)
data = {
'type': get_int_from_data(0, 6),
        'repeat': get_int_from_data(6, 8),
'mmsi': get_int_from_data(8, 38),
'channel_a': get_int_from_data(40, 52),
'channel_b': get_int_from_data(52, 64),
'txrx': get_int_from_data(64, 68),
'power': bit_arr[68],
'addressed': bit_arr[139],
'band_a': bit_arr[140],
'band_b': bit_arr[141],
'zonesize': get_int_from_data(142, 145),
}
    # Addressed
if data['addressed']:
d = {
'dest1': get_int_from_data(69, 99),
'dest2': get_int_from_data(104, 134),
}
    # Broadcast
else:
d = {
'ne_lon': get_int_from_data(69, 87, signed=True) * 0.1,
'ne_lat': get_int_from_data(87, 104, signed=True) * 0.1,
'sw_lon': get_int_from_data(104, 122, signed=True) * 0.1,
'sw_lat': get_int_from_data(122, 139, signed=True) * 0.1,
}
data.update(d)
return data
def decode_msg_23(bit_arr: bitarray.bitarray) -> typing.Dict:
"""
Group Assignment Command
Src: https://gpsd.gitlab.io/gpsd/AIVDM.html#_type_23_group_assignment_command
"""
get_int_from_data = partial(get_int, bit_arr)
return {
'type': get_int_from_data(0, 6),
        'repeat': get_int_from_data(6, 8),
'mmsi': get_int_from_data(8, 38),
'ne_lon': get_int_from_data(40, 58, signed=True) * 0.1,
'ne_lat': get_int_from_data(58, 75, signed=True) * 0.1,
'sw_lon': get_int_from_data(75, 93, signed=True) * 0.1,
'sw_lat': get_int_from_data(93, 110, signed=True) * 0.1,
'station_type': StationType(get_int_from_data(110, 114)),
'ship_type': ShipType(get_int_from_data(114, 122)),
'txrx': TransmitMode(get_int_from_data(144, 146)),
'interval': StationIntervals(get_int_from_data(146, 150)),
'quiet': get_int_from_data(150, 154),
}
def decode_msg_24(bit_arr: bitarray.bitarray) -> typing.Dict:
"""
Static Data Report
Src: https://gpsd.gitlab.io/gpsd/AIVDM.html#_type_24_static_data_report
"""
get_int_from_data = partial(get_int, bit_arr)
data: typing.Dict = {
'type': get_int_from_data(0, 6),
        'repeat': get_int_from_data(6, 8),
'mmsi': get_int_from_data(8, 38),
'partno': get_int_from_data(38, 40)
}
if not data['partno']:
# Part A
d: typing.Dict = {
'shipname': encode_bin_as_ascii6(bit_arr[40: 160])
}
else:
# Part B
d: typing.Dict = {
'shiptype': ShipType(get_int_from_data(40, 48)),
'vendorid': encode_bin_as_ascii6(bit_arr[48: 66]),
'model': get_int_from_data(66, 70),
'serial': get_int_from_data(70, 90),
'callsign': encode_bin_as_ascii6(bit_arr[90: 132]),
'to_bow': get_int_from_data(132, 141),
'to_stern': get_int_from_data(141, 150),
'to_port': get_int_from_data(150, 156),
'to_starboard': get_int_from_data(156, 162),
'mothership_mmsi': get_int_from_data(132, 162)
}
data.update(d)
return data
def decode_msg_25(bit_arr: bitarray.bitarray) -> typing.Dict:
"""
Single Slot Binary Message
Src: https://gpsd.gitlab.io/gpsd/AIVDM.html#_type_25_single_slot_binary_message
NOTE: This message type is quite uncommon and
I was not able find any real world occurrence of the type.
Also documentation seems to vary. Use with caution.
"""
get_int_from_data = partial(get_int, bit_arr)
data = {
'type': get_int_from_data(0, 6),
        'repeat': get_int_from_data(6, 8),
'mmsi': get_int_from_data(8, 38),
'addressed': bit_arr[38],
'structured': bit_arr[39],
}
if data['addressed']:
d = {
'dest_mmsi': get_int_from_data(40, 70),
}
data.update(d)
lo_ix = 40 if data['addressed'] else 70
hi_ix = lo_ix + 16
if data['structured']:
d = {
'app_id': get_int_from_data(lo_ix, hi_ix),
'data': bit_arr[hi_ix:].to01()
}
else:
d = {
'data': bit_arr[lo_ix:].to01()
}
data.update(d)
return data
def decode_msg_26(bit_arr: bitarray.bitarray) -> typing.Dict:
"""
Multiple Slot Binary Message
Src: https://gpsd.gitlab.io/gpsd/AIVDM.html#_type_26_multiple_slot_binary_message
NOTE: This message type is quite uncommon and
I was not able find any real world occurrence of the type.
Also documentation seems to vary. Use with caution.
"""
get_int_from_data = partial(get_int, bit_arr)
radio_status_offset = len(bit_arr) - 20
data = {
'type': get_int_from_data(0, 6),
        'repeat': get_int_from_data(6, 8),
'mmsi': get_int_from_data(8, 38),
'addressed': bit_arr[38],
'structured': bit_arr[39],
'radio': get_int_from_data(radio_status_offset, len(bit_arr))
}
if data['addressed']:
d = {
'dest_mmsi': get_int_from_data(40, 70),
}
data.update(d)
lo_ix = 40 if data['addressed'] else 70
hi_ix = lo_ix + 16
if data['structured']:
d = {
'app_id': get_int_from_data(lo_ix, hi_ix),
'data': bit_arr[hi_ix:radio_status_offset].to01()
}
else:
d = {
'data': bit_arr[lo_ix:radio_status_offset].to01()
}
data.update(d)
return data
def decode_msg_27(bit_arr: bitarray.bitarray) -> typing.Dict:
"""
Long Range AIS Broadcast message
Src: https://gpsd.gitlab.io/gpsd/AIVDM.html#_type_27_long_range_ais_broadcast_message
"""
get_int_from_data = partial(get_int, bit_arr)
return {
'type': get_int_from_data(0, 6),
        'repeat': get_int_from_data(6, 8),
'mmsi': get_int_from_data(8, 38),
'accuracy': bit_arr[38],
'raim': bit_arr[39],
'status': NavigationStatus(get_int_from_data(40, 44)),
'lon': get_int_from_data(44, 62, signed=True) / 600.0,
'lat': get_int_from_data(62, 79, signed=True) / 600.0,
'speed': get_int_from_data(79, 85),
'course': get_int_from_data(85, 94),
'gnss': bit_arr[94],
}
# Decoding Lookup Table
DECODE_MSG = [
decode_msg_1, # there are messages with a zero (0) as an id. these seem to be the same as type 1 messages
decode_msg_1,
decode_msg_2,
decode_msg_3,
decode_msg_4,
decode_msg_5,
decode_msg_6,
decode_msg_7,
decode_msg_8,
decode_msg_9,
decode_msg_10,
decode_msg_11,
decode_msg_12,
decode_msg_13,
decode_msg_14,
decode_msg_15,
decode_msg_16,
decode_msg_17,
decode_msg_18,
decode_msg_19,
decode_msg_20,
decode_msg_21,
decode_msg_22,
decode_msg_23,
decode_msg_24,
decode_msg_25,
decode_msg_26,
decode_msg_27,
]
def _decode(msg) -> typing.Dict:
"""
Decodes a given NMEA message.
"""
try:
return DECODE_MSG[msg.ais_id](msg.bit_array)
except IndexError as e:
raise UnknownMessageException(f"The message {msg} is not currently supported!") from e
def decode(msg) -> typing.Dict:
"""
Decodes a given message.
    @param msg: An object of type NMEAMessage to decode
"""
return _decode(msg)
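if __name__ == "__main__":
    # Minimal usage sketch (not part of the original module). It assumes the
    # NMEAMessage class from pyais.messages, whose instances expose the
    # `ais_id` and `bit_array` attributes that `_decode` relies on above;
    # the sample sentence is illustrative only.
    from pyais.messages import NMEAMessage
    raw = b"!AIVDM,1,1,,B,15M67FC000G?ufbE`FepT@3n00Sa,0*5C"
    print(decode(NMEAMessage(raw)))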
|
""" Module for controlling Thorlabs motorized pollarization paddles """
import ctypes
from ctypes import Structure
import time
from pylabnet.utils.logging.logger import LogHandler
#from comtypes.typeinfo import SAFEARRAYABOUND
#enum FT_Status
FT_OK = ctypes.c_short(0x00)
FT_InvalidHandle = ctypes.c_short(0x0)
FT_DeviceNotFound = ctypes.c_short(0x02)
FT_DeviceNotOpened = ctypes.c_short(0x03)
FT_IOError = ctypes.c_short(0x04)
FT_InsufficientResources = ctypes.c_short(0x05)
FT_InvalidParameter = ctypes.c_short(0x06)
FT_DeviceNotPresent = ctypes.c_short(0x07)
FT_IncorrectDevice = ctypes.c_short(0x08)
FT_Status = ctypes.c_short
#enum MOT_MotorTypes
MOT_NotMotor = ctypes.c_int(0)
MOT_DCMotor = ctypes.c_int(1)
MOT_StepperMotor = ctypes.c_int(2)
MOT_BrushlessMotor = ctypes.c_int(3)
MOT_CustomMotor = ctypes.c_int(100)
MOT_MotorTypes = ctypes.c_int
#enum POL_Paddle
paddle1 = ctypes.c_uint16(1)
paddle2 = ctypes.c_uint16(2)
paddle3 = ctypes.c_uint16(3)
POL_Paddles = ctypes.c_uint16
#enum POL_PaddleBits
none_ctype = ctypes.c_ushort(0x0) #is None in header file
PaddleBit1 = ctypes.c_ushort(0x01)
PaddleBit2 = ctypes.c_ushort(0x02)
PaddleBit4 = ctypes.c_ushort(0x04)
AllPaddles = ctypes.c_ushort(0x07)
POL_PaddleBits = ctypes.c_ushort
#enum MOT_TravelDirection
MOT_TravelDirectionDisabled = ctypes.c_short(0x00)
MOT_Forwards = ctypes.c_short(0x01)
MOT_Reverse = ctypes.c_short(0x02)
MOT_TravelDirection = ctypes.c_short
#enum MPC_IOModes
MPC_ToggleOnPositiveEdge = ctypes.c_ulong(0x01)
MPC_SetPositionOnPositiveEdge = ctypes.c_ulong(0x02)
MPC_OutputHighAtSetPosition = ctypes.c_ulong(0x04)
MPC_OutputHighWhemMoving = ctypes.c_ulong(0x08)
MPC_IOModes = ctypes.c_ulong
class TLI_DeviceInfo(Structure):
_fields_ = [("typeID", ctypes.c_ulong),
("description", (65 * ctypes.c_char)), #changed from 65* _char
("serialNo", (9 * ctypes.c_char)), #changed from 9* _char
("PID", ctypes.c_ulong),# wintypes.DWORD
("isKnownType", ctypes.c_bool),
("motorType", MOT_MotorTypes),
("isPiezoDevice", ctypes.c_bool),
("isLaser", ctypes.c_bool),
("isCustomType", ctypes.c_bool),
("isRack", ctypes.c_bool),
("maxPaddles", ctypes.c_short)]
# class TLI_HardwareInformation(Structure):
# _fields_ = [("serialNumber", ctypes.c_ulong),
# ("modelNumber", (8 * ctypes.c_char)),
# ("type", ctypes.c_ushort),
# ("firmwareVersion", ctypes.c_ulong),
# ("notes", (48 * ctypes.c_char)),
# ("deviceDependantData", (12 * ctypes.c_byte)),
# ("hardwareVersion", ctypes.c_ushort),
# ("modificationState", ctypes.c_ushort),
# ("numChannels", ctypes.c_ushort)]
class TLI_PolarizerParameters(Structure):
_fields_ = [("Velocity", ctypes.c_ushort),
("HomePosition", ctypes.c_double),
("JogSize1", ctypes.c_double),
("JogSize2", ctypes.c_double),
("JogSize3", ctypes.c_double)]
#class SAFEARRAYBOUND(Strcuture):
# _fields_ = [("cElements" , ctypes.c_ulong),
# ("lLbound" , ctypes.c_long)]
#class SAFEARRAY(Strcuture):
# _fields_ = [("cDims", ctypes.c_ushort),
# ("fFeatures", ctypes.c_ushort),
# ("cbElements", ctypes.c_ulong),
# ("cLocks", ctypes.c_ulong),
# ("pvData", ctypes.c_void_p),
# ("rgsabound", SAFEARRAYBOUND * 1)]
class Driver():
def __init__(self, device_num, logger):
"""Instantiate driver class.
        device_num is the index of the device connected via USB. The driver then looks up the serial number of the corresponding polarization paddle, e.g. b'38154354'. """
# Instantiate log.
self.log = LogHandler(logger=logger)
        # Load the polarization controller DLLs and define argument/result types for the C functions.
self._polarizationdll = ctypes.cdll.LoadLibrary('Thorlabs.MotionControl.Polarizer.dll')
self._devmanagerdll = ctypes.cdll.LoadLibrary('Thorlabs.MotionControl.DeviceManager.dll')
self._configure_functions()
#get device list size
if self._polarizationdll.TLI_BuildDeviceList() == 0:
num_devs = self._polarizationdll.TLI_GetDeviceListSize()
#print(f"There are {num_devs} devices connected")
#Get devices serial numbers
serialNos = ctypes.create_string_buffer(100) #the way to have a mutable buffer
serialNosSize = ctypes.c_ulong(ctypes.sizeof(serialNos))
List = self._polarizationdll.TLI_GetDeviceListByTypeExt(serialNos, serialNosSize, 38)
#if List:
# print("Failed to get device list")
#else:
# print("Device list created succesfully") #change these massages to interact with logger
self.dev_name = serialNos.value.decode("utf-8") #.strip().split(',')
#print(f"Connected to device {self.dev_name}")
#get device info including serial number
self.device_info = TLI_DeviceInfo() # container for device info
self._polarizationdll.TLI_GetDeviceInfo(serialNos[(device_num - 1) * 9:(device_num * 9) - 1], ctypes.byref(self.device_info)) #when there will be a few devices figure out how to seperate and access each one
self.device = serialNos[(device_num - 1) * 9:(device_num * 9) - 1]
#print("Description: ", self.device_info.description)
#print("Serial No: ", self.device_info.serialNo)
#print("Motor Type: ", self.device_info.motorType)
#print("USB PID: ", self.device_info.PID)
#print("Max Number of Paddles: ", self.device_info.maxPaddles)
        # establishing connection to the device
self.paddles = [paddle1, paddle3, paddle2]
connection = self._polarizationdll.MPC_Open(self.device)
if connection == 0:
self.log.info(f"Successfully connected to {self.device}.")
else:
self.log.error(f"Connection to {self.device} failed due to error {connection}.")
#technical methods
def _configure_functions(self):
""" Defines arguments and results for c functions """
self._polarizationdll.TLI_BuildDeviceList.argtype = None
self._polarizationdll.TLI_BuildDeviceList.restype = ctypes.c_short
self._polarizationdll.TLI_GetDeviceListSize.argtype = None
self._polarizationdll.TLI_GetDeviceListSize.restype = ctypes.c_short
self._polarizationdll.TLI_GetDeviceInfo.argtypes = [ctypes.POINTER(ctypes.c_char), ctypes.POINTER(TLI_DeviceInfo)]
self._polarizationdll.TLI_GetDeviceInfo.restype = ctypes.c_short
self._polarizationdll.TLI_GetDeviceListByTypeExt.argtypes = [ctypes.POINTER(ctypes.c_char), ctypes.c_ulong, ctypes.c_int]
self._polarizationdll.TLI_GetDeviceListByTypeExt.restype = ctypes.c_short
self._polarizationdll.MPC_Open.argtype = ctypes.POINTER(ctypes.c_char)
self._polarizationdll.MPC_Open.restype = ctypes.c_short
self._polarizationdll.MPC_Close.argtype = ctypes.POINTER(ctypes.c_char)
self._polarizationdll.MPC_Close.restype = ctypes.c_short
self._polarizationdll.MPC_CheckConnection.argtype = ctypes.c_char_p
self._polarizationdll.MPC_CheckConnection.restype = ctypes.c_bool
self._polarizationdll.MPC_GetPosition.argtypes = [ctypes.POINTER(ctypes.c_char), POL_Paddles]
self._polarizationdll.MPC_GetPosition.restype = ctypes.c_double
self._polarizationdll.MPC_RequestPolParams.argtype = ctypes.POINTER(ctypes.c_char)
self._polarizationdll.MPC_RequestPolParams.restype = ctypes.c_short
self._polarizationdll.MPC_GetPolParams.argtypes = [ctypes.POINTER(ctypes.c_char), ctypes.POINTER(TLI_PolarizerParameters)]
self._polarizationdll.MPC_GetPolParams.restype = ctypes.c_short
self._polarizationdll.MPC_SetPolParams.argtypes = [ctypes.POINTER(ctypes.c_char), ctypes.POINTER(TLI_PolarizerParameters)]
self._polarizationdll.MPC_SetPolParams.restype = ctypes.c_short
self._polarizationdll.MPC_SetJogSize.argtypes = [ctypes.POINTER(ctypes.c_char), POL_Paddles, ctypes.c_double]
self._polarizationdll.MPC_SetJogSize.restype = ctypes.c_short
self._polarizationdll.MPC_Jog.argtypes = [ctypes.POINTER(ctypes.c_char), POL_Paddles, MOT_TravelDirection]
self._polarizationdll.MPC_Jog.restype = ctypes.c_short
self._polarizationdll.MPC_GetMaxTravel.argtype = ctypes.POINTER(ctypes.c_char)
self._polarizationdll.MPC_GetMaxTravel.restype = ctypes.c_double
self._polarizationdll.MPC_MoveToPosition.argtypes = [ctypes.POINTER(ctypes.c_char), POL_Paddles, ctypes.c_double]
self._polarizationdll.MPC_MoveToPosition.restype = ctypes.c_short
self._polarizationdll.MPC_Stop.argtypes = [ctypes.POINTER(ctypes.c_char), POL_Paddles]
self._polarizationdll.MPC_Stop.restype = ctypes.c_short
self._polarizationdll.MPC_Home.argtypes = [ctypes.POINTER(ctypes.c_char), POL_Paddles]
self._polarizationdll.MPC_Home.restype = ctypes.c_short
self._polarizationdll.MPC_Jog.argtypes = [ctypes.POINTER(ctypes.c_char), ctypes.POINTER(TLI_PolarizerParameters), MOT_TravelDirection]
self._polarizationdll.MPC_Jog.restype = ctypes.c_short
self._polarizationdll.MPC_StartPolling.argtypes = [ctypes.POINTER(ctypes.c_char), ctypes.c_int]
self._polarizationdll.MPC_StartPolling.restype = ctypes.c_bool
self._polarizationdll.MPC_StopPolling.argtype = ctypes.POINTER(ctypes.c_char)
        self._polarizationdll.MPC_StopPolling.restype = ctypes.c_void_p  # no non-pointer void type in ctypes, so c_void_p is used
self._polarizationdll.MPC_SetVelocity.argtypes = [ctypes.POINTER(ctypes.c_char), ctypes.c_short]
self._polarizationdll.MPC_SetVelocity.restype = ctypes.c_short
self._polarizationdll.MPC_MoveRelative.argtypes = [ctypes.POINTER(ctypes.c_char), POL_Paddles, ctypes.c_double]
self._polarizationdll.MPC_MoveRelative.restype = ctypes.c_short
        self._polarizationdll.MPC_GetStepsPerDegree.argtypes = [ctypes.POINTER(ctypes.c_char)]
        self._polarizationdll.MPC_GetStepsPerDegree.restype = ctypes.c_double
#wrap function for external use
def open(self):
result = self._polarizationdll.MPC_Open(self.device)
if result == 0:
print("Connected succesfully to device")
else:
print("A problem occured when trying to connect to device")
def close(self):
resultc = self._polarizationdll.MPC_Close(self.device)
if resultc == 0:
print("Closed connection to device")
else:
print("A problem occured when trying to diconnect from device")
def home(self, paddle_num):
home_result = self._polarizationdll.MPC_Home(self.device, self.paddles[paddle_num])
return home_result
def set_velocity(self, velocity):
velocity = self._polarizationdll.MPC_SetVelocity(self.device, velocity)
def move(self, paddle_num, pos, sleep_time):
#posinitial = self._polarizationdll.MPC_GetPosition(self.device, self.paddles[paddle_num])
move_result = self._polarizationdll.MPC_MoveToPosition(self.device, self.paddles[paddle_num], pos)
time.sleep(abs(sleep_time * pos / 170))
#posfinal = self._polarizationdll.MPC_GetPosition(self.device, self.paddles[paddle_num])
return move_result #, posinitial, posfinal
def move_rel(self, paddle_num, step, sleep_time):
#posinitial = self._polarizationdll.MPC_GetPosition(self.device, self.paddles[paddle_num])
move_result = self._polarizationdll.MPC_MoveRelative(self.device, self.paddles[paddle_num], step)
time.sleep(abs(sleep_time * step / 170))
#posfinal = self._polarizationdll.MPC_GetPosition(self.device, self.paddles[paddle_num])
return move_result #, posinitial, posfinal
def get_angle(self, paddle_num):
currentpos = self._polarizationdll.MPC_GetPosition(self.device, self.paddles[paddle_num])
return currentpos
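# Minimal usage sketch (not part of the original module). The device number,
# target angle, sleep time and the bare `logger=None` are illustrative
# assumptions; in practice a pylabnet logger client would be passed in.
if __name__ == "__main__":
    paddle_driver = Driver(device_num=1, logger=None)
    paddle_driver.home(0)
    paddle_driver.move(0, 45.0, sleep_time=5)
    print(paddle_driver.get_angle(0))
    paddle_driver.close()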
|
from abc import ABCMeta, abstractmethod
from ..utils.activations import *
class NetworkBase(metaclass=ABCMeta):
def __init__(self, sizes, activation, last_layer, **kwargs):
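        """Shared constructor: `sizes` lists the layer widths, `activation` selects the
        hidden-layer nonlinearity by name, `last_layer` selects the output layer
        (only "softmax" is handled here), and extra kwargs such as `alpha` are
        forwarded to the chosen activation."""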
self.sizes = sizes
self.num_layers = len(sizes)
if activation.lower() == "sigmoid":
self.activation = Sigmoid()
# self.activation_derivative = sigmoid_derivative
elif activation.lower() == "relu":
self.activation = ReLU()
# self.activation_derivative = relu_derivative
elif activation.lower() == "tanh":
self.activation = Tanh()
elif activation.lower() == "softplus":
self.activation = Softplus()
elif activation.lower() == "leaky_relu" or "leakyrelu":
if "alpha" in kwargs:
self.activation = LeakyReLU(kwargs.get("alpha"))
else:
self.activation = LeakyReLU()
elif activation.lower() == "elu":
if "alpha" in kwargs:
self.activation = ELU(kwargs.get("alpha"))
else:
self.activation = ELU()
elif activation.lower() == "selu":
self.activation = Selu()
if last_layer.lower() == "softmax":
self.last_layer = Softmax()
@abstractmethod
def predict(self):
raise NotImplementedError
@abstractmethod
def backprop(self):
raise NotImplementedError
|
"""prawtools setup.py."""
import re
from codecs import open
from os import path
from setuptools import setup
PACKAGE_NAME = "prawtools"
HERE = path.abspath(path.dirname(__file__))
with open(path.join(HERE, "README.md"), encoding="utf-8") as fp:
README = fp.read()
with open(path.join(HERE, PACKAGE_NAME, "__init__.py"), encoding="utf-8") as fp:
VERSION = re.search('__version__ = "([^"]+)', fp.read()).group(1)
extras = {
"ci": ["coveralls"],
"lint": ["black", "flake8", "pydocstyle"],
"test": [
"betamax >=0.7.1, <0.8",
"betamax-serializers >=0.2.0, <0.3",
"mock ==1.0.1",
"pytest",
],
}
required = ["praw >=4.0.0, <7", "six >=1, <2"]
setup(
name=PACKAGE_NAME,
author="Bryce Boe",
author_email="bbzbryce@gmail.com",
classifiers=[
"Environment :: Console",
"Intended Audience :: Developers",
"License :: OSI Approved :: BSD License",
"Natural Language :: English",
"Operating System :: OS Independent",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Topic :: Utilities",
],
description="A collection of utilities that utilize the reddit API.",
entry_points={
"console_scripts": [
"modutils = prawtools.mod:main",
"reddit_alert = prawtools.alert:main",
"subreddit_stats = prawtools.stats:main",
]
},
extras_require=extras,
install_requires=required,
keywords="reddit mod moderator subreddit statistics tools",
license="Simplified BSD License",
long_description=README,
packages=[PACKAGE_NAME],
url="https://github.com/praw-dev/prawtools",
version=VERSION,
)
|
#-*- coding: utf-8 -*-
"""
Created on Mon Dec 10 12:48:22 2018
@author: Aite Zhao
"""
from __future__ import print_function
#import random
import tensorflow as tf
#from tensorflow.python.ops import rnn, rnn_cell
import numpy as np
#import plot_confusion_matrix
import rnn_cell_GRU as rnn_cell
import rnn
from sklearn import preprocessing
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
import matplotlib.pyplot as plt
import os
from tensorflow.contrib.tensor_forest.python import tensor_forest
from tensorflow.python.ops import resources
#from EvoloPy import *
from sklearn.ensemble import VotingClassifier
from sklearn.metrics import accuracy_score
from hmmlearn import hmm
from sklearn.linear_model import BayesianRidge
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import validation_curve
from sklearn.svm import SVC
from hmmexm import hmm_4model_classification,hmm_3model_classification
from sklearn.model_selection import LeaveOneOut, KFold,cross_val_score
from deep_CCA_model import *
from linear_cca import linear_cca
n_classes = 52
def labelprocess(label,n_class=n_classes):
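    """One-hot encode an integer label vector into an (N, n_class) matrix."""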
label_length=len(label)
label_matrix=np.zeros((label_length,n_class))
for i,j in enumerate(label):
label_matrix[i,int(j)]=1
return label_matrix
#
#def kfold_validation(data,label,n_splits=5):
# # K fold cross validation
# x_trains = []
# y_trains = []
# x_tests = []
# y_tests = []
# k_fold = KFold(n_splits)
# for train_index, test_index in k_fold.split(data):
# X_train, X_test = data[train_index], data[test_index]
# y_train, y_test = label[train_index], label[test_index]
# x_trains.append(X_train)
# y_trains.append(y_train)
# x_tests.append(X_test)
# y_tests.append(y_test)
# return x_trains,y_trains,x_tests,y_tests
#
def next_batch(batch_size,train_x,train_y,newli_train,force):
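    """Return the next mini-batch (features, one-hot labels, integer labels) from the
    force stream (force=True) or the time stream (force=False), keeping a separate
    cursor per stream and wrapping around at the end of the training data."""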
global batchid_force, batchid_time
if force==True:
if batchid_force+batch_size > len(train_x):
batchid_force = 0
batch_data = (train_x[batchid_force:min(batchid_force +batch_size, len(train_y)),:])
batch_labels = (newli_train[batchid_force:min(batchid_force + batch_size, len(newli_train)),:])
batch_labels_1d = (train_y[batchid_force:min(batchid_force + batch_size, len(train_y))])
batchid_force = min(batchid_force + batch_size, len(train_y))
return batch_data, batch_labels,batch_labels_1d
else:
if batchid_time+batch_size > len(train_x):
batchid_time = 0
batch_data = (train_x[batchid_time:min(batchid_time +batch_size, len(train_y)),:])
batch_labels = (newli_train[batchid_time:min(batchid_time + batch_size, len(newli_train)),:])
batch_labels_1d = (train_y[batchid_time:min(batchid_time + batch_size, len(train_y))])
batchid_time = min(batchid_time + batch_size, len(train_y))
return batch_data, batch_labels,batch_labels_1d
def RNN(x, weights, biases, n_input):
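    """Unroll a two-layer GRU (with dropout) over n_steps timesteps and return the
    logits computed from the last output together with the per-step outputs."""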
x = tf.transpose(x, [1, 0, 2])
# Reshaping to (n_steps*batch_size, n_input)
x = tf.reshape(tensor=x, shape=[-1, n_input])
# Split to get a list of 'n_steps' tensors of shape (batch_size, n_input)
x = tf.split(value=x, num_or_size_splits=n_steps, axis=0)
# Define a lstm cell with tensorflow
#lstm_cell = rnn_cell.BasicLSTMCell(n_hidden, forget_bias=1)
lstm_cell = rnn_cell.GRUCell(n_hidden)
#lstm_cell = rnn_cell.LSTMCell(n_hidden,use_peepholes=True)
# avoid overfitting
lstm_cell = rnn_cell.DropoutWrapper(lstm_cell, output_keep_prob=0.5)
# 2 layers lstm
# num_units = [256, 256]
# cells = [rnn_cell.GRUCell(num_units=n) for n in num_units]
# lstm_cell = rnn_cell.MultiRNNCell(cells)
lstm_cell = rnn_cell.MultiRNNCell([lstm_cell] * 2)
# Get lstm cell output
# print(x)
outputs, states = rnn.rnn(cell=lstm_cell, inputs=x, dtype=tf.float32)
return tf.matmul(outputs[-1], weights) + biases, outputs
def feature_connect(a_time,a_force):
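    """Concatenate every 15 consecutive force frames into one long feature vector
    (11340 rows -> 756 samples) and append the result to the time features;
    returns (fused_features, stacked_force_features)."""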
a=np.array([])
for j in range(int(11340/15)):
f=np.array([])
for i in range(15):
f = np.concatenate((f, a_force[j*15+i,:]), axis=0) if f.size else a_force[j*15+i,:]
a=np.c_[a,f] if a.size else f
# np.savetxt('./feature_extract/fusionfeature_data.txt', np.c_[a_time,np.transpose(a)],fmt='%.4f')
return np.c_[a_time,np.transpose(a)],np.transpose(a)
def DCCA():
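    """Build the DeepCCA model used below: the two 128-d GRU feature views are each
    projected through 1024-1024-1024 networks to a 10-d correlated space, with an
    additional n_classes-wide classification head per view (per the DeepCCA
    constructor arguments passed here)."""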
# LSTM CCA
outdim_size = 10
input_size1 = n_hidden
input_size2 = n_hidden
# input_size2 = 256
layer_sizes1 = [1024, 1024, 1024, outdim_size]
layer_sizes2 = [1024, 1024, 1024, outdim_size]
layer_sizes3 = [1024, 1024, 1024, n_classes]
layer_sizes4 = [1024, 1024, 1024, n_classes]
reg_par = 1e-4
use_all_singular_values = True
dcca_model = DeepCCA(layer_sizes1, layer_sizes2,layer_sizes3,layer_sizes4,
input_size1, input_size2,
outdim_size,
reg_par, use_all_singular_values)
return dcca_model
def softmax(x):
    # Subtract the row-wise max before exponentiating for numerical stability.
    x_exp = np.exp(x - np.max(x, axis=1, keepdims=True))
    x_sum = np.sum(x_exp, axis=1, keepdims=True)
    return x_exp / x_sum
if __name__=='__main__':
#remove cpu occupation
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
#os.environ["CUDA_VISIBLE_DEVICES"] = ""
# load data
a_force=np.loadtxt("/home/zat/zresearch/ndds-corrlstm/data/sdugait/12023f.txt")
a_force=a_force[:,0:60]
b_force=np.loadtxt("/home/zat/zresearch/ndds-corrlstm/data/sdugait/12023label.txt")
b_force=b_force-1
a_time=np.loadtxt("/home/zat/zresearch/ndds-corrlstm/results/sdu/feature/out256_sdu_img.txt")
b_time=np.loadtxt("/home/zat/zresearch/ndds-corrlstm/data/sdugait/12023label.txt")
# a_time=a_time[:,270:330]
b_time=b_time-1
# a_time=preprocessing.normalize(a_time+1)
all_fea_force=labelprocess(b_force)
all_fea_time=labelprocess(b_time)
## train_test_split 20% testing
# train_x_time,test_x_time,train_y_time,test_y_time = train_test_split(a_time,b_time,test_size=0.2)
# train_x_force,test_x_force,train_y_force,test_y_force = train_test_split(a_force,b_force,test_size=0.2)
# print(train_x_time.shape,test_x_time.shape,train_x_force.shape,test_x_force.shape)
# newli_train_time=labelprocess(train_y_time)
# newli_test_time=labelprocess(test_y_time)
# newli_train_force=labelprocess(train_y_force)
# newli_test_force=labelprocess(test_y_force)
## 10 Fold cross validation
# x_trains_force,y_trains_force,x_tests_force,y_tests_force = kfold_validation(a_force,b_force)
# x_trains_time,y_trains_time,x_tests_time,y_tests_time = kfold_validation(a_time,b_time)
# Parameters
learning_rate = 0.001
training_iters_force = 5000000
# training_iters_time = 500000
batch_size = 256
display_step = 100
batchid_time = 0
batchid_force = 0
# Network Parameters
n_input_force = 15
n_input_time = 32
n_steps = 4
n_hidden = 128
# reset graph
tf.reset_default_graph()
# force_channel Graph
G_force=tf.Graph()
Sess_force=tf.Session(graph=G_force)
with Sess_force.as_default():
with G_force.as_default():
with tf.variable_scope("force_channel") as scope:
x_force = tf.placeholder("float", [None, n_steps, n_input_force],name='x_force')
y_force = tf.placeholder("float", [None, n_classes])
weights = {
'weights_out_force': tf.Variable(tf.random_normal([n_hidden, n_classes]),name='weights_out_force')
}
biases= {
'biases_out_force': tf.Variable(tf.random_normal([n_classes]),name='biases_out_force')
}
pred_force, out_force = RNN(x_force, weights['weights_out_force'], biases['biases_out_force'], n_input_force)
logits_scaled_force=tf.nn.softmax(pred_force)
cost_force = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=pred_force, labels=y_force))
optimizer_force = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost_force)
correct_pred_force = tf.equal(tf.argmax(pred_force,1), tf.argmax(y_force,1))
accuracy_force = tf.reduce_mean(tf.cast(correct_pred_force, tf.float32))
Sess_force.run(tf.global_variables_initializer())
saverf = tf.train.Saver()
# time_channel Graph
G_time=tf.Graph()
Sess_time=tf.Session(graph=G_time)
with Sess_time.as_default():
with G_time.as_default():
with tf.variable_scope("time_channel") as scope:
x_time = tf.placeholder("float", [None, n_steps, n_input_time],name='x_time')
y_time = tf.placeholder("float", [None, n_classes])
weights = {
'weights_out_time': tf.Variable(tf.random_normal([n_hidden, n_classes]),name='weights_out_time'),
}
biases= {
'biases_out_time': tf.Variable(tf.random_normal([n_classes]),name='biases_out_time'),
}
pred_time, out_time = RNN(x_time, weights['weights_out_time'], biases['biases_out_time'], n_input_time)
logits_scaled_time=tf.nn.softmax(pred_time)
cost_time = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=pred_time, labels=y_time))
optimizer_time = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost_time)
correct_pred_time = tf.equal(tf.argmax(pred_time,1), tf.argmax(y_time,1))
accuracy_time = tf.reduce_mean(tf.cast(correct_pred_time, tf.float32))
Sess_time.run(tf.global_variables_initializer())
savert = tf.train.Saver()
# dcca_model Graph
G_dcca=tf.Graph()
Sess_dcca=tf.Session(graph=G_dcca)
with Sess_dcca.as_default():
with G_dcca.as_default():
dcca_model=DCCA()
input_view1 = dcca_model.input_view1
input_view2 = dcca_model.input_view2
hidden_view1 = dcca_model.output_view1
hidden_view2 = dcca_model.output_view2
hidden_view1_pred = dcca_model.output_view1_class
hidden_view2_pred = dcca_model.output_view2_class
label1 = dcca_model.label1
label2 = dcca_model.label2
neg_corr = dcca_model.neg_corr
value= dcca_model.value
# gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.333)
# Sess_dcca = tf.InteractiveSession(config=tf.ConfigProto(gpu_options=gpu_options))
# maxmize the correlation between two data unsupervised learning(minimize -corr)
# train_op = tf.train.MomentumOptimizer(learning_rate, 0.99).minimize(neg_corr,var_list=tf.trainable_variables())
train_op = tf.train.AdamOptimizer(learning_rate).minimize(neg_corr,var_list=tf.trainable_variables())
# minimize the cost between different classes supervised learning
cross_entropy1 = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(labels=label1, logits=hidden_view1_pred))
optimizer1 = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cross_entropy1)
accuracy1 = tf.reduce_mean(tf.cast(tf.equal(tf.argmax(hidden_view1_pred, 1), tf.argmax(label1, 1)), tf.float32))
cross_entropy2 = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(labels=label2, logits=hidden_view2_pred))
optimizer2 = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cross_entropy2)
accuracy2 = tf.reduce_mean(tf.cast(tf.equal(tf.argmax(hidden_view2_pred, 1), tf.argmax(label2, 1)), tf.float32))
lossfuse=cross_entropy1+cross_entropy2+tf.exp(neg_corr)
optimizerfuse=tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(lossfuse)
## supervised learning
# cross_entropy1 = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(labels=cnnlabel1, logits=hidden_view1))
# optimizer1 = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cross_entropy1)
# cnnaccuracy1 = tf.reduce_mean(tf.cast(tf.equal(tf.argmax(hidden_view1, 1), tf.argmax(cnnlabel1, 1)), tf.float32))
#
# cross_entropy2 = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(labels=cnnlabel2, logits=hidden_view2))
# optimizer2 = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cross_entropy2)
# cnnaccuracy2 = tf.reduce_mean(tf.cast(tf.equal(tf.argmax(hidden_view2, 1), tf.argmax(cnnlabel2, 1)), tf.float32))
Sess_dcca.run(tf.global_variables_initializer())
saverd = tf.train.Saver()
# tf.InteractiveSession.close()
# weights = {
# 'weights_out_time': tf.Variable(tf.random_normal([n_hidden, n_classes]),name='weights_out_time'),
# 'weights_out_force': tf.Variable(tf.random_normal([n_hidden, n_classes]),name='weights_out_force')
# }
# biases= {
# 'biases_out_time': tf.Variable(tf.random_normal([n_classes]),name='biases_out_time'),
# 'biases_out_force': tf.Variable(tf.random_normal([n_classes]),name='biases_out_force')
# }
# weights = {
# 'out': tf.Variable(tf.random_normal([n_hidden, n_classes]))
# }
# biases= {
# 'out': tf.Variable(tf.random_normal([n_classes]))
# }
#
# with tf.variable_scope("force_channel") as scope:
# pred_force, out_force = RNN(x_force, weights['weights_out_force'], biases['biases_out_force'], n_input_force)
# pred_force, out_force = RNN(x_force, weights['out'], biases['out'], n_input_force)
# logits_scaled_force=tf.nn.softmax(pred_force)
# cost_force = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=pred_force, labels=y_force))
# optimizer_force = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost_force)
# correct_pred_force = tf.equal(tf.argmax(pred_force,1), tf.argmax(y_force,1))
# accuracy_force = tf.reduce_mean(tf.cast(correct_pred_force, tf.float32))
#
# with tf.variable_scope("time_channel") as scope:
## pred_time, out_time = RNN(x_time, weights['weights_out_time'], biases['biases_out_time'], n_input_time)
# pred_time, out_time = RNN(x_time, weights['out'], biases['out'], n_input_time)
# logits_scaled_time=tf.nn.softmax(pred_time)
# cost_time = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=pred_time, labels=y_time))
# optimizer_time = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost_time)
# correct_pred_time = tf.equal(tf.argmax(pred_time,1), tf.argmax(y_time,1))
# accuracy_time = tf.reduce_mean(tf.cast(correct_pred_time, tf.float32))
accuracys_force=[]
accuracys_time=[]
for i in range(1):
#20% split
train_x_time,test_x_time,train_y_time,test_y_time = train_test_split(a_time,b_time,test_size=0.2,random_state=1)
train_x_force,test_x_force,train_y_force,test_y_force = train_test_split(a_force,b_force,test_size=0.2,random_state=1)
print(train_x_time.shape,test_x_time.shape,train_x_force.shape,test_x_force.shape)
newli_train_time=labelprocess(train_y_time)
newli_test_time=labelprocess(test_y_time)
newli_train_force=labelprocess(train_y_force)
newli_test_force=labelprocess(test_y_force)
#10 fold
# train_x_force=x_trains_force[i]
# train_y_force=y_trains_force[i]
# test_x_force=x_tests_force[i]
# test_y_force=y_tests_force[i]
#
# train_x_time=x_trains_time[i]
# train_y_time=y_trains_time[i]
# test_x_time=x_tests_time[i]
# test_y_time=y_tests_time[i]
#
# newli_train_force=labelprocess(train_y_force)
# newli_train_time=labelprocess(train_y_time)
#
# newli_test_force=labelprocess(test_y_force)
# newli_test_time=labelprocess(test_y_time)
# Initializing the variables
# init = tf.global_variables_initializer()
# saver = tf.train.Saver()
# Launch the graph
# with tf.Session() as sess:
# #rnn
# sess.run(init)
# #rf
# # sess.run(rf_init_vars)
# tf.device('/gpu:0')
step = 1
acc_forces=[]
loss_forces=[]
acc_times=[]
loss_times=[]
dccaloss=[]
fuseloss=[]
out_force256=None
out_time256=None
tf.device('/gpu:0')
while step * batch_size < training_iters_force:
with tf.variable_scope("force_channel") as scope:
rf_batch_x_force, batch_y_force, rf_batch_y_force= next_batch(batch_size,train_x_force,train_y_force,newli_train_force,True)
batch_x_force = rf_batch_x_force.reshape((batch_size, n_steps, n_input_force))
_,out_force256=Sess_force.run([optimizer_force,out_force],
feed_dict={x_force: batch_x_force, y_force: batch_y_force})
if step % display_step == 0:
acc_force,loss_force= Sess_force.run([accuracy_force,cost_force],
feed_dict={x_force: batch_x_force, y_force: batch_y_force})
print("Iter " + str(step*batch_size) + ", Minibatch loss_force= " + \
"{:.6f}".format(loss_force) + ", Training Accuracy= " + \
"{:.5f}".format(acc_force))
acc_forces.append(acc_force)
loss_forces.append(loss_force)
# step += 1
# step = 1
# while step * batch_size < training_iters_time:
with tf.variable_scope("time_channel") as scope:
rf_batch_x_time, batch_y_time, rf_batch_y_time= next_batch(batch_size,train_x_time,train_y_time,newli_train_time,False)
batch_x_time = rf_batch_x_time.reshape((batch_size, n_steps, n_input_time))
_,out_time256=Sess_time.run([optimizer_time,out_time],
feed_dict={x_time: batch_x_time, y_time: batch_y_time})
if step % display_step == 0:
acc_time,loss_time = Sess_time.run([accuracy_time,cost_time],
feed_dict={x_time: batch_x_time, y_time: batch_y_time})
print("Iter " + str(step*batch_size) + ", Minibatch loss_time= " + \
"{:.6f}".format(loss_time) + ", Training Accuracy= " + \
"{:.5f}".format(acc_time))
acc_times.append(acc_time)
loss_times.append(loss_time)
################# Deep CCA: maximize the correlation #############################
# correlation in each node
# for force256,time256 in zip(out_force256,out_time256):
# _, neg_corr_val,_,_= Sess_dcca.run([train_op, neg_corr,optimizer1,optimizer2],
# feed_dict={input_view1:force256,input_view2:time256,
# label1:batch_y_force,
# label2:batch_y_time})
# acc1,acc2 = Sess_dcca.run([accuracy1, accuracy2],
# feed_dict={input_view1:force256,input_view2:time256,
# label1:batch_y_force,
# label2:batch_y_time})
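# out_force256 / out_time256 hold the per-timestep LSTM outputs (timestep x batch x 256,
# see the Linear CCA note further below), so each zipped pair feeds one timestep's
# batch of 256-d features into the DCCA graph.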
for force256,time256 in zip(out_force256,out_time256):
# print(force256.shape,time256.shape)
_, neg_corr_val,_,lossfuseprint,corvalue= Sess_dcca.run([train_op, neg_corr,optimizerfuse,lossfuse,value],
feed_dict={input_view1:force256,input_view2:time256,
label1:batch_y_force,
label2:batch_y_time})
# acc1,acc2 = Sess_dcca.run([accuracy1, accuracy2],
# feed_dict={input_view1:force256,input_view2:time256,
# label1:batch_y_force,
# label2:batch_y_time})
# print(corvalue)
if step % display_step == 0:
dccaloss.append(np.exp(neg_corr_val))
fuseloss.append(lossfuseprint)
# print('corr_val',-neg_corr_val)
# print("fuse_loss_for_train:", lossfuseprint)
# print("accuracy1:", acc1)
# print("accuracy2:", acc2)
step += 1
# save the training process
# np.savetxt('./results/train_loss_dcca'+str(i)+'.csv',dccaloss,delimiter=',')
# np.savetxt('./results/train_loss_fuse'+str(i)+'.csv',fuseloss,delimiter=',')
#
#
# np.savetxt('./results/train_acc_force'+str(i)+'.csv',acc_forces,delimiter=',')
# np.savetxt('./results/train_loss_force'+str(i)+'.csv',loss_forces,delimiter=',')
# np.savetxt('./results/train_acc_time'+str(i)+'.csv',acc_times,delimiter=',')
# np.savetxt('./results/train_loss_time'+str(i)+'.csv',loss_times,delimiter=',')
################# Linear CCA #############################
# Using CCA to extract feature in each node in LSTM
data_time=a_time.reshape((-1,n_steps, n_input_time))
out256_time=Sess_time.run(out_time,feed_dict={x_time: data_time, y_time: all_fea_time})
data_force=a_force.reshape((-1,n_steps, n_input_force))
out256_force=Sess_force.run(out_force,feed_dict={x_force: data_force, y_force: all_fea_force})
fusionfeature_data=np.c_[out256_time[-1],out256_force[-1]]
np.savetxt('./fusionfeature_Corrmnn_sdu.csv', fusionfeature_data, fmt='%.4f')
# compute the correlation in each node in LSTM (timestep* batchsize * 256d)
X1projlist=np.array([])
X2projlist=np.array([])
for eachnode_force,eachnode_time in zip(out256_force,out256_time):
X1proj, X2proj = Sess_dcca.run([hidden_view1, hidden_view2],
feed_dict={
input_view1: eachnode_force,
input_view2: eachnode_time})
# (11340, 10) (756, 10)
X1projlist=np.c_[X1projlist,X1proj] if X1projlist.size else X1proj
X2projlist=np.c_[X2projlist,X2proj] if X2projlist.size else X2proj
# ccafuse_data,_ = feature_connect(X2projlist,X1projlist)
ccafuse_data=np.c_[X2projlist,X1projlist]
print('----------ccafuse_data '+str(i)+'-----------')
# (756, 1600) (756, 1600)
np.savetxt('./ccafuse_sdu.csv', ccafuse_data, fmt='%.4f')
# print("Linear CCA started!")
# w = [None, None]
# m = [None, None]
# print(X1proj.shape, X2proj.shape)
# w[0], w[1], m[0], m[1] = linear_cca(X1proj, X2proj, 10)
# print("Linear CCA ended!")
# X1proj -= m[0].reshape([1, -1]).repeat(len(X1proj), axis=0)
# X1proj = np.dot(X1proj, w[0])
# X1projlist=np.c_[X1projlist,X1proj] if X1projlist.size else X1proj
# print(X1projlist.shape)
################# testing LSTM #############################
test_data=test_x_force.reshape((-1,n_steps, n_input_force))
test_label=newli_test_force
accuracy_force_out=Sess_force.run(accuracy_force, feed_dict={x_force: test_data, y_force: test_label})
print("Force Testing Accuracy:",accuracy_force_out)
test_data=test_x_time.reshape((-1,n_steps, n_input_time))
test_label=newli_test_time
accuracy_time_out=Sess_time.run(accuracy_time, feed_dict={x_time: test_data, y_time: test_label})
print("Time Testing Accuracy:",accuracy_time_out)
accuracys_force.append(accuracy_force_out)
accuracys_time.append(accuracy_time_out)
print(accuracys_force,accuracys_time)
print('accuracys_force_mean:',np.mean(accuracys_force))
print('accuracys_time_mean:',np.mean(accuracys_time))
accuracys_force.append(np.mean(accuracys_force))
accuracys_time.append(np.mean(accuracys_time))
# np.savetxt('./test_result_fog.csv',[accuracys_force,accuracys_time])
## extract the last output of the lstm in all data
# data_time=a_time.reshape((-1,n_steps, n_input_time))
# out256_time=Sess_time.run(out_time,feed_dict={x_time: data_time, y_time: all_fea_time})
#
# data_force=a_force.reshape((-1,n_steps, n_input_force))
# out256_force=Sess_force.run(out_force,feed_dict={x_force: data_force, y_force: all_fea_force})
#
# np.savetxt('./out256_time.txt', out256_time, fmt='%.4f')
# np.savetxt('./out256_force.txt', out256_force, fmt='%.4f')
#
# saver.save(sess, './modelcache/fusemodel.ckpt')
# writer=tf.summary.FileWriter('./fusemodel_graph',sess.graph)
# writer.flush()
# writer.close()
# sess.close()
# saverf.save(Sess_force, './modelcache/forcemodel.ckpt')
# writerf=tf.summary.FileWriter('./graphs/forcemodel_graph',Sess_force.graph)
# savert.save(Sess_time, './modelcache/timemodel.ckpt')
# writert=tf.summary.FileWriter('./graphs/timemodel_graph',Sess_time.graph)
# saverd.save(Sess_dcca, './modelcache/dccamodel.ckpt')
# writerd=tf.summary.FileWriter('./graphs/dccamodel_graph',Sess_dcca.graph)
# writerf.flush()
# writerf.close()
# Sess_force.close()
# writert.flush()
# writert.close()
# Sess_time.close()
# writerd.flush()
# writerd.close()
# Sess_dcca.close()
# align the two types of data
# fusionfeature_data,force_data = feature_connect(out256_time,out256_force)
# fusionfeature_data=np.c_[out256_time[-1],out256_force[-1]]
# np.savetxt('./fusionfeature_Corrmnn.txt', fusionfeature_data, fmt='%.4f')
# hmm_accuracy = hmm_4model_classification(fusionfeature_data,b_time)
# combine the lda feature(2d) with ccafuse_data
# ldafeature=np.loadtxt('./feature_extract/ldafeature_data.txt')
# ldafeature=softmax(ldafeature)
# ldafeature=preprocessing.normalize(ldafeature)
# print(ldafeature)
# ccafuse_data=np.c_[ccafuse_data,ldafeature]
#
# hmm_accuracy = hmm_4model_classification(ccafuse_data,b_time)
# print('Total hmm accuracy:',hmm_accuracy)
# fuse_data=np.loadtxt('/home/zat/zresearch/ndds-corrlstm/results/fog/fusefea.csv')
#
# hmm_accuracy = hmm_3model_classification(fuse_data,b_time)
# print('Total hmm accuracy:',hmm_accuracy)
|
import datetime
import shutil
import services.inventory
import workflow
import pandas as pd
import os
import file_system
import file_system.images as images
import json
from file_system.file_system_object import FileSystemObject
from services import inventory, library
from tabulate import tabulate
import cv2
TEMP_FOLDER = "tmp/eval"
RECYCLE_BIN = "tmp/recycle_bin"
def inventory_menu():
library_id = prompt_for_library()
while True:
print("\n")
print("###############################################")
print("Digital Library Utility - Inventory Management ")
print("###############################################")
print("[0] Return to Main Menu")
print("[1] Add/Update (Refresh) Inventory")
print("[3] View Inventory")
print("[4] Reconcile (Library) Inventory")
print("[5] Update Inventory Compare Scores")
print("[6] Manage Duplicate Inventory")
print("[7] Restore files from Recycle Bin")
print("[8] Classify Inventory")
choice = input("> ")
if choice.isnumeric() and int(choice) in range(10):
if int(choice) == 0:
workflow.main_menu()
elif int(choice) == 1: # add/update inventory
refresh_inventory(library_id=library_id)
reconcile_inventory(library_id=library_id, calculate_compare_score=False)
elif int(choice) == 3: # view inventory by library
display_library_inventory(library_id)
elif int(choice) == 4: # reconcile inventory
reconcile_inventory(library_id=library_id, calculate_compare_score=False)
elif int(choice) == 5: # reconcile inventory with compare score calculation
reconcile_inventory(library_id=library_id, calculate_compare_score=True)
elif int(choice) == 6: # manage duplicate inventory
refresh_inventory(library_id=library_id)
reconcile_inventory(library_id=library_id, calculate_compare_score=True)
get_comparable_inventory(library_id=library_id)
move_files_to_recycle_bin(library_id=library_id)
clear_eval_folder(TEMP_FOLDER)
refresh_inventory(library_id=library_id)
elif int(choice) == 7:
restore_from_recycle_bin()
reconcile_inventory(library_id=library_id, calculate_compare_score=False)
elif int(choice) == 8:
display_library_inventory(library_id)
update_classification(library_id)
else:
print("Selection not valid. Please try again.")
def refresh_inventory(library_id):
src = get_library_base_path(library_id)
exclusion_list = ['.map', 'venv', '.pyc', '__pycache__', '.DS_Store', 'ignore', '.idea', 'git']
restricted_list = []
data = file_system.search(search_path=src,
recursive=True,
exclusion_list=exclusion_list,
restricted_list=restricted_list)
for idx, item in enumerate(data):
data[idx]['library_id'] = library_id
if not data[idx]['is_hidden']:
inventory.refresh_inventory(**data[idx])
def prompt_for_library():
workflow.workflow_library.display_user_libraries()
prompt = input("Select Library ID: ")
if lib := services.library.get_library(prompt):
return lib.library_id
print(f"{prompt} is not a valid Library ID")
return prompt_for_library()
def get_library_base_path(library_id):
lib = library.get_library(library_id)
return lib.base_path
def update_inventory_compare_scores(inventory_id, full_path):
fso = FileSystemObject(full_path).to_dict()
if fso and fso['is_found'] and not fso['is_hidden']:
fso['inventory_removed_date'] = None
inv = inventory.get_inventory_item(inventory_id)
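# Recompute the compare score only when it is missing, zero, or stale
# (i.e. the file was modified after the score was last calculated).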
if not inv.compare_score or inv.compare_score == 0 or inv.compare_score_dt < inv.modified_dt:
fso['compare_score'] = (update_compare_score(full_path, size=fso['size']))
fso['compare_score_dt'] = datetime.datetime.now()
inventory.update_inventory(inventory_id, **fso)
else:
data = {'inventory_removed_date': datetime.datetime.now()}
inventory.update_inventory(inventory_id, **data)
def update_compare_score(full_path, size):
return images.calculate_compare_score(full_path, size=size)
def get_inventory(library_id):
return inventory.get_library_inventory(library_id=library_id)
def display_all_inventory():
results = inventory.get_all_inventory()
df = pd.DataFrame(results)
# df = df.drop(['_sa_instance_state'], axis=1)
df = df.sort_values(by=['library_id', 'directory', 'full_path'])
print(tabulate(df.head(500), headers='keys', tablefmt='psql'))
def display_library_inventory(library_id):
if results := inventory.get_library_inventory(library_id):
df = pd.DataFrame(results)
# df = df.drop(['_sa_instance_state'], axis=1)
df = df.sort_values(by=['library_id', 'directory', 'full_path'])
print(tabulate(df.head(500), headers='keys', tablefmt='psql'))
else:
return None
def reconcile_inventory(library_id, calculate_compare_score: bool = False):
# Purpose: Identify files/folders that no longer exist and update DB accordingly
# library_id = prompt_for_library()
results = inventory.get_library_inventory(library_id)
for idx, item in enumerate(results):
if results[idx]['file']:
src_path = results[idx]['full_path']
inventory_id = results[idx]['inventory_id']
fso = FileSystemObject(src_path).to_dict()
if fso and fso['is_found'] and not fso['is_hidden']:
data = {
'inventory_removed_date': None,
'inventory_removed_reason': None,
'is_missing': False
}
else:
data = {'inventory_removed_date': datetime.datetime.now(),
'is_missing': True
}
inventory.update_inventory(inventory_id, **data)
if calculate_compare_score:
update_inventory_compare_scores(inventory_id, src_path)
def restore_from_recycle_bin():
path = RECYCLE_BIN
for root, folders, files in os.walk(path, topdown=True):
for file in files:
recycled_file = os.path.splitext(file)[0]
src = os.path.join(root, file)
original_file = services.inventory.get_inventory_item(recycled_file)
dest = original_file.full_path
shutil.move(src, dest)
def get_comparable_inventory(library_id):
try:
if data := inventory.get_comparable_inventory(library_id):
df = pd.DataFrame(data)
# df = df.drop(['_sa_instance_state'], axis=1)
df["file"] = df["file"].str.lower()
df['compare_score_frequency'] = df.groupby('compare_score')['compare_score'].transform('count')
df = df[df.groupby('compare_score')['compare_score'].transform('count') > 1]
df = df[['inventory_id', 'library_id', 'directory', 'full_path', 'file', 'file_extension',
'size', 'created_dt', 'modified_dt',
'compare_score_dt', 'compare_score', 'compare_score_frequency']]
# df.sort_values(by=['compare_score', 'size'])
# print(tabulate(df, headers='keys', tablefmt='psql'))
group_duplicates(df)
clear_eval_folder(TEMP_FOLDER)
else:
print("No duplicates were found.")
except Exception as e:
print(f"An unexpected error has occurred: {e}")
def group_duplicates(df: pd.DataFrame):
distinct_scores = list(df['compare_score'].unique())
count = len(distinct_scores)
for counter, score in enumerate(distinct_scores, 1):
sample = df[df["compare_score"] == score]
sample = pd.DataFrame(sample, columns=['inventory_id', 'file', 'file_extension', 'full_path', 'directory',
'size', 'created_dt', 'modified_dt'])
sample.reset_index(drop=True, inplace=True)
print("###############################################")
print(f"Potential Duplicate Group {counter} of {count}")
print(f"Compare Score: {score}")
print("###############################################")
evaluate_duplicates_by_group(sample)
def evaluate_duplicates_by_group(sample: pd.DataFrame):
clear_eval_folder(path=TEMP_FOLDER)
group = []
# print(tabulate(sample.head(), headers='keys', tablefmt='psql'))
for idx, row in sample.iterrows():
group.append(row['inventory_id'])
inventory_id = row['inventory_id']
created = row['created_dt']
modified = row['modified_dt']
size = row['size']
src = row['full_path']
dest = f'{TEMP_FOLDER}/' + inventory_id + row['file_extension']
print(f"InventoryID: {inventory_id} | File: {row['file']} | Created: {created} | "
f"Modified: {modified} | Size: {size}")
shutil.copy2(src, dest)
if retain := input("Enter Inventory IDs you wish to keep (separate by comma): ").split(","):
for idx, item in enumerate(retain):
retain[idx] = item.strip()
for inv_id in group:
if inv_id not in retain:
reason = input(f"Enter reason for removal of {inv_id}: ")
services.inventory.remove_inventory_item(inv_id.strip(), reason.strip())
def move_files_to_recycle_bin(library_id):
reconcile_inventory(library_id, calculate_compare_score=False)
if data := inventory.get_removed_inventory(library_id):
for idx, item in enumerate(data):
src = data[idx]['full_path']
inventory_id = data[idx]['inventory_id']
file_extension = data[idx]['file_extension']
dest = f'{RECYCLE_BIN}/' + inventory_id + file_extension
try:
shutil.move(src, dest)
except FileNotFoundError:
print("A FileNotFound error has occurred.")
def remove_inventory(group: list, retain: list):
for idx, item in enumerate(retain):
retain[idx] = item.strip()
for inv_id in group:
if inv_id not in retain:
reason = input(f"Enter reason for removal of {inv_id}: ")
services.inventory.remove_inventory_item(inv_id.strip(), reason.strip())
def clear_eval_folder(path: str):
mypath = path
for root, dirs, files in os.walk(mypath):
for file in files:
os.remove(os.path.join(root, file))
def select_inventory_item():
return input("Input Inventory ID: ")
def get_inventory_item(inventory_id):
return services.inventory.get_inventory_item(inventory_id=inventory_id)
def update_classification(library_id, incl_assignment: bool = False):
inv = workflow.workflow_inventory.get_inventory(library_id=library_id)
try:
for file in inv:
inventory_id = file['inventory_id']
if file['is_image']:
# inv = services.inventory.get_inventory_item(inventory_id=inventory_id).to_dict()
cv2.imshow(file['file'], cv2.imread(file['full_path']))
cv2.waitKey(1)
if file['classification']:
print(f"Current Tags: {file['classification']['tags']}")
tag_values = [item.strip() for item in input("Input Tags (separated by comma): ").split(',')]
data = {
'inventory_id': inventory_id,
'classification': {'tags': tag_values},
'model_assignment': input("Model Assignment Name: ") if incl_assignment else file['model_assignment']
}
services.inventory.update_inventory_classification(**data)
cv2.destroyAllWindows()
cv2.destroyAllWindows()
except:
raise
def update_classification_from_model(inventory_id, tags: str):
file = workflow.workflow_inventory.get_inventory_item(inventory_id).to_dict()
classification = file['classification']['tags'] if file['classification'] else []
classification.append(tags)
classification = list(set(classification))
data = {
'inventory_id': inventory_id,
'classification': {'tags': classification}
}
services.inventory.update_inventory_classification(**data)
# for image in inv:
# inventory_id = image['inventory_id']
#
# try:
# if inv := services.inventory.get_inventory_item(inventory_id=inventory_id).to_dict():
# cv2.imshow(image['file'], image['full_path'])
# # cv2.imwrite("tests/samples/ml/test/output.jpg", image)
# cv2.waitKey(0)
# # cv2.destroyAllWindows()
# if inv['classification']:
# print(f"Current Tags: {inv['classification']['tags']}")
#
# tag_values = [item.strip() for item in input("Input Tags (separated by comma): ").split(',')]
# data = {
# 'inventory_id': inventory_id,
# 'classification': {'tags': tag_values},
# 'model_assignment': input("Model Assignment Name: ") if incl_assignment else inv['model_assignment']
# }
# services.inventory.update_inventory_classification(**data)
#
# cv2.destroyAllWindows()
# except:
# raise
#5351dd023ef1440393b81ec0acbe2f4a
|
from __future__ import absolute_import
from __future__ import print_function
import veriloggen
import thread_slice
expected_verilog = """
module test;
reg CLK;
reg RST;
wire [8-1:0] LED;
blinkled
uut
(
.CLK(CLK),
.RST(RST),
.LED(LED)
);
initial begin
$dumpfile("uut.vcd");
$dumpvars(0, uut);
end
initial begin
CLK = 0;
forever begin
#5 CLK = !CLK;
end
end
initial begin
RST = 0;
#100;
RST = 1;
#100;
RST = 0;
#10000;
$finish;
end
endmodule
module blinkled
(
input CLK,
input RST,
output reg [8-1:0] LED
);
reg [8-1:0] count;
reg [32-1:0] th_blink;
localparam th_blink_init = 0;
reg signed [32-1:0] _th_blink_times_0;
reg signed [32-1:0] _th_blink_i_1;
reg signed [32-1:0] _th_blink_x_2;
localparam th_blink_1 = 1;
localparam th_blink_2 = 2;
localparam th_blink_3 = 3;
localparam th_blink_4 = 4;
localparam th_blink_5 = 5;
localparam th_blink_6 = 6;
localparam th_blink_7 = 7;
localparam th_blink_8 = 8;
localparam th_blink_9 = 9;
localparam th_blink_10 = 10;
localparam th_blink_11 = 11;
localparam th_blink_12 = 12;
localparam th_blink_13 = 13;
localparam th_blink_14 = 14;
localparam th_blink_15 = 15;
localparam th_blink_16 = 16;
localparam th_blink_17 = 17;
localparam th_blink_18 = 18;
localparam th_blink_19 = 19;
localparam th_blink_20 = 20;
localparam th_blink_21 = 21;
localparam th_blink_22 = 22;
localparam th_blink_23 = 23;
localparam th_blink_24 = 24;
localparam th_blink_25 = 25;
localparam th_blink_26 = 26;
localparam th_blink_27 = 27;
localparam th_blink_28 = 28;
always @(posedge CLK) begin
if(RST) begin
th_blink <= th_blink_init;
_th_blink_times_0 <= 0;
LED <= 0;
count <= 0;
_th_blink_i_1 <= 0;
_th_blink_x_2 <= 0;
LED[_th_blink_x_2] <= (0 >> _th_blink_x_2) & 1'd1;
end else begin
case(th_blink)
th_blink_init: begin
_th_blink_times_0 <= 10;
th_blink <= th_blink_1;
end
th_blink_1: begin
LED <= 0;
th_blink <= th_blink_2;
end
th_blink_2: begin
count <= 0;
th_blink <= th_blink_3;
end
th_blink_3: begin
_th_blink_i_1 <= 0;
th_blink <= th_blink_4;
end
th_blink_4: begin
if(_th_blink_i_1 < _th_blink_times_0) begin
th_blink <= th_blink_5;
end else begin
th_blink <= th_blink_12;
end
end
th_blink_5: begin
_th_blink_x_2 <= 0;
th_blink <= th_blink_6;
end
th_blink_6: begin
if(_th_blink_x_2 < 8) begin
th_blink <= th_blink_7;
end else begin
th_blink <= th_blink_9;
end
end
th_blink_7: begin
LED[_th_blink_x_2] <= count[_th_blink_x_2];
th_blink <= th_blink_8;
end
th_blink_8: begin
_th_blink_x_2 <= _th_blink_x_2 + 1;
th_blink <= th_blink_6;
end
th_blink_9: begin
$display("led = %d", LED);
th_blink <= th_blink_10;
end
th_blink_10: begin
count <= count + 1;
th_blink <= th_blink_11;
end
th_blink_11: begin
_th_blink_i_1 <= _th_blink_i_1 + 1;
th_blink <= th_blink_4;
end
th_blink_12: begin
LED <= 0;
th_blink <= th_blink_13;
end
th_blink_13: begin
count <= 0;
th_blink <= th_blink_14;
end
th_blink_14: begin
_th_blink_i_1 <= 0;
th_blink <= th_blink_15;
end
th_blink_15: begin
if(_th_blink_i_1 < _th_blink_times_0) begin
th_blink <= th_blink_16;
end else begin
th_blink <= th_blink_20;
end
end
th_blink_16: begin
LED <= count[1:0];
th_blink <= th_blink_17;
end
th_blink_17: begin
$display("led = %d", LED);
th_blink <= th_blink_18;
end
th_blink_18: begin
count <= count + 1;
th_blink <= th_blink_19;
end
th_blink_19: begin
_th_blink_i_1 <= _th_blink_i_1 + 1;
th_blink <= th_blink_15;
end
th_blink_20: begin
LED <= 0;
th_blink <= th_blink_21;
end
th_blink_21: begin
count <= 0;
th_blink <= th_blink_22;
end
th_blink_22: begin
_th_blink_i_1 <= 0;
th_blink <= th_blink_23;
end
th_blink_23: begin
if(_th_blink_i_1 < _th_blink_times_0) begin
th_blink <= th_blink_24;
end else begin
th_blink <= th_blink_28;
end
end
th_blink_24: begin
LED <= { count[6], count[4], count[2], count[0] };
th_blink <= th_blink_25;
end
th_blink_25: begin
$display("led = %d", LED);
th_blink <= th_blink_26;
end
th_blink_26: begin
count <= count + 1;
th_blink <= th_blink_27;
end
th_blink_27: begin
_th_blink_i_1 <= _th_blink_i_1 + 1;
th_blink <= th_blink_23;
end
endcase
end
end
endmodule
"""
def test():
veriloggen.reset()
test_module = thread_slice.mkTest()
code = test_module.to_verilog()
from pyverilog.vparser.parser import VerilogParser
from pyverilog.ast_code_generator.codegen import ASTCodeGenerator
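# Round-trip the golden string through pyverilog's parser and code generator so that
# both sides are normalized to the same formatting before comparison.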
parser = VerilogParser()
expected_ast = parser.parse(expected_verilog)
codegen = ASTCodeGenerator()
expected_code = codegen.visit(expected_ast)
assert(expected_code == code)
|
import keras
import tensorflow as tf
import keras.backend.tensorflow_backend as K
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
# config.gpu_options.per_process_gpu_memory_fraction = 0.9
sess = tf.Session(config=config)
K.set_session(sess)
import os
import sys
sys.path.insert(0, '../')
from models.crnn import crnn
from data_utils.transform import reshape_to_target, pre_processing
from .ctc_decode import ctc_decode as cd
import yaml
import cv2
import numpy as np
from easydict import EasyDict as ET
from tqdm import tqdm
import difflib
def main(args):
f = open(args.config)
cfgs = yaml.safe_load(f)
f.close()
cfgs = ET(cfgs)
test_list = cfgs.TEST_LIST
image_size = cfgs.IMAGE_SIZE
charset = cfgs.CHARSET
weight = cfgs.WEIGHT
h, w, c = image_size.split(',')
image_size = (int(h), int(w), int(c))
with open(charset) as f:
charset = f.readline().strip('\n')
f.close()
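# +1 for the CTC blank label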
nb_classes = len(charset) + 1
model, *_ = crnn(nb_classes, image_size)
model.load_weights(weight, by_name=True)
test_list = open(test_list).readlines()
line_acc = 0.
char_acc = 0.
total_test = 0
print('start test..')
for item in tqdm(test_list):
img_path, label_str = item.strip('\n').split('\t')
img = cv2.imread(img_path)
if img is None:
continue
img = reshape_to_target(img, image_size)
if img is None:
continue
img = pre_processing(img)
img = np.expand_dims(img, axis=0)
prob = model.predict(img)
result_str = cd(prob, charset)
# compute str score
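# SequenceMatcher.ratio() is 2*M/T, where M is the number of matching characters and
# T the combined length, e.g. ratio('hello', 'hallo') = 2*4/10 = 0.8; an exact match gives 1.0.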
score = difflib.SequenceMatcher(None, result_str, label_str).ratio()
if score == 1.0:
line_acc += 1.0
char_acc += score
total_test += 1
print('test done..')
print('Line-wise acc: {}%'.format((line_acc/total_test)*100))
print('Char-wise acc: {}%'.format((char_acc/total_test)*100))
|
import sys
try:
from django.conf import settings
settings.configure(
DEBUG=True,
USE_TZ=True,
DATABASES={
"default": {
"ENGINE": "django.db.backends.sqlite3",
}
},
ROOT_URLCONF="normal_admin.urls",
INSTALLED_APPS=[
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sites",
"normal_admin",
],
SITE_ID=1,
NOSE_ARGS=['-s'],
)
try:
import django
setup = django.setup
except AttributeError:
pass
else:
setup()
from django_nose import NoseTestSuiteRunner
except ImportError:
import traceback
traceback.print_exc()
raise ImportError("To fix this error, run: pip install -r requirements-test.txt")
def run_tests(*test_args):
if not test_args:
test_args = ['tests']
# Run tests
test_runner = NoseTestSuiteRunner(verbosity=1)
failures = test_runner.run_tests(test_args)
if failures:
sys.exit(failures)
if __name__ == '__main__':
run_tests(*sys.argv[1:])
|
from social_core.backends.azuread_tenant import AzureADTenantOAuth2
from social_core.backends.azuread_b2c import AzureADB2COAuth2
from tethys_services.backends.multi_tenant_mixin import MultiTenantMixin
class AzureADTenantOAuth2MultiTenant(MultiTenantMixin, AzureADTenantOAuth2):
pass
class AzureADB2COAuth2MultiTenant(MultiTenantMixin, AzureADB2COAuth2):
pass
|
import unittest
import sys
sys.path.append('bin')
from umdinst import wrap
class TestIsSourceFile(unittest.TestCase):
def testHasExtension(self):
self.assertTrue(wrap.hasextension('foo.c'))
self.assertFalse(wrap.hasextension('bar'))
def testIsSourceFile(self):
self.assertTrue(wrap.issourcefile('foo.c'))
self.assertTrue(wrap.issourcefile('foo.cpp'))
self.assertTrue(wrap.issourcefile('foo.cc'))
self.assertTrue(wrap.issourcefile('foo.cxx'))
self.assertTrue(wrap.issourcefile('foo.C'))
self.assertTrue(wrap.issourcefile('foo.upc'))
self.assertTrue(wrap.issourcefile('foo.f'))
self.assertTrue(wrap.issourcefile('foo.f77'))
self.assertTrue(wrap.issourcefile('foo.f90'))
self.assertFalse(wrap.issourcefile('foo'))
self.assertFalse(wrap.issourcefile('foo.exe'))
self.assertFalse(wrap.issourcefile('foo.o'))
self.assertFalse(wrap.issourcefile('foo.a'))
if __name__ == '__main__':
unittest.main()
|
import numpy as np
import matplotlib.pyplot as plt
"""
As in evAccum.py, the direct simulation for a single agent. Here the code is modified to stop when the agent hits a
boundary and also tracks where the LLR paths are. This allows us to output an array of exit times and compute the
survival probability.
"""
# Parameters for the simulation
length = 100
mean1 = 0.1
mean2 = -0.1
var1 = 1
var2 = 1
bdy_plus = 0.9
bdy_minus = -3
# # Observations are drawn from the Norm(mean1, var1) distribution.
# obs = np.sqrt(var1) * np.random.randn(length) + mean1 # scale and translate draws from the standard distribution
runs = int(1e3)
max_time = 500
exit_times = np.zeros(runs)
paths_plus = np.zeros(max_time) # How many sims have chosen H^+
paths_minus = np.zeros(max_time) # ^^ H^-
paths_pos = np.zeros(max_time) # How many sims have not exited and are positive
paths_neg = np.zeros(max_time) # How many sims have not exited and are negative
correct = 0
class Dist:
"""We define a class for distributions so that we can easily access the truth distributions rather than writing out
the formula for the distribution each time we want to use it."""
def __init__(self, mean, var):
self.mean = mean
self.var = var
def prob(self, x):
return np.exp(-np.power(x - self.mean, 2) / (2*self.var))/(np.sqrt(2 * np.pi * self.var))
pos = Dist(mean1, var1) # the positive state distribution
neg = Dist(mean2, var2)
def compute_llr(x_array, dist1, dist2):
"""
Computes the log-likelihood ratio for a given array of observations.
:param x_array: an array of observations
:param dist1: the positive truth distribution
:param dist2: the negative truth distribution
:return: an array the size of x_array of LLRs
"""
return np.log(dist1(x_array)/dist2(x_array))
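# Worked example (hypothetical observation): with pos = N(0.1, 1) and neg = N(-0.1, 1)
# the LLR reduces to 0.2 * x, so compute_llr(np.array([0.5]), pos.prob, neg.prob)
# returns approximately array([0.1]).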
# Compute and store the LLRs as a vector of accumulated evidence.
for r in range(runs):
ev = 0
T = 0
time = 0
while (ev < bdy_plus) and (ev > bdy_minus) and (time < max_time):
if ev >= 0:
paths_pos[time] += 1
else:
paths_neg[time] += 1
time += 1
obs = np.sqrt(var1) * np.random.randn(1) + mean1
ev += compute_llr(obs, pos.prob, neg.prob)
T += 1
if ev >= bdy_plus:
correct += 1
paths_plus[T:] += 1
else:
paths_minus[T:] += 1
exit_times[r] = T
# The last part prints the fraction of correct decisions, plots a normalized histogram
# of the exit times, and saves the exit-time and path data before calling show().
print("Correct: " + str(100 * correct / runs) + "%")
plt.hist(exit_times, 50, density=True, facecolor='green', alpha=0.75)
np.save('exit_times.npy', exit_times)
path_data = np.vstack((paths_plus, paths_minus, paths_pos, paths_neg))
np.save('path_data.npy', path_data)
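# The docstring mentions the survival probability; a minimal sketch (not part of the
# original run) would estimate it from the recorded exit times, e.g.:
#   surv = np.array([np.mean(exit_times > t) for t in range(max_time)])
#   np.save('survival_prob.npy', surv)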
plt.xlabel('Exit time (steps)')
plt.ylabel('Density')
plt.title('Evidence Accum')
# plt.text(60, .025, r'$\mu=100,\ \sigma=15$')
# plt.axis([0, length, 0, 1])
# plt.grid(True)
plt.show()
|
# ----------------------------------------------------------------------------
# pyglet
# Copyright (c) 2006-2008 Alex Holkner
# Copyright (c) 2008-2021 pyglet contributors
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of pyglet nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
import unicodedata
import urllib.parse
from ctypes import *
from functools import lru_cache
import pyglet
from pyglet.window import WindowException, MouseCursorException
from pyglet.window import MouseCursor, DefaultMouseCursor, ImageMouseCursor
from pyglet.window import BaseWindow, _PlatformEventHandler, _ViewEventHandler
from pyglet.window import key
from pyglet.window import mouse
from pyglet.event import EventDispatcher
from pyglet.canvas.xlib import XlibCanvas
from pyglet.libs.x11 import xlib
from pyglet.libs.x11 import cursorfont
from pyglet.util import asbytes
try:
from pyglet.libs.x11 import xsync
_have_xsync = True
except ImportError:
_have_xsync = False
class mwmhints_t(Structure):
_fields_ = [
('flags', c_uint32),
('functions', c_uint32),
('decorations', c_uint32),
('input_mode', c_int32),
('status', c_uint32)
]
# XXX: wraptypes can't parse the header this function is in yet
XkbSetDetectableAutoRepeat = xlib._lib.XkbSetDetectableAutoRepeat
XkbSetDetectableAutoRepeat.restype = c_int
XkbSetDetectableAutoRepeat.argtypes = [POINTER(xlib.Display), c_int, POINTER(c_int)]
_can_detect_autorepeat = None
XA_CARDINAL = 6 # Xatom.h:14
XA_ATOM = 4
XDND_VERSION = 5
# Do we have the November 2000 UTF8 extension?
_have_utf8 = hasattr(xlib._lib, 'Xutf8TextListToTextProperty')
# symbol,ctrl -> motion mapping
_motion_map = {
(key.UP, False): key.MOTION_UP,
(key.RIGHT, False): key.MOTION_RIGHT,
(key.DOWN, False): key.MOTION_DOWN,
(key.LEFT, False): key.MOTION_LEFT,
(key.RIGHT, True): key.MOTION_NEXT_WORD,
(key.LEFT, True): key.MOTION_PREVIOUS_WORD,
(key.HOME, False): key.MOTION_BEGINNING_OF_LINE,
(key.END, False): key.MOTION_END_OF_LINE,
(key.PAGEUP, False): key.MOTION_PREVIOUS_PAGE,
(key.PAGEDOWN, False): key.MOTION_NEXT_PAGE,
(key.HOME, True): key.MOTION_BEGINNING_OF_FILE,
(key.END, True): key.MOTION_END_OF_FILE,
(key.BACKSPACE, False): key.MOTION_BACKSPACE,
(key.DELETE, False): key.MOTION_DELETE,
}
class XlibException(WindowException):
"""An X11-specific exception. This exception is probably a programming
error in pyglet."""
pass
class XlibMouseCursor(MouseCursor):
gl_drawable = False
hw_drawable = True
def __init__(self, cursor):
self.cursor = cursor
# Platform event data is single item, so use platform event handler directly.
XlibEventHandler = _PlatformEventHandler
ViewEventHandler = _ViewEventHandler
class XlibWindow(BaseWindow):
_x_display = None # X display connection
_x_screen_id = None # X screen index
_x_ic = None # X input context
_window = None # Xlib window handle
_override_redirect = False
_x = 0
_y = 0 # Last known window position
_mouse_exclusive_client = None # x,y of "real" mouse during exclusive
_mouse_buttons = [False] * 6 # State of each xlib button
_active = True
_applied_mouse_exclusive = False
_applied_keyboard_exclusive = False
_mapped = False
_lost_context = False
_lost_context_state = False
_enable_xsync = False
_current_sync_value = None
_current_sync_valid = False
_default_event_mask = (0x1ffffff & ~xlib.PointerMotionHintMask
& ~xlib.ResizeRedirectMask
& ~xlib.SubstructureNotifyMask)
def __init__(self, *args, **kwargs):
# Bind event handlers
self._event_handlers = {}
self._view_event_handlers = {}
for name in self._platform_event_names:
if not hasattr(self, name):
continue
func = getattr(self, name)
for message in func._platform_event_data:
if hasattr(func, '_view'):
self._view_event_handlers[message] = func
else:
self._event_handlers[message] = func
super(XlibWindow, self).__init__(*args, **kwargs)
global _can_detect_autorepeat
if _can_detect_autorepeat is None:
supported_rtrn = c_int()
_can_detect_autorepeat = XkbSetDetectableAutoRepeat(self.display._display, c_int(1),
byref(supported_rtrn))
if _can_detect_autorepeat:
self.pressed_keys = set()
def _recreate(self, changes):
# If flipping to/from fullscreen, need to recreate the window. (This
# is the case with both override_redirect method and
# _NET_WM_STATE_FULLSCREEN).
#
# A possible improvement could be to just hide the top window,
# destroy the GLX window, and reshow it again when leaving fullscreen.
# This would prevent the floating window from being moved by the
# WM.
if 'fullscreen' in changes or 'resizable' in changes:
# clear out the GLX context
self.context.detach()
xlib.XDestroyWindow(self._x_display, self._window)
del self.display._window_map[self._window]
del self.display._window_map[self._view]
self._window = None
self._mapped = False
# TODO: detect state loss only by examining context share.
if 'context' in changes:
self._lost_context = True
self._lost_context_state = True
self._create()
def _create_xdnd_atoms(self, display):
self._xdnd_atoms = {
'XdndAware' : xlib.XInternAtom(display, asbytes('XdndAware'), False),
'XdndEnter' : xlib.XInternAtom(display, asbytes('XdndEnter'), False),
'XdndTypeList' : xlib.XInternAtom(display, asbytes('XdndTypeList'), False),
'XdndDrop' : xlib.XInternAtom(display, asbytes('XdndDrop'), False),
'XdndFinished' : xlib.XInternAtom(display, asbytes('XdndFinished'), False),
'XdndSelection' : xlib.XInternAtom(display, asbytes('XdndSelection'), False),
'XdndPosition' : xlib.XInternAtom(display, asbytes('XdndPosition'), False),
'XdndStatus' : xlib.XInternAtom(display, asbytes('XdndStatus'), False),
'XdndActionCopy' : xlib.XInternAtom(display, asbytes('XdndActionCopy'), False),
'text/uri-list' : xlib.XInternAtom(display, asbytes("text/uri-list"), False)
}
def _create(self):
# Unmap existing window if necessary while we fiddle with it.
if self._window and self._mapped:
self._unmap()
self._x_display = self.display._display
self._x_screen_id = self.display.x_screen
# Create X window if not already existing.
if not self._window:
root = xlib.XRootWindow(self._x_display, self._x_screen_id)
visual_info = self.config.get_visual_info()
visual = visual_info.visual
visual_id = xlib.XVisualIDFromVisual(visual)
default_visual = xlib.XDefaultVisual(self._x_display, self._x_screen_id)
default_visual_id = xlib.XVisualIDFromVisual(default_visual)
window_attributes = xlib.XSetWindowAttributes()
if visual_id != default_visual_id:
window_attributes.colormap = xlib.XCreateColormap(self._x_display, root,
visual, xlib.AllocNone)
else:
window_attributes.colormap = xlib.XDefaultColormap(self._x_display,
self._x_screen_id)
window_attributes.bit_gravity = xlib.StaticGravity
# Issue 287: Compiz on Intel/Mesa doesn't draw window decoration
# unless CWBackPixel is given in mask. Should have
# no effect on other systems, so it's set
# unconditionally.
mask = xlib.CWColormap | xlib.CWBitGravity | xlib.CWBackPixel
if self._fullscreen:
width, height = self.screen.width, self.screen.height
self._view_x = (width - self._width) // 2
self._view_y = (height - self._height) // 2
else:
width, height = self._width, self._height
self._view_x = self._view_y = 0
self._window = xlib.XCreateWindow(self._x_display, root,
0, 0, width, height, 0, visual_info.depth,
xlib.InputOutput, visual, mask,
byref(window_attributes))
self._view = xlib.XCreateWindow(self._x_display,
self._window, self._view_x, self._view_y,
self._width, self._height, 0, visual_info.depth,
xlib.InputOutput, visual, mask,
byref(window_attributes))
xlib.XMapWindow(self._x_display, self._view)
xlib.XSelectInput(self._x_display, self._view, self._default_event_mask)
self.display._window_map[self._window] = self.dispatch_platform_event
self.display._window_map[self._view] = self.dispatch_platform_event_view
self.canvas = XlibCanvas(self.display, self._view)
self.context.attach(self.canvas)
self.context.set_vsync(self._vsync) # XXX ?
# Setting null background pixmap disables drawing the background,
# preventing flicker while resizing (in theory).
#
# Issue 287: Compiz on Intel/Mesa doesn't draw window decoration if
# this is called. As it doesn't seem to have any
# effect anyway, it's just commented out.
# xlib.XSetWindowBackgroundPixmap(self._x_display, self._window, 0)
self._enable_xsync = (pyglet.options['xsync'] and
self.display._enable_xsync and
self.config.double_buffer)
# Set supported protocols
protocols = []
protocols.append(xlib.XInternAtom(self._x_display, asbytes('WM_DELETE_WINDOW'), False))
if self._enable_xsync:
protocols.append(xlib.XInternAtom(self._x_display,
asbytes('_NET_WM_SYNC_REQUEST'),
False))
protocols = (c_ulong * len(protocols))(*protocols)
xlib.XSetWMProtocols(self._x_display, self._window, protocols, len(protocols))
# Create window resize sync counter
if self._enable_xsync:
value = xsync.XSyncValue()
self._sync_counter = xlib.XID(xsync.XSyncCreateCounter(self._x_display, value))
atom = xlib.XInternAtom(self._x_display,
asbytes('_NET_WM_SYNC_REQUEST_COUNTER'), False)
ptr = pointer(self._sync_counter)
xlib.XChangeProperty(self._x_display, self._window,
atom, XA_CARDINAL, 32,
xlib.PropModeReplace,
cast(ptr, POINTER(c_ubyte)), 1)
# Atoms required for Xdnd
self._create_xdnd_atoms(self._x_display)
# Support for drag and dropping files needs to be enabled.
if self._file_drops:
# Some variables set because there are 4 different drop events that need shared data.
self._xdnd_source = None
self._xdnd_version = None
self._xdnd_format = None
self._xdnd_position = (0, 0) # For position callback.
VERSION = c_ulong(int(XDND_VERSION))
ptr = pointer(VERSION)
xlib.XChangeProperty(self._x_display, self._window,
self._xdnd_atoms['XdndAware'], XA_ATOM, 32,
xlib.PropModeReplace,
cast(ptr, POINTER(c_ubyte)), 1)
# Set window attributes
attributes = xlib.XSetWindowAttributes()
attributes_mask = 0
self._override_redirect = False
if self._fullscreen:
if pyglet.options['xlib_fullscreen_override_redirect']:
# Try not to use this any more, it causes problems; disabled
# by default in favour of _NET_WM_STATE_FULLSCREEN.
attributes.override_redirect = self._fullscreen
attributes_mask |= xlib.CWOverrideRedirect
self._override_redirect = True
else:
self._set_wm_state('_NET_WM_STATE_FULLSCREEN')
if self._fullscreen:
xlib.XMoveResizeWindow(self._x_display, self._window,
self.screen.x, self.screen.y,
self.screen.width, self.screen.height)
else:
xlib.XResizeWindow(self._x_display, self._window, self._width, self._height)
xlib.XChangeWindowAttributes(self._x_display, self._window,
attributes_mask, byref(attributes))
# Set style
styles = {
self.WINDOW_STYLE_DEFAULT: '_NET_WM_WINDOW_TYPE_NORMAL',
self.WINDOW_STYLE_DIALOG: '_NET_WM_WINDOW_TYPE_DIALOG',
self.WINDOW_STYLE_TOOL: '_NET_WM_WINDOW_TYPE_UTILITY',
}
if self._style in styles:
self._set_atoms_property('_NET_WM_WINDOW_TYPE', (styles[self._style],))
elif self._style == self.WINDOW_STYLE_BORDERLESS:
MWM_HINTS_DECORATIONS = 1 << 1
PROP_MWM_HINTS_ELEMENTS = 5
mwmhints = mwmhints_t()
mwmhints.flags = MWM_HINTS_DECORATIONS
mwmhints.decorations = 0
name = xlib.XInternAtom(self._x_display, asbytes('_MOTIF_WM_HINTS'), False)
xlib.XChangeProperty(self._x_display, self._window,
name, name, 32, xlib.PropModeReplace,
cast(pointer(mwmhints), POINTER(c_ubyte)),
PROP_MWM_HINTS_ELEMENTS)
# Set resizeable
if not self._resizable and not self._fullscreen:
self.set_minimum_size(self._width, self._height)
self.set_maximum_size(self._width, self._height)
# Set caption
self.set_caption(self._caption)
# Set WM_CLASS for modern desktop environments
self.set_wm_class(self._caption)
# this is supported by some compositors (ie gnome-shell), and more to come
# see: http://standards.freedesktop.org/wm-spec/wm-spec-latest.html#idp6357888
_NET_WM_BYPASS_COMPOSITOR_HINT_ON = c_ulong(int(self._fullscreen))
name = xlib.XInternAtom(self._x_display, asbytes('_NET_WM_BYPASS_COMPOSITOR'), False)
ptr = pointer(_NET_WM_BYPASS_COMPOSITOR_HINT_ON)
xlib.XChangeProperty(self._x_display, self._window,
name, XA_CARDINAL, 32,
xlib.PropModeReplace,
cast(ptr, POINTER(c_ubyte)), 1)
# Create input context. A good but very outdated reference for this
# is http://www.sbin.org/doc/Xlib/chapt_11.html
if _have_utf8 and not self._x_ic:
if not self.display._x_im:
xlib.XSetLocaleModifiers(asbytes('@im=none'))
self.display._x_im = xlib.XOpenIM(self._x_display, None, None, None)
xlib.XFlush(self._x_display)
# Need to set argtypes on this function because it's vararg,
# and ctypes guesses wrong.
xlib.XCreateIC.argtypes = [xlib.XIM,
c_char_p, c_int,
c_char_p, xlib.Window,
c_char_p, xlib.Window,
c_void_p]
self._x_ic = xlib.XCreateIC(self.display._x_im,
asbytes('inputStyle'),
xlib.XIMPreeditNothing | xlib.XIMStatusNothing,
asbytes('clientWindow'), self._window,
asbytes('focusWindow'), self._window,
None)
filter_events = c_ulong()
xlib.XGetICValues(self._x_ic, 'filterEvents', byref(filter_events), None)
self._default_event_mask |= filter_events.value
xlib.XSetICFocus(self._x_ic)
self.switch_to()
if self._visible:
self.set_visible(True)
self.set_mouse_platform_visible()
self._applied_mouse_exclusive = None
self._update_exclusivity()
def _map(self):
if self._mapped:
return
# Map the window, wait for map event before continuing.
xlib.XSelectInput(self._x_display, self._window, xlib.StructureNotifyMask)
xlib.XMapRaised(self._x_display, self._window)
e = xlib.XEvent()
while True:
xlib.XNextEvent(self._x_display, e)
if e.type == xlib.ConfigureNotify:
self._width = e.xconfigure.width
self._height = e.xconfigure.height
elif e.type == xlib.MapNotify:
break
xlib.XSelectInput(self._x_display, self._window, self._default_event_mask)
self._mapped = True
if self._override_redirect:
# Possibly an override_redirect issue.
self.activate()
self._update_view_size()
self.dispatch_event('on_resize', self._width, self._height)
self.dispatch_event('on_show')
self.dispatch_event('on_expose')
def _unmap(self):
if not self._mapped:
return
xlib.XSelectInput(self._x_display, self._window, xlib.StructureNotifyMask)
xlib.XUnmapWindow(self._x_display, self._window)
e = xlib.XEvent()
while True:
xlib.XNextEvent(self._x_display, e)
if e.type == xlib.UnmapNotify:
break
xlib.XSelectInput(self._x_display, self._window, self._default_event_mask)
self._mapped = False
def _get_root(self):
attributes = xlib.XWindowAttributes()
xlib.XGetWindowAttributes(self._x_display, self._window, byref(attributes))
return attributes.root
def _is_reparented(self):
root = c_ulong()
parent = c_ulong()
children = pointer(c_ulong())
n_children = c_uint()
xlib.XQueryTree(self._x_display, self._window,
byref(root), byref(parent), byref(children),
byref(n_children))
return root.value != parent.value
def close(self):
if not self._window:
return
self.context.destroy()
self._unmap()
if self._window:
xlib.XDestroyWindow(self._x_display, self._window)
del self.display._window_map[self._window]
del self.display._window_map[self._view]
self._window = None
self._view_event_handlers.clear()
self._event_handlers.clear()
if _have_utf8:
xlib.XDestroyIC(self._x_ic)
self._x_ic = None
super(XlibWindow, self).close()
def switch_to(self):
if self.context:
self.context.set_current()
def flip(self):
self.draw_mouse_cursor()
# TODO canvas.flip?
if self.context:
self.context.flip()
self._sync_resize()
def set_vsync(self, vsync: bool) -> None:
if pyglet.options['vsync'] is not None:
vsync = pyglet.options['vsync']
super().set_vsync(vsync)
self.context.set_vsync(vsync)
def set_caption(self, caption):
if caption is None:
caption = ''
self._caption = caption
self._set_text_property('WM_NAME', caption, allow_utf8=False)
self._set_text_property('WM_ICON_NAME', caption, allow_utf8=False)
self._set_text_property('_NET_WM_NAME', caption)
self._set_text_property('_NET_WM_ICON_NAME', caption)
def set_wm_class(self, name):
# WM_CLASS can only contain Ascii characters
try:
name = name.encode('ascii')
except UnicodeEncodeError:
name = "pyglet"
hint = xlib.XAllocClassHint()
hint.contents.res_class = asbytes(name)
hint.contents.res_name = asbytes(name.lower())
xlib.XSetClassHint(self._x_display, self._window, hint.contents)
xlib.XFree(hint)
def get_caption(self):
return self._caption
def set_size(self, width: int, height: int) -> None:
super().set_size(width, height)
if not self._resizable:
self.set_minimum_size(width, height)
self.set_maximum_size(width, height)
xlib.XResizeWindow(self._x_display, self._window, width, height)
self._update_view_size()
self.dispatch_event('on_resize', width, height)
def _update_view_size(self):
xlib.XResizeWindow(self._x_display, self._view, self._width, self._height)
def set_location(self, x, y):
if self._is_reparented():
# Assume the window manager has reparented our top-level window
# only once, in which case attributes.x/y give the offset from
# the frame to the content window. Better solution would be
# to use _NET_FRAME_EXTENTS, where supported.
attributes = xlib.XWindowAttributes()
xlib.XGetWindowAttributes(self._x_display, self._window, byref(attributes))
# XXX at least under KDE's WM these attrs are both 0
x -= attributes.x
y -= attributes.y
xlib.XMoveWindow(self._x_display, self._window, x, y)
def get_location(self):
child = xlib.Window()
x = c_int()
y = c_int()
xlib.XTranslateCoordinates(self._x_display,
self._window,
self._get_root(),
0, 0,
byref(x),
byref(y),
byref(child))
return x.value, y.value
def activate(self):
# Issue 218
if self._x_display and self._window:
xlib.XSetInputFocus(self._x_display, self._window, xlib.RevertToParent, xlib.CurrentTime)
def set_visible(self, visible: bool = True) -> None:
super().set_visible(visible)
if visible:
self._map()
else:
self._unmap()
def set_minimum_size(self, width: int, height: int) -> None:
super().set_minimum_size(width, height)
self._set_wm_normal_hints()
def set_maximum_size(self, width: int, height: int) -> None:
super().set_maximum_size(width, height)
self._set_wm_normal_hints()
def minimize(self):
xlib.XIconifyWindow(self._x_display, self._window, self._x_screen_id)
def maximize(self):
self._set_wm_state('_NET_WM_STATE_MAXIMIZED_HORZ',
'_NET_WM_STATE_MAXIMIZED_VERT')
@staticmethod
def _downsample_1bit(pixelarray):
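# Pack a flat sequence of pixel values into a 1 bit-per-pixel bitmap, least-significant
# bit first within each byte, matching the layout XCreateBitmapFromData expects below.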
byte_list = []
value = 0
for i, pixel in enumerate(pixelarray):
index = i % 8
if pixel:
value |= 1 << index
if index == 7:
byte_list.append(value)
value = 0
return bytes(byte_list)
@lru_cache()
def _create_cursor_from_image(self, cursor):
"""Creates platform cursor from an ImageCursor instance."""
texture = cursor.texture
width = texture.width
height = texture.height
alpha_luma_bytes = texture.get_image_data().get_data('AL', -width * 2)
mask_data = self._downsample_1bit(alpha_luma_bytes[0::2])
bmp_data = self._downsample_1bit(alpha_luma_bytes[1::2])
bitmap = xlib.XCreateBitmapFromData(self._x_display, self._window, bmp_data, width, height)
mask = xlib.XCreateBitmapFromData(self._x_display, self._window, mask_data, width, height)
white = xlib.XColor(red=65535, green=65535, blue=65535) # background color
black = xlib.XColor() # foreground color
# hot_x/y must be within the image dimension, or the cursor will not display:
hot_x = min(max(0, int(self._mouse_cursor.hot_x)), width)
hot_y = min(max(0, int(height - self._mouse_cursor.hot_y)), height)
cursor = xlib.XCreatePixmapCursor(self._x_display, bitmap, mask, white, black, hot_x, hot_y)
xlib.XFreePixmap(self._x_display, bitmap)
xlib.XFreePixmap(self._x_display, mask)
return cursor
def set_mouse_platform_visible(self, platform_visible=None):
if not self._window:
return
if platform_visible is None:
platform_visible = self._mouse_visible and not self._mouse_cursor.gl_drawable
if platform_visible is False:
# Hide pointer by creating an empty cursor:
black = xlib.XColor()
bitmap = xlib.XCreateBitmapFromData(self._x_display, self._window, bytes(8), 8, 8)
cursor = xlib.XCreatePixmapCursor(self._x_display, bitmap, bitmap, black, black, 0, 0)
xlib.XDefineCursor(self._x_display, self._window, cursor)
xlib.XFreeCursor(self._x_display, cursor)
xlib.XFreePixmap(self._x_display, bitmap)
elif isinstance(self._mouse_cursor, ImageMouseCursor) and self._mouse_cursor.hw_drawable:
# Create a custom hardware cursor:
cursor = self._create_cursor_from_image(self._mouse_cursor)
xlib.XDefineCursor(self._x_display, self._window, cursor)
else:
# Restore standard hardware cursor:
if isinstance(self._mouse_cursor, XlibMouseCursor):
xlib.XDefineCursor(self._x_display, self._window, self._mouse_cursor.cursor)
else:
xlib.XUndefineCursor(self._x_display, self._window)
def set_mouse_position(self, x, y):
xlib.XWarpPointer(self._x_display,
0, # src window
self._window, # dst window
0, 0, # src x, y
0, 0, # src w, h
x, self._height - y)
def _update_exclusivity(self):
mouse_exclusive = self._active and self._mouse_exclusive
keyboard_exclusive = self._active and self._keyboard_exclusive
if mouse_exclusive != self._applied_mouse_exclusive:
if mouse_exclusive:
self.set_mouse_platform_visible(False)
# Restrict to client area
xlib.XGrabPointer(self._x_display, self._window,
True,
0,
xlib.GrabModeAsync,
xlib.GrabModeAsync,
self._window,
0,
xlib.CurrentTime)
# Move pointer to center of window
x = self._width // 2
y = self._height // 2
self._mouse_exclusive_client = x, y
self.set_mouse_position(x, y)
elif self._fullscreen and not self.screen._xinerama:
# Restrict to fullscreen area (prevent viewport scrolling)
self.set_mouse_position(0, 0)
r = xlib.XGrabPointer(self._x_display, self._view,
True, 0,
xlib.GrabModeAsync,
xlib.GrabModeAsync,
self._view,
0,
xlib.CurrentTime)
if r:
# Failed to grab, try again later
self._applied_mouse_exclusive = None
return
self.set_mouse_platform_visible()
else:
# Unclip
xlib.XUngrabPointer(self._x_display, xlib.CurrentTime)
self.set_mouse_platform_visible()
self._applied_mouse_exclusive = mouse_exclusive
if keyboard_exclusive != self._applied_keyboard_exclusive:
if keyboard_exclusive:
xlib.XGrabKeyboard(self._x_display,
self._window,
False,
xlib.GrabModeAsync,
xlib.GrabModeAsync,
xlib.CurrentTime)
else:
xlib.XUngrabKeyboard(self._x_display, xlib.CurrentTime)
self._applied_keyboard_exclusive = keyboard_exclusive
def set_exclusive_mouse(self, exclusive=True):
if exclusive == self._mouse_exclusive:
return
super().set_exclusive_mouse(exclusive)
self._update_exclusivity()
def set_exclusive_keyboard(self, exclusive=True):
if exclusive == self._keyboard_exclusive:
return
super().set_exclusive_keyboard(exclusive)
self._update_exclusivity()
def get_system_mouse_cursor(self, name):
if name == self.CURSOR_DEFAULT:
return DefaultMouseCursor()
# NQR means default shape is not pretty... surely there is another
# cursor font?
cursor_shapes = {
self.CURSOR_CROSSHAIR: cursorfont.XC_crosshair,
self.CURSOR_HAND: cursorfont.XC_hand2,
self.CURSOR_HELP: cursorfont.XC_question_arrow, # NQR
self.CURSOR_NO: cursorfont.XC_pirate, # NQR
self.CURSOR_SIZE: cursorfont.XC_fleur,
self.CURSOR_SIZE_UP: cursorfont.XC_top_side,
self.CURSOR_SIZE_UP_RIGHT: cursorfont.XC_top_right_corner,
self.CURSOR_SIZE_RIGHT: cursorfont.XC_right_side,
self.CURSOR_SIZE_DOWN_RIGHT: cursorfont.XC_bottom_right_corner,
self.CURSOR_SIZE_DOWN: cursorfont.XC_bottom_side,
self.CURSOR_SIZE_DOWN_LEFT: cursorfont.XC_bottom_left_corner,
self.CURSOR_SIZE_LEFT: cursorfont.XC_left_side,
self.CURSOR_SIZE_UP_LEFT: cursorfont.XC_top_left_corner,
self.CURSOR_SIZE_UP_DOWN: cursorfont.XC_sb_v_double_arrow,
self.CURSOR_SIZE_LEFT_RIGHT: cursorfont.XC_sb_h_double_arrow,
self.CURSOR_TEXT: cursorfont.XC_xterm,
self.CURSOR_WAIT: cursorfont.XC_watch,
self.CURSOR_WAIT_ARROW: cursorfont.XC_watch, # NQR
}
if name not in cursor_shapes:
raise MouseCursorException('Unknown cursor name "%s"' % name)
cursor = xlib.XCreateFontCursor(self._x_display, cursor_shapes[name])
return XlibMouseCursor(cursor)
def set_icon(self, *images):
# Careful! With a 32-bit data format, XChangeProperty expects an array
# of native longs (which may be 64 bits wide), so pad the high bytes of
# each pixel where necessary.
import sys
fmt = {('little', 4): 'BGRA',
('little', 8): 'BGRAAAAA',
('big', 4): 'ARGB',
('big', 8): 'AAAAARGB'}[(sys.byteorder, sizeof(c_ulong))]
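# _NET_WM_ICON is a CARDINAL array: width, height, then width*height ARGB
# pixels, one value per unsigned long (hence the byte-order and word-size
# dependent pixel format above).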
data = asbytes('')
for image in images:
image = image.get_image_data()
pitch = -(image.width * len(fmt))
s = c_buffer(sizeof(c_ulong) * 2)
memmove(s, cast((c_ulong * 2)(image.width, image.height), POINTER(c_ubyte)), len(s))
data += s.raw + image.get_data(fmt, pitch)
buffer = (c_ubyte * len(data))()
memmove(buffer, data, len(data))
atom = xlib.XInternAtom(self._x_display, asbytes('_NET_WM_ICON'), False)
xlib.XChangeProperty(self._x_display, self._window, atom, XA_CARDINAL,
32, xlib.PropModeReplace, buffer, len(data)//sizeof(c_ulong))
# Private utility
def _set_wm_normal_hints(self):
hints = xlib.XAllocSizeHints().contents
if self._minimum_size:
hints.flags |= xlib.PMinSize
hints.min_width, hints.min_height = self._minimum_size
if self._maximum_size:
hints.flags |= xlib.PMaxSize
hints.max_width, hints.max_height = self._maximum_size
xlib.XSetWMNormalHints(self._x_display, self._window, byref(hints))
def _set_text_property(self, name, value, allow_utf8=True):
atom = xlib.XInternAtom(self._x_display, asbytes(name), False)
if not atom:
raise XlibException('Undefined atom "%s"' % name)
text_property = xlib.XTextProperty()
if _have_utf8 and allow_utf8:
buf = create_string_buffer(value.encode('utf8'))
result = xlib.Xutf8TextListToTextProperty(self._x_display,
cast(pointer(buf), c_char_p),
1, xlib.XUTF8StringStyle,
byref(text_property))
if result < 0:
raise XlibException('Could not create UTF8 text property')
else:
buf = create_string_buffer(value.encode('ascii', 'ignore'))
result = xlib.XStringListToTextProperty(
cast(pointer(buf), c_char_p), 1, byref(text_property))
if result < 0:
raise XlibException('Could not create text property')
xlib.XSetTextProperty(self._x_display, self._window, byref(text_property), atom)
# XXX <rj> Xlib doesn't like us freeing this
# xlib.XFree(text_property.value)
def _set_atoms_property(self, name, values, mode=xlib.PropModeReplace):
name_atom = xlib.XInternAtom(self._x_display, asbytes(name), False)
atoms = []
for value in values:
atoms.append(xlib.XInternAtom(self._x_display, asbytes(value), False))
atom_type = xlib.XInternAtom(self._x_display, asbytes('ATOM'), False)
if len(atoms):
atoms_ar = (xlib.Atom * len(atoms))(*atoms)
xlib.XChangeProperty(self._x_display, self._window,
name_atom, atom_type, 32, mode,
cast(pointer(atoms_ar), POINTER(c_ubyte)), len(atoms))
else:
net_wm_state = xlib.XInternAtom(self._x_display, asbytes('_NET_WM_STATE'), False)
if net_wm_state:
xlib.XDeleteProperty(self._x_display, self._window, net_wm_state)
def _set_wm_state(self, *states):
# Set property
net_wm_state = xlib.XInternAtom(self._x_display, asbytes('_NET_WM_STATE'), False)
atoms = []
for state in states:
atoms.append(xlib.XInternAtom(self._x_display, asbytes(state), False))
atom_type = xlib.XInternAtom(self._x_display, asbytes('ATOM'), False)
if len(atoms):
atoms_ar = (xlib.Atom * len(atoms))(*atoms)
xlib.XChangeProperty(self._x_display, self._window,
net_wm_state, atom_type, 32, xlib.PropModePrepend,
cast(pointer(atoms_ar), POINTER(c_ubyte)), len(atoms))
else:
xlib.XDeleteProperty(self._x_display, self._window, net_wm_state)
# Nudge the WM
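# EWMH: for a mapped window, state changes must also be requested via a
# ClientMessage to the root window; setting the property alone is only
# honoured before mapping.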
e = xlib.XEvent()
e.xclient.type = xlib.ClientMessage
e.xclient.message_type = net_wm_state
e.xclient.display = cast(self._x_display, POINTER(xlib.Display))
e.xclient.window = self._window
e.xclient.format = 32
e.xclient.data.l[0] = xlib.PropModePrepend
for i, atom in enumerate(atoms):
e.xclient.data.l[i + 1] = atom
xlib.XSendEvent(self._x_display, self._get_root(),
False, xlib.SubstructureRedirectMask, byref(e))
# Event handling
def dispatch_events(self):
self.dispatch_pending_events()
self._allow_dispatch_event = True
e = xlib.XEvent()
# Cache these in case window is closed from an event handler
_x_display = self._x_display
_window = self._window
_view = self._view
# Check for the events specific to this window
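# 0x1ffffff selects every defined X event mask bit (bits 0-24).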
while xlib.XCheckWindowEvent(_x_display, _window, 0x1ffffff, byref(e)):
# Key events are filtered by the xlib window event
# handler so they get a shot at the prefiltered event.
if e.xany.type not in (xlib.KeyPress, xlib.KeyRelease):
if xlib.XFilterEvent(e, 0):
continue
self.dispatch_platform_event(e)
# Check for the events specific to this view
while xlib.XCheckWindowEvent(_x_display, _view, 0x1ffffff, byref(e)):
# Key events are filtered by the xlib window event
# handler so they get a shot at the prefiltered event.
if e.xany.type not in (xlib.KeyPress, xlib.KeyRelease):
if xlib.XFilterEvent(e, 0):
continue
self.dispatch_platform_event_view(e)
# Generic events for this window (the window close event).
while xlib.XCheckTypedWindowEvent(_x_display, _window, xlib.ClientMessage, byref(e)):
self.dispatch_platform_event(e)
self._allow_dispatch_event = False
def dispatch_pending_events(self):
while self._event_queue:
EventDispatcher.dispatch_event(self, *self._event_queue.pop(0))
# Dispatch any context-related events
if self._lost_context:
self._lost_context = False
EventDispatcher.dispatch_event(self, 'on_context_lost')
if self._lost_context_state:
self._lost_context_state = False
EventDispatcher.dispatch_event(self, 'on_context_state_lost')
def dispatch_platform_event(self, e):
if self._applied_mouse_exclusive is None:
self._update_exclusivity()
event_handler = self._event_handlers.get(e.type)
if event_handler:
event_handler(e)
def dispatch_platform_event_view(self, e):
event_handler = self._view_event_handlers.get(e.type)
if event_handler:
event_handler(e)
@staticmethod
def _translate_modifiers(state):
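# Map X modifier state bits onto pyglet modifier flags. The Mod1/Mod4/Mod5
# assignments (Alt, Windows/Super, Scroll Lock) follow common XKB defaults
# and may differ between servers.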
modifiers = 0
if state & xlib.ShiftMask:
modifiers |= key.MOD_SHIFT
if state & xlib.ControlMask:
modifiers |= key.MOD_CTRL
if state & xlib.LockMask:
modifiers |= key.MOD_CAPSLOCK
if state & xlib.Mod1Mask:
modifiers |= key.MOD_ALT
if state & xlib.Mod2Mask:
modifiers |= key.MOD_NUMLOCK
if state & xlib.Mod4Mask:
modifiers |= key.MOD_WINDOWS
if state & xlib.Mod5Mask:
modifiers |= key.MOD_SCROLLLOCK
return modifiers
# Event handlers
"""
def _event_symbol(self, event):
# pyglet.self.key keysymbols are identical to X11 keysymbols, no
# need to map the keysymbol.
symbol = xlib.XKeycodeToKeysym(self._x_display, event.xkey.keycode, 0)
if symbol == 0:
# XIM event
return None
elif symbol not in key._key_names.keys():
symbol = key.user_key(event.xkey.keycode)
return symbol
"""
def _event_text_symbol(self, ev):
text = None
symbol = xlib.KeySym()
buffer = create_string_buffer(128)
# Look up raw keysym before XIM filters it (default for keypress and
# keyrelease)
count = xlib.XLookupString(ev.xkey, buffer, len(buffer) - 1, byref(symbol), None)
# Give XIM a shot
filtered = xlib.XFilterEvent(ev, ev.xany.window)
if ev.type == xlib.KeyPress and not filtered:
status = c_int()
if _have_utf8:
encoding = 'utf8'
count = xlib.Xutf8LookupString(self._x_ic,
ev.xkey,
buffer, len(buffer) - 1,
byref(symbol), byref(status))
if status.value == xlib.XBufferOverflow:
raise NotImplementedError('TODO: XIM buffer resize')
else:
encoding = 'ascii'
count = xlib.XLookupString(ev.xkey, buffer, len(buffer) - 1, byref(symbol), None)
if count:
status.value = xlib.XLookupBoth
if status.value & (xlib.XLookupChars | xlib.XLookupBoth):
text = buffer.value[:count].decode(encoding)
# Don't treat control characters (Unicode category Cc) as text, except Return.
if text and unicodedata.category(text) == 'Cc' and text != '\r':
text = None
symbol = symbol.value
# If the event is an XIM-filtered event, the keysym will be virtual
# (e.g., aacute instead of A after a dead key). Drop it; we don't
# want this kind of key event.
if ev.xkey.keycode == 0 and not filtered:
symbol = None
# pyglet key symbols are identical to X11 keysyms, so no mapping is
# needed. For keysyms outside the pyglet set, map the raw key code to
# a user key.
if symbol and symbol not in key._key_names and ev.xkey.keycode:
# Issue 353: Symbol is uppercase when shift key held down.
try:
symbol = ord(chr(symbol).lower())
except ValueError:
# Not a valid character code point; fall back to the keycode
symbol = key.user_key(ev.xkey.keycode)
else:
# If still not recognised, use the keycode
if symbol not in key._key_names:
symbol = key.user_key(ev.xkey.keycode)
if filtered:
# The event was filtered, text must be ignored, but the symbol is
# still good.
return None, symbol
return text, symbol
@staticmethod
def _event_text_motion(symbol, modifiers):
if modifiers & key.MOD_ALT:
return None
ctrl = modifiers & key.MOD_CTRL != 0
return _motion_map.get((symbol, ctrl), None)
@ViewEventHandler
@XlibEventHandler(xlib.KeyPress)
@XlibEventHandler(xlib.KeyRelease)
def _event_key_view(self, ev):
# Try to detect autorepeat ourselves if the server doesn't support it
# XXX: Doesn't always work, better off letting the server do it
global _can_detect_autorepeat
if not _can_detect_autorepeat and ev.type == xlib.KeyRelease:
# Look in the queue for a matching KeyPress with same timestamp,
# indicating an auto-repeat rather than actual key event.
saved = []
while True:
auto_event = xlib.XEvent()
result = xlib.XCheckWindowEvent(self._x_display,
self._window, xlib.KeyPress|xlib.KeyRelease,
byref(auto_event))
if not result:
break
saved.append(auto_event)
if auto_event.type == xlib.KeyRelease:
# just save this off for restoration back to the queue
continue
if ev.xkey.keycode == auto_event.xkey.keycode:
# Found a key repeat: dispatch EVENT_TEXT* event
text, symbol = self._event_text_symbol(auto_event)
modifiers = self._translate_modifiers(ev.xkey.state)
modifiers_ctrl = modifiers & (key.MOD_CTRL | key.MOD_ALT)
motion = self._event_text_motion(symbol, modifiers)
if motion:
if modifiers & key.MOD_SHIFT:
self.dispatch_event(
'on_text_motion_select', motion)
else:
self.dispatch_event('on_text_motion', motion)
elif text and not modifiers_ctrl:
self.dispatch_event('on_text', text)
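# The last saved event is the KeyPress belonging to this repeat;
# drop it and put the remaining saved events back on the queue.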
ditched = saved.pop()
for auto_event in reversed(saved):
xlib.XPutBackEvent(self._x_display, byref(auto_event))
return
else:
# Key code of press did not match, therefore no repeating
# is going on, stop searching.
break
# Whoops, put the events back, it's for real.
for auto_event in reversed(saved):
xlib.XPutBackEvent(self._x_display, byref(auto_event))
text, symbol = self._event_text_symbol(ev)
modifiers = self._translate_modifiers(ev.xkey.state)
modifiers_ctrl = modifiers & (key.MOD_CTRL | key.MOD_ALT)
motion = self._event_text_motion(symbol, modifiers)
if ev.type == xlib.KeyPress:
if symbol and (not _can_detect_autorepeat or symbol not in self.pressed_keys):
self.dispatch_event('on_key_press', symbol, modifiers)
if _can_detect_autorepeat:
self.pressed_keys.add(symbol)
if motion:
if modifiers & key.MOD_SHIFT:
self.dispatch_event('on_text_motion_select', motion)
else:
self.dispatch_event('on_text_motion', motion)
elif text and not modifiers_ctrl:
self.dispatch_event('on_text', text)
elif ev.type == xlib.KeyRelease:
if symbol:
self.dispatch_event('on_key_release', symbol, modifiers)
if _can_detect_autorepeat and symbol in self.pressed_keys:
self.pressed_keys.remove(symbol)
@XlibEventHandler(xlib.KeyPress)
@XlibEventHandler(xlib.KeyRelease)
def _event_key(self, ev):
return self._event_key_view(ev)
@ViewEventHandler
@XlibEventHandler(xlib.MotionNotify)
def _event_motionnotify_view(self, ev):
x = ev.xmotion.x
y = self.height - ev.xmotion.y
if self._mouse_in_window:
dx = x - self._mouse_x
dy = y - self._mouse_y
else:
dx = dy = 0
if self._applied_mouse_exclusive \
and (ev.xmotion.x, ev.xmotion.y) == self._mouse_exclusive_client:
# Ignore events caused by XWarpPointer
self._mouse_x = x
self._mouse_y = y
return
if self._applied_mouse_exclusive:
# Reset pointer position
ex, ey = self._mouse_exclusive_client
xlib.XWarpPointer(self._x_display,
0,
self._window,
0, 0,
0, 0,
ex, ey)
self._mouse_x = x
self._mouse_y = y
self._mouse_in_window = True
buttons = 0
if ev.xmotion.state & xlib.Button1MotionMask:
buttons |= mouse.LEFT
if ev.xmotion.state & xlib.Button2MotionMask:
buttons |= mouse.MIDDLE
if ev.xmotion.state & xlib.Button3MotionMask:
buttons |= mouse.RIGHT
if buttons:
# Drag event
modifiers = self._translate_modifiers(ev.xmotion.state)
self.dispatch_event('on_mouse_drag', x, y, dx, dy, buttons, modifiers)
else:
# Motion event
self.dispatch_event('on_mouse_motion', x, y, dx, dy)
@XlibEventHandler(xlib.MotionNotify)
def _event_motionnotify(self, ev):
# Window motion looks for drags that are outside the view but within
# the window.
buttons = 0
if ev.xmotion.state & xlib.Button1MotionMask:
buttons |= mouse.LEFT
if ev.xmotion.state & xlib.Button2MotionMask:
buttons |= mouse.MIDDLE
if ev.xmotion.state & xlib.Button3MotionMask:
buttons |= mouse.RIGHT
if buttons:
# Drag event
x = ev.xmotion.x - self._view_x
y = self._height - (ev.xmotion.y - self._view_y)
if self._mouse_in_window:
dx = x - self._mouse_x
dy = y - self._mouse_y
else:
dx = dy = 0
self._mouse_x = x
self._mouse_y = y
modifiers = self._translate_modifiers(ev.xmotion.state)
self.dispatch_event('on_mouse_drag', x, y, dx, dy, buttons, modifiers)
@XlibEventHandler(xlib.ClientMessage)
def _event_clientmessage(self, ev):
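# WM_PROTOCOLS messages carry the protocol atom in data.l[0]; XDND messages
# are distinguished by their message_type instead.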
atom = ev.xclient.data.l[0]
if atom == xlib.XInternAtom(ev.xclient.display, asbytes('WM_DELETE_WINDOW'), False):
self.dispatch_event('on_close')
elif (self._enable_xsync and
atom == xlib.XInternAtom(ev.xclient.display,
asbytes('_NET_WM_SYNC_REQUEST'), False)):
lo = ev.xclient.data.l[2]
hi = ev.xclient.data.l[3]
self._current_sync_value = xsync.XSyncValue(hi, lo)
elif ev.xclient.message_type == self._xdnd_atoms['XdndPosition']:
self._event_drag_position(ev)
elif ev.xclient.message_type == self._xdnd_atoms['XdndDrop']:
self._event_drag_drop(ev)
elif ev.xclient.message_type == self._xdnd_atoms['XdndEnter']:
self._event_drag_enter(ev)
def _event_drag_drop(self, ev):
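# XdndDrop: if we accepted a data type, request the selection contents
# (delivered later as a SelectionNotify); otherwise, for protocol
# version >= 2, tell the source we finished with no data.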
if self._xdnd_version > XDND_VERSION:
return
time = xlib.CurrentTime
if self._xdnd_format:
if self._xdnd_version >= 1:
time = ev.xclient.data.l[2]
# Convert to selection notification.
xlib.XConvertSelection(self._x_display,
self._xdnd_atoms['XdndSelection'],
self._xdnd_format,
self._xdnd_atoms['XdndSelection'],
self._window,
time)
xlib.XFlush(self._x_display)
elif self._xdnd_version >= 2:
# If no format send finished with no data.
e = xlib.XEvent()
e.xclient.type = xlib.ClientMessage
e.xclient.message_type = self._xdnd_atoms['XdndFinished']
e.xclient.display = cast(self._x_display, POINTER(xlib.Display))
e.xclient.window = self._window
e.xclient.format = 32
e.xclient.data.l[0] = self._window
e.xclient.data.l[1] = 0
e.xclient.data.l[2] = 0  # a ctypes long slot cannot hold None; 0 signals no data
xlib.XSendEvent(self._x_display, self._xdnd_source,
False, xlib.NoEventMask, byref(e))
xlib.XFlush(self._x_display)
def _event_drag_position(self, ev):
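# XdndPosition: data.l[2] packs the root-relative pointer position as
# (x << 16) | y. Translate it into window coordinates and reply with an
# XdndStatus message saying whether we will accept the drop.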
if self._xdnd_version > XDND_VERSION:
return
xoff = (ev.xclient.data.l[2] >> 16) & 0xffff
yoff = (ev.xclient.data.l[2]) & 0xffff
# Need to convert the position to actual window coordinates with the screen offset
child = xlib.Window()
x = c_int()
y = c_int()
xlib.XTranslateCoordinates(self._x_display,
self._get_root(),
self._window,
xoff, yoff,
byref(x),
byref(y),
byref(child))
self._xdnd_position = (x.value, y.value)
e = xlib.XEvent()
e.xclient.type = xlib.ClientMessage
e.xclient.message_type = self._xdnd_atoms['XdndStatus']
e.xclient.display = cast(self._x_display, POINTER(xlib.Display))
e.xclient.window = ev.xclient.data.l[0]
e.xclient.format = 32
e.xclient.data.l[0] = self._window
e.xclient.data.l[2] = 0
e.xclient.data.l[3] = 0
if self._xdnd_format:
e.xclient.data.l[1] = 1
if self._xdnd_version >= 2:
e.xclient.data.l[4] = self._xdnd_atoms['XdndActionCopy']
xlib.XSendEvent(self._x_display, self._xdnd_source,
False, xlib.NoEventMask, byref(e))
xlib.XFlush(self._x_display)
def _event_drag_enter(self, ev):
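# XdndEnter: data.l[0] is the source window, the top byte of data.l[1] is
# the source's protocol version, and bit 0 of data.l[1] indicates that more
# than three data types are offered.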
self._xdnd_source = ev.xclient.data.l[0]
self._xdnd_version = ev.xclient.data.l[1] >> 24
self._xdnd_format = None
if self._xdnd_version > XDND_VERSION:
return
three_or_more = ev.xclient.data.l[1] & 1
# More than three types advertised: fetch the full XdndTypeList property.
if three_or_more:
data, count = self.get_single_property(self._xdnd_source, self._xdnd_atoms['XdndTypeList'], XA_ATOM)
data = cast(data, POINTER(xlib.Atom))
else:
# Some old versions may only have 3? Needs testing.
count = 3
data = ev.xclient.data.l + 2
# Check the types advertised by the drag source and accept only a URI list (text/uri-list).
for i in range(count):
if data[i] == self._xdnd_atoms['text/uri-list']:
self._xdnd_format = self._xdnd_atoms['text/uri-list']
break
if data:
xlib.XFree(data)
def get_single_property(self, window, atom_property, atom_type):
""" Returns the length and data of a window property. """
actualAtom = xlib.Atom()
actualFormat = c_int()
itemCount = c_ulong()
bytesAfter = c_ulong()
data = POINTER(c_ubyte)()
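# Request up to 2**31 - 1 32-bit units, i.e. effectively the whole property.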
xlib.XGetWindowProperty(self._x_display, window,
atom_property, 0, 2147483647, False, atom_type,
byref(actualAtom),
byref(actualFormat),
byref(itemCount),
byref(bytesAfter),
data)
return data, itemCount.value
@XlibEventHandler(xlib.SelectionNotify)
def _event_selection_notification(self, ev):
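# The XdndSelection data requested in _event_drag_drop has arrived: read it,
# acknowledge with XdndFinished, and dispatch on_file_drop.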
if ev.xselection.property != 0 and ev.xselection.selection == self._xdnd_atoms['XdndSelection']:
if self._xdnd_format:
# This will get the data
data, count = self.get_single_property(ev.xselection.requestor,
ev.xselection.property,
ev.xselection.target)
buffer = create_string_buffer(count)
memmove(buffer, data, count)
formatted_paths = self.parse_filenames(buffer.value.decode())
e = xlib.XEvent()
e.xclient.type = xlib.ClientMessage
e.xclient.message_type = self._xdnd_atoms['XdndFinished']
e.xclient.display = cast(self._x_display, POINTER(xlib.Display))
e.xclient.window = self._window
e.xclient.format = 32
e.xclient.data.l[0] = self._xdnd_source
e.xclient.data.l[1] = 1
e.xclient.data.l[2] = self._xdnd_atoms['XdndActionCopy']
xlib.XSendEvent(self._x_display, self._get_root(),
False, xlib.NoEventMask, byref(e))
xlib.XFlush(self._x_display)
xlib.XFree(data)
self.dispatch_event('on_file_drop', self._xdnd_position[0], self._height - self._xdnd_position[1], formatted_paths)
@staticmethod
def parse_filenames(decoded_string):
"""All of the filenames from file drops come as one big string with
some special characters (%20), this will parse them out.
"""
import sys
different_files = decoded_string.splitlines()
parsed = []
for filename in different_files:
if filename:
filename = urllib.parse.urlsplit(filename).path
encoding = sys.getfilesystemencoding()
parsed.append(urllib.parse.unquote(filename, encoding))
return parsed
def _sync_resize(self):
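# _NET_WM_SYNC_REQUEST handshake: after handling a resize, set the sync
# counter to the value received from the window manager so it knows the
# frame is complete.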
if self._enable_xsync and self._current_sync_valid:
if xsync.XSyncValueIsZero(self._current_sync_value):
self._current_sync_valid = False
return
xsync.XSyncSetCounter(self._x_display,
self._sync_counter,
self._current_sync_value)
self._current_sync_value = None
self._current_sync_valid = False
@ViewEventHandler
@XlibEventHandler(xlib.ButtonPress)
@XlibEventHandler(xlib.ButtonRelease)
def _event_button(self, ev):
x = ev.xbutton.x
y = self.height - ev.xbutton.y
button = 1 << (ev.xbutton.button - 1) # 1, 2, 3 -> 1, 2, 4
modifiers = self._translate_modifiers(ev.xbutton.state)
if ev.type == xlib.ButtonPress:
# override_redirect issue: manually activate this window if
# fullscreen.
if self._override_redirect and not self._active:
self.activate()
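# Buttons 4-7 are the scroll wheel: up, down, left and right respectively.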
if ev.xbutton.button == 4:
self.dispatch_event('on_mouse_scroll', x, y, 0, 1)
elif ev.xbutton.button == 5:
self.dispatch_event('on_mouse_scroll', x, y, 0, -1)
elif ev.xbutton.button == 6:
self.dispatch_event('on_mouse_scroll', x, y, -1, 0)
elif ev.xbutton.button == 7:
self.dispatch_event('on_mouse_scroll', x, y, 1, 0)
elif ev.xbutton.button < len(self._mouse_buttons):
self._mouse_buttons[ev.xbutton.button] = True
self.dispatch_event('on_mouse_press', x, y, button, modifiers)
else:
if ev.xbutton.button < 4:
self._mouse_buttons[ev.xbutton.button] = False
self.dispatch_event('on_mouse_release', x, y, button, modifiers)
@ViewEventHandler
@XlibEventHandler(xlib.Expose)
def _event_expose(self, ev):
# Ignore all expose events except the last one. We could be told
# about exposure rects - but I don't see the point since we're
# working with OpenGL and we'll just redraw the whole scene.
if ev.xexpose.count > 0:
return
self.dispatch_event('on_expose')
@ViewEventHandler
@XlibEventHandler(xlib.EnterNotify)
def _event_enternotify(self, ev):
# figure active mouse buttons
# XXX ignore modifier state?
state = ev.xcrossing.state
self._mouse_buttons[1] = state & xlib.Button1Mask
self._mouse_buttons[2] = state & xlib.Button2Mask
self._mouse_buttons[3] = state & xlib.Button3Mask
self._mouse_buttons[4] = state & xlib.Button4Mask
self._mouse_buttons[5] = state & xlib.Button5Mask
# mouse position
x = self._mouse_x = ev.xcrossing.x
y = self._mouse_y = self.height - ev.xcrossing.y
self._mouse_in_window = True
# XXX there may be more we could do here
self.dispatch_event('on_mouse_enter', x, y)
@ViewEventHandler
@XlibEventHandler(xlib.LeaveNotify)
def _event_leavenotify(self, ev):
x = self._mouse_x = ev.xcrossing.x
y = self._mouse_y = self.height - ev.xcrossing.y
self._mouse_in_window = False
self.dispatch_event('on_mouse_leave', x, y)
@XlibEventHandler(xlib.ConfigureNotify)
def _event_configurenotify(self, ev):
if self._enable_xsync and self._current_sync_value:
self._current_sync_valid = True
if self._fullscreen:
return
self.switch_to()
w, h = ev.xconfigure.width, ev.xconfigure.height
x, y = ev.xconfigure.x, ev.xconfigure.y
if self._width != w or self._height != h:
self._width = w
self._height = h
self._update_view_size()
self.dispatch_event('on_resize', self._width, self._height)
if self._x != x or self._y != y:
self.dispatch_event('on_move', x, y)
self._x = x
self._y = y
@XlibEventHandler(xlib.FocusIn)
def _event_focusin(self, ev):
self._active = True
self._update_exclusivity()
self.dispatch_event('on_activate')
xlib.XSetICFocus(self._x_ic)
@XlibEventHandler(xlib.FocusOut)
def _event_focusout(self, ev):
self._active = False
self._update_exclusivity()
self.dispatch_event('on_deactivate')
xlib.XUnsetICFocus(self._x_ic)
@XlibEventHandler(xlib.MapNotify)
def _event_mapnotify(self, ev):
self._mapped = True
self.dispatch_event('on_show')
self._update_exclusivity()
@XlibEventHandler(xlib.UnmapNotify)
def _event_unmapnotify(self, ev):
self._mapped = False
self.dispatch_event('on_hide')