Dataset schema (one row per source file):

| column | type | length / range |
|---|---|---|
| hexsha | string | 40 |
| size | int64 | 1 to 1.03M |
| ext | string | 10 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | 3 to 239 |
| max_stars_repo_name | string | 5 to 130 |
| max_stars_repo_head_hexsha | string | 40 to 78 |
| max_stars_repo_licenses | list | 1 to 10 |
| max_stars_count | int64 (nullable) | 1 to 191k |
| max_stars_repo_stars_event_min_datetime | string (nullable) | 24 |
| max_stars_repo_stars_event_max_datetime | string (nullable) | 24 |
| max_issues_repo_path | string | 3 to 239 |
| max_issues_repo_name | string | 5 to 130 |
| max_issues_repo_head_hexsha | string | 40 to 78 |
| max_issues_repo_licenses | list | 1 to 10 |
| max_issues_count | int64 (nullable) | 1 to 67k |
| max_issues_repo_issues_event_min_datetime | string (nullable) | 24 |
| max_issues_repo_issues_event_max_datetime | string (nullable) | 24 |
| max_forks_repo_path | string | 3 to 239 |
| max_forks_repo_name | string | 5 to 130 |
| max_forks_repo_head_hexsha | string | 40 to 78 |
| max_forks_repo_licenses | list | 1 to 10 |
| max_forks_count | int64 (nullable) | 1 to 105k |
| max_forks_repo_forks_event_min_datetime | string (nullable) | 24 |
| max_forks_repo_forks_event_max_datetime | string (nullable) | 24 |
| content | string | 1 to 1.03M |
| avg_line_length | float64 | 1 to 958k |
| max_line_length | int64 | 1 to 1.03M |
| alphanum_fraction | float64 | 0 to 1 |
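A minimal, illustrative sketch (not part of the original dump): assuming each row of this table is available as a plain Python dict keyed by the column names above, one row could be summarized as shown below. The function name summarize_row and the plain-dict assumption are hypothetical.

def summarize_row(row):
    """Print a short, human-readable summary of one dataset row."""
    licenses = ", ".join(row.get("max_stars_repo_licenses") or [])
    print(row["hexsha"], row["max_stars_repo_path"])
    print("  repo:", row["max_stars_repo_name"], "| licenses:", licenses)
    print("  size:", row["size"], "bytes",
          "| avg_line_length:", row["avg_line_length"],
          "| max_line_length:", row["max_line_length"],
          "| alphanum_fraction:", row["alphanum_fraction"])
    # 'content' holds the full source file as a single string
    first_line = row["content"].splitlines()[0] if row["content"] else ""
    print("  first line of content:", repr(first_line))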
hexsha: 4a16145307d730b19850dc1fc80caf2b1aafe3c9 | size: 18,708 | ext: py | lang: Python
max_stars: databuilder/publisher/neo4j_csv_publisher.py @ fBedecarrats/amundsendatabuilder (69bf5b77af11c770641724f2c311dd13b6ea8b8b) | licenses: ["Apache-2.0"] | count: null | events: null / null
max_issues: databuilder/publisher/neo4j_csv_publisher.py @ fBedecarrats/amundsendatabuilder (69bf5b77af11c770641724f2c311dd13b6ea8b8b) | licenses: ["Apache-2.0"] | count: null | events: null / null
max_forks: databuilder/publisher/neo4j_csv_publisher.py @ fBedecarrats/amundsendatabuilder (69bf5b77af11c770641724f2c311dd13b6ea8b8b) | licenses: ["Apache-2.0"] | count: null | events: null / null
content:
import copy
import csv
import ctypes
import logging
import time
from os import listdir
from os.path import isfile, join
from string import Template
import six
from neo4j.v1 import GraphDatabase, Transaction # noqa: F401
from pyhocon import ConfigFactory # noqa: F401
from pyhocon import ConfigTree # noqa: F401
from typing import Set, List # noqa: F401
from databuilder.publisher.base_publisher import Publisher
from databuilder.publisher.neo4j_preprocessor import NoopRelationPreprocessor
# Setting field_size_limit to solve the error below
# _csv.Error: field larger than field limit (131072)
# https://stackoverflow.com/a/54517228/5972935
csv.field_size_limit(int(ctypes.c_ulong(-1).value // 2))
# Config keys
# A directory that contains CSV files for nodes
NODE_FILES_DIR = 'node_files_directory'
# A directory that contains CSV files for relationships
RELATION_FILES_DIR = 'relation_files_directory'
# A end point for Neo4j e.g: bolt://localhost:9999
NEO4J_END_POINT_KEY = 'neo4j_endpoint'
# A transaction size that determines how often it commits.
NEO4J_TRANSCATION_SIZE = 'neo4j_transaction_size'
# A progress report frequency that determines how often it report the progress.
NEO4J_PROGRESS_REPORT_FREQUENCY = 'neo4j_progress_report_frequency'
# A boolean flag to make it fail if relationship is not created
NEO4J_RELATIONSHIP_CREATION_CONFIRM = 'neo4j_relationship_creation_confirm'
NEO4J_MAX_CONN_LIFE_TIME_SEC = 'neo4j_max_conn_life_time_sec'
# list of nodes that are create only, and not updated if match exists
NEO4J_CREATE_ONLY_NODES = 'neo4j_create_only_nodes'
NEO4J_USER = 'neo4j_user'
NEO4J_PASSWORD = 'neo4j_password'
# This will be used to provide unique tag to the node and relationship
JOB_PUBLISH_TAG = 'job_publish_tag'
# Neo4j property name for published tag
PUBLISHED_TAG_PROPERTY_NAME = 'published_tag'
# Neo4j property name for last updated timestamp
LAST_UPDATED_EPOCH_MS = 'publisher_last_updated_epoch_ms'
RELATION_PREPROCESSOR = 'relation_preprocessor'
# CSV HEADER
# A header with this suffix will be pass to Neo4j statement without quote
UNQUOTED_SUFFIX = ':UNQUOTED'
# A header for Node label
NODE_LABEL_KEY = 'LABEL'
# A header for Node key
NODE_KEY_KEY = 'KEY'
# Required columns for Node
NODE_REQUIRED_KEYS = {NODE_LABEL_KEY, NODE_KEY_KEY}
# Relationship relates two nodes together
# Start node label
RELATION_START_LABEL = 'START_LABEL'
# Start node key
RELATION_START_KEY = 'START_KEY'
# End node label
RELATION_END_LABEL = 'END_LABEL'
# Node node key
RELATION_END_KEY = 'END_KEY'
# Type for relationship (Start Node)->(End Node)
RELATION_TYPE = 'TYPE'
# Type for reverse relationship (End Node)->(Start Node)
RELATION_REVERSE_TYPE = 'REVERSE_TYPE'
# Required columns for Relationship
RELATION_REQUIRED_KEYS = {RELATION_START_LABEL, RELATION_START_KEY,
RELATION_END_LABEL, RELATION_END_KEY,
RELATION_TYPE, RELATION_REVERSE_TYPE}
DEFAULT_CONFIG = ConfigFactory.from_dict({NEO4J_TRANSCATION_SIZE: 500,
NEO4J_PROGRESS_REPORT_FREQUENCY: 500,
NEO4J_RELATIONSHIP_CREATION_CONFIRM: False,
NEO4J_MAX_CONN_LIFE_TIME_SEC: 50,
RELATION_PREPROCESSOR: NoopRelationPreprocessor()})
NODE_MERGE_TEMPLATE = Template("""MERGE (node:$LABEL {key: '${KEY}'})
ON CREATE SET ${create_prop_body}
${update_statement}""")
NODE_UPDATE_TEMPLATE = Template("""ON MATCH SET ${update_prop_body}""")
RELATION_MERGE_TEMPLATE = Template("""MATCH (n1:$START_LABEL {key: '${START_KEY}'}),
(n2:$END_LABEL {key: '${END_KEY}'})
MERGE (n1)-[r1:$TYPE]->(n2)-[r2:$REVERSE_TYPE]->(n1)
$PROP_STMT RETURN n1.key, n2.key""")
CREATE_UNIQUE_INDEX_TEMPLATE = Template('CREATE CONSTRAINT ON (node:${LABEL}) ASSERT node.key IS UNIQUE')
LOGGER = logging.getLogger(__name__)
class Neo4jCsvPublisher(Publisher):
"""
A Publisher takes two folders for input and publishes to Neo4j.
One folder will contain CSV file(s) for Node where the other folder will contain CSV file(s) for Relationship.
Neo4j follows Label Node properties Graph and more information about this is in:
https://neo4j.com/docs/developer-manual/current/introduction/graphdb-concepts/
#TODO User UNWIND batch operation for better performance
"""
def __init__(self):
# type: () -> None
super(Neo4jCsvPublisher, self).__init__()
def init(self, conf):
# type: (ConfigTree) -> None
conf = conf.with_fallback(DEFAULT_CONFIG)
self._count = 0 # type: int
self._progress_report_frequency = conf.get_int(NEO4J_PROGRESS_REPORT_FREQUENCY)
self._node_files = self._list_files(conf, NODE_FILES_DIR)
self._node_files_iter = iter(self._node_files)
self._relation_files = self._list_files(conf, RELATION_FILES_DIR)
self._relation_files_iter = iter(self._relation_files)
self._driver = \
GraphDatabase.driver(conf.get_string(NEO4J_END_POINT_KEY),
max_connection_life_time=conf.get_int(NEO4J_MAX_CONN_LIFE_TIME_SEC),
auth=(conf.get_string(NEO4J_USER), conf.get_string(NEO4J_PASSWORD)))
self._transaction_size = conf.get_int(NEO4J_TRANSCATION_SIZE)
self._session = self._driver.session()
self._confirm_rel_created = conf.get_bool(NEO4J_RELATIONSHIP_CREATION_CONFIRM)
# config is list of node label.
# When set, this list specifies a list of nodes that shouldn't be updated, if exists
self.create_only_nodes = set(conf.get_list(NEO4J_CREATE_ONLY_NODES, default=[]))
self.labels = set() # type: Set[str]
self.publish_tag = conf.get_string(JOB_PUBLISH_TAG) # type: str
if not self.publish_tag:
raise Exception('{} should not be empty'.format(JOB_PUBLISH_TAG))
self._relation_preprocessor = conf.get(RELATION_PREPROCESSOR)
LOGGER.info('Publishing Node csv files {}, and Relation CSV files {}'
.format(self._node_files, self._relation_files))
def _list_files(self, conf, path_key):
# type: (ConfigTree, str) -> List[str]
"""
List files from directory
:param conf:
:param path_key:
:return: List of file paths
"""
if path_key not in conf:
return []
path = conf.get_string(path_key)
return [join(path, f) for f in listdir(path) if isfile(join(path, f))]
def publish_impl(self): # noqa: C901
# type: () -> None
"""
Publishes Nodes first and then Relations
:return:
"""
start = time.time()
LOGGER.info('Creating indices using Node files: {}'.format(self._node_files))
for node_file in self._node_files:
self._create_indices(node_file=node_file)
LOGGER.info('Publishing Node files: {}'.format(self._node_files))
try:
tx = self._session.begin_transaction()
while True:
try:
node_file = next(self._node_files_iter)
tx = self._publish_node(node_file, tx=tx)
except StopIteration:
break
LOGGER.info('Publishing Relationship files: {}'.format(self._relation_files))
while True:
try:
relation_file = next(self._relation_files_iter)
tx = self._publish_relation(relation_file, tx=tx)
except StopIteration:
break
tx.commit()
LOGGER.info('Committed total {} statements'.format(self._count))
# TODO: Add statsd support
LOGGER.info('Successfully published. Elapsed: {} seconds'.format(time.time() - start))
except Exception as e:
LOGGER.exception('Failed to publish. Rolling back.')
if not tx.closed():
tx.rollback()
raise e
def get_scope(self):
# type: () -> str
return 'publisher.neo4j'
def _create_indices(self, node_file):
"""
Go over the node file and try creating unique index
:param node_file:
:return:
"""
# type: (str) -> None
LOGGER.info('Creating indices. (Existing indices will be ignored)')
with open(node_file, 'r') as node_csv:
for node_record in csv.DictReader(node_csv):
label = node_record[NODE_LABEL_KEY]
if label not in self.labels:
self._try_create_index(label)
self.labels.add(label)
LOGGER.info('Indices have been created.')
def _publish_node(self, node_file, tx):
# type: (str, Transaction) -> Transaction
"""
Iterate over the csv records of a file, each csv record transform to Merge statement and will be executed.
All nodes should have a unique key, and this method will try to create unique index on the LABEL when it sees
first time within a job scope.
Example of Cypher query executed by this method:
MERGE (col_test_id1:Column {key: 'presto://gold.test_schema1/test_table1/test_id1'})
ON CREATE SET col_test_id1.name = 'test_id1',
col_test_id1.order_pos = 2,
col_test_id1.type = 'bigint'
ON MATCH SET col_test_id1.name = 'test_id1',
col_test_id1.order_pos = 2,
col_test_id1.type = 'bigint'
:param node_file:
:return:
"""
with open(node_file, 'r') as node_csv:
for count, node_record in enumerate(csv.DictReader(node_csv)):
stmt = self.create_node_merge_statement(node_record=node_record)
tx = self._execute_statement(stmt, tx)
return tx
def is_create_only_node(self, node_record):
# type: (dict) -> bool
"""
Check if node can be updated
:param node_record:
:return:
"""
if self.create_only_nodes:
return node_record[NODE_LABEL_KEY] in self.create_only_nodes
else:
return False
def create_node_merge_statement(self, node_record):
# type: (dict) -> str
"""
Creates node merge statement
:param node_record:
:return:
"""
params = copy.deepcopy(node_record)
params['create_prop_body'] = self._create_props_body(node_record, NODE_REQUIRED_KEYS, 'node')
update_statement = ''
if not self.is_create_only_node(node_record):
update_prop_body = self._create_props_body(node_record, NODE_REQUIRED_KEYS, 'node')
update_statement = NODE_UPDATE_TEMPLATE.substitute(update_prop_body=update_prop_body)
params['update_statement'] = update_statement
return NODE_MERGE_TEMPLATE.substitute(params)
def _publish_relation(self, relation_file, tx):
# type: (str, Transaction) -> Transaction
"""
Creates relation between two nodes.
(In Amundsen, all relation is bi-directional)
Example of Cypher query executed by this method:
MATCH (n1:Table {key: 'presto://gold.test_schema1/test_table1'}),
(n2:Column {key: 'presto://gold.test_schema1/test_table1/test_col1'})
MERGE (n1)-[r1:COLUMN]->(n2)-[r2:BELONG_TO_TABLE]->(n1)
RETURN n1.key, n2.key
:param relation_file:
:return:
"""
if self._relation_preprocessor.is_perform_preprocess():
LOGGER.info('Pre-processing relation with {}'.format(self._relation_preprocessor))
count = 0
with open(relation_file, 'r') as relation_csv:
for rel_record in csv.DictReader(relation_csv):
stmt, params = self._relation_preprocessor.preprocess_cypher(
start_label=rel_record[RELATION_START_LABEL],
end_label=rel_record[RELATION_END_LABEL],
start_key=rel_record[RELATION_START_KEY],
end_key=rel_record[RELATION_END_KEY],
relation=rel_record[RELATION_TYPE],
reverse_relation=rel_record[RELATION_REVERSE_TYPE])
if stmt:
tx = self._execute_statement(stmt, tx=tx, params=params)
count += 1
LOGGER.info('Executed pre-processing Cypher statement {} times'.format(count))
with open(relation_file, 'r') as relation_csv:
for count, rel_record in enumerate(csv.DictReader(relation_csv)):
stmt = self.create_relationship_merge_statement(rel_record=rel_record)
tx = self._execute_statement(stmt, tx,
expect_result=self._confirm_rel_created)
return tx
def create_relationship_merge_statement(self, rel_record):
# type: (dict) -> str
"""
Creates relationship merge statement
:param rel_record:
:return:
"""
param = copy.deepcopy(rel_record)
create_prop_body = self._create_props_body(rel_record, RELATION_REQUIRED_KEYS, 'r1')
param['PROP_STMT'] = ' ' # No properties for relationship by default
if create_prop_body:
# We need one more body for reverse relation
create_prop_body = ' , '.join([create_prop_body,
self._create_props_body(rel_record, RELATION_REQUIRED_KEYS, 'r2')])
update_prop_body = ' , '.join([self._create_props_body(rel_record, RELATION_REQUIRED_KEYS, 'r1'),
self._create_props_body(rel_record, RELATION_REQUIRED_KEYS, 'r2')])
param['PROP_STMT'] = """ON CREATE SET {create_prop_body}
ON MATCH SET {update_prop_body}""".format(create_prop_body=create_prop_body,
update_prop_body=update_prop_body)
return RELATION_MERGE_TEMPLATE.substitute(param)
def _create_props_body(self,
record_dict,
excludes,
identifier):
# type: (dict, Set, str) -> str
"""
Creates properties body with params required for resolving template.
e.g: Note that node.key3 is not quoted if header has UNQUOTED_SUFFIX.
identifier.key1 = 'val1' , identifier.key2 = 'val2', identifier.key3 = val3
:param record_dict: A dict represents CSV row
:param excludes: set of excluded columns that does not need to be in properties (e.g: KEY, LABEL ...)
:param identifier: identifier that will be used in CYPHER query as shown on above example
:param is_update: Creates property body for update statement in MERGE
:return: Properties body for Cypher statement
"""
template_params = {}
props = []
for k, v in six.iteritems(record_dict):
if k in excludes:
template_params[k] = v
continue
# escape quote for Cypher query
# if isinstance(str, v):
v = v.replace('\'', "\\'")
# escape backslash for Cypher query
v = v.replace("\\", "\\")
if k.endswith(UNQUOTED_SUFFIX):
k = k[:-len(UNQUOTED_SUFFIX)]
props.append('{id}.{key} = {val}'.format(id=identifier, key=k, val=v))
else:
props.append("""{id}.{key} = '{val}'""".format(id=identifier, key=k, val=v))
template_params[k] = v
props.append("""{id}.{key} = '{val}'""".format(id=identifier,
key=PUBLISHED_TAG_PROPERTY_NAME,
val=self.publish_tag))
props.append("""{id}.{key} = {val}""".format(id=identifier,
key=LAST_UPDATED_EPOCH_MS,
val='timestamp()'))
return ', '.join(props)
def _execute_statement(self,
stmt,
tx,
params=None,
expect_result=False):
# type: (str, Transaction, bool) -> Transaction
"""
Executes statement against Neo4j. If execution fails, it rollsback and raise exception.
If 'expect_result' flag is True, it confirms if result object is not null.
:param stmt:
:param tx:
:param count:
:param expect_result: By having this True, it will validate if result object is not None.
:return:
"""
try:
if LOGGER.isEnabledFor(logging.DEBUG):
LOGGER.debug('Executing statement: {} with params {}'.format(stmt, params))
if six.PY2:
result = tx.run(unicode(stmt, errors='ignore'), parameters=params) # noqa
else:
result = tx.run(str(stmt).encode('utf-8', 'ignore'), parameters=params)
if expect_result and not result.single():
raise RuntimeError('Failed to executed statement: {}'.format(stmt))
self._count += 1
if self._count > 1 and self._count % self._transaction_size == 0:
tx.commit()
LOGGER.info('Committed {} statements so far'.format(self._count))
return self._session.begin_transaction()
if self._count > 1 and self._count % self._progress_report_frequency == 0:
LOGGER.info('Processed {} statements so far'.format(self._count))
return tx
except Exception as e:
LOGGER.exception('Failed to execute Cypher query')
if not tx.closed():
tx.rollback()
raise e
def _try_create_index(self,
label):
# type: (str) -> None
"""
For any label seen first time for this publisher it will try to create unique index.
There's no side effect on Neo4j side issuing index creation for existing index.
:param label:
:return:
"""
stmt = CREATE_UNIQUE_INDEX_TEMPLATE.substitute(LABEL=label)
LOGGER.info('Trying to create index for label {label} if not exist: {stmt}'.format(label=label,
stmt=stmt))
with self._driver.session() as session:
session.run(stmt)
avg_line_length: 40.493506 | max_line_length: 117 | alphanum_fraction: 0.618345

hexsha: 4a16159489a44aea91ac7952de2da0111dad0378 | size: 7,543 | ext: py | lang: Python
max_stars: landmark_regression/lib/dataset/PEdataset.py @ Aurelio93/satellite-pose-estimation (46957a9bc9f204d468f8fe3150593b3db0f0726a) | licenses: ["MIT"] | count: 24 | events: 2019-09-05T04:05:58.000Z / 2022-02-06T01:34:11.000Z
max_issues: landmark_regression/lib/dataset/PEdataset.py @ Aurelio93/satellite-pose-estimation (46957a9bc9f204d468f8fe3150593b3db0f0726a) | licenses: ["MIT"] | count: 2 | events: 2020-07-30T07:04:22.000Z / 2022-03-14T06:40:49.000Z
max_forks: landmark_regression/lib/dataset/PEdataset.py @ Aurelio93/satellite-pose-estimation (46957a9bc9f204d468f8fe3150593b3db0f0726a) | licenses: ["MIT"] | count: 6 | events: 2020-02-04T06:34:32.000Z / 2021-12-10T03:23:51.000Z
content:
# ------------------------------------------------------------------------------
# Copyright (c) Microsoft
# Licensed under the MIT License.
# Written by Bin Xiao (Bin.Xiao@microsoft.com)
# ------------------------------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
import os
import json_tricks as json
from collections import OrderedDict
import numpy as np
from scipy.io import loadmat, savemat
from dataset.JointsDataset import JointsDataset
logger = logging.getLogger(__name__)
class PEdataset(JointsDataset):
def __init__(self, cfg, root, image_set, is_train, transform=None):
super().__init__(cfg, root, image_set, is_train, transform)
self.num_joints = 11
self.flip_pairs = []
self.parent_ids = []
self.upper_body_ids = None
self.lower_body_ids = None
self.image_width = 1920
self.image_height = 1200
self.aspect_ratio = self.image_width * 1.0 / self.image_height
self.pixel_std = 200
self.db = self._get_db()
logger.info('=> load {} samples'.format(len(self.db)))
def _get_db(self):
# create train/val split
file_name = os.path.join(
self.root, self.image_set+'.json'
)
with open(file_name) as anno_file:
anno = json.load(anno_file)
gt_db = []
for a in anno:
image_name = a['image']
# c = np.array(a['center'], dtype=np.float)
# s = np.array([a['scale'], a['scale']], dtype=np.float)
box = np.array(a['box']).flatten()
# import ipdb;ipdb.set_trace();
c,s=self._box2cs(box)
joints_3d = np.zeros((self.num_joints, 3), dtype=np.float)
joints_3d_vis = np.zeros((self.num_joints, 3), dtype=np.float)
for idx, rec in enumerate(a['joints']):
joints_3d[idx, 0:2] = rec[:]
for idx, rec in enumerate(a['visible']):
joints_3d_vis[idx, 0:2] = rec
#
#
# joints_3d = np.zeros((self.num_joints, 3), dtype=np.float)
# joints_3d_vis = np.zeros((self.num_joints, 3), dtype=np.float)
# if self.image_set != 'test':
# joints = np.array(a['joints'])
# joints[:, 0:2] = joints[:, 0:2] - 1
# joints_vis = np.array(a['joints_vis'])
# assert len(joints) == self.num_joints, \
# 'joint num diff: {} vs {}'.format(len(joints),
# self.num_joints)
#
# joints_3d[:, 0:2] = joints[:, 0:2]
# joints_3d_vis[:, 0] = joints_vis[:]
# joints_3d_vis[:, 1] = joints_vis[:]
image_dir = '../images/train/'
gt_db.append(
{
'image': os.path.join(image_dir, image_name),
'center': c,
'scale': s,
'joints_3d': joints_3d,
'joints_3d_vis': joints_3d_vis,
'filename': '',
'imgnum': 0,
}
)
return gt_db
def _box2cs(self, box):
x, y, w, h = box[:4]
return self._xywh2cs(x, y, w, h)
def _xywh2cs(self, x, y, w, h):
center = np.zeros((2), dtype=np.float32)
center[0] = x + w * 0.5
center[1] = y + h * 0.5
if w > self.aspect_ratio * h:
h = w * 1.0 / self.aspect_ratio
elif w < self.aspect_ratio * h:
w = h * self.aspect_ratio
scale = np.array(
[w * 1.0 / self.pixel_std, h * 1.0 / self.pixel_std],
dtype=np.float32)
if center[0] != -1:
scale = scale * 1.25
return center, scale
def evaluate(self, cfg, preds, output_dir, *args, **kwargs):
# # convert 0-based index to 1-based index
preds[:, :, 0:2] += 1.0
if output_dir:
pred_file = os.path.join(output_dir, 'pred.mat')
savemat(pred_file, mdict={'preds': preds})
return {'Null': 0}, 0
if 'test' in cfg.DATASET.TEST_SET:
return {'Null': 0.0}, 0.0
SC_BIAS = 0.6
threshold = 0.5
gt_file = os.path.join(cfg.DATASET.ROOT,
'annot',
'gt_{}.mat'.format(cfg.DATASET.TEST_SET))
gt_dict = loadmat(gt_file)
dataset_joints = gt_dict['dataset_joints']
jnt_missing = gt_dict['jnt_missing']
pos_gt_src = gt_dict['pos_gt_src']
headboxes_src = gt_dict['headboxes_src']
pos_pred_src = np.transpose(preds, [1, 2, 0])
head = np.where(dataset_joints == 'head')[1][0]
lsho = np.where(dataset_joints == 'lsho')[1][0]
lelb = np.where(dataset_joints == 'lelb')[1][0]
lwri = np.where(dataset_joints == 'lwri')[1][0]
lhip = np.where(dataset_joints == 'lhip')[1][0]
lkne = np.where(dataset_joints == 'lkne')[1][0]
lank = np.where(dataset_joints == 'lank')[1][0]
rsho = np.where(dataset_joints == 'rsho')[1][0]
relb = np.where(dataset_joints == 'relb')[1][0]
rwri = np.where(dataset_joints == 'rwri')[1][0]
rkne = np.where(dataset_joints == 'rkne')[1][0]
rank = np.where(dataset_joints == 'rank')[1][0]
rhip = np.where(dataset_joints == 'rhip')[1][0]
jnt_visible = 1 - jnt_missing
uv_error = pos_pred_src - pos_gt_src
uv_err = np.linalg.norm(uv_error, axis=1)
headsizes = headboxes_src[1, :, :] - headboxes_src[0, :, :]
headsizes = np.linalg.norm(headsizes, axis=0)
headsizes *= SC_BIAS
scale = np.multiply(headsizes, np.ones((len(uv_err), 1)))
scaled_uv_err = np.divide(uv_err, scale)
scaled_uv_err = np.multiply(scaled_uv_err, jnt_visible)
jnt_count = np.sum(jnt_visible, axis=1)
less_than_threshold = np.multiply((scaled_uv_err <= threshold),
jnt_visible)
PCKh = np.divide(100.*np.sum(less_than_threshold, axis=1), jnt_count)
# save
rng = np.arange(0, 0.5+0.01, 0.01)
pckAll = np.zeros((len(rng), 16))
for r in range(len(rng)):
threshold = rng[r]
less_than_threshold = np.multiply(scaled_uv_err <= threshold,
jnt_visible)
pckAll[r, :] = np.divide(100.*np.sum(less_than_threshold, axis=1),
jnt_count)
PCKh = np.ma.array(PCKh, mask=False)
PCKh.mask[6:8] = True
jnt_count = np.ma.array(jnt_count, mask=False)
jnt_count.mask[6:8] = True
jnt_ratio = jnt_count / np.sum(jnt_count).astype(np.float64)
name_value = [
('Head', PCKh[head]),
('Shoulder', 0.5 * (PCKh[lsho] + PCKh[rsho])),
('Elbow', 0.5 * (PCKh[lelb] + PCKh[relb])),
('Wrist', 0.5 * (PCKh[lwri] + PCKh[rwri])),
('Hip', 0.5 * (PCKh[lhip] + PCKh[rhip])),
('Knee', 0.5 * (PCKh[lkne] + PCKh[rkne])),
('Ankle', 0.5 * (PCKh[lank] + PCKh[rank])),
('Mean', np.sum(PCKh * jnt_ratio)),
('Mean@0.1', np.sum(pckAll[11, :] * jnt_ratio))
]
name_value = OrderedDict(name_value)
return name_value, name_value['Mean']
avg_line_length: 35.247664 | max_line_length: 80 | alphanum_fraction: 0.513986

hexsha: 4a1615ce791431662118406eeadd34ecb19a9eae | size: 4,730 | ext: py | lang: Python
max_stars: src/vallenae/features/acoustic_emission.py @ vallen-systems/pyVallenAE (0e1c712c055ea5c4e7cb4373a2bf73f33c2b886d) | licenses: ["MIT"] | count: 10 | events: 2020-01-30T15:34:51.000Z / 2022-03-11T07:49:31.000Z
max_issues: src/vallenae/features/acoustic_emission.py @ vallen-systems/pyVallenAE (0e1c712c055ea5c4e7cb4373a2bf73f33c2b886d) | licenses: ["MIT"] | count: 1 | events: 2020-10-30T16:25:11.000Z / 2020-11-05T19:51:51.000Z
max_forks: src/vallenae/features/acoustic_emission.py @ vallen-systems/pyVallenAE (0e1c712c055ea5c4e7cb4373a2bf73f33c2b886d) | licenses: ["MIT"] | count: 5 | events: 2020-02-08T04:43:03.000Z / 2022-02-23T13:52:00.000Z
content:
import math
from typing import Optional
import numpy as np
from numba import njit
@njit
def peak_amplitude(data: np.ndarray) -> float:
"""
Compute maximum absolute amplitude.
Args:
data: Input array
Returns:
Peak amplitude of the input array
"""
max_amplitude = 0
for sample in data:
abs_value = abs(sample)
if abs_value > max_amplitude:
max_amplitude = abs_value
return max_amplitude
@njit
def peak_amplitude_index(data: np.ndarray) -> int:
"""
Compute index of peak amplitude.
Args:
data: Input array
Returns:
Index of peak amplitude
"""
max_amplitude = 0
index_peak = 0
for index, sample in enumerate(data):
abs_value = abs(sample)
if abs_value > max_amplitude:
max_amplitude = abs_value
index_peak = index
return index_peak
@njit
def is_above_threshold(data: np.ndarray, threshold: float) -> bool:
"""
Checks if absolute amplitudes are above threshold.
Args:
data: Input array
threshold: Threshold amplitude
Returns:
True if input array is above threshold, otherwise False
"""
for sample in data:
if abs(sample) >= threshold:
return True
return False
@njit
def first_threshold_crossing(data: np.ndarray, threshold: float) -> Optional[int]:
"""
Compute index of first threshold crossing.
Args:
data: Input array
threshold: Threshold amplitude
Returns:
Index of first threshold crossing. None if threshold was not exceeded
"""
for index, sample in enumerate(data):
if abs(sample) >= threshold:
return index
return None
@njit
def rise_time(
data: np.ndarray,
threshold: float,
samplerate: int,
first_crossing: Optional[int] = None,
index_peak: Optional[int] = None,
) -> float:
"""
Compute the rise time.
The rise time is the time between the first threshold crossing and the peak amplitude.
Args:
data: Input array (hit)
threshold: Threshold amplitude (in volts)
samplerate: Sample rate of the input array
first_crossing: Precomputed index of first threshold crossing to save computation time
index_peak: Precomputed index of peak amplitude to save computation time
"""
# save some computations if pre-results are provided
n_first_crossing = (
first_crossing
if first_crossing is not None
else first_threshold_crossing(data, threshold)
)
n_max = (
index_peak
if index_peak is not None
else peak_amplitude_index(data)
)
return (n_max - n_first_crossing) / samplerate
@njit
def energy(data: np.ndarray, samplerate: int) -> float:
"""
Compute the energy of a hit.
Energy is the integral of the squared AE-signal over time (EN 1330-9).
The unit of energy is eu. 1 eu corresponds to 1e-14 V²s.
Args:
data: Input array (hit)
samplerate: Sample rate of input array in Hz
Returns:
Energy of input array (hit)
"""
agg: float = 0
for sample in data:
agg += sample ** 2
return agg * 1e14 / samplerate
@njit
def signal_strength(data: np.ndarray, samplerate: int) -> float:
"""
Compute the signal strength of a hit.
Signal strength is the integral of the rectified AE-signal over time.
The unit of Signal Strength is nVs (1e-9 Vs).
Args:
data: Input array (hit)
samplerate: Sample rate of input array in Hz
Returns:
Signal strength of input array (hit)
"""
agg: float = 0
for sample in data:
agg += abs(sample)
return agg * 1e9 / samplerate
@njit
def counts(data: np.ndarray, threshold: float) -> int:
"""
Compute the number of positive threshold crossings of a hit (counts).
Args:
data: Input array
threshold: Threshold amplitude
Returns:
Number of positive threshold crossings
"""
result: int = 0
was_above_threshold: bool = False
for sample in data:
if sample >= threshold:
if not was_above_threshold:
result += 1
was_above_threshold = True
else:
was_above_threshold = False
return result
@njit
def rms(data: np.ndarray) -> float:
"""
Compute the root mean square (RMS) of an array.
Args:
data: Input array
Returns:
RMS of the input array
References:
https://en.wikipedia.org/wiki/Root_mean_square
"""
agg: float = 0
for sample in data:
agg += sample ** 2
return math.sqrt(agg / len(data))
avg_line_length: 23.415842 | max_line_length: 94 | alphanum_fraction: 0.627273

hexsha: 4a1617377b43ae9e472527396bf2394879bb9cc9 | size: 2,457 | ext: py | lang: Python
max_stars: streak-podium/streak-podium.py @ supermitch/streak-podium (3b4aba92f6b138548afe87c204d507cd2211c2ef) | licenses: ["MIT"] | count: 1 | events: 2015-08-17T16:57:57.000Z / 2015-08-17T16:57:57.000Z
max_issues: streak-podium/streak-podium.py @ supermitch/streak-podium (3b4aba92f6b138548afe87c204d507cd2211c2ef) | licenses: ["MIT"] | count: 1 | events: 2015-08-29T04:58:43.000Z / 2015-09-10T04:22:13.000Z
max_forks: streak-podium/streak-podium.py @ supermitch/streak-podium (3b4aba92f6b138548afe87c204d507cd2211c2ef) | licenses: ["MIT"] | count: null | events: null / null
content:
#!/usr/bin/env python
import argparse
import collections
import os
import json
import parse
import read
import render
def setup_args():
"""
Add and return command line arguments.
"""
parser = argparse.ArgumentParser('Streak Podium')
parser.add_argument('-f', '--file', help='member list input filename')
parser.add_argument('-o', '--org', help='organization to sort')
parser.add_argument('-l', '--limit', type=int, default=5, help='limit number of users')
return parser.parse_args()
def gather_usernames(args):
"""
Return username list from chosen input source.
"""
if args.file:
return read.input_file('temp/' + args.file)
elif args.org:
return read.org_members(args.org)
else:
return ['supermitch'] # Some default for now
def main():
args = setup_args()
print('Running Streak Podium')
print('---------------------')
if os.path.isfile('temp/streaks.json'):
print('Found json streaks')
with open('temp/streaks.json', 'r') as f:
streaks = json.load(f)
else:
print('Did not find json streaks')
kind = 'file' if args.file else 'Github org'
name = args.file if args.file else args.org
print('Input is {} [{}]'.format(kind, name))
print('\tlimiting query to {} users'.format(args.limit))
print('Gathering usernames...')
usernames = gather_usernames(args)
print('\tfound {} usernames'.format(len(usernames)))
print('Reading streaks...')
svgs = (read.svg_data(username) for username in usernames[:args.limit])
commits = (parse.extract_commits(svg) for svg in svgs)
streaks = {user: parse.find_streaks(x) for user, x in zip(usernames, commits)}
print('\tfound {} streaks'.format(len(streaks)))
if not streaks:
return
with open('temp/streaks.json', 'w') as f:
json.dump(streaks, f)
for sort in ('best', 'latest'): # Sort by best, then by latest
sorted_streaks = sorted(streaks.items(), key=lambda t: t[1].get(sort), reverse=True)
print('\nTop {} streaks:'.format(sort))
print('============')
for user, streak in sorted_streaks[:min(len(sorted_streaks), 5)]: # Top 5
print('{} - best: {} - latest: {}'.format(user, streak['best'], streak['latest']))
render.horizontal_bar(sorted_streaks, sort)
if __name__ == '__main__':
main()
avg_line_length: 29.60241 | max_line_length: 94 | alphanum_fraction: 0.60928

hexsha: 4a16176ce7e2dbb7656fad5d84c80f3d89fe1cc9 | size: 2,349 | ext: py | lang: Python
max_stars: drawer.py @ h3nnn4n/lambada-brot (50ec98544dfb4daa36c7781492c336d2fb822c59) | licenses: ["MIT"] | count: null | events: null / null
max_issues: drawer.py @ h3nnn4n/lambada-brot (50ec98544dfb4daa36c7781492c336d2fb822c59) | licenses: ["MIT"] | count: null | events: null / null
max_forks: drawer.py @ h3nnn4n/lambada-brot (50ec98544dfb4daa36c7781492c336d2fb822c59) | licenses: ["MIT"] | count: null | events: null / null
content:
import json
from multiprocessing import Pool
import requests
ENDPOINT = "https://4j10ejf71g.execute-api.us-east-1.amazonaws.com/mandel"
def remote_mandel(request):
response = requests.post(ENDPOINT, json=request)
data = json.loads(response.content)
ix = request["ix"]
iy = request["iy"]
print(f"[{ix:>3}, {iy:>3}]: {data['duration']:>4.2f}")
return {(ix, iy): data.get("result")}
def write_ppm(filename, size, data):
with open(filename, "wt") as f:
f.write("P2\n")
f.write(f"{size} {size}\n")
f.write("256\n")
for k, v in enumerate(data):
f.write(f"{v} ")
if k % size == 0 and k > 0:
f.write("\n")
def main():
xmin = -0.742030
ymin = 0.125433
xmax = -0.744030
ymax = 0.127433
n_threads = 64
size = 1024
step_size = 64
max_iters = 1024
dx = (xmax - xmin) / (size // step_size)
dy = (ymax - ymin) / (size // step_size)
request_chunks = []
for ix in range(int(size / step_size)):
for iy in range(int(size / step_size)):
xmin_ = xmin + dx * ix
ymin_ = ymin + dy * iy
xmax_ = xmin + dx * (ix + 1)
ymax_ = ymin + dy * (iy + 1)
request = {
"ix": ix,
"iy": iy,
"xmin": xmin_,
"ymin": ymin_,
"xmax": xmax_,
"ymax": ymax_,
"size": step_size,
"max_iters": max_iters,
}
request_chunks.append(request)
image_data = {}
with Pool(n_threads) as p:
for r in p.map(remote_mandel, request_chunks):
k, v = next(iter(r.items()))
image_data[k] = v
image_pixels = []
for x in range(size):
for y in range(size):
block_ix = int(x / step_size)
block_iy = int(y / step_size)
ix = x % step_size
iy = y % step_size
pixel_index = ix + iy * step_size
pixel = image_data[(block_iy, block_ix)][pixel_index]
image_pixels.append(pixel)
max_pixel = max(image_pixels)
for i in range(size * size):
image_pixels[i] = int(image_pixels[i] / max_pixel * 256)
write_ppm("mandel.ppm", size, image_pixels)
if __name__ == "__main__":
main()
avg_line_length: 25.258065 | max_line_length: 74 | alphanum_fraction: 0.515113

hexsha: 4a1618ad8a6a434b8df56f7ecb8f2882b1259e6c | size: 315 | ext: py | lang: Python
max_stars: verde/tests/utils.py @ jobar8/verde (66020d28d3e1e3990fcaa888766ff05c62464100) | licenses: ["BSD-3-Clause"] | count: 1 | events: 2019-11-19T20:24:02.000Z / 2019-11-19T20:24:02.000Z
max_issues: verde/tests/utils.py @ lheagy/verde (24416cfc8388c6c7f3c0867dcd2ad3fdca37bb1b) | licenses: ["BSD-3-Clause"] | count: null | events: null / null
max_forks: verde/tests/utils.py @ lheagy/verde (24416cfc8388c6c7f3c0867dcd2ad3fdca37bb1b) | licenses: ["BSD-3-Clause"] | count: null | events: null / null
content:
"""
Testing utilities.
"""
import pytest
def requires_numba(function):
"""
Skip the decorated test if numba is not installed.
"""
try:
import numba
except ImportError:
numba = None
mark = pytest.mark.skipif(numba is None, reason="requires numba")
return mark(function)
avg_line_length: 18.529412 | max_line_length: 69 | alphanum_fraction: 0.64127

hexsha: 4a161aa6adffbcff13ff1b9aeef862acd97718a5 | size: 2,592 | ext: py | lang: Python
max_stars: Android/NDK/android-ndk-r20b-win/build/ldflags_to_sanitizers.py @ X018/CCTOOL (989af4d7edab82bf540400eb72eca4e7447d722c) | licenses: ["MIT"] | count: 5 | events: 2020-09-08T03:08:51.000Z / 2021-05-27T20:51:08.000Z
max_issues: Android/NDK/android-ndk-r20b-win/build/ldflags_to_sanitizers.py @ X018/CCTOOL (989af4d7edab82bf540400eb72eca4e7447d722c) | licenses: ["MIT"] | count: null | events: null / null
max_forks: Android/NDK/android-ndk-r20b-win/build/ldflags_to_sanitizers.py @ X018/CCTOOL (989af4d7edab82bf540400eb72eca4e7447d722c) | licenses: ["MIT"] | count: 3 | events: 2020-11-10T06:20:25.000Z / 2021-05-24T07:21:59.000Z
content:
#!/usr/bin/env python
#
# Copyright (C) 2018 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Determines which sanitizers should be linked based on ldflags."""
from __future__ import print_function
import sys
def sanitizers_from_args(args):
"""Returns the sanitizers enabled by a given set of ldflags."""
sanitizers = set()
for arg in args:
if arg.startswith('-fsanitize='):
sanitizer_list = arg.partition('=')[2]
sanitizers |= set(sanitizer_list.split(','))
elif arg.startswith('-fno-sanitize='):
sanitizer_list = arg.partition('=')[2]
sanitizers -= set(sanitizer_list.split(','))
return sorted(list(sanitizers))
def argv_to_module_arg_lists(args):
"""Converts module ldflags from argv format to per-module lists.
Flags are passed to us in the following format:
['global flag', '--module', 'flag1', 'flag2', '--module', 'flag 3']
These should be returned as a list for the global flags and a list of
per-module lists, i.e.:
['global flag'], [['flag1', 'flag2'], ['flag1', 'flag3']]
"""
modules = [[]]
for arg in args:
if arg == '--module':
modules.append([])
else:
modules[-1].append(arg)
return modules[0], modules[1:]
def main(argv, stream=sys.stdout):
"""Program entry point."""
# The only args we're guaranteed to see are the program name and at least
# one --module. GLOBAL_FLAGS might be empty, as might any of the
# MODULE_FLAGS sections.
if len(argv) < 2:
sys.exit(
'usage: ldflags_to_sanitizers.py [GLOBAL_FLAGS] '
'--module [MODULE_FLAGS] [--module [MODULE_FLAGS]...]')
global_flags, modules_flags = argv_to_module_arg_lists(argv[1:])
all_sanitizers = list(sanitizers_from_args(global_flags))
for module_flags in modules_flags:
all_sanitizers.extend(sanitizers_from_args(module_flags))
print(' '.join(sorted(set(all_sanitizers))), file=stream)
if __name__ == '__main__':
main(sys.argv)
avg_line_length: 35.027027 | max_line_length: 77 | alphanum_fraction: 0.665509

hexsha: 4a161ac06b4487ed78c41b606469899418f5ab80 | size: 12,769 | ext: py | lang: Python
max_stars: django_auth_adfs/backend.py @ InnopolisSport/django-auth-adfs (306efd63d41f2fe858db5ab620d7bca6976b10e3) | licenses: ["BSD-2-Clause"] | count: null | events: null / null
max_issues: django_auth_adfs/backend.py @ InnopolisSport/django-auth-adfs (306efd63d41f2fe858db5ab620d7bca6976b10e3) | licenses: ["BSD-2-Clause"] | count: null | events: null / null
max_forks: django_auth_adfs/backend.py @ InnopolisSport/django-auth-adfs (306efd63d41f2fe858db5ab620d7bca6976b10e3) | licenses: ["BSD-2-Clause"] | count: null | events: null / null
content:
import logging
import jwt
from django.contrib.auth import get_user_model
from django.contrib.auth.backends import ModelBackend
from django.contrib.auth.models import Group
from django.core.exceptions import ImproperlyConfigured, ObjectDoesNotExist, PermissionDenied
from django_auth_adfs import signals
from django_auth_adfs.config import provider_config, settings
from django_auth_adfs.exceptions import MFARequired
logger = logging.getLogger("django_auth_adfs")
class AdfsBaseBackend(ModelBackend):
def exchange_auth_code(self, authorization_code, request):
logger.debug("Received authorization code: %s", authorization_code)
data = {
'grant_type': 'authorization_code',
'client_id': settings.CLIENT_ID,
'redirect_uri': provider_config.redirect_uri(request),
'code': authorization_code,
}
if settings.CLIENT_SECRET:
data['client_secret'] = settings.CLIENT_SECRET
logger.debug("Getting access token at: %s", provider_config.token_endpoint)
response = provider_config.session.post(provider_config.token_endpoint, data, timeout=settings.TIMEOUT)
# 200 = valid token received
# 400 = 'something' is wrong in our request
if response.status_code == 400:
if response.json().get("error_description", "").startswith("AADSTS50076"):
raise MFARequired
logger.error("ADFS server returned an error: %s", response.json()["error_description"])
raise PermissionDenied
if response.status_code != 200:
logger.error("Unexpected ADFS response: %s", response.content.decode())
raise PermissionDenied
adfs_response = response.json()
return adfs_response
def validate_access_token(self, access_token):
for idx, key in enumerate(provider_config.signing_keys):
try:
# Explicitly define the verification option.
# The list below is the default the jwt module uses.
# Explicit is better then implicit and it protects against
# changes in the defaults the jwt module uses.
options = {
'verify_signature': True,
'verify_exp': True,
'verify_nbf': True,
'verify_iat': True,
'verify_aud': True,
'verify_iss': True,
'require_exp': False,
'require_iat': False,
'require_nbf': False
}
# Validate token and return claims
return jwt.decode(
access_token,
key=key,
algorithms=['RS256', 'RS384', 'RS512'],
audience=settings.AUDIENCE,
issuer=provider_config.issuer,
options=options,
leeway=settings.JWT_LEEWAY
)
except jwt.ExpiredSignatureError as error:
logger.info("Signature has expired: %s", error)
raise PermissionDenied
except jwt.DecodeError as error:
# If it's not the last certificate in the list, skip to the next one
if idx < len(provider_config.signing_keys) - 1:
continue
else:
logger.info('Error decoding signature: %s', error)
raise PermissionDenied
except jwt.InvalidTokenError as error:
logger.info(str(error))
raise PermissionDenied
def process_access_token(self, access_token, adfs_response=None):
if not access_token:
raise PermissionDenied
logger.debug("Received access token: %s", access_token)
claims = self.validate_access_token(access_token)
if (
settings.BLOCK_GUEST_USERS
and claims.get('http://schemas.microsoft.com/identity/claims/tenantid')
!= settings.TENANT_ID
):
logger.info('Guest user denied')
raise PermissionDenied
if not claims:
raise PermissionDenied
user = self.create_user(claims)
self.update_user_attributes(user, claims)
self.update_user_groups(user, claims)
self.update_user_flags(user, claims)
signals.post_authenticate.send(
sender=self,
user=user,
claims=claims,
adfs_response=adfs_response
)
user.full_clean()
user.save()
return user
def create_user(self, claims):
"""
Create the user if it doesn't exist yet
Args:
claims (dict): claims from the access token
Returns:
django.contrib.auth.models.User: A Django user
"""
# Create the user
username_claim = settings.USERNAME_CLAIM
usermodel = get_user_model()
if not claims.get(username_claim):
logger.error("User claim's doesn't have the claim '%s' in his claims: %s" % (username_claim, claims))
raise PermissionDenied
userdata = {usermodel.USERNAME_FIELD: claims[username_claim]}
try:
user = usermodel.objects.get(**userdata)
except usermodel.DoesNotExist:
if settings.CREATE_NEW_USERS:
user = usermodel.objects.create(**userdata)
logger.debug("User '%s' has been created.", claims[username_claim])
else:
logger.debug("User '%s' doesn't exist and creating users is disabled.", claims[username_claim])
raise PermissionDenied
if not user.password:
user.set_unusable_password()
return user
def update_user_attributes(self, user, claims):
"""
Updates user attributes based on the CLAIM_MAPPING setting.
Args:
user (django.contrib.auth.models.User): User model instance
claims (dict): claims from the access token
"""
required_fields = [field.name for field in user._meta.fields if field.blank is False]
for field, claim in settings.CLAIM_MAPPING.items():
if hasattr(user, field):
if claim in claims:
setattr(user, field, claims[claim])
logger.debug("Attribute '%s' for user '%s' was set to '%s'.", field, user, claims[claim])
else:
if field in required_fields:
msg = "Claim not found in access token: '{}'. Check ADFS claims mapping."
raise ImproperlyConfigured(msg.format(claim))
else:
logger.warning("Claim '%s' for user field '%s' was not found in "
"the access token for user '%s'. "
"Field is not required and will be left empty", claim, field, user)
else:
msg = "User model has no field named '{}'. Check ADFS claims mapping."
raise ImproperlyConfigured(msg.format(field))
def update_user_groups(self, user, claims):
"""
Updates user group memberships based on the GROUPS_CLAIM setting.
Args:
user (django.contrib.auth.models.User): User model instance
claims (dict): Claims from the access token
"""
if settings.GROUPS_CLAIM is not None:
# Update the user's group memberships
django_groups = [group.name for group in user.groups.all()]
if settings.GROUPS_CLAIM in claims:
claim_groups = claims[settings.GROUPS_CLAIM]
if not isinstance(claim_groups, list):
claim_groups = [claim_groups, ]
else:
logger.debug("The configured groups claim '%s' was not found in the access token",
settings.GROUPS_CLAIM)
claim_groups = []
if sorted(claim_groups) != sorted(django_groups):
existing_groups = list(Group.objects.filter(name__in=claim_groups).iterator())
existing_group_names = frozenset(group.name for group in existing_groups)
new_groups = []
if settings.MIRROR_GROUPS:
new_groups = [
Group.objects.get_or_create(name=name)[0]
for name in claim_groups
if name not in existing_group_names
]
else:
for name in claim_groups:
if name not in existing_group_names:
try:
group = Group.objects.get(name=name)
new_groups.append(group)
except ObjectDoesNotExist:
pass
user.groups.set(existing_groups + new_groups)
def update_user_flags(self, user, claims):
"""
Updates user boolean attributes based on the BOOLEAN_CLAIM_MAPPING setting.
Args:
user (django.contrib.auth.models.User): User model instance
claims (dict): Claims from the access token
"""
if settings.GROUPS_CLAIM is not None:
if settings.GROUPS_CLAIM in claims:
access_token_groups = claims[settings.GROUPS_CLAIM]
if not isinstance(access_token_groups, list):
access_token_groups = [access_token_groups, ]
else:
logger.debug("The configured group claim was not found in the access token")
access_token_groups = []
for flag, group in settings.GROUP_TO_FLAG_MAPPING.items():
if hasattr(user, flag):
if group in access_token_groups:
value = True
else:
value = False
setattr(user, flag, value)
logger.debug("Attribute '%s' for user '%s' was set to '%s'.", user, flag, value)
else:
msg = "User model has no field named '{}'. Check ADFS boolean claims mapping."
raise ImproperlyConfigured(msg.format(flag))
for field, claim in settings.BOOLEAN_CLAIM_MAPPING.items():
if hasattr(user, field):
bool_val = False
if claim in claims and str(claims[claim]).lower() in ['y', 'yes', 't', 'true', 'on', '1']:
bool_val = True
setattr(user, field, bool_val)
logger.debug('Attribute "%s" for user "%s" was set to "%s".', user, field, bool_val)
else:
msg = "User model has no field named '{}'. Check ADFS boolean claims mapping."
raise ImproperlyConfigured(msg.format(field))
class AdfsAuthCodeBackend(AdfsBaseBackend):
"""
Authentication backend to allow authenticating users against a
Microsoft ADFS server with an authorization code.
"""
def authenticate(self, request=None, authorization_code=None, **kwargs):
# If loaded data is too old, reload it again
provider_config.load_config()
# If there's no token or code, we pass control to the next authentication backend
if authorization_code is None or authorization_code == '':
logger.debug("django_auth_adfs authentication backend was called but no authorization code was received")
return
adfs_response = self.exchange_auth_code(authorization_code, request)
access_token = adfs_response["access_token"]
user = self.process_access_token(access_token, adfs_response)
return user
class AdfsAccessTokenBackend(AdfsBaseBackend):
"""
Authentication backend to allow authenticating users against a
Microsoft ADFS server with an access token retrieved by the client.
"""
def authenticate(self, request=None, access_token=None, **kwargs):
# If loaded data is too old, reload it again
provider_config.load_config()
# If there's no token or code, we pass control to the next authentication backend
if access_token is None or access_token == '':
logger.debug("django_auth_adfs authentication backend was called but no authorization code was received")
return
access_token = access_token.decode()
user = self.process_access_token(access_token)
return user
class AdfsBackend(AdfsAuthCodeBackend):
""" Backwards compatible class name """
pass
avg_line_length: 42.003289 | max_line_length: 117 | alphanum_fraction: 0.585089

hexsha: 4a161af4c07517a355f97a384d6cc3a26fc93265 | size: 9,589 | ext: py | lang: Python
max_stars: jobviewer.py @ jansende/Igor (c9ada05fdbe052e799f27de94b872858a67da3d6) | licenses: ["MIT"] | count: null | events: null / null
max_issues: jobviewer.py @ jansende/Igor (c9ada05fdbe052e799f27de94b872858a67da3d6) | licenses: ["MIT"] | count: null | events: null / null
max_forks: jobviewer.py @ jansende/Igor (c9ada05fdbe052e799f27de94b872858a67da3d6) | licenses: ["MIT"] | count: null | events: null / null
content:
try:
import readline
except ImportError:
print('You need to install readline.')
print('Under Windows use: pip install pyreadline')
raise
import cmd
import glob
import json
import os
import os.path
import socket
import subprocess
import threading
import time
import copy
from classes.job import JobInformation, getJobList
from classes.workerinformation import WorkerInformation
from classes.machineinformation import MachineInformation, getMachineList
from classes.helpers import openJSON
def cleanJobs(Directory, Worker = None):
for File in os.listdir(Directory):
if File.endswith('.json'):
try:
with openJSON(os.path.join(Directory,File), JobInformation) as Job:
if Job.Status == 'Finished':
os.remove(os.path.join(Directory,File))
except:
continue
def cleanMachines(Directory):
for File in os.listdir(Directory):
if File.endswith('.json'):
try:
with openJSON(os.path.join(Directory,File), MachineInformation) as Machine:
os.remove(os.path.join(Directory,File))
except:
continue
def printJobs(Directory, Worker = None):
JobList = getJobList(Directory, filterByWorker=Worker)
if len(JobList) == 0:
raise RuntimeError('*** runtime error: no Jobs were found')
WorkerList = set([ a.Information.Worker for a in JobList ])
StatusList = set([ a.Information.Status.lower() for a in JobList ])
for Worker in WorkerList:
print('========================================')
print('{worker:^40}'.format(worker=Worker))
print('========================================')
for Status in StatusList:
Jobs = getJobList(Directory, filterByWorker=Worker, filterByStatus=Status)
if len(Jobs) > 0:
print('--{status:-<38}'.format(status=Status.capitalize()))
for Thread in Jobs:
print(os.path.basename(Thread.File))
print('========================================')
def printMachines(Directory):
MachineList = getMachineList(Directory)
if len(MachineList) == 0:
raise RuntimeError('*** runtime error: no Workers were found')
print('========================================')
print('{title:^40}'.format(title='WORKERS'))
print('========================================')
for Machine in MachineList:
print(Machine)
print('========================================')
def printMachineInformation(File):
with openJSON(File, MachineInformation) as Machine:
print('========================================')
print('{domain:^40}'.format(domain=Machine.Domain))
print('========================================')
print('User:', Machine.CurrentUser)
print('Name:', Machine.Name)
print('IP: ', Machine.IpAddress)
print('----------------------------------------')
print('CPU: {numberofcores} Cores ({machinetype})'.format(machinetype=Machine.MachineType,numberofcores=Machine.NumberOfCores))
print('OS: {system} ({platform})'.format(system=Machine.System,platform=Machine.Platform))
print('========================================')
def printJobInformation(File):
with openJSON(File, JobInformation) as Job:
print('========================================')
print('{name:^40}'.format(name=Job.Name))
print('========================================')
print('Worker: ', Job.Worker)
print('Priority: ', Job.Priority)
print('Status: ', Job.Status)
print('----------------------------------------')
print('Script: ', Job.Script)
print('TimeOut: ', Job.TimeOut)
print('Directory:', Job.WorkingDirectory)
print('========================================')
def printInformation(File):
try:
printMachineInformation(File)
return
except:
pass
try:
printJobInformation(File)
return
except:
pass
raise FileNotFoundError('*** File error: file could not be read')
class JobViewer(cmd.Cmd):
intro = 'Welcome to the Igor JobViewer. Type help or ? to list commands.'
prompt = 'Igor>'
config_file = 'config.json'
def onecmd(self, line):
#This ensures that we can use the do_* syntax and raise exceptions without breaking the program.
#Furthermore, this means we can raise exceptions on deeper levels without implementing error handling in the do_* routines.
try:
return cmd.Cmd.onecmd(self, line)
except Exception as Error:
print(Error)
def _WorkingDirectory(self):
#Gets the JobDirectory from the configuration file.
with openJSON(self.config_file, WorkerInformation) as Worker:
return Worker.JobDirectory
def do_show(self, arg):
'''
Display an overview of all jobs
Usage: show jobs [WORKER]
WORKER ... filters jobs to display only those handled by WORKER
Display an overview of all workers
Usage: show workers
Display information for a file (regardless if worker or job)
Usage: show JSON_FILE
JSON_FILE ... job or workers file
'''
arguments = arg.split()
if len(arguments) == 0:
raise IndexError('*** Unknown syntax: ' + self.lastcmd)
if arguments[0].endswith('.json'):
printInformation(os.path.join(self._WorkingDirectory(),arguments[0]))
elif arguments[0] == 'workers':
printMachines(self._WorkingDirectory())
elif arguments[0] == 'jobs':
try:
printJobs(self._WorkingDirectory(), arguments[1])
except IndexError:
printJobs(self._WorkingDirectory())
else:
raise NotImplementedError('*** Unknown syntax: ' + self.lastcmd)
def complete_show(self, *args):
files = set(self._complete_file_names(*args, filter='*.json'))
commands = set(self._complete_show_commands(*args))
return list(commands | files)
def do_clean(self, arg):
'''
Removes all completed jobs from the job folder
Usage: clean jobs
Removes all worker information from the worker information folder
Usage: clean workers
'''
arguments = arg.split()
if len(arguments) != 1:
raise IndexError('*** Unknown syntax: ' + self.lastcmd)
if arguments[0] == 'workers':
cleanMachines(self._WorkingDirectory())
elif arguments[0] == 'jobs':
cleanJobs(self._WorkingDirectory())
else:
raise NotImplementedError('*** Unknown syntax: ' + self.lastcmd)
def complete_clean(self, *args):
#The commands are the same as for show
commands = set(self._complete_show_commands(*args))
return list(commands)
def do_redo(self, arg):
'''
Restart a given job
Usage: redo JSON_FILE
JSON_FILE ... job file
'''
arguments = arg.split()
if len(arguments) == 0:
raise IndexError('*** Unknown syntax: ' + self.lastcmd)
if arguments[0].endswith('.json'):
with openJSON(os.path.join(self._WorkingDirectory(),arguments[0]), JobInformation, 'u') as New:
New.Status = 'ToDo'
else:
raise NotImplementedError('*** Unknown syntax: ' + self.lastcmd)
def complete_redo(self, *args):
files = set(self._complete_file_names(*args, filter='*.json'))
return list(files)
def do_reassign(self, arg):
'''
Reassigns a given job to a new worker
Usage: reassign JSON_FILE WORKER
JSON_FILE ... job file
WORKER ... name of the worker the job is assigned to
'''
arguments = arg.split()
if len(arguments) == 2:
with openJSON(os.path.join(self._WorkingDirectory(),arguments[0]), JobInformation, 'u') as New:
New.Worker = arguments[1]
else:
raise NotImplementedError('*** Unknown syntax: ' + self.lastcmd)
def complete_reassign(self, *args):
files = set(self._complete_file_names(*args, filter='*.json'))
return list(files)
def do_delete(self, arg):
'''
Deletes a given job file
Usage: delete JSON_FILE
JSON_FILE ... job file
'''
arguments = arg.split()
if len(arguments) == 1:
os.remove(os.path.join(self._WorkingDirectory(),arguments[0]))
else:
raise NotImplementedError('*** Unknown syntax: ' + self.lastcmd)
def complete_delete(self, *args):
files = set(self._complete_file_names(*args, filter='*.json'))
return list(files)
def _complete_file_names(self, text, *ignored, filter):
try:
files = glob.glob(os.path.join(self._WorkingDirectory(),filter))
return [os.path.basename(a) for a in files if os.path.basename(a).startswith(text)]
except:
return []
def _complete_show_commands(self, text, *ignored):
commands = ['jobs','workers']
return [a for a in commands if a.startswith(text)]
def do_exit(self, arg):
'''Exit the JobViewer'''
return True
if __name__ == '__main__':
JobViewer().cmdloop()
avg_line_length: 40.804255 | max_line_length: 136 | alphanum_fraction: 0.564814

hexsha: 4a161cde8e51265eeeb6c569d74e0fa79114a06b | size: 20,170 | ext: py | lang: Python
max_stars: python/cohorte/repositories/java/bundles.py @ isandlaTech/cohorte-runtime (686556cdde20beba77ae202de9969be46feed5e2) | licenses: ["Apache-2.0"] | count: 6 | events: 2015-04-28T16:51:08.000Z / 2017-07-12T11:29:00.000Z
max_issues: python/cohorte/repositories/java/bundles.py @ isandlaTech/cohorte-runtime (686556cdde20beba77ae202de9969be46feed5e2) | licenses: ["Apache-2.0"] | count: 29 | events: 2015-02-24T11:11:26.000Z / 2017-08-25T08:30:18.000Z
max_forks: qualifier/deploy/cohorte-home/repo/cohorte/repositories/java/bundles.py @ isandlaTech/cohorte-devtools (9ba9021369188d2f0ad5c845ef242fd5a7097b57) | licenses: ["Apache-2.0"] | count: 1 | events: 2015-08-24T13:23:43.000Z / 2015-08-24T13:23:43.000Z
content:
#!/usr/bin/python
# -- Content-Encoding: UTF-8 --
"""
Java JARs repository utility module
**TODO:**
* Enhance API & code
:author: Thomas Calmant
:license: Apache Software License 2.0
..
Copyright 2014 isandlaTech
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# Standard library
import contextlib
import logging
import operator
import os
import zipfile
# Pelix
from pelix.utilities import to_str
from pelix.ipopo.decorators import ComponentFactory, Provides, Property, \
Invalidate, Validate
# Cohorte
import cohorte
import cohorte.repositories
from cohorte.repositories.beans import Artifact, Version
from cohorte.repositories.java.manifest import Manifest
# ------------------------------------------------------------------------------
# Bundle version
import cohorte.version
__version__=cohorte.version.__version__
# ------------------------------------------------------------------------------
# Empty bundle representing the system
SYSTEM_BUNDLE = object()
# Path of the manifest in a JAR
MANIFEST_FILE = 'META-INF/MANIFEST.MF'
# Path of the descriptions of the bundle services
BUNDLE_SERVICES_FOLDER = 'META-INF/services'
# FrameworkFactory service descriptor in the framework JAR file
FRAMEWORK_SERVICE = 'org.osgi.framework.launch.FrameworkFactory'
_logger = logging.getLogger(__name__)
# ------------------------------------------------------------------------------
class Bundle(Artifact):
"""
Represents an OSGi bundle
"""
def __init__(self, jar_file, manifest):
"""
Sets up the bundle details
:param jar_file: Path to the JAR file (can be empty, but shouldn't)
:param manifest: The parsed Manifest of the JAR file (mandatory)
:raise ValueError: Invalid manifest argument
"""
# Validate parameters
if not manifest:
raise ValueError("Manifest can't be None")
# Extract information from the manifest
self._manifest = manifest
name = self._manifest.get('Bundle-SymbolicName', '').split(';', 2)[0]
version = Version(self._manifest.get('Bundle-Version'))
# Configure the Artifact
Artifact.__init__(self, "java", name, version, jar_file)
# Store Package information
self.all_exports = \
self._manifest.extract_packages_list('Export-Package')
self.all_imports = \
self._manifest.extract_packages_list('Import-Package')
self.all_require = \
self._manifest.extract_packages_list('Require-Bundle')
def exports(self, package_name, required_version=None):
"""
Tests if the given package is exported by this bundle
:param package_name: The name of a package
:param required_version: The required version/range of this package
:return: True if the package is exported by this bundle
"""
if package_name not in self.all_exports:
return False
version = Version(self.all_exports[package_name].get('version'))
return version.matches(required_version)
def imports(self, other_bundle):
"""
        Tests if this bundle imports the given one
:return: True if this bundle imports the other one through
Require-Bundle or Import-Package
"""
if other_bundle.name in self.all_require:
# Bundle referenced with a Require-Bundle
requirement = self.all_require[other_bundle.name]
if other_bundle.version.matches(requirement.get('version')):
# The given bundle matches our requirements
return True
# Try with the packages
for package in self.all_imports:
if package in other_bundle.all_exports:
# Get the required version
requirement = self.all_imports[package].get('version')
# Parse the provided version
provided = other_bundle.all_exports[package].get('version')
if Version(provided).matches(requirement):
return True
return False
def is_fragment(self):
"""
Tests if this bundle is a fragment
:return: True if this bundle is a fragment
"""
return 'Fragment-Host' in self._manifest.entries
def get_exported_packages(self):
"""
        Retrieves the exported packages as (name, Version) pairs
        :return: A generator of (package name, Version) tuples
"""
return ((name, Version(attributes.get('version')))
for name, attributes in self.all_exports.items())
def get_manifest(self):
"""
Retrieves the parsed Manifest
"""
return self._manifest
def get_service(self, service_name):
"""
Retrieves the content of a service description file (like the
FrameworkFactory service)
:param service_name: The name of a service
:return: The content of the service description file, or None
"""
if not service_name:
return None
jar_file = zipfile.ZipFile(self.file)
try:
service_factory = jar_file.read(
'{0}/{1}'.format(BUNDLE_SERVICES_FOLDER, service_name))
service_factory = to_str(service_factory)
return service_factory.strip()
except KeyError:
# Not a framework JAR
return None
finally:
jar_file.close()
# ------------------------------------------------------------------------------
@ComponentFactory("cohorte-repository-artifacts-java-factory")
@Provides(cohorte.repositories.SERVICE_REPOSITORY_ARTIFACTS)
@Property('_language', cohorte.repositories.PROP_REPOSITORY_LANGUAGE, "java")
class OSGiBundleRepository(object):
"""
Represents a repository
"""
def __init__(self):
"""
Sets up the repository
"""
# Language (property)
self._language = None
# Name -> [Bundle]
self._bundles = {}
# File -> Bundle
self._files = {}
# Name -> [(Package Version, Bundle)]
self._packages = {}
def __contains__(self, item):
"""
Tests if the given item is in the repository
:param item: Item to be tested
:return: True if the item is in the repository
"""
if isinstance(item, Bundle):
# Test the Bundle object
return item in self._bundles.values()
elif item in self._bundles:
# Item matches a bundle name
return True
else:
# Test the file name
for name in (item, os.path.realpath(item)):
if name in self._files:
return True
# No match
return False
def __len__(self):
"""
Length of a repository <=> number of individual artifacts
"""
return sum((len(bundles) for bundles in self._bundles.values()))
def __add_bundle(self, bundle, bundle_registry=None,
package_registry=None):
"""
Adds a bundle to the given registry
:param bundle: A Bundle object
:param bundle_registry: Registry where to store the bundle
:param package_registry: Registry where to store the packages of this
bundle
"""
if bundle_registry is None:
bundle_registry = self._bundles
# Add the bundle to the dictionary
bundle_list = bundle_registry.setdefault(bundle.name, [])
bundle_list.append(bundle)
bundle_list.sort(reverse=True)
# Associate the file name with the bundle
self._files[bundle.file] = bundle
# Store exported packages
for name, version in bundle.get_exported_packages():
self.__add_package(package_registry, name, version, bundle)
def __add_package(self, registry, name, version, bundle):
"""
Adds a Java package to the given registry
:param registry: Registry where to store the package details
:param name: Name of the package
:param version: Version of the package
:param bundle: Bundle providing this package
"""
if registry is None:
registry = self._packages
package_list = registry.setdefault(name, [])
package_list.append((version, bundle))
package_list.sort(key=operator.itemgetter(0), reverse=True)
def add_file(self, filename):
"""
Adds a JAR file to the repository
:param filename: A JAR file name
"""
# Compute the real name of the JAR file
filename = os.path.realpath(filename)
# Parse the manifest
manifest = Manifest()
with contextlib.closing(zipfile.ZipFile(filename)) as jar:
manifest.parse(jar.read(MANIFEST_FILE))
# Store the bundle
self.__add_bundle(Bundle(filename, manifest))
def add_directory(self, dirname):
"""
Recursively adds all .jar bundles found in the given directory into the
repository
:param dirname: A path to a directory
"""
for root, _, filenames in os.walk(dirname, followlinks=True):
for filename in filenames:
if os.path.splitext(filename)[1] == '.jar':
fullname = os.path.join(root, filename)
try:
self.add_file(fullname)
except ValueError as ex:
_logger.warning(ex)
def clear(self):
"""
Clears the repository content
"""
self._files.clear()
self._bundles.clear()
self._packages.clear()
def filter_services(self, service):
"""
        Generator to find all bundles that declare an implementation of the
given service (in META-INF/services)
:param service: A service name
:return: A generator of bundles (can be empty)
"""
for bundles in self._bundles.values():
# Multiple bundles with the same name
for bundle in bundles:
if bundle.get_service(service) is not None:
yield bundle
def get_artifact(self, name=None, version=None, filename=None,
registry=None):
"""
Retrieves a bundle from the repository
:param name: The bundle symbolic name (mutually exclusive with
filename)
:param version: The bundle version (None or '0.0.0' for any), ignored
if filename is used
:param filename: The bundle file name (mutually exclusive with name)
:param registry: Registry where to look for the bundle
:return: The first matching bundle
:raise ValueError: If the bundle can't be found
"""
if registry is None:
registry = self._bundles
if filename:
# Use the file name (direct search)
bundle = self._files.get(filename)
if bundle:
# Found it
return bundle
for bundle_file in self._files:
# Search by file base name
if os.path.basename(bundle_file) == filename:
return self._files[bundle_file]
if not name:
# Not found by file name, and no name to look for
raise ValueError("Bundle file not found: {0}".format(filename))
if isinstance(name, Bundle):
# Got a bundle
bundle = name
if bundle in registry:
return bundle
else:
# Use the bundle symbolic name and version
name = bundle.name
version = bundle.version
matching = registry.get(name, None)
if not matching:
raise ValueError('Bundle {0} not found.'.format(name))
for bundle in matching:
if bundle.version.matches(version):
return bundle
raise ValueError('Bundle {0} not found for version {1}'
.format(name, version))
def get_language(self):
"""
Retrieves the language of the artifacts stored in this repository
"""
return self._language
def get_package(self, name, version=None,
bundle_registry=None, package_registry=None):
"""
Retrieves a bundle from the repository providing the given package
:param name: The package name
:param version: The package version (None or '0.0.0' for any)
:return: The first bundle providing the package, None if not found
"""
if bundle_registry is None:
bundle_registry = self._bundles
if package_registry is None:
package_registry = self._packages
matching = package_registry.get(name, None)
if not matching:
return None
for pkg_version, pkg_bundle in matching:
if pkg_version.matches(version):
return pkg_bundle
return None
def resolve_installation(self, bundles, system_artifacts=None,
system_packages=None):
"""
Returns all the bundles that must be installed in order to have the
given bundles resolved.
To simplify the work, the OSGi framework should be the first one in
the list.
:param bundles: A list of bundles to be resolved
:param system_packages: Packages considered available by the framework
:return: A tuple: (bundles, dependencies, missing artifacts,
missing packages)
:raise ValueError: One of the given bundles is unknown
"""
# Name -> Bundle for this resolution
local_bundles = {}
# Name -> (Version, Bundle)
local_packages = {}
# Bundle -> [Bundles]
dependencies = {}
# Missing elements
missing_bundles = set()
missing_packages = set()
# Consider system packages already installed
if system_packages:
for name in system_packages:
self.__add_package(local_packages, name, Version(None),
SYSTEM_BUNDLE)
# Consider system bundles already installed
if system_artifacts:
for artifact in system_artifacts:
if isinstance(artifact, Bundle):
# Got a bundle
self.__add_bundle(artifact, local_bundles, local_packages)
elif isinstance(artifact, Artifact):
# Got an artifact
bundle = self.get_artifact(artifact.name, artifact.version,
artifact.file)
if bundle:
self.__add_bundle(bundle, local_bundles,
local_packages)
else:
_logger.warning("Unknown system bundle: %s", artifact)
else:
_logger.warning("Unhandled system artifact: %s", artifact)
# Resolution loop
to_install = [self.get_artifact(name) for name in bundles]
i = 0
while i < len(to_install):
# Loop control
bundle = to_install[i]
i += 1
if bundle is None:
# Ignore None bundle (system bundle)
continue
# Add the current bundle
self.__add_bundle(bundle, local_bundles, local_packages)
dependencies[bundle] = []
# Resolve Require-Bundle
for required in bundle.all_require:
# Get the required version
required_version = bundle.all_require[required].get('version')
# Find the bundle
registry = None
provider = None
for registry in (local_bundles, self._bundles):
try:
provider = self.get_artifact(required,
required_version,
None, registry)
# Found one
break
except ValueError:
# Try next
pass
else:
# No provider found
missing_bundles.add(required)
continue
if provider is SYSTEM_BUNDLE:
# Nothing to do with system bundles
continue
# Store the bundle we found
dependencies[bundle].append(provider)
if registry is self._bundles:
# The provider was found in the global registry, store it
self.__add_bundle(provider,
local_bundles, local_packages)
# The new bundle will be resolved later
if provider not in to_install:
# We'll have to resolve it
to_install.append(provider)
# Resolve Import-Package
for imported in bundle.all_imports:
# Get the required version
pkg_version = bundle.all_imports[imported].get('version')
# Self-import ?
if bundle.exports(imported, pkg_version):
# Nothing to do for this package
continue
# Work only if necessary
provider = self.get_package(imported, pkg_version,
local_bundles, local_packages)
if provider:
# Found the package in the resolved bundles
if provider is not SYSTEM_BUNDLE:
dependencies[bundle].append(provider)
else:
# Find it
provider = self.get_package(imported, pkg_version,
self._bundles, self._packages)
if not provider:
# Missing
missing_packages.add(imported)
elif provider is not SYSTEM_BUNDLE:
# Store the bundle
self.__add_bundle(provider,
local_bundles, local_packages)
# Store the dependency
dependencies[bundle].append(provider)
if provider not in to_install:
# We'll have to resolve it
to_install.append(provider)
return to_install, dependencies, missing_bundles, missing_packages
def walk(self):
"""
        Walk through the known artifacts
"""
for bundles in self._bundles.values():
for bundle in bundles:
yield bundle
@Validate
def validate(self, context):
"""
Component validated
"""
# Home/Base repository
for key in (cohorte.PROP_BASE, cohorte.PROP_HOME):
repository = os.path.join(context.get_property(key), "repo")
self.add_directory(repository)
@Invalidate
def invalidate(self, context):
"""
Component invalidated
"""
self.clear()
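# A minimal usage sketch of the repository above; the directory path and the
# bundle symbolic names are hypothetical. resolve_installation() returns the
# bundles to install, their dependency map and the missing bundles/packages.
if __name__ == '__main__':
    repo = OSGiBundleRepository()
    repo.add_directory('/opt/cohorte/repo')
    to_install, dependencies, missing_bundles, missing_packages = \
        repo.resolve_installation(['org.apache.felix.framework',
                                   'org.example.demo'])
    for resolved in to_install:
        print("{0} {1}".format(resolved.name, resolved.version))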
| 33.842282
| 80
| 0.564155
|
4a161d90865a00dee99364f1939e33fd4038fcf4
| 1,441
|
py
|
Python
|
CTFd/admin/pages.py
|
xmsec/LanCTFD
|
77659cca2aae4ab68cf25ddb5a7cbe63f597a9af
|
[
"Apache-2.0"
] | 3
|
2020-05-13T14:02:11.000Z
|
2022-03-12T08:09:34.000Z
|
CTFd/admin/pages.py
|
xmsec/LanCTFD
|
77659cca2aae4ab68cf25ddb5a7cbe63f597a9af
|
[
"Apache-2.0"
] | 6
|
2019-01-26T15:06:07.000Z
|
2019-02-11T01:48:20.000Z
|
CTFd/admin/pages.py
|
xmsec/LanCTFD
|
77659cca2aae4ab68cf25ddb5a7cbe63f597a9af
|
[
"Apache-2.0"
] | 4
|
2019-08-01T02:16:44.000Z
|
2022-03-12T08:09:35.000Z
|
from flask import current_app as app, render_template, request, redirect, jsonify, url_for, Blueprint
from CTFd.utils.decorators import admins_only
from CTFd.models import db, Teams, Solves, Awards, Challenges, Fails, Flags, Tags, Files, Tracking, Pages, Configs
from CTFd.schemas.pages import PageSchema
from CTFd.utils import config, validators, markdown, uploads
from CTFd.cache import cache
from CTFd.admin import admin
@admin.route('/admin/pages')
@admins_only
def pages_listing():
pages = Pages.query.all()
return render_template('admin/pages.html', pages=pages)
@admin.route('/admin/pages/new')
@admins_only
def pages_new():
return render_template('admin/editor.html')
@admin.route('/admin/pages/preview', methods=['POST'])
@admins_only
def pages_preview():
data = request.form.to_dict()
schema = PageSchema()
page = schema.load(data)
return render_template('page.html', content=markdown(page.data.content))
@admin.route('/admin/pages/<int:page_id>')
@admins_only
def pages_detail(page_id):
page = Pages.query.filter_by(id=page_id).first_or_404()
page_op = request.args.get('operation')
if request.method == 'GET' and page_op == 'preview':
return render_template('page.html', content=markdown(page.content))
if request.method == 'GET' and page_op == 'create':
return render_template('admin/editor.html')
return render_template('admin/editor.html', page=page)
| 32.022222
| 114
| 0.734906
|
4a161e33023aa212516d3fa34d0aece3fbdf6d78
| 24,765
|
py
|
Python
|
salt/states/cron.py
|
xiaowei582648206/saltx
|
1d17b030b973ce5422e0fbe7e17c98c7ca91c49b
|
[
"Apache-2.0"
] | 1
|
2022-02-09T06:40:14.000Z
|
2022-02-09T06:40:14.000Z
|
salt/states/cron.py
|
xiaowei582648206/saltx
|
1d17b030b973ce5422e0fbe7e17c98c7ca91c49b
|
[
"Apache-2.0"
] | null | null | null |
salt/states/cron.py
|
xiaowei582648206/saltx
|
1d17b030b973ce5422e0fbe7e17c98c7ca91c49b
|
[
"Apache-2.0"
] | 4
|
2020-11-04T06:28:05.000Z
|
2022-02-09T10:54:49.000Z
|
# -*- coding: utf-8 -*-
'''
Management of cron, the Unix command scheduler
==============================================
Cron declarations require a number of parameters. The following are the
parameters used by Salt to define the various timing values for a cron job:
* ``minute``
* ``hour``
* ``daymonth``
* ``month``
* ``dayweek`` (0 to 6 are Sunday through Saturday, 7 can also be used for
Sunday)
.. warning::
Any timing arguments not specified take a value of ``*``. This means that
setting ``hour`` to ``5``, while not defining the ``minute`` param, will
result in Salt adding a job that will execute every minute between 5 and 6
A.M.!
Additionally, the default user for these states is ``root``. Therefore, if
the cron job is for another user, it is necessary to specify that user with
the ``user`` parameter.
A long time ago (before 2014.2), when making changes to an existing cron job,
the name declaration was the parameter used to uniquely identify the job,
so if an existing cron job that looks like this:
.. code-block:: yaml
date > /tmp/crontest:
cron.present:
- user: root
- minute: 5
Is changed to this:
.. code-block:: yaml
date > /tmp/crontest:
cron.present:
- user: root
- minute: 7
- hour: 2
Then the existing cron will be updated, but if the cron command is changed,
then a new cron job will be added to the user's crontab.
The current behavior still relies on that mechanism, but you can also
specify an identifier to identify your cron entries:
.. code-block:: yaml
date > /tmp/crontest:
cron.present:
- identifier: SUPERCRON
- user: root
- minute: 7
- hour: 2
.. versionadded:: 2014.1.2
And, some months later, you modify it:
.. code-block:: yaml
superscript > /tmp/crontest:
cron.present:
- identifier: SUPERCRON
- user: root
- minute: 3
- hour: 4
.. versionadded:: 2014.1.2
The old **date > /tmp/crontest** will be replaced by
**superscript > /tmp/crontest**.
Salt also supports running a cron job every ``x minutes``, much like the Unix
convention of using ``*/5`` to have a job run every five minutes. In Salt, this
looks like:
.. code-block:: yaml
date > /tmp/crontest:
cron.present:
- user: root
- minute: '*/5'
The job will now run every 5 minutes.
Additionally, the temporal parameters (minute, hour, etc.) can be randomized by
using ``random`` instead of using a specific value. For example, by using the
``random`` keyword in the ``minute`` parameter of a cron state, the same cron
job can be pushed to hundreds or thousands of hosts, and they would each use a
randomly-generated minute. This can be helpful when the cron job accesses a
network resource, and it is not desirable for all hosts to run the job
concurrently.
.. code-block:: yaml
/path/to/cron/script:
cron.present:
- user: root
- minute: random
- hour: 2
.. versionadded:: 0.16.0
Since Salt assumes a value of ``*`` for unspecified temporal parameters, adding
a parameter to the state and setting it to ``random`` will change that value
from ``*`` to a randomized numeric value. However, if that field in the cron
entry on the minion already contains a numeric value, then using the ``random``
keyword will not modify it.
A job can also be set with a special keyword such as '@reboot' or
'@hourly'. Quotes must be used, otherwise PyYAML will strip the '@' sign.
.. code-block:: yaml
/path/to/cron/script:
cron.present:
- user: root
- special: '@hourly'
The script will be executed on the given special schedule if the cron daemon supports this option.
.. code-block:: yaml
/path/to/cron/otherscript:
cron.absent:
- user: root
- special: '@daily'
This counterpart definition will ensure that a job with a special keyword
is not set.
'''
from __future__ import absolute_import
# Import python libs
import os
# Import salt libs
import salt.utils
import salt.utils.files
from salt.modules.cron import (
_needs_change,
_cron_matched
)
def __virtual__():
if 'cron.list_tab' in __salt__:
return True
else:
return (False, 'cron module could not be loaded')
def _check_cron(user,
cmd,
minute=None,
hour=None,
daymonth=None,
month=None,
dayweek=None,
comment=None,
commented=None,
identifier=None,
special=None):
'''
Return the changes
'''
if minute is not None:
minute = str(minute).lower()
if hour is not None:
hour = str(hour).lower()
if daymonth is not None:
daymonth = str(daymonth).lower()
if month is not None:
month = str(month).lower()
if dayweek is not None:
dayweek = str(dayweek).lower()
if identifier is not None:
identifier = str(identifier)
if commented is not None:
commented = commented is True
if cmd is not None:
cmd = str(cmd)
lst = __salt__['cron.list_tab'](user)
if special is None:
for cron in lst['crons']:
if _cron_matched(cron, cmd, identifier):
if any([_needs_change(x, y) for x, y in
((cron['minute'], minute), (cron['hour'], hour),
(cron['daymonth'], daymonth), (cron['month'], month),
(cron['dayweek'], dayweek), (cron['identifier'], identifier),
(cron['cmd'], cmd), (cron['comment'], comment),
(cron['commented'], commented))]):
return 'update'
return 'present'
else:
for cron in lst['special']:
if _cron_matched(cron, cmd, identifier):
if any([_needs_change(x, y) for x, y in
((cron['spec'], special),
(cron['identifier'], identifier),
(cron['cmd'], cmd),
(cron['comment'], comment),
(cron['commented'], commented))]):
return 'update'
return 'present'
return 'absent'
def _check_cron_env(user,
name,
value=None):
'''
Return the environment changes
'''
if value is None:
value = "" # Matching value set in salt.modules.cron._render_tab
lst = __salt__['cron.list_tab'](user)
for env in lst['env']:
if name == env['name']:
if value != env['value']:
return 'update'
return 'present'
return 'absent'
def _get_cron_info():
'''
Returns the proper group owner and path to the cron directory
'''
owner = 'root'
if __grains__['os'] == 'FreeBSD':
group = 'wheel'
crontab_dir = '/var/cron/tabs'
elif __grains__['os'] == 'OpenBSD':
group = 'crontab'
crontab_dir = '/var/cron/tabs'
elif __grains__['os_family'] == 'Solaris':
group = 'root'
crontab_dir = '/var/spool/cron/crontabs'
elif __grains__['os'] == 'MacOS':
group = 'wheel'
crontab_dir = '/usr/lib/cron/tabs'
else:
group = 'root'
crontab_dir = '/var/spool/cron'
return owner, group, crontab_dir
def present(name,
user='root',
minute='*',
hour='*',
daymonth='*',
month='*',
dayweek='*',
comment=None,
commented=False,
identifier=False,
special=None):
'''
Verifies that the specified cron job is present for the specified user.
For more advanced information about what exactly can be set in the cron
timing parameters, check your cron system's documentation. Most Unix-like
systems' cron documentation can be found via the crontab man page:
``man 5 crontab``.
name
The command that should be executed by the cron job.
user
The name of the user whose crontab needs to be modified, defaults to
the root user
minute
        The information to be set in the minute section; this can be any
        string supported by your cron system's minute field. Default is
``*``
hour
The information to be set in the hour section. Default is ``*``
daymonth
The information to be set in the day of month section. Default is ``*``
month
The information to be set in the month section. Default is ``*``
dayweek
The information to be set in the day of week section. Default is ``*``
comment
        User comment to be added on the line preceding the cron job
commented
The cron job is set commented (prefixed with ``#DISABLED#``).
Defaults to False.
.. versionadded:: 2016.3.0
identifier
Custom-defined identifier for tracking the cron line for future crontab
edits. This defaults to the state name
special
        A special keyword to specify periodicity (e.g. @reboot, @hourly, ...).
Quotes must be used, otherwise PyYAML will strip the '@' sign.
.. versionadded:: 2016.3.0
'''
name = name.strip()
if identifier is False:
identifier = name
ret = {'changes': {},
'comment': '',
'name': name,
'result': True}
if __opts__['test']:
status = _check_cron(user,
cmd=name,
minute=minute,
hour=hour,
daymonth=daymonth,
month=month,
dayweek=dayweek,
comment=comment,
commented=commented,
identifier=identifier,
special=special)
ret['result'] = None
if status == 'absent':
ret['comment'] = 'Cron {0} is set to be added'.format(name)
elif status == 'present':
ret['result'] = True
ret['comment'] = 'Cron {0} already present'.format(name)
elif status == 'update':
ret['comment'] = 'Cron {0} is set to be updated'.format(name)
return ret
if special is None:
data = __salt__['cron.set_job'](user=user,
minute=minute,
hour=hour,
daymonth=daymonth,
month=month,
dayweek=dayweek,
cmd=name,
comment=comment,
commented=commented,
identifier=identifier)
else:
data = __salt__['cron.set_special'](user=user,
special=special,
cmd=name,
comment=comment,
commented=commented,
identifier=identifier)
if data == 'present':
ret['comment'] = 'Cron {0} already present'.format(name)
return ret
if data == 'new':
ret['comment'] = 'Cron {0} added to {1}\'s crontab'.format(name, user)
ret['changes'] = {user: name}
return ret
if data == 'updated':
ret['comment'] = 'Cron {0} updated'.format(name)
ret['changes'] = {user: name}
return ret
ret['comment'] = ('Cron {0} for user {1} failed to commit with error \n{2}'
.format(name, user, data))
ret['result'] = False
return ret
def absent(name,
user='root',
identifier=False,
special=None,
**kwargs):
'''
Verifies that the specified cron job is absent for the specified user; only
the name is matched when removing a cron job.
name
The command that should be absent in the user crontab.
user
The name of the user whose crontab needs to be modified, defaults to
the root user
identifier
Custom-defined identifier for tracking the cron line for future crontab
edits. This defaults to the state name
special
        The special keyword used in the job (e.g. @reboot, @hourly, ...).
Quotes must be used, otherwise PyYAML will strip the '@' sign.
'''
### NOTE: The keyword arguments in **kwargs are ignored in this state, but
### cannot be removed from the function definition, otherwise the use
### of unsupported arguments will result in a traceback.
name = name.strip()
if identifier is False:
identifier = name
ret = {'name': name,
'result': True,
'changes': {},
'comment': ''}
if __opts__['test']:
status = _check_cron(user, name, identifier=identifier)
ret['result'] = None
if status == 'absent':
ret['result'] = True
ret['comment'] = 'Cron {0} is absent'.format(name)
elif status == 'present' or status == 'update':
ret['comment'] = 'Cron {0} is set to be removed'.format(name)
return ret
if special is None:
data = __salt__['cron.rm_job'](user, name, identifier=identifier)
else:
data = __salt__['cron.rm_special'](user, name, special=special, identifier=identifier)
if data == 'absent':
ret['comment'] = "Cron {0} already absent".format(name)
return ret
if data == 'removed':
ret['comment'] = ("Cron {0} removed from {1}'s crontab"
.format(name, user))
ret['changes'] = {user: name}
return ret
ret['comment'] = ("Cron {0} for user {1} failed to commit with error {2}"
.format(name, user, data))
ret['result'] = False
return ret
def file(name,
source_hash='',
source_hash_name=None,
user='root',
template=None,
context=None,
replace=True,
defaults=None,
backup='',
**kwargs):
'''
Provides file.managed-like functionality (templating, etc.) for a pre-made
crontab file, to be assigned to a given user.
name
The source file to be used as the crontab. This source file can be
hosted on either the salt master server, or on an HTTP or FTP server.
For files hosted on the salt file server, if the file is located on
the master in the directory named spam, and is called eggs, the source
string is ``salt://spam/eggs``
        If the file is hosted on an HTTP or FTP server then the source_hash
argument is also required
source_hash
This can be either a file which contains a source hash string for
the source, or a source hash string. The source hash string is the
hash algorithm followed by the hash of the file:
``md5=e138491e9d5b97023cea823fe17bac22``
source_hash_name
When ``source_hash`` refers to a hash file, Salt will try to find the
correct hash by matching the filename/URI associated with that hash. By
default, Salt will look for the filename being managed. When managing a
file at path ``/tmp/foo.txt``, then the following line in a hash file
would match:
.. code-block:: text
acbd18db4cc2f85cedef654fccc4a4d8 foo.txt
However, sometimes a hash file will include multiple similar paths:
.. code-block:: text
37b51d194a7513e45b56f6524f2d51f2 ./dir1/foo.txt
acbd18db4cc2f85cedef654fccc4a4d8 ./dir2/foo.txt
73feffa4b7f6bb68e44cf984c85f6e88 ./dir3/foo.txt
In cases like this, Salt may match the incorrect hash. This argument
can be used to tell Salt which filename to match, to ensure that the
correct hash is identified. For example:
.. code-block:: yaml
foo_crontab:
cron.file:
- name: https://mydomain.tld/dir2/foo.txt
- source_hash: https://mydomain.tld/hashes
- source_hash_name: ./dir2/foo.txt
.. note::
This argument must contain the full filename entry from the
checksum file, as this argument is meant to disambiguate matches
for multiple files that have the same basename. So, in the
example above, simply using ``foo.txt`` would not match.
.. versionadded:: 2016.3.5
user
The user to whom the crontab should be assigned. This defaults to
root.
template
If this setting is applied then the named templating engine will be
used to render the downloaded file. Currently, jinja and mako are
supported.
context
Overrides default context variables passed to the template.
replace
        Whether the crontab should be replaced; if False, this state will be
        ignored when a crontab already exists for the specified user. Default is True.
defaults
Default context passed to the template.
backup
Overrides the default backup mode for the user's crontab.
'''
# Initial set up
mode = '0600'
try:
group = __salt__['user.info'](user)['groups'][0]
except Exception:
ret = {'changes': {},
'comment': "Could not identify group for user {0}".format(user),
'name': name,
'result': False}
return ret
cron_path = salt.utils.files.mkstemp()
with salt.utils.fopen(cron_path, 'w+') as fp_:
raw_cron = __salt__['cron.raw_cron'](user)
if not raw_cron.endswith('\n'):
raw_cron = "{0}\n".format(raw_cron)
fp_.write(raw_cron)
ret = {'changes': {},
'comment': '',
'name': name,
'result': True}
# Avoid variable naming confusion in below module calls, since ID
# declaration for this state will be a source URI.
source = name
if not replace and os.stat(cron_path).st_size > 0:
ret['comment'] = 'User {0} already has a crontab. No changes ' \
'made'.format(user)
os.unlink(cron_path)
return ret
if __opts__['test']:
fcm = __salt__['file.check_managed'](cron_path,
source,
source_hash,
source_hash_name,
user,
group,
mode,
template,
context,
defaults,
__env__,
**kwargs
)
ret['result'], ret['comment'] = fcm
os.unlink(cron_path)
return ret
# If the source is a list then find which file exists
source, source_hash = __salt__['file.source_list'](source,
source_hash,
__env__)
# Gather the source file from the server
try:
sfn, source_sum, comment = __salt__['file.get_managed'](
cron_path,
template,
source,
source_hash,
source_hash_name,
user,
group,
mode,
__env__,
context,
defaults,
False, # skip_verify
**kwargs
)
except Exception as exc:
ret['result'] = False
ret['changes'] = {}
ret['comment'] = 'Unable to manage file: {0}'.format(exc)
return ret
if comment:
ret['comment'] = comment
ret['result'] = False
os.unlink(cron_path)
return ret
try:
ret = __salt__['file.manage_file'](
cron_path,
sfn,
ret,
source,
source_sum,
user,
group,
mode,
__env__,
backup
)
except Exception as exc:
ret['result'] = False
ret['changes'] = {}
ret['comment'] = 'Unable to manage file: {0}'.format(exc)
return ret
cron_ret = None
if "diff" in ret['changes']:
cron_ret = __salt__['cron.write_cron_file_verbose'](user, cron_path)
# Check cmd return code and show success or failure
if cron_ret['retcode'] == 0:
ret['comment'] = 'Crontab for user {0} was updated'.format(user)
ret['result'] = True
ret['changes'] = ret['changes']['diff']
else:
ret['comment'] = 'Unable to update user {0} crontab {1}.' \
' Error: {2}'.format(user, cron_path, cron_ret['stderr'])
ret['result'] = False
ret['changes'] = {}
elif ret['result']:
ret['comment'] = 'Crontab for user {0} is in the correct ' \
'state'.format(user)
ret['changes'] = {}
os.unlink(cron_path)
return ret
def env_present(name,
value=None,
user='root'):
'''
Verifies that the specified environment variable is present in the crontab
for the specified user.
name
The name of the environment variable to set in the user crontab
user
The name of the user whose crontab needs to be modified, defaults to
the root user
value
The value to set for the given environment variable
'''
ret = {'changes': {},
'comment': '',
'name': name,
'result': True}
if __opts__['test']:
status = _check_cron_env(user, name, value=value)
ret['result'] = None
if status == 'absent':
ret['comment'] = 'Cron env {0} is set to be added'.format(name)
elif status == 'present':
ret['result'] = True
ret['comment'] = 'Cron env {0} already present'.format(name)
elif status == 'update':
ret['comment'] = 'Cron env {0} is set to be updated'.format(name)
return ret
data = __salt__['cron.set_env'](user, name, value=value)
if data == 'present':
ret['comment'] = 'Cron env {0} already present'.format(name)
return ret
if data == 'new':
ret['comment'] = 'Cron env {0} added to {1}\'s crontab'.format(name, user)
ret['changes'] = {user: name}
return ret
if data == 'updated':
ret['comment'] = 'Cron env {0} updated'.format(name)
ret['changes'] = {user: name}
return ret
ret['comment'] = ('Cron env {0} for user {1} failed to commit with error \n{2}'
.format(name, user, data))
ret['result'] = False
return ret
def env_absent(name,
user='root'):
'''
Verifies that the specified environment variable is absent from the crontab
for the specified user
name
The name of the environment variable to remove from the user crontab
user
The name of the user whose crontab needs to be modified, defaults to
the root user
'''
name = name.strip()
ret = {'name': name,
'result': True,
'changes': {},
'comment': ''}
if __opts__['test']:
status = _check_cron_env(user, name)
ret['result'] = None
if status == 'absent':
ret['result'] = True
ret['comment'] = 'Cron env {0} is absent'.format(name)
elif status == 'present' or status == 'update':
ret['comment'] = 'Cron env {0} is set to be removed'.format(name)
return ret
data = __salt__['cron.rm_env'](user, name)
if data == 'absent':
ret['comment'] = "Cron env {0} already absent".format(name)
return ret
if data == 'removed':
ret['comment'] = ("Cron env {0} removed from {1}'s crontab"
.format(name, user))
ret['changes'] = {user: name}
return ret
ret['comment'] = ("Cron env {0} for user {1} failed to commit with error {2}"
.format(name, user, data))
ret['result'] = False
return ret
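# A standalone sketch of the decision logic used by _check_cron_env() above.
# The entries below assume the shape returned by ``cron.list_tab()['env']``
# (a list of {'name': ..., 'value': ...} dicts); the example values are
# illustrative only.
def _env_status_sketch(existing_env, name, value=None):
    if value is None:
        value = ''  # matches the default used by salt.modules.cron._render_tab
    for env in existing_env:
        if env['name'] == name:
            return 'present' if env['value'] == value else 'update'
    return 'absent'
if __name__ == '__main__':
    print(_env_status_sketch([{'name': 'MAILTO', 'value': 'root'}],
                             'MAILTO', 'ops@example.com'))  # 'update'
    print(_env_status_sketch([], 'PATH', '/usr/bin'))        # 'absent'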
| 32.457405
| 94
| 0.547264
|
4a161e8f3410bacaf7d7ceac4abf9ee0e8a4b1d2
| 411
|
py
|
Python
|
batch_convert.py
|
mathfreak231/pbrchase
|
b50d5aa6ccf7366c3b605bb6a5b31e3fca54cd0f
|
[
"MIT"
] | 9
|
2015-07-19T05:37:59.000Z
|
2020-10-02T20:26:11.000Z
|
batch_convert.py
|
mathfreak231/pbrchase
|
b50d5aa6ccf7366c3b605bb6a5b31e3fca54cd0f
|
[
"MIT"
] | 8
|
2015-07-19T23:52:14.000Z
|
2020-06-05T01:45:28.000Z
|
batch_convert.py
|
mathfreak231/pbrchase
|
b50d5aa6ccf7366c3b605bb6a5b31e3fca54cd0f
|
[
"MIT"
] | 7
|
2016-03-21T03:37:45.000Z
|
2021-12-22T10:26:54.000Z
|
import glob
import subprocess
import os.path
def main():
for filename in glob.glob('sounds/*/*.wav'):
output_filename = os.path.splitext(filename)[0] + '.ogg'
subprocess.check_call(['sox', filename, output_filename])
output_filename = os.path.splitext(filename)[0] + '.mp3'
subprocess.check_call(['sox', filename, output_filename])
if __name__ == '__main__':
main()
| 24.176471
| 65
| 0.659367
|
4a16203d568097184063f2c90af08e28ab0362d8
| 754
|
py
|
Python
|
tz_detect/views.py
|
ben-hoover/django-tz-detect
|
91450a2aaef608106a6265b4a53d12372db030d5
|
[
"MIT"
] | null | null | null |
tz_detect/views.py
|
ben-hoover/django-tz-detect
|
91450a2aaef608106a6265b4a53d12372db030d5
|
[
"MIT"
] | null | null | null |
tz_detect/views.py
|
ben-hoover/django-tz-detect
|
91450a2aaef608106a6265b4a53d12372db030d5
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from django.http import HttpResponse
from django.views.generic import View
class SetOffsetView(View):
http_method_names = ['post']
def post(self, request, *args, **kwargs):
tz = request.POST.get('tz', None)
if not tz:
offset = request.POST.get('offset', None)
if not offset:
return HttpResponse("No 'offset' parameter provided", status=400)
try:
offset = int(offset)
except ValueError:
return HttpResponse("Invalid 'offset' value provided", status=400)
request.session['detected_tz'] = int(offset)
else:
request.session['detected_tz'] = tz
return HttpResponse("OK")
| 31.416667
| 82
| 0.582228
|
4a162047fbfae8b1ca98a08632d1010b3dc69c02
| 6,490
|
py
|
Python
|
pybind/nos/v6_0_2c/rbridge_id/router/router_bgp/address_family/ipv4/ipv4_unicast/default_vrf/neighbor/af_ipv4_neighbor_peergroup_holder/af_ipv4_neighbor_peergroup/af_neighbor_capability/__init__.py
|
shivharis/pybind
|
4e1c6d54b9fd722ccec25546ba2413d79ce337e6
|
[
"Apache-2.0"
] | null | null | null |
pybind/nos/v6_0_2c/rbridge_id/router/router_bgp/address_family/ipv4/ipv4_unicast/default_vrf/neighbor/af_ipv4_neighbor_peergroup_holder/af_ipv4_neighbor_peergroup/af_neighbor_capability/__init__.py
|
shivharis/pybind
|
4e1c6d54b9fd722ccec25546ba2413d79ce337e6
|
[
"Apache-2.0"
] | null | null | null |
pybind/nos/v6_0_2c/rbridge_id/router/router_bgp/address_family/ipv4/ipv4_unicast/default_vrf/neighbor/af_ipv4_neighbor_peergroup_holder/af_ipv4_neighbor_peergroup/af_neighbor_capability/__init__.py
|
shivharis/pybind
|
4e1c6d54b9fd722ccec25546ba2413d79ce337e6
|
[
"Apache-2.0"
] | 1
|
2021-11-05T22:15:42.000Z
|
2021-11-05T22:15:42.000Z
|
from operator import attrgetter
import pyangbind.lib.xpathhelper as xpathhelper
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType, RestrictedClassType, TypedListType
from pyangbind.lib.yangtypes import YANGBool, YANGListType, YANGDynClass, ReferenceType
from pyangbind.lib.base import PybindBase
from decimal import Decimal
from bitarray import bitarray
import __builtin__
import orf
class af_neighbor_capability(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module brocade-rbridge - based on the path /rbridge-id/router/router-bgp/address-family/ipv4/ipv4-unicast/default-vrf/neighbor/af-ipv4-neighbor-peergroup-holder/af-ipv4-neighbor-peergroup/af-neighbor-capability. Each member element of
the container is represented as a class variable - with a specific
YANG type.
"""
__slots__ = ('_pybind_generated_by', '_path_helper', '_yang_name', '_rest_name', '_extmethods', '__orf',)
_yang_name = 'af-neighbor-capability'
_rest_name = 'capability'
_pybind_generated_by = 'container'
def __init__(self, *args, **kwargs):
path_helper_ = kwargs.pop("path_helper", None)
if path_helper_ is False:
self._path_helper = False
elif path_helper_ is not None and isinstance(path_helper_, xpathhelper.YANGPathHelper):
self._path_helper = path_helper_
elif hasattr(self, "_parent"):
path_helper_ = getattr(self._parent, "_path_helper", False)
self._path_helper = path_helper_
else:
self._path_helper = False
extmethods = kwargs.pop("extmethods", None)
if extmethods is False:
self._extmethods = False
elif extmethods is not None and isinstance(extmethods, dict):
self._extmethods = extmethods
elif hasattr(self, "_parent"):
extmethods = getattr(self._parent, "_extmethods", None)
self._extmethods = extmethods
else:
self._extmethods = False
self.__orf = YANGDynClass(base=orf.orf, is_container='container', presence=False, yang_name="orf", rest_name="orf", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Advertise ORF capability to the peer', u'cli-incomplete-no': None, u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='container', is_config=True)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path()+[self._yang_name]
else:
return [u'rbridge-id', u'router', u'router-bgp', u'address-family', u'ipv4', u'ipv4-unicast', u'default-vrf', u'neighbor', u'af-ipv4-neighbor-peergroup-holder', u'af-ipv4-neighbor-peergroup', u'af-neighbor-capability']
def _rest_path(self):
if hasattr(self, "_parent"):
if self._rest_name:
return self._parent._rest_path()+[self._rest_name]
else:
return self._parent._rest_path()
else:
return [u'rbridge-id', u'router', u'bgp', u'address-family', u'ipv4', u'unicast', u'neighbor', u'af-ipv4-neighbor-peergroup', u'capability']
def _get_orf(self):
"""
Getter method for orf, mapped from YANG variable /rbridge_id/router/router_bgp/address_family/ipv4/ipv4_unicast/default_vrf/neighbor/af_ipv4_neighbor_peergroup_holder/af_ipv4_neighbor_peergroup/af_neighbor_capability/orf (container)
"""
return self.__orf
def _set_orf(self, v, load=False):
"""
Setter method for orf, mapped from YANG variable /rbridge_id/router/router_bgp/address_family/ipv4/ipv4_unicast/default_vrf/neighbor/af_ipv4_neighbor_peergroup_holder/af_ipv4_neighbor_peergroup/af_neighbor_capability/orf (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_orf is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_orf() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=orf.orf, is_container='container', presence=False, yang_name="orf", rest_name="orf", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Advertise ORF capability to the peer', u'cli-incomplete-no': None, u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """orf must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=orf.orf, is_container='container', presence=False, yang_name="orf", rest_name="orf", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Advertise ORF capability to the peer', u'cli-incomplete-no': None, u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='container', is_config=True)""",
})
self.__orf = t
if hasattr(self, '_set'):
self._set()
def _unset_orf(self):
self.__orf = YANGDynClass(base=orf.orf, is_container='container', presence=False, yang_name="orf", rest_name="orf", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Advertise ORF capability to the peer', u'cli-incomplete-no': None, u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='container', is_config=True)
orf = __builtin__.property(_get_orf, _set_orf)
_pyangbind_elements = {'orf': orf, }
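# A minimal usage sketch; it assumes the generated sibling ``orf`` class can be
# instantiated with no arguments, which is the usual convention for pyangbind
# containers.
if __name__ == '__main__':
  cap = af_neighbor_capability()
  cap._set_orf(orf.orf())   # backends populate the container via the setter
  print(cap._path())        # YANG path components for this container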
| 52.33871
| 490
| 0.720339
|
4a162362c6b6f05422a273caeb1a57dcc3b2fad0
| 31,377
|
py
|
Python
|
tests/template_tests/syntax_tests/i18n/test_blocktranslate.py
|
jpmallarino/django
|
659d2421c7adbbcd205604002d521d82d6b0b465
|
[
"BSD-3-Clause",
"0BSD"
] | 16
|
2019-08-10T12:24:06.000Z
|
2020-05-21T09:11:14.000Z
|
tests/template_tests/syntax_tests/i18n/test_blocktranslate.py
|
jpmallarino/django
|
659d2421c7adbbcd205604002d521d82d6b0b465
|
[
"BSD-3-Clause",
"0BSD"
] | 12
|
2019-08-10T11:55:29.000Z
|
2020-05-21T04:46:30.000Z
|
tests/template_tests/syntax_tests/i18n/test_blocktranslate.py
|
jpmallarino/django
|
659d2421c7adbbcd205604002d521d82d6b0b465
|
[
"BSD-3-Clause",
"0BSD"
] | 3
|
2019-08-20T13:29:34.000Z
|
2020-01-30T22:05:10.000Z
|
import inspect
import os
from functools import partial, wraps
from asgiref.local import Local
from django.template import Context, Template, TemplateSyntaxError
from django.template.base import Token, TokenType
from django.templatetags.i18n import BlockTranslateNode
from django.test import SimpleTestCase, override_settings
from django.utils import translation
from django.utils.safestring import mark_safe
from django.utils.translation import trans_real
from ...utils import setup as base_setup
from .base import MultipleLocaleActivationTestCase, extended_locale_paths, here
def setup(templates, *args, **kwargs):
blocktranslate_setup = base_setup(templates, *args, **kwargs)
blocktrans_setup = base_setup(
{
name: template.replace("{% blocktranslate ", "{% blocktrans ").replace(
"{% endblocktranslate %}", "{% endblocktrans %}"
)
for name, template in templates.items()
}
)
tags = {
"blocktrans": blocktrans_setup,
"blocktranslate": blocktranslate_setup,
}
def decorator(func):
@wraps(func)
def inner(self, *args):
signature = inspect.signature(func)
for tag_name, setup_func in tags.items():
if "tag_name" in signature.parameters:
setup_func(partial(func, tag_name=tag_name))(self)
else:
setup_func(func)(self)
return inner
return decorator
class I18nBlockTransTagTests(SimpleTestCase):
libraries = {"i18n": "django.templatetags.i18n"}
@setup(
{
"i18n03": (
"{% load i18n %}{% blocktranslate %}{{ anton }}{% endblocktranslate %}"
)
}
)
def test_i18n03(self):
"""simple translation of a variable"""
output = self.engine.render_to_string("i18n03", {"anton": "Å"})
self.assertEqual(output, "Å")
@setup(
{
"i18n04": (
"{% load i18n %}{% blocktranslate with berta=anton|lower %}{{ berta }}"
"{% endblocktranslate %}"
)
}
)
def test_i18n04(self):
"""simple translation of a variable and filter"""
output = self.engine.render_to_string("i18n04", {"anton": "Å"})
self.assertEqual(output, "å")
@setup(
{
"legacyi18n04": (
"{% load i18n %}"
"{% blocktranslate with anton|lower as berta %}{{ berta }}"
"{% endblocktranslate %}"
)
}
)
def test_legacyi18n04(self):
"""simple translation of a variable and filter"""
output = self.engine.render_to_string("legacyi18n04", {"anton": "Å"})
self.assertEqual(output, "å")
@setup(
{
"i18n05": (
"{% load i18n %}{% blocktranslate %}xxx{{ anton }}xxx"
"{% endblocktranslate %}"
)
}
)
def test_i18n05(self):
"""simple translation of a string with interpolation"""
output = self.engine.render_to_string("i18n05", {"anton": "yyy"})
self.assertEqual(output, "xxxyyyxxx")
@setup(
{
"i18n07": "{% load i18n %}"
"{% blocktranslate count counter=number %}singular{% plural %}"
"{{ counter }} plural{% endblocktranslate %}"
}
)
def test_i18n07(self):
"""translation of singular form"""
output = self.engine.render_to_string("i18n07", {"number": 1})
self.assertEqual(output, "singular")
@setup(
{
"legacyi18n07": "{% load i18n %}"
"{% blocktranslate count number as counter %}singular{% plural %}"
"{{ counter }} plural{% endblocktranslate %}"
}
)
def test_legacyi18n07(self):
"""translation of singular form"""
output = self.engine.render_to_string("legacyi18n07", {"number": 1})
self.assertEqual(output, "singular")
@setup(
{
"i18n08": "{% load i18n %}"
"{% blocktranslate count number as counter %}singular{% plural %}"
"{{ counter }} plural{% endblocktranslate %}"
}
)
def test_i18n08(self):
"""translation of plural form"""
output = self.engine.render_to_string("i18n08", {"number": 2})
self.assertEqual(output, "2 plural")
@setup(
{
"legacyi18n08": "{% load i18n %}"
"{% blocktranslate count counter=number %}singular{% plural %}"
"{{ counter }} plural{% endblocktranslate %}"
}
)
def test_legacyi18n08(self):
"""translation of plural form"""
output = self.engine.render_to_string("legacyi18n08", {"number": 2})
self.assertEqual(output, "2 plural")
@setup(
{
"i18n17": (
"{% load i18n %}"
"{% blocktranslate with berta=anton|escape %}{{ berta }}"
"{% endblocktranslate %}"
)
}
)
def test_i18n17(self):
"""
Escaping inside blocktranslate and translate works as if it was
directly in the template.
"""
output = self.engine.render_to_string("i18n17", {"anton": "α & β"})
self.assertEqual(output, "α & β")
@setup(
{
"i18n18": (
"{% load i18n %}"
"{% blocktranslate with berta=anton|force_escape %}{{ berta }}"
"{% endblocktranslate %}"
)
}
)
def test_i18n18(self):
output = self.engine.render_to_string("i18n18", {"anton": "α & β"})
self.assertEqual(output, "α & β")
@setup(
{
"i18n19": (
"{% load i18n %}{% blocktranslate %}{{ andrew }}{% endblocktranslate %}"
)
}
)
def test_i18n19(self):
output = self.engine.render_to_string("i18n19", {"andrew": "a & b"})
self.assertEqual(output, "a & b")
@setup(
{
"i18n21": (
"{% load i18n %}{% blocktranslate %}{{ andrew }}{% endblocktranslate %}"
)
}
)
def test_i18n21(self):
output = self.engine.render_to_string("i18n21", {"andrew": mark_safe("a & b")})
self.assertEqual(output, "a & b")
@setup(
{
"legacyi18n17": (
"{% load i18n %}"
"{% blocktranslate with anton|escape as berta %}{{ berta }}"
"{% endblocktranslate %}"
)
}
)
def test_legacyi18n17(self):
output = self.engine.render_to_string("legacyi18n17", {"anton": "α & β"})
self.assertEqual(output, "α & β")
@setup(
{
"legacyi18n18": "{% load i18n %}"
"{% blocktranslate with anton|force_escape as berta %}"
"{{ berta }}{% endblocktranslate %}"
}
)
def test_legacyi18n18(self):
output = self.engine.render_to_string("legacyi18n18", {"anton": "α & β"})
self.assertEqual(output, "α & β")
@setup(
{
"i18n26": "{% load i18n %}"
"{% blocktranslate with extra_field=myextra_field count counter=number %}"
"singular {{ extra_field }}{% plural %}plural{% endblocktranslate %}"
}
)
def test_i18n26(self):
"""
translation of plural form with extra field in singular form (#13568)
"""
output = self.engine.render_to_string(
"i18n26", {"myextra_field": "test", "number": 1}
)
self.assertEqual(output, "singular test")
@setup(
{
"legacyi18n26": (
"{% load i18n %}"
"{% blocktranslate with myextra_field as extra_field "
"count number as counter %}singular {{ extra_field }}{% plural %}plural"
"{% endblocktranslate %}"
)
}
)
def test_legacyi18n26(self):
output = self.engine.render_to_string(
"legacyi18n26", {"myextra_field": "test", "number": 1}
)
self.assertEqual(output, "singular test")
@setup(
{
"i18n27": "{% load i18n %}{% blocktranslate count counter=number %}"
"{{ counter }} result{% plural %}{{ counter }} results"
"{% endblocktranslate %}"
}
)
def test_i18n27(self):
"""translation of singular form in Russian (#14126)"""
with translation.override("ru"):
output = self.engine.render_to_string("i18n27", {"number": 1})
self.assertEqual(
output, "1 \u0440\u0435\u0437\u0443\u043b\u044c\u0442\u0430\u0442"
)
@setup(
{
"legacyi18n27": "{% load i18n %}"
"{% blocktranslate count number as counter %}{{ counter }} result"
"{% plural %}{{ counter }} results{% endblocktranslate %}"
}
)
def test_legacyi18n27(self):
with translation.override("ru"):
output = self.engine.render_to_string("legacyi18n27", {"number": 1})
self.assertEqual(
output, "1 \u0440\u0435\u0437\u0443\u043b\u044c\u0442\u0430\u0442"
)
@setup(
{
"i18n28": (
"{% load i18n %}"
"{% blocktranslate with a=anton b=berta %}{{ a }} + {{ b }}"
"{% endblocktranslate %}"
)
}
)
def test_i18n28(self):
"""simple translation of multiple variables"""
output = self.engine.render_to_string("i18n28", {"anton": "α", "berta": "β"})
self.assertEqual(output, "α + β")
@setup(
{
"legacyi18n28": "{% load i18n %}"
"{% blocktranslate with anton as a and berta as b %}"
"{{ a }} + {{ b }}{% endblocktranslate %}"
}
)
def test_legacyi18n28(self):
output = self.engine.render_to_string(
"legacyi18n28", {"anton": "α", "berta": "β"}
)
self.assertEqual(output, "α + β")
# blocktranslate handling of variables which are not in the context.
# this should work as if blocktranslate was not there (#19915)
@setup(
{
"i18n34": (
"{% load i18n %}{% blocktranslate %}{{ missing }}"
"{% endblocktranslate %}"
)
}
)
def test_i18n34(self):
output = self.engine.render_to_string("i18n34")
if self.engine.string_if_invalid:
self.assertEqual(output, "INVALID")
else:
self.assertEqual(output, "")
@setup(
{
"i18n34_2": (
"{% load i18n %}{% blocktranslate with a='α' %}{{ missing }}"
"{% endblocktranslate %}"
)
}
)
def test_i18n34_2(self):
output = self.engine.render_to_string("i18n34_2")
if self.engine.string_if_invalid:
self.assertEqual(output, "INVALID")
else:
self.assertEqual(output, "")
@setup(
{
"i18n34_3": (
"{% load i18n %}{% blocktranslate with a=anton %}{{ missing }}"
"{% endblocktranslate %}"
)
}
)
def test_i18n34_3(self):
output = self.engine.render_to_string("i18n34_3", {"anton": "\xce\xb1"})
if self.engine.string_if_invalid:
self.assertEqual(output, "INVALID")
else:
self.assertEqual(output, "")
@setup(
{
"i18n37": "{% load i18n %}"
'{% translate "Page not found" as page_not_found %}'
"{% blocktranslate %}Error: {{ page_not_found }}{% endblocktranslate %}"
}
)
def test_i18n37(self):
with translation.override("de"):
output = self.engine.render_to_string("i18n37")
self.assertEqual(output, "Error: Seite nicht gefunden")
# blocktranslate tag with asvar
@setup(
{
"i18n39": (
"{% load i18n %}"
"{% blocktranslate asvar page_not_found %}Page not found"
"{% endblocktranslate %}>{{ page_not_found }}<"
)
}
)
def test_i18n39(self):
with translation.override("de"):
output = self.engine.render_to_string("i18n39")
self.assertEqual(output, ">Seite nicht gefunden<")
@setup(
{
"i18n40": "{% load i18n %}"
'{% translate "Page not found" as pg_404 %}'
"{% blocktranslate with page_not_found=pg_404 asvar output %}"
"Error: {{ page_not_found }}"
"{% endblocktranslate %}"
}
)
def test_i18n40(self):
output = self.engine.render_to_string("i18n40")
self.assertEqual(output, "")
@setup(
{
"i18n41": "{% load i18n %}"
'{% translate "Page not found" as pg_404 %}'
"{% blocktranslate with page_not_found=pg_404 asvar output %}"
"Error: {{ page_not_found }}"
"{% endblocktranslate %}"
">{{ output }}<"
}
)
def test_i18n41(self):
with translation.override("de"):
output = self.engine.render_to_string("i18n41")
self.assertEqual(output, ">Error: Seite nicht gefunden<")
@setup(
{
"template": (
"{% load i18n %}{% blocktranslate asvar %}Yes{% endblocktranslate %}"
)
}
)
def test_blocktrans_syntax_error_missing_assignment(self, tag_name):
msg = "No argument provided to the '{}' tag for the asvar option.".format(
tag_name
)
with self.assertRaisesMessage(TemplateSyntaxError, msg):
self.engine.render_to_string("template")
@setup({"template": "{% load i18n %}{% blocktranslate %}%s{% endblocktranslate %}"})
def test_blocktrans_tag_using_a_string_that_looks_like_str_fmt(self):
output = self.engine.render_to_string("template")
self.assertEqual(output, "%s")
@setup(
{
"template": (
"{% load i18n %}{% blocktranslate %}{% block b %} {% endblock %}"
"{% endblocktranslate %}"
)
}
)
def test_with_block(self, tag_name):
msg = "'{}' doesn't allow other block tags (seen 'block b') inside it".format(
tag_name
)
with self.assertRaisesMessage(TemplateSyntaxError, msg):
self.engine.render_to_string("template")
@setup(
{
"template": (
"{% load i18n %}"
"{% blocktranslate %}{% for b in [1, 2, 3] %} {% endfor %}"
"{% endblocktranslate %}"
)
}
)
def test_with_for(self, tag_name):
msg = (
f"'{tag_name}' doesn't allow other block tags (seen 'for b in [1, 2, 3]') "
f"inside it"
)
with self.assertRaisesMessage(TemplateSyntaxError, msg):
self.engine.render_to_string("template")
@setup(
{
"template": (
"{% load i18n %}{% blocktranslate with foo=bar with %}{{ foo }}"
"{% endblocktranslate %}"
)
}
)
def test_variable_twice(self):
with self.assertRaisesMessage(
TemplateSyntaxError, "The 'with' option was specified more than once"
):
self.engine.render_to_string("template", {"foo": "bar"})
@setup(
{"template": "{% load i18n %}{% blocktranslate with %}{% endblocktranslate %}"}
)
def test_no_args_with(self, tag_name):
msg = "\"with\" in '{}' tag needs at least one keyword argument.".format(
tag_name
)
with self.assertRaisesMessage(TemplateSyntaxError, msg):
self.engine.render_to_string("template")
@setup(
{
"template": (
"{% load i18n %}{% blocktranslate count a %}{% endblocktranslate %}"
)
}
)
def test_count(self, tag_name):
msg = "\"count\" in '{}' tag expected exactly one keyword argument.".format(
tag_name
)
with self.assertRaisesMessage(TemplateSyntaxError, msg):
self.engine.render_to_string("template", {"a": [1, 2, 3]})
@setup(
{
"template": (
"{% load i18n %}{% blocktranslate count counter=num %}{{ counter }}"
"{% plural %}{{ counter }}{% endblocktranslate %}"
)
}
)
def test_count_not_number(self, tag_name):
msg = "'counter' argument to '{}' tag must be a number.".format(tag_name)
with self.assertRaisesMessage(TemplateSyntaxError, msg):
self.engine.render_to_string("template", {"num": "1"})
@setup(
{
"template": (
"{% load i18n %}{% blocktranslate count count=var|length %}"
"There is {{ count }} object. {% block a %} {% endblock %}"
"{% endblocktranslate %}"
)
}
)
def test_plural_bad_syntax(self, tag_name):
msg = "'{}' doesn't allow other block tags inside it".format(tag_name)
with self.assertRaisesMessage(TemplateSyntaxError, msg):
self.engine.render_to_string("template", {"var": [1, 2, 3]})
class TranslationBlockTranslateTagTests(SimpleTestCase):
tag_name = "blocktranslate"
def get_template(self, template_string):
return Template(
template_string.replace(
"{{% blocktranslate ", "{{% {}".format(self.tag_name)
).replace(
"{{% endblocktranslate %}}", "{{% end{} %}}".format(self.tag_name)
)
)
@override_settings(LOCALE_PATHS=extended_locale_paths)
def test_template_tags_pgettext(self):
"""{% blocktranslate %} takes message contexts into account (#14806)."""
trans_real._active = Local()
trans_real._translations = {}
with translation.override("de"):
# Nonexistent context
t = self.get_template(
'{% load i18n %}{% blocktranslate context "nonexistent" %}May'
"{% endblocktranslate %}"
)
rendered = t.render(Context())
self.assertEqual(rendered, "May")
# Existing context... using a literal
t = self.get_template(
"{% load i18n %}"
'{% blocktranslate context "month name" %}May{% endblocktranslate %}'
)
rendered = t.render(Context())
self.assertEqual(rendered, "Mai")
t = self.get_template(
"{% load i18n %}"
'{% blocktranslate context "verb" %}May{% endblocktranslate %}'
)
rendered = t.render(Context())
self.assertEqual(rendered, "Kann")
# Using a variable
t = self.get_template(
"{% load i18n %}{% blocktranslate context message_context %}"
"May{% endblocktranslate %}"
)
rendered = t.render(Context({"message_context": "month name"}))
self.assertEqual(rendered, "Mai")
t = self.get_template(
"{% load i18n %}{% blocktranslate context message_context %}"
"May{% endblocktranslate %}"
)
rendered = t.render(Context({"message_context": "verb"}))
self.assertEqual(rendered, "Kann")
# Using a filter
t = self.get_template(
"{% load i18n %}"
"{% blocktranslate context message_context|lower %}May"
"{% endblocktranslate %}"
)
rendered = t.render(Context({"message_context": "MONTH NAME"}))
self.assertEqual(rendered, "Mai")
t = self.get_template(
"{% load i18n %}"
"{% blocktranslate context message_context|lower %}May"
"{% endblocktranslate %}"
)
rendered = t.render(Context({"message_context": "VERB"}))
self.assertEqual(rendered, "Kann")
# Using 'count'
t = self.get_template(
"{% load i18n %}"
'{% blocktranslate count number=1 context "super search" %}{{ number }}'
" super result{% plural %}{{ number }} super results"
"{% endblocktranslate %}"
)
rendered = t.render(Context())
self.assertEqual(rendered, "1 Super-Ergebnis")
t = self.get_template(
"{% load i18n %}"
'{% blocktranslate count number=2 context "super search" %}{{ number }}'
" super result{% plural %}{{ number }} super results"
"{% endblocktranslate %}"
)
rendered = t.render(Context())
self.assertEqual(rendered, "2 Super-Ergebnisse")
t = self.get_template(
"{% load i18n %}"
'{% blocktranslate context "other super search" count number=1 %}'
"{{ number }} super result{% plural %}{{ number }} super results"
"{% endblocktranslate %}"
)
rendered = t.render(Context())
self.assertEqual(rendered, "1 anderen Super-Ergebnis")
t = self.get_template(
"{% load i18n %}"
'{% blocktranslate context "other super search" count number=2 %}'
"{{ number }} super result{% plural %}{{ number }} super results"
"{% endblocktranslate %}"
)
rendered = t.render(Context())
self.assertEqual(rendered, "2 andere Super-Ergebnisse")
# Using 'with'
t = self.get_template(
"{% load i18n %}"
'{% blocktranslate with num_comments=5 context "comment count" %}'
"There are {{ num_comments }} comments{% endblocktranslate %}"
)
rendered = t.render(Context())
self.assertEqual(rendered, "Es gibt 5 Kommentare")
t = self.get_template(
"{% load i18n %}"
'{% blocktranslate with num_comments=5 context "other comment count" %}'
"There are {{ num_comments }} comments{% endblocktranslate %}"
)
rendered = t.render(Context())
self.assertEqual(rendered, "Andere: Es gibt 5 Kommentare")
# Using trimmed
t = self.get_template(
"{% load i18n %}{% blocktranslate trimmed %}\n\nThere\n\t are 5 "
"\n\n comments\n{% endblocktranslate %}"
)
rendered = t.render(Context())
self.assertEqual(rendered, "There are 5 comments")
t = self.get_template(
"{% load i18n %}"
'{% blocktranslate with num_comments=5 context "comment count" trimmed '
"%}\n\n"
"There are \t\n \t {{ num_comments }} comments\n\n"
"{% endblocktranslate %}"
)
rendered = t.render(Context())
self.assertEqual(rendered, "Es gibt 5 Kommentare")
t = self.get_template(
"{% load i18n %}"
'{% blocktranslate context "other super search" count number=2 trimmed '
"%}\n{{ number }} super \n result{% plural %}{{ number }} super results"
"{% endblocktranslate %}"
)
rendered = t.render(Context())
self.assertEqual(rendered, "2 andere Super-Ergebnisse")
# Misuses
msg = "Unknown argument for 'blocktranslate' tag: %r."
with self.assertRaisesMessage(TemplateSyntaxError, msg % 'month="May"'):
self.get_template(
'{% load i18n %}{% blocktranslate context with month="May" %}'
"{{ month }}{% endblocktranslate %}"
)
msg = (
'"context" in %r tag expected exactly one argument.' % "blocktranslate"
)
with self.assertRaisesMessage(TemplateSyntaxError, msg):
self.get_template(
"{% load i18n %}{% blocktranslate context %}{% endblocktranslate %}"
)
with self.assertRaisesMessage(TemplateSyntaxError, msg):
self.get_template(
"{% load i18n %}{% blocktranslate count number=2 context %}"
"{{ number }} super result{% plural %}{{ number }}"
" super results{% endblocktranslate %}"
)
@override_settings(LOCALE_PATHS=[os.path.join(here, "other", "locale")])
def test_bad_placeholder_1(self):
"""
Error in translation file should not crash template rendering (#16516).
(%(person)s is translated as %(personne)s in fr.po).
"""
with translation.override("fr"):
t = Template(
"{% load i18n %}{% blocktranslate %}My name is {{ person }}."
"{% endblocktranslate %}"
)
rendered = t.render(Context({"person": "James"}))
self.assertEqual(rendered, "My name is James.")
@override_settings(LOCALE_PATHS=[os.path.join(here, "other", "locale")])
def test_bad_placeholder_2(self):
"""
Error in translation file should not crash template rendering (#18393).
        (%(person) misses a 's' in fr.po, causing the string formatting to fail).
"""
with translation.override("fr"):
t = Template(
"{% load i18n %}{% blocktranslate %}My other name is {{ person }}."
"{% endblocktranslate %}"
)
rendered = t.render(Context({"person": "James"}))
self.assertEqual(rendered, "My other name is James.")
class TranslationBlockTransnTagTests(TranslationBlockTranslateTagTests):
tag_name = "blocktrans"
class MultipleLocaleActivationBlockTranslateTests(MultipleLocaleActivationTestCase):
tag_name = "blocktranslate"
def get_template(self, template_string):
return Template(
template_string.replace(
"{{% blocktranslate ", "{{% {}".format(self.tag_name)
).replace(
"{{% endblocktranslate %}}", "{{% end{} %}}".format(self.tag_name)
)
)
def test_single_locale_activation(self):
"""
Simple baseline behavior with one locale for all the supported i18n
constructs.
"""
with translation.override("fr"):
self.assertEqual(
self.get_template(
"{% load i18n %}{% blocktranslate %}Yes{% endblocktranslate %}"
).render(Context({})),
"Oui",
)
def test_multiple_locale_btrans(self):
with translation.override("de"):
t = self.get_template(
"{% load i18n %}{% blocktranslate %}No{% endblocktranslate %}"
)
with translation.override(self._old_language), translation.override("nl"):
self.assertEqual(t.render(Context({})), "Nee")
def test_multiple_locale_deactivate_btrans(self):
with translation.override("de", deactivate=True):
t = self.get_template(
"{% load i18n %}{% blocktranslate %}No{% endblocktranslate %}"
)
with translation.override("nl"):
self.assertEqual(t.render(Context({})), "Nee")
def test_multiple_locale_direct_switch_btrans(self):
with translation.override("de"):
t = self.get_template(
"{% load i18n %}{% blocktranslate %}No{% endblocktranslate %}"
)
with translation.override("nl"):
self.assertEqual(t.render(Context({})), "Nee")
class MultipleLocaleActivationBlockTransTests(
MultipleLocaleActivationBlockTranslateTests
):
tag_name = "blocktrans"
class MiscTests(SimpleTestCase):
tag_name = "blocktranslate"
def get_template(self, template_string):
return Template(
template_string.replace(
"{{% blocktranslate ", "{{% {}".format(self.tag_name)
).replace(
"{{% endblocktranslate %}}", "{{% end{} %}}".format(self.tag_name)
)
)
@override_settings(LOCALE_PATHS=extended_locale_paths)
def test_percent_in_translatable_block(self):
t_sing = self.get_template(
"{% load i18n %}{% blocktranslate %}The result was {{ percent }}%"
"{% endblocktranslate %}"
)
t_plur = self.get_template(
"{% load i18n %}{% blocktranslate count num as number %}"
"{{ percent }}% represents {{ num }} object{% plural %}"
"{{ percent }}% represents {{ num }} objects{% endblocktranslate %}"
)
with translation.override("de"):
self.assertEqual(
t_sing.render(Context({"percent": 42})), "Das Ergebnis war 42%"
)
self.assertEqual(
t_plur.render(Context({"percent": 42, "num": 1})),
"42% stellt 1 Objekt dar",
)
self.assertEqual(
t_plur.render(Context({"percent": 42, "num": 4})),
"42% stellt 4 Objekte dar",
)
@override_settings(LOCALE_PATHS=extended_locale_paths)
def test_percent_formatting_in_blocktranslate(self):
"""
Python's %-formatting is properly escaped in blocktranslate, singular,
or plural.
"""
t_sing = self.get_template(
"{% load i18n %}{% blocktranslate %}There are %(num_comments)s comments"
"{% endblocktranslate %}"
)
t_plur = self.get_template(
"{% load i18n %}{% blocktranslate count num as number %}"
"%(percent)s% represents {{ num }} object{% plural %}"
"%(percent)s% represents {{ num }} objects{% endblocktranslate %}"
)
with translation.override("de"):
# Strings won't get translated as they don't match after escaping %
self.assertEqual(
t_sing.render(Context({"num_comments": 42})),
"There are %(num_comments)s comments",
)
self.assertEqual(
t_plur.render(Context({"percent": 42, "num": 1})),
"%(percent)s% represents 1 object",
)
self.assertEqual(
t_plur.render(Context({"percent": 42, "num": 4})),
"%(percent)s% represents 4 objects",
)
class MiscBlockTranslationTests(MiscTests):
tag_name = "blocktrans"
class BlockTranslateNodeTests(SimpleTestCase):
def test_repr(self):
block_translate_node = BlockTranslateNode(
extra_context={},
singular=[
Token(TokenType.TEXT, "content"),
Token(TokenType.VAR, "variable"),
],
)
self.assertEqual(
repr(block_translate_node),
"<BlockTranslateNode: extra_context={} "
'singular=[<Text token: "content...">, <Var token: "variable...">] '
"plural=None>",
)
| 35.736902
| 88
| 0.527393
|
4a1623ac8e48b182ccca13602c85402a668b2cfa
| 2,781
|
py
|
Python
|
test.py
|
larsbratholm/ddr
|
1d9690c73553aeb4287694084af781e0944c0190
|
[
"MIT"
] | 1
|
2020-03-17T03:27:18.000Z
|
2020-03-17T03:27:18.000Z
|
test.py
|
larsbratholm/ddr
|
1d9690c73553aeb4287694084af781e0944c0190
|
[
"MIT"
] | null | null | null |
test.py
|
larsbratholm/ddr
|
1d9690c73553aeb4287694084af781e0944c0190
|
[
"MIT"
] | 1
|
2020-03-17T02:58:32.000Z
|
2020-03-17T02:58:32.000Z
|
# Full example for my blog post at:
# https://danijar.com/building-variational-auto-encoders-in-tensorflow/
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
tfd = tf.contrib.distributions
def make_encoder(data, code_size):
x = tf.layers.flatten(data)
x = tf.layers.dense(x, 200, tf.nn.relu)
x = tf.layers.dense(x, 200, tf.nn.relu)
loc = tf.layers.dense(x, code_size)
scale = tf.layers.dense(x, code_size, tf.nn.softplus)
return tfd.MultivariateNormalDiag(loc, scale)
def make_prior(code_size):
loc = tf.zeros(code_size)
scale = tf.ones(code_size)
return tfd.MultivariateNormalDiag(loc, scale)
def make_decoder(code, data_shape):
x = code
x = tf.layers.dense(x, 200, tf.nn.relu)
x = tf.layers.dense(x, 200, tf.nn.relu)
logit = tf.layers.dense(x, np.prod(data_shape))
logit = tf.reshape(logit, [-1] + data_shape)
return tfd.Independent(tfd.Bernoulli(logit), 2)
def plot_codes(ax, codes, labels):
ax.scatter(codes[:, 0], codes[:, 1], s=2, c=labels, alpha=0.1)
ax.set_aspect('equal')
ax.set_xlim(codes.min() - .1, codes.max() + .1)
ax.set_ylim(codes.min() - .1, codes.max() + .1)
ax.tick_params(
axis='both', which='both', left='off', bottom='off',
labelleft='off', labelbottom='off')
def plot_samples(ax, samples):
for index, sample in enumerate(samples):
ax[index].imshow(sample, cmap='gray')
ax[index].axis('off')
data = tf.placeholder(tf.float32, [None, 28, 28])
make_encoder = tf.make_template('encoder', make_encoder)
make_decoder = tf.make_template('decoder', make_decoder)
# Define the model.
prior = make_prior(code_size=2)
posterior = make_encoder(data, code_size=2)
code = posterior.sample()
# Define the loss.
likelihood = make_decoder(code, [28, 28]).log_prob(data)
divergence = tfd.kl_divergence(posterior, prior)
elbo = tf.reduce_mean(likelihood - divergence)
optimize = tf.train.AdamOptimizer(0.001).minimize(-elbo)
samples = make_decoder(prior.sample(10), [28, 28]).mean()
mnist = input_data.read_data_sets('MNIST_data/')
fig, ax = plt.subplots(nrows=20, ncols=11, figsize=(10, 20))
with tf.train.MonitoredSession() as sess:
for epoch in range(20):
feed = {data: mnist.test.images.reshape([-1, 28, 28])}
test_elbo, test_codes, test_samples = sess.run([elbo, code, samples], feed)
print('Epoch', epoch, 'elbo', test_elbo)
ax[epoch, 0].set_ylabel('Epoch {}'.format(epoch))
plot_codes(ax[epoch, 0], test_codes, mnist.test.labels)
plot_samples(ax[epoch, 1:], test_samples)
for _ in range(600):
feed = {data: mnist.train.next_batch(100)[0].reshape([-1, 28, 28])}
sess.run(optimize, feed)
plt.savefig('vae-mnist.png', dpi=300, transparent=True, bbox_inches='tight')
| 33.107143
| 79
| 0.701546
|
4a162453ab5fbb672465b5b22cb15b986749aa83
| 24,739
|
py
|
Python
|
mathics/builtin/numbertheory.py
|
JoseCarlosGarcia95/Mathics
|
1927ada93c5ce2d8cbe731731ab1f55cd5527467
|
[
"Apache-2.0"
] | 1
|
2019-10-10T09:56:06.000Z
|
2019-10-10T09:56:06.000Z
|
mathics/builtin/numbertheory.py
|
JoseCarlosGarcia95/Mathics
|
1927ada93c5ce2d8cbe731731ab1f55cd5527467
|
[
"Apache-2.0"
] | null | null | null |
mathics/builtin/numbertheory.py
|
JoseCarlosGarcia95/Mathics
|
1927ada93c5ce2d8cbe731731ab1f55cd5527467
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Number theoretic functions
"""
import sympy
from itertools import combinations
from mathics.builtin.base import Builtin, Test
from mathics.core.expression import (
Expression, Integer, Rational, Symbol, from_python)
from mathics.core.convert import from_sympy
import mpmath
class PowerMod(Builtin):
"""
<dl>
<dt>'PowerMod[$x$, $y$, $m$]'
<dd>computes $x$^$y$ modulo $m$.
</dl>
>> PowerMod[2, 10000000, 3]
= 1
>> PowerMod[3, -2, 10]
= 9
>> PowerMod[0, -1, 2]
: 0 is not invertible modulo 2.
= PowerMod[0, -1, 2]
>> PowerMod[5, 2, 0]
: The argument 0 should be nonzero.
= PowerMod[5, 2, 0]
'PowerMod' does not support rational coefficients (roots) yet.
"""
attributes = ('Listable',)
messages = {
'ninv': "`1` is not invertible modulo `2`.",
}
def apply(self, a, b, m, evaluation):
'PowerMod[a_Integer, b_Integer, m_Integer]'
a_int = a
m_int = m
a, b, m = a.value, b.value, m.value
if m == 0:
evaluation.message('PowerMod', 'divz', m)
return
if b < 0:
b = -b
try:
a = int(sympy.invert(a, m))
except sympy.polys.polyerrors.NotInvertible:
evaluation.message('PowerMod', 'ninv', a_int, m_int)
return
return Integer(pow(a, b, m))
class Mod(Builtin):
"""
<dl>
<dt>'Mod[$x$, $m$]'
<dd>returns $x$ modulo $m$.
</dl>
>> Mod[14, 6]
= 2
>> Mod[-3, 4]
= 1
>> Mod[-3, -4]
= -3
>> Mod[5, 0]
: The argument 0 should be nonzero.
= Mod[5, 0]
"""
attributes = ('Listable', 'NumericFunction')
def apply(self, n, m, evaluation):
'Mod[n_Integer, m_Integer]'
n, m = n.get_int_value(), m.get_int_value()
if m == 0:
evaluation.message('Mod', 'divz', m)
return
return Integer(n % m)
class EvenQ(Test):
"""
<dl>
<dt>'EvenQ[$x$]'
<dd>returns 'True' if $x$ is even, and 'False' otherwise.
</dl>
>> EvenQ[4]
= True
>> EvenQ[-3]
= False
>> EvenQ[n]
= False
"""
def test(self, n):
value = n.get_int_value()
return value is not None and value % 2 == 0
class OddQ(Test):
"""
<dl>
<dt>'OddQ[$x$]'
<dd>returns 'True' if $x$ is odd, and 'False' otherwise.
</dl>
>> OddQ[-3]
= True
>> OddQ[0]
= False
"""
def test(self, n):
value = n.get_int_value()
return value is not None and value % 2 != 0
class GCD(Builtin):
"""
<dl>
<dt>'GCD[$n1$, $n2$, ...]'
<dd>computes the greatest common divisor of the given integers.
</dl>
>> GCD[20, 30]
= 10
>> GCD[10, y]
= GCD[10, y]
'GCD' is 'Listable':
>> GCD[4, {10, 11, 12, 13, 14}]
= {2, 1, 4, 1, 2}
'GCD' does not work for rational numbers and Gaussian integers yet.
"""
attributes = ('Flat', 'Listable', 'OneIdentity', 'Orderless')
def apply(self, ns, evaluation):
'GCD[ns___Integer]'
ns = ns.get_sequence()
result = 0
for n in ns:
value = n.get_int_value()
if value is None:
return
result = sympy.gcd(result, value)
return Integer(result)
# FIXME: Previously this used gmpy's gcdext. sympy's gcdex is not as powerful
# class ExtendedGCD(Builtin):
# """
# >> ExtendedGCD[10, 15]
# = {5, {-1, 1}}
#
# 'ExtendedGCD' works with any number of arguments:
# >> ExtendedGCD[10, 15, 7]
# = {1, {-3, 3, -2}}
#
# Compute the greatest common divisor and check the result:
# >> numbers = {10, 20, 14};
# >> {gcd, factors} = ExtendedGCD[Sequence @@ numbers]
# = {2, {3, 0, -2}}
# >> Plus @@ (numbers * factors)
# = 2
#
# 'ExtendedGCD' does not work for rational numbers and Gaussian integers yet
# """
#
# attributes = ('Listable',)
#
# def apply(self, ns, evaluation):
# 'ExtendedGCD[ns___Integer]'
#
# ns = ns.get_sequence()
# result = 0
# coeff = []
# for n in ns:
# value = n.get_int_value()
# if value is None:
# return
# new_result, c1, c2 = sympy.gcdex(result, value)
# result = new_result
# coeff = [c * c1 for c in coeff] + [c2]
# return Expression('List', Integer(result), Expression(
# 'List', *(Integer(c) for c in coeff)))
class LCM(Builtin):
"""
<dl>
<dt>'LCM[$n1$, $n2$, ...]'
<dd>computes the least common multiple of the given integers.
</dl>
>> LCM[15, 20]
= 60
>> LCM[20, 30, 40, 50]
= 600
"""
attributes = ('Flat', 'Listable', 'OneIdentity', 'Orderless')
def apply(self, ns, evaluation):
'LCM[ns___Integer]'
ns = ns.get_sequence()
result = 1
for n in ns:
value = n.get_int_value()
if value is None:
return
result = sympy.lcm(result, value)
return Integer(result)
class FactorInteger(Builtin):
"""
<dl>
<dt>'FactorInteger[$n$]'
<dd>returns the factorization of $n$ as a list of factors and exponents.
</dl>
>> factors = FactorInteger[2010]
= {{2, 1}, {3, 1}, {5, 1}, {67, 1}}
To get back the original number:
>> Times @@ Power @@@ factors
= 2010
'FactorInteger' factors rationals using negative exponents:
>> FactorInteger[2010 / 2011]
= {{2, 1}, {3, 1}, {5, 1}, {67, 1}, {2011, -1}}
"""
# TODO: GausianIntegers option
# e.g. FactorInteger[5, GaussianIntegers -> True]
def apply(self, n, evaluation):
'FactorInteger[n_]'
if isinstance(n, Integer):
factors = sympy.factorint(n.value)
factors = sorted(factors.items())
return Expression('List', *(Expression('List', factor, exp)
for factor, exp in factors))
elif isinstance(n, Rational):
factors, factors_denom = list(map(
sympy.factorint, n.value.as_numer_denom()))
for factor, exp in factors_denom.items():
factors[factor] = factors.get(factor, 0) - exp
factors = sorted(factors.items())
return Expression('List', *(Expression('List', factor, exp)
for factor, exp in factors))
else:
return evaluation.message('FactorInteger', 'exact', n)
class Divisors(Builtin):
"""
<dl>
<dt>'Divisors[$n$]'
<dd>returns a list of the integers that divide $n$.
</dl>
>> Divisors[96]
= {1, 2, 3, 4, 6, 8, 12, 16, 24, 32, 48, 96}
>> Divisors[704]
= {1, 2, 4, 8, 11, 16, 22, 32, 44, 64, 88, 176, 352, 704}
>> Divisors[{87, 106, 202, 305}]
= {{1, 3, 29, 87}, {1, 2, 53, 106}, {1, 2, 101, 202}, {1, 5, 61, 305}}
#> Divisors[0]
= Divisors[0]
#> Divisors[{-206, -502, -1702, 9}]
= {{1, 2, 103, 206}, {1, 2, 251, 502}, {1, 2, 23, 37, 46, 74, 851, 1702}, {1, 3, 9}}
#> Length[Divisors[1000*369]]
= 96
#> Length[Divisors[305*176*369*100]]
= 672
"""
# TODO: support GaussianIntegers
# e.g. Divisors[2, GaussianIntegers -> True]
attributes = ('Listable',)
def apply(self, n, evaluation):
'Divisors[n_Integer]'
if n == Integer(0):
return None
return Expression('List', *[from_sympy(i) for i in sympy.divisors(n.to_sympy())])
class IntegerExponent(Builtin):
"""
<dl>
<dt>'IntegerExponent[$n$, $b$]'
<dd>gives the highest exponent of $b$ that divides $n$.
</dl>
>> IntegerExponent[16, 2]
= 4
>> IntegerExponent[-510000]
= 4
>> IntegerExponent[10, b]
= IntegerExponent[10, b]
"""
rules = {
'IntegerExponent[n_]': 'IntegerExponent[n, 10]',
}
messages = {
'int': 'Integer expected at position 1 in `1`',
'ibase': 'Base `1` is not an integer greater than 1.',
}
def apply(self, n, b, evaluation):
'IntegerExponent[n_Integer, b_Integer]'
py_n, py_b = n.to_python(), b.to_python()
expr = Expression('IntegerExponent', n, b)
if not isinstance(py_n, int):
evaluation.message('IntegerExponent', 'int', expr)
py_n = abs(py_n)
if not (isinstance(py_b, int) and py_b > 1):
evaluation.message('IntegerExponent', 'ibase', b)
# TODO: Optimise this (dont need to calc. base^result)
# NOTE: IntegerExponent[a,b] causes a Python error here when a or b are
# symbols
result = 1
while py_n % (py_b ** result) == 0:
result += 1
return from_python(result - 1)
class MantissaExponent(Builtin):
"""
<dl>
<dt>'MantissaExponent[$n$]'
<dd>finds a list containing the mantissa and exponent of a given number $n$.
<dt>'MantissaExponent[$n$, $b$]'
        <dd>finds the base-$b$ mantissa and exponent of $n$.
</dl>
>> MantissaExponent[2.5*10^20]
= {0.25, 21}
>> MantissaExponent[125.24]
= {0.12524, 3}
>> MantissaExponent[125., 2]
= {0.976563, 7}
>> MantissaExponent[10, b]
= MantissaExponent[10, b]
#> MantissaExponent[E, Pi]
= {E / Pi, 1}
#> MantissaExponent[Pi, Pi]
= {1 / Pi, 2}
#> MantissaExponent[5/2 + 3, Pi]
= {11 / (2 Pi ^ 2), 2}
#> MantissaExponent[b]
= MantissaExponent[b]
#> MantissaExponent[17, E]
= {17 / E ^ 3, 3}
#> MantissaExponent[17., E]
= {0.84638, 3}
#> MantissaExponent[Exp[Pi], 2]
= {E ^ Pi / 32, 5}
#> MantissaExponent[3 + 2 I, 2]
: The value 3 + 2 I is not a real number
= MantissaExponent[3 + 2 I, 2]
#> MantissaExponent[25, 0.4]
: Base 0.4 is not a real number greater than 1.
= MantissaExponent[25, 0.4]
#> MantissaExponent[0.0000124]
= {0.124, -4}
#> MantissaExponent[0.0000124, 2]
= {0.812646, -16}
#> MantissaExponent[0]
= {0, 0}
#> MantissaExponent[0, 2]
= {0, 0}
"""
attributes = ('Listable',)
rules = {
'MantissaExponent[0]': '{0, 0}',
'MantissaExponent[0, n_]': '{0, 0}',
}
messages = {
'realx': 'The value `1` is not a real number',
'rbase': 'Base `1` is not a real number greater than 1.',
}
def apply(self, n, b, evaluation):
'MantissaExponent[n_, b_]'
# Handle Input with special cases such as PI and E
n_sympy, b_sympy = n.to_sympy(), b.to_sympy()
expr = Expression('MantissaExponent', n, b)
if isinstance(n.to_python(), complex):
evaluation.message('MantissaExponent', 'realx', n)
return expr
if n_sympy.is_constant():
temp_n = Expression('N', n).evaluate(evaluation)
py_n = temp_n.to_python()
else:
return expr
if b_sympy.is_constant():
temp_b = Expression('N', b).evaluate(evaluation)
py_b = temp_b.to_python()
else:
return expr
if not py_b > 1:
evaluation.message('MantissaExponent', 'rbase', b)
return expr
base_exp = int(mpmath.log(py_n, py_b))
exp = (base_exp + 1) if base_exp >= 0 else base_exp
return Expression('List', Expression('Divide', n , b ** exp), exp)
def apply_2(self, n, evaluation):
'MantissaExponent[n_]'
n_sympy = n.to_sympy()
expr = Expression('MantissaExponent', n)
if isinstance(n.to_python(), complex):
evaluation.message('MantissaExponent', 'realx', n)
return expr
# Handle Input with special cases such as PI and E
if n_sympy.is_constant():
temp_n = Expression('N', n).evaluate(evaluation)
py_n = temp_n.to_python()
else:
return expr
base_exp = int(mpmath.log10(py_n))
exp = (base_exp + 1) if base_exp >= 0 else base_exp
return Expression('List', Expression('Divide', n , (10 ** exp)), exp)
def _fractional_part(self, n, expr, evaluation):
n_sympy = n.to_sympy()
if n_sympy.is_constant():
if (n_sympy >= 0):
positive_integer_part = Expression("Floor", n).evaluate(evaluation).to_python()
result = n - positive_integer_part
else:
negative_integer_part = Expression("Ceiling", n).evaluate(evaluation).to_python()
result = n - negative_integer_part
else:
return expr
return from_python(result)
class FractionalPart(Builtin):
"""
<dl>
<dt>'FractionalPart[$n$]'
<dd>finds the fractional part of $n$.
</dl>
>> FractionalPart[4.1]
= 0.1
>> FractionalPart[-5.25]
= -0.25
#> FractionalPart[b]
= FractionalPart[b]
#> FractionalPart[{-2.4, -2.5, -3.0}]
= {-0.4, -0.5, 0.}
#> FractionalPart[14/32]
= 7 / 16
#> FractionalPart[4/(1 + 3 I)]
= 2 / 5 - I / 5
#> FractionalPart[Pi^20]
= -8769956796 + Pi ^ 20
"""
attributes = ('Listable', 'NumericFunction', 'ReadProtected')
def apply(self, n, evaluation):
'FractionalPart[n_]'
expr = Expression('FractionalPart', n)
return _fractional_part(self.__class__.__name__, n, expr, evaluation)
def apply_2(self, n, evaluation):
'FractionalPart[n_Complex]'
expr = Expression('FractionalPart', n)
n_real = Expression("Re", n).evaluate(evaluation)
n_image = Expression("Im", n).evaluate(evaluation)
real_fractional_part = _fractional_part(self.__class__.__name__, n_real, expr, evaluation)
image_fractional_part = _fractional_part(self.__class__.__name__, n_image, expr, evaluation)
return Expression('Complex', real_fractional_part, image_fractional_part)
class Prime(Builtin):
"""
<dl>
<dt>'Prime[$n$]'
<dd>returns the $n$th prime number.
</dl>
>> Prime[1]
= 2
>> Prime[167]
= 991
"""
messages = {
'intpp': 'Positive integer argument expected in `1`.',
}
def apply(self, n, evaluation):
'Prime[n_]'
n_int = n.to_python()
if isinstance(n_int, int) and n_int > 0:
return Integer(sympy.prime(n_int))
expr = Expression('Prime', n)
evaluation.message('Prime', 'intpp', expr)
return
class PrimeQ(Builtin):
"""
<dl>
<dt>'PrimeQ[$n$]'
<dd>returns 'True' if $n$ is a prime number.
</dl>
For very large numbers, 'PrimeQ' uses probabilistic prime testing, so it might be wrong sometimes
(a number might be composite even though 'PrimeQ' says it is prime).
The algorithm might be changed in the future.
>> PrimeQ[2]
= True
>> PrimeQ[-3]
= True
>> PrimeQ[137]
= True
>> PrimeQ[2 ^ 127 - 1]
= True
#> PrimeQ[1]
= False
#> PrimeQ[2 ^ 255 - 1]
= False
All prime numbers between 1 and 100:
>> Select[Range[100], PrimeQ]
= {2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67, 71, 73, 79, 83, 89, 97}
'PrimeQ' has attribute 'Listable':
>> PrimeQ[Range[20]]
= {False, True, True, False, True, False, True, False, False, False, True, False, True, False, False, False, True, False, True, False}
"""
attributes = ('Listable',)
def apply(self, n, evaluation):
'PrimeQ[n_]'
n = n.get_int_value()
if n is None:
return Symbol('False')
n = abs(n)
if sympy.isprime(n):
return Symbol('True')
else:
return Symbol('False')
class CoprimeQ(Builtin):
"""
<dl>
<dt>'CoprimeQ[$x$, $y$]'
<dd>tests whether $x$ and $y$ are coprime by computing their
greatest common divisor.
</dl>
>> CoprimeQ[7, 9]
= True
>> CoprimeQ[-4, 9]
= True
>> CoprimeQ[12, 15]
= False
CoprimeQ also works for complex numbers
>> CoprimeQ[1+2I, 1-I]
= True
>> CoprimeQ[4+2I, 6+3I]
= False
>> CoprimeQ[2, 3, 5]
= True
>> CoprimeQ[2, 4, 5]
= False
"""
attributes = ('Listable',)
def apply(self, args, evaluation):
'CoprimeQ[args__]'
py_args = [arg.to_python() for arg in args.get_sequence()]
if not all(isinstance(i, int) or isinstance(i, complex)
for i in py_args):
return Symbol('False')
if all(sympy.gcd(n, m) == 1 for (n, m) in combinations(py_args, 2)):
return Symbol('True')
else:
return Symbol('False')
class PrimePowerQ(Builtin):
"""
<dl>
<dt>'PrimePowerQ[$n$]'
<dd>returns 'True' if $n$ is a power of a prime number.
</dl>
>> PrimePowerQ[9]
= True
>> PrimePowerQ[52142]
= False
>> PrimePowerQ[-8]
= True
>> PrimePowerQ[371293]
= True
#> PrimePowerQ[1]
= False
"""
rules = {
'PrimePowerQ[1]': 'False',
}
attributes = ('Listable', 'Protected', 'ReadProtected')
# TODO: GaussianIntegers option
"""
#> PrimePowerQ[5, GaussianIntegers -> True]
= False
"""
# TODO: Complex args
"""
#> PrimePowerQ[{3 + I, 3 - 2 I, 3 + 4 I, 9 + 7 I}]
= {False, True, True, False}
"""
# TODO: Gaussian rationals
"""
#> PrimePowerQ[2/125 - 11 I/125]
= True
"""
def apply(self, n, evaluation):
'PrimePowerQ[n_]'
n = n.get_int_value()
if n is None:
return Symbol('False')
n = abs(n)
if len(sympy.factorint(n)) == 1:
return Symbol('True')
else:
return Symbol('False')
class PrimePi(Builtin):
"""
<dl>
<dt>'PrimePi[$x$]'
<dd>gives the number of primes less than or equal to $x$.
</dl>
>> PrimePi[100]
= 25
>> PrimePi[-1]
= 0
>> PrimePi[3.5]
= 2
>> PrimePi[E]
= 1
"""
# TODO: Traditional Form
def apply(self, n, evaluation):
'PrimePi[n_?NumericQ]'
result = sympy.ntheory.primepi(n.to_python(n_evaluation=evaluation))
return from_python(result)
class NextPrime(Builtin):
"""
<dl>
<dt>'NextPrime[$n$]'
<dd>gives the next prime after $n$.
<dt>'NextPrime[$n$,$k$]'
<dd>gives the $k$th prime after $n$.
</dl>
>> NextPrime[10000]
= 10007
>> NextPrime[100, -5]
= 73
>> NextPrime[10, -5]
= -2
>> NextPrime[100, 5]
= 113
>> NextPrime[5.5, 100]
= 563
>> NextPrime[5, 10.5]
= NextPrime[5, 10.5]
"""
rules = {
'NextPrime[n_]': 'NextPrime[n, 1]',
}
def apply(self, n, k, evaluation):
'NextPrime[n_?NumericQ, k_?IntegerQ]'
py_k = k.to_python(n_evaluation=evaluation)
py_n = n.to_python(n_evaluation=evaluation)
if py_k >= 0:
return from_python(sympy.ntheory.nextprime(py_n, py_k))
# Hack to get earlier primes
result = n.to_python()
for i in range(-py_k):
try:
result = sympy.ntheory.prevprime(result)
except ValueError:
# No earlier primes
return from_python(-1 * sympy.ntheory.nextprime(0, py_k - i))
return from_python(result)
class RandomPrime(Builtin):
"""
<dl>
    <dt>'RandomPrime[{$imin$, $imax$}]'
<dd>gives a random prime between $imin$ and $imax$.
<dt>'RandomPrime[$imax$]'
<dd>gives a random prime between 2 and $imax$.
<dt>'RandomPrime[$range$, $n$]'
<dd>gives a list of $n$ random primes in $range$.
</dl>
>> RandomPrime[{14, 17}]
= 17
>> RandomPrime[{14, 16}, 1]
: There are no primes in the specified interval.
= RandomPrime[{14, 16}, 1]
>> RandomPrime[{8,12}, 3]
= {11, 11, 11}
>> RandomPrime[{10,30}, {2,5}]
= ...
#> RandomPrime[{10,12}, {2,2}]
= {{11, 11}, {11, 11}}
#> RandomPrime[2, {3,2}]
= {{2, 2}, {2, 2}, {2, 2}}
"""
messages = {
'posdim': (
'The dimensions parameter `1` is expected to be a positive '
'integer or a list of positive integers.'),
'noprime': 'There are no primes in the specified interval.',
'prmrng': ('First argument `1` is not a positive integer or a list '
'of two positive integers.'),
        'posint': ('The parameter `1` describing the interval is expected to '
'be a positive integer.'),
}
rules = {
'RandomPrime[imax_?NotListQ]': 'RandomPrime[{1, imax}, 1]',
'RandomPrime[int_?ListQ]': 'RandomPrime[int, 1]',
'RandomPrime[imax_?ListQ, n_?ArrayQ]': (
'ConstantArray[RandomPrime[imax, 1], n]'),
'RandomPrime[imax_?NotListQ, n_?ArrayQ]': (
'ConstantArray[RandomPrime[{1, imax}, 1], n]'),
}
# TODO: Use random state as in other randomised methods within mathics
def apply(self, interval, n, evaluation):
'RandomPrime[interval_?ListQ, n_]'
if not isinstance(n, Integer):
evaluation.message('RandomPrime', 'posdim', n)
return
py_n = n.to_python()
py_int = interval.to_python()
if not (isinstance(py_int, list) and len(py_int) == 2):
evaluation.message('RandomPrime', 'prmrng', interval)
imin, imax = min(py_int), max(py_int)
if imin <= 0 or not isinstance(imin, int):
evaluation.message('RandomPrime', 'posint', interval.leaves[0])
return
if imax <= 0 or not isinstance(imax, int):
evaluation.message('RandomPrime', 'posint', interval.leaves[1])
return
try:
if py_n == 1:
return from_python(sympy.ntheory.randprime(imin, imax + 1))
return from_python([sympy.ntheory.randprime(imin, imax + 1)
for i in range(py_n)])
except ValueError:
evaluation.message('RandomPrime', 'noprime')
return
class Quotient(Builtin):
'''
<dl>
<dt>'Quotient[m, n]'
<dd>computes the integer quotient of $m$ and $n$.
</dl>
>> Quotient[23, 7]
= 3
#> Quotient[13, 0]
: Infinite expression Quotient[13, 0] encountered.
= ComplexInfinity
#> Quotient[-17, 7]
= -3
#> Quotient[-17, -4]
= 4
#> Quotient[19, -4]
= -5
'''
attributes = ('Listable', 'NumericFunction')
messages = {
'infy': 'Infinite expression `1` encountered.',
}
def apply(self, m, n, evaluation):
'Quotient[m_Integer, n_Integer]'
py_m = m.get_int_value()
py_n = n.get_int_value()
if py_n == 0:
evaluation.message('Quotient', 'infy', Expression('Quotient', m, n))
return Symbol('ComplexInfinity')
return Integer(py_m // py_n)
class QuotientRemainder(Builtin):
'''
<dl>
<dt>'QuotientRemainder[m, n]'
<dd>computes a list of the quotient and remainder from division of $m$ by $n$.
</dl>
>> QuotientRemainder[23, 7]
= {3, 2}
#> QuotientRemainder[13, 0]
: The argument 0 in QuotientRemainder[13, 0] should be nonzero.
= QuotientRemainder[13, 0]
#> QuotientRemainder[-17, 7]
= {-3, 4}
#> QuotientRemainder[-17, -4]
= {4, -1}
#> QuotientRemainder[19, -4]
= {-5, -1}
#> QuotientRemainder[a, 0]
= QuotientRemainder[a, 0]
#> QuotientRemainder[a, b]
= QuotientRemainder[a, b]
#> QuotientRemainder[5.2,2.5]
= {2, 0.2}
#> QuotientRemainder[5, 2.]
= {2, 1.}
'''
attributes = ('Listable', 'NumericFunction')
messages = {
'divz': 'The argument 0 in `1` should be nonzero.',
}
def apply(self, m, n, evaluation):
'QuotientRemainder[m_, n_]'
if m.is_numeric() and n.is_numeric():
py_m = m.to_python()
py_n = n.to_python()
if py_n == 0:
return evaluation.message('QuotientRemainder', 'divz', Expression('QuotientRemainder', m, n))
return Expression('List', Integer(py_m // py_n), (py_m % py_n))
else:
return Expression('QuotientRemainder', m, n)
| 25.373333
| 139
| 0.530215
|
4a1624a5b9d2bd1c474a184efa97d17f9ac50df8
| 1,284
|
py
|
Python
|
Reinforcement-Learning/assignment1/draft.py
|
gajeraj/MLSA-workshops-2020-student
|
cafbf5ac8750dd2b962174ad71dabf35ac90e2f4
|
[
"MIT"
] | 5
|
2020-02-27T07:04:44.000Z
|
2021-06-03T17:20:55.000Z
|
Reinforcement-Learning/assignment1/draft.py
|
Phoebe0222/MLSA-workshops-2019-student
|
cafbf5ac8750dd2b962174ad71dabf35ac90e2f4
|
[
"MIT"
] | null | null | null |
Reinforcement-Learning/assignment1/draft.py
|
Phoebe0222/MLSA-workshops-2019-student
|
cafbf5ac8750dd2b962174ad71dabf35ac90e2f4
|
[
"MIT"
] | 9
|
2019-08-09T12:08:28.000Z
|
2019-10-16T06:35:22.000Z
|
'''
This includes my test runs to understand the data structure when attempting the problem.
Feel free to go through and try it yourself.
'''
import gym
import numpy as np
from lake_envs import *
# uncomment to check out stochastic/deterministic environments
#env = gym.make("Deterministic-4x4-FrozenLake-v0")
env = gym.make("Stochastic-4x4-FrozenLake-v0")
'''
P: nested dictionary
From gym.core.Environment
For each pair of states in [1, nS] and actions in [1, nA], P[state][action] is a
tuple of the form (probability, nextstate, reward, terminal) where
- probability: float
the probability of transitioning from "state" to "nextstate" with "action"
- nextstate: int
denotes the state we transition to (in range [0, nS - 1])
- reward: int
either 0 or 1, the reward for transitioning from "state" to
"nextstate" with "action"
- terminal: bool
True when "nextstate" is a terminal state (hole or goal), False otherwise
'''
P = env.P
nA = 4
nS = 16
gamma = 0.9
policy = 2 * np.ones(nS, dtype='int')
for state in P:
A = P[state]
for action in A:
for prob, next_state, reward, terminal in A[action]:
print('p(s_{}|s_{},a_{})={}, with reward {}'.format(next_state,state,action,prob,reward))
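# As a quick illustration of how the (probability, nextstate, reward, terminal)
# tuples are consumed, the expected one-step return of following the fixed
# policy from a single state can be computed directly from P. The choice of
# state 0 and the all-zero value estimates are placeholder assumptions for
# this sketch only, not part of the original exercise.
V = np.zeros(nS)
state = 0
action = policy[state]
expected_return = sum(prob * (reward + gamma * V[next_state])
                      for prob, next_state, reward, terminal in P[state][action])
print('Expected one-step return from state {} under the fixed policy: {}'.format(state, expected_return))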
env.close()
| 27.319149
| 101
| 0.680685
|
4a1624b8361e20a663f27224a3feabbd2cd1601f
| 1,446
|
py
|
Python
|
ci/test.py
|
tiagoshibata/coremltools_bdist_wininst
|
e052eda593e57dc57c69bacfbd2aa02c89f950fb
|
[
"Unlicense"
] | null | null | null |
ci/test.py
|
tiagoshibata/coremltools_bdist_wininst
|
e052eda593e57dc57c69bacfbd2aa02c89f950fb
|
[
"Unlicense"
] | null | null | null |
ci/test.py
|
tiagoshibata/coremltools_bdist_wininst
|
e052eda593e57dc57c69bacfbd2aa02c89f950fb
|
[
"Unlicense"
] | null | null | null |
import os
import subprocess
import sys
import traceback
from custom_venv import CustomVenv
def run(*command, **kwargs):
subprocess.run(command, cwd='coremltools', check=True, **kwargs)
def run_tests():
env = CustomVenv(clear=True, symlinks=True, with_pip=True, requirements=['-r', 'coremltools/test_requirements.pip'])
env.create('venv')
cmake_environment = os.environ.copy()
cmake_environment['CMAKE_BUILD_TYPE'] = 'Release'
cmake_environment['PATH'] = os.pathsep.join((os.path.dirname(env.python), cmake_environment['PATH']))
cmake_environment['PYTHONPATH'] = os.pathsep.join((os.getcwd(), cmake_environment.get('PYTHONPATH', '')))
run('cmake', '.', env=cmake_environment)
run('cmake', '--build', '.', '--target', 'pytest', '--config', 'Release', env=cmake_environment)
def tests_should_pass():
if sys.version_info >= (3, 7) or sys.version_info < (3, 5) and os.name == 'nt':
print('Some dependencies are unavailable for this Python version in this system, tests are expected to fail')
return False
if os.name == 'nt':
print('Native compilation is failing in Windows, tests are expected to fail')
return False
return True
def main():
if tests_should_pass():
run_tests()
else:
try:
run_tests()
except Exception: # Don't fail the CI build
traceback.print_exc()
if __name__ == '__main__':
main()
| 31.434783
| 120
| 0.663209
|
4a16257e5a54433a12ebd8c8a1c6e08da5ba141a
| 1,247
|
py
|
Python
|
Day2_math_operators/math_operators.py
|
ariv14/python_day1
|
6a649d19669a3952aa55509754a91afebe215066
|
[
"Apache-2.0"
] | null | null | null |
Day2_math_operators/math_operators.py
|
ariv14/python_day1
|
6a649d19669a3952aa55509754a91afebe215066
|
[
"Apache-2.0"
] | null | null | null |
Day2_math_operators/math_operators.py
|
ariv14/python_day1
|
6a649d19669a3952aa55509754a91afebe215066
|
[
"Apache-2.0"
] | null | null | null |
# Simple math operations interpreters
# Addition calculator
print("#######################################################################\n")
print("Simple addition calculator")
a = input("Enter first value: ")
b = input("Enter second value: ")
print("The sum of two values are : ")
print(int(a) + int(b))
print("#######################################################################\n")
# Subtraction calculator
print("Simple subtraction calculator")
a = input("Enter first value: ")
b = input("Enter second value: ")
print("The subtraction of two values are : ")
print(int(a) - int(b))
print("#######################################################################\n")
# Division calculator
print("Simple division calculator")
a = input("Enter first value: ")
b = input("Enter second value: ")
print("The division of two values are : ")
print(int(a) / int(b))
print("#######################################################################\n")
# Multiplication calculator
print("Simple multiplication calculator")
a = input("Enter first value: ")
b = input("Enter second value: ")
print("The multiplication of two values are : ")
print(int(a) * int(b))
print("#######################################################################\n")
| 31.974359
| 82
| 0.493986
|
4a162583f49be887b2988c0fb7a77eb22e826ab5
| 612
|
py
|
Python
|
scripts/writeLineToVTK.py
|
bjlittle/mint
|
fe4862951e0c570caef6e86440b090650dca000b
|
[
"0BSD"
] | null | null | null |
scripts/writeLineToVTK.py
|
bjlittle/mint
|
fe4862951e0c570caef6e86440b090650dca000b
|
[
"0BSD"
] | null | null | null |
scripts/writeLineToVTK.py
|
bjlittle/mint
|
fe4862951e0c570caef6e86440b090650dca000b
|
[
"0BSD"
] | null | null | null |
import sys
from numpy import *
nline = eval(sys.argv[1])
t = linspace(0., 1., nline)
x = eval(sys.argv[2])
y = eval(sys.argv[3])
f = open(sys.argv[4], 'w')
f.write('# vtk DataFile Version 4.2\n')
f.write('vtk output\n')
f.write('ASCII\n')
f.write('DATASET UNSTRUCTURED_GRID\n')
f.write('POINTS {} double\n'.format(nline))
for i in range(nline):
f.write('{} {} 0\n'.format(x[i], y[i]))
ncells = nline - 1
f.write('CELLS {} {}\n'.format(ncells, ncells*3))
for i in range(ncells):
f.write('2 {} {}\n'.format(i, i + 1))
f.write('CELL_TYPES {}\n'.format(ncells))
for i in range(ncells):
f.write('3\n')
f.close()
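# Example invocation (hypothetical arguments): the second and third arguments
# are Python expressions in t that the eval calls above turn into coordinate
# arrays, e.g.
#   python writeLineToVTK.py 21 "cos(2*pi*t)" "sin(2*pi*t)" circle.vtk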
| 24.48
| 49
| 0.627451
|
4a1626004d346a4bc802b499f550a95e83d63466
| 1,778
|
py
|
Python
|
voice.py
|
dutch213/pt-voicebox
|
8fd13c64dda26de450e815f38b66a14df23fb536
|
[
"MIT"
] | null | null | null |
voice.py
|
dutch213/pt-voicebox
|
8fd13c64dda26de450e815f38b66a14df23fb536
|
[
"MIT"
] | null | null | null |
voice.py
|
dutch213/pt-voicebox
|
8fd13c64dda26de450e815f38b66a14df23fb536
|
[
"MIT"
] | 1
|
2019-09-30T01:31:25.000Z
|
2019-09-30T01:31:25.000Z
|
__author__ = 'jamiebrew'
import operator
# keeps a dictionary of sources with associated weights
class Voice(object):
def __init__(self, weighted_corpora, name = 'no_name'):
self.name = name
self.weighted_corpora = weighted_corpora
    # aggregates the suggestion lists of all constituent corpora in this voice and returns the top num_words suggestions
def suggest(self, sentence, cursor_position, num_words):
suggestions = {}
for key in self.weighted_corpora:
corp, weight = self.weighted_corpora[key]
if not weight == 0:
contributed_suggestions = corp.suggest(sentence, cursor_position, num_words)
for word, score in contributed_suggestions:
if word not in suggestions:
suggestions[word] = [0, {}]
suggestions[word][0] += score * weight
suggestions[word][1][corp.name] = score
else:
suggestions[word][0] += score * weight
suggestions[word][1][corp.name] = score
suggestions = list(reversed(sorted(suggestions.items(), key=operator.itemgetter(1))))[0:num_words]
return suggestions[0:num_words]
# adds a corpus to this voice
def add_corpus(self, corp, weight):
self.weighted_corpora[corp.name] = [corp, weight]
# proportionally adjusts the weights to different corpora such that they sum to 1
def normalize_weights(self):
total_weight = 0
for key in self.weighted_corpora:
total_weight += self.weighted_corpora[key][1]
for key in self.weighted_corpora:
self.weighted_corpora[key][1] = self.weighted_corpora[key][1] / total_weight
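# A minimal usage sketch (not part of the original module): StubCorpus fakes
# the corpus interface assumed above (a .name plus a .suggest() returning
# (word, score) pairs) purely to show how weighted suggestions are merged.
if __name__ == '__main__':
    class StubCorpus(object):
        def __init__(self, name, scores):
            self.name = name
            self.scores = scores

        def suggest(self, sentence, cursor_position, num_words):
            # Ignore the sentence context and just return the canned scores.
            return sorted(self.scores.items(), key=operator.itemgetter(1), reverse=True)[:num_words]

    demo_voice = Voice({}, name='demo')
    demo_voice.add_corpus(StubCorpus('poems', {'river': 3.0, 'stone': 1.0}), 2)
    demo_voice.add_corpus(StubCorpus('letters', {'river': 1.0, 'dear': 2.5}), 1)
    demo_voice.normalize_weights()
    print(demo_voice.suggest(['the'], 1, 3))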
| 44.45
| 119
| 0.626547
|
4a1626d408baae148fbcc1b753c90639aa7fb856
| 5,579
|
py
|
Python
|
rgc/ThreadQueue/__init__.py
|
zyndagj/gantry-crane
|
482385e0ad1f284206a30a1683c9b1aef23d3db4
|
[
"BSD-3-Clause"
] | 8
|
2019-09-27T20:08:16.000Z
|
2021-07-12T18:27:23.000Z
|
rgc/ThreadQueue/__init__.py
|
zyndagj/gantry-crane
|
482385e0ad1f284206a30a1683c9b1aef23d3db4
|
[
"BSD-3-Clause"
] | 13
|
2019-11-19T17:37:18.000Z
|
2021-01-15T17:41:35.000Z
|
rgc/ThreadQueue/__init__.py
|
zyndagj/gantry-crane
|
482385e0ad1f284206a30a1683c9b1aef23d3db4
|
[
"BSD-3-Clause"
] | 2
|
2019-12-13T22:55:01.000Z
|
2021-02-09T22:09:58.000Z
|
###############################################################################
# Author: Greg Zynda
# Last Modified: 01/15/2021
###############################################################################
# BSD 3-Clause License
#
# Copyright (c) 2018, Texas Advanced Computing Center - UT Austin
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
###############################################################################
import sys, os, logging, re, json
import subprocess as sp  # needed by join(), which shells out to pkill
from tqdm import tqdm
from threading import Thread, current_thread
from time import sleep
logger = logging.getLogger(__name__)
try:
from Queue import Queue
pyv = 2
except:
from queue import Queue
pyv = 3
# https://github.com/tqdm/tqdm/issues/313
class TqdmHandler(logging.StreamHandler):
def emit(self, record):
try:
msg = self.format(record)
tqdm.write(msg) # , file=sys.stderr)
self.flush()
except (KeyboardInterrupt, SystemExit): raise
except: self.handleError(record)
class ThreadQueue:
def __init__(self, target, n_threads=10):
'''
Class for killable thread pools
# Parameters
target (function): Target function for threads to run
n_threads (int): Number of worker threads to use [10]
'''
# Get the log level
self.numerical_level = logger.getEffectiveLevel()
self.log_level = logging.getLevelName(self.numerical_level)
# Init logger with handler for tqdm
FORMAT = '[%(levelname)s - %(threadName)s - %(name)s.%(funcName)s] %(message)s'
# Store handler so it can be removed
self.handler = TqdmHandler()
self.handler.setFormatter(logging.Formatter(FORMAT))
logger.addHandler(self.handler)
logger.propagate = False
logger.debug("Finished initializing the threaded log handler for tqdm")
self.pbar = ''
self.n_threads = n_threads
self.queue = Queue()
# Spawn threads
self.threads = [Thread(target=self.worker, args=[target]) for i in range(n_threads)]
for t in self.threads: t.start()
logger.debug("Spawned and started %i threads"%(n_threads))
def __del__(self):
logger.debug("Removing handler")
logger.removeHandler(self.handler)
def process_list(self, work_list):
'''
# Parameters
work_list (list): List of argument lists for threads to run
'''
#if self.log_level != 'DEBUG':
self.pbar = tqdm(total=len(work_list))
try:
for work_item in work_list:
self.queue.put(work_item)
logger.debug("Added %i items to the work queue"%(len(work_list)))
while not self.queue.empty():
sleep(0.5)
logger.debug("Finished running work list")
while self.pbar.n != len(work_list): sleep(0.5)
if self.pbar:
self.pbar.close()
self.pbar = ''
except KeyboardInterrupt as e:
logger.warning("Caught KeyboardInterrupt - Killing threads")
for t in self.threads: t.alive = False
for t in self.threads: t.join()
sys.exit(e)
def join(self):
'''
Waits until all child threads are joined
'''
try:
for t in self.threads:
logger.debug("Added STOP")
self.queue.put('STOP')
for t in self.threads:
while t.is_alive():
t.join(0.5)
if self.pbar:
self.pbar.close()
self.pbar = ''
logger.debug("Joined all threads")
except KeyboardInterrupt as e:
logger.warning("Caught KeyboardInterrupt. Killing threads")
for t in self.threads: t.alive = False
sp.call('pkill -9f "singularity pull"', shell=True)
for t in self.threads: t.join()
sys.exit(e)
def worker(self, target):
'''
Worker for pulling images
# Parameters
target (function): Target function for thread to run
'''
t = current_thread()
t.alive = True
for args in iter(self.queue.get, 'STOP'):
if not t.alive:
logger.debug("Thread was killed. Stopping")
break
if type(args) is list or type(args) is tuple:
logger.debug("Running %s%s"%(target.__name__, str(map(str, args))))
target(*args)
else:
logger.debug("Running %s(%s)"%(target.__name__, str(args)))
target(args)
#if self.log_level != 'DEBUG':
logger.debug("Finished task. Updating progress")
self.pbar.update(1)
self.queue.task_done()
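# A minimal usage sketch (not part of the original module): the demo task,
# thread count and work list below are made-up placeholders that only
# illustrate the process_list()/join() flow documented above.
if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO)

    def demo_task(name, seconds):
        # Pretend to do some work so the tqdm progress bar has something to track.
        logger.info("processing %s", name)
        sleep(seconds)

    tq = ThreadQueue(target=demo_task, n_threads=2)
    tq.process_list([("item-%d" % i, 0.1) for i in range(5)])
    tq.join()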
| 35.535032
| 86
| 0.683277
|
4a1627399965ec4a1e41ff2fe21e6f742dbb51ad
| 7,287
|
py
|
Python
|
src/sentry/utils/cursors.py
|
apragacz/sf-sentry
|
2fdd6c1195c29a1d401d1cd538c22ea68556699a
|
[
"BSD-3-Clause"
] | 1
|
2018-03-05T15:40:12.000Z
|
2018-03-05T15:40:12.000Z
|
src/sentry/utils/cursors.py
|
pitchin/sentry
|
ff6f260e9edb726374d2e4f455ff8b3d0ecd551e
|
[
"BSD-3-Clause"
] | 1
|
2018-08-22T16:49:48.000Z
|
2018-08-22T16:49:48.000Z
|
src/sentry/utils/cursors.py
|
pitchin/sentry
|
ff6f260e9edb726374d2e4f455ff8b3d0ecd551e
|
[
"BSD-3-Clause"
] | 1
|
2018-07-02T09:46:44.000Z
|
2018-07-02T09:46:44.000Z
|
"""
sentry.utils.cursors
~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2014 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
import six
from collections import Sequence
class Cursor(object):
def __init__(self, value, offset=0, is_prev=False, has_results=None):
self.value = int(value)
self.offset = int(offset)
self.is_prev = bool(is_prev)
self.has_results = has_results
def __str__(self):
return '%s:%s:%s' % (self.value, self.offset, int(self.is_prev))
def __repr__(self):
return '<%s: value=%s offset=%s is_prev=%s>' % (
type(self), self.value, self.offset, int(self.is_prev)
)
def __nonzero__(self):
return self.has_results
@classmethod
def from_string(cls, value):
bits = value.split(':')
if len(bits) != 3:
raise ValueError
try:
bits = float(bits[0]), int(bits[1]), int(bits[2])
except (TypeError, ValueError):
raise ValueError
return cls(*bits)
class CursorResult(Sequence):
def __init__(self, results, next, prev, hits=None, max_hits=None):
self.results = results
self.next = next
self.prev = prev
self.hits = hits
self.max_hits = max_hits
def __len__(self):
return len(self.results)
def __iter__(self):
return iter(self.results)
def __getitem__(self, key):
return self.results[key]
def __repr__(self):
return '<%s: results=%s>' % (type(self).__name__, len(self.results))
def _build_next_values(cursor, results, key, limit, is_desc):
value = cursor.value
offset = cursor.offset
is_prev = cursor.is_prev
num_results = len(results)
if not value and num_results:
value = int(key(results[0]))
# Next cursor for a prev-cursor simply starts from that prev cursors value
# without an offset.
if is_prev:
return (value, 0, True)
# No results means no more next
if not num_results:
return (value, offset, False)
# Are there more results than whats on the current page?
has_next = num_results > limit
# Determine what our next cursor is by ensuring we have a unique offset
next_value = int(key(results[-1]))
# value has not changed, page forward by adjusting the offset
if next_value == value:
next_offset = offset + limit
return (next_value, next_offset, has_next)
# We have an absolute value to page from. If any of the items in
# the current result set come *after* or *before* (depending on the
# is_desc flag) we will want to increment the offset to account for
# moving past them.
#
# This is required to account for loss of precision in the key value.
next_offset = 0
result_iter = reversed(results)
# If we have more results the last item in the results should be
# skipped, as we know we want to start from that item and do not
# need to offset from it.
if has_next:
six.next(result_iter)
for result in result_iter:
result_value = int(key(result))
is_larger = result_value >= next_value
is_smaller = result_value <= next_value
if (is_desc and is_smaller) or (not is_desc and is_larger):
next_offset += 1
else:
break
return (next_value, next_offset, has_next)
def _build_prev_values(cursor, results, key, limit, is_desc):
value = cursor.value
offset = cursor.offset
is_prev = cursor.is_prev
num_results = len(results)
if is_prev:
has_prev = num_results > limit
else:
        # It's likely that there's a previous page if the cursor carried a
        # non-zero value or offset
has_prev = value or offset
# If the cursor contains previous results, the first item is the item that
# indicates if we have more items later, and is *not* the first item in the
# list, that should be used for the value.
first_prev_index = 1 if is_prev and has_prev else 0
# If we're paging back we need to calculate the key from the first result
    # with for_prev=True to ensure rounding of the key is correct. See
# sentry.api.paginator.BasePaginator.get_item_key
prev_value = int(key(results[first_prev_index], for_prev=True)) if results else 0
# Prev only has an offset if the cursor we were dealing with was a
# previous cursor. Otherwise we'd be taking the offset while moving forward.
prev_offset = offset if is_prev else 0
if not (is_prev and num_results):
return (prev_value, prev_offset, has_prev)
# Value has not changed, page back by adjusting the offset
if prev_value == value:
prev_offset = offset + limit
return (prev_value, prev_offset, has_prev)
# Just as in the next cursor builder, we may need to add an offset
# if any of the results at the beginning are *before* or *after*
# (depending on the is_desc flag).
#
# This is required to account for loss of precision in the key value.
prev_offset = 0
result_iter = iter(results)
# If we know there are more previous results, we need to move past
# the item indicating that more items exist.
if has_prev:
six.next(result_iter)
# Always move past the first item, this is the prev_value item and will
# already be offset in the next query.
six.next(result_iter)
for result in result_iter:
result_value = int(key(result, for_prev=True))
is_larger = result_value >= prev_value
is_smaller = result_value <= prev_value
# Note that the checks are reversed here as a prev query has
# it's ordering reversed.
if (is_desc and is_larger) or (not is_desc and is_smaller):
prev_offset += 1
else:
break
return (prev_value, prev_offset, has_prev)
def build_cursor(results, key, limit=100, is_desc=False, cursor=None, hits=None, max_hits=None):
if cursor is None:
cursor = Cursor(0, 0, 0)
# Compute values for next cursor
next_value, next_offset, has_next = _build_next_values(
cursor=cursor,
results=results,
key=key,
limit=limit,
is_desc=is_desc
)
# Compute values for prev cursor
prev_value, prev_offset, has_prev = _build_prev_values(
cursor=cursor,
results=results,
key=key,
limit=limit,
is_desc=is_desc
)
if cursor.is_prev and has_prev:
        # A prev cursor with more results should have the first item chopped off
# as this is the item that indicates we have more items before, and
# should not be included on this page.
results = results[1:]
elif not cursor.is_prev:
# For next page cursors we cut off the extra item that indicates there
# are more items.
results = results[:limit]
next_cursor = Cursor(next_value or 0, next_offset, False, has_next)
prev_cursor = Cursor(prev_value or 0, prev_offset, True, has_prev)
return CursorResult(
results=results,
next=next_cursor,
prev=prev_cursor,
hits=hits,
max_hits=max_hits,
)
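# A minimal usage sketch (not part of the original module): the rows and key
# function below are made-up placeholders showing that build_cursor expects to
# be handed one row more than the page limit so it can detect a next page.
if __name__ == '__main__':
    rows = [{'id': i, 'timestamp': 1000 + i} for i in range(6)]  # limit + 1 rows fetched

    def timestamp_key(row, for_prev=False):
        return row['timestamp']

    page = build_cursor(rows, key=timestamp_key, limit=5)
    print(list(page), str(page.next), str(page.prev))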
| 30.877119
| 96
| 0.650336
|
4a16277fce4469d98744f9be7f00abef87fd233d
| 2,817
|
py
|
Python
|
ReachabilityTables.py
|
PauloBarrantes/Tarea1-Redes
|
ebdb0defbc0a9b7ccf6fb87ff151ca33daf24c64
|
[
"MIT"
] | null | null | null |
ReachabilityTables.py
|
PauloBarrantes/Tarea1-Redes
|
ebdb0defbc0a9b7ccf6fb87ff151ca33daf24c64
|
[
"MIT"
] | null | null | null |
ReachabilityTables.py
|
PauloBarrantes/Tarea1-Redes
|
ebdb0defbc0a9b7ccf6fb87ff151ca33daf24c64
|
[
"MIT"
] | null | null | null |
from texttable import *
import threading
class ReachabilityTables:
"""docstring for ReachabilityTables."""
def __init__(self):
self.reach_table = {}
# Save the ip from the source of the message and the mask as the key. For the entry,
# we will save the message ip address, the cost and the port it is working on.
def save_address(self, ip, origin, mask, cost, port):
# First, we need to make sure that we have the key in table.
if self.reach_table.get((ip, mask)):
# If we did find the key, then we need to try to acquire the lock, before
# updating the table, and make sure it is not in the process of being remove.
# Also, we need to catch any lock exception, because in case the lock gets remove while updating the table.
try:
# Acquire the lock.
lock = self.reach_table.get((ip, mask))[3]
lock.acquire()
# Now update the table and release the lock when finished.
if self.reach_table.get((ip, mask))[2] > cost:
self.reach_table.update({(ip, mask): [origin, port, cost, lock]})
lock.release()
except threading.ThreadError:
# The lock got remove, so just keep going, we know we can't update it.
print("No se pudo modificar la entrada: " + ip + ", " + mask + ". Intente de nuevo.")
return
else:
# Create a new lock for this entry and a lock check, in case we are deleting it.
entry_lock = threading.Lock()
self.reach_table.update({(ip, mask): [origin, port, cost, entry_lock]})
# Remove an entry from the reachability table.
def remove_address(self, ip, port):
for key in list(self.reach_table):
if self.reach_table.get(key)[0] == ip and self.reach_table.get(key)[1] == port:
# Acquire the lock, remove the entry and then release the lock.
# In order to avoid any errors, we need to keep our lock in memory.
entry_lock = self.reach_table.get(key)[3]
entry_lock.acquire()
self.reach_table.pop(key)
entry_lock.release()
# Print the reachability table.
def print_table(self):
print("TABLA DE ALCANZABILIDAD")
table = Texttable()
table.set_cols_align(["l", "r","g" ,"c","k"])
table.set_cols_valign(["t", "m","w", "b","a"])
table.add_row(["IP", "Máscara", "Origen","Puerto","Costo"])
for key in self.reach_table:
table.add_row([key[0], key[1], self.reach_table.get(key)[0],
self.reach_table.get(key)[1], self.reach_table.get(key)[2]])
print (table.draw() + "\n")
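# A quick usage sketch (assumes the `texttable` package is installed): record two
# routes, drop one by its origin address and port, and print the remaining entries.
if __name__ == "__main__":
    rt = ReachabilityTables()
    rt.save_address("10.0.0.0", "192.168.1.1", "24", cost=5, port=5000)
    rt.save_address("10.0.1.0", "192.168.1.2", "24", cost=3, port=5001)
    rt.remove_address("192.168.1.2", 5001)
    rt.print_table()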
| 41.426471
| 119
| 0.579695
|
4a1629890e4f4085770b48a3a68cf69b5b402ee3
| 1,594
|
py
|
Python
|
app/main/views.py
|
Ian1017/News
|
a28fc4447608a20256ad88881f331bc27ef932a2
|
[
"MIT"
] | null | null | null |
app/main/views.py
|
Ian1017/News
|
a28fc4447608a20256ad88881f331bc27ef932a2
|
[
"MIT"
] | null | null | null |
app/main/views.py
|
Ian1017/News
|
a28fc4447608a20256ad88881f331bc27ef932a2
|
[
"MIT"
] | null | null | null |
from flask import render_template, redirect, url_for, request
from . import main
from ..models import Sources
from ..requests import get_articles, get_sources, topheadlines
@main.route('/')
def index():
'''
View root page function that returns the index page and its data
'''
title = "Home- News From Various News Sources"
general_category = get_sources('general')
business_category = get_sources('business')
entertainment_category = get_sources('entertainment')
sports_category = get_sources('sports')
technology_category = get_sources('technology')
science_category = get_sources('science')
health_category = get_sources('health')
return render_template('index.html', title = title, general = general_category, business = business_category, entertainment = entertainment_category, sports = sports_category, tech = technology_category, science = science_category, health = health_category)
@main.route('/articles/<source_id>/<int:per_page>')
def articles(source_id, per_page):
'''
Function that returns articles based on their sources
'''
news_source = get_articles(source_id, per_page)
title = f'{source_id} | All Articles'
return render_template('articles.html', title = title, name = source_id, news = news_source)
@main.route('/topheadlines/<int:per_page>')
def headlines(per_page):
'''
    Function that returns top headline articles
'''
topheadlines_news = topheadlines(per_page)
title = 'Top Headlines'
    return render_template('topheadlines.html', title = title, news = topheadlines_news)
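# A standalone sketch (hypothetical minimal app, separate from this blueprint)
# showing the URL-converter syntax used in the routes above: angle-bracket
# segments become view-function arguments.
def _demo_route_converters():
    from flask import Flask
    demo = Flask(__name__)
    @demo.route('/articles/<source_id>/<int:per_page>')
    def demo_articles(source_id, per_page):
        return f'{source_id}: {per_page} articles per page'
    return demo.test_client().get('/articles/bbc-news/10').data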
| 37.069767
| 261
| 0.735885
|
4a162b4278080aa9bd839b3a29f7731617d7d942
| 9,165
|
py
|
Python
|
releasenotes/source/conf.py
|
laoyigrace/subject_store
|
b0113aac169f7bd54a6ab31f145fe01a94365810
|
[
"Apache-2.0"
] | null | null | null |
releasenotes/source/conf.py
|
laoyigrace/subject_store
|
b0113aac169f7bd54a6ab31f145fe01a94365810
|
[
"Apache-2.0"
] | null | null | null |
releasenotes/source/conf.py
|
laoyigrace/subject_store
|
b0113aac169f7bd54a6ab31f145fe01a94365810
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Glance_store Release Notes documentation build configuration file
#
# Modified from corresponding configuration file in Glance.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'oslosphinx',
'reno.sphinxext',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Glance_store Release Notes'
copyright = u'2015, OpenStack Foundation'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
import pbr.version
subject_store_version = pbr.version.VersionInfo('subject_store')
# The full version, including alpha/beta/rc tags.
release = subject_store_version.version_string_with_vcs()
# The short X.Y version.
version = subject_store_version.canonical_version_string()
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
# html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'GlanceStoreReleaseNotesdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index',
'GlanceStoreReleaseNotes.tex',
u'Glance_store Release Notes Documentation',
u'Glance_store Developers',
'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index',
'subjectstorereleasenotes',
u'Glance_store Release Notes Documentation',
[u'Glance_store Developers'],
1)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index',
'GlanceStoreReleaseNotes',
u'Glance_store Release Notes Documentation',
u'Glance_store Developers',
'GlanceStoreReleaseNotes',
'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
# texinfo_no_detailmenu = False
# -- Options for Internationalization output ------------------------------
locale_dirs = ['locale/']
| 32.5
| 81
| 0.716094
|
4a162c19b3da4e63f6de66dac71e79e75a3c1d07
| 1,899
|
py
|
Python
|
scripts/playbook.py
|
JHibbard/create-playbook
|
aed60565a09af4b21d849fd083d4f9c4e2bef899
|
[
"MIT"
] | null | null | null |
scripts/playbook.py
|
JHibbard/create-playbook
|
aed60565a09af4b21d849fd083d4f9c4e2bef899
|
[
"MIT"
] | null | null | null |
scripts/playbook.py
|
JHibbard/create-playbook
|
aed60565a09af4b21d849fd083d4f9c4e2bef899
|
[
"MIT"
] | null | null | null |
import click
from pathlib import Path
CONTEXT_SETTINGS = dict(help_option_names=['-h', '--help'],
max_content_width=80)
def create_dirs(dirs_to_create):
for d in dirs_to_create:
d.mkdir(mode=0o755, parents=True)
def create_files(files_to_create):
for f in files_to_create:
open(f, 'a').close()
def create_playbook(location):
dirs_to_create = [
location.joinpath('group_vars'),
location.joinpath('roles'),
]
files_to_create = [
location.joinpath('group_vars', 'all'),
location.joinpath('playbook.yml'),
]
create_dirs(dirs_to_create)
create_files(files_to_create)
def create_role(location, rolename):
dirs_to_create = [
location.joinpath("roles", rolename),
location.joinpath("roles", rolename, "tasks"),
location.joinpath("roles", rolename, "handlers"),
location.joinpath("roles", rolename, "templates"),
location.joinpath("roles", rolename, "files"),
location.joinpath("roles", rolename, "vars"),
location.joinpath("roles", rolename, "meta"),
]
files_to_create = [
location.joinpath("roles", rolename, "tasks", "main.yml"),
location.joinpath("roles", rolename, "handlers", "main.yml"),
]
create_dirs(dirs_to_create)
create_files(files_to_create)
@click.command(context_settings=CONTEXT_SETTINGS)
@click.argument('playbook_location',
type=click.Path(file_okay=False, dir_okay=False, resolve_path=True))
@click.argument('roles_to_create', nargs=-1)
def make_skeleton(playbook_location, roles_to_create):
location = Path(playbook_location)
location.mkdir(parents=True)
click.echo(f'Creating playbook: {location.stem}')
create_playbook(location)
for r in roles_to_create:
click.echo(f'... creating role: {r}')
create_role(location, r)
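# A quick self-test sketch (uses click's built-in test runner; "site", "common",
# and "webserver" are hypothetical names): builds the skeleton in a throwaway
# directory and prints the command output.
if __name__ == '__main__':
    from click.testing import CliRunner
    runner = CliRunner()
    with runner.isolated_filesystem():
        result = runner.invoke(make_skeleton, ['site', 'common', 'webserver'])
        print(result.output)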
| 28.772727
| 84
| 0.665087
|
4a162c52ec9dffcdac535037169d1d679a6cc193
| 2,416
|
py
|
Python
|
perfkitbenchmarker/providers/gcp/bigquery.py
|
pierre-emmanuelJ/PerfKitBenchmarker
|
3ef6acfd54d4e3d1f074ef40b3fc5b3a3f855f69
|
[
"Apache-2.0"
] | null | null | null |
perfkitbenchmarker/providers/gcp/bigquery.py
|
pierre-emmanuelJ/PerfKitBenchmarker
|
3ef6acfd54d4e3d1f074ef40b3fc5b3a3f855f69
|
[
"Apache-2.0"
] | null | null | null |
perfkitbenchmarker/providers/gcp/bigquery.py
|
pierre-emmanuelJ/PerfKitBenchmarker
|
3ef6acfd54d4e3d1f074ef40b3fc5b3a3f855f69
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2018 PerfKitBenchmarker Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module containing class for GCP's Bigquery EDW service."""
from perfkitbenchmarker import edw_service
from perfkitbenchmarker import flags
from perfkitbenchmarker import providers
from perfkitbenchmarker.providers.gcp import util as gcp_util
FLAGS = flags.FLAGS
class Bigquery(edw_service.EdwService):
"""Object representing a Bigquery cluster."""
CLOUD = providers.GCP
SERVICE_TYPE = 'bigquery'
def __init__(self, edw_service_spec):
super(Bigquery, self).__init__(edw_service_spec)
def _Create(self):
"""Create a BigQuery cluster.
Bigquery clusters creation is out of scope of the benchmarking.
"""
raise NotImplementedError
def _Exists(self):
"""Method to validate the existence of a Bigquery cluster.
Returns:
Boolean value indicating the existence of a cluster.
"""
return True
def _Delete(self):
"""Delete a BigQuery cluster.
Bigquery cluster deletion is out of scope of benchmarking.
"""
raise NotImplementedError
def GetMetadata(self):
"""Return a dictionary of the metadata for the BigQuery cluster."""
basic_data = super(Bigquery, self).GetMetadata()
return basic_data
def RunCommandHelper(self):
"""Bigquery specific run script command components."""
bq = self.cluster_identifier.split('.')
return '--bq_project_id={} --bq_dataset_id={}'.format(bq[0], bq[1])
def InstallAndAuthenticateRunner(self, vm):
"""Method to perform installation and authentication of bigquery runner.
    The native BigQuery client that ships with the google_cloud_sdk
    (https://cloud.google.com/bigquery/docs/bq-command-line-tool) is used as the client.
Args:
vm: Client vm on which the script will be run.
"""
vm.Install('google_cloud_sdk')
gcp_util.AuthenticateServiceAccount(vm)
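# Illustration only (not part of PerfKitBenchmarker): RunCommandHelper above expects
# cluster_identifier in '<project>.<dataset>' form and turns it into bq flags.
def _demo_bq_flags(cluster_identifier='my-project.my_dataset'):
    project_id, dataset_id = cluster_identifier.split('.')
    return '--bq_project_id={} --bq_dataset_id={}'.format(project_id, dataset_id)
# _demo_bq_flags() -> '--bq_project_id=my-project --bq_dataset_id=my_dataset'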
| 31.376623
| 78
| 0.738411
|
4a162c7512d304692e43050efa1c3b9bd044b008
| 2,059
|
py
|
Python
|
resource/lib/python2.7/site-packages/pyasn1/codec/der/encoder.py
|
claudiopastorini/geofire-python
|
274e1b1d733a1158e4f36de40f0349dbc1ff6c34
|
[
"MIT"
] | 1,346
|
2015-01-01T14:52:24.000Z
|
2022-03-28T12:50:48.000Z
|
resource/lib/python2.7/site-packages/pyasn1/codec/der/encoder.py
|
claudiopastorini/geofire-python
|
274e1b1d733a1158e4f36de40f0349dbc1ff6c34
|
[
"MIT"
] | 474
|
2015-01-01T10:27:46.000Z
|
2022-03-21T12:26:16.000Z
|
resource/lib/python2.7/site-packages/pyasn1/codec/der/encoder.py
|
claudiopastorini/geofire-python
|
274e1b1d733a1158e4f36de40f0349dbc1ff6c34
|
[
"MIT"
] | 191
|
2015-01-02T18:27:22.000Z
|
2022-03-29T10:49:48.000Z
|
#
# This file is part of pyasn1 software.
#
# Copyright (c) 2005-2017, Ilya Etingof <etingof@gmail.com>
# License: http://pyasn1.sf.net/license.html
#
from pyasn1.type import univ
from pyasn1.codec.cer import encoder
from pyasn1 import error
class SetOfEncoder(encoder.SetOfEncoder):
@staticmethod
def _cmpSetComponents(c1, c2):
tagSet1 = isinstance(c1, univ.Choice) and c1.getEffectiveTagSet() or c1.getTagSet()
tagSet2 = isinstance(c2, univ.Choice) and c2.getEffectiveTagSet() or c2.getTagSet()
return cmp(tagSet1, tagSet2)
tagMap = encoder.tagMap.copy()
tagMap.update({
# Overload CER encoders with BER ones (a bit hackerish XXX)
univ.BitString.tagSet: encoder.encoder.BitStringEncoder(),
univ.OctetString.tagSet: encoder.encoder.OctetStringEncoder(),
# Set & SetOf have same tags
univ.SetOf().tagSet: SetOfEncoder()
})
typeMap = encoder.typeMap
class Encoder(encoder.Encoder):
supportIndefLength = False
def __call__(self, client, defMode=True, maxChunkSize=0):
if not defMode or maxChunkSize:
raise error.PyAsn1Error('DER forbids indefinite length mode')
return encoder.Encoder.__call__(self, client, defMode, maxChunkSize)
#: Turns ASN.1 object into DER octet stream.
#:
#: Takes any ASN.1 object (e.g. :py:class:`~pyasn1.type.base.PyAsn1Item` derivative)
#: walks all its components recursively and produces a DER octet stream.
#:
#: Parameters
#: ----------
#: value: any pyasn1 object (e.g. :py:class:`~pyasn1.type.base.PyAsn1Item` derivative)
#: A pyasn1 object to encode
#:
#: defMode: :py:class:`bool`
#: If `False`, produces indefinite length encoding
#:
#: maxChunkSize: :py:class:`int`
#: Maximum chunk size in chunked encoding mode (0 denotes unlimited chunk size)
#:
#: Returns
#: -------
#: : :py:class:`bytes` (Python 3) or :py:class:`str` (Python 2)
#: Given ASN.1 object encoded into DER octet stream
#:
#: Raises
#: ------
#: : :py:class:`pyasn1.error.PyAsn1Error`
#: On encoding errors
encode = Encoder(tagMap, typeMap)
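# A brief usage sketch of the `encode` callable defined above: DER-encode an ASN.1
# INTEGER and a SEQUENCE OF two INTEGERs (pyasn1 types imported at the top).
if __name__ == '__main__':
    print(repr(encode(univ.Integer(42))))   # tag 02, length 01, value 0x2a
    seq = univ.SequenceOf(componentType=univ.Integer())
    seq.setComponentByPosition(0, univ.Integer(1))
    seq.setComponentByPosition(1, univ.Integer(2))
    print(repr(encode(seq)))                # tag 30 (SEQUENCE), length 06, two TLV integers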
| 31.19697
| 91
| 0.697912
|
4a162d5722784f69fcdbf49a5d1efbe37cba7f62
| 4,874
|
py
|
Python
|
improver/cli/regrid.py
|
owena11/improver
|
ff658db31364c0bdf3af3940736c260e10544705
|
[
"BSD-3-Clause"
] | 1
|
2021-08-02T21:17:18.000Z
|
2021-08-02T21:17:18.000Z
|
improver/cli/regrid.py
|
owena11/improver
|
ff658db31364c0bdf3af3940736c260e10544705
|
[
"BSD-3-Clause"
] | 4
|
2018-01-24T11:29:15.000Z
|
2021-01-11T15:16:21.000Z
|
improver/cli/regrid.py
|
owena11/improver
|
ff658db31364c0bdf3af3940736c260e10544705
|
[
"BSD-3-Clause"
] | 1
|
2021-08-02T21:17:19.000Z
|
2021-08-02T21:17:19.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# (C) British Crown Copyright 2017-2021 Met Office.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Script to regrid a NetCDF file"""
from improver import cli
@cli.clizefy
@cli.with_output
def process(
cube: cli.inputcube,
target_grid: cli.inputcube,
land_sea_mask: cli.inputcube = None,
*,
regrid_mode="bilinear",
extrapolation_mode="nanmask",
land_sea_mask_vicinity: float = 25000,
regridded_title: str = None,
):
"""Regrids source cube data onto a target grid. Optional land-sea awareness.
Args:
cube (iris.cube.Cube):
Source cube to be regridded.
target_grid (iris.cube.Cube):
Cube defining the spatial grid onto which to regrid the source data.
If also using land_sea_mask-aware regridding then this must be
land_binary_mask data.
land_sea_mask (iris.cube.Cube):
Cube describing the land_binary_mask on the source grid if land-sea
aware regridding is required, with land points set to one and sea points
set to zero.
regrid_mode (str):
Selects which regridding techniques to use. Default uses
iris.analysis.Linear(); "nearest" uses iris.analysis.Nearest();
"nearest-with-mask" uses Nearest() with land-sea awareness.
extrapolation_mode (str):
Mode to use for extrapolating data into regions beyond the limits
of the input cube domain. Refer to online documentation for
iris.analysis.
Modes are -
extrapolate - extrapolated points will take their values from the
nearest source point
nan - extrapolated points will be set to NaN
error - a ValueError will be raised notifying an attempt to
extrapolate
mask - extrapolated points will always be masked, even if
the source data is not a MaskedArray
nanmask - if the source data is a MaskedArray extrapolated points
will be masked; otherwise they will be set to NaN
land_sea_mask_vicinity (float):
Radius of vicinity to search for a coastline, in metres.
regridded_title (str):
New "title" attribute to be set if the field is being regridded
(since "title" may contain grid information). If None, a default
value is used.
Returns:
iris.cube.Cube:
Processed cube.
Raises:
ValueError:
If source land_sea_mask is supplied but regrid mode is not
"nearest-with-mask".
ValueError:
If regrid_mode is "nearest-with-mask" but no source land_sea_mask
is provided (from plugin).
"""
from improver.standardise import RegridLandSea
if land_sea_mask and "nearest-with-mask" not in regrid_mode:
msg = (
"Land-mask file supplied without appropriate regrid-mode. "
"Use --regrid-mode nearest-with-mask."
)
raise ValueError(msg)
return RegridLandSea(
regrid_mode=regrid_mode,
extrapolation_mode=extrapolation_mode,
landmask=land_sea_mask,
landmask_vicinity=land_sea_mask_vicinity,
)(cube, target_grid, regridded_title=regridded_title)
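# Not the IMPROVER plugin itself -- a rough sketch of the underlying iris call for
# the default "bilinear" mode, assuming `cube` and `target_grid` are iris Cubes on
# compatible coordinate systems and iris is installed.
def _bilinear_regrid_sketch(cube, target_grid, extrapolation_mode="nanmask"):
    import iris.analysis
    return cube.regrid(target_grid, iris.analysis.Linear(extrapolation_mode=extrapolation_mode))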
| 42.754386
| 84
| 0.677472
|
4a162df88fb6828ea849b09df60a06af7b567872
| 1,521
|
py
|
Python
|
kd_common/kd_common/funcutil.py
|
konovalovdmitry/catsnap
|
d5f1d7c37dcee1ad3fee2cdc12a3b44b56f4c63f
|
[
"MIT"
] | null | null | null |
kd_common/kd_common/funcutil.py
|
konovalovdmitry/catsnap
|
d5f1d7c37dcee1ad3fee2cdc12a3b44b56f4c63f
|
[
"MIT"
] | null | null | null |
kd_common/kd_common/funcutil.py
|
konovalovdmitry/catsnap
|
d5f1d7c37dcee1ad3fee2cdc12a3b44b56f4c63f
|
[
"MIT"
] | 1
|
2021-09-30T08:06:20.000Z
|
2021-09-30T08:06:20.000Z
|
from typing import List, Union, Any, Set, TypeVar, Optional, Tuple, Iterable
from types import ModuleType, FunctionType
from gc import get_referents
import sys
T = TypeVar('T')
def to_list(seq: Optional[Union[List[T], Tuple[T, ...], Set[T], str]], unique_flag: bool = False) -> List[Union[T, str]]:
if seq is None:
return []
if isinstance(seq, (set, list, tuple)):
return list(seq)
return [seq]
def unique(sequence: Iterable[T]) -> List[T]:
seen: Set[Any] = set()
result = []
for x in sequence:
        if x not in seen:
seen.add(x)
result.append(x)
return result
_BLACKLIST = type, ModuleType, FunctionType
def getsize(obj: Any) -> int:
if isinstance(obj, _BLACKLIST):
raise TypeError('getsize() does not take argument of type: '+ str(type(obj)))
seen_ids: Set[Any] = set()
size = 0
objects = [obj]
while objects:
need_referents = []
for obj in objects:
if not isinstance(obj, _BLACKLIST) and id(obj) not in seen_ids:
seen_ids.add(id(obj))
size += sys.getsizeof(obj)
need_referents.append(obj)
objects = get_referents(*need_referents)
return size
def flatten(l):
if isinstance(l, float) or l is None:
        return
for el in l:
if isinstance(el, Iterable) and not isinstance(el, (str, bytes)):
yield from flatten(el)
else:
yield el
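# A small usage sketch of the helpers above.
if __name__ == '__main__':
    print(to_list('abc'))                       # ['abc']
    print(to_list(None))                        # []
    print(unique([3, 1, 3, 2, 1]))              # [3, 1, 2]
    print(list(flatten([1, [2, [3, 'ab']]])))   # [1, 2, 3, 'ab']
    print(getsize({'a': [1, 2, 3]}) > 0)        # True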
| 26.684211
| 121
| 0.60092
|
4a162f14bbb7e26e395f4d7bf9cb04b2c5da4bfa
| 1,188
|
py
|
Python
|
statsmodels/sandbox/examples/try_quantile_regression1.py
|
yarikoptic/statsmodels
|
f990cb1a1ef0c9883c9394444e6f9d027efabec6
|
[
"BSD-3-Clause"
] | 20
|
2015-01-28T21:52:59.000Z
|
2022-01-24T01:24:26.000Z
|
statsmodels/sandbox/examples/try_quantile_regression1.py
|
yarikoptic/statsmodels
|
f990cb1a1ef0c9883c9394444e6f9d027efabec6
|
[
"BSD-3-Clause"
] | 6
|
2015-08-28T16:59:03.000Z
|
2019-04-12T22:29:01.000Z
|
statsmodels/sandbox/examples/try_quantile_regression1.py
|
yarikoptic/statsmodels
|
f990cb1a1ef0c9883c9394444e6f9d027efabec6
|
[
"BSD-3-Clause"
] | 28
|
2015-04-01T20:02:25.000Z
|
2021-07-03T00:09:28.000Z
|
'''Example to illustrate Quantile Regression
Author: Josef Perktold
polynomial regression with systematic deviations above
'''
import numpy as np
from statsmodels.compat.python import zip
from scipy import stats
import statsmodels.api as sm
from statsmodels.regression.quantile_regression import QuantReg
sige = 0.1
nobs, k_vars = 500, 3
x = np.random.uniform(-1, 1, size=nobs)
x.sort()
exog = np.vander(x, k_vars+1)[:,::-1]
mix = 0.1 * stats.norm.pdf(x[:,None], loc=np.linspace(-0.5, 0.75, 4), scale=0.01).sum(1)
y = exog.sum(1) + mix + sige * (np.random.randn(nobs)/2 + 1)**3
p = 0.5
res_qr = QuantReg(y, exog).fit(p)
res_qr2 = QuantReg(y, exog).fit(0.1)
res_qr3 = QuantReg(y, exog).fit(0.75)
res_ols = sm.OLS(y, exog).fit()
params = [res_ols.params, res_qr2.params, res_qr.params, res_qr3.params]
labels = ['ols', 'qr 0.1', 'qr 0.5', 'qr 0.75']
import matplotlib.pyplot as plt
plt.figure()
plt.plot(x, y, '.', alpha=0.5)
for lab, beta in zip(labels, params):
print('%-8s'%lab, np.round(beta, 4))
fitted = np.dot(exog, beta)
lw = 2
plt.plot(x, fitted, lw=lw, label=lab)
plt.legend()
plt.title('Quantile Regression')
plt.show()
| 25.826087
| 88
| 0.670034
|
4a162f81404021520e9fd94756021b5b72932916
| 2,633
|
py
|
Python
|
okta/models/password_policy.py
|
corylevine/okta-sdk-python
|
c86b8fdc4525e84199143c27213c0aebc6b2af8f
|
[
"Apache-2.0"
] | 145
|
2017-06-13T21:54:04.000Z
|
2022-02-25T05:44:34.000Z
|
okta/models/password_policy.py
|
corylevine/okta-sdk-python
|
c86b8fdc4525e84199143c27213c0aebc6b2af8f
|
[
"Apache-2.0"
] | 146
|
2017-06-02T17:46:12.000Z
|
2022-03-29T15:52:15.000Z
|
okta/models/password_policy.py
|
corylevine/okta-sdk-python
|
c86b8fdc4525e84199143c27213c0aebc6b2af8f
|
[
"Apache-2.0"
] | 98
|
2017-06-27T03:44:51.000Z
|
2022-03-23T04:58:18.000Z
|
# flake8: noqa
"""
Copyright 2020 - Present Okta, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# AUTO-GENERATED! DO NOT EDIT FILE DIRECTLY
# SEE CONTRIBUTOR DOCUMENTATION
from okta.models.policy\
import Policy
from okta.models.policy_type import PolicyType
from okta.models import password_policy_conditions\
as password_policy_conditions
from okta.models import password_policy_settings\
as password_policy_settings
class PasswordPolicy(
Policy
):
"""
A class for PasswordPolicy objects.
"""
def __init__(self, config=None):
super().__init__(config)
if config:
self.type = PolicyType("PASSWORD")
if "conditions" in config:
if isinstance(config["conditions"],
password_policy_conditions.PasswordPolicyConditions):
self.conditions = config["conditions"]
elif config["conditions"] is not None:
self.conditions = password_policy_conditions.PasswordPolicyConditions(
config["conditions"]
)
else:
self.conditions = None
else:
self.conditions = None
if "settings" in config:
if isinstance(config["settings"],
password_policy_settings.PasswordPolicySettings):
self.settings = config["settings"]
elif config["settings"] is not None:
self.settings = password_policy_settings.PasswordPolicySettings(
config["settings"]
)
else:
self.settings = None
else:
self.settings = None
else:
self.conditions = None
self.settings = None
def request_format(self):
parent_req_format = super().request_format()
current_obj_format = {
"conditions": self.conditions,
"settings": self.settings
}
parent_req_format.update(current_obj_format)
return parent_req_format
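# Illustration only (hypothetical values, shaped after the Okta policy API): the
# kind of `config` dict PasswordPolicy.__init__ consumes -- "conditions" and
# "settings" may be nested dicts, already-built model objects, or None.
_example_password_policy_config = {
    "name": "Default password policy",
    "conditions": {"people": {"groups": {"include": ["EVERYONE"]}}},
    "settings": {"password": {"complexity": {"minLength": 12}}},
}
# PasswordPolicy(_example_password_policy_config) would wrap the nested dicts in
# PasswordPolicyConditions / PasswordPolicySettings instances.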
| 34.194805
| 90
| 0.615648
|
4a163132977cbbb316f2ac6700da38608081aacf
| 5,062
|
py
|
Python
|
salt/modules/eselect.py
|
ahammond/salt
|
945b21b70dbe708716d7b009a2005ef0acf76e6b
|
[
"Apache-2.0"
] | 1
|
2019-06-04T19:11:22.000Z
|
2019-06-04T19:11:22.000Z
|
salt/modules/eselect.py
|
ahammond/salt
|
945b21b70dbe708716d7b009a2005ef0acf76e6b
|
[
"Apache-2.0"
] | null | null | null |
salt/modules/eselect.py
|
ahammond/salt
|
945b21b70dbe708716d7b009a2005ef0acf76e6b
|
[
"Apache-2.0"
] | 2
|
2017-01-05T16:14:59.000Z
|
2019-01-31T23:15:25.000Z
|
# -*- coding: utf-8 -*-
'''
Support for eselect, Gentoo's configuration and management tool.
'''
from __future__ import absolute_import
import logging
# Import salt libs
import salt.utils
log = logging.getLogger(__name__)
def __virtual__():
'''
Only work on Gentoo systems with eselect installed
'''
if __grains__['os'] == 'Gentoo' and salt.utils.which('eselect'):
return 'eselect'
return (False, 'The eselect execution module cannot be loaded: either the system is not Gentoo or the eselect binary is not in the path.')
def exec_action(module, action, module_parameter=None, action_parameter=None, state_only=False):
'''
Execute an arbitrary action on a module.
module
name of the module to be executed
action
name of the module's action to be run
module_parameter
additional params passed to the defined module
action_parameter
additional params passed to the defined action
state_only
don't return any output but only the success/failure of the operation
CLI Example (updating the ``php`` implementation used for ``apache2``):
.. code-block:: bash
salt '*' eselect.exec_action php update action_parameter='apache2'
'''
out = __salt__['cmd.run'](
'eselect --brief --colour=no {0} {1} {2} {3}'.format(
module, module_parameter or '', action, action_parameter or ''),
python_shell=False
)
out = out.strip().split('\n')
if out[0].startswith('!!! Error'):
return False
if state_only:
return True
if len(out) < 1:
return False
if len(out) == 1 and not out[0].strip():
return False
return out
def get_modules():
'''
List available ``eselect`` modules.
CLI Example:
.. code-block:: bash
salt '*' eselect.get_modules
'''
modules = []
module_list = exec_action('modules', 'list', action_parameter='--only-names')
if not module_list:
return None
for module in module_list:
if module not in ['help', 'usage', 'version']:
modules.append(module)
return modules
def get_target_list(module, action_parameter=None):
'''
List available targets for the given module.
module
name of the module to be queried for its targets
action_parameter
additional params passed to the defined action
.. versionadded:: Carbon
CLI Example:
.. code-block:: bash
salt '*' eselect.get_target_list kernel
'''
exec_output = exec_action(module, 'list', action_parameter=action_parameter)
if not exec_output:
return None
target_list = []
if isinstance(exec_output, list):
for item in exec_output:
target_list.append(item.split(None, 1)[0])
return target_list
return None
def get_current_target(module, module_parameter=None, action_parameter=None):
'''
Get the currently selected target for the given module.
module
name of the module to be queried for its current target
module_parameter
additional params passed to the defined module
action_parameter
additional params passed to the 'show' action
CLI Example (current target of system-wide ``java-vm``):
.. code-block:: bash
salt '*' eselect.get_current_target java-vm action_parameter='system'
CLI Example (current target of ``kernel`` symlink):
.. code-block:: bash
salt '*' eselect.get_current_target kernel
'''
result = exec_action(module, 'show', module_parameter=module_parameter, action_parameter=action_parameter)[0]
if not result:
return None
if result == '(unset)':
return None
return result
def set_target(module, target, module_parameter=None, action_parameter=None):
'''
Set the target for the given module.
Target can be specified by index or name.
module
name of the module for which a target should be set
target
name of the target to be set for this module
module_parameter
additional params passed to the defined module
action_parameter
additional params passed to the defined action
CLI Example (setting target of system-wide ``java-vm``):
.. code-block:: bash
salt '*' eselect.set_target java-vm icedtea-bin-7 action_parameter='system'
CLI Example (setting target of ``kernel`` symlink):
.. code-block:: bash
salt '*' eselect.set_target kernel linux-3.17.5-gentoo
'''
if action_parameter:
action_parameter = '{0} {1}'.format(action_parameter, target)
else:
action_parameter = target
# get list of available modules
if module not in get_modules():
log.error('Module {0} not available'.format(module))
return False
exec_result = exec_action(module, 'set', module_parameter=module_parameter, action_parameter=action_parameter, state_only=True)
if exec_result:
return exec_result
return False
| 25.18408
| 142
| 0.657843
|
4a163190715f2c4f3a393a3148cd950be2923306
| 15,521
|
py
|
Python
|
elyra/metadata/manager.py
|
ptitzler/elyra-2
|
a59f8b0ec69eb224980da8c6fcebd7fa84d89ab9
|
[
"Apache-2.0"
] | null | null | null |
elyra/metadata/manager.py
|
ptitzler/elyra-2
|
a59f8b0ec69eb224980da8c6fcebd7fa84d89ab9
|
[
"Apache-2.0"
] | null | null | null |
elyra/metadata/manager.py
|
ptitzler/elyra-2
|
a59f8b0ec69eb224980da8c6fcebd7fa84d89ab9
|
[
"Apache-2.0"
] | null | null | null |
#
# Copyright 2018-2022 Elyra Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import re
from typing import Any
from typing import Dict
from typing import List
from typing import Union
from jsonschema import draft7_format_checker
from jsonschema import validate
from jsonschema import ValidationError
from traitlets import Type # noqa H306
from traitlets.config import LoggingConfigurable # noqa H306
from elyra.metadata.error import SchemaNotFoundError
from elyra.metadata.metadata import Metadata
from elyra.metadata.schema import SchemaManager
from elyra.metadata.storage import FileMetadataStore
from elyra.metadata.storage import MetadataStore
class MetadataManager(LoggingConfigurable):
"""Manages the persistence and retrieval of metadata instances"""
metadata_store_class = Type(
default_value=FileMetadataStore,
config=True,
klass=MetadataStore,
help="""The metadata store class. This is configurable to allow subclassing of
the MetadataStore for customized behavior.""",
)
def __init__(self, schemaspace: str, **kwargs: Any):
"""
Generic object to manage metadata instances.
:param schemaspace (str): the partition where metadata instances are stored
:param kwargs: additional arguments to be used to instantiate a metadata manager
Keyword Args:
metadata_store_class (str): the name of the MetadataStore subclass to use for storing managed instances
"""
super().__init__(**kwargs)
self.schema_mgr = SchemaManager.instance()
schemaspace_instance = self.schema_mgr.get_schemaspace(schemaspace)
self.schemaspace = schemaspace_instance.name
self.metadata_store = self.metadata_store_class(self.schemaspace, **kwargs)
def schemaspace_exists(self) -> bool:
"""Returns True if the schemaspace for this instance exists"""
return self.metadata_store.schemaspace_exists()
def get_all(self, include_invalid: bool = False, of_schema: str = None) -> List[Metadata]:
"""Returns all metadata instances in summary form (name, display_name, location)"""
instances = []
instance_list = self.metadata_store.fetch_instances(include_invalid=include_invalid)
for metadata_dict in instance_list:
# validate the instance prior to return, include invalid instances as appropriate
try:
metadata = Metadata.from_dict(self.schemaspace, metadata_dict)
if of_schema and metadata.schema_name != of_schema: # If provided, filter on of_schema
continue
metadata.on_load() # Allow class instances to handle loads
# if we're including invalid and there was an issue on retrieval, add it to the list
if include_invalid and metadata.reason:
# If no schema-name is present, set to '{unknown}' since we can't make that determination.
if not metadata.schema_name:
metadata.schema_name = "{unknown}"
else: # go ahead and validate against the schema
self.validate(metadata.name, metadata)
instances.append(metadata)
except Exception as ex: # Ignore ValidationError and others when fetching all instances
# Since we may not have a metadata instance due to a failure during `from_dict()`,
# instantiate a bad instance directly to use in the message and invalid result.
invalid_instance = Metadata(**metadata_dict)
self.log.warning(
f"Fetch of instance '{invalid_instance.name}' "
f"of schemaspace '{self.schemaspace}' "
f"encountered an exception: {ex}"
)
if include_invalid and (not of_schema or invalid_instance.schema_name == of_schema):
# Export invalid instances if requested and if a schema was not specified
# or the specified schema matches the instance's schema.
invalid_instance.reason = ex.__class__.__name__
instances.append(invalid_instance)
return instances
def get(self, name: str) -> Metadata:
"""Returns the metadata instance corresponding to the given name"""
if name is None:
raise ValueError("The 'name' parameter requires a value.")
instance_list = self.metadata_store.fetch_instances(name=name)
metadata_dict = instance_list[0]
metadata = Metadata.from_dict(self.schemaspace, metadata_dict)
# Allow class instances to alter instance
metadata.on_load()
# Validate the instance on load
self.validate(name, metadata)
return metadata
def create(self, name: str, metadata: Metadata) -> Metadata:
"""Creates the given metadata, returning the created instance"""
return self._save(name, metadata)
def update(self, name: str, metadata: Metadata, for_migration: bool = False) -> Metadata:
"""Updates the given metadata, returning the updated instance"""
return self._save(name, metadata, for_update=True, for_migration=for_migration)
def remove(self, name: str) -> None:
"""Removes the metadata instance corresponding to the given name"""
instance_list = self.metadata_store.fetch_instances(name=name)
metadata_dict = instance_list[0]
self.log.debug(f"Removing metadata resource '{name}' from schemaspace '{self.schemaspace}'.")
metadata = Metadata.from_dict(self.schemaspace, metadata_dict)
metadata.pre_delete() # Allow class instances to handle delete
self.metadata_store.delete_instance(metadata_dict)
try:
metadata.post_delete() # Allow class instances to handle post-delete tasks (e.g., cache updates, etc.)
except Exception as ex:
self._rollback(name, metadata, "delete", ex)
raise ex
def validate(self, name: str, metadata: Metadata) -> None:
"""Validate metadata against its schema.
Ensure metadata is valid based on its schema. If invalid or schema
is not found, ValidationError will be raised.
"""
metadata_dict = metadata.to_dict()
schema_name = metadata_dict.get("schema_name")
if not schema_name:
raise ValueError(
f"Instance '{name}' in the {self.schemaspace} schemaspace is missing a 'schema_name' field!"
)
schema = self._get_schema(schema_name) # returns a value or throws
try:
validate(instance=metadata_dict, schema=schema, format_checker=draft7_format_checker)
except ValidationError as ve:
# Because validation errors are so verbose, only provide the first line.
first_line = str(ve).partition("\n")[0]
msg = f"Validation failed for instance '{name}' using the {schema_name} schema with error: {first_line}."
self.log.error(msg)
raise ValidationError(msg) from ve
@staticmethod
def get_normalized_name(name: str) -> str:
# lowercase and replaces spaces with underscore
name = re.sub("\\s+", "_", name.lower())
# remove all invalid characters
name = re.sub("[^a-z0-9-_]+", "", name)
# begin with alpha
if not name[0].isalpha():
name = "a_" + name
# end with alpha numeric
if not name[-1].isalnum():
name = name + "_0"
return name
def _get_schema(self, schema_name: str) -> dict:
"""Loads the schema based on the schema_name and returns the loaded schema json.
Throws ValidationError if schema file is not present.
"""
schema_json = self.schema_mgr.get_schema(self.schemaspace, schema_name)
if schema_json is None:
schema_file = os.path.join(os.path.dirname(__file__), "schemas", schema_name + ".json")
if not os.path.exists(schema_file):
self.log.error(
f"The file for schema '{schema_name}' is missing from its expected location: '{schema_file}'"
)
raise SchemaNotFoundError(f"The file for schema '{schema_name}' is missing!")
return schema_json
def _save(self, name: str, metadata: Metadata, for_update: bool = False, for_migration: bool = False) -> Metadata:
if not metadata:
raise ValueError("An instance of class 'Metadata' was not provided.")
if not isinstance(metadata, Metadata):
raise TypeError("'metadata' is not an instance of class 'Metadata'.")
if not name and not for_update: # name is derived from display_name only on creates
if metadata.display_name:
name = MetadataManager.get_normalized_name(metadata.display_name)
metadata.name = name
if not name: # At this point, name must be set
raise ValueError("Name of metadata was not provided.")
        match = re.search("^[a-z]([a-z0-9-_]*[a-z0-9])?$", name)
if match is None:
raise ValueError(
"Name of metadata must be lowercase alphanumeric, beginning with alpha and can include "
"embedded hyphens ('-') and underscores ('_')."
)
orig_value = None
if for_update:
if for_migration: # Avoid triggering a on_load() call since migrations will likely stem from there
instance_list = self.metadata_store.fetch_instances(name=name)
orig_value = instance_list[0]
else:
orig_value = self.get(name)
# Allow class instances to handle pre-save tasks
metadata.pre_save(for_update=for_update)
self._apply_defaults(metadata)
# Validate the metadata prior to storage then store the instance.
self.validate(name, metadata)
metadata_dict = self.metadata_store.store_instance(name, metadata.prepare_write(), for_update=for_update)
metadata_post_op = Metadata.from_dict(self.schemaspace, metadata_dict)
# Allow class instances to handle post-save tasks (e.g., cache updates, etc.)
# Note that this is a _different_ instance from pre-save call
try:
metadata_post_op.post_save(for_update=for_update)
except Exception as ex:
if for_update:
self._rollback(name, orig_value, "update", ex)
else: # Use the metadata instance prior to post op
self._rollback(name, Metadata.from_dict(self.schemaspace, metadata_dict), "create", ex)
raise ex
return self.get(name) # Retrieve updated/new instance so load hook can be called
def _rollback(self, name: str, orig_value: Union[Metadata, Dict], operation: str, exception: Exception):
"""Rolls back the original value depending on the operation.
For rolled back creation attempts, we must remove the created instance. For rolled back
update or deletion attempts, we must restore the original value. Note that these operations
must call the metadata store directly so that class hooks are not called.
"""
self.log.debug(f"Rolling back metadata operation '{operation}' for instance '{name}' due to: {exception}")
if operation == "create": # remove the instance, orig_value is the newly-created instance.
if isinstance(orig_value, Metadata):
orig_value = orig_value.to_dict()
self.metadata_store.delete_instance(orig_value)
elif operation == "update": # restore original as an update
if isinstance(orig_value, dict):
orig_value = Metadata.from_dict(self.schemaspace, orig_value)
self.metadata_store.store_instance(name, orig_value.prepare_write(), for_update=True)
elif operation == "delete": # restore original as a create
if isinstance(orig_value, dict):
orig_value = Metadata.from_dict(self.schemaspace, orig_value)
self.metadata_store.store_instance(name, orig_value.prepare_write(), for_update=False)
self.log.warning(
f"Rolled back metadata operation '{operation}' for instance '{name}' due to "
f"failure in post-processing method: {exception}"
)
def _apply_defaults(self, metadata: Metadata) -> None:
"""If a given property has a default value defined, and that property is not currently represented,
assign it the default value. We will also treat constants similarly.
For schema-level properties (i.e., not application-level), we will check if such a property
has a corresponding attribute and, if so, set the property to that value.
Note: we only consider constants updates for schema-level properties
"""
# Get the schema and build a dict consisting of properties and their default/const values (for those
# properties that have defaults/consts defined). Then walk the metadata instance looking for missing
# properties and assign the corresponding value. Note that we do not consider existing properties with
# values of None for default replacement since that may be intentional (although those values will
# likely fail subsequent validation). We also don't consider defaults when applying values to the
# schema-level properties since these settings are function of a defined attribute.
schema = self.schema_mgr.get_schema(self.schemaspace, metadata.schema_name)
def _update_instance(target_prop: str, schema_properties: Dict, instance: Union[Metadata, Dict]) -> None:
property_defaults = {}
for name, property in schema_properties.items():
if target_prop in property:
property_defaults[name] = property[target_prop]
if property_defaults: # schema defines defaulted properties
if isinstance(instance, Metadata): # schema properties, updated constants
for name, default in property_defaults.items():
if hasattr(instance, name):
setattr(instance, name, default)
else: # instance properties, update missing defaults
instance_properties = instance
for name, default in property_defaults.items():
if name not in instance_properties:
instance_properties[name] = default
# Update default properties of instance properties
_update_instance("default", schema["properties"]["metadata"]["properties"], metadata.metadata)
# Update const properties of schema properties
_update_instance("const", schema["properties"], metadata)
| 48.808176
| 118
| 0.657883
|
4a16325e3e40776e182362c766a2f37551b72217
| 66,317
|
py
|
Python
|
tests/python/frontend/tflite/test_forward.py
|
yagnasrinath/incubator-tvm
|
8b2476fa87cda205daf4bd17d7d58e07a259043c
|
[
"Apache-2.0"
] | null | null | null |
tests/python/frontend/tflite/test_forward.py
|
yagnasrinath/incubator-tvm
|
8b2476fa87cda205daf4bd17d7d58e07a259043c
|
[
"Apache-2.0"
] | null | null | null |
tests/python/frontend/tflite/test_forward.py
|
yagnasrinath/incubator-tvm
|
8b2476fa87cda205daf4bd17d7d58e07a259043c
|
[
"Apache-2.0"
] | null | null | null |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=import-self, invalid-name, unused-argument
"""
TFLite testcases
================
This article is a test script to test TFLite operator with Relay.
"""
from __future__ import print_function
from functools import partial
import numpy as np
import tvm
from tvm import te
from tvm import relay
import tensorflow as tf
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import variables
try:
from tensorflow import lite as interpreter_wrapper
except ImportError:
from tensorflow.contrib import lite as interpreter_wrapper
from tvm.contrib.download import download_testdata
import tvm.relay.testing.tf as tf_testing
from packaging import version as package_version
from PIL import Image
import os
#######################################################################
# Generic run functions for TVM & TFLite
# --------------------------------------
def convert_to_list(x):
if not isinstance(x, list):
x = [x]
return x
#######################################################################
# Get a real image for e2e testing.
# --------------------------------------
def get_real_image(im_height, im_width):
repo_base = 'https://github.com/dmlc/web-data/raw/master/tensorflow/models/InceptionV1/'
img_name = 'elephant-299.jpg'
image_url = os.path.join(repo_base, img_name)
img_path = download_testdata(image_url, img_name, module='data')
image = Image.open(img_path).resize((im_height, im_width))
x = np.array(image).astype('uint8')
data = np.reshape(x, (1, im_height, im_width, 3))
return data
def run_tvm_graph(tflite_model_buf, input_data, input_node, num_output=1, target='llvm',
out_names=None):
""" Generic function to compile on relay and execute on tvm """
try:
import tflite.Model
except ImportError:
raise ImportError("The tflite package must be installed")
# get TFLite model from buffer
tflite_model = tflite.Model.Model.GetRootAsModel(tflite_model_buf, 0)
input_data = convert_to_list(input_data)
input_node = convert_to_list(input_node)
shape_dict = {}
dtype_dict = {}
for i, e in enumerate(input_node):
shape_dict[e] = input_data[i].shape
dtype_dict[e] = input_data[i].dtype.name
mod, params = relay.frontend.from_tflite(tflite_model,
shape_dict=shape_dict,
dtype_dict=dtype_dict)
with relay.build_config(opt_level=3):
graph, lib, params = relay.build(mod, target, params=params)
ctx = tvm.context(target, 0)
from tvm.contrib import graph_runtime
m = graph_runtime.create(graph, lib, ctx)
# set inputs
for i, e in enumerate(input_node):
m.set_input(e, tvm.nd.array(input_data[i].astype(input_data[i].dtype)))
m.set_input(**params)
# execute
m.run()
# get outputs
assert out_names is None or num_output == len(out_names), "out_names: {} num_output: {}".format(
out_names, num_output)
tvm_output_list = []
for i in range(0, num_output):
tvm_output = m.get_output(i)
tvm_output_list.append(tvm_output.asnumpy())
return tvm_output_list
def run_tflite_graph(tflite_model_buf, input_data):
""" Generic function to execute TFLite """
input_data = convert_to_list(input_data)
interpreter = interpreter_wrapper.Interpreter(model_content=tflite_model_buf)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()
# set input
assert len(input_data) == len(input_details)
for i in range(len(input_details)):
interpreter.set_tensor(input_details[i]['index'], input_data[i])
# Run
interpreter.invoke()
# get output
tflite_output = list()
for i in range(len(output_details)):
tflite_output.append(interpreter.get_tensor(output_details[i]['index']))
return tflite_output
def compare_tflite_with_tvm(in_data, in_name, input_tensors,
output_tensors, init_global_variables=False,
out_names=None, quantized=False, input_range=None):
"""Generic function to generate and compare TFLite and TVM output"""
in_data = convert_to_list(in_data)
in_name = convert_to_list(in_name)
out_names = convert_to_list(out_names)
in_node = [0] * len(in_name)
for i in range(len(in_name)):
in_node[i] = in_name[i].split(':')[0] if ":" in in_name[i] else in_name[i]
with tf.Session() as sess:
if init_global_variables:
sess.run(variables.global_variables_initializer())
# convert to tflite model
converter = interpreter_wrapper.TFLiteConverter.from_session(
sess, input_tensors, output_tensors)
if quantized:
converter.inference_type = tf.lite.constants.QUANTIZED_UINT8
input_arrays = converter.get_input_arrays()
input_stats = {}
# calculate the mean and quantization scale for every input tensor,
# with respect to its fp32 input range, defined in fake_quant.
# s = 255/(fmax-fmin); m = -fmin*s (the zero point)
for i in input_arrays:
try:
quant_scale = 255 / (input_range[i][1] - input_range[i][0])
except ZeroDivisionError:
raise ZeroDivisionError('Min and max of the input range for tensor ' + i + ' can\'t be equal')
mean = - input_range[i][0] * quant_scale
input_stats[i] = (mean, quant_scale)
converter.quantized_input_stats = input_stats
tflite_model_buffer = converter.convert()
tflite_output = run_tflite_graph(tflite_model_buffer, in_data)
for device in ["llvm"]:
ctx = tvm.context(device, 0)
if not ctx.exist:
print("Skip because %s is not enabled" % device)
continue
tvm_output = run_tvm_graph(tflite_model_buffer, in_data, in_node, target=device,
num_output=len(out_names), out_names=out_names)
# WARNING: the results could well be random values clipped to 0 or 255 because of badly tuned output
# range for the specific operator. While adding test ensure that we aren't getting only clipped values
# in output tensors that still pass the assertion. For reference see _test_elemwise_qnn_out_range()
if quantized:
for i in range(len(tflite_output)):
# allow absolute tolerance of 1 in the quantized results
tvm.testing.assert_allclose(tflite_output[i], tvm_output[i], atol=1, rtol=1e-5)
else:
for i in range(len(tflite_output)):
tvm.testing.assert_allclose(tflite_output[i], tvm_output[i], atol=1e-5, rtol=1e-5)
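# Worked example of the quantization-stats arithmetic used above: for a fake-quant
# input range (fmin, fmax), scale = 255 / (fmax - fmin) and mean (zero point) = -fmin * scale.
def _demo_quant_input_stats(fmin=-1.0, fmax=1.0):
    quant_scale = 255 / (fmax - fmin)   # 127.5 for the (-1, 1) range
    mean = -fmin * quant_scale          # 127.5 -> zero point sits mid-range
    return (mean, quant_scale)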
def with_fused_activation_function(input_tensor, fn_name):
if fn_name is None or fn_name == "NONE":
return input_tensor
if fn_name == "RELU":
return nn_ops.relu(input_tensor)
if fn_name == "RELU6":
return nn_ops.relu6(input_tensor)
if fn_name == "RELU_N1_TO_1":
return math_ops.maximum(-1, math_ops.minimum(input_tensor, 1))
if fn_name == "TANH":
return math_ops.tanh(input_tensor)
raise AssertionError("Unknown fused_activation_function {}".format(fn_name))
def _test_split(in_shape, axis, num_Splits, dtype):
    '''internal split tester taking as parameters in_shape, the axis to split along,
    the number of tensors to split into and dtype (data type)'''
np_data = np.random.uniform(-5, 5, size=in_shape).astype(dtype)
with tf.Graph().as_default():
in_data = array_ops.placeholder(shape=in_shape, dtype=dtype)
out = array_ops.split(in_data, num_Splits, axis=axis)
out_names = ['out_' + str(n) + ':0' for n in range(num_Splits)]
compare_tflite_with_tvm([np_data], ['Placeholder:0'], [in_data], out,
out_names=out_names)
def test_forward_split():
'''test split layer'''
# rank 1
_test_split((3,), 0, 1, 'float32')
_test_split((3,), 0, 3, 'float32')
_test_split((6,), 0, 3, 'float32')
# rank 2
_test_split((6, 2), 0, 3, 'float32')
_test_split((2, 6), 1, 6, 'float32')
# rank 3
if package_version.parse(tf.VERSION) >= package_version.parse('1.14.0'):
_test_split((6, 2, 4), 0, 2, 'int32')
_test_split((2, 6, 4), 1, 3, 'float32')
_test_split((2, 4, 6), 2, 1, 'float32')
# rank 4
_test_split((6, 1, 3, 5), 0, 3, 'float32')
_test_split((1, 6, 3, 5), 1, 3, 'float32')
_test_split((1, 3, 6, 5), 2, 3, 'float32')
_test_split((1, 3, 5, 6), 3, 3, 'float32')
# split along negative axis
_test_split((6, 1, 3, 5), -4, 3, 'float32')
_test_split((1, 6, 3, 5), -3, 3, 'float32')
_test_split((1, 3, 6, 5), -2, 3, 'float32')
_test_split((1, 3, 5, 6), -1, 3, 'float32')
#######################################################################
# slice
# -----
def _test_slice(data, begin, size):
""" One iteration of SLICE """
with tf.Graph().as_default():
in_data = array_ops.placeholder(shape=data.shape, dtype=data.dtype)
out = array_ops.slice(in_data, begin, size)
compare_tflite_with_tvm(data, 'Placeholder:0', [in_data], [out])
def test_forward_slice():
""" SLICE """
_test_slice(np.arange(4, dtype=np.float32).reshape((4, )), begin=[0], size=[2])
_test_slice(np.arange(18, dtype=np.int32).reshape((3, 2, 3)), begin=[1, 0, 0], size=[1, 1, 3])
# tflite 1.13 outputs nonsense values if size[i] == -1
if package_version.parse(tf.VERSION) >= package_version.parse('1.14.0'):
_test_slice(np.arange(8, dtype=np.int32).reshape((2, 4)), begin=[0, 1], size=[-1, -1])
_test_slice(np.arange(5, dtype=np.int32).reshape((5, )), begin=[4], size=[-1])
#######################################################################
# transpose
# ---------
def _test_forward_transpose(ishape, axes=()):
data = np.random.uniform(size=ishape).astype(np.float32)
with tf.Graph().as_default():
in_data = array_ops.placeholder(shape=data.shape, dtype=data.dtype)
if not axes:
out = array_ops.transpose(in_data)
else:
out = array_ops.transpose(in_data, axes)
compare_tflite_with_tvm(data, 'Placeholder:0', [in_data], [out])
def test_forward_transpose():
_test_forward_transpose((2, 2))
_test_forward_transpose((2, 3, 4))
_test_forward_transpose((7, 8, 8, 10))
_test_forward_transpose((2, 3, 4), (1, 2, 0))
_test_forward_transpose((2, 3, 4), (0, 1, 2))
_test_forward_transpose((2, 3, 4, 5), (3, 0, 1, 2))
_test_forward_transpose((2, 3, 4, 5), ())
#######################################################################
# Cast
# --------
def _test_cast(data, cast_dtype):
""" One iteration of CAST """
with tf.Graph().as_default():
in_data = array_ops.placeholder(shape=data.shape, dtype=data.dtype)
out = math_ops.cast(in_data, cast_dtype)
compare_tflite_with_tvm(data, 'Placeholder:0', [in_data], [out])
def test_forward_cast():
""" CAST """
_test_cast(np.arange(6.0, dtype=np.float32).reshape((1, 6)), cast_dtype=tf.int32)
_test_cast(np.arange(6.0, dtype=np.float32).reshape((1, 6)), cast_dtype=tf.uint8)
_test_cast(np.arange(6.0, dtype=np.int32).reshape((1, 6)), cast_dtype=tf.int64)
#######################################################################
# tile
# ---------
def _test_forward_tile(in_shape, reps, dtype):
data = np.random.uniform(-5, 5, size=in_shape).astype(dtype)
with tf.Graph().as_default():
in_data = array_ops.placeholder(shape=data.shape, dtype=data.dtype)
out = array_ops.tile(in_data, reps)
compare_tflite_with_tvm(data, 'Placeholder:0', [in_data], [out])
def test_forward_tile():
_test_forward_tile((2, ), (3, ), "int32")
_test_forward_tile((2, 2), (2, 3), "float32")
######################################################################
# BatchToSpaceND
# --------------
def _test_batch_to_space_nd(input_shape, block_shape, crops, dtype='int32'):
data = np.random.uniform(0, 5, size=input_shape).astype(dtype)
with tf.Graph().as_default():
in_data = array_ops.placeholder(shape=input_shape, dtype=dtype)
out = array_ops.batch_to_space_nd(in_data, block_shape, crops)
compare_tflite_with_tvm(data, 'Placeholder:0', [in_data], [out])
def test_forward_batch_to_space_nd():
# test cases: https://www.tensorflow.org/api_docs/cc/class/tensorflow/ops/batch-to-space-n-d
_test_batch_to_space_nd(
input_shape=[4, 1, 1, 1],
block_shape=[2, 2],
crops=[[0, 0], [0, 0]]
)
_test_batch_to_space_nd(
input_shape=[4, 1, 1, 3],
block_shape=[2, 2],
crops=[[0, 0], [0, 0]]
)
_test_batch_to_space_nd(
input_shape=[4, 2, 2, 1],
block_shape=[2, 2],
crops=[[0, 0], [0, 0]]
)
######################################################################
# SpaceToBatchND
# --------------
def _test_space_to_batch_nd(input_shape, block_shape, paddings, dtype='int32'):
data = np.random.uniform(0, 5, size=input_shape).astype(dtype)
with tf.Graph().as_default():
in_data = array_ops.placeholder(shape=input_shape, dtype=dtype)
out = array_ops.space_to_batch_nd(in_data, block_shape, paddings)
compare_tflite_with_tvm(data, 'Placeholder:0', [in_data], [out])
def test_forward_space_to_batch_nd():
# test cases: https://www.tensorflow.org/api_docs/python/tf/space_to_batch_nd
_test_space_to_batch_nd(
input_shape=[1, 2, 2, 1],
block_shape=[2, 2],
paddings=[[0, 0], [0, 0]]
)
_test_space_to_batch_nd(
input_shape=[1, 2, 2, 3],
block_shape=[2, 2],
paddings=[[0, 0], [0, 0]]
)
_test_space_to_batch_nd(
input_shape=[1, 4, 4, 1],
block_shape=[2, 2],
paddings=[[0, 0], [0, 0]]
)
_test_space_to_batch_nd(
input_shape=[2, 2, 4, 1],
block_shape=[2, 2],
paddings=[[0, 0], [2, 0]]
)
#######################################################################
# Pooling
# -------
def _test_pooling_iteration(input_shape, **kwargs):
""" One iteration of pool operation with given shapes and attributes """
x = -np.arange(
np.prod(input_shape), dtype=np.float32).reshape(input_shape) - 1
with tf.Graph().as_default():
in_data = array_ops.placeholder(shape=input_shape, dtype='float32')
out = nn_ops.pool(in_data, **kwargs)
        compare_tflite_with_tvm(x, 'Placeholder:0', [in_data], [out])
def _test_pooling(input_shape, **kwargs):
_test_pooling_iteration(input_shape, **kwargs)
def test_forward_pooling():
""" Pooling """
for pool_type in ['AVG', 'MAX']:
_test_pooling(input_shape=[2, 9, 10, 2],
window_shape=[1, 1],
padding='SAME',
pooling_type=pool_type,
dilation_rate=[1, 1],
strides=[1, 1])
_test_pooling(input_shape=[2, 10, 9, 2],
window_shape=[1, 1],
padding='SAME',
pooling_type=pool_type,
dilation_rate=[1, 1],
strides=[1, 1])
_test_pooling(input_shape=[2, 9, 10, 2],
window_shape=[2, 1],
padding='SAME',
pooling_type=pool_type,
dilation_rate=[1, 1],
strides=[1, 1])
_test_pooling(input_shape=[2, 10, 9, 2],
window_shape=[2, 3],
padding='SAME',
pooling_type=pool_type,
dilation_rate=[1, 1],
strides=[2, 1])
#######################################################################
# Convolution
# -----------
def _test_convolution(tensor_in_sizes, filter_in_sizes,
dilations, strides, padding, data_format,
is_depthwise=False):
""" One iteration of convolution with given shapes and attributes """
total_size_1 = 1
total_size_2 = 1
for s in tensor_in_sizes:
total_size_1 *= s
for s in filter_in_sizes:
total_size_2 *= s
# Initializes the input tensor with array containing incrementing
# numbers from 1.
data_array = [f * 1.0 for f in range(1, total_size_1 + 1)]
filter_array = [f * 1.0 for f in range(1, total_size_2 + 1)]
with tf.Graph().as_default():
in_data = array_ops.placeholder(shape=tensor_in_sizes, dtype='float32')
in_filter = constant_op.constant(filter_array, shape=filter_in_sizes, dtype='float32')
strides = [1] + strides + [1]
dilations = [1] + dilations + [1]
if is_depthwise:
out = nn_ops.depthwise_conv2d_native(in_data,
in_filter,
strides=strides,
padding=padding,
data_format=data_format)
else:
out = nn_ops.conv2d(in_data,
in_filter,
strides=strides,
padding=padding,
data_format=data_format)
data_array = np.reshape(data_array, tensor_in_sizes).astype('float32')
compare_tflite_with_tvm(data_array, 'Placeholder:0', [in_data], [out])
def test_forward_convolution():
_test_convolution([4, 8, 8, 176], [1, 1, 176, 32], [1, 1], [1, 1], 'SAME', 'NHWC')
_test_convolution([4, 17, 17, 19], [3, 3, 19, 19], [1, 1], [2, 2], 'VALID', 'NHWC')
_test_convolution([4, 17, 17, 124], [1, 1, 124, 19], [1, 1], [1, 1], 'SAME', 'NHWC')
_test_convolution([4, 17, 17, 12], [3, 3, 12, 32], [1, 1], [2, 2], 'VALID', 'NHWC')
# depthwise convolution
_test_convolution([4, 8, 8, 176], [1, 1, 176, 1], [1, 1], [1, 1], 'SAME', 'NHWC', True)
_test_convolution([4, 17, 17, 19], [3, 3, 19, 1], [1, 1], [2, 2], 'VALID', 'NHWC', True)
_test_convolution([4, 17, 17, 124], [1, 1, 124, 1], [1, 1], [1, 1], 'SAME', 'NHWC', True)
_test_convolution([4, 17, 17, 12], [3, 3, 12, 1], [1, 1], [2, 2], 'VALID', 'NHWC', True)
_test_convolution([4, 17, 17, 12], [3, 3, 12, 2], [1, 1], [2, 2], 'VALID', 'NHWC', True)
#######################################################################
# Transpose Convolution
# ---------------------
def _test_transpose_conv(tensor_in_sizes, filter_in_sizes, output_shape, strides, padding):
""" One iteration of transpose convolution with given shapes and attributes """
total_size_1 = 1
total_size_2 = 1
for s in tensor_in_sizes:
total_size_1 *= s
for s in filter_in_sizes:
total_size_2 *= s
# Initializes the input tensor with array containing incrementing
# numbers from 1.
data_array = [f * 1.0 for f in range(1, total_size_1 + 1)]
filter_array = [f * 1.0 for f in range(1, total_size_2 + 1)]
with tf.Graph().as_default():
in_data = array_ops.placeholder(shape=tensor_in_sizes, dtype='float32')
in_filter = constant_op.constant(filter_array, shape=filter_in_sizes, dtype='float32')
strides = [1] + strides + [1]
# in_filter layout is HWOI
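        # i.e. tf.nn.conv2d_transpose expects the filter as
        # [height, width, output_channels, in_channels], which is why the tests
        # below pass e.g. [3, 3, 5, 16] to map 16 input channels to 5 outputs.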
out = nn_ops.conv2d_transpose(in_data,
in_filter,
output_shape=output_shape,
strides=strides,
padding=padding)
data_array = np.reshape(data_array, tensor_in_sizes).astype('float32')
compare_tflite_with_tvm(data_array, 'Placeholder:0', [in_data], [out])
def test_forward_transpose_conv():
# kernel 3x3, padding VALID
_test_transpose_conv([4, 32, 32, 16], [3, 3, 5, 16], [4, 34, 34, 5], [1, 1], 'VALID')
_test_transpose_conv([1, 32, 32, 16], [3, 3, 5, 16], [1, 65, 65, 5], [2, 2], 'VALID')
_test_transpose_conv([1, 32, 32, 16], [3, 3, 5, 16], [1, 65, 34, 5], [2, 1], 'VALID')
# kernel 2x2, padding VALID
_test_transpose_conv([4, 32, 32, 16], [2, 2, 5, 16], [4, 33, 33, 5], [1, 1], 'VALID')
_test_transpose_conv([1, 32, 32, 16], [2, 2, 5, 16], [1, 64, 64, 5], [2, 2], 'VALID')
_test_transpose_conv([1, 32, 32, 16], [2, 2, 5, 16], [1, 64, 33, 5], [2, 1], 'VALID')
# kernel 1x1, padding VALID
_test_transpose_conv([4, 32, 32, 16], [1, 1, 5, 16], [4, 32, 32, 5], [1, 1], 'VALID')
_test_transpose_conv([1, 32, 32, 16], [1, 1, 5, 16], [1, 63, 63, 5], [2, 2], 'VALID')
_test_transpose_conv([1, 32, 32, 16], [1, 1, 5, 16], [1, 63, 32, 5], [2, 1], 'VALID')
# kernel 1x1, padding SAME
_test_transpose_conv([4, 32, 32, 16], [1, 1, 5, 16], [4, 32, 32, 5], [1, 1], 'SAME')
_test_transpose_conv([1, 32, 32, 16], [1, 1, 5, 16], [1, 63, 63, 5], [2, 2], 'SAME')
_test_transpose_conv([1, 32, 32, 16], [1, 1, 5, 16], [1, 63, 32, 5], [2, 1], 'SAME')
#######################################################################
# Reshape
# -------
def _test_reshape(data, out_shape):
""" One iteration of reshape operation with given data and out shape """
with tf.Graph().as_default():
in_data = array_ops.placeholder(shape=data.shape, dtype=data.dtype)
out = array_ops.reshape(in_data, out_shape)
compare_tflite_with_tvm(data, 'Placeholder:0', [in_data], [out])
def test_forward_reshape():
_test_reshape(np.arange(6.0, dtype=np.float32), [2, 3])
_test_reshape(np.arange(6), [-1, 2])
_test_reshape(np.arange(6), [3, -1])
_test_reshape(np.arange(6), [-1])
#######################################################################
# Resize
# ------
def _test_resize(tf_resize_op, data, align_corners):
""" One iteration of Resize """
assert len(data) == 2
# Test with tensor and constant
with tf.Graph().as_default():
images_tensor = array_ops.placeholder(shape=data[0].shape, dtype=data[0].dtype, name='in')
size = ops.convert_to_tensor(data[1], dtype=data[1].dtype)
out_tensor = tf_resize_op(images=images_tensor, size=size, align_corners=align_corners)
compare_tflite_with_tvm([data[0]], ['in:0'], [images_tensor], [out_tensor])
def test_all_resize():
""" Resize """
data = [np.random.rand(1, 16, 16, 3).astype("float32"), np.array([8, 8], dtype=np.int32)]
### RESIZE_BILINEAR
_test_resize(tf.image.resize_bilinear, data, align_corners=False)
_test_resize(tf.image.resize_bilinear, data, align_corners=True)
### RESIZE_NEAREST_NEIGHBOR (was added in v1.13)
# According to topi resize.h
# Align corners not supported for nearest neighbour
from tflite.BuiltinOperator import BuiltinOperator
if 'RESIZE_NEAREST_NEIGHBOR' in dir(BuiltinOperator()):
_test_resize(tf.image.resize_nearest_neighbor, data, align_corners=False)
#######################################################################
# Concatenation
# -------------
def _test_concatenation(data, axis):
""" One iteration of concatenation """
assert len(data) >= 1
with tf.Graph().as_default():
in_data = [
array_ops.placeholder(shape=tensor.shape, dtype=tensor.dtype, name="in_{}".format(idx))
for idx, tensor in enumerate(data)]
out = array_ops.concat(in_data, axis=axis)
name = ["in_{}:0".format(idx) for idx in range(len(data))]
compare_tflite_with_tvm(data, name, in_data, [out])
def test_forward_concatenation():
_test_concatenation(
[np.arange(6).reshape((1, 2, 1, 3)),
np.arange(6).reshape((1, 2, 1, 3))], 1)
_test_concatenation(
[np.arange(6).reshape((3, 2)),
np.arange(6).reshape((3, 2))], 1)
_test_concatenation(
[np.arange(6).reshape((2, 1, 1, 3)),
np.arange(6).reshape((2, 1, 1, 3)),
np.arange(6).reshape((2, 1, 1, 3))], 1)
#######################################################################
# Unary elemwise
# --------------
def _test_unary_elemwise(math_op, data):
""" One iteration of unary elemwise """
with tf.Graph().as_default():
in_data = array_ops.placeholder(shape=data.shape, dtype=data.dtype, name='in')
out = math_op(in_data)
compare_tflite_with_tvm(data, ['in:0'], [in_data], [out])
#######################################################################
# Abs
# ---
def _test_abs(data):
""" One iteration of abs """
return _test_unary_elemwise(math_ops.abs, data)
#######################################################################
# Ceil
# ----
def _test_ceil(data):
""" One iteration of ceil """
return _test_unary_elemwise(math_ops.ceil, data)
#######################################################################
# Floor
# -----
def _test_floor(data):
""" One iteration of floor """
return _test_unary_elemwise(math_ops.floor, data)
#######################################################################
# Exp
# ---
def _test_exp(data):
""" One iteration of exp """
return _test_unary_elemwise(math_ops.exp, data)
#######################################################################
# Log
# ---
def _test_log(data):
""" One iteration of log """
return _test_unary_elemwise(math_ops.log, data)
#######################################################################
# Sin
# ---
def _test_sin(data):
""" One iteration of sin """
return _test_unary_elemwise(math_ops.sin, data)
#######################################################################
# Cos
# ---
def _test_cos(data):
""" One iteration of cos """
return _test_unary_elemwise(math_ops.cos, data)
#######################################################################
# Sqrt
# ----
def _test_sqrt(data):
""" One iteration of sqrt """
return _test_unary_elemwise(math_ops.sqrt, data)
#######################################################################
# Rsqrt
# -----
def _test_rsqrt(data):
""" One iteration of rsqrt """
return _test_unary_elemwise(math_ops.rsqrt, data)
#######################################################################
# Neg
# ---
def _test_neg(data):
""" One iteration of neg """
return _test_unary_elemwise(math_ops.neg, data)
#######################################################################
# Square
# ------
def _test_square(data):
""" One iteration of square """
return _test_unary_elemwise(math_ops.square, data)
def _test_forward_unary_elemwise(test_op):
# functions that need positive input
if test_op.__name__ in {'_test_log', '_test_sqrt', '_test_rsqrt'}:
test_op(np.arange(1.0, 7.0, dtype=np.float32).reshape((2, 1, 3)))
else:
test_op(np.random.uniform(-10, 10, (3, 2)).astype(np.float32))
def test_all_unary_elemwise():
_test_forward_unary_elemwise(_test_abs)
_test_forward_unary_elemwise(_test_floor)
_test_forward_unary_elemwise(_test_exp)
_test_forward_unary_elemwise(_test_log)
_test_forward_unary_elemwise(_test_sin)
_test_forward_unary_elemwise(_test_sqrt)
_test_forward_unary_elemwise(_test_rsqrt)
_test_forward_unary_elemwise(_test_neg)
_test_forward_unary_elemwise(_test_square)
# ceil and cos come with TFLite 1.14.0.post1 fbs schema
if package_version.parse(tf.VERSION) >= package_version.parse('1.14.0'):
_test_forward_unary_elemwise(_test_ceil)
_test_forward_unary_elemwise(_test_cos)
#######################################################################
# Element-wise
# ---
def _test_elemwise(math_op, data, fused_activation_function=None, quantized=False, qnn_op=None):
""" One iteration of elemwise """
assert len(data) == 2
# Test with two tensors
with tf.Graph().as_default():
in_data = [array_ops.placeholder(shape=data[0].shape, dtype='float32', name='in_0'),
array_ops.placeholder(shape=data[1].shape, dtype='float32', name='in_1')]
if quantized:
# fake_quant will keep the tensors in float32 until the conversion in the session
inq_data = [tf.quantization.fake_quant_with_min_max_args(in_data[0], min=-100, max=100, name="inq_0"),
tf.quantization.fake_quant_with_min_max_args(in_data[1], min=-50, max=50, name="inq_1")]
input_range = {'inq_0': (-100, 100), 'inq_1': (-50, 50)}
out = math_op(inq_data[0], inq_data[1])
out = with_fused_activation_function(out, fused_activation_function)
# set the fp32 output range with respect to the operation
out_min, out_max = _test_elemwise_qnn_out_range(qnn_op)
out = tf.quantization.fake_quant_with_min_max_args(out, min=out_min, max=out_max, name="out")
compare_tflite_with_tvm(data, ['inq_0:0', 'inq_1:0'], inq_data, [out],
quantized=True, input_range=input_range)
else:
out = math_op(in_data[0], in_data[1])
out = with_fused_activation_function(out, fused_activation_function)
compare_tflite_with_tvm(data, ['in_0:0', 'in_1:0'], in_data, [out])
# Test with tensor and constant
with tf.Graph().as_default():
in_data = [array_ops.placeholder(shape=data[0].shape, dtype='float32', name='in_0')]
if quantized:
inq_data = [tf.quantization.fake_quant_with_min_max_args(in_data[0], min=-100, max=100, name="inq_0")]
inq_const = tf.quantization.fake_quant_with_min_max_args(data[1], min=-50, max=50, name="const_tensor")
input_range = {'inq_0': (-100, 100)}
# the 2nd tensor is treated as constant and directly added as part of the operation
out = math_op(inq_data, ops.convert_to_tensor(inq_const, dtype='float32', name='inq_const'))
out = with_fused_activation_function(out, fused_activation_function)
out_min, out_max = _test_elemwise_qnn_out_range(qnn_op)
out = tf.quantization.fake_quant_with_min_max_args(out, min=out_min, max=out_max, name="out")
compare_tflite_with_tvm(data[0], ['inq_0:0'], inq_data, [out], quantized=True, input_range=input_range)
else:
out = math_op(in_data[0], ops.convert_to_tensor(data[1], dtype=data[1].dtype))
out = with_fused_activation_function(out, fused_activation_function)
compare_tflite_with_tvm(data[0], ['in_0:0'], in_data, [out])
# Test with constant and tensor
with tf.Graph().as_default():
in_data = [array_ops.placeholder(shape=data[1].shape, dtype='float32', name='in_1')]
if quantized:
inq_const = tf.quantization.fake_quant_with_min_max_args(data[0], min=-100, max=100, name="const_tensor")
inq_data = [tf.quantization.fake_quant_with_min_max_args(in_data[0], min=-50, max=50, name="inq_1")]
input_range = {'inq_1': (-50, 50)}
# the 1st tensor is treated as constant and directly added as part of the operation
out = math_op(ops.convert_to_tensor(inq_const, dtype='float32', name='inq_const'), inq_data)
out = with_fused_activation_function(out, fused_activation_function)
out_min, out_max = _test_elemwise_qnn_out_range(qnn_op)
out = tf.quantization.fake_quant_with_min_max_args(out, min=out_min, max=out_max, name="out")
compare_tflite_with_tvm(data[1], ['inq_1:0'], inq_data, [out], quantized=True, input_range=input_range)
else:
out = math_op(ops.convert_to_tensor(data[0], dtype=data[0].dtype), in_data[0])
out = with_fused_activation_function(out, fused_activation_function)
compare_tflite_with_tvm(data[1], ['in_1:0'], in_data, [out])
#######################################################################
# Add
# ---
def _test_add(data, fused_activation_function=None, quantized=False, qnn_op=None):
""" One iteration of add """
return _test_elemwise(math_ops.add, data, fused_activation_function, quantized, qnn_op)
#######################################################################
# Subtract
# --------
def _test_sub(data, fused_activation_function=None):
""" One iteration of subtract """
return _test_elemwise(math_ops.subtract, data, fused_activation_function)
#######################################################################
# Mul
# ---
def _test_mul(data, fused_activation_function=None, quantized=False, qnn_op=None):
""" One iteration of mul """
return _test_elemwise(math_ops.multiply, data, fused_activation_function, quantized, qnn_op)
#######################################################################
# Divide
# ------
def _test_div(data, fused_activation_function=None):
""" One iteration of divide """
return _test_elemwise(math_ops.divide, data, fused_activation_function)
#######################################################################
# Power
# -----
def _test_pow(data):
""" One iteration of power """
return _test_elemwise(math_ops.pow, data)
#######################################################################
# Maximum
# -------
def _test_maximum(data):
""" One iteration of maximum """
return _test_elemwise(math_ops.maximum, data)
#######################################################################
# Minimum
# -------
def _test_minimum(data):
""" One iteration of minimum """
return _test_elemwise(math_ops.minimum, data)
#######################################################################
# Greater
# -------
def _test_greater(data):
""" One iteration of greater """
return _test_elemwise(math_ops.greater, data)
#######################################################################
# Greater_equal
# -------------
def _test_greater_equal(data):
""" One iteration of greater_equal """
return _test_elemwise(math_ops.greater_equal, data)
#######################################################################
# Less
# ----
def _test_less(data):
""" One iteration of less """
return _test_elemwise(math_ops.less, data)
#######################################################################
# Less_equal
# ----------
def _test_less_equal(data):
""" One iteration of less_equal """
return _test_elemwise(math_ops.less_equal, data)
#######################################################################
# Equal
# -----
def _test_equal(data):
""" One iteration of equal """
return _test_elemwise(math_ops.equal, data)
#######################################################################
# Not_equal
# ---------
def _test_not_equal(data):
""" One iteration of not_equal"""
return _test_elemwise(math_ops.not_equal, data)
#######################################################################
# Squared_difference
# ------------------
def _test_squared_difference(data):
""" One iteration of squared difference """
return _test_elemwise(math_ops.squared_difference, data)
def _test_forward_elemwise(testop):
""" Elewise"""
testop([np.arange(6.0, dtype=np.float32).reshape((2, 1, 1, 3)),
np.arange(1.0, 7.0, dtype=np.float32).reshape((2, 1, 1, 3))])
testop([np.arange(6.0, dtype=np.float32).reshape((2, 1, 3)),
np.arange(1.0, 7.0, dtype=np.float32).reshape((2, 1, 3))])
testop([np.arange(3.0, dtype=np.float32).reshape((1, 3)),
np.arange(1.0, 4.0, dtype=np.float32).reshape((1, 3))])
def _test_forward_elemwise_quantized(testop):
testop([np.array(np.random.uniform(0, 255, (3, 6)), dtype=np.uint8),
np.array(np.random.uniform(0, 255, (3, 6)), dtype=np.uint8)], quantized=True, qnn_op=testop)
def _test_elemwise_qnn_out_range(qnn_op):
# set the fake_quant output range with respect to the input tensors float32 range
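    # e.g. _test_elemwise fake-quantizes the inputs to (-100, 100) and (-50, 50),
    # so the widest possible add/sub result is -150..150, while mul can reach a
    # magnitude of 100 * 50 = 5000, hence the (-5e+3, 5e+3) range below.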
qnn_out_range = {
_test_add: (-150, 150),
_test_sub: (-150, 150),
_test_mul: (-5e+3, 5e+3),
}
return qnn_out_range[qnn_op]
def test_all_elemwise():
_test_forward_elemwise(_test_add)
_test_forward_elemwise_quantized(_test_add)
_test_forward_elemwise(partial(_test_add, fused_activation_function="RELU"))
_test_forward_elemwise(partial(_test_add, fused_activation_function="RELU6"))
_test_forward_elemwise(_test_sub)
_test_forward_elemwise(partial(_test_sub, fused_activation_function="RELU"))
_test_forward_elemwise(partial(_test_sub, fused_activation_function="RELU6"))
_test_forward_elemwise(_test_mul)
_test_forward_elemwise_quantized(_test_mul)
_test_forward_elemwise(partial(_test_mul, fused_activation_function="RELU"))
_test_forward_elemwise(partial(_test_mul, fused_activation_function="RELU6"))
_test_forward_elemwise(_test_div)
_test_forward_elemwise(partial(_test_div, fused_activation_function="RELU"))
_test_forward_elemwise(partial(_test_div, fused_activation_function="RELU6"))
_test_forward_elemwise(_test_pow)
_test_forward_elemwise(_test_maximum)
_test_forward_elemwise(_test_minimum)
_test_forward_elemwise(_test_greater)
_test_forward_elemwise(_test_squared_difference)
_test_forward_elemwise(_test_greater_equal)
_test_forward_elemwise(_test_less)
_test_forward_elemwise(_test_less_equal)
_test_forward_elemwise(_test_equal)
_test_forward_elemwise(_test_not_equal)
#######################################################################
# Logical operators
# -----------------
def _test_logical_binary(logical_bin_op, data):
with tf.Graph().as_default():
in_data = [array_ops.placeholder(shape=data[0].shape, dtype='bool', name='in_0'),
array_ops.placeholder(shape=data[1].shape, dtype='bool', name='in_1')]
out = logical_bin_op(in_data[0], in_data[1], name='out')
compare_tflite_with_tvm(data, ['in_0:0', 'in_1:0'], in_data, [out])
def _test_forward_logical_and(data):
""" One iteration of logical and """
return _test_logical_binary(math_ops.logical_and, data)
def _test_forward_logical_or(data):
""" One iteration of logical or """
return _test_logical_binary(math_ops.logical_or, data)
def test_all_logical():
data = [np.random.choice(a=[False, True], size=(2, 3, 4)).astype('bool'),
np.random.choice(a=[False, True], size=(2, 3, 4)).astype('bool')]
# boolean dtype is not supported by older versions than TFLite 1.15.0
if package_version.parse(tf.VERSION) >= package_version.parse('1.15.0'):
_test_forward_logical_and(data)
_test_forward_logical_or(data)
#######################################################################
# Zeros like
# --------
def _test_zeros_like(data):
""" One iteration of ZEROS LIKE """
with tf.Graph().as_default():
in_data = array_ops.placeholder(shape=data.shape, dtype=data.dtype)
out = gen_array_ops.zeros_like(in_data)
compare_tflite_with_tvm(data, 'Placeholder:0', [in_data], [out])
def test_forward_zeros_like():
""" ZEROS LIKE """
_test_zeros_like(np.arange(6.0, dtype=np.float32).reshape((1, 6)))
#######################################################################
# Reduce
# ------
def _test_reduce(math_op, data, keep_dims=None):
""" One iteration of reduce """
assert len(data) == 2
# Test with tensor and constant
with tf.Graph().as_default():
in_data = array_ops.placeholder(shape=data[0].shape, dtype=data[0].dtype, name='in')
out = math_op(in_data, data[1], keep_dims)
compare_tflite_with_tvm([data[0]], ['in:0'], [in_data], [out])
def _test_reduce_quantize(math_op, data, keep_dims=None):
""" One iteration of reduce """
assert len(data) == 2
# Test with tensor and constant
with tf.Graph().as_default():
in_data = [array_ops.placeholder(shape=data[0].shape, dtype="float32", name='in')]
inq_data = [tf.quantization.fake_quant_with_min_max_args(in_data[0], min=-100, max=100, name="inq_0")]
input_range = {'inq_0': (-100, 100)}
out = math_op(inq_data, data[1], keep_dims)
out = tf.quantization.fake_quant_with_min_max_args(out, min=-200, max=200, name="out")
compare_tflite_with_tvm([data[0]], ['inq_0:0'], [inq_data[0]], [out], quantized=True, input_range=input_range)
#######################################################################
# Reduce_min
# ----------
def _test_reduce_min(data, keep_dims=None):
""" One iteration of reduce_min """
return _test_reduce(math_ops.reduce_min, data, keep_dims)
#######################################################################
# Reduce_max
# ----------
def _test_reduce_max(data, keep_dims=None):
""" One iteration of reduce_max """
return _test_reduce(math_ops.reduce_max, data, keep_dims)
#######################################################################
# Reduce_mean
# -----------
def _test_reduce_mean(data, keep_dims=None, quantized=False):
""" One iteration of reduce_mean """
if quantized:
return _test_reduce_quantize(math_ops.reduce_mean, data, keep_dims)
else:
return _test_reduce(math_ops.reduce_mean, data, keep_dims)
#######################################################################
# Reduce_prod
# -----------
def _test_reduce_prod(data, keep_dims=None):
""" One iteration of reduce_prod """
return _test_reduce(math_ops.reduce_prod, data, keep_dims)
#######################################################################
# Reduce_sum
# -----------
def _test_reduce_sum(data, keep_dims=None):
""" One iteration of reduce_sum """
return _test_reduce(math_ops.reduce_sum, data, keep_dims)
def _test_forward_reduce(testop):
""" Reduce """
data0 = [np.random.rand(16, 16, 16, 16).astype("float32"), None]
data1 = [np.random.rand(16, 16, 16, 16).astype("float32"), np.array([1, 2], dtype=np.int32)]
testop(data0)
testop(data0, keep_dims=False)
testop(data0, keep_dims=True)
testop(data1)
testop(data1, keep_dims=False)
testop(data1, keep_dims=True)
def _test_forward_reduce_quantized(testop):
data0 = [np.array(np.random.uniform(0, 255, (3, 6)), dtype=np.uint8), np.array([1, 2], dtype=np.int32)]
testop(data0, quantized=True)
testop(data0, keep_dims=False, quantized=True)
testop(data0, keep_dims=True, quantized=True)
def test_all_reduce():
_test_forward_reduce(_test_reduce_min)
_test_forward_reduce(_test_reduce_max)
_test_forward_reduce(_test_reduce_mean)
_test_forward_reduce_quantized(_test_reduce_mean)
_test_forward_reduce(_test_reduce_prod)
_test_forward_reduce(_test_reduce_sum)
#######################################################################
# Squeeze
# -------
def _test_squeeze(data, squeeze_dims=None):
""" One iteration of squeeze """
if squeeze_dims is None:
squeeze_dims = []
with tf.Graph().as_default():
in_data = array_ops.placeholder(shape=data.shape, dtype=data.dtype)
if squeeze_dims:
out = array_ops.squeeze(in_data, squeeze_dims)
else:
out = array_ops.squeeze(in_data)
compare_tflite_with_tvm(data, 'Placeholder:0', [in_data], [out])
def test_forward_squeeze():
""" Squeeze """
_test_squeeze(np.arange(6).reshape((1, 2, 1, 3)), [0, 2])
_test_squeeze(np.arange(6).reshape((2, 1, 3, 1)), [1, 3])
#######################################################################
# Pad
# ---
def _test_pad(data, mode="CONSTANT", quantized=False):
""" One iteration of PAD """
assert len(data) == 2
# Test with tensor and constant
with tf.Graph().as_default():
in_data = [array_ops.placeholder(shape=data[0].shape, dtype='float32', name='in')]
if quantized:
# fake_quant will keep the tensors in float32 until the conversion in the session
input_range = {'inq_0': (-100, 100)}
inq_data = [tf.quantization.fake_quant_with_min_max_args(in_data[0],
min=-100,
max=100,
name="inq_0")]
out = array_ops.pad(inq_data[0], ops.convert_to_tensor(data[1], dtype=data[1].dtype), mode=mode)
compare_tflite_with_tvm([data[0]], ['inq_0:0'], inq_data, [out], quantized=True,
input_range=input_range)
else:
out = array_ops.pad(in_data[0], ops.convert_to_tensor(data[1], dtype=data[1].dtype), mode=mode)
compare_tflite_with_tvm([data[0]], ['in:0'], in_data, [out])
def test_forward_pad():
""" Pad """
_test_pad([np.arange(1.0, 7.0, dtype=np.float32).reshape((2, 1, 1, 3)),
np.array([[1, 1], [2, 2], [1, 1], [2, 2]], dtype=np.int32)])
_test_pad([np.arange(1.0, 7.0, dtype=np.float32).reshape((2, 1, 3)),
np.array([[2, 2], [1, 1], [1, 1]], dtype=np.int32)])
_test_pad([np.arange(1.0, 7.0, dtype=np.float32).reshape((2, 3)),
np.array([[1, 1], [2, 2]], dtype=np.int32)])
_test_pad([np.arange(1.0, 4.0, dtype=np.float32).reshape((1, 3)),
np.array([[1, 1], [2, 2]], dtype=np.int32)])
_test_pad([np.arange(1.0, 7.0, dtype=np.float32).reshape((2, 3)),
np.array([[1, 1], [2, 2]], dtype=np.int32)], mode="REFLECT")
_test_pad([np.arange(1.0, 7.0, dtype=np.float32).reshape((2, 3)),
np.array([[1, 1], [2, 2]], dtype=np.int32)], mode="SYMMETRIC")
_test_pad([np.arange(0, 256, dtype=np.uint8).reshape((1, 256)),
np.array([[1, 1], [2, 2]], dtype=np.int32)], quantized=True)
#######################################################################
# Pack
# -------------
def _test_pack(data, axis):
""" One iteration of pack """
assert len(data) >= 1
with tf.Graph().as_default():
in_data = [
array_ops.placeholder(shape=tensor.shape, dtype=tensor.dtype, name="in_{}".format(idx))
for idx, tensor in enumerate(data)]
out = array_ops.pack(in_data, axis=axis)
name = ["in_{}:0".format(idx) for idx in range(len(data))]
compare_tflite_with_tvm(data, name, in_data, [out])
def test_forward_pack():
""" Pack """
_test_pack(
[np.arange(6).reshape((1, 2, 1, 3)),
np.arange(6).reshape((1, 2, 1, 3))], 1)
_test_pack(
[np.arange(6).reshape((3, 2)),
np.arange(6).reshape((3, 2))], 1)
_test_pack(
[np.arange(6).reshape((2, 1, 1, 3)),
np.arange(6).reshape((2, 1, 1, 3)),
np.arange(6).reshape((2, 1, 1, 3))], 1)
#######################################################################
# Unpack
# ------
def _test_unpack(data, axis, num_unpacks):
""" One iteration of UNPACK """
with tf.Graph().as_default():
in_data = array_ops.placeholder(shape=data.shape, dtype=data.dtype)
out = gen_array_ops.unpack(in_data, num=num_unpacks, axis=axis, name='unpack')
out_names = ['out_' + str(n) + ':0' for n in range(num_unpacks)]
compare_tflite_with_tvm([data], 'Placeholder:0', [in_data], out, out_names=out_names)
def test_forward_unpack():
""" UNPACK """
_test_unpack(np.array(np.random.uniform(0, 5, (3, 1)), dtype=np.int32), axis=1, num_unpacks=1)
_test_unpack(np.array(np.random.uniform(0, 5, (3, 4)), dtype=np.float32), axis=0, num_unpacks=3)
# tflite 1.13 doesn't accept negative axis
if package_version.parse(tf.VERSION) >= package_version.parse('1.14.0'):
_test_unpack(np.array(np.random.uniform(0, 5, (3, 6)), dtype=np.int32), axis=-2, num_unpacks=3)
_test_unpack(np.array(np.random.uniform(0, 5, (2, 3, 4)), dtype=np.int32), axis=-3, num_unpacks=2)
#######################################################################
# Logistic
# --------
def _test_logistic(data, quantized=False):
""" One iteration of LOGISTIC """
with tf.Graph().as_default():
in_data = array_ops.placeholder(shape=data.shape, dtype='float32', name='in_0')
if quantized:
inq_data = tf.quantization.fake_quant_with_min_max_args(in_data, min=-5, max=5, name="inq_0")
input_range = {'inq_0': (-5, 5)}
out = math_ops.sigmoid(inq_data)
out = tf.quantization.fake_quant_with_min_max_args(out, min=0, max=1, name="out")
compare_tflite_with_tvm(data, 'inq_0:0', [inq_data], [out], quantized=True, input_range=input_range)
else:
out = math_ops.sigmoid(in_data)
compare_tflite_with_tvm(data, 'in_0:0', [in_data], [out])
def test_forward_logistic():
""" LOGISTIC """
_test_logistic(np.arange(6.0, dtype=np.float32).reshape((1, 6)))
_test_logistic(np.random.uniform(0, 255, (3, 6)).astype(np.uint8), quantized=True)
#######################################################################
# Softmax
# -------
def _test_softmax(data):
""" One iteration of softmax """
with tf.Graph().as_default():
in_data = array_ops.placeholder(shape=data.shape, dtype=data.dtype)
out = nn_ops.softmax(in_data)
compare_tflite_with_tvm(data, 'Placeholder:0', [in_data], [out])
def test_forward_softmax():
""" Softmax """
_test_softmax(np.arange(6.0, dtype=np.float32).reshape((1, 6)))
#######################################################################
# Tanh
# --------
def _test_tanh(data):
""" One iteration of TANH """
with tf.Graph().as_default():
in_data = array_ops.placeholder(shape=data.shape, dtype=data.dtype)
out = math_ops.tanh(in_data)
compare_tflite_with_tvm(data, 'Placeholder:0', [in_data], [out])
def test_forward_tanh():
""" TANH """
_test_tanh(np.arange(6.0, dtype=np.float32).reshape((1, 6)))
#######################################################################
# ReLu
# --------
def _test_relu(data):
""" One iteration of ReLU """
with tf.Graph().as_default():
in_data = array_ops.placeholder(shape=data.shape, dtype=data.dtype)
out = nn_ops.relu(in_data)
compare_tflite_with_tvm(data, 'Placeholder:0', [in_data], [out])
def test_forward_relu():
""" ReLU """
_test_relu(np.arange(6.0, dtype=np.float32).reshape((1, 6)))
def _test_prelu(data, alpha):
""" One iteration of PReLU """
with tf.Graph().as_default():
in_data = array_ops.placeholder(shape=data.shape, dtype=data.dtype)
# This specific pattern will be replaced into PRelu by tflite
out = nn_ops.relu(in_data) + (-alpha * nn_ops.relu(-in_data))
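        # Mathematically this is max(x, 0) + alpha * min(x, 0), i.e. PReLU with a
        # per-channel slope `alpha`, which the converter fuses into one PRELU op.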
compare_tflite_with_tvm(data, 'Placeholder:0', [in_data], [out])
def test_forward_prelu():
""" PReLU """
_test_prelu(np.random.uniform(-5, 5, size=(1, 32, 32, 3)).astype("float32"), np.full((3,), 0.2, dtype="float32"))
_test_prelu(np.random.uniform(-5, 5, size=(1, 32, 32, 3)).astype("float32"), np.full((1, 1, 3), 0.2, dtype="float32"))
#######################################################################
# Fully Connected
# -------
def _test_fully_connected(tensor_in_sizes, filter_in_sizes, bias_in_size=None):
""" One iteration of fully connected """
total_size_1 = 1
total_size_2 = 1
for s in tensor_in_sizes:
total_size_1 *= s
for s in filter_in_sizes:
total_size_2 *= s
# Initializes the input tensor with array containing incrementing
# numbers from 1.
data_array = [f * 1.0 for f in range(1, total_size_1 + 1)]
filter_array = [f * 1.0 for f in range(1, total_size_2 + 1)]
assert int(total_size_1 / tensor_in_sizes[0]) == filter_in_sizes[0], \
"input size and filter size are mismatched"
with tf.Graph().as_default():
in_data = array_ops.placeholder(shape=tensor_in_sizes, dtype='float32')
in_filter = constant_op.constant(filter_array, shape=filter_in_sizes, dtype='float32')
# reshape N H W C into N H*W*C
in_data_reshape = array_ops.reshape(in_data, [tensor_in_sizes[0], -1])
out = math_ops.mat_mul(in_data_reshape, in_filter)
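        # e.g. an input of shape [5, 1, 1, 150] is flattened to [5, 150] and,
        # multiplied by a [150, 100] weight matrix, gives a [5, 100] output.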
# if we have bias
if bias_in_size:
assert bias_in_size[0] == filter_in_sizes[1], "bias and filter size are mismatched"
bias_array = [f * 1.0 for f in range(1, bias_in_size[0] + 1)]
in_bias = constant_op.constant(bias_array, shape=bias_in_size, dtype='float32')
out = nn_ops.bias_add(out, in_bias)
data_array = np.reshape(data_array, tensor_in_sizes).astype('float32')
compare_tflite_with_tvm(data_array, 'Placeholder:0', [in_data], [out])
def test_forward_fully_connected():
""" Fully Connected """
_test_fully_connected([1, 1, 1, 150], [150, 100])
_test_fully_connected([1, 1, 1, 150], [150, 100], [100])
_test_fully_connected([5, 1, 1, 150], [150, 100])
_test_fully_connected([5, 1, 1, 150], [150, 100], [100])
#######################################################################
# Custom Operators
# ----------------
def test_detection_postprocess():
tf_model_file = tf_testing.get_workload_official(
"http://download.tensorflow.org/models/object_detection/"
"ssd_mobilenet_v2_quantized_300x300_coco_2019_01_03.tar.gz",
"ssd_mobilenet_v2_quantized_300x300_coco_2019_01_03/tflite_graph.pb"
)
converter = tf.lite.TFLiteConverter.from_frozen_graph(
tf_model_file,
input_arrays=["raw_outputs/box_encodings", "raw_outputs/class_predictions"],
output_arrays=[
"TFLite_Detection_PostProcess",
"TFLite_Detection_PostProcess:1",
"TFLite_Detection_PostProcess:2",
"TFLite_Detection_PostProcess:3"
],
input_shapes={
"raw_outputs/box_encodings": (1, 1917, 4),
"raw_outputs/class_predictions": (1, 1917, 91),
},
)
converter.allow_custom_ops = True
converter.inference_type = tf.lite.constants.FLOAT
tflite_model = converter.convert()
np.random.seed(0)
box_encodings = np.random.uniform(size=(1, 1917, 4)).astype('float32')
class_predictions = np.random.uniform(size=(1, 1917, 91)).astype('float32')
tflite_output = run_tflite_graph(tflite_model, [box_encodings, class_predictions])
tvm_output = run_tvm_graph(tflite_model, [box_encodings, class_predictions],
["raw_outputs/box_encodings", "raw_outputs/class_predictions"], num_output=4)
# check valid count is the same
assert tvm_output[3] == tflite_output[3]
valid_count = tvm_output[3][0]
tvm_boxes = tvm_output[0][0][:valid_count]
tvm_classes = tvm_output[1][0][:valid_count]
tvm_scores = tvm_output[2][0][:valid_count]
# check the output data is correct
tvm.testing.assert_allclose(np.squeeze(tvm_boxes), np.squeeze(tflite_output[0]), rtol=1e-5, atol=1e-5)
tvm.testing.assert_allclose(np.squeeze(tvm_classes), np.squeeze(tflite_output[1]), rtol=1e-5, atol=1e-5)
tvm.testing.assert_allclose(np.squeeze(tvm_scores), np.squeeze(tflite_output[2]), rtol=1e-5, atol=1e-5)
#######################################################################
# Mobilenet
# ---------
def test_forward_mobilenet_v1():
"""Test the Mobilenet V1 TF Lite model."""
# MobilenetV1
tflite_model_file = tf_testing.get_workload_official(
"http://download.tensorflow.org/models/mobilenet_v1_2018_08_02/mobilenet_v1_1.0_224.tgz",
"mobilenet_v1_1.0_224.tflite")
with open(tflite_model_file, "rb") as f:
tflite_model_buf = f.read()
data = np.random.uniform(size=(1, 224, 224, 3)).astype('float32')
tflite_output = run_tflite_graph(tflite_model_buf, data)
tvm_output = run_tvm_graph(tflite_model_buf, data, 'input')
tvm.testing.assert_allclose(np.squeeze(tvm_output[0]), np.squeeze(tflite_output[0]),
rtol=1e-5, atol=1e-5)
def test_forward_mobilenet_v2():
"""Test the Mobilenet V2 TF Lite model."""
# MobilenetV2
tflite_model_file = tf_testing.get_workload_official(
"http://download.tensorflow.org/models/tflite_11_05_08/mobilenet_v2_1.0_224.tgz",
"mobilenet_v2_1.0_224.tflite")
with open(tflite_model_file, "rb") as f:
tflite_model_buf = f.read()
data = np.random.uniform(size=(1, 224, 224, 3)).astype('float32')
tflite_output = run_tflite_graph(tflite_model_buf, data)
tvm_output = run_tvm_graph(tflite_model_buf, data, 'input')
tvm.testing.assert_allclose(np.squeeze(tvm_output[0]), np.squeeze(tflite_output[0]),
rtol=1e-5, atol=1e-5)
#######################################################################
# Inception
# ------------
def test_forward_inception_v3_net():
"""Test the Inception V3 TF Lite model."""
# InceptionV3
tflite_model_file = tf_testing.get_workload_official(
"https://storage.googleapis.com/download.tensorflow.org/models/tflite/model_zoo/upload_20180427/inception_v3_2018_04_27.tgz",
"inception_v3.tflite")
with open(tflite_model_file, "rb") as f:
tflite_model_buf = f.read()
data = np.random.uniform(size=(1, 299, 299, 3)).astype('float32')
tflite_output = run_tflite_graph(tflite_model_buf, data)
tvm_output = run_tvm_graph(tflite_model_buf, data, 'input')
tvm.testing.assert_allclose(np.squeeze(tvm_output[0]), np.squeeze(tflite_output[0]),
rtol=1e-5, atol=1e-5)
def test_forward_inception_v4_net():
"""Test the Inception V4 TF Lite model."""
# InceptionV4
tflite_model_file = tf_testing.get_workload_official(
"https://storage.googleapis.com/download.tensorflow.org/models/tflite/model_zoo/upload_20180427/inception_v4_2018_04_27.tgz",
"inception_v4.tflite")
with open(tflite_model_file, "rb") as f:
tflite_model_buf = f.read()
data = np.random.uniform(size=(1, 299, 299, 3)).astype('float32')
tflite_output = run_tflite_graph(tflite_model_buf, data)
tvm_output = run_tvm_graph(tflite_model_buf, data, 'input')
tvm.testing.assert_allclose(np.squeeze(tvm_output[0]), np.squeeze(tflite_output[0]),
rtol=1e-5, atol=1e-5)
def test_forward_qnn_inception_v1_net():
"""Test the Quantized TFLite Inception model."""
# InceptionV1
tflite_model_file = tf_testing.get_workload_official(
"https://storage.googleapis.com/download.tensorflow.org/models/inception_v1_224_quant_20181026.tgz",
"inception_v1_224_quant.tflite")
with open(tflite_model_file, "rb") as f:
tflite_model_buf = f.read()
    # Test image. Checking the labels because the requantize implementation differs between
    # TFLite and Relay, which causes the final output numbers to mismatch. So, accuracy is tested
    # via the labels. Also, a real image is given instead of random inputs.
data = get_real_image(224, 224)
tflite_output = run_tflite_graph(tflite_model_buf, data)
tflite_predictions = np.squeeze(tflite_output)
tflite_sorted_labels = tflite_predictions.argsort()[-3:][::-1]
tvm_output = run_tvm_graph(tflite_model_buf, data, 'input')
tvm_predictions = np.squeeze(tvm_output)
tvm_sorted_labels = tvm_predictions.argsort()[-3:][::-1]
tvm.testing.assert_allclose(tvm_sorted_labels, tflite_sorted_labels)
def test_forward_qnn_mobilenet_v1_net():
"""Test the Quantized TFLite Mobilenet V1 model."""
# MobilenetV1
tflite_model_file = tf_testing.get_workload_official(
"https://storage.googleapis.com/download.tensorflow.org/models/mobilenet_v1_2018_08_02/mobilenet_v1_1.0_224_quant.tgz",
"mobilenet_v1_1.0_224_quant.tflite")
with open(tflite_model_file, "rb") as f:
tflite_model_buf = f.read()
    # Test image. Checking the labels because the requantize implementation differs between
    # TFLite and Relay, which causes the final output numbers to mismatch. So, accuracy is tested
    # via the labels. Also, a real image is given instead of random inputs.
data = get_real_image(224, 224)
tflite_output = run_tflite_graph(tflite_model_buf, data)
tflite_predictions = np.squeeze(tflite_output)
tflite_sorted_labels = tflite_predictions.argsort()[-3:][::-1]
tvm_output = run_tvm_graph(tflite_model_buf, data, 'input')
tvm_predictions = np.squeeze(tvm_output)
tvm_sorted_labels = tvm_predictions.argsort()[-3:][::-1]
tvm.testing.assert_allclose(tvm_sorted_labels, tflite_sorted_labels)
def test_forward_qnn_mobilenet_v2_net():
"""Test the Quantized TFLite Mobilenet V2 model."""
# MobilenetV2
tflite_model_file = tf_testing.get_workload_official(
"https://storage.googleapis.com/download.tensorflow.org/models/tflite_11_05_08/mobilenet_v2_1.0_224_quant.tgz",
"mobilenet_v2_1.0_224_quant.tflite")
with open(tflite_model_file, "rb") as f:
tflite_model_buf = f.read()
    # Test image. Checking the labels because the requantize implementation differs between
    # TFLite and Relay, which causes the final output numbers to mismatch. So, accuracy is tested
    # via the labels. Also, a real image is given instead of random inputs.
data = get_real_image(224, 224)
tflite_output = run_tflite_graph(tflite_model_buf, data)
tflite_predictions = np.squeeze(tflite_output)
tflite_sorted_labels = tflite_predictions.argsort()[-3:][::-1]
tvm_output = run_tvm_graph(tflite_model_buf, data, 'input')
tvm_predictions = np.squeeze(tvm_output)
tvm_sorted_labels = tvm_predictions.argsort()[-3:][::-1]
tvm.testing.assert_allclose(tvm_sorted_labels, tflite_sorted_labels)
#######################################################################
# SSD Mobilenet
# -------------
def test_forward_ssd_mobilenet_v1():
"""Test the SSD Mobilenet V1 TF Lite model."""
# SSD MobilenetV1
tflite_model_file = tf_testing.get_workload_official(
"https://raw.githubusercontent.com/dmlc/web-data/master/tensorflow/models/object_detection/ssd_mobilenet_v1_coco_2018_01_28_nopp.tgz",
"ssd_mobilenet_v1_coco_2018_01_28_nopp.tflite")
with open(tflite_model_file, "rb") as f:
tflite_model_buf = f.read()
np.random.seed(0)
data = np.random.uniform(size=(1, 300, 300, 3)).astype('float32')
tflite_output = run_tflite_graph(tflite_model_buf, data)
tvm_output = run_tvm_graph(tflite_model_buf, data, 'normalized_input_image_tensor', num_output=2)
for i in range(2):
tvm.testing.assert_allclose(np.squeeze(tvm_output[i]), np.squeeze(tflite_output[i]),
rtol=1e-5, atol=2e-5)
#######################################################################
# MediaPipe
# -------------
def test_forward_mediapipe_hand_landmark():
"""Test MediaPipe 2D hand landmark TF Lite model."""
# MediaPipe 2D hand landmark TF
tflite_model_file = download_testdata(
"https://github.com/google/mediapipe/raw/master/mediapipe/models/hand_landmark.tflite",
"hand_landmark.tflite")
with open(tflite_model_file, "rb") as f:
tflite_model_buf = f.read()
data = np.random.uniform(size=(1, 256, 256, 3)).astype('float32')
tflite_output = run_tflite_graph(tflite_model_buf, data)
tvm_output = run_tvm_graph(tflite_model_buf, data, 'input_1', num_output=2)
for i in range(2):
tvm.testing.assert_allclose(np.squeeze(tvm_output[i]), np.squeeze(tflite_output[i]),
rtol=1e-5, atol=1e-5)
#######################################################################
# Main
# ----
if __name__ == '__main__':
# BatchToSpaceND
test_forward_batch_to_space_nd()
# SpaceToBatchND
test_forward_space_to_batch_nd()
# Split
test_forward_split()
# Transpose
test_forward_transpose()
# Cast
test_forward_cast()
# Tile
test_forward_tile()
# Transforms
test_forward_concatenation()
test_forward_pad()
test_forward_pack()
test_forward_unpack()
test_forward_reshape()
test_all_resize()
test_forward_squeeze()
test_forward_slice()
# NN
test_forward_convolution()
test_forward_transpose_conv()
test_forward_logistic()
test_forward_pooling()
test_forward_softmax()
test_forward_tanh()
test_forward_relu()
test_forward_prelu()
test_forward_fully_connected()
# Elemwise
test_all_elemwise()
# Unary elemwise
test_all_unary_elemwise()
# Zeros Like
test_forward_zeros_like()
# Reduce
test_all_reduce()
# Logical
test_all_logical()
# Detection_PostProcess
test_detection_postprocess()
# End to End
test_forward_mobilenet_v1()
test_forward_mobilenet_v2()
test_forward_inception_v3_net()
test_forward_inception_v4_net()
test_forward_ssd_mobilenet_v1()
test_forward_mediapipe_hand_landmark()
# End to End quantized
test_forward_qnn_inception_v1_net()
test_forward_qnn_mobilenet_v1_net()
test_forward_qnn_mobilenet_v2_net()
| 39.40404 | 142 | 0.603345 |
4a1633057533cf831ba8641c633d34527ffb8934 | 442 | py | Python | core/cogs/info.py | yxqsnz/Ark | 6842b652a582b06da2ec71bc6e2c7e3f5a9fc3f8 | ["MIT"] | null | null | null | core/cogs/info.py | yxqsnz/Ark | 6842b652a582b06da2ec71bc6e2c7e3f5a9fc3f8 | ["MIT"] | null | null | null | core/cogs/info.py | yxqsnz/Ark | 6842b652a582b06da2ec71bc6e2c7e3f5a9fc3f8 | ["MIT"] | null | null | null |
from discord.ext import commands
from discord.ext.commands.context import Context
from ark.settings import __version__, DRAGON_IMG
class Botinfo(commands.Cog):
def __init__(self, bot):
self.bot = bot
self.description = "BotInfo Commands"
    @commands.command(aliases=['v'], pass_context=True)
    async def version(self, ctx: Context):
return await ctx.send(__version__)
def setup(bot):
bot.add_cog(Botinfo(bot))
| 36.833333 | 54 | 0.726244 |
4a1634fa052dd32daeb29a4201c371879c9e9fcc | 725 | py | Python | Etap 3/Logia12/Zad3.py | aszokalski/Logia | 5e29745b01623df8a2f162f143656a76056af407 | ["MIT"] | null | null | null | Etap 3/Logia12/Zad3.py | aszokalski/Logia | 5e29745b01623df8a2f162f143656a76056af407 | ["MIT"] | null | null | null | Etap 3/Logia12/Zad3.py | aszokalski/Logia | 5e29745b01623df8a2f162f143656a76056af407 | ["MIT"] | null | null | null |
def kulki(lk):
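    # lk = [red, green, blue, yellow] ball counts. Builds every arrangement of the
    # balls (as a string over 'r'/'g'/'b'/'y') and keeps only those where no two
    # neighbouring balls share a colour; the result is de-duplicated and sorted.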
war.clear()
zrodlo = 'r' * lk[0] + 'g' * lk[1] + 'b' * lk[2] + 'y' * lk[3]
wariacjabezpowt(zrodlo, len(zrodlo), "")
lista = war[:]
wynik = []
for w in lista:
keep = True
for i in range(len(w) - 1):
if w[i] == w[i+1]:
keep = False
break
if keep and w not in wynik:
wynik.append(w)
wynik.sort()
return wynik
war = []
def wariacjabezpowt(zrodlo, n, wariacja):
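    # Recursively extends `wariacja` one character at a time, never using a colour
    # more often than it occurs in `zrodlo`; complete arrangements of length n are
    # appended to the global list `war`.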
if n == 0:
war.append(wariacja)
return
for litera in zrodlo:
if wariacja.count(litera) < zrodlo.count(litera):
wariacjabezpowt(zrodlo, n-1, wariacja+litera)
return
| 23.387097 | 66 | 0.485517 |
4a16352797a087a8385e3c49a4e6f04443c50b25 | 1,848 | py | Python | facerecognition/recognition/FaceRecognition.py | LEGaTO-SmartMirror/SmartMirror-Facerecognition | 65b87528b5f64a94b9352134712ff4ca404294a1 | ["MIT"] | null | null | null | facerecognition/recognition/FaceRecognition.py | LEGaTO-SmartMirror/SmartMirror-Facerecognition | 65b87528b5f64a94b9352134712ff4ca404294a1 | ["MIT"] | null | null | null | facerecognition/recognition/FaceRecognition.py | LEGaTO-SmartMirror/SmartMirror-Facerecognition | 65b87528b5f64a94b9352134712ff4ca404294a1 | ["MIT"] | 1 | 2019-06-12T21:13:22.000Z | 2019-06-12T21:13:22.000Z |
import os
import cv2
import numpy as np
import tensorflow as tf
if (tf.__version__ == "1.15.1"):
import tensorflow.compat.v1 as tf
from recognition import facenet
#CUDA_VISIBLE_DEVICES=1
os.environ["CUDA_VISIBLE_DEVICES"]="1"
BASE_DIR = os.path.dirname(__file__) + '/'
# Path to the frozen FaceNet graph. This is the actual model used to compute face embeddings.
PATH_TO_CKPT = 'model/20170512-110547.pb'
input_image_size = 160
class FaceRecognition:
def __init__(self):
# Load models
self.recognition_graph = tf.Graph()
config = tf.ConfigProto(device_count = {'GPU': 1})
#config.gpu_options.allow_growth = True
config.gpu_options.per_process_gpu_memory_fraction = 0.3
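        # restrict this TensorFlow session to roughly 30% of the GPU's memory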
self.sess = tf.Session(graph=self.recognition_graph,config=config)
#print('Loading feature extraction model')
with self.sess.as_default():
with self.recognition_graph.as_default():
facenet.load_model(BASE_DIR + PATH_TO_CKPT)
def __del__(self):
self.sess.close()
def recognize(self, image):
images_placeholder = self.recognition_graph.get_tensor_by_name("input:0")
embeddings = self.recognition_graph.get_tensor_by_name("embeddings:0")
phase_train_placeholder = self.recognition_graph.get_tensor_by_name("phase_train:0")
embedding_size = embeddings.get_shape()[1]
emb_array = np.zeros((1, embedding_size))
image = facenet.prewhiten(image)
#image = cv2.resize(image, (input_image_size, input_image_size), interpolation=cv2.INTER_AREA)
image = image.reshape(-1, input_image_size, input_image_size, 3)
feed_dict = {images_placeholder: image, phase_train_placeholder: False}
emb_array[0, :] = self.sess.run(embeddings, feed_dict=feed_dict)
return emb_array.squeeze()
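# Illustrative usage (assumed calling code, not part of this module):
#   recognizer = FaceRecognition()
#   embedding = recognizer.recognize(aligned_face)   # aligned_face: 160x160x3 crop
# The returned embedding can then be compared against stored embeddings, e.g. by
# Euclidean or cosine distance, to identify the person.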
| 37.714286 | 102 | 0.706169 |
4a163563bb937b817dbbdfdc242cc936a4d4ca4d | 2,019 | py | Python | app/service/auth_service.py | WishesFire/Epam-Python-Project | d54bbe48d539b0810d9b42b0839a64b035021c6d | ["Apache-2.0"] | 1 | 2021-11-18T11:57:02.000Z | 2021-11-18T11:57:02.000Z | app/service/auth_service.py | WishesFire/Epam-project | d54bbe48d539b0810d9b42b0839a64b035021c6d | ["Apache-2.0"] | null | null | null | app/service/auth_service.py | WishesFire/Epam-project | d54bbe48d539b0810d9b42b0839a64b035021c6d | ["Apache-2.0"] | null | null | null |
"""
Requires user registration and login utilities:
- `util_signup`: validation data form and create new user in database
- `util_login`: requirement validation parameters
"""
# pylint: disable=unused-variable
# pylint: disable=inconsistent-return-statements
import logging
from typing import Union
from werkzeug.security import generate_password_hash
from sqlalchemy import exc
from app.models.model import User
from app.service.validartors import RegistrationFormValidator, LoginFormValidator
from app.service.user_service import UserService
def util_signup(email: str, password_1: str, password_2: str) -> Union[bool, User]:
"""
    Validate the registration data and create a new user
:param email: user mail
:param password_1: first password
:param password_2: second similar password
:return: Status False or new_user
"""
validator = RegistrationFormValidator(email, password_1, password_2)
email_status, email_error_msg = validator.check_exists_email()
password_status, password_error_msg = validator.check_password_similar()
logging.info("Validation id DONE")
if email_status and password_status:
secure_password = generate_password_hash(password_1, method="sha256")
try:
new_user = UserService.create_new_user(email, secure_password)
logging.info("New user is created")
return new_user
except (ValueError, exc.DataError):
return False
elif not email_status or not password_status:
return False
def util_login(email: str, password: str):
"""
Validate the user and prepare data for logging into the system
:param email: user mail
:param password: user password
:return: user validation status, error message, user from the database
"""
user = UserService.find_user_by_email(email)
validator = LoginFormValidator(password, user)
user_status, user_error_msg = validator.check_user_exists()
logging.info("Validation is DONE")
return user_status, user_error_msg, user
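# Hedged usage sketch (illustrative only, not part of the original module): a Flask view
# would typically call these helpers roughly as below; the form field names are assumptions.
#
#     new_user = util_signup(form["email"], form["password1"], form["password2"])
#     if not new_user:
#         ...  # re-render the signup page with an error message
#
#     status, error_msg, user = util_login(form["email"], form["password"])
#     if status:
#         ...  # log the user in, e.g. via flask_login.login_user(user)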
| 35.421053
| 83
| 0.738484
|
4a16367b79e9453774507aed9353525b37dea2ed
| 1,302
|
py
|
Python
|
cfn_transform_library/always_update.py
|
benkehoe/cfn-transform-lib
|
587155f1b184e416fedec83be1dfdd9b48c70b44
|
[
"Apache-2.0"
] | 2
|
2020-04-07T12:43:10.000Z
|
2021-08-20T16:59:48.000Z
|
cfn_transform_library/always_update.py
|
benkehoe/cfn-transform-lib
|
587155f1b184e416fedec83be1dfdd9b48c70b44
|
[
"Apache-2.0"
] | 1
|
2018-02-08T19:06:19.000Z
|
2018-02-08T19:06:19.000Z
|
cfn_transform_library/always_update.py
|
benkehoe/cfn-transform-library
|
587155f1b184e416fedec83be1dfdd9b48c70b44
|
[
"Apache-2.0"
] | null | null | null |
"""Make a resource's properties always different if needed.
Look for AlwaysUpdate set to True in a resource's metadata,
and if so, put AlwaysUpdateNonce in the resource's properties.
Copyright 2018 iRobot Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import, print_function
import datetime
from cfn_transform import CloudFormationTemplateTransform
class AlwaysUpdate(CloudFormationTemplateTransform):
def process_resource(self, resource_name, resource):
if resource.get('Metadata', {}).get('AlwaysUpdate', False):
if 'Properties' not in resource:
resource['Properties'] = {}
resource['Properties']['AlwaysUpdateNonce'] = datetime.datetime.utcnow().isoformat() + 'Z'
if __name__ == '__main__':
AlwaysUpdate.main()
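# Hedged illustration (not part of the original file): given a template resource such as
#     {"Type": "AWS::SNS::Topic", "Metadata": {"AlwaysUpdate": True}}
# process_resource() adds a fresh UTC timestamp, producing roughly
#     {"Type": "AWS::SNS::Topic", "Metadata": {"AlwaysUpdate": True},
#      "Properties": {"AlwaysUpdateNonce": "2018-01-01T00:00:00.000000Z"}}
# so the properties differ on every transform run and CloudFormation always updates the resource.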
| 37.2
| 102
| 0.757296
|
4a1636bab322ea6f4c64a46395477a3fd3e50183
| 366
|
py
|
Python
|
Arase/PWE/__init__.py
|
mattkjames7/Arase
|
996167be35a13bbb1fdddfbe75e3a06d124b1d25
|
[
"MIT"
] | null | null | null |
Arase/PWE/__init__.py
|
mattkjames7/Arase
|
996167be35a13bbb1fdddfbe75e3a06d124b1d25
|
[
"MIT"
] | 1
|
2021-06-10T22:51:09.000Z
|
2021-06-10T22:51:09.000Z
|
Arase/PWE/__init__.py
|
mattkjames7/Arase
|
996167be35a13bbb1fdddfbe75e3a06d124b1d25
|
[
"MIT"
] | null | null | null |
from .DownloadData import DownloadData
from .ReadEFD import ReadEFD
from .ReadHFA import ReadHFA
from .ReadHFALow import ReadHFALow
from .ReadHFAHigh import ReadHFAHigh
from .ReadUHDensity import ReadUHDensity
from .RebuildDataIndex import RebuildDataIndex
from .ReadCDF import ReadCDF
from .ReadIndex import ReadIndex
from .DataAvailability import DataAvailability
| 33.272727
| 46
| 0.863388
|
4a16376ccad7478656e538acef0418e7fdfcf31b
| 10,550
|
py
|
Python
|
networkx/algorithms/traversal/breadth_first_search.py
|
LorenzoCancello/networkx
|
3a0b782c21f0d99ca7154ae8322292ec571e4a04
|
[
"BSD-3-Clause"
] | null | null | null |
networkx/algorithms/traversal/breadth_first_search.py
|
LorenzoCancello/networkx
|
3a0b782c21f0d99ca7154ae8322292ec571e4a04
|
[
"BSD-3-Clause"
] | null | null | null |
networkx/algorithms/traversal/breadth_first_search.py
|
LorenzoCancello/networkx
|
3a0b782c21f0d99ca7154ae8322292ec571e4a04
|
[
"BSD-3-Clause"
] | null | null | null |
"""Basic algorithms for breadth-first searching the nodes of a graph."""
import networkx as nx
from collections import deque
__all__ = [
'bfs_edges', 'bfs_tree', 'bfs_predecessors', 'bfs_successors',
'descendants_at_distance'
]
def generic_bfs_edges(G, source, neighbors=None, depth_limit=None):
"""Iterate over edges in a breadth-first search.
The breadth-first search begins at `source` and enqueues the
neighbors of newly visited nodes specified by the `neighbors`
function.
Parameters
----------
G : NetworkX graph
source : node
Starting node for the breadth-first search; this function
iterates over only those edges in the component reachable from
this node.
neighbors : function
A function that takes a newly visited node of the graph as input
and returns an *iterator* (not just a list) of nodes that are
neighbors of that node. If not specified, this is just the
``G.neighbors`` method, but in general it can be any function
that returns an iterator over some or all of the neighbors of a
given node, in any order.
depth_limit : int, optional(default=len(G))
Specify the maximum search depth
Yields
------
edge
Edges in the breadth-first search starting from `source`.
Examples
--------
>>> G = nx.path_graph(3)
>>> print(list(nx.bfs_edges(G,0)))
[(0, 1), (1, 2)]
>>> print(list(nx.bfs_edges(G, source=0, depth_limit=1)))
[(0, 1)]
Notes
-----
This implementation is from `PADS`_, which was in the public domain
when it was first accessed in July, 2004. The modifications
to allow depth limits are based on the Wikipedia article
"`Depth-limited-search`_".
.. _PADS: http://www.ics.uci.edu/~eppstein/PADS/BFS.py
.. _Depth-limited-search: https://en.wikipedia.org/wiki/Depth-limited_search
"""
visited = {source}
if depth_limit is None:
depth_limit = len(G)
queue = deque([(source, depth_limit, neighbors(source))])
print(source)
while queue:
parent, depth_now, children = queue[0]
try:
child = next(children)
if child not in visited:
yield parent, child
visited.add(child)
if depth_now > 1:
queue.append((child, depth_now - 1, neighbors(child)))
print(child)
except StopIteration:
queue.popleft()
def bfs_edges(G, source, reverse=False, depth_limit=None):
"""Iterate over edges in a breadth-first-search starting at source.
Parameters
----------
G : NetworkX graph
source : node
Specify starting node for breadth-first search; this function
iterates over only those edges in the component reachable from
this node.
reverse : bool, optional
If True traverse a directed graph in the reverse direction
depth_limit : int, optional(default=len(G))
Specify the maximum search depth
Returns
-------
edges: generator
A generator of edges in the breadth-first-search.
Examples
--------
To get the edges in a breadth-first search::
>>> G = nx.path_graph(3)
>>> list(nx.bfs_edges(G, 0))
[(0, 1), (1, 2)]
>>> list(nx.bfs_edges(G, source=0, depth_limit=1))
[(0, 1)]
To get the nodes in a breadth-first search order::
>>> G = nx.path_graph(3)
>>> root = 2
>>> edges = nx.bfs_edges(G, root)
>>> nodes = [root] + [v for u, v in edges]
>>> nodes
[2, 1, 0]
Notes
-----
The naming of this function is very similar to edge_bfs. The difference
is that 'edge_bfs' yields edges even if they extend back to an already
explored node while 'bfs_edges' yields the edges of the tree that results
from a breadth-first-search (BFS) so no edges are reported if they extend
to already explored nodes. That means 'edge_bfs' reports all edges while
'bfs_edges' only reports those traversed by a node-based BFS. Yet another
description is that 'bfs_edges' reports the edges traversed during BFS
while 'edge_bfs' reports all edges in the order they are explored.
Based on http://www.ics.uci.edu/~eppstein/PADS/BFS.py.
by D. Eppstein, July 2004. The modifications
to allow depth limits are based on the Wikipedia article
"`Depth-limited-search`_".
.. _Depth-limited-search: https://en.wikipedia.org/wiki/Depth-limited_search
See Also
--------
bfs_tree
dfs_edges
edge_bfs
"""
if reverse and G.is_directed():
successors = G.predecessors
else:
successors = G.neighbors
yield from generic_bfs_edges(G, source, successors, depth_limit)
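# Hedged contrast sketch (not part of the original module), illustrating the Notes above:
# on a triangle, bfs_edges reports only the two tree edges, while edge_bfs also reports
# the edge that closes the cycle back to an already-visited node.
#
#     >>> G = nx.cycle_graph(3)
#     >>> list(nx.bfs_edges(G, 0))
#     [(0, 1), (0, 2)]
#     >>> sorted(nx.edge_bfs(G, 0))
#     [(0, 1), (0, 2), (1, 2)]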
def bfs_tree(G, source, reverse=False, depth_limit=None):
"""Returns an oriented tree constructed from of a breadth-first-search
starting at source.
Parameters
----------
G : NetworkX graph
source : node
Specify starting node for breadth-first search
reverse : bool, optional
If True traverse a directed graph in the reverse direction
depth_limit : int, optional(default=len(G))
Specify the maximum search depth
Returns
-------
T: NetworkX DiGraph
An oriented tree
Examples
--------
>>> G = nx.path_graph(3)
>>> print(list(nx.bfs_tree(G,1).edges()))
[(1, 0), (1, 2)]
>>> H = nx.Graph()
>>> nx.add_path(H, [0, 1, 2, 3, 4, 5, 6])
>>> nx.add_path(H, [2, 7, 8, 9, 10])
>>> print(sorted(list(nx.bfs_tree(H, source=3, depth_limit=3).edges())))
[(1, 0), (2, 1), (2, 7), (3, 2), (3, 4), (4, 5), (5, 6), (7, 8)]
Notes
-----
Based on http://www.ics.uci.edu/~eppstein/PADS/BFS.py
by D. Eppstein, July 2004. The modifications
to allow depth limits are based on the Wikipedia article
"`Depth-limited-search`_".
.. _Depth-limited-search: https://en.wikipedia.org/wiki/Depth-limited_search
See Also
--------
dfs_tree
bfs_edges
edge_bfs
"""
T = nx.DiGraph()
T.add_node(source)
edges_gen = bfs_edges(G, source, reverse=reverse, depth_limit=depth_limit)
T.add_edges_from(edges_gen)
return T
def bfs_predecessors(G, source, depth_limit=None):
"""Returns an iterator of predecessors in breadth-first-search from source.
Parameters
----------
G : NetworkX graph
source : node
Specify starting node for breadth-first search
depth_limit : int, optional(default=len(G))
Specify the maximum search depth
Returns
-------
pred: iterator
(node, predecessors) iterator where predecessors is the list of
predecessors of the node.
Examples
--------
>>> G = nx.path_graph(3)
>>> print(dict(nx.bfs_predecessors(G, 0)))
{1: 0, 2: 1}
>>> H = nx.Graph()
>>> H.add_edges_from([(0, 1), (0, 2), (1, 3), (1, 4), (2, 5), (2, 6)])
>>> print(dict(nx.bfs_predecessors(H, 0)))
{1: 0, 2: 0, 3: 1, 4: 1, 5: 2, 6: 2}
>>> M = nx.Graph()
>>> nx.add_path(M, [0, 1, 2, 3, 4, 5, 6])
>>> nx.add_path(M, [2, 7, 8, 9, 10])
>>> print(sorted(nx.bfs_predecessors(M, source=1, depth_limit=3)))
[(0, 1), (2, 1), (3, 2), (4, 3), (7, 2), (8, 7)]
Notes
-----
Based on http://www.ics.uci.edu/~eppstein/PADS/BFS.py
by D. Eppstein, July 2004. The modifications
to allow depth limits are based on the Wikipedia article
"`Depth-limited-search`_".
.. _Depth-limited-search: https://en.wikipedia.org/wiki/Depth-limited_search
See Also
--------
bfs_tree
bfs_edges
edge_bfs
"""
for s, t in bfs_edges(G, source, depth_limit=depth_limit):
yield (t, s)
def bfs_successors(G, source, depth_limit=None):
"""Returns an iterator of successors in breadth-first-search from source.
Parameters
----------
G : NetworkX graph
source : node
Specify starting node for breadth-first search
depth_limit : int, optional(default=len(G))
Specify the maximum search depth
Returns
-------
succ: iterator
(node, successors) iterator where successors is the list of
successors of the node.
Examples
--------
>>> G = nx.path_graph(3)
>>> print(dict(nx.bfs_successors(G,0)))
{0: [1], 1: [2]}
>>> H = nx.Graph()
>>> H.add_edges_from([(0, 1), (0, 2), (1, 3), (1, 4), (2, 5), (2, 6)])
>>> print(dict(nx.bfs_successors(H, 0)))
{0: [1, 2], 1: [3, 4], 2: [5, 6]}
>>> G = nx.Graph()
>>> nx.add_path(G, [0, 1, 2, 3, 4, 5, 6])
>>> nx.add_path(G, [2, 7, 8, 9, 10])
>>> print(dict(nx.bfs_successors(G, source=1, depth_limit=3)))
{1: [0, 2], 2: [3, 7], 3: [4], 7: [8]}
Notes
-----
Based on http://www.ics.uci.edu/~eppstein/PADS/BFS.py
by D. Eppstein, July 2004. The modifications
to allow depth limits are based on the Wikipedia article
"`Depth-limited-search`_".
.. _Depth-limited-search: https://en.wikipedia.org/wiki/Depth-limited_search
See Also
--------
bfs_tree
bfs_edges
edge_bfs
"""
parent = source
children = []
for p, c in bfs_edges(G, source, depth_limit=depth_limit):
if p == parent:
children.append(c)
continue
yield (parent, children)
children = [c]
parent = p
yield (parent, children)
def descendants_at_distance(G, source, distance):
"""Returns all nodes at a fixed `distance` from `source` in `G`.
Parameters
----------
G : NetworkX DiGraph
A directed graph
source : node in `G`
distance : the distance of the wanted nodes from `source`
Returns
-------
set()
The descendants of `source` in `G` at the given `distance` from `source`
"""
if not G.has_node(source):
raise nx.NetworkXError(f"The node {source} is not in the graph.")
current_distance = 0
queue = {source}
visited = {source}
# this is basically BFS, except that the queue only stores the nodes at
# current_distance from source at each iteration
while queue:
if current_distance == distance:
return queue
current_distance += 1
next_vertices = set()
for vertex in queue:
for child in G[vertex]:
if child not in visited:
visited.add(child)
next_vertices.add(child)
queue = next_vertices
return set()
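# Hedged example (not part of the original module): in a simple directed path, the only
# node at distance 2 from the source is its second successor.
#
#     >>> G = nx.DiGraph([(0, 1), (1, 2), (2, 3)])
#     >>> nx.descendants_at_distance(G, 0, 2)
#     {2}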
| 28.90411
| 80
| 0.602844
|
4a1637879158b4035d697b49d749bae39ffd50a9
| 350
|
py
|
Python
|
00-basics/00-function_default_value.py
|
ralexrivero/python_fundation
|
34a855db7380d3d91db6a8f02d97f287d038ef5f
|
[
"Apache-2.0"
] | 1
|
2021-09-19T04:09:48.000Z
|
2021-09-19T04:09:48.000Z
|
00-basics/00-function_default_value.py
|
ralexrivero/python_fundation
|
34a855db7380d3d91db6a8f02d97f287d038ef5f
|
[
"Apache-2.0"
] | null | null | null |
00-basics/00-function_default_value.py
|
ralexrivero/python_fundation
|
34a855db7380d3d91db6a8f02d97f287d038ef5f
|
[
"Apache-2.0"
] | null | null | null |
#
# function with default value
#
def power(num, x=1):
result = 1
for i in range(x):
result = result * num
return result
# if you don't pass x, the default value of 1 is used
print(power(2))
print(power(2,3))
print(power(3,4))
# supply the values by name; named arguments don't need to be passed in any particular order
print(power(x = 4, num = 3))
| 21.875
| 86
| 0.651429
|
4a1638358f59fe67e43fc55f0a6ba6b656be94ae
| 17,083
|
py
|
Python
|
tests/zero_code_change/tensorflow_integration_tests.py
|
vandanavk/sagemaker-debugger
|
5246cda198295aa1dd1656ad32b30c4bb1e2aec4
|
[
"Apache-2.0"
] | null | null | null |
tests/zero_code_change/tensorflow_integration_tests.py
|
vandanavk/sagemaker-debugger
|
5246cda198295aa1dd1656ad32b30c4bb1e2aec4
|
[
"Apache-2.0"
] | null | null | null |
tests/zero_code_change/tensorflow_integration_tests.py
|
vandanavk/sagemaker-debugger
|
5246cda198295aa1dd1656ad32b30c4bb1e2aec4
|
[
"Apache-2.0"
] | null | null | null |
"""
WARNING: This must be run manually, with the custom TensorFlow fork installed.
Not used in CI/CD. May be useful for DLC testing.
Be sure to run with Python2 (/usr/bin/python) and Python3.
Run with and without the flag --zcc.
Test with DNNClassifier and raw Estimator.
Test with Session.
Test with Keras.
Test with AdamOptimizer and SGD.
We check that certain tensors are saved.
Here in the test suite we delete the hook after every script.
"""
# Standard Library
import argparse
# Third Party
import tensorflow as tf
import tensorflow_datasets as tfds
from tests.tensorflow.hooks.test_mirrored_strategy import test_basic
from tests.tensorflow.keras.test_keras_mirrored import test_tf_keras
# First Party
import smdebug.tensorflow as smd
from smdebug.core.utils import SagemakerSimulator
# Local
from .tf_utils import (
get_data,
get_estimator,
get_input_fns,
get_keras_data,
get_keras_model_v1,
get_train_op_and_placeholders,
)
def test_estimator(script_mode: bool = False):
""" Works as intended. """
smd.del_hook()
tf.reset_default_graph()
with SagemakerSimulator() as sim:
# Setup
mnist_classifier = get_estimator()
train_input_fn, eval_input_fn = get_input_fns()
# Train and evaluate
train_steps, eval_steps = 80, 20
if script_mode:
hook = smd.EstimatorHook(out_dir=sim.out_dir)
hook.set_mode(mode=smd.modes.TRAIN)
mnist_classifier.train(input_fn=train_input_fn, steps=train_steps, hooks=[hook])
hook.set_mode(mode=smd.modes.EVAL)
mnist_classifier.evaluate(input_fn=eval_input_fn, steps=eval_steps, hooks=[hook])
else:
mnist_classifier.train(input_fn=train_input_fn, steps=train_steps)
mnist_classifier.evaluate(input_fn=eval_input_fn, steps=eval_steps)
# Check that hook created and tensors saved
trial = smd.create_trial(path=sim.out_dir)
print(trial)
assert smd.get_hook() is not None, "Hook was not created."
assert len(trial.steps()) > 0, "Nothing saved at any step."
assert len(trial.tensor_names()) > 0, "Tensors were not saved."
assert trial.steps() == [0, train_steps], "Wrong step count for trial."
def test_estimator_gradients_zcc(nested=False, mirrored=False):
""" Works as intended. """
smd.del_hook()
tf.reset_default_graph()
json_file_contents = """
{
"S3OutputPath": "s3://sagemaker-test",
"LocalPath": "/opt/ml/output/tensors",
"HookParameters" : {
"save_interval": "2",
"include_workers": "all"
},
"CollectionConfigurations": [
{
"CollectionName": "gradients"
},
{
"CollectionName": "weights"
},
{
"CollectionName": "losses"
},
{
"CollectionName": "biases"
}
]
}
"""
with SagemakerSimulator(json_file_contents=json_file_contents) as sim:
if mirrored:
test_basic("/opt/ml/output/tensors", zcc=True)
else:
# Setup
mnist_classifier = get_estimator(nested_optimizer=nested, mirrored=mirrored)
train_input_fn, eval_input_fn = get_input_fns()
# Train and evaluate
train_steps, eval_steps = 10, 10
mnist_classifier.train(input_fn=train_input_fn, steps=train_steps)
mnist_classifier.evaluate(input_fn=eval_input_fn, steps=eval_steps)
# Check that hook created and tensors saved
trial = smd.create_trial(path=sim.out_dir)
print(trial)
assert smd.get_hook() is not None, "Hook was not created."
assert len(trial.steps()) > 0, "Nothing saved at any step."
assert len(trial.tensor_names()) > 0, "Tensors were not saved."
assert trial.steps() == [
0,
2,
4,
6,
8,
10,
12,
14,
16,
18,
], "Wrong step count for trial."
print(trial.tensor_names(collection="gradients"))
assert len(trial.tensor_names(collection="gradients")) > 0
assert len(trial.tensor_names(collection="weights")) > 0
assert len(trial.tensor_names(collection="losses")) > 0
assert len(trial.tensor(trial.tensor_names(collection="gradients")[0]).steps()) > 0
assert len(trial.modes()) == 2
def test_estimator_gradients_zcc_nested():
test_estimator_gradients_zcc(nested=True)
def test_estimator_gradients_zcc_mirrored():
test_estimator_gradients_zcc(nested=False, mirrored=True)
def test_linear_classifier(script_mode: bool = False):
""" Works as intended. """
smd.del_hook()
tf.reset_default_graph()
with SagemakerSimulator() as sim:
# Setup
train_input_fn, eval_input_fn = get_input_fns()
x_feature = tf.feature_column.numeric_column("x", shape=(28, 28))
estimator = tf.compat.v1.estimator.LinearClassifier(
feature_columns=[x_feature], model_dir="/tmp/mnist_linear_classifier", n_classes=10
)
# Train
if script_mode:
hook = smd.EstimatorHook(out_dir=sim.out_dir)
estimator.train(input_fn=train_input_fn, steps=100, hooks=[hook])
else:
estimator.train(input_fn=train_input_fn, steps=100)
# Check that hook created and tensors saved
trial = smd.create_trial(path=sim.out_dir)
assert smd.get_hook() is not None, "Hook was not created."
assert len(trial.steps()) > 0, "Nothing saved at any step."
assert len(trial.tensor_names()) > 0, "Tensors were not saved."
def test_monitored_session(script_mode: bool = False):
""" Works as intended. """
smd.del_hook()
tf.reset_default_graph()
json_file_contents = """
{
"S3OutputPath": "s3://sagemaker-test",
"LocalPath": "/opt/ml/output/tensors",
"HookParameters" : {
"save_interval": "100"
}
}
"""
with SagemakerSimulator(json_file_contents=json_file_contents) as sim:
train_op, X, Y = get_train_op_and_placeholders()
init = tf.compat.v1.global_variables_initializer()
mnist = get_data()
if script_mode:
hook = smd.SessionHook(out_dir=sim.out_dir)
sess = tf.train.MonitoredSession(hooks=[hook])
else:
sess = tf.train.MonitoredSession()
with sess:
sess.run(init)
for step in range(1, 101):
batch_x, batch_y = mnist.train.next_batch(32)
sess.run(train_op, feed_dict={X: batch_x, Y: batch_y})
# Check that hook created and tensors saved
trial = smd.create_trial(path=sim.out_dir)
assert smd.get_hook() is not None, "Hook was not created."
assert len(trial.steps()) > 0, "Nothing saved at any step."
assert len(trial.tensor_names()) > 0, "Tensors were not saved."
def test_monitored_session_gradients_zcc():
""" Works as intended. """
smd.del_hook()
json_file_contents = """
{
"S3OutputPath": "s3://sagemaker-test",
"LocalPath": "/opt/ml/output/tensors",
"HookParameters" : {
"save_interval": "100"
},
"CollectionConfigurations": [
{
"CollectionName": "gradients"
},
{
"CollectionName": "losses"
}
]
}
"""
tf.reset_default_graph()
with SagemakerSimulator(json_file_contents=json_file_contents) as sim:
train_op, X, Y = get_train_op_and_placeholders()
init = tf.compat.v1.global_variables_initializer()
mnist = get_data()
sess = tf.train.MonitoredSession()
with sess:
sess.run(init)
for step in range(1, 101):
batch_x, batch_y = mnist.train.next_batch(32)
sess.run(train_op, feed_dict={X: batch_x, Y: batch_y})
# Check that hook created and tensors saved
trial = smd.create_trial(path=sim.out_dir)
assert smd.get_hook() is not None, "Hook was not created."
assert len(trial.steps()) > 0, "Nothing saved at any step."
assert len(trial.tensor_names()) > 0, "Tensors were not saved."
assert len(trial.tensor_names(collection="gradients")) > 0
def test_keras_v1(script_mode: bool = False):
""" Works as intended. """
smd.del_hook()
tf.reset_default_graph()
tf.keras.backend.clear_session()
with SagemakerSimulator() as sim:
model = get_keras_model_v1()
(x_train, y_train), (x_test, y_test) = get_keras_data()
model.compile(
loss="sparse_categorical_crossentropy",
optimizer=tf.keras.optimizers.RMSprop(),
metrics=["accuracy"],
)
if script_mode:
hook = smd.KerasHook(out_dir=sim.out_dir)
history = model.fit(
x_train, y_train, batch_size=64, epochs=5, validation_split=0.2, callbacks=[hook]
)
test_scores = model.evaluate(x_test, y_test, verbose=2, callbacks=[hook])
else:
history = model.fit(x_train, y_train, batch_size=64, epochs=5, validation_split=0.2)
test_scores = model.evaluate(x_test, y_test, verbose=2)
# Check that hook created and tensors saved
trial = smd.create_trial(path=sim.out_dir)
assert smd.get_hook() is not None, "Hook was not created."
assert len(trial.steps()) > 0, "Nothing saved at any step."
assert len(trial.tensor_names()) > 0, "Tensors were not saved."
def test_keras_gradients(script_mode: bool = False, tf_optimizer: bool = False):
""" Works as intended. """
smd.del_hook()
tf.reset_default_graph()
tf.keras.backend.clear_session()
json_file_contents = """
{
"S3OutputPath": "s3://sagemaker-test",
"LocalPath": "/opt/ml/output/tensors",
"CollectionConfigurations": [
{
"CollectionName": "gradients"
},
{
"CollectionName": "optimizer_variables"
},
{
"CollectionName": "losses"
}
]
}
"""
with SagemakerSimulator(json_file_contents=json_file_contents) as sim:
model = get_keras_model_v1()
(x_train, y_train), (x_test, y_test) = get_keras_data()
if tf_optimizer:
opt = tf.train.RMSPropOptimizer(0.1)
else:
opt = tf.keras.optimizers.RMSprop()
if script_mode:
hook = smd.KerasHook(
out_dir=sim.out_dir,
include_collections=["gradients", "optimizer_variables", "losses"],
)
opt = hook.wrap_optimizer(opt)
model.compile(
loss="sparse_categorical_crossentropy", optimizer=opt, metrics=["accuracy"]
)
history = model.fit(
x_train, y_train, batch_size=16, epochs=5, validation_split=0.2, callbacks=[hook]
)
test_scores = model.evaluate(x_test, y_test, verbose=2, callbacks=[hook])
else:
model.compile(
loss="sparse_categorical_crossentropy", optimizer=opt, metrics=["accuracy"]
)
history = model.fit(x_train, y_train, batch_size=16, epochs=5, validation_split=0.2)
test_scores = model.evaluate(x_test, y_test, verbose=2)
# Check that hook created and tensors saved
trial = smd.create_trial(path=sim.out_dir)
assert smd.get_hook() is not None, "Hook was not created."
assert len(trial.steps()) > 0, "Nothing saved at any step."
assert len(trial.tensor_names()) > 0, "Tensors were not saved."
assert len(trial.tensor_names(collection="gradients")) > 0
if not tf_optimizer:
# as this is only supported for keras optimizers currently
assert len(trial.tensor_names(collection="optimizer_variables")) > 0
def test_keras_gradients_tf_opt(script_mode: bool = False):
test_keras_gradients(script_mode=script_mode, tf_optimizer=True)
def test_keras_gradients_mirrored(include_workers="one"):
""" Works as intended. """
smd.del_hook()
tf.reset_default_graph()
tf.keras.backend.clear_session()
json_file_contents_p1 = """
{
"S3OutputPath": "s3://sagemaker-test",
"LocalPath": "/opt/ml/output/tensors",
"HookParameters" : {
"""
json_file_contents_p2 = f'"include_workers": "{include_workers}",'
json_file_contents_p3 = """
"save_interval": "3"
},
"CollectionConfigurations": [
{
"CollectionName": "gradients"
},
{
"CollectionName": "optimizer_variables"
},
{
"CollectionName": "losses"
},
{
"CollectionName": "weights"
},
{
"CollectionName": "biases"
},
{
"CollectionName": "outputs"
},
{
"CollectionName": "metrics"
}
]
}
"""
json_file_contents = json_file_contents_p1 + json_file_contents_p2 + json_file_contents_p3
with SagemakerSimulator(json_file_contents=json_file_contents) as sim:
test_tf_keras("/opt/ml/output/tensors", zcc=True, include_workers=include_workers)
def test_keras_gradients_mirrored_all_workers():
test_keras_gradients_mirrored(include_workers="all")
def test_keras_to_estimator(script_mode: bool = False):
""" Works as intended. """
import tensorflow.compat.v1.keras as keras
tf.reset_default_graph()
smd.del_hook()
keras.backend.clear_session()
with SagemakerSimulator() as sim:
model = keras.models.Sequential(
[
keras.layers.Dense(16, activation="relu", input_shape=(4,)),
keras.layers.Dropout(0.2),
keras.layers.Dense(1, activation="sigmoid"),
]
)
def input_fn():
split = tfds.Split.TRAIN
dataset = tfds.load("iris", split=split, as_supervised=True)
dataset = dataset.map(lambda features, labels: ({"dense_input": features}, labels))
dataset = dataset.batch(32).repeat()
return dataset
model.compile(loss="categorical_crossentropy", optimizer="adam")
model.summary()
keras_estimator = tf.keras.estimator.model_to_estimator(
keras_model=model, model_dir=sim.out_dir
)
if script_mode:
hook = smd.EstimatorHook(sim.out_dir)
hook.set_mode(smd.modes.TRAIN)
keras_estimator.train(input_fn=input_fn, steps=25, hooks=[hook])
hook.set_mode(smd.modes.EVAL)
eval_result = keras_estimator.evaluate(input_fn=input_fn, steps=10, hooks=[hook])
else:
keras_estimator.train(input_fn=input_fn, steps=25)
keras_estimator.evaluate(input_fn=input_fn, steps=10)
tr = smd.create_trial(sim.out_dir)
assert len(tr.tensor_names()) == 1
assert tr.steps() == [0, 25]
assert len(tr.steps(smd.modes.TRAIN)) == 1
assert len(tr.steps(smd.modes.EVAL)) == 1
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--script-mode", help="Manually create hooks instead of relying on ZCC", action="store_true"
)
args = parser.parse_args()
script_mode = args.script_mode
test_monitored_session(script_mode=script_mode)
if not script_mode:
test_monitored_session_gradients_zcc()
test_estimator(script_mode=script_mode)
if not script_mode:
test_estimator_gradients_zcc()
test_estimator_gradients_zcc_nested()
test_estimator_gradients_zcc_mirrored()
test_linear_classifier(script_mode=script_mode)
test_keras_v1(script_mode=script_mode)
test_keras_gradients(script_mode=script_mode)
test_keras_gradients_tf_opt(script_mode=script_mode)
test_keras_to_estimator(script_mode=script_mode)
if not script_mode:
test_keras_gradients_mirrored_all_workers()
test_keras_gradients_mirrored()
| 36.346809
| 100
| 0.593338
|
4a1638b2e2d95c0a3a544b68e79c087f2739dc5b
| 14,152
|
py
|
Python
|
nssrc/com/citrix/netscaler/nitro/resource/config/authentication/authenticationvserver_authenticationsamlpolicy_binding.py
|
benfinke/ns_python
|
d651d7aa01d7dc63c1cd435c7b3314d7f5b26659
|
[
"Apache-2.0"
] | 1
|
2015-04-05T21:21:26.000Z
|
2015-04-05T21:21:26.000Z
|
nssrc/com/citrix/netscaler/nitro/resource/config/authentication/authenticationvserver_authenticationsamlpolicy_binding.py
|
benfinke/ns_python
|
d651d7aa01d7dc63c1cd435c7b3314d7f5b26659
|
[
"Apache-2.0"
] | 1
|
2017-01-20T22:56:58.000Z
|
2017-01-20T22:56:58.000Z
|
nssrc/com/citrix/netscaler/nitro/resource/config/authentication/authenticationvserver_authenticationsamlpolicy_binding.py
|
benfinke/ns_python
|
d651d7aa01d7dc63c1cd435c7b3314d7f5b26659
|
[
"Apache-2.0"
] | 6
|
2015-04-21T13:14:08.000Z
|
2020-12-03T07:27:52.000Z
|
#
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class authenticationvserver_authenticationsamlpolicy_binding(base_resource) :
""" Binding class showing the authenticationsamlpolicy that can be bound to authenticationvserver.
"""
def __init__(self) :
self._policy = ""
self._priority = 0
self._acttype = 0
self._secondary = False
self._name = ""
self._groupextraction = False
self._nextfactor = ""
self._gotopriorityexpression = ""
self.___count = 0
@property
def priority(self) :
ur"""The priority, if any, of the vpn vserver policy.
"""
try :
return self._priority
except Exception as e:
raise e
@priority.setter
def priority(self, priority) :
ur"""The priority, if any, of the vpn vserver policy.
"""
try :
self._priority = priority
except Exception as e:
raise e
@property
def name(self) :
ur"""Name of the authentication virtual server to which to bind the policy.<br/>Minimum length = 1.
"""
try :
return self._name
except Exception as e:
raise e
@name.setter
def name(self, name) :
ur"""Name of the authentication virtual server to which to bind the policy.<br/>Minimum length = 1
"""
try :
self._name = name
except Exception as e:
raise e
@property
def nextfactor(self) :
ur"""Applicable only while binding advance authentication policy as classic authentication policy does not support nFactor.
"""
try :
return self._nextfactor
except Exception as e:
raise e
@nextfactor.setter
def nextfactor(self, nextfactor) :
ur"""Applicable only while binding advance authentication policy as classic authentication policy does not support nFactor
"""
try :
self._nextfactor = nextfactor
except Exception as e:
raise e
@property
def gotopriorityexpression(self) :
ur"""Applicable only to advance authentication policy. Expression or other value specifying the next policy to be evaluated if the current policy evaluates to TRUE. Specify one of the following values:
* NEXT - Evaluate the policy with the next higher priority number.
* END - End policy evaluation.
* USE_INVOCATION_RESULT - Applicable if this policy invokes another policy label. If the final goto in the invoked policy label has a value of END, the evaluation stops. If the final goto is anything other than END, the current policy label performs a NEXT.
* A default syntax expression that evaluates to a number.
If you specify an expression, the number to which it evaluates determines the next policy to evaluate, as follows:
* If the expression evaluates to a higher numbered priority, the policy with that priority is evaluated next.
* If the expression evaluates to the priority of the current policy, the policy with the next higher numbered priority is evaluated next.
* If the expression evaluates to a priority number that is numerically higher than the highest numbered priority, policy evaluation ends.
An UNDEF event is triggered if:
* The expression is invalid.
* The expression evaluates to a priority number that is numerically lower than the current policy's priority.
* The expression evaluates to a priority number that is between the current policy's priority number (say, 30) and the highest priority number (say, 100), but does not match any configured priority number (for example, the expression evaluates to the number 85). This example assumes that the priority number increments by 10 for every successive policy, and therefore a priority number of 85 does not exist in the policy label.
"""
try :
return self._gotopriorityexpression
except Exception as e:
raise e
@gotopriorityexpression.setter
def gotopriorityexpression(self, gotopriorityexpression) :
ur"""Applicable only to advance authentication policy. Expression or other value specifying the next policy to be evaluated if the current policy evaluates to TRUE. Specify one of the following values:
* NEXT - Evaluate the policy with the next higher priority number.
* END - End policy evaluation.
* USE_INVOCATION_RESULT - Applicable if this policy invokes another policy label. If the final goto in the invoked policy label has a value of END, the evaluation stops. If the final goto is anything other than END, the current policy label performs a NEXT.
* A default syntax expression that evaluates to a number.
If you specify an expression, the number to which it evaluates determines the next policy to evaluate, as follows:
* If the expression evaluates to a higher numbered priority, the policy with that priority is evaluated next.
* If the expression evaluates to the priority of the current policy, the policy with the next higher numbered priority is evaluated next.
* If the expression evaluates to a priority number that is numerically higher than the highest numbered priority, policy evaluation ends.
An UNDEF event is triggered if:
* The expression is invalid.
* The expression evaluates to a priority number that is numerically lower than the current policy's priority.
* The expression evaluates to a priority number that is between the current policy's priority number (say, 30) and the highest priority number (say, 100), but does not match any configured priority number (for example, the expression evaluates to the number 85). This example assumes that the priority number increments by 10 for every successive policy, and therefore a priority number of 85 does not exist in the policy label.
"""
try :
self._gotopriorityexpression = gotopriorityexpression
except Exception as e:
raise e
@property
def secondary(self) :
ur"""Bind the authentication policy to the secondary chain.
Provides for multifactor authentication in which a user must authenticate via both a primary authentication method and, afterward, via a secondary authentication method.
Because user groups are aggregated across authentication systems, usernames must be the same on all authentication servers. Passwords can be different.
"""
try :
return self._secondary
except Exception as e:
raise e
@secondary.setter
def secondary(self, secondary) :
ur"""Bind the authentication policy to the secondary chain.
Provides for multifactor authentication in which a user must authenticate via both a primary authentication method and, afterward, via a secondary authentication method.
Because user groups are aggregated across authentication systems, usernames must be the same on all authentication servers. Passwords can be different.
"""
try :
self._secondary = secondary
except Exception as e:
raise e
@property
def policy(self) :
ur"""The name of the policy, if any, bound to the authentication vserver.
"""
try :
return self._policy
except Exception as e:
raise e
@policy.setter
def policy(self, policy) :
ur"""The name of the policy, if any, bound to the authentication vserver.
"""
try :
self._policy = policy
except Exception as e:
raise e
@property
def groupextraction(self) :
ur"""Applicable only while bindind classic authentication policy as advance authentication policy use nFactor.
"""
try :
return self._groupextraction
except Exception as e:
raise e
@groupextraction.setter
def groupextraction(self, groupextraction) :
ur"""Applicable only while bindind classic authentication policy as advance authentication policy use nFactor
"""
try :
self._groupextraction = groupextraction
except Exception as e:
raise e
@property
def acttype(self) :
try :
return self._acttype
except Exception as e:
raise e
def _get_nitro_response(self, service, response) :
ur""" converts nitro response into object and returns the object array in case of get request.
"""
try :
result = service.payload_formatter.string_to_resource(authenticationvserver_authenticationsamlpolicy_binding_response, response, self.__class__.__name__)
if(result.errorcode != 0) :
if (result.errorcode == 444) :
service.clear_session(self)
if result.severity :
if (result.severity == "ERROR") :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
else :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
return result.authenticationvserver_authenticationsamlpolicy_binding
except Exception as e :
raise e
def _get_object_name(self) :
ur""" Returns the value of object identifier argument
"""
try :
if self.name is not None :
return str(self.name)
return None
except Exception as e :
raise e
@classmethod
def add(cls, client, resource) :
try :
if resource and type(resource) is not list :
updateresource = authenticationvserver_authenticationsamlpolicy_binding()
updateresource.name = resource.name
updateresource.policy = resource.policy
updateresource.priority = resource.priority
updateresource.secondary = resource.secondary
updateresource.groupextraction = resource.groupextraction
updateresource.nextfactor = resource.nextfactor
updateresource.gotopriorityexpression = resource.gotopriorityexpression
return updateresource.update_resource(client)
else :
if resource and len(resource) > 0 :
updateresources = [authenticationvserver_authenticationsamlpolicy_binding() for _ in range(len(resource))]
for i in range(len(resource)) :
updateresources[i].name = resource[i].name
updateresources[i].policy = resource[i].policy
updateresources[i].priority = resource[i].priority
updateresources[i].secondary = resource[i].secondary
updateresources[i].groupextraction = resource[i].groupextraction
updateresources[i].nextfactor = resource[i].nextfactor
updateresources[i].gotopriorityexpression = resource[i].gotopriorityexpression
return cls.update_bulk_request(client, updateresources)
except Exception as e :
raise e
@classmethod
def delete(cls, client, resource) :
try :
if resource and type(resource) is not list :
deleteresource = authenticationvserver_authenticationsamlpolicy_binding()
deleteresource.name = resource.name
deleteresource.policy = resource.policy
deleteresource.secondary = resource.secondary
deleteresource.groupextraction = resource.groupextraction
return deleteresource.delete_resource(client)
else :
if resource and len(resource) > 0 :
deleteresources = [authenticationvserver_authenticationsamlpolicy_binding() for _ in range(len(resource))]
for i in range(len(resource)) :
deleteresources[i].name = resource[i].name
deleteresources[i].policy = resource[i].policy
deleteresources[i].secondary = resource[i].secondary
deleteresources[i].groupextraction = resource[i].groupextraction
return cls.delete_bulk_request(client, deleteresources)
except Exception as e :
raise e
@classmethod
def get(cls, service, name) :
ur""" Use this API to fetch authenticationvserver_authenticationsamlpolicy_binding resources.
"""
try :
obj = authenticationvserver_authenticationsamlpolicy_binding()
obj.name = name
response = obj.get_resources(service)
return response
except Exception as e:
raise e
@classmethod
def get_filtered(cls, service, name, filter_) :
ur""" Use this API to fetch filtered set of authenticationvserver_authenticationsamlpolicy_binding resources.
Filter string should be in JSON format, e.g.: "port:80,servicetype:HTTP".
"""
try :
obj = authenticationvserver_authenticationsamlpolicy_binding()
obj.name = name
option_ = options()
option_.filter = filter_
response = obj.getfiltered(service, option_)
return response
except Exception as e:
raise e
@classmethod
def count(cls, service, name) :
ur""" Use this API to count authenticationvserver_authenticationsamlpolicy_binding resources configued on NetScaler.
"""
try :
obj = authenticationvserver_authenticationsamlpolicy_binding()
obj.name = name
option_ = options()
option_.count = True
response = obj.get_resources(service, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e:
raise e
@classmethod
def count_filtered(cls, service, name, filter_) :
ur""" Use this API to count the filtered set of authenticationvserver_authenticationsamlpolicy_binding resources.
Filter string should be in JSON format, e.g.: "port:80,servicetype:HTTP".
"""
try :
obj = authenticationvserver_authenticationsamlpolicy_binding()
obj.name = name
option_ = options()
option_.count = True
option_.filter = filter_
response = obj.getfiltered(service, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e:
raise e
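# Hedged usage sketch (illustrative only; "client" is assumed to be an authenticated
# nitro_service session and the vserver name is made up):
#
#     bindings = authenticationvserver_authenticationsamlpolicy_binding.get(client, "auth_vs1")
#     total = authenticationvserver_authenticationsamlpolicy_binding.count(client, "auth_vs1")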
class authenticationvserver_authenticationsamlpolicy_binding_response(base_response) :
def __init__(self, length=1) :
self.authenticationvserver_authenticationsamlpolicy_binding = []
self.errorcode = 0
self.message = ""
self.severity = ""
self.sessionid = ""
self.authenticationvserver_authenticationsamlpolicy_binding = [authenticationvserver_authenticationsamlpolicy_binding() for _ in range(length)]
| 40.666667
| 430
| 0.759327
|
4a1639c03c4272df2e3f1348fd446f04d01a8fa0
| 1,430
|
py
|
Python
|
examples/pylab_examples/pcolor_demo.py
|
SoftwareDev/mat-plot-lib
|
abaf94859d5ef6e653a4d8a7ce2c59cea1724a57
|
[
"MIT",
"BSD-3-Clause"
] | 16
|
2016-06-14T19:45:35.000Z
|
2020-11-30T19:02:58.000Z
|
lib/mpl_examples/pylab_examples/pcolor_demo.py
|
yingkailiang/matplotlib
|
255a79b106c98c1904489afe6a754e4d943179d6
|
[
"MIT",
"BSD-3-Clause"
] | 7
|
2015-05-08T19:36:25.000Z
|
2015-06-30T15:32:17.000Z
|
lib/mpl_examples/pylab_examples/pcolor_demo.py
|
yingkailiang/matplotlib
|
255a79b106c98c1904489afe6a754e4d943179d6
|
[
"MIT",
"BSD-3-Clause"
] | 6
|
2015-06-05T03:34:06.000Z
|
2022-01-25T09:07:10.000Z
|
"""
Demonstrates similarities between pcolor, pcolormesh, imshow and pcolorfast
for drawing quadrilateral grids.
"""
import matplotlib.pyplot as plt
import numpy as np
# make these smaller to increase the resolution
dx, dy = 0.15, 0.05
# generate 2 2d grids for the x & y bounds
y, x = np.mgrid[slice(-3, 3 + dy, dy),
slice(-3, 3 + dx, dx)]
z = (1 - x / 2. + x ** 5 + y ** 3) * np.exp(-x ** 2 - y ** 2)
# x and y are bounds, so z should be the value *inside* those bounds.
# Therefore, remove the last value from the z array.
z = z[:-1, :-1]
z_min, z_max = -np.abs(z).max(), np.abs(z).max()
plt.subplot(2, 2, 1)
plt.pcolor(x, y, z, cmap='RdBu', vmin=z_min, vmax=z_max)
plt.title('pcolor')
# set the limits of the plot to the limits of the data
plt.axis([x.min(), x.max(), y.min(), y.max()])
plt.colorbar()
plt.subplot(2, 2, 2)
plt.pcolormesh(x, y, z, cmap='RdBu', vmin=z_min, vmax=z_max)
plt.title('pcolormesh')
# set the limits of the plot to the limits of the data
plt.axis([x.min(), x.max(), y.min(), y.max()])
plt.colorbar()
plt.subplot(2, 2, 3)
plt.imshow(z, cmap='RdBu', vmin=z_min, vmax=z_max,
extent=[x.min(), x.max(), y.min(), y.max()],
interpolation='nearest', origin='lower')
plt.title('image (interp. nearest)')
plt.colorbar()
ax = plt.subplot(2, 2, 4)
ax.pcolorfast(x, y, z, cmap='RdBu', vmin=z_min, vmax=z_max)
plt.title('pcolorfast')
plt.colorbar()
plt.show()
| 25.087719
| 75
| 0.632168
|
4a1639c330ae085f7fbdbd6206b78bacc3efb0d7
| 16,552
|
py
|
Python
|
localstack/services/iam/iam_starter.py
|
VanRoy/localstack
|
39f1a1c034ae345f87a1485d717428b59308e6fc
|
[
"Apache-2.0"
] | 1
|
2022-01-05T10:10:38.000Z
|
2022-01-05T10:10:38.000Z
|
localstack/services/iam/iam_starter.py
|
VanRoy/localstack
|
39f1a1c034ae345f87a1485d717428b59308e6fc
|
[
"Apache-2.0"
] | null | null | null |
localstack/services/iam/iam_starter.py
|
VanRoy/localstack
|
39f1a1c034ae345f87a1485d717428b59308e6fc
|
[
"Apache-2.0"
] | null | null | null |
import json
import re
from copy import deepcopy
from urllib.parse import quote
import xmltodict
from moto.iam.models import (
AWSManagedPolicy,
IAMNotFoundException,
InlinePolicy,
Policy,
Role,
aws_managed_policies,
aws_managed_policies_data_parsed,
)
from moto.iam.models import iam_backend as moto_iam_backend
from moto.iam.policy_validation import VALID_STATEMENT_ELEMENTS, IAMPolicyDocumentValidator
from moto.iam.responses import (
GENERIC_EMPTY_TEMPLATE,
GET_ROLE_TEMPLATE,
LIST_ROLES_TEMPLATE,
IamResponse,
)
from localstack import config, constants
from localstack.services.infra import start_moto_server
from localstack.utils.common import short_uid
from localstack.utils.patch import patch
XMLNS_IAM = "https://iam.amazonaws.com/doc/2010-05-08/"
USER_RESPONSE_TEMPLATE = """<{{ action }}UserResponse>
<{{ action }}UserResult>
<User>
<Path>{{ user.path }}</Path>
<UserName>{{ user.name }}</UserName>
<UserId>{{ user.id }}</UserId>
<Arn>{{ user.arn }}</Arn>
<CreateDate>{{ user.created_iso_8601 }}</CreateDate>
<Tags>
{% for tag in user.tags %}<member>
<Key>{{ tag.Key }}</Key>
<Value>{{ tag.Value }}</Value>
</member>{% endfor %}
</Tags>
</User>
</{{ action }}UserResult>
<ResponseMetadata>
<RequestId>{{request_id}}</RequestId>
</ResponseMetadata>
</{{ action }}UserResponse>"""
ADDITIONAL_MANAGED_POLICIES = {
"AWSLambdaExecute": {
"Arn": "arn:aws:iam::aws:policy/AWSLambdaExecute",
"Path": "/",
"CreateDate": "2017-10-20T17:23:10+00:00",
"DefaultVersionId": "v4",
"Document": {
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": ["logs:*"],
"Resource": "arn:aws:logs:*:*:*",
},
{
"Effect": "Allow",
"Action": ["s3:GetObject", "s3:PutObject"],
"Resource": "arn:aws:s3:::*",
},
],
},
"UpdateDate": "2019-05-20T18:22:18+00:00",
}
}
SIMULATE_PRINCIPAL_POLICY_RESPONSE = """
<SimulatePrincipalPolicyResponse xmlns="__xmlns__">
<SimulatePrincipalPolicyResult>
<IsTruncated>false</IsTruncated>
<EvaluationResults>
{% for eval in evaluations %}
<member>
<MatchedStatements>
<member>
<SourcePolicyId>PolicyInputList.1</SourcePolicyId>
<EndPosition>
<Column>4</Column>
<Line>7</Line>
</EndPosition>
<StartPosition>
<Column>16</Column>
<Line>3</Line>
</StartPosition>
</member>
</MatchedStatements>
<MissingContextValues/>
<EvalResourceName>{{eval.resourceName}}</EvalResourceName>
<EvalDecision>{{eval.decision}}</EvalDecision>
<EvalActionName>{{eval.actionName}}</EvalActionName>
</member>
{% endfor %}
</EvaluationResults>
</SimulatePrincipalPolicyResult>
<ResponseMetadata>
<RequestId>004d7059-4c14-11e5-b121-bd8c7EXAMPLE</RequestId>
</ResponseMetadata>
</SimulatePrincipalPolicyResponse>""".replace(
"__xmlns__", XMLNS_IAM
)
class AWSManagedPolicyUSGov(AWSManagedPolicy):
# Fix missing regions in managed policies (e.g., aws-us-gov). Note: make sure to keep at global scope here
# TODO: possibly find a more efficient way for this - e.g., lazy loading of policies in special regions
@property
def arn(self):
return "arn:aws-us-gov:iam::aws:policy{0}{1}".format(self.path, self.name)
def apply_patches():
# Add missing managed polices
aws_managed_policies.extend(
[AWSManagedPolicy.from_data(k, v) for k, v in ADDITIONAL_MANAGED_POLICIES.items()]
)
if "Principal" not in VALID_STATEMENT_ELEMENTS:
VALID_STATEMENT_ELEMENTS.append("Principal")
@patch(IAMPolicyDocumentValidator._validate_resource_syntax, pass_target=False)
def _validate_resource_syntax(statement, *args, **kwargs):
# Note: Serverless generates policies without "Resource" section (only "Effect"/"Principal"/"Action"),
# which causes several policy validators in moto to fail
if statement.get("Resource") in [None, [None]]:
statement["Resource"] = ["*"]
# patch get_user to include tags
@patch(IamResponse.get_user)
def iam_response_get_user(fn, self):
result = fn(self)
regex = r"(.*<UserName>\s*)([^\s]+)(\s*</UserName>.*)"
regex2 = r"(.*<UserId>\s*)([^\s]+)(\s*</UserId>.*)"
flags = re.MULTILINE | re.DOTALL
user_name = re.match(regex, result, flags=flags).group(2)
# replace default user id/name in response
if config.TEST_IAM_USER_NAME:
result = re.sub(regex, r"\g<1>%s\3" % config.TEST_IAM_USER_NAME, result)
user_name = config.TEST_IAM_USER_NAME
if config.TEST_IAM_USER_ID:
result = re.sub(regex2, r"\g<1>%s\3" % config.TEST_IAM_USER_ID, result)
user = moto_iam_backend.users.get(user_name)
if not user:
return result
tags = moto_iam_backend.tagger.list_tags_for_resource(user.arn)
if tags and "<Tags>" not in result:
tags_str = "".join(
[
"<member><Key>%s</Key><Value>%s</Value></member>" % (t["Key"], t["Value"])
for t in tags["Tags"]
]
)
result = result.replace("</Arn>", "</Arn><Tags>%s</Tags>" % tags_str)
return result
# patch delete_policy
@patch(IamResponse.delete_policy, pass_target=False)
def iam_response_delete_policy(self):
policy_arn = self._get_param("PolicyArn")
if moto_iam_backend.managed_policies.get(policy_arn):
moto_iam_backend.managed_policies.pop(policy_arn, None)
template = self.response_template(GENERIC_EMPTY_TEMPLATE)
return template.render(name="DeletePolicy")
else:
raise IAMNotFoundException("Policy {0} was not found.".format(policy_arn))
# patch detach_role_policy
@patch(moto_iam_backend.detach_role_policy, pass_target=False)
def iam_backend_detach_role_policy(policy_arn, role_name):
try:
role = moto_iam_backend.get_role(role_name)
policy = role.managed_policies[policy_arn]
policy.detach_from(role)
except KeyError:
raise IAMNotFoundException("Policy {0} was not found.".format(policy_arn))
# patch/implement simulate_principal_policy
def iam_response_simulate_principal_policy(self):
def build_evaluation(action_name, resource_name, policy_statements):
for statement in policy_statements:
# TODO Implement evaluation logic here
if (
action_name in statement["Action"]
and resource_name in statement["Resource"]
and statement["Effect"] == "Allow"
):
return {
"actionName": action_name,
"resourceName": resource_name,
"decision": "allowed",
"matchedStatements": [],
}
return {
"actionName": action_name,
"resourceName": resource_name,
"decision": "explicitDeny",
}
policy = moto_iam_backend.get_policy(self._get_param("PolicySourceArn"))
policy_statements = json.loads(policy.document)["Statement"]
actions = self._get_multi_param("ActionNames.member")
resource_arns = self._get_multi_param("ResourceArns.member")
evaluations = []
for action in actions:
for resource_arn in resource_arns:
evaluations.append(build_evaluation(action, resource_arn, policy_statements))
template = self.response_template(SIMULATE_PRINCIPAL_POLICY_RESPONSE)
return template.render(evaluations=evaluations)
if not hasattr(IamResponse, "simulate_principal_policy"):
IamResponse.simulate_principal_policy = iam_response_simulate_principal_policy
# patch policy __init__ to set document as attribute
@patch(Policy.__init__)
def policy__init__(
fn, self, name, default_version_id=None, description=None, document=None, **kwargs
):
fn(self, name, default_version_id, description, document, **kwargs)
self.document = document
# patch list_roles
@patch(IamResponse.list_roles, pass_target=False)
def iam_response_list_roles(self):
roles = moto_iam_backend.get_roles()
items = []
for role in roles:
item = deepcopy(role)
item.assume_role_policy_document = quote(
json.dumps(item.assume_role_policy_document or {})
)
items.append(item)
prefix = self._get_param("PathPrefix")
if prefix:
filtered_roles = []
for role in roles:
if role.path.startswith(prefix):
filtered_roles.append(role)
items = filtered_roles
template = self.response_template(LIST_ROLES_TEMPLATE)
return template.render(roles=items)
# patch unapply_policy
@patch(InlinePolicy.unapply_policy)
def inline_policy_unapply_policy(fn, self, backend):
try:
fn(self, backend)
except Exception:
# The role can actually be deleted before the policy is deleted in CloudFormation
pass
# support update_group
def update_group(self):
group_name = self._get_param("GroupName")
new_path = self._get_param("NewPath")
new_group_name = self._get_param("NewGroupName") or group_name
group = moto_iam_backend.get_group(group_name)
group.path = new_path
group.name = new_group_name
moto_iam_backend.groups[new_group_name] = moto_iam_backend.groups.pop(group_name)
return ""
# TODO: potentially extend @patch utility to allow "conditional" patches like below ...
if not hasattr(IamResponse, "update_group"):
IamResponse.update_group = update_group
# support instance profile tags
def list_instance_profile_tags(self):
profile_name = self._get_param("InstanceProfileName")
profile = moto_iam_backend.get_instance_profile(profile_name)
result = {
"ListInstanceProfileTagsResponse": {
"@xmlns": XMLNS_IAM,
"ListInstanceProfileTagsResult": {"Tags": profile.tags},
}
}
return xmltodict.unparse(result)
if not hasattr(IamResponse, "list_instance_profile_tags"):
IamResponse.list_instance_profile_tags = list_instance_profile_tags
# patch/implement tag_instance_profile
def tag_instance_profile(self):
profile_name = self._get_param("InstanceProfileName")
tags = self._get_multi_param("Tags.member")
tags = {tag["Key"]: tag["Value"] for tag in tags or []}
profile = moto_iam_backend.get_instance_profile(profile_name)
profile.tags.update(tags)
return ""
if not hasattr(IamResponse, "tag_instance_profile"):
IamResponse.tag_instance_profile = tag_instance_profile
# patch/implement untag_instance_profile
def untag_instance_profile(self):
profile_name = self._get_param("InstanceProfileName")
tag_keys = self._get_multi_param("TagKeys.member")
profile = moto_iam_backend.get_instance_profile(profile_name)
profile.tags = {k: v for k, v in profile.tags.items() if k not in tag_keys}
return ""
if not hasattr(IamResponse, "untag_instance_profile"):
IamResponse.untag_instance_profile = untag_instance_profile
# support policy tags
def tag_policy(self):
policy_arn = self._get_param("PolicyArn")
tags = self._get_multi_param("Tags.member")
tags = {tag["Key"]: tag["Value"] for tag in tags or []}
policy = moto_iam_backend.get_policy(policy_arn)
policy.tags.update(tags)
return ""
if not hasattr(IamResponse, "tag_policy"):
IamResponse.tag_policy = tag_policy
def untag_policy(self):
policy_arn = self._get_param("PolicyArn")
tag_keys = self._get_multi_param("TagKeys.member")
policy = moto_iam_backend.get_policy(policy_arn)
policy.tags = {k: v for k, v in policy.tags.items() if k not in tag_keys}
return ""
if not hasattr(IamResponse, "untag_policy"):
IamResponse.untag_policy = untag_policy
# support service linked roles
if not hasattr(IamResponse, "create_service_linked_role"):
@property
def role_arn(self):
return getattr(self, "service_linked_role_arn", None) or role_arn_orig.__get__(self)
role_arn_orig = Role.arn
Role.arn = role_arn
def create_service_linked_role(self):
name_prefix = "service-linked-role"
service_name = self._get_param("AWSServiceName")
description = self._get_param("Description")
# TODO: how to support "CustomSuffix" API request parameter?
policy_doc = json.dumps(
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Principal": {"Service": service_name},
"Action": "sts:AssumeRole",
}
],
}
)
role = moto_iam_backend.create_role(
role_name="%s-%s" % (name_prefix, short_uid()),
assume_role_policy_document=policy_doc,
path="/",
permissions_boundary="",
description=description,
tags={},
max_session_duration=3600,
)
template = self.response_template(GET_ROLE_TEMPLATE)
role.service_linked_role_arn = "arn:aws:iam::{0}:role/aws-service-role/{1}/{2}".format(
constants.TEST_AWS_ACCOUNT_ID, service_name, role.name
)
result = re.sub(
r"<(/)?GetRole",
r"<\1CreateServiceLinkedRole",
template.render(role=role),
)
return result
IamResponse.create_service_linked_role = create_service_linked_role
if not hasattr(IamResponse, "delete_service_linked_role"):
def delete_service_linked_role(self):
role_name = self._get_param("RoleName")
moto_iam_backend.delete_role(role_name)
result = {
"DeleteServiceLinkedRoleResponse": {
"@xmlns": XMLNS_IAM,
"DeleteServiceLinkedRoleResult": {"DeletionTaskId": short_uid()},
}
}
return xmltodict.unparse(result)
IamResponse.delete_service_linked_role = delete_service_linked_role
if not hasattr(IamResponse, "get_service_linked_role_deletion_status"):
def get_service_linked_role_deletion_status(self):
result = {
"GetServiceLinkedRoleDeletionStatusResponse": {
"@xmlns": XMLNS_IAM,
"GetServiceLinkedRoleDeletionStatusResult": {"Status": "SUCCEEDED"},
}
}
return xmltodict.unparse(result)
IamResponse.get_service_linked_role_deletion_status = (
get_service_linked_role_deletion_status
)
managed_policies = moto_iam_backend.managed_policies
if "arn:aws-us-gov:iam::aws:policy/AmazonRDSFullAccess" not in managed_policies:
for name, data in aws_managed_policies_data_parsed.items():
policy = AWSManagedPolicyUSGov.from_data(name, data)
if policy.arn not in moto_iam_backend.managed_policies:
moto_iam_backend.managed_policies[policy.arn] = policy
def start_iam(port=None, asynchronous=False, update_listener=None):
port = port or config.PORT_IAM
apply_patches()
return start_moto_server(
"iam",
port,
name="IAM",
asynchronous=asynchronous,
update_listener=update_listener,
)
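# Illustrative sketch (not part of the original module): exercising the patched
# instance-profile tagging operations through boto3 against a locally running
# IAM endpoint. The endpoint URL and dummy credentials are assumptions for local testing.
def _example_instance_profile_tag_roundtrip():
    import boto3

    iam = boto3.client(
        "iam",
        endpoint_url="http://localhost:4566",  # assumed local edge endpoint
        aws_access_key_id="test",
        aws_secret_access_key="test",
        region_name="us-east-1",
    )
    iam.create_instance_profile(InstanceProfileName="demo-profile")
    iam.tag_instance_profile(
        InstanceProfileName="demo-profile",
        Tags=[{"Key": "env", "Value": "dev"}],
    )
    tags = iam.list_instance_profile_tags(InstanceProfileName="demo-profile")["Tags"]
    iam.untag_instance_profile(InstanceProfileName="demo-profile", TagKeys=["env"])
    return tags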
| 36.378022
| 110
| 0.615636
|
4a163a994467f842fc7c92c8fb4d308e24ed3d7c
| 1,156
|
py
|
Python
|
main/slang/template.py
|
RoastVeg/cports
|
803c7f07af341eb32f791b6ec1f237edb2764bd5
|
[
"BSD-2-Clause"
] | null | null | null |
main/slang/template.py
|
RoastVeg/cports
|
803c7f07af341eb32f791b6ec1f237edb2764bd5
|
[
"BSD-2-Clause"
] | null | null | null |
main/slang/template.py
|
RoastVeg/cports
|
803c7f07af341eb32f791b6ec1f237edb2764bd5
|
[
"BSD-2-Clause"
] | null | null | null |
pkgname = "slang"
pkgver = "2.3.2"
pkgrel = 0
build_style = "gnu_configure"
make_cmd = "gmake"
make_dir = "."
hostmakedepends = ["gmake", "pkgconf", "ncurses-devel"]
makedepends = [
"ncurses-devel", "zlib-devel", "pcre-devel",
"libpng-devel", "oniguruma-devel"
]
pkgdesc = "S-Lang programming library"
maintainer = "q66 <q66@chimera-linux.org>"
license = "GPL-2.0-or-later"
url = "https://www.jedsoft.org/slang"
source = f"https://www.jedsoft.org/releases/{pkgname}/{pkgname}-{pkgver}.tar.bz2"
sha256 = "fc9e3b0fc4f67c3c1f6d43c90c16a5c42d117b8e28457c5b46831b8b5d3ae31a"
# racey
options = ["!parallel"]
def init_configure(self):
sroot = str(self.profile().sysroot / "usr")
self.configure_args += [
f"--with-z={sroot}", f"--with-pcre={sroot}",
f"--with-png={sroot}", f"--with-onig={sroot}"
]
# force it to use CFLAGS too during linking
self.configure_env = {
"LDFLAGS": self.get_cflags(shell = True) + " " + \
self.get_ldflags(shell = True)
}
def post_install(self):
self.install_license("COPYING")
@subpackage("slang-devel")
def _devel(self):
return self.default_devel()
| 29.641026
| 81
| 0.656574
|
4a163aad983f267602f530c6aa186eb0402edadd
| 4,476
|
py
|
Python
|
python/tests/test_logger_interceptors.py
|
meyskens/lookout-sdk
|
805d0d61ca97120a257283780790ff2458a6ef29
|
[
"Apache-2.0"
] | 5
|
2018-12-17T11:22:31.000Z
|
2019-03-14T02:50:43.000Z
|
python/tests/test_logger_interceptors.py
|
meyskens/lookout-sdk
|
805d0d61ca97120a257283780790ff2458a6ef29
|
[
"Apache-2.0"
] | 64
|
2018-08-31T10:41:36.000Z
|
2019-12-14T15:01:13.000Z
|
python/tests/test_logger_interceptors.py
|
meyskens/lookout-sdk
|
805d0d61ca97120a257283780790ff2458a6ef29
|
[
"Apache-2.0"
] | 16
|
2018-08-30T20:33:55.000Z
|
2021-08-09T05:37:06.000Z
|
from concurrent.futures import ThreadPoolExecutor
import unittest
import grpc
from lookout.sdk import pb
from lookout.sdk.service_data import DataStub
from lookout.sdk.grpc import create_channel, create_server, \
LogUnaryServerInterceptor, \
LogStreamServerInterceptor, \
LogUnaryClientInterceptor, \
LogStreamClientInterceptor
from lookout.sdk import event_pb2
from lookout.sdk.test import mixins
class DummyAnalyzer(pb.AnalyzerServicer):
def notify_review_event(self, request, context):
return pb.EventResponse()
def notify_push_event(self, request, context):
return pb.EventResponse()
class DummyDataServicer(pb.DataServicer):
def GetChanges(self, request, context):
return pb.Change()
def GetFiles(self, request, context):
return pb.File()
class TestServerLoggerInterceptors(mixins.TestWithRunningServicerMixin,
unittest.TestCase):
def build_server(self):
server = create_server(10, interceptors=[
LogUnaryServerInterceptor(self._tracker.unary),
LogStreamServerInterceptor(self._tracker.stream),
])
pb.add_analyzer_to_server(DummyAnalyzer(), server)
return server
def test_interceptors_called(self):
with create_channel(self._target) as channel:
stub = pb.AnalyzerStub(channel)
stub.NotifyReviewEvent(event_pb2.ReviewEvent())
self.assertEqual(self._tracker.counter, {"unary": 2, "stream": 0})
first_unary = self._tracker.logs[0]
first_log_fields = first_unary[0]
self.assertEqual(first_log_fields, {
"system": "grpc",
"span.kind": "server",
"grpc.service": "pb.Analyzer",
"grpc.method": "NotifyReviewEvent",
})
self.assertEqual(first_unary[1], "gRPC unary server call started")
second_unary = self._tracker.logs[1]
second_log_fields = second_unary[0]
self.assertEqual(set(second_log_fields.keys()),
{"system", "span.kind", "grpc.service", "grpc.method",
"grpc.code", "grpc.start_time", "duration"})
second_log_fields.pop("grpc.start_time")
second_log_fields.pop("duration")
self.assertEqual(second_log_fields, {
"system": "grpc",
"span.kind": "server",
"grpc.service": "pb.Analyzer",
"grpc.method": "NotifyReviewEvent",
"grpc.code": "OK"
})
self.assertEqual(second_unary[1], "gRPC unary server call finished")
class TestClientLoggerInterceptors(mixins.TestWithRunningServicerMixin,
unittest.TestCase):
def build_server(self):
server = grpc.server(ThreadPoolExecutor(max_workers=10))
pb.add_dataservicer_to_server(DummyDataServicer(), server)
return server
def test_interceptors_called(self):
with create_channel(self._target, interceptors=[
LogUnaryClientInterceptor(self._tracker.unary),
LogStreamClientInterceptor(self._tracker.stream),
]) as channel:
stub = DataStub(channel)
stub.get_changes(None, pb.ChangesRequest())
self.assertEqual(self._tracker.counter, {"unary": 0, "stream": 2})
first_streaming = self._tracker.logs[0]
first_log_fields = first_streaming[0]
self.assertEqual(first_log_fields, {
"system": "grpc",
"span.kind": "client",
"grpc.service": "pb.Data",
"grpc.method": "GetChanges",
})
self.assertEqual(first_streaming[1],
"gRPC streaming client call started")
second_streaming = self._tracker.logs[1]
second_log_fields = second_streaming[0]
self.assertEqual(set(second_log_fields.keys()),
{"system", "span.kind", "grpc.service", "grpc.method",
"grpc.code", "grpc.start_time", "duration"})
second_log_fields.pop("grpc.start_time")
second_log_fields.pop("duration")
self.assertEqual(second_log_fields, {
"system": "grpc",
"span.kind": "client",
"grpc.service": "pb.Data",
"grpc.method": "GetChanges",
"grpc.code": "OK"
})
self.assertEqual(second_streaming[1],
"gRPC streaming client call finished")
| 34.430769
| 79
| 0.621314
|
4a163ae1135a7594307b5a8d3fa3c8d961198b4c
| 520
|
py
|
Python
|
Python/Exercícios_Python/034_maior_e_menor_valor.py
|
vdonoladev/aprendendo-programacao
|
83abbcd6701b2105903b28fd549738863418cfb8
|
[
"MIT"
] | null | null | null |
Python/Exercícios_Python/034_maior_e_menor_valor.py
|
vdonoladev/aprendendo-programacao
|
83abbcd6701b2105903b28fd549738863418cfb8
|
[
"MIT"
] | null | null | null |
Python/Exercícios_Python/034_maior_e_menor_valor.py
|
vdonoladev/aprendendo-programacao
|
83abbcd6701b2105903b28fd549738863418cfb8
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""034 - Maior e menor valor
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1tR5I-76SVFX8tPZ8l2JrakHqZregyIRU
"""
primeiro = int(input('Digite o primeiro número: '))
segundo = int(input('Digite o segundo número: '))
terceiro = int(input('Digite o terceiro número: '))
numeros = [primeiro, segundo, terceiro]
print('O maior valor digitado foi {}'.format(max(numeros)))
print('O menor valor digitado foi {}'.format(min(numeros)))
| 34.666667
| 77
| 0.725
|
4a163c31d64452f163118d42d84d5f171f047ce0
| 5,135
|
py
|
Python
|
tasrif/processing_pipeline/pandas/json_normalize_operator.py
|
qcri/tasrif
|
327bc1eccb8f8e11d8869ba65a7c72ad038aa094
|
[
"BSD-3-Clause"
] | 20
|
2021-12-06T10:41:54.000Z
|
2022-03-13T16:25:43.000Z
|
tasrif/processing_pipeline/pandas/json_normalize_operator.py
|
qcri/tasrif
|
327bc1eccb8f8e11d8869ba65a7c72ad038aa094
|
[
"BSD-3-Clause"
] | 33
|
2021-12-06T08:27:18.000Z
|
2022-03-14T05:07:53.000Z
|
tasrif/processing_pipeline/pandas/json_normalize_operator.py
|
qcri/tasrif
|
327bc1eccb8f8e11d8869ba65a7c72ad038aa094
|
[
"BSD-3-Clause"
] | 2
|
2022-02-07T08:06:48.000Z
|
2022-02-14T07:13:42.000Z
|
"""
Normalize semi-structured JSON data into a flat table.
"""
import warnings
import pandas as pd
from tasrif.processing_pipeline import PandasOperator
class JsonNormalizeOperator(PandasOperator):
"""
Examples
--------
>>> from tasrif.processing_pipeline.pandas import JsonNormalizeOperator
>>> data = [
... {
... "date": "2019-05-02",
... "main_sleep": True,
... "data": {
... "total_sleep": 365,
... "time_series": [{
... "dateTime" : "2019-05-02T00:18:00.000",
... "level" : "light",
... "seconds" : 2130
... },{
... "dateTime" : "2019-05-02T00:53:30.000",
... "level" : "deep",
... "seconds" : 540
... },{
... "dateTime" : "2019-05-02T01:02:30.000",
... "level" : "light",
... "seconds" : 870
... },{
... "dateTime" : "2019-05-02T01:17:00.000",
... "level" : "rem",
... "seconds" : 660
... },{
... "dateTime" : "2019-05-02T01:28:00.000",
... "level" : "light",
... "seconds" : 1230
... },{
... "dateTime" : "2019-05-02T01:48:30.000",
... "level" : "wake",
... "seconds" : 210
... }]
... }
... },
... {
... "date": "2019-04-29",
... "main_sleep": True,
... "data": {
... "total_sleep": 456,
... "time_series": [{
... "dateTime" : "2019-04-29T23:46:00.000",
... "level" : "wake",
... "seconds" : 300
... },{
... "dateTime" : "2019-04-29T23:51:00.000",
... "level" : "light",
... "seconds" : 660
... },{
... "dateTime" : "2019-04-30T00:02:00.000",
... "level" : "deep",
... "seconds" : 450
... },{
... "dateTime" : "2019-04-30T00:09:30.000",
... "level" : "light",
... "seconds" : 2070
... }]
... }
... }
... ]
>>>
>>> op = JsonNormalizeOperator(record_path=['data', 'time_series'],
... meta=['date', 'main_sleep', ['data', 'total_sleep']])
>>>
>>> df = op.process(data)
>>> print(df)
[ dateTime level seconds date main_sleep data.total_sleep
0 2019-05-02T00:18:00.000 light 2130 2019-05-02 True 365
1 2019-05-02T00:53:30.000 deep 540 2019-05-02 True 365
2 2019-05-02T01:02:30.000 light 870 2019-05-02 True 365
3 2019-05-02T01:17:00.000 rem 660 2019-05-02 True 365
4 2019-05-02T01:28:00.000 light 1230 2019-05-02 True 365
5 2019-05-02T01:48:30.000 wake 210 2019-05-02 True 365
6 2019-04-29T23:46:00.000 wake 300 2019-04-29 True 456
7 2019-04-29T23:51:00.000 light 660 2019-04-29 True 456
8 2019-04-30T00:02:00.000 deep 450 2019-04-29 True 456
9 2019-04-30T00:09:30.000 light 2070 2019-04-29 True 456 ]
"""
def __init__(self, **kwargs):
"""
Initializes the operator
Args:
**kwargs:
keyword arguments passed through to pandas.json_normalize
"""
super().__init__(kwargs)
self.kwargs = kwargs
def _process(self, *data_frames):
"""Process the passed data using the processing configuration specified
in the constructor
Args:
data_frames (list of pd.DataFrame):
Variable number of arrays of python dictionaries (representing JSON data) to be processed
Returns:
data_frames (list of pd.DataFrame):
Processed data frames
"""
processed = []
for dataframe in data_frames:
if (
isinstance(dataframe, list)
and len(dataframe) == 1
and isinstance(dataframe[0], list)
):
dataframe = dataframe[0]
dataframe = pd.json_normalize(dataframe, **self.kwargs)
processed.append(dataframe)
return processed
def _validate(self, *data_frames):
"""
Validation hook that is run before any processing happens. Checks if
any input is not a dict or list.
"""
for data_frame in data_frames:
if not isinstance(data_frame, (dict, list)):
warnings.warn('One or more inputs are not dict in JsonNormalizeOperator.')
| 36.678571
| 103
| 0.42999
|
4a163c3b81574feeb16084479b4df6f5eff34b61
| 2,319
|
py
|
Python
|
kalman_test_script.py
|
cunningham-lab/cyclic-gps
|
b11fa959afb0dc50e83e5c8c679981a74dc28f2a
|
[
"MIT"
] | null | null | null |
kalman_test_script.py
|
cunningham-lab/cyclic-gps
|
b11fa959afb0dc50e83e5c8c679981a74dc28f2a
|
[
"MIT"
] | null | null | null |
kalman_test_script.py
|
cunningham-lab/cyclic-gps
|
b11fa959afb0dc50e83e5c8c679981a74dc28f2a
|
[
"MIT"
] | 1
|
2022-02-19T22:36:14.000Z
|
2022-02-19T22:36:14.000Z
|
import torch
from torch.utils.data import DataLoader
import pytorch_lightning as pl
import scipy as sp
import scipy.ndimage
from cyclic_gps.models import LEGFamily
from cyclic_gps.data_utils import time_series_dataset
import matplotlib.pyplot as plt
from cyclic_gps.kalman import init_kalman_filter, get_state_estimates
import filterpy
from filterpy.kalman import KalmanFilter
num_datapoints = 100
sample1_ts = torch.empty(num_datapoints)
#sample1_ts = torch.cumsum(sample1_ts.exponential_(lambd=1) + 0.01, dim=0)
sample1_ts = torch.cumsum(torch.ones(num_datapoints), dim=0) #starting with uniform time steps
sample1_vals = torch.tensor(
sp.ndimage.gaussian_filter1d(torch.randn(num_datapoints), 10, axis=0)[:, None]
)
sample2_vals = torch.tensor(
sp.ndimage.gaussian_filter1d(torch.randn(num_datapoints), 10, axis=0)[:, None]
)
vals = torch.cat([sample1_vals, sample2_vals], dim=-1)
assert vals.shape == (num_datapoints, 2)
assert sample1_ts.shape == (num_datapoints,)
# plt.scatter(sample1_ts, vals[:, 0])
# plt.scatter(sample1_ts, vals[:, 1])
# plt.show()
dataset = time_series_dataset(sample1_ts.unsqueeze(0), vals.unsqueeze(0))
example = dataset[0]
assert torch.allclose(example[0], sample1_ts.unsqueeze(0)) # we're getting our data plus one batch element
dl = DataLoader(dataset=dataset, batch_size=1)
RANK = 5
MAX_EPOCHS = 1000 #for testing purposes
LEG_model = LEGFamily(rank=RANK, obs_dim=vals.shape[1], train=True)
# logger = pl.loggers.TensorBoardLogger("tb_logs", name="first_pass_model")
# trainer = pl.Trainer(max_epochs=MAX_EPOCHS, logger=logger, log_every_n_steps=1)
# trainer.fit(model=LEG_model, train_dataloaders=dl)
TIME_STEP = 1 #time step for kalman predictions
kf = init_kalman_filter(LEG_model, TIME_STEP)
kf_state_ests = torch.from_numpy(get_state_estimates(kf, vals.numpy())).float().squeeze(-1)
leg_state_ests, _ = LEG_model.compute_insample_posterior(ts=sample1_ts, xs=vals)
print(kf_state_ests.shape, leg_state_ests.shape)
print(kf_state_ests[0], leg_state_ests[0])
reconstructed_data_kf = kf_state_ests @ LEG_model.B.T
reconstructed_data_leg = leg_state_ests @ LEG_model.B.T
print("KF LOSS:")
print(torch.norm(reconstructed_data_kf - vals))
print("LEG LOSS:")
print(torch.norm(reconstructed_data_leg - vals))
#assert torch.allclose(kf_state_ests, leg_state_ests)
| 34.102941
| 106
| 0.787408
|
4a163e6197c65d806b523c68f143680f891bca4d
| 1,172
|
py
|
Python
|
common/exploration_strategies/base.py
|
SharathRaparthy/efficient-mrl
|
a1667019bc9a849639fba55825fef865967b6d7d
|
[
"MIT"
] | 1
|
2020-01-17T00:26:25.000Z
|
2020-01-17T00:26:25.000Z
|
common/exploration_strategies/base.py
|
SharathRaparthy/efficient-mrl
|
a1667019bc9a849639fba55825fef865967b6d7d
|
[
"MIT"
] | null | null | null |
common/exploration_strategies/base.py
|
SharathRaparthy/efficient-mrl
|
a1667019bc9a849639fba55825fef865967b6d7d
|
[
"MIT"
] | null | null | null |
import abc
from common.policies.base import ExplorationPolicy
class ExplorationStrategy(object, metaclass=abc.ABCMeta):
@abc.abstractmethod
def get_action(self, t, observation, policy, **kwargs):
pass
def reset(self):
pass
class RawExplorationStrategy(ExplorationStrategy, metaclass=abc.ABCMeta):
@abc.abstractmethod
def get_action_from_raw_action(self, action, **kwargs):
pass
def get_action(self, t, policy, *args, **kwargs):
action, agent_info = policy.get_action(*args, **kwargs)
return self.get_action_from_raw_action(action, t=t), agent_info
def reset(self):
pass
class PolicyWrappedWithExplorationStrategy(ExplorationPolicy):
def __init__(
self,
exploration_strategy: ExplorationStrategy,
policy,
):
self.es = exploration_strategy
self.policy = policy
self.t = 0
def set_num_steps_total(self, t):
self.t = t
def get_action(self, *args, **kwargs):
return self.es.get_action(self.t, self.policy, *args, **kwargs)
def reset(self):
self.es.reset()
self.policy.reset()
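# Minimal sketch (not from the original module): one way a concrete
# RawExplorationStrategy could be written. The noise scale and action bounds
# below are illustrative assumptions.
import numpy as np


class GaussianNoiseStrategy(RawExplorationStrategy):
    def __init__(self, sigma=0.1, low=-1.0, high=1.0):
        self.sigma = sigma
        self.low = low
        self.high = high

    def get_action_from_raw_action(self, action, **kwargs):
        # perturb the deterministic policy action and keep it inside the bounds
        noisy = action + np.random.normal(0.0, self.sigma, size=np.shape(action))
        return np.clip(noisy, self.low, self.high)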
| 24.93617
| 73
| 0.656143
|
4a163efa6a87d681bed9d997361ac7de5218c862
| 6,275
|
py
|
Python
|
pykrita/layer_extra_properties/ui/data_editors/dict_editor.py
|
akirfin/krita_python_fun
|
74173d140b39f7f80f43f9474381e4adfa3b5f01
|
[
"MIT"
] | 1
|
2021-10-01T00:25:43.000Z
|
2021-10-01T00:25:43.000Z
|
pykrita/layer_extra_properties/ui/data_editors/dict_editor.py
|
akirfin/krita_python_fun
|
74173d140b39f7f80f43f9474381e4adfa3b5f01
|
[
"MIT"
] | null | null | null |
pykrita/layer_extra_properties/ui/data_editors/dict_editor.py
|
akirfin/krita_python_fun
|
74173d140b39f7f80f43f9474381e4adfa3b5f01
|
[
"MIT"
] | null | null | null |
from collections import OrderedDict as oDict
try:
from collections.abc import MutableMapping
except:
from collections import MutableMapping
from PyQt5.QtCore import Qt
from PyQt5.QtCore import pyqtSlot as QSlot
from PyQt5.QtCore import pyqtSignal as QSignal
from PyQt5.QtCore import pyqtProperty as QProperty
from PyQt5.QtWidgets import \
QWidget, QFormLayout, QLabel
from .abc_editor_container import AbcEditorContainer
from .data_editor_mapper import data_editor_mapper
class DictEditor(AbcEditorContainer, MutableMapping):
def __init__(self, parent=None):
AbcEditorContainer.__init__(self, parent=parent)
self._label_width = 110
def create_ui(self): # super calls this create_ui
AbcEditorContainer.create_ui(self) # this calls super create_ui
content = QWidget()
content.setObjectName("dict_content")
content.setStyleSheet("""
.QWidget#dict_content {
background-color: palette(window);
border: 1px outset rgba(0, 0, 0, 10%);
border-radius: 3px;
}""")
self._content_layout = QFormLayout()
self._content_layout.setContentsMargins(40, 10, 4, 10)
content.setLayout(self._content_layout)
self.set_content(content)
def __len__(self):
return self._content_layout.rowCount()
def __iter__(self):
layout = self._content_layout
for row in range(layout.rowCount()):
span_item = layout.itemAt(row, QFormLayout.SpanningRole)
if span_item is not None:
yield span_item.widget().objectName()
else:
field_item = layout.itemAt(row, QFormLayout.FieldRole)
yield field_item.widget().objectName()
def __getitem__(self, key):
layout = self._content_layout
for row in range(layout.rowCount()):
editor = None
span_item = layout.itemAt(row, QFormLayout.SpanningRole)
if span_item is not None:
editor = span_item.widget()
else:
field_item = layout.itemAt(row, QFormLayout.FieldRole)
editor = field_item.widget()
if (editor is not None) and (editor.objectName() == key):
return editor
raise KeyError("{key!r}".format(**locals()))
def __setitem__(self, key, editor):
layout = self._content_layout
for row in range(layout.rowCount()):
found = False
span_item = layout.itemAt(row, QFormLayout.SpanningRole)
if span_item is not None:
if span_item.widget().objectName() == key:
found = True
else:
field_item = layout.itemAt(row, QFormLayout.FieldRole)
if field_item.widget().objectName() == key:
found = True
if found:
layout.removeRow(row)
if isinstance(editor, AbcEditorContainer):
editor.title = key
editor.setObjectName(key)
layout.insertRow(row, editor)
else:
editor.setObjectName(key)
layout.insertRow(row, key, editor)
label_item = layout.itemAt(row, QFormLayout.LabelRole)
label_widget = label_item.widget()
label_widget.setAlignment(Qt.AlignRight | Qt.AlignVCenter)
label_widget.setFixedWidth(self._label_width) # elide right missing...
break
else:
if isinstance(editor, AbcEditorContainer):
editor.title = key
editor.setObjectName(key)
layout.addRow(editor)
else:
editor.setObjectName(key)
key_label = QLabel(key)
key_label.setAlignment(Qt.AlignRight | Qt.AlignVCenter)
key_label.setFixedWidth(self._label_width) # elide right missing...
layout.addRow(key_label, editor)
def __delitem__(self, key):
layout = self._content_layout
for row in range(layout.rowCount()):
span_item = layout.itemAt(row, QFormLayout.SpanningRole)
if span_item is not None:
if span_item.widget().objectName() == key:
layout.removeRow(row)
break
else:
field_item = layout.itemAt(row, QFormLayout.FieldRole)
if field_item.widget().objectName() == key:
layout.removeRow(row)
break
else:
raise KeyError("{key!r}".format(**locals()))
def _refresh_rows(self):
layout = self._content_layout
for row in range(layout.rowCount()):
label_item = layout.itemAt(row, QFormLayout.LabelRole)
if label_item is not None:
label_widget = label_item.widget()
label_widget.setAlignment(Qt.AlignRight | Qt.AlignVCenter)
label_widget.setFixedWidth(self._label_width) # elide right missing...
def get_label_width(self):
return self._label_width
@QSlot(int)
def set_label_width(self, new_label_width):
new_label_width = int(new_label_width)
old_label_width = self.get_label_width()
if new_label_width != old_label_width:
self._label_width = new_label_width
self._refresh_rows()
self.label_width_changed.emit(self.get_label_width())
label_width_changed = QSignal(int)
label_width = QProperty(int, fget=get_label_width, fset=set_label_width, notify=label_width_changed)
def get_data(self):
return oDict((name, data_editor.data) for name, data_editor in self.items())
@QSlot(oDict)
def set_data(self, new_data):
self.clear()
for key, value in new_data.items():
self[key] = data_editor_mapper.create_editor(value, title=key)
self.data_changed.emit(self.get_data())
data_changed = QSignal(oDict)
data = QProperty(oDict, fget=get_data, fset=set_data, notify=data_changed, user=True)
data_editor_mapper.register(oDict, DictEditor)
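# Usage sketch (illustrative, with assumptions): populating a DictEditor from plain
# data inside a running Qt application. This assumes AbcEditorContainer is a QWidget
# and that data_editor_mapper has editors registered for the value types used below.
if __name__ == "__main__":
    import sys
    from PyQt5.QtWidgets import QApplication

    app = QApplication(sys.argv)
    editor = DictEditor()
    # assigning the 'data' property rebuilds one editor row per key via data_editor_mapper
    editor.data = oDict([("layer_name", "background"), ("opacity", 0.5)])
    editor.show()
    sys.exit(app.exec_())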
| 36.271676
| 104
| 0.6051
|
4a163fb68ef532739896cdec84218ee0b66718a2
| 3,999
|
py
|
Python
|
dvc/data_cloud.py
|
hfchong/dvc
|
2e3ce3b3dbb02f6524b0383e3f599c4561413634
|
[
"Apache-2.0"
] | null | null | null |
dvc/data_cloud.py
|
hfchong/dvc
|
2e3ce3b3dbb02f6524b0383e3f599c4561413634
|
[
"Apache-2.0"
] | null | null | null |
dvc/data_cloud.py
|
hfchong/dvc
|
2e3ce3b3dbb02f6524b0383e3f599c4561413634
|
[
"Apache-2.0"
] | null | null | null |
import re
from multiprocessing.pool import ThreadPool
from dvc.logger import Logger
from dvc.exceptions import DvcException
from dvc.config import Config, ConfigError
from dvc.remote.s3 import RemoteS3
from dvc.remote.gs import RemoteGS
from dvc.remote.azure import RemoteAzure
from dvc.remote.ssh import RemoteSSH
from dvc.remote.hdfs import RemoteHDFS
from dvc.remote.local import RemoteLOCAL
from dvc.remote.base import RemoteBase
class DataCloud(object):
""" Generic class to do initial config parsing and redirect to proper DataCloud methods """
CLOUD_MAP = {
'aws' : RemoteS3,
'gcp' : RemoteGS,
'azure' : RemoteAzure,
'ssh' : RemoteSSH,
'hdfs' : RemoteHDFS,
'local' : RemoteLOCAL,
}
def __init__(self, project, config=None):
self.project = project
self._config = config
remote = self._config[Config.SECTION_CORE].get(Config.SECTION_CORE_REMOTE, '')
if remote == '':
if config[Config.SECTION_CORE].get(Config.SECTION_CORE_CLOUD, None):
# backward compatibility
Logger.warn('Using obsoleted config format. Consider updating.')
self._cloud = self.__init__compat()
else:
self._cloud = None
return
self._cloud = self._init_remote(remote)
@staticmethod
def supported(config):
for cloud in DataCloud.CLOUD_MAP.values():
if cloud.supported(config):
return cloud
return None
def _init_remote(self, remote):
section = Config.SECTION_REMOTE_FMT.format(remote)
cloud_config = self._config.get(section, None)
if not cloud_config:
raise ConfigError("Can't find remote section '{}' in config".format(section))
cloud_type = self.supported(cloud_config)
if not cloud_type:
raise ConfigError("Unsupported cloud '{}'".format(cloud_config))
return self._init_cloud(cloud_config, cloud_type)
def __init__compat(self):
cloud_name = self._config[Config.SECTION_CORE].get(Config.SECTION_CORE_CLOUD, '').strip().lower()
if cloud_name == '':
self._cloud = None
return
cloud_type = self.CLOUD_MAP.get(cloud_name, None)
if not cloud_type:
raise ConfigError('Wrong cloud type %s specified' % cloud_name)
cloud_config = self._config.get(cloud_name, None)
if not cloud_config:
raise ConfigError('Can\'t find cloud section \'[%s]\' in config' % cloud_name)
return self._init_cloud(cloud_config, cloud_type)
def _init_cloud(self, cloud_config, cloud_type):
global_storage_path = self._config[Config.SECTION_CORE].get(Config.SECTION_CORE_STORAGEPATH, None)
if global_storage_path:
Logger.warn('Using obsoleted config format. Consider updating.')
cloud = cloud_type(self.project, cloud_config)
return cloud
def _get_cloud(self, remote):
if remote:
return self._init_remote(remote)
if self._cloud:
return self._cloud
raise ConfigError("No remote repository specified. Setup default repository " \
"with 'dvc config core.remote <name>' or use '-r <name>'.")
def push(self, targets, jobs=1, remote=None):
"""
Push data items in a cloud-agnostic way.
"""
return self.project.cache.local.push(targets, jobs=jobs, remote=self._get_cloud(remote))
def pull(self, targets, jobs=1, remote=None):
"""
Pull data items in a cloud-agnostic way.
"""
return self.project.cache.local.pull(targets, jobs=jobs, remote=self._get_cloud(remote))
def status(self, targets, jobs=1, remote=None):
"""
Check status of data items in a cloud-agnostic way.
"""
return self.project.cache.local.status(targets, jobs=jobs, remote=self._get_cloud(remote))
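# Illustrative sketch (not part of the original module): the shape of the config
# mapping DataCloud expects when resolving a named remote. The 'url' key and the
# example values are assumptions; the section keys come from dvc.config.Config.
def _example_remote_config(remote_name="myremote", url="s3://bucket/path"):
    return {
        Config.SECTION_CORE: {Config.SECTION_CORE_REMOTE: remote_name},
        Config.SECTION_REMOTE_FMT.format(remote_name): {"url": url},
    }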
| 34.773913
| 106
| 0.644911
|
4a1640ac42f4dd31310c5148c627e17c21c53f92
| 2,582
|
py
|
Python
|
setup.py
|
Jaikinator/dqc
|
47c964c7d1323a35f4f69521d40476c41843810e
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
Jaikinator/dqc
|
47c964c7d1323a35f4f69521d40476c41843810e
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
Jaikinator/dqc
|
47c964c7d1323a35f4f69521d40476c41843810e
|
[
"Apache-2.0"
] | null | null | null |
import os
import sys
import re
import subprocess as sp
import shutil
from setuptools import setup, find_packages, Extension
from setuptools.command.build_ext import build_ext
# module's descriptor
module_name = "dqc"
github_url = "https://github.com/diffqc/dqc/tree/master/"
raw_github_url = "https://raw.githubusercontent.com/diffqc/dqc/master/"
file_dir = os.path.dirname(os.path.realpath(__file__))
absdir = lambda p: os.path.join(file_dir, p)
# get the long description from README
# open readme and convert all relative path to absolute path
with open("README.md", "r") as f:
long_desc = f.read()
link_pattern = re.compile(r"\]\(([\w\-/]+)\)")
img_pattern = re.compile(r"\]\(([\w\-/\.]+)\)")
link_repl = r"](%s\1)" % github_url
img_repl = r"](%s\1)" % raw_github_url
long_desc = re.sub(link_pattern, link_repl, long_desc)
long_desc = re.sub(img_pattern, img_repl, long_desc)
############### versioning ###############
verfile = os.path.abspath(os.path.join(module_name, "_version.py"))
version = {"__file__": verfile}
with open(verfile, "r") as fp:
exec(fp.read(), version)
# execute _version.py to create _version.txt
cmd = [sys.executable, verfile]
sp.run(cmd)
vers = version["get_version"]()
setup(
name=module_name,
version=vers,
description='Differentiable Quantum Chemistry',
url='https://github.com/diffqc/dqc/',
long_description=long_desc,
long_description_content_type="text/markdown",
author='mfkasim1',
author_email='firman.kasim@gmail.com',
license='Apache License 2.0',
packages=find_packages(),
package_data={module_name: ["_version.txt", "datasets/lebedevquad/lebedev_*.txt"]},
python_requires=">=3.7",
install_requires=[
"numpy>=1.8.2",
"scipy>=0.15",
"basis_set_exchange",
"h5py==3.1.0", # temporary == because pyscf 1.7.6 is not compatible to 3.4.0
"pylibxc2>=6.0.0",
"dqclibs>=0.1.0",
"xitorch>=0.3.0",
"torch>=1.8", # ideally the nightly build
],
classifiers=[
"Development Status :: 3 - Alpha",
"Intended Audience :: Science/Research",
"Topic :: Scientific/Engineering",
"Topic :: Scientific/Engineering :: Physics",
"Topic :: Scientific/Engineering :: Mathematics",
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
],
keywords="project library deep-learning dft quantum-chemistry",
zip_safe=False
)
| 33.102564
| 87
| 0.657242
|
4a16432dd794e554fed596c06b795788a508fa01
| 11,316
|
py
|
Python
|
src/controller/WASH/Query_WaMDaM_for_WASH/QueryWAMDAM.py
|
WamdamProject/WaMDaM_Wizard
|
f8f5a830464f3c8f45e4eb0557833eefb267d7b2
|
[
"BSD-3-Clause"
] | null | null | null |
src/controller/WASH/Query_WaMDaM_for_WASH/QueryWAMDAM.py
|
WamdamProject/WaMDaM_Wizard
|
f8f5a830464f3c8f45e4eb0557833eefb267d7b2
|
[
"BSD-3-Clause"
] | 3
|
2018-11-17T05:49:18.000Z
|
2020-12-31T15:57:14.000Z
|
src/controller/WASH/Query_WaMDaM_for_WASH/QueryWAMDAM.py
|
WamdamProject/WaMDaM_Wizard
|
f8f5a830464f3c8f45e4eb0557833eefb267d7b2
|
[
"BSD-3-Clause"
] | null | null | null |
# call these functions from here. the result will save excel data in the second function of each.
from QueryMultiAttributes import *
from QueryTimeSeries import *
from QueryDescriptor import *
from QuerySeasonal import *
from QueryNumeric import *
# from controller.WEAP_exporter.ExtractWeapArea import WEAP_export
from controller.ConnectDB_ParseExcel import DB_Setup
from controller.ConnectDB_ParseExcel import SqlAlchemy as sq
from sqlalchemy.orm import aliased
import sqlite3
import pandas as pd
# fileDir = ""
# weap_export = WEAP_export(textCtrl_AreaNameOnText, fileDir)
# textCtrl_AreaNameOnText = "BearRiverFeb2018_V1"
#
# ActiveArea = WEAP.ActiveArea.Name
# Scenario = WEAP.ActiveScenario.Name
#
# WEAPAreasDirectory = WEAP.AreasDirectory
#
# print ActiveArea
# print Scenario
# print WEAPAreasDirectory
# SourceName = WEAP.ActiveArea.Name
def Read_provided_file_query_Required():
Network_input = pd.read_excel('./WASH_Network_input.xlsx', sheetname=None)
sheetnames = Network_input.keys()
sheet = Network_input[sheetnames[0]]
keys = sheet.columns
query_data_list = []
# for column in keys:
# query_data_list[column] = []
for i in sheet.index:
row = {}
for column in keys:
row[column] = sheet[column][i]
query_data_list.append(row)
# print query_data_list
# based on the selected model (WEAP) or (WASH),
ModelName = 'WASH'
# Query WaMDaM db to get the list of Object types and their Attributes
Model_required_attributes = '''
SELECT DISTINCT ObjectType as Required_ObjectType,ObjectTypeCV as Required_ObjectTypeCV ,
AttributeName as Required_AttributeName, AttributeNameCV Required_AttributeNameCV,
AttributeDataTypeCV as Required_AttributeDataTypeCV, UnitName as Required_UnitName
FROM ResourceTypes
LEFT JOIN "ObjectTypes"
ON "ObjectTypes"."ResourceTypeID"="ResourceTypes"."ResourceTypeID"
LEFT JOIN "ObjectCategories"
ON "ObjectCategories"."ObjectCategoryID"="ObjectTypes"."ObjectCategoryID"
LEFT JOIN "Attributes"
ON "Attributes"."ObjectTypeID"="ObjectTypes"."ObjectTypeID"
LEFT JOIN "AttributeCategories"
ON "AttributeCategories"."AttributeCategoryID"="Attributes"."AttributeCategoryID"
-- Provide the model name
WHERE "ResourceTypeAcronym"='%s'
--exclude the dummy attributes that are just used to connect Object Types with their Instances.
AND AttributeName!='ObjectTypeInstances'
'''%(ModelName)
# df_Model_required_attributes = session.execute(Model_required_attributes)
conn = sqlite3.connect("WaMDaM_db_WASH.sqlite")
df_Model_required_attributes = pd.read_sql_query(Model_required_attributes, conn)
# Model_required_attributes.keys()
#
# Required_ObjectType=df_Model_required_attributes['Required_ObjectType']
# Required_AttributeName=df_Model_required_attributes['Required_AttributeName']
# Required_AttributeDataTypeCV=df_Model_required_attributes['Required_AttributeDataTypeCV']
# Required_UnitName=df_Model_required_attributes['Required_UnitName']
Query_Load_params_list = []
# loop over the provided Object Types in Excel.
# loop over the Required ObjectTypes from the db query
# If the provided Object Type equals the required object type, then get the required attribute metadata for it
for prov_objs in query_data_list:
for i, req_objs in enumerate(df_Model_required_attributes['Required_ObjectType']): # get the string value inside each column
if prov_objs['Provided_ObjectType'] == req_objs:
Query_Load_params = {}
Query_Load_params['Required_AttributeName'] = df_Model_required_attributes['Required_AttributeName'][i]
Query_Load_params['Required_AttributeDataTypeCV'] = df_Model_required_attributes['Required_AttributeDataTypeCV'][i]
Query_Load_params['Required_UnitName'] = df_Model_required_attributes['Required_UnitName'][i]
Query_Load_params['Required_ObjectType'] = req_objs
Query_Load_params['Provided_ObjectType'] = prov_objs['Provided_ObjectType'] #
Query_Load_params['Provided_InstanceName'] = prov_objs['Provided_InstanceName']
Query_Load_params['Provided_FullBranch'] = prov_objs['Provided_FullBranch']
Query_Load_params_list.append(Query_Load_params)
# print Query_Load_params_list
# return Query_Load_params_list
# iterate over the rows and get the string value of each Required_AttributeDataTypeCV
setup = DB_Setup()
session = setup.get_session()
multi_timeseries = []
multi_AttributeSeries = []
multi_Seasonal = []
multi_Numeric=[]
multi_Descriptor=[]
for input_param in Query_Load_params_list:
if input_param['Required_AttributeDataTypeCV']=='TimeSeries':
#call the time series function
#pass the input_param value to the time series function
multi_timeseries.append(input_param)
elif input_param['Required_AttributeDataTypeCV'] == 'SeasonalNumericValues':
multi_Seasonal.append(input_param)
#
elif input_param['Required_AttributeDataTypeCV'] == 'NumericValues':
multi_Numeric.append(input_param)
elif input_param['Required_AttributeDataTypeCV'] == 'DescriptorValues':
multi_Descriptor.append(input_param)
#
#
elif input_param['Required_AttributeDataTypeCV'] == 'MultiAttributeSeries':
multi_AttributeSeries.append(input_param)
else:
continue
# Execute the time series function (for both the query and write csv)
total_df_TimeSeries,Metadata_TimeSeries =execute_TimeSeries_query(conn, multi_timeseries)
# Execute the seasonal query
total_df_Seasonal,Metadata_seasonal = execute_Seasonal_query(conn, multi_Seasonal)
# Execute the multi attributes series query
total_df_MultiColumns,Metadata_multi_att = execute_MultiAtt_query(conn, multi_AttributeSeries)
# Execute the numeric attributes query
total_df_Numeric,Metadata_multi_numeric = execute_Numeric_query(conn, multi_Numeric)
# Execute the descriptor attributes query
total_df_Descriptor,Metadata_multi_descriptor = execute_Descriptor_query(conn, multi_Descriptor)
# Execute the metadata file
# argument order matches the execute_WriteMetadataFile signature defined below
InputFile_list = execute_WriteMetadataFile(total_df_TimeSeries, total_df_MultiColumns, total_df_Seasonal, total_df_Numeric, total_df_Descriptor,
                                           Metadata_TimeSeries, Metadata_seasonal, Metadata_multi_att, Metadata_multi_numeric, Metadata_multi_descriptor)
return total_df_Seasonal, total_df_MultiColumns, total_df_Descriptor, total_df_Numeric,total_df_TimeSeries # just these
# load_InputTo_WEAP(InputFile_list)
def execute_TimeSeries_query(conn, multi_timeseries):
# setup = DB_Setup()
# session = setup.get_session()
df_TimeSeries,TimeSeries_Full_Branch_total = TimeSeries_query(conn,multi_timeseries)
total_df_TimeSeries,Metadata_TimeSeries =Timeseries_csv_file(df_TimeSeries,TimeSeries_Full_Branch_total)
return (total_df_TimeSeries,Metadata_TimeSeries)
#total_df_TimeSeries
#
def execute_MultiAtt_query(conn, multi_AttributeSeries):
# setup = DB_Setup()
# session = setup.get_session()
df_MultiColumns,Multi_Full_Branch_total = MultiAttributes_query(conn,multi_AttributeSeries)
total_df_MultiColumns,Metadata_multi_att = MultiAttributes_csv_file(df_MultiColumns,Multi_Full_Branch_total)
return (total_df_MultiColumns,Metadata_multi_att)
#total_df_Seasonal
# csv_file_path_or_value_seasonal_all
def execute_Seasonal_query(conn, multi_Seasonal):
# setup = DB_Setup()
# session = setup.get_session()
df_Seasonal,Seasonal_Full_Branch_total = Seasonal_query(conn, multi_Seasonal)
total_df_Seasonal,Metadata_seasonal = Seasonal_csv_file(df_Seasonal,Seasonal_Full_Branch_total)
return (total_df_Seasonal,Metadata_seasonal)
#
def execute_Numeric_query(conn, multi_Numeric):
# setup = DB_Setup()
# session = setup.get_session()
# df_Numeric = Numeric_query(session)
total_df_Numeric,Metadata_multi_numeric = Numeric_query(conn, multi_Numeric)
return (total_df_Numeric, Metadata_multi_numeric)
# total_df_Numeric,Metadata_multi_descriptor = execute_Descriptor_query(conn, multi_Descriptor)
#
def execute_Descriptor_query(conn, multi_Descriptor):
# setup = DB_Setup()
# session = setup.get_session()
# df_Descriptor = Descriptor_query(session)
total_df_Descriptor,Metadata_multi_descriptor = Descriptor_query(conn, multi_Descriptor)
return (total_df_Descriptor, Metadata_multi_descriptor)
# store these as lists (a matrix), then pass them on so the whole input file can be written at once
def execute_WriteMetadataFile(total_df_TimeSeries, total_df_MultiColumns,total_df_Seasonal,total_df_Numeric,total_df_Descriptor,
Metadata_TimeSeries,Metadata_seasonal,Metadata_multi_att,Metadata_multi_numeric,Metadata_multi_descriptor ):
# multi_AttributeSeries
# from Read_provided_file_query_Required import *
# from execute_TimeSeries_query import total_csv_file_name
from ExportWASH_Inputfile_metadata import WriteMetadataFile
return WriteMetadataFile(total_df_TimeSeries,total_df_MultiColumns,total_df_Seasonal,total_df_Numeric,total_df_Descriptor,
Metadata_TimeSeries,Metadata_seasonal,Metadata_multi_att,Metadata_multi_numeric,Metadata_multi_descriptor)
#
# def load_InputTo_WEAP(InputFile_list):
# # WEAP.Branch("Supply and Resources\River\Little Bear River\Reservoirs\Hyrum Reservoir").Variable("Volume Elevation Curve").Expression = "VolumeElevation(0.0,4590.0,130.0,4600.0,649.0,4610.0,1739.0,4620.0,3456.0,4630.0,5937.0,4640.0,9236.0,4650.0,13206.0,4660.0,17721.0,4670.0,18684.0,4672.0,22600.0,4680.0,28100.0,4690.0,34100.0,4700.0,40700.0,4710.0,47900.0,4720.0,55800.0,4730.0,64500.0,4740.0,73900.0,4750.0"
# # WEAP.Branch(BranchFullName).Variable(Required_AttributeName).Expression = InputFile
#
# for InputFile in InputFile_list:
# BranchName = InputFile['FullBranch']
# Required_AttributeName = InputFile['VariableName']
# Value = InputFile['Value']
# if Required_AttributeName=='Model Water Quality?': continue
# for Branch in WEAP.Branches:
# if Branch.FullName == BranchName:
# for V in Branch.Variables:
# # check if the provided attribute from WAMDAM matches the one used in WEAP
# if Required_AttributeName == V.Name:
# WEAP.Branch(BranchName).Variable(Required_AttributeName).Expression = Value
# print BranchName,Required_AttributeName,Value
# # if Branch.TypeName=='Demand Site':
# # for V in Branch.Variables:
# # if V.Name=='Method' and V.Expression == 'Specify monthly demand':
# # InputFile['VariableName']='Monthly Demand'
# # 'Consumption'
#
# Execute loading data to WEAP
# load_InputTo_WEAP(WEAP,InputFile_list)
# # Option 2: Read network from a provided excel file
#
# # Read the excel file
# Read_provided_file_query_Required()
| 39.291667
| 418
| 0.743549
|
4a1643f0dfbd3d535616fafe11b012ba1b1f45a1
| 7,153
|
py
|
Python
|
pymoo/algorithms/rnsga2.py
|
gabicavalcante/pymoo
|
1711ce3a96e5ef622d0116d6c7ea4d26cbe2c846
|
[
"Apache-2.0"
] | 11
|
2018-05-22T17:38:02.000Z
|
2022-02-28T03:34:33.000Z
|
pymoo/algorithms/rnsga2.py
|
gabicavalcante/pymoo
|
1711ce3a96e5ef622d0116d6c7ea4d26cbe2c846
|
[
"Apache-2.0"
] | 15
|
2022-01-03T19:36:36.000Z
|
2022-03-30T03:57:58.000Z
|
pymoo/algorithms/rnsga2.py
|
gabicavalcante/pymoo
|
1711ce3a96e5ef622d0116d6c7ea4d26cbe2c846
|
[
"Apache-2.0"
] | 3
|
2021-11-22T08:01:47.000Z
|
2022-03-11T08:53:58.000Z
|
import numpy as np
from pymoo.algorithms.nsga2 import NSGA2
from pymoo.algorithms.nsga3 import get_extreme_points_c
from pymoo.docs import parse_doc_string
from pymoo.model.survival import Survival
from pymoo.operators.selection.random_selection import RandomSelection
from pymoo.util.nds.non_dominated_sorting import NonDominatedSorting
# =========================================================================================================
# Implementation
# =========================================================================================================
class RNSGA2(NSGA2):
def __init__(self,
ref_points,
epsilon=0.001,
normalization="front",
weights=None,
extreme_points_as_reference_points=False,
**kwargs):
"""
Parameters
----------
ref_points : {ref_points}
epsilon : float
weights : np.array
normalization : {{'no', 'front', 'ever'}}
extreme_points_as_reference_points : bool
"""
self.epsilon = epsilon
self.weights = weights
self.normalization = normalization
self.selection = RandomSelection()
super().__init__(**kwargs)
self.survival = RankAndModifiedCrowdingSurvival(ref_points, epsilon, weights, normalization,
extreme_points_as_reference_points)
class RankAndModifiedCrowdingSurvival(Survival):
def __init__(self, ref_points,
epsilon,
weights,
normalization,
extreme_points_as_reference_points
) -> None:
super().__init__(True)
self.n_obj = ref_points.shape[1]
self.ref_points = ref_points
self.epsilon = epsilon
self.extreme_points_as_reference_points = extreme_points_as_reference_points
self.weights = weights
if self.weights is None:
self.weights = np.full(self.n_obj, 1 / self.n_obj)
self.normalization = normalization
self.ideal_point = np.full(self.n_obj, np.inf)
self.nadir_point = np.full(self.n_obj, -np.inf)
def _do(self, problem, pop, n_survive, **kwargs):
# get the objective space values and objects
F = pop.get("F")
# the final indices of surviving individuals
survivors = []
# do the non-dominated sorting until splitting front
fronts = NonDominatedSorting().do(F)
if self.normalization == "ever":
# find or usually update the new ideal point - from feasible solutions
self.ideal_point = np.min(np.vstack((self.ideal_point, F)), axis=0)
self.nadir_point = np.max(np.vstack((self.nadir_point, F)), axis=0)
elif self.normalization == "front":
front = fronts[0]
if len(front) > 1:
self.ideal_point = np.min(F[front], axis=0)
self.nadir_point = np.max(F[front], axis=0)
elif self.normalization == "no":
self.ideal_point = np.zeros(self.n_obj)
self.nadir_point = np.ones(self.n_obj)
if self.extreme_points_as_reference_points:
self.ref_points = np.row_stack([self.ref_points, get_extreme_points_c(F, self.ideal_point)])
# calculate the distance matrix from every solution to all reference points
dist_to_ref_points = calc_norm_pref_distance(F, self.ref_points, self.weights, self.ideal_point,
self.nadir_point)
for k, front in enumerate(fronts):
# save rank attributes to the individuals - rank = front here
pop[front].set("rank", np.full(len(front), k))
# number of individuals remaining
n_remaining = n_survive - len(survivors)
# the ranking of each point regarding each reference point (two times argsort is necessary)
rank_by_distance = np.argsort(np.argsort(dist_to_ref_points[front], axis=0), axis=0)
# the reference point where the best ranking is coming from
ref_point_of_best_rank = np.argmin(rank_by_distance, axis=1)
# the actual ranking which is used as crowding
ranking = rank_by_distance[np.arange(len(front)), ref_point_of_best_rank]
if len(front) <= n_remaining:
# we can simply copy the crowding to ranking. not epsilon selection here
crowding = ranking
I = np.arange(len(front))
else:
# Distance from solution to every other solution and set distance to itself to infinity
dist_to_others = calc_norm_pref_distance(F[front], F[front], self.weights, self.ideal_point,
self.nadir_point)
np.fill_diagonal(dist_to_others, np.inf)
# the crowding that will be used for selection
crowding = np.full(len(front), np.nan)
# solutions which are not already selected - for
not_selected = np.argsort(ranking)
# until we have saved a crowding for each solution
while len(not_selected) > 0:
# select the closest solution
idx = not_selected[0]
# set crowding for that individual
crowding[idx] = ranking[idx]
# need to remove myself from not-selected array
to_remove = [idx]
# Group of close solutions
dist = dist_to_others[idx][not_selected]
group = not_selected[np.where(dist < self.epsilon)[0]]
# if there exists solution with a distance less than epsilon
if len(group):
# discourage them by giving them a high crowding
crowding[group] = ranking[group] + np.round(len(front) / 2)
# remove group from not_selected array
to_remove.extend(group)
not_selected = np.array([i for i in not_selected if i not in to_remove])
# now sort by the crowding (actually modified rank) ascending and let the best survive
I = np.argsort(crowding)[:n_remaining]
# set the crowding to all individuals
pop[front].set("crowding", crowding)
# extend the survivors by all or selected individuals
survivors.extend(front[I])
# inverse of crowding because nsga2 does maximize it (then tournament selection can stay the same)
pop.set("crowding", -pop.get("crowding"))
return pop[survivors]
def calc_norm_pref_distance(A, B, weights, ideal, nadir):
D = np.repeat(A, B.shape[0], axis=0) - np.tile(B, (A.shape[0], 1))
N = ((D / (nadir - ideal)) ** 2) * weights
N = np.sqrt(np.sum(N, axis=1) * len(weights))
return np.reshape(N, (A.shape[0], B.shape[0]))
parse_doc_string(RNSGA2.__init__)
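# Tiny worked example (illustrative, not part of pymoo): calc_norm_pref_distance
# returns a |A| x |B| matrix of weighted, normalized distances. With equal weights
# and an ideal/nadir of [0, 0] / [1, 1], the normalization is a no-op and the single
# entry below is sqrt((0.5 * 0.04 + 0.5 * 0.04) * 2), i.e. about 0.283.
def _example_pref_distance():
    A = np.array([[0.2, 0.8]])  # one candidate solution in objective space
    B = np.array([[0.0, 1.0]])  # one reference point
    w = np.array([0.5, 0.5])
    return calc_norm_pref_distance(A, B, w, np.zeros(2), np.ones(2))  # shape (1, 1)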
| 37.846561
| 108
| 0.573745
|
4a164421a1c8ce87412eafcc934520a29e57ebfe
| 522
|
py
|
Python
|
efls-console/alembic/versions/3900a6b99e7e_init.py
|
universe-hcy/Elastic-Federated-Learning-Solution
|
4e047fbbe6ae9809cd631499b7d3a3855dfe2208
|
[
"Apache-2.0"
] | 65
|
2021-09-30T01:54:34.000Z
|
2022-03-26T13:57:15.000Z
|
efls-console/alembic/versions/3900a6b99e7e_init.py
|
universe-hcy/Elastic-Federated-Learning-Solution
|
4e047fbbe6ae9809cd631499b7d3a3855dfe2208
|
[
"Apache-2.0"
] | 24
|
2021-09-30T09:25:43.000Z
|
2022-03-29T06:33:44.000Z
|
efls-console/alembic/versions/3900a6b99e7e_init.py
|
universe-hcy/Elastic-Federated-Learning-Solution
|
4e047fbbe6ae9809cd631499b7d3a3855dfe2208
|
[
"Apache-2.0"
] | 18
|
2021-09-30T09:04:08.000Z
|
2022-03-31T10:17:27.000Z
|
"""init
Revision ID: 3900a6b99e7e
Revises:
Create Date: 2021-08-11 15:28:48.507537
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '3900a6b99e7e'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
pass
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
pass
# ### end Alembic commands ###
| 18
| 65
| 0.67433
|
4a164473a3357ef5ef91f68514e5475421f94cd2
| 552
|
py
|
Python
|
electrical_test/limelight.py
|
frc1418/2020-robot
|
027a7b7f02bd48ade79a18c6b2a48d165cd4017c
|
[
"MIT"
] | 3
|
2020-07-14T23:28:30.000Z
|
2020-07-14T23:28:35.000Z
|
electrical_test/limelight.py
|
AndrewLester/2020-robot
|
f3c03e72bcf106f5752e443b7a3a6eb87bc0bff2
|
[
"MIT"
] | 2
|
2020-03-14T19:13:25.000Z
|
2020-10-19T16:29:32.000Z
|
electrical_test/limelight.py
|
frc1418/2020-robot
|
027a7b7f02bd48ade79a18c6b2a48d165cd4017c
|
[
"MIT"
] | null | null | null |
from networktables.util import ntproperty
import math
class Limelight():
YAW_ANGLE = ntproperty('/limelight/tx',0)
PITCH_ANGLE = ntproperty('/limelight/ty',0)
# change with new robot; UNIT = inches
CAMERA_HEIGHT = 43
TARGET_HEIGHT = 98.25
# UNIT = degrees
CAMERA_ANGLE = 0
def findDistance(self):
    if self.YAW_ANGLE == 0 or self.PITCH_ANGLE == 0:
        return 0
    else:
        # heights are in inches and angles in degrees, so convert to radians for math.tan;
        # distance is the camera-to-target height difference over the tangent of the total pitch
        angle = math.radians(self.CAMERA_ANGLE + self.PITCH_ANGLE)
        distance = (self.TARGET_HEIGHT - self.CAMERA_HEIGHT) / math.tan(angle)
        return distance
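# Worked example (illustrative): a stand-alone version of the same geometry, using
# an assumed target pitch of 10 degrees read from the Limelight. With the constants
# above this is (98.25 - 43) / tan(radians(0 + 10)), roughly 313 inches.
def _example_distance(pitch_degrees=10.0):
    return (Limelight.TARGET_HEIGHT - Limelight.CAMERA_HEIGHT) / math.tan(
        math.radians(Limelight.CAMERA_ANGLE + pitch_degrees)
    )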
| 29.052632
| 100
| 0.648551
|
4a16450a1562ed3ee3c1d6b3652c762863389faf
| 387
|
py
|
Python
|
src/models/predict_model.py
|
gagan3012/summarization
|
5bfeb087feb3379ec41849e4125081c1e351971b
|
[
"MIT"
] | 2
|
2021-07-31T08:27:01.000Z
|
2021-11-01T07:42:58.000Z
|
src/models/predict_model.py
|
gagan3012/summarization
|
5bfeb087feb3379ec41849e4125081c1e351971b
|
[
"MIT"
] | null | null | null |
src/models/predict_model.py
|
gagan3012/summarization
|
5bfeb087feb3379ec41849e4125081c1e351971b
|
[
"MIT"
] | null | null | null |
import yaml
from .model import Summarization
def predict_model(text: str):
"""
Predict the summary of the given text.
"""
with open("model_params.yml") as f:
params = yaml.safe_load(f)
model = Summarization()
model.load_model(model_type=params["model_type"], model_dir=params["model_dir"])
pre_summary = model.predict(text)
return pre_summary
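# Example (illustrative) of the model_params.yml this function reads; only the two
# keys are required by the code above, the values shown are assumptions:
#   model_type: t5
#   model_dir: models/summarizer
#
# pre_summary = predict_model("Long article text ...")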
| 22.764706
| 84
| 0.684755
|
4a16456257af3ef0250db171a68b96585d81755e
| 270
|
py
|
Python
|
quantpy/conf.py
|
iamxiatian/quantpy
|
316580c575207f7af836e6c784de0351762f748b
|
[
"Apache-2.0"
] | 2
|
2016-06-21T08:11:10.000Z
|
2020-06-25T14:34:46.000Z
|
quantpy/conf.py
|
iamxiatian/quantpy
|
316580c575207f7af836e6c784de0351762f748b
|
[
"Apache-2.0"
] | null | null | null |
quantpy/conf.py
|
iamxiatian/quantpy
|
316580c575207f7af836e6c784de0351762f748b
|
[
"Apache-2.0"
] | null | null | null |
import configparser
cfg = configparser.ConfigParser()
cfg.read('settings.cfg')
MONGODB_DB_NAME = cfg['db']['dbname']
MONGODB_USER_NAME = cfg['db']['user']
MONGODB_PASSWORD = cfg['db']['password']
MONGODB_HOST = cfg['db']['host']
MONGODB_PORT = int(cfg['db']['port'])
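# Example (illustrative) settings.cfg consumed above; the section and key names match
# the lookups in this module, the values are placeholders:
#   [db]
#   dbname = quant
#   user = quant_user
#   password = change-me
#   host = localhost
#   port = 27017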
| 22.5
| 40
| 0.7
|
4a1645c08015de2aaf710b0be1ee5d12a9a86a90
| 2,370
|
py
|
Python
|
examples/crawler_brohltal.py
|
Hadryan/NewsBot
|
e6a28f96857c608abddc6eeafdcf765ffde74461
|
[
"MIT"
] | 1
|
2021-09-04T01:12:02.000Z
|
2021-09-04T01:12:02.000Z
|
examples/crawler_brohltal.py
|
Hadryan/NewsBot
|
e6a28f96857c608abddc6eeafdcf765ffde74461
|
[
"MIT"
] | null | null | null |
examples/crawler_brohltal.py
|
Hadryan/NewsBot
|
e6a28f96857c608abddc6eeafdcf765ffde74461
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), "../"))
import logging
import re
from _datetime import datetime
from urllib.request import urlopen
from telegramnews import Site
logging.basicConfig(
format="%(asctime)s - %(name)s - %(levelname)s - %(message)s", level=logging.INFO
)
def main():
site = Site()
site.name = "brohltal"
site.alias = "BrohltalInfo24"
site.base_url = "http://www.brohltal-info24.de/"
site.channel_id = -1001131410143
site.short = "brohl"
for article in get_data(site.base_url):
text, tags = create_hashtag(article[2])
site.add_article(
img=site.base_url + article[0],
title=article[1],
text=text,
tags=tags,
date=article[4],
link=site.base_url + article[3],
)
site.post(variant=3)
def create_hashtag(text):
text_list = re.split("[.:]", text, maxsplit=1)
tags = re.split("[/-]", text_list[0])
tags = [tag.replace(" ", "") for tag in tags]
new_text = text_list[1]
for tag in tags:
if len(tag) > 15:
return text, []
return new_text, tags
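# Example (illustrative): create_hashtag("Brohltal - Niederzissen: Neues vom Sportverein")
# returns (" Neues vom Sportverein", ["Brohltal", "Niederzissen"]) -- the leading
# region prefix is split into hashtags and the remainder becomes the post text.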
def read_data(url, site, limit=None):
add = "&limit=" + str(limit) if limit else ""
response = urlopen(url + site + add).read().decode("cp1252").replace("–", "-")
data = re.findall(
'img src="([^"]*)".*\r\n.*\r\n.*\r\n[ ]*(.*?)[ ]*<.*?<br>[ \r\n]*(.*) <a href=([^>]*)>.*\r\n'
+ "[ ]*([0-9\.]* [0-9\:]*)",
response,
)[::-1]
return list(data)
def read_all_data(url, site):
limit = 0
result = 1
data = []
while result > 0:
adata = read_data(url, site, limit=limit)
data = data + adata
result = len(adata)
limit = limit + 30
return data
def get_data(base_url):
data_list = []
url = base_url + "index.php?site="
sites = ["newsregio-direkt", "newsaw-direkt", "news-direkt"]
for site in sites:
data_list = data_list + read_data(url, site)
data_list = [list(item) for item in data_list]
for item in data_list:
item[4] = datetime.strptime(item[4], "%d.%m.%y %H:%M")
data_list.sort(key=lambda r: r[4])
for x in data_list:
x[4] = None
return data_list[::-1]
if __name__ == "__main__":
main()
| 25.76087
| 101
| 0.567089
|
4a1645dd1e858dfcc339cee408c118a4ff650f55
| 5,885
|
py
|
Python
|
bindings/python/ensmallen_graph/datasets/networkrepository/orkut.py
|
caufieldjh/ensmallen_graph
|
14e98b1cdbc73193a84a913d7d4f2b2b3eb2c43a
|
[
"MIT"
] | null | null | null |
bindings/python/ensmallen_graph/datasets/networkrepository/orkut.py
|
caufieldjh/ensmallen_graph
|
14e98b1cdbc73193a84a913d7d4f2b2b3eb2c43a
|
[
"MIT"
] | null | null | null |
bindings/python/ensmallen_graph/datasets/networkrepository/orkut.py
|
caufieldjh/ensmallen_graph
|
14e98b1cdbc73193a84a913d7d4f2b2b3eb2c43a
|
[
"MIT"
] | null | null | null |
"""
This file offers the methods to automatically retrieve the graph orkut.
The graph is automatically retrieved from the NetworkRepository repository.
Report
---------------------
At the time of rendering these methods (please see datetime below), the graph
had the following characteristics:
Datetime: 2021-02-04 01:49:39.921874
The undirected graph orkut has 11803298 nodes with 2 different node types:
2 (nodes number 8730857) and 1 (nodes number 3072441) and 444222386 unweighted
edges, of which none are self-loops. The graph is extremely sparse as it
has a density of 0.00001 and is connected, as it has a single component.
The graph median node degree is 7, the mean node degree is 75.27 and the
node degree mode is 1. The top 5 most central nodes are 3079078 (degree
318240), 3079778 (degree 284401), 3079189 (degree 221523), 3080210 (degree
213394) and 3080202 (degree 208310).
References
---------------------
Please cite the following if you use the data:
@inproceedings{nr,
title = {The Network Data Repository with Interactive Graph Analytics and Visualization},
author={Ryan A. Rossi and Nesreen K. Ahmed},
booktitle = {AAAI},
url={http://networkrepository.com},
year={2015}
}
Usage example
----------------------
The usage of this graph is relatively straightforward:
.. code:: python
# First import the function to retrieve the graph from the datasets
from ensmallen_graph.datasets.networkrepository import Orkut
# Then load the graph
graph = Orkut()
# Finally, you can do anything with it, for instance, compute its report:
print(graph)
# If you need to run a link prediction task with validation,
# you can split the graph using a connected holdout as follows:
train_graph, validation_graph = graph.connected_holdout(
# You can use an 80/20 split the holdout, for example.
train_size=0.8,
# The random state is used to reproduce the holdout.
random_state=42,
# Whether to show a loading bar.
verbose=True
)
# Remember that, if you need, you can enable the memory-time trade-offs:
train_graph.enable(
vector_sources=True,
vector_destinations=True,
vector_outbounds=True
)
# Consider using the methods made available in the Embiggen package
# to run graph embedding or link prediction tasks.
"""
from typing import Dict
from ..automatic_graph_retrieval import AutomaticallyRetrievedGraph
from ...ensmallen_graph import EnsmallenGraph # pylint: disable=import-error
def Orkut(
directed: bool = False,
verbose: int = 2,
cache_path: str = "graphs/networkrepository",
**additional_graph_kwargs: Dict
) -> EnsmallenGraph:
"""Return new instance of the orkut graph.
The graph is automatically retrieved from the NetworkRepository repository.
Parameters
-------------------
directed: bool = False,
Whether to load the graph as directed or undirected.
By default false.
verbose: int = 2,
Whether to show loading bars during the retrieval and building
of the graph.
cache_path: str = "graphs",
Where to store the downloaded graphs.
additional_graph_kwargs: Dict,
Additional graph kwargs.
Returns
-----------------------
Instance of orkut graph.
Report
---------------------
At the time of rendering these methods (please see datetime below), the graph
had the following characteristics:
Datetime: 2021-02-04 01:49:39.921874
The undirected graph orkut has 11803298 nodes with 2 different node types:
2 (nodes number 8730857) and 1 (nodes number 3072441) and 444222386 unweighted
edges, of which none are self-loops. The graph is extremely sparse as it
has a density of 0.00001 and is connected, as it has a single component.
The graph median node degree is 7, the mean node degree is 75.27 and the
node degree mode is 1. The top 5 most central nodes are 3079078 (degree
318240), 3079778 (degree 284401), 3079189 (degree 221523), 3080210 (degree
213394) and 3080202 (degree 208310).
References
---------------------
Please cite the following if you use the data:
@inproceedings{nr,
title = {The Network Data Repository with Interactive Graph Analytics and Visualization},
author={Ryan A. Rossi and Nesreen K. Ahmed},
booktitle = {AAAI},
url={http://networkrepository.com},
year={2015}
}
Usage example
----------------------
The usage of this graph is relatively straightforward:
.. code:: python
# First import the function to retrieve the graph from the datasets
from ensmallen_graph.datasets.networkrepository import Orkut
# Then load the graph
graph = Orkut()
# Finally, you can do anything with it, for instance, compute its report:
print(graph)
# If you need to run a link prediction task with validation,
# you can split the graph using a connected holdout as follows:
train_graph, validation_graph = graph.connected_holdout(
# You can use an 80/20 split the holdout, for example.
train_size=0.8,
# The random state is used to reproduce the holdout.
random_state=42,
# Whether to show a loading bar.
verbose=True
)
# Remember that, if you need, you can enable the memory-time trade-offs:
train_graph.enable(
vector_sources=True,
vector_destinations=True,
vector_outbounds=True
)
# Consider using the methods made available in the Embiggen package
# to run graph embedding or link prediction tasks.
"""
return AutomaticallyRetrievedGraph(
graph_name="Orkut",
dataset="networkrepository",
directed=directed,
verbose=verbose,
cache_path=cache_path,
additional_graph_kwargs=additional_graph_kwargs
)()
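# A minimal, illustrative sketch (an assumption, not part of the generated
# module) showing how the keyword arguments documented above map onto a call
# site. The __main__ guard keeps the example from running on import.
if __name__ == "__main__":
    orkut_graph = Orkut(
        directed=False,
        verbose=2,
        cache_path="graphs/networkrepository",
    )
    # Printing the graph yields the textual report whose characteristics are
    # listed in the docstring above.
    print(orkut_graph)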
| 32.15847
| 94
| 0.68921
|
4a1646b122580893d02eeb0cd28e8c6abcae6db4
| 4,916
|
py
|
Python
|
tutorials/gpt2_e2e.py
|
Zuwei-Zhao/ort-customops
|
4e78e9651bdcbe8518448c3cbeab90eb92a1be05
|
[
"MIT"
] | null | null | null |
tutorials/gpt2_e2e.py
|
Zuwei-Zhao/ort-customops
|
4e78e9651bdcbe8518448c3cbeab90eb92a1be05
|
[
"MIT"
] | null | null | null |
tutorials/gpt2_e2e.py
|
Zuwei-Zhao/ort-customops
|
4e78e9651bdcbe8518448c3cbeab90eb92a1be05
|
[
"MIT"
] | 1
|
2021-09-21T00:47:16.000Z
|
2021-09-21T00:47:16.000Z
|
import os
import numpy
from transformers import AutoConfig
from onnxruntime_customops import mytorch as torch
from onnxruntime_customops.utils import trace_for_onnx, op_from_model, build_customop_model
device = 'cpu'
model_name_or_path = 'gpt2'
gpt2_core_model_path = './gpt2.onnx'
gpt2_encoder_model_path = './gpt2_tok.onnx'
gpt2_decoder_model_path = './gpt2_dec.onnx'
gpt2_full_model_path = './gpt2_full.onnx'
# input_text = ['best hotel in bay area', 'here is an example of gpt2 model']
input_text = ['best hotel in bay area']
num_tokens_to_produce = 10
def get_cache_directory():
cache_dir = os.path.join("../..", "cache_models")
if not os.path.exists(cache_dir):
os.makedirs(cache_dir)
return cache_dir
def convert_models():
from transformers import GPT2Tokenizer # noqa
from onnxruntime.transformers.gpt2_helper import Gpt2Helper, MyGPT2LMHeadModel # noqa
cache_dir = get_cache_directory()
tokenizer = GPT2Tokenizer.from_pretrained(model_name_or_path, cache_dir=cache_dir)
build_customop_model('GPT2Tokenizer', gpt2_encoder_model_path, model=tokenizer)
build_customop_model('VectorToString', gpt2_decoder_model_path, decoder=tokenizer.decoder)
config = AutoConfig.from_pretrained(model_name_or_path, cache_dir=cache_dir)
# Remove the dependency on onnxruntime-tools.
model = MyGPT2LMHeadModel.from_pretrained(model_name_or_path, config=config, cache_dir=cache_dir)
Gpt2Helper.export_onnx(model, device, gpt2_core_model_path)
def inference_and_dump_full_model(inputs):
config = AutoConfig.from_pretrained(model_name_or_path, cache_dir=get_cache_directory())
core_model = op_from_model(gpt2_core_model_path)
gpt2_tokenizer = op_from_model(gpt2_encoder_model_path)
with trace_for_onnx(inputs, names=gpt2_tokenizer.input_names) as tc_sess:
num_attention_heads = config.n_head
hidden_size = config.n_embd
num_layer = config.n_layer
eos_token_id = config.eos_token_id
input_ids, attention_mask = gpt2_tokenizer(*tc_sess.get_inputs(), padding=True, padding_side='left')
position_ids = (attention_mask.long().cumsum(-1) - 1)
# position_ids.masked_fill_(position_ids < 0, 0)
# Empty Past State for generating first word
batch_size = input_ids.size()[0]
past_shape = [2, batch_size, num_attention_heads, 0, hidden_size // num_attention_heads]
empty_past = [torch.empty(*past_shape).type(torch.float32).to(device)] * num_layer
has_eos = torch.zeros(batch_size, dtype=torch.bool)
all_token_ids = input_ids.clone()
all_eos = torch.tensor(False, dtype=torch.bool)
past = empty_past
for step in torch.onnx_loop(num_tokens_to_produce, all_eos, past):
outputs = core_model(input_ids, position_ids, attention_mask.type(torch.float32), *past)
next_token_logits = outputs[0][:, -1, :]
next_tokens = torch.argmax(next_token_logits, dim=-1)
has_eos = has_eos | (next_tokens == eos_token_id)
tokens_to_add = next_tokens.masked_fill(has_eos, eos_token_id)
all_token_ids = torch.cat([all_token_ids, tokens_to_add.unsqueeze(-1)], dim=-1)
# FIXME: the loop is not supported yet.
break
# Update input_ids, attention_mask, position_ids and past
input_ids = tokens_to_add.clone().detach().reshape([batch_size, 1]).to(device)
position_ids = (position_ids[:, -1] + 1).reshape(batch_size, 1)
attention_mask = torch.cat([attention_mask, torch.ones([batch_size, 1]).type_as(attention_mask)], 1).to(device)
past = []
for i in range(num_layer):
past_i = torch.from_numpy(outputs[i + 1]) if \
isinstance(outputs[i + 1], numpy.ndarray) else outputs[i + 1].clone().detach()
past.append(past_i.to(device))
all_eos = torch.all(has_eos)
gpt2_decoder = torch.op_from_model(gpt2_decoder_model_path)
output_text = gpt2_decoder(all_token_ids.squeeze(0))
tc_sess.save_as_onnx(gpt2_full_model_path, output_text)
return output_text
# 1. Check whether the GPT-2 models are already exported; if not, convert them.
if not os.path.exists(gpt2_core_model_path):
convert_models()
# 2. Run the inference with the pre- and post-processing, trace the computation graph and build the all-in-one ONNX model
output_ms = inference_and_dump_full_model(input_text)
# 3. Inference on the all-in-one model
full_model = torch.op_from_model(gpt2_full_model_path)
outputs = full_model(input_text)
# 4. Test the result
for n_, tx_ in enumerate(outputs):
if output_ms[n_] != outputs[n_]:
import warnings # noqa
warnings.warn("{}: The all-in-one model output is not the same\t{}".format(n_, output_ms[n_]))
print("{}: {}\n\t{}".format(n_, input_text, tx_))
| 40.966667
| 123
| 0.711554
|
4a16473e564b0489533beaa91121d0aaf9146307
| 263
|
py
|
Python
|
src/sentry/tagstore/legacy/__init__.py
|
AlexWayfer/sentry
|
ef935cda2b2e960bd602fda590540882d1b0712d
|
[
"BSD-3-Clause"
] | 4
|
2019-05-27T13:55:07.000Z
|
2021-03-30T07:05:09.000Z
|
src/sentry/tagstore/legacy/__init__.py
|
AlexWayfer/sentry
|
ef935cda2b2e960bd602fda590540882d1b0712d
|
[
"BSD-3-Clause"
] | 196
|
2019-06-10T08:34:10.000Z
|
2022-02-22T01:26:13.000Z
|
src/sentry/tagstore/legacy/__init__.py
|
AlexWayfer/sentry
|
ef935cda2b2e960bd602fda590540882d1b0712d
|
[
"BSD-3-Clause"
] | 1
|
2018-07-02T09:46:44.000Z
|
2018-07-02T09:46:44.000Z
|
"""
sentry.tagstore.legacy
~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2017 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
from .backend import LegacyTagStorage # NOQA
| 23.909091
| 75
| 0.695817
|
4a164834d9678a949efc55e3fc62a45a5fbc2936
| 32,625
|
py
|
Python
|
synapse/storage/persist_events.py
|
schildbach/synapse
|
3aa36b782c715a2d3c965d5d9b37f7a49a5f17e1
|
[
"Apache-2.0"
] | 1
|
2020-10-10T13:23:05.000Z
|
2020-10-10T13:23:05.000Z
|
synapse/storage/persist_events.py
|
schildbach/synapse
|
3aa36b782c715a2d3c965d5d9b37f7a49a5f17e1
|
[
"Apache-2.0"
] | null | null | null |
synapse/storage/persist_events.py
|
schildbach/synapse
|
3aa36b782c715a2d3c965d5d9b37f7a49a5f17e1
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright 2014-2016 OpenMarket Ltd
# Copyright 2018-2019 New Vector Ltd
# Copyright 2019 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
import logging
from collections import deque, namedtuple
from typing import Iterable, List, Optional, Set, Tuple
from prometheus_client import Counter, Histogram
from twisted.internet import defer
from synapse.api.constants import EventTypes, Membership
from synapse.events import EventBase
from synapse.events.snapshot import EventContext
from synapse.logging.context import PreserveLoggingContext, make_deferred_yieldable
from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.storage.data_stores import DataStores
from synapse.storage.data_stores.main.events import DeltaState
from synapse.types import StateMap
from synapse.util.async_helpers import ObservableDeferred
from synapse.util.metrics import Measure
logger = logging.getLogger(__name__)
# The number of times we are recalculating the current state
state_delta_counter = Counter("synapse_storage_events_state_delta", "")
# The number of times we are recalculating state when there is only a
# single forward extremity
state_delta_single_event_counter = Counter(
"synapse_storage_events_state_delta_single_event", ""
)
# The number of times we are recalculating state when we could have reasonably
# calculated the delta when we calculated the state for an event we were
# persisting.
state_delta_reuse_delta_counter = Counter(
"synapse_storage_events_state_delta_reuse_delta", ""
)
# The number of forward extremities for each new event.
forward_extremities_counter = Histogram(
"synapse_storage_events_forward_extremities_persisted",
"Number of forward extremities for each new event",
buckets=(1, 2, 3, 5, 7, 10, 15, 20, 50, 100, 200, 500, "+Inf"),
)
# The number of stale forward extremities for each new event. Stale extremities
# are those that were in the previous set of extremities as well as the new.
stale_forward_extremities_counter = Histogram(
"synapse_storage_events_stale_forward_extremities_persisted",
"Number of unchanged forward extremities for each new event",
buckets=(0, 1, 2, 3, 5, 7, 10, 15, 20, 50, 100, 200, 500, "+Inf"),
)
class _EventPeristenceQueue(object):
"""Queues up events so that they can be persisted in bulk with only one
concurrent transaction per room.
"""
_EventPersistQueueItem = namedtuple(
"_EventPersistQueueItem", ("events_and_contexts", "backfilled", "deferred")
)
def __init__(self):
self._event_persist_queues = {}
self._currently_persisting_rooms = set()
def add_to_queue(self, room_id, events_and_contexts, backfilled):
"""Add events to the queue, with the given persist_event options.
NB: due to the normal usage pattern of this method, it does *not*
follow the synapse logcontext rules, and leaves the logcontext in
place whether or not the returned deferred is ready.
Args:
room_id (str):
events_and_contexts (list[(EventBase, EventContext)]):
backfilled (bool):
Returns:
defer.Deferred: a deferred which will resolve once the events are
persisted. Runs its callbacks *without* a logcontext.
"""
queue = self._event_persist_queues.setdefault(room_id, deque())
if queue:
# if the last item in the queue has the same `backfilled` setting,
# we can just add these new events to that item.
end_item = queue[-1]
if end_item.backfilled == backfilled:
end_item.events_and_contexts.extend(events_and_contexts)
return end_item.deferred.observe()
deferred = ObservableDeferred(defer.Deferred(), consumeErrors=True)
queue.append(
self._EventPersistQueueItem(
events_and_contexts=events_and_contexts,
backfilled=backfilled,
deferred=deferred,
)
)
return deferred.observe()
def handle_queue(self, room_id, per_item_callback):
"""Attempts to handle the queue for a room if not already being handled.
The given callback will be invoked for each item in the queue,
of type _EventPersistQueueItem. The per_item_callback will continuously
be called with new items, unless the queue becomes empty. The return
value of the function will be given to the deferreds waiting on the item,
exceptions will be passed to the deferreds as well.
This function should therefore be called whenever anything is added
to the queue.
If another callback is currently handling the queue then it will not be
invoked.
"""
if room_id in self._currently_persisting_rooms:
return
self._currently_persisting_rooms.add(room_id)
async def handle_queue_loop():
try:
queue = self._get_drainining_queue(room_id)
for item in queue:
try:
ret = await per_item_callback(item)
except Exception:
with PreserveLoggingContext():
item.deferred.errback()
else:
with PreserveLoggingContext():
item.deferred.callback(ret)
finally:
queue = self._event_persist_queues.pop(room_id, None)
if queue:
self._event_persist_queues[room_id] = queue
self._currently_persisting_rooms.discard(room_id)
# set handle_queue_loop off in the background
run_as_background_process("persist_events", handle_queue_loop)
def _get_drainining_queue(self, room_id):
queue = self._event_persist_queues.setdefault(room_id, deque())
try:
while True:
yield queue.popleft()
except IndexError:
# Queue has been drained.
pass
class EventsPersistenceStorage(object):
"""High level interface for handling persisting newly received events.
Takes care of batching up events by room, and calculating the necessary
current state and forward extremity changes.
"""
def __init__(self, hs, stores: DataStores):
# We ultimately want to split out the state store from the main store,
# so we use separate variables here even though they point to the same
# store for now.
self.main_store = stores.main
self.state_store = stores.state
self.persist_events_store = stores.persist_events
self._clock = hs.get_clock()
self.is_mine_id = hs.is_mine_id
self._event_persist_queue = _EventPeristenceQueue()
self._state_resolution_handler = hs.get_state_resolution_handler()
async def persist_events(
self,
events_and_contexts: List[Tuple[EventBase, EventContext]],
backfilled: bool = False,
) -> int:
"""
Write events to the database
Args:
events_and_contexts: list of tuples of (event, context)
backfilled: Whether the results are retrieved from federation
via backfill or not. Used to determine if they're "new" events
which might update the current state etc.
Returns:
the stream ordering of the latest persisted event
"""
partitioned = {}
for event, ctx in events_and_contexts:
partitioned.setdefault(event.room_id, []).append((event, ctx))
deferreds = []
for room_id, evs_ctxs in partitioned.items():
d = self._event_persist_queue.add_to_queue(
room_id, evs_ctxs, backfilled=backfilled
)
deferreds.append(d)
for room_id in partitioned:
self._maybe_start_persisting(room_id)
await make_deferred_yieldable(
defer.gatherResults(deferreds, consumeErrors=True)
)
return self.main_store.get_current_events_token()
async def persist_event(
self, event: EventBase, context: EventContext, backfilled: bool = False
) -> Tuple[int, int]:
"""
Returns:
The stream ordering of `event`, and the stream ordering of the
latest persisted event
"""
deferred = self._event_persist_queue.add_to_queue(
event.room_id, [(event, context)], backfilled=backfilled
)
self._maybe_start_persisting(event.room_id)
await make_deferred_yieldable(deferred)
max_persisted_id = self.main_store.get_current_events_token()
return (event.internal_metadata.stream_ordering, max_persisted_id)
def _maybe_start_persisting(self, room_id: str):
async def persisting_queue(item):
with Measure(self._clock, "persist_events"):
await self._persist_events(
item.events_and_contexts, backfilled=item.backfilled
)
self._event_persist_queue.handle_queue(room_id, persisting_queue)
async def _persist_events(
self,
events_and_contexts: List[Tuple[EventBase, EventContext]],
backfilled: bool = False,
):
"""Calculates the change to current state and forward extremities, and
persists the given events along with those updates.
"""
if not events_and_contexts:
return
chunks = [
events_and_contexts[x : x + 100]
for x in range(0, len(events_and_contexts), 100)
]
for chunk in chunks:
# We can't easily parallelize these since different chunks
# might contain the same event. :(
# NB: Assumes that we are only persisting events for one room
# at a time.
# map room_id->list[event_ids] giving the new forward
# extremities in each room
new_forward_extremeties = {}
# map room_id->(type,state_key)->event_id tracking the full
# state in each room after adding these events.
# This is simply used to prefill the get_current_state_ids
# cache
current_state_for_room = {}
# map room_id->(to_delete, to_insert) where to_delete is a list
# of type/state keys to remove from current state, and to_insert
# is a map (type,key)->event_id giving the state delta in each
# room
state_delta_for_room = {}
# Set of remote users which were in rooms the server has left. We
# should check if we still share any rooms and if not we mark their
# device lists as stale.
potentially_left_users = set() # type: Set[str]
if not backfilled:
with Measure(self._clock, "_calculate_state_and_extrem"):
# Work out the new "current state" for each room.
# We do this by working out what the new extremities are and then
# calculating the state from that.
events_by_room = {}
for event, context in chunk:
events_by_room.setdefault(event.room_id, []).append(
(event, context)
)
for room_id, ev_ctx_rm in events_by_room.items():
latest_event_ids = await self.main_store.get_latest_event_ids_in_room(
room_id
)
new_latest_event_ids = await self._calculate_new_extremities(
room_id, ev_ctx_rm, latest_event_ids
)
latest_event_ids = set(latest_event_ids)
if new_latest_event_ids == latest_event_ids:
# No change in extremities, so no change in state
continue
# there should always be at least one forward extremity.
# (except during the initial persistence of the send_join
# results, in which case there will be no existing
# extremities, so we'll `continue` above and skip this bit.)
assert new_latest_event_ids, "No forward extremities left!"
new_forward_extremeties[room_id] = new_latest_event_ids
len_1 = (
len(latest_event_ids) == 1
and len(new_latest_event_ids) == 1
)
if len_1:
all_single_prev_not_state = all(
len(event.prev_event_ids()) == 1
and not event.is_state()
for event, ctx in ev_ctx_rm
)
# Don't bother calculating state if they're just
# a long chain of single ancestor non-state events.
if all_single_prev_not_state:
continue
state_delta_counter.inc()
if len(new_latest_event_ids) == 1:
state_delta_single_event_counter.inc()
# This is a fairly handwavey check to see if we could
# have guessed what the delta would have been when
# processing one of these events.
# What we're interested in is if the latest extremities
# were the same when we created the event as they are
# now. When this server creates a new event (as opposed
# to receiving it over federation) it will use the
# forward extremities as the prev_events, so we can
# guess this by looking at the prev_events and checking
# if they match the current forward extremities.
for ev, _ in ev_ctx_rm:
prev_event_ids = set(ev.prev_event_ids())
if latest_event_ids == prev_event_ids:
state_delta_reuse_delta_counter.inc()
break
logger.debug("Calculating state delta for room %s", room_id)
with Measure(
self._clock, "persist_events.get_new_state_after_events"
):
res = await self._get_new_state_after_events(
room_id,
ev_ctx_rm,
latest_event_ids,
new_latest_event_ids,
)
current_state, delta_ids = res
# If either are not None then there has been a change,
# and we need to work out the delta (or use that
# given)
delta = None
if delta_ids is not None:
# If there is a delta we know that we've
# only added or replaced state, never
# removed keys entirely.
delta = DeltaState([], delta_ids)
elif current_state is not None:
with Measure(
self._clock, "persist_events.calculate_state_delta"
):
delta = await self._calculate_state_delta(
room_id, current_state
)
if delta:
# If we have a change of state then lets check
# whether we're actually still a member of the room,
# or if our last user left. If we're no longer in
# the room then we delete the current state and
# extremities.
is_still_joined = await self._is_server_still_joined(
room_id,
ev_ctx_rm,
delta,
current_state,
potentially_left_users,
)
if not is_still_joined:
logger.info("Server no longer in room %s", room_id)
latest_event_ids = []
current_state = {}
delta.no_longer_in_room = True
state_delta_for_room[room_id] = delta
# If we have the current_state then lets prefill
# the cache with it.
if current_state is not None:
current_state_for_room[room_id] = current_state
await self.persist_events_store._persist_events_and_state_updates(
chunk,
current_state_for_room=current_state_for_room,
state_delta_for_room=state_delta_for_room,
new_forward_extremeties=new_forward_extremeties,
backfilled=backfilled,
)
await self._handle_potentially_left_users(potentially_left_users)
async def _calculate_new_extremities(
self,
room_id: str,
event_contexts: List[Tuple[EventBase, EventContext]],
latest_event_ids: List[str],
):
"""Calculates the new forward extremities for a room given events to
persist.
Assumes that we are only persisting events for one room at a time.
"""
# we're only interested in new events which aren't outliers and which aren't
# being rejected.
new_events = [
event
for event, ctx in event_contexts
if not event.internal_metadata.is_outlier()
and not ctx.rejected
and not event.internal_metadata.is_soft_failed()
]
latest_event_ids = set(latest_event_ids)
# start with the existing forward extremities
result = set(latest_event_ids)
# add all the new events to the list
result.update(event.event_id for event in new_events)
# Now remove all events which are prev_events of any of the new events
result.difference_update(
e_id for event in new_events for e_id in event.prev_event_ids()
)
# Remove any events which are prev_events of any existing events.
existing_prevs = await self.persist_events_store._get_events_which_are_prevs(
result
)
result.difference_update(existing_prevs)
# Finally handle the case where the new events have soft-failed prev
# events. If they do we need to remove them and their prev events,
# otherwise we end up with dangling extremities.
existing_prevs = await self.persist_events_store._get_prevs_before_rejected(
e_id for event in new_events for e_id in event.prev_event_ids()
)
result.difference_update(existing_prevs)
# We only update metrics for events that change forward extremities
# (e.g. we ignore backfill/outliers/etc)
if result != latest_event_ids:
forward_extremities_counter.observe(len(result))
stale = latest_event_ids & result
stale_forward_extremities_counter.observe(len(stale))
return result
async def _get_new_state_after_events(
self,
room_id: str,
events_context: List[Tuple[EventBase, EventContext]],
old_latest_event_ids: Iterable[str],
new_latest_event_ids: Iterable[str],
) -> Tuple[Optional[StateMap[str]], Optional[StateMap[str]]]:
"""Calculate the current state dict after adding some new events to
a room
Args:
room_id (str):
room to which the events are being added. Used for logging etc
events_context (list[(EventBase, EventContext)]):
events and contexts which are being added to the room
old_latest_event_ids (iterable[str]):
the old forward extremities for the room.
new_latest_event_ids (iterable[str]):
the new forward extremities for the room.
Returns:
Returns a tuple of two state maps, the first being the full new current
state and the second being the delta to the existing current state.
If both are None then there has been no change.
If there has been a change then we only return the delta if it's
already been calculated. Conversely if we do know the delta then
the new current state is only returned if we've already calculated
it.
"""
# map from state_group to ((type, key) -> event_id) state map
state_groups_map = {}
# Map from (prev state group, new state group) -> delta state dict
state_group_deltas = {}
for ev, ctx in events_context:
if ctx.state_group is None:
# This should only happen for outlier events.
if not ev.internal_metadata.is_outlier():
raise Exception(
"Context for new event %s has no state "
"group" % (ev.event_id,)
)
continue
if ctx.state_group in state_groups_map:
continue
# We're only interested in pulling out state that has already
# been cached in the context. We'll pull stuff out of the DB later
# if necessary.
current_state_ids = ctx.get_cached_current_state_ids()
if current_state_ids is not None:
state_groups_map[ctx.state_group] = current_state_ids
if ctx.prev_group:
state_group_deltas[(ctx.prev_group, ctx.state_group)] = ctx.delta_ids
# We need to map the event_ids to their state groups. First, let's
# check if the event is one we're persisting, in which case we can
# pull the state group from its context.
# Otherwise we need to pull the state group from the database.
# Set of events we need to fetch groups for. (We know none of the old
# extremities are going to be in events_context).
missing_event_ids = set(old_latest_event_ids)
event_id_to_state_group = {}
for event_id in new_latest_event_ids:
# First search in the list of new events we're adding.
for ev, ctx in events_context:
if event_id == ev.event_id and ctx.state_group is not None:
event_id_to_state_group[event_id] = ctx.state_group
break
else:
# If we couldn't find it, then we'll need to pull
# the state from the database
missing_event_ids.add(event_id)
if missing_event_ids:
# Now pull out the state groups for any missing events from DB
event_to_groups = await self.main_store._get_state_group_for_events(
missing_event_ids
)
event_id_to_state_group.update(event_to_groups)
# State groups of old_latest_event_ids
old_state_groups = {
event_id_to_state_group[evid] for evid in old_latest_event_ids
}
# State groups of new_latest_event_ids
new_state_groups = {
event_id_to_state_group[evid] for evid in new_latest_event_ids
}
# If the old and new groups are the same then we don't need to do
# anything.
if old_state_groups == new_state_groups:
return None, None
if len(new_state_groups) == 1 and len(old_state_groups) == 1:
# If we're going from one state group to another, lets check if
# we have a delta for that transition. If we do then we can just
# return that.
new_state_group = next(iter(new_state_groups))
old_state_group = next(iter(old_state_groups))
delta_ids = state_group_deltas.get((old_state_group, new_state_group), None)
if delta_ids is not None:
# We have a delta from the existing to new current state,
# so lets just return that. If we happen to already have
# the current state in memory then lets also return that,
# but it doesn't matter if we don't.
new_state = state_groups_map.get(new_state_group)
return new_state, delta_ids
# Now that we have calculated new_state_groups we need to get
# their state IDs so we can resolve to a single state set.
missing_state = new_state_groups - set(state_groups_map)
if missing_state:
group_to_state = await self.state_store._get_state_for_groups(missing_state)
state_groups_map.update(group_to_state)
if len(new_state_groups) == 1:
# If there is only one state group, then we know what the current
# state is.
return state_groups_map[new_state_groups.pop()], None
# Ok, we need to defer to the state handler to resolve our state sets.
state_groups = {sg: state_groups_map[sg] for sg in new_state_groups}
events_map = {ev.event_id: ev for ev, _ in events_context}
# We need to get the room version, which is in the create event.
# Normally that'd be in the database, but it's also possible that we're
# currently trying to persist it.
room_version = None
for ev, _ in events_context:
if ev.type == EventTypes.Create and ev.state_key == "":
room_version = ev.content.get("room_version", "1")
break
if not room_version:
room_version = await self.main_store.get_room_version_id(room_id)
logger.debug("calling resolve_state_groups from preserve_events")
# Avoid a circular import.
from synapse.state import StateResolutionStore
res = await self._state_resolution_handler.resolve_state_groups(
room_id,
room_version,
state_groups,
events_map,
state_res_store=StateResolutionStore(self.main_store),
)
return res.state, None
async def _calculate_state_delta(
self, room_id: str, current_state: StateMap[str]
) -> DeltaState:
"""Calculate the new state deltas for a room.
Assumes that we are only persisting events for one room at a time.
"""
existing_state = await self.main_store.get_current_state_ids(room_id)
to_delete = [key for key in existing_state if key not in current_state]
to_insert = {
key: ev_id
for key, ev_id in current_state.items()
if ev_id != existing_state.get(key)
}
return DeltaState(to_delete=to_delete, to_insert=to_insert)
async def _is_server_still_joined(
self,
room_id: str,
ev_ctx_rm: List[Tuple[EventBase, EventContext]],
delta: DeltaState,
current_state: Optional[StateMap[str]],
potentially_left_users: Set[str],
) -> bool:
"""Check if the server will still be joined after the given events have
been persisted.
Args:
room_id
ev_ctx_rm
delta: The delta of current state between what is in the database
and what the new current state will be.
current_state: The new current state if it already been calculated,
otherwise None.
potentially_left_users: If the server has left the room, then joined
remote users will be added to this set to indicate that the
server may no longer be sharing a room with them.
"""
if not any(
self.is_mine_id(state_key)
for typ, state_key in itertools.chain(delta.to_delete, delta.to_insert)
if typ == EventTypes.Member
):
# There have been no changes to membership of our users, so nothing
# has changed and we assume we're still in the room.
return True
# Check if any of the given events are a local join that appear in the
# current state
events_to_check = [] # Event IDs that aren't an event we're persisting
for (typ, state_key), event_id in delta.to_insert.items():
if typ != EventTypes.Member or not self.is_mine_id(state_key):
continue
for event, _ in ev_ctx_rm:
if event_id == event.event_id:
if event.membership == Membership.JOIN:
return True
# The event is not in `ev_ctx_rm`, so we need to pull it out of
# the DB.
events_to_check.append(event_id)
# Check if any of the changes that we don't have events for are joins.
if events_to_check:
rows = await self.main_store.get_membership_from_event_ids(events_to_check)
is_still_joined = any(row["membership"] == Membership.JOIN for row in rows)
if is_still_joined:
return True
# None of the new state events are local joins, so we check the database
# to see if there are any other local users in the room. We ignore users
# whose state has changed as we've already checked their new state above.
users_to_ignore = [
state_key
for typ, state_key in itertools.chain(delta.to_insert, delta.to_delete)
if typ == EventTypes.Member and self.is_mine_id(state_key)
]
if await self.main_store.is_local_host_in_room_ignoring_users(
room_id, users_to_ignore
):
return True
# The server will leave the room, so we go and find out which remote
# users will still be joined when we leave.
if current_state is None:
current_state = await self.main_store.get_current_state_ids(room_id)
current_state = dict(current_state)
for key in delta.to_delete:
current_state.pop(key, None)
current_state.update(delta.to_insert)
remote_event_ids = [
event_id
for (typ, state_key,), event_id in current_state.items()
if typ == EventTypes.Member and not self.is_mine_id(state_key)
]
rows = await self.main_store.get_membership_from_event_ids(remote_event_ids)
potentially_left_users.update(
row["user_id"] for row in rows if row["membership"] == Membership.JOIN
)
return False
async def _handle_potentially_left_users(self, user_ids: Set[str]):
"""Given a set of remote users check if the server still shares a room with
them. If not then mark those users' device cache as stale.
"""
if not user_ids:
return
joined_users = await self.main_store.get_users_server_still_shares_room_with(
user_ids
)
left_users = user_ids - joined_users
for user_id in left_users:
await self.main_store.mark_remote_user_device_list_as_unsubscribed(user_id)
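# An illustrative, standalone sketch (not part of Synapse) of the forward
# extremity set arithmetic implemented in
# EventsPersistenceStorage._calculate_new_extremities above, using plain sets and
# hypothetical event IDs instead of database lookups. The real method also prunes
# prev events of the *existing* extremities and soft-failed prev events via the
# store, which is omitted here.
if __name__ == "__main__":
    # Existing forward extremities of a room (hypothetical IDs).
    latest_event_ids = {"$A", "$B"}
    # New events being persisted, mapped to their prev_events (hypothetical).
    new_event_prevs = {"$C": {"$A"}, "$D": {"$C"}}

    # Start from the existing extremities and add every new event...
    result = set(latest_event_ids) | set(new_event_prevs)
    # ...then remove anything that is a prev_event of one of the new events.
    result.difference_update(*new_event_prevs.values())
    # Only "$B" (untouched) and "$D" (the tip of the new chain) remain.
    assert result == {"$B", "$D"}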
| 41.56051
| 94
| 0.599479
|
4a164943225ecb64c0a3ab746a0d066bbdb0712e
| 37,252
|
py
|
Python
|
google/cloud/aiplatform_v1beta1/services/endpoint_service/async_client.py
|
Nawod/python-aiplatform
|
ffc70d148868489161797cc25a63298dda322d5f
|
[
"Apache-2.0"
] | 1
|
2021-11-27T00:41:48.000Z
|
2021-11-27T00:41:48.000Z
|
google/cloud/aiplatform_v1beta1/services/endpoint_service/async_client.py
|
orionnye/python-aiplatform
|
e3ea683bf754832340853a15bdb0a0662500a70f
|
[
"Apache-2.0"
] | null | null | null |
google/cloud/aiplatform_v1beta1/services/endpoint_service/async_client.py
|
orionnye/python-aiplatform
|
e3ea683bf754832340853a15bdb0a0662500a70f
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
import functools
import re
from typing import Dict, Sequence, Tuple, Type, Union
import pkg_resources
import google.api_core.client_options as ClientOptions # type: ignore
from google.api_core import exceptions as core_exceptions # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.oauth2 import service_account # type: ignore
from google.api_core import operation as gac_operation # type: ignore
from google.api_core import operation_async # type: ignore
from google.cloud.aiplatform_v1beta1.services.endpoint_service import pagers
from google.cloud.aiplatform_v1beta1.types import encryption_spec
from google.cloud.aiplatform_v1beta1.types import endpoint
from google.cloud.aiplatform_v1beta1.types import endpoint as gca_endpoint
from google.cloud.aiplatform_v1beta1.types import endpoint_service
from google.cloud.aiplatform_v1beta1.types import operation as gca_operation
from google.protobuf import empty_pb2 # type: ignore
from google.protobuf import field_mask_pb2 # type: ignore
from google.protobuf import timestamp_pb2 # type: ignore
from .transports.base import EndpointServiceTransport, DEFAULT_CLIENT_INFO
from .transports.grpc_asyncio import EndpointServiceGrpcAsyncIOTransport
from .client import EndpointServiceClient
class EndpointServiceAsyncClient:
"""A service for managing Vertex AI's Endpoints."""
_client: EndpointServiceClient
DEFAULT_ENDPOINT = EndpointServiceClient.DEFAULT_ENDPOINT
DEFAULT_MTLS_ENDPOINT = EndpointServiceClient.DEFAULT_MTLS_ENDPOINT
endpoint_path = staticmethod(EndpointServiceClient.endpoint_path)
parse_endpoint_path = staticmethod(EndpointServiceClient.parse_endpoint_path)
model_path = staticmethod(EndpointServiceClient.model_path)
parse_model_path = staticmethod(EndpointServiceClient.parse_model_path)
model_deployment_monitoring_job_path = staticmethod(
EndpointServiceClient.model_deployment_monitoring_job_path
)
parse_model_deployment_monitoring_job_path = staticmethod(
EndpointServiceClient.parse_model_deployment_monitoring_job_path
)
network_path = staticmethod(EndpointServiceClient.network_path)
parse_network_path = staticmethod(EndpointServiceClient.parse_network_path)
common_billing_account_path = staticmethod(
EndpointServiceClient.common_billing_account_path
)
parse_common_billing_account_path = staticmethod(
EndpointServiceClient.parse_common_billing_account_path
)
common_folder_path = staticmethod(EndpointServiceClient.common_folder_path)
parse_common_folder_path = staticmethod(
EndpointServiceClient.parse_common_folder_path
)
common_organization_path = staticmethod(
EndpointServiceClient.common_organization_path
)
parse_common_organization_path = staticmethod(
EndpointServiceClient.parse_common_organization_path
)
common_project_path = staticmethod(EndpointServiceClient.common_project_path)
parse_common_project_path = staticmethod(
EndpointServiceClient.parse_common_project_path
)
common_location_path = staticmethod(EndpointServiceClient.common_location_path)
parse_common_location_path = staticmethod(
EndpointServiceClient.parse_common_location_path
)
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
info.
Args:
info (dict): The service account private key info.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
EndpointServiceAsyncClient: The constructed client.
"""
return EndpointServiceClient.from_service_account_info.__func__(EndpointServiceAsyncClient, info, *args, **kwargs) # type: ignore
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
EndpointServiceAsyncClient: The constructed client.
"""
return EndpointServiceClient.from_service_account_file.__func__(EndpointServiceAsyncClient, filename, *args, **kwargs) # type: ignore
from_service_account_json = from_service_account_file
@property
def transport(self) -> EndpointServiceTransport:
"""Returns the transport used by the client instance.
Returns:
EndpointServiceTransport: The transport used by the client instance.
"""
return self._client.transport
get_transport_class = functools.partial(
type(EndpointServiceClient).get_transport_class, type(EndpointServiceClient)
)
def __init__(
self,
*,
credentials: ga_credentials.Credentials = None,
transport: Union[str, EndpointServiceTransport] = "grpc_asyncio",
client_options: ClientOptions = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiates the endpoint service client.
Args:
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
transport (Union[str, ~.EndpointServiceTransport]): The
transport to use. If set to None, a transport is chosen
automatically.
client_options (ClientOptions): Custom options for the client. It
won't take effect if a ``transport`` instance is provided.
(1) The ``api_endpoint`` property can be used to override the
default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
environment variable can also be used to override the endpoint:
"always" (always use the default mTLS endpoint), "never" (always
use the default regular endpoint) and "auto" (auto switch to the
default mTLS endpoint if client certificate is present, this is
the default value). However, the ``api_endpoint`` property takes
precedence if provided.
(2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
is "true", then the ``client_cert_source`` property can be used
to provide client certificate for mutual TLS transport. If
not provided, the default SSL client certificate will be used if
present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
set, no client certificate will be used.
Raises:
google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport
creation failed for any reason.
"""
self._client = EndpointServiceClient(
credentials=credentials,
transport=transport,
client_options=client_options,
client_info=client_info,
)
async def create_endpoint(
self,
request: endpoint_service.CreateEndpointRequest = None,
*,
parent: str = None,
endpoint: gca_endpoint.Endpoint = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operation_async.AsyncOperation:
r"""Creates an Endpoint.
Args:
request (:class:`google.cloud.aiplatform_v1beta1.types.CreateEndpointRequest`):
The request object. Request message for
[EndpointService.CreateEndpoint][google.cloud.aiplatform.v1beta1.EndpointService.CreateEndpoint].
parent (:class:`str`):
Required. The resource name of the Location to create
the Endpoint in. Format:
``projects/{project}/locations/{location}``
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
endpoint (:class:`google.cloud.aiplatform_v1beta1.types.Endpoint`):
Required. The Endpoint to create.
This corresponds to the ``endpoint`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.api_core.operation_async.AsyncOperation:
An object representing a long-running operation.
The result type for the operation will be :class:`google.cloud.aiplatform_v1beta1.types.Endpoint` Models are deployed into it, and afterwards Endpoint is called to obtain
predictions and explanations.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent, endpoint])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = endpoint_service.CreateEndpointRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
if endpoint is not None:
request.endpoint = endpoint
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.create_endpoint,
default_timeout=5.0,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Wrap the response in an operation future.
response = operation_async.from_gapic(
response,
self._client._transport.operations_client,
gca_endpoint.Endpoint,
metadata_type=endpoint_service.CreateEndpointOperationMetadata,
)
# Done; return the response.
return response
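    # Illustrative usage (an assumption, not part of the generated client): the
    # coroutine returns a long-running operation wrapper, so a caller would
    # typically await the call and then await its result, e.g.
    #
    #   client = EndpointServiceAsyncClient()
    #   operation = await client.create_endpoint(
    #       parent="projects/my-project/locations/us-central1",          # hypothetical
    #       endpoint=gca_endpoint.Endpoint(display_name="my-endpoint"),  # hypothetical
    #   )
    #   created_endpoint = await operation.result()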
async def get_endpoint(
self,
request: endpoint_service.GetEndpointRequest = None,
*,
name: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> endpoint.Endpoint:
r"""Gets an Endpoint.
Args:
request (:class:`google.cloud.aiplatform_v1beta1.types.GetEndpointRequest`):
The request object. Request message for
[EndpointService.GetEndpoint][google.cloud.aiplatform.v1beta1.EndpointService.GetEndpoint]
name (:class:`str`):
Required. The name of the Endpoint resource. Format:
``projects/{project}/locations/{location}/endpoints/{endpoint}``
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.aiplatform_v1beta1.types.Endpoint:
Models are deployed into it, and
afterwards Endpoint is called to obtain
predictions and explanations.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = endpoint_service.GetEndpointRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.get_endpoint,
default_timeout=5.0,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
async def list_endpoints(
self,
request: endpoint_service.ListEndpointsRequest = None,
*,
parent: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.ListEndpointsAsyncPager:
r"""Lists Endpoints in a Location.
Args:
request (:class:`google.cloud.aiplatform_v1beta1.types.ListEndpointsRequest`):
The request object. Request message for
[EndpointService.ListEndpoints][google.cloud.aiplatform.v1beta1.EndpointService.ListEndpoints].
parent (:class:`str`):
Required. The resource name of the Location from which
to list the Endpoints. Format:
``projects/{project}/locations/{location}``
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.aiplatform_v1beta1.services.endpoint_service.pagers.ListEndpointsAsyncPager:
Response message for
[EndpointService.ListEndpoints][google.cloud.aiplatform.v1beta1.EndpointService.ListEndpoints].
Iterating over this object will yield results and
resolve additional pages automatically.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = endpoint_service.ListEndpointsRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.list_endpoints,
default_timeout=5.0,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# This method is paged; wrap the response in a pager, which provides
# an `__aiter__` convenience method.
response = pagers.ListEndpointsAsyncPager(
method=rpc, request=request, response=response, metadata=metadata,
)
# Done; return the response.
return response
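    # Illustrative usage (an assumption, not part of the generated client): the
    # returned pager resolves additional pages automatically, so a caller would
    # typically iterate it asynchronously, e.g.
    #
    #   pager = await client.list_endpoints(
    #       parent="projects/my-project/locations/us-central1",  # hypothetical
    #   )
    #   async for endpoint in pager:
    #       print(endpoint.name)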
async def update_endpoint(
self,
request: endpoint_service.UpdateEndpointRequest = None,
*,
endpoint: gca_endpoint.Endpoint = None,
update_mask: field_mask_pb2.FieldMask = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> gca_endpoint.Endpoint:
r"""Updates an Endpoint.
Args:
request (:class:`google.cloud.aiplatform_v1beta1.types.UpdateEndpointRequest`):
The request object. Request message for
[EndpointService.UpdateEndpoint][google.cloud.aiplatform.v1beta1.EndpointService.UpdateEndpoint].
endpoint (:class:`google.cloud.aiplatform_v1beta1.types.Endpoint`):
Required. The Endpoint which replaces
the resource on the server.
This corresponds to the ``endpoint`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`):
Required. The update mask applies to the resource. See
[google.protobuf.FieldMask][google.protobuf.FieldMask].
This corresponds to the ``update_mask`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.aiplatform_v1beta1.types.Endpoint:
Models are deployed into it, and
afterwards Endpoint is called to obtain
predictions and explanations.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([endpoint, update_mask])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = endpoint_service.UpdateEndpointRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if endpoint is not None:
request.endpoint = endpoint
if update_mask is not None:
request.update_mask = update_mask
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.update_endpoint,
default_timeout=5.0,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata(
(("endpoint.name", request.endpoint.name),)
),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
async def delete_endpoint(
self,
request: endpoint_service.DeleteEndpointRequest = None,
*,
name: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operation_async.AsyncOperation:
r"""Deletes an Endpoint.
Args:
request (:class:`google.cloud.aiplatform_v1beta1.types.DeleteEndpointRequest`):
The request object. Request message for
[EndpointService.DeleteEndpoint][google.cloud.aiplatform.v1beta1.EndpointService.DeleteEndpoint].
name (:class:`str`):
Required. The name of the Endpoint resource to be
deleted. Format:
``projects/{project}/locations/{location}/endpoints/{endpoint}``
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.api_core.operation_async.AsyncOperation:
An object representing a long-running operation.
The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated
empty messages in your APIs. A typical example is to
use it as the request or the response type of an API
method. For instance:
service Foo {
rpc Bar(google.protobuf.Empty) returns
(google.protobuf.Empty);
}
The JSON representation for Empty is empty JSON
object {}.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = endpoint_service.DeleteEndpointRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.delete_endpoint,
default_timeout=5.0,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Wrap the response in an operation future.
response = operation_async.from_gapic(
response,
self._client._transport.operations_client,
empty_pb2.Empty,
metadata_type=gca_operation.DeleteOperationMetadata,
)
# Done; return the response.
return response
async def deploy_model(
self,
request: endpoint_service.DeployModelRequest = None,
*,
endpoint: str = None,
deployed_model: gca_endpoint.DeployedModel = None,
traffic_split: Sequence[
endpoint_service.DeployModelRequest.TrafficSplitEntry
] = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operation_async.AsyncOperation:
r"""Deploys a Model into this Endpoint, creating a
DeployedModel within it.
Args:
request (:class:`google.cloud.aiplatform_v1beta1.types.DeployModelRequest`):
The request object. Request message for
[EndpointService.DeployModel][google.cloud.aiplatform.v1beta1.EndpointService.DeployModel].
endpoint (:class:`str`):
Required. The name of the Endpoint resource into which
to deploy a Model. Format:
``projects/{project}/locations/{location}/endpoints/{endpoint}``
This corresponds to the ``endpoint`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
deployed_model (:class:`google.cloud.aiplatform_v1beta1.types.DeployedModel`):
Required. The DeployedModel to be created within the
Endpoint. Note that
[Endpoint.traffic_split][google.cloud.aiplatform.v1beta1.Endpoint.traffic_split]
must be updated for the DeployedModel to start receiving
traffic, either as part of this call, or via
[EndpointService.UpdateEndpoint][google.cloud.aiplatform.v1beta1.EndpointService.UpdateEndpoint].
This corresponds to the ``deployed_model`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
traffic_split (:class:`Sequence[google.cloud.aiplatform_v1beta1.types.DeployModelRequest.TrafficSplitEntry]`):
A map from a DeployedModel's ID to the percentage of
this Endpoint's traffic that should be forwarded to that
DeployedModel.
If this field is non-empty, then the Endpoint's
[traffic_split][google.cloud.aiplatform.v1beta1.Endpoint.traffic_split]
will be overwritten with it. To refer to the ID of the
Model being deployed by this call, a "0" should be used, and the
actual ID of the new DeployedModel will be filled in its
place by this method. The traffic percentage values must
add up to 100.
If this field is empty, then the Endpoint's
[traffic_split][google.cloud.aiplatform.v1beta1.Endpoint.traffic_split]
is not updated.
This corresponds to the ``traffic_split`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.api_core.operation_async.AsyncOperation:
An object representing a long-running operation.
The result type for the operation will be
:class:`google.cloud.aiplatform_v1beta1.types.DeployModelResponse`
Response message for
[EndpointService.DeployModel][google.cloud.aiplatform.v1beta1.EndpointService.DeployModel].
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([endpoint, deployed_model, traffic_split])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = endpoint_service.DeployModelRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if endpoint is not None:
request.endpoint = endpoint
if deployed_model is not None:
request.deployed_model = deployed_model
if traffic_split:
request.traffic_split.update(traffic_split)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.deploy_model,
default_timeout=5.0,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("endpoint", request.endpoint),)),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Wrap the response in an operation future.
response = operation_async.from_gapic(
response,
self._client._transport.operations_client,
endpoint_service.DeployModelResponse,
metadata_type=endpoint_service.DeployModelOperationMetadata,
)
# Done; return the response.
return response
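# A minimal usage sketch (illustrative only; the resource names and the
# traffic_split value below are placeholder assumptions, not part of the
# generated client):
#
#   client = EndpointServiceAsyncClient()
#   operation = await client.deploy_model(
#       endpoint="projects/my-project/locations/us-central1/endpoints/123",
#       deployed_model=deployed_model,  # a DeployedModel built by the caller
#       traffic_split={"0": 100},       # route all traffic to the new model
#   )
#   result = await operation.result()  # wait for the long-running operation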
async def undeploy_model(
self,
request: endpoint_service.UndeployModelRequest = None,
*,
endpoint: str = None,
deployed_model_id: str = None,
traffic_split: Sequence[
endpoint_service.UndeployModelRequest.TrafficSplitEntry
] = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operation_async.AsyncOperation:
r"""Undeploys a Model from an Endpoint, removing a
DeployedModel from it, and freeing all resources it's
using.
Args:
request (:class:`google.cloud.aiplatform_v1beta1.types.UndeployModelRequest`):
The request object. Request message for
[EndpointService.UndeployModel][google.cloud.aiplatform.v1beta1.EndpointService.UndeployModel].
endpoint (:class:`str`):
Required. The name of the Endpoint resource from which
to undeploy a Model. Format:
``projects/{project}/locations/{location}/endpoints/{endpoint}``
This corresponds to the ``endpoint`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
deployed_model_id (:class:`str`):
Required. The ID of the DeployedModel
to be undeployed from the Endpoint.
This corresponds to the ``deployed_model_id`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
traffic_split (:class:`Sequence[google.cloud.aiplatform_v1beta1.types.UndeployModelRequest.TrafficSplitEntry]`):
If this field is provided, then the Endpoint's
[traffic_split][google.cloud.aiplatform.v1beta1.Endpoint.traffic_split]
will be overwritten with it. If last DeployedModel is
being undeployed from the Endpoint, the
[Endpoint.traffic_split] will always end up empty when
this call returns. A DeployedModel will be successfully
undeployed only if it doesn't have any traffic assigned
to it when this method executes, or if this field
unassigns any traffic to it.
This corresponds to the ``traffic_split`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.api_core.operation_async.AsyncOperation:
An object representing a long-running operation.
The result type for the operation will be
:class:`google.cloud.aiplatform_v1beta1.types.UndeployModelResponse`
Response message for
[EndpointService.UndeployModel][google.cloud.aiplatform.v1beta1.EndpointService.UndeployModel].
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([endpoint, deployed_model_id, traffic_split])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = endpoint_service.UndeployModelRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if endpoint is not None:
request.endpoint = endpoint
if deployed_model_id is not None:
request.deployed_model_id = deployed_model_id
if traffic_split:
request.traffic_split.update(traffic_split)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.undeploy_model,
default_timeout=5.0,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("endpoint", request.endpoint),)),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Wrap the response in an operation future.
response = operation_async.from_gapic(
response,
self._client._transport.operations_client,
endpoint_service.UndeployModelResponse,
metadata_type=endpoint_service.UndeployModelOperationMetadata,
)
# Done; return the response.
return response
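# A parallel usage sketch for undeployment (illustrative only; placeholder
# resource names):
#
#   operation = await client.undeploy_model(
#       endpoint="projects/my-project/locations/us-central1/endpoints/123",
#       deployed_model_id="456",
#   )
#   await operation.result()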
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution(
"google-cloud-aiplatform",
).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
__all__ = ("EndpointServiceAsyncClient",)
| 43.518692
| 186
| 0.641335
|
4a16497bd347fcc6cc5058207cb329d5cdc942ee
| 3,010
|
py
|
Python
|
venv/Lib/site-packages/xero_python/payrolluk/models/leave_types.py
|
RobMilinski/Xero-Starter-Branched-Test
|
c82382e674b34c2336ee164f5a079d6becd1ed46
|
[
"MIT"
] | 77
|
2020-02-16T03:50:18.000Z
|
2022-03-11T03:53:26.000Z
|
venv/Lib/site-packages/xero_python/payrolluk/models/leave_types.py
|
RobMilinski/Xero-Starter-Branched-Test
|
c82382e674b34c2336ee164f5a079d6becd1ed46
|
[
"MIT"
] | 50
|
2020-04-06T10:15:52.000Z
|
2022-03-29T21:27:50.000Z
|
venv/Lib/site-packages/xero_python/payrolluk/models/leave_types.py
|
RobMilinski/Xero-Starter-Branched-Test
|
c82382e674b34c2336ee164f5a079d6becd1ed46
|
[
"MIT"
] | 27
|
2020-06-04T11:16:17.000Z
|
2022-03-19T06:27:36.000Z
|
# coding: utf-8
"""
Xero Payroll UK
This is the Xero Payroll API for orgs in the UK region. # noqa: E501
Contact: api@xero.com
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
from xero_python.models import BaseModel
class LeaveTypes(BaseModel):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
"pagination": "Pagination",
"problem": "Problem",
"leave_types": "list[LeaveType]",
}
attribute_map = {
"pagination": "pagination",
"problem": "problem",
"leave_types": "leaveTypes",
}
def __init__(self, pagination=None, problem=None, leave_types=None): # noqa: E501
"""LeaveTypes - a model defined in OpenAPI""" # noqa: E501
self._pagination = None
self._problem = None
self._leave_types = None
self.discriminator = None
if pagination is not None:
self.pagination = pagination
if problem is not None:
self.problem = problem
if leave_types is not None:
self.leave_types = leave_types
@property
def pagination(self):
"""Gets the pagination of this LeaveTypes. # noqa: E501
:return: The pagination of this LeaveTypes. # noqa: E501
:rtype: Pagination
"""
return self._pagination
@pagination.setter
def pagination(self, pagination):
"""Sets the pagination of this LeaveTypes.
:param pagination: The pagination of this LeaveTypes. # noqa: E501
:type: Pagination
"""
self._pagination = pagination
@property
def problem(self):
"""Gets the problem of this LeaveTypes. # noqa: E501
:return: The problem of this LeaveTypes. # noqa: E501
:rtype: Problem
"""
return self._problem
@problem.setter
def problem(self, problem):
"""Sets the problem of this LeaveTypes.
:param problem: The problem of this LeaveTypes. # noqa: E501
:type: Problem
"""
self._problem = problem
@property
def leave_types(self):
"""Gets the leave_types of this LeaveTypes. # noqa: E501
:return: The leave_types of this LeaveTypes. # noqa: E501
:rtype: list[LeaveType]
"""
return self._leave_types
@leave_types.setter
def leave_types(self, leave_types):
"""Sets the leave_types of this LeaveTypes.
:param leave_types: The leave_types of this LeaveTypes. # noqa: E501
:type: list[LeaveType]
"""
self._leave_types = leave_types
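# A minimal construction sketch (illustrative assumption: LeaveType and
# Pagination are sibling models exposed by the same generated package):
#
#   from xero_python.payrolluk.models import LeaveType, LeaveTypes, Pagination
#   page = LeaveTypes(
#       pagination=Pagination(),
#       leave_types=[LeaveType()],
#   )
#   print(page.leave_types)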
| 24.876033
| 86
| 0.599668
|
4a164a23d8ab43c21c16022f46f531866c88c393
| 73,751
|
py
|
Python
|
taxcalc/calcfunctions.py
|
adamoppenheimer/OG-USA
|
05926fd79ce2fd0e58e2f0088fd461445d862db5
|
[
"CC0-1.0"
] | null | null | null |
taxcalc/calcfunctions.py
|
adamoppenheimer/OG-USA
|
05926fd79ce2fd0e58e2f0088fd461445d862db5
|
[
"CC0-1.0"
] | null | null | null |
taxcalc/calcfunctions.py
|
adamoppenheimer/OG-USA
|
05926fd79ce2fd0e58e2f0088fd461445d862db5
|
[
"CC0-1.0"
] | null | null | null |
"""
Tax-Calculator functions that calculate payroll and individual income taxes.
These functions are imported into the Calculator class.
Note: the cpi_offset policy parameter is the only policy parameter that
does not appear here; it is used in the policy.py file to possibly adjust
the price inflation rate used to index policy parameters (as would be done
in a reform that introduces chained-CPI indexing).
"""
# CODING-STYLE CHECKS:
# pycodestyle calcfunctions.py
# pylint --disable=locally-disabled calcfunctions.py
#
# pylint: disable=too-many-lines
# pylint: disable=invalid-name
# pylint: disable=too-many-arguments
# pylint: disable=too-many-locals
import math
import copy
import numpy as np
from taxcalc.decorators import iterate_jit, JIT
def BenefitPrograms(calc):
"""
Calculate total government cost and consumption value of benefits
delivered by non-repealed benefit programs.
"""
# zero out benefits delivered by repealed programs
zero = np.zeros(calc.array_len)
if calc.policy_param('BEN_housing_repeal'):
calc.array('housing_ben', zero)
if calc.policy_param('BEN_ssi_repeal'):
calc.array('ssi_ben', zero)
if calc.policy_param('BEN_snap_repeal'):
calc.array('snap_ben', zero)
if calc.policy_param('BEN_tanf_repeal'):
calc.array('tanf_ben', zero)
if calc.policy_param('BEN_vet_repeal'):
calc.array('vet_ben', zero)
if calc.policy_param('BEN_wic_repeal'):
calc.array('wic_ben', zero)
if calc.policy_param('BEN_mcare_repeal'):
calc.array('mcare_ben', zero)
if calc.policy_param('BEN_mcaid_repeal'):
calc.array('mcaid_ben', zero)
if calc.policy_param('BEN_oasdi_repeal'):
calc.array('e02400', zero)
if calc.policy_param('BEN_ui_repeal'):
calc.array('e02300', zero)
if calc.policy_param('BEN_other_repeal'):
calc.array('other_ben', zero)
# calculate government cost of all benefits
cost = np.array(
calc.array('housing_ben') +
calc.array('ssi_ben') +
calc.array('snap_ben') +
calc.array('tanf_ben') +
calc.array('vet_ben') +
calc.array('wic_ben') +
calc.array('mcare_ben') +
calc.array('mcaid_ben') +
calc.array('e02400') +
calc.array('e02300') +
calc.array('other_ben')
)
calc.array('benefit_cost_total', cost)
# calculate consumption value of all benefits
# (assuming that cash benefits have full value)
value = np.array(
calc.array('housing_ben') * calc.consump_param('BEN_housing_value') +
calc.array('ssi_ben') +
calc.array('snap_ben') * calc.consump_param('BEN_snap_value') +
calc.array('tanf_ben') * calc.consump_param('BEN_tanf_value') +
calc.array('vet_ben') * calc.consump_param('BEN_vet_value') +
calc.array('wic_ben') * calc.consump_param('BEN_wic_value') +
calc.array('mcare_ben') * calc.consump_param('BEN_mcare_value') +
calc.array('mcaid_ben') * calc.consump_param('BEN_mcaid_value') +
calc.array('e02400') +
calc.array('e02300') +
calc.array('other_ben') * calc.consump_param('BEN_other_value')
)
calc.array('benefit_value_total', value)
@iterate_jit(nopython=True)
def EI_PayrollTax(SS_Earnings_c, e00200p, e00200s, pencon_p, pencon_s,
FICA_ss_trt, FICA_mc_trt, ALD_SelfEmploymentTax_hc,
e00900p, e00900s, e02100p, e02100s, k1bx14p, k1bx14s,
payrolltax, ptax_was, setax, c03260, ptax_oasdi,
sey, earned, earned_p, earned_s):
"""
Compute part of total OASDI+HI payroll taxes and earned income variables.
"""
# compute sey and its individual components
sey_p = e00900p + e02100p + k1bx14p
sey_s = e00900s + e02100s + k1bx14s
sey = sey_p + sey_s # total self-employment income for filing unit
# compute gross wage and salary income ('was' denotes 'wage and salary')
gross_was_p = e00200p + pencon_p
gross_was_s = e00200s + pencon_s
# compute taxable earnings for OASDI FICA
sey_frac = 1.0 - 0.5 * (FICA_ss_trt + FICA_mc_trt)
txearn_was_p = min(SS_Earnings_c, gross_was_p)
txearn_was_s = min(SS_Earnings_c, gross_was_s)
txearn_sey_p = min(max(0., sey_p * sey_frac), SS_Earnings_c - txearn_was_p)
txearn_sey_s = min(max(0., sey_s * sey_frac), SS_Earnings_c - txearn_was_s)
# compute OASDI and HI payroll taxes on gross wage-and-salary income
ptax_ss_was_p = FICA_ss_trt * txearn_was_p
ptax_ss_was_s = FICA_ss_trt * txearn_was_s
ptax_mc_was_p = FICA_mc_trt * gross_was_p
ptax_mc_was_s = FICA_mc_trt * gross_was_s
ptax_was = ptax_ss_was_p + ptax_ss_was_s + ptax_mc_was_p + ptax_mc_was_s
# compute self-employment tax on taxable self-employment income
setax_ss_p = FICA_ss_trt * txearn_sey_p
setax_ss_s = FICA_ss_trt * txearn_sey_s
setax_mc_p = FICA_mc_trt * max(0., sey_p * sey_frac)
setax_mc_s = FICA_mc_trt * max(0., sey_s * sey_frac)
setax_p = setax_ss_p + setax_mc_p
setax_s = setax_ss_s + setax_mc_s
setax = setax_p + setax_s
# compute part of total regular payroll taxes for filing unit
payrolltax = ptax_was + setax
# compute OASDI part of payroll taxes
ptax_oasdi = ptax_ss_was_p + ptax_ss_was_s + setax_ss_p + setax_ss_s
# compute earned* variables and AGI deduction for
# "employer share" of self-employment tax, c03260
# Note: c03260 is the amount on 2015 Form 1040, line 27
c03260 = (1. - ALD_SelfEmploymentTax_hc) * 0.5 * setax
earned = max(0., e00200p + e00200s + sey - c03260)
earned_p = max(0., (e00200p + sey_p -
(1. - ALD_SelfEmploymentTax_hc) * 0.5 * setax_p))
earned_s = max(0., (e00200s + sey_s -
(1. - ALD_SelfEmploymentTax_hc) * 0.5 * setax_s))
return (sey, payrolltax, ptax_was, setax, c03260, ptax_oasdi,
earned, earned_p, earned_s)
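# Worked example (illustrative parameter values, not a statement of current
# law): with FICA_ss_trt=0.124, FICA_mc_trt=0.029, SS_Earnings_c=118500,
# wages e00200p=50000, and no self-employment or pension-contribution income,
# the function yields ptax_was = (0.124 + 0.029) * 50000 = 7650.0,
# setax = 0.0, payrolltax = 7650.0, and ptax_oasdi = 0.124 * 50000 = 6200.0.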
@iterate_jit(nopython=True)
def DependentCare(nu13, elderly_dependents, earned,
MARS, ALD_Dependents_thd, ALD_Dependents_hc,
ALD_Dependents_Child_c, ALD_Dependents_Elder_c,
care_deduction):
"""
Computes dependent-care above-the-line deduction.
Parameters
----------
nu13: Number of dependents under 13 years old
elderly_dependents: number of elderly dependents
earned: Form 2441 earned income amount
MARS: Marital Status
ALD_Dependents_thd: Maximum income to qualify for deduction
ALD_Dependents_hc: Deduction for dependent care haircut
ALD_Dependents_Child_c: National weighted average cost of childcare
ALD_Dependents_Elder_c: Eldercare deduction ceiling
Returns
-------
care_deduction: Total above the line deductions for dependent care.
"""
if earned <= ALD_Dependents_thd[MARS - 1]:
care_deduction = (((1. - ALD_Dependents_hc) * nu13 *
ALD_Dependents_Child_c) +
((1. - ALD_Dependents_hc) * elderly_dependents *
ALD_Dependents_Elder_c))
else:
care_deduction = 0.
return care_deduction
@iterate_jit(nopython=True)
def Adj(e03150, e03210, c03260,
e03270, e03300, e03400, e03500, e00800,
e03220, e03230, e03240, e03290, care_deduction,
ALD_StudentLoan_hc, ALD_SelfEmp_HealthIns_hc, ALD_KEOGH_SEP_hc,
ALD_EarlyWithdraw_hc, ALD_AlimonyPaid_hc, ALD_AlimonyReceived_hc,
ALD_EducatorExpenses_hc, ALD_HSADeduction_hc, ALD_IRAContributions_hc,
ALD_DomesticProduction_hc, ALD_Tuition_hc,
c02900):
"""
Adj calculates Form 1040 AGI adjustments (i.e., Above-the-Line Deductions).
Notes
-----
Taxpayer characteristics:
e03210 : Student loan interest paid
e03220 : Educator expenses
e03150 : Total deductible IRA plan contributions
e03230 : Tuition and fees (Form 8917)
e03240 : Domestic production activity deduction (Form 8903)
c03260 : Self-employment tax deduction (after haircut)
e03270 : Self-employed health insurance premiums
e03290 : HSA deduction (Form 8889)
e03300 : Total deductible KEOGH/SEP/SIMPLE/etc. plan contributions
e03400 : Penalty on early withdrawal of savings deduction
e03500 : Alimony paid
e00800 : Alimony received
care_deduction : Dependent care expense deduction
Tax law parameters:
ALD_StudentLoan_hc : Student loan interest deduction haircut
ALD_SelfEmp_HealthIns_hc : Self-employed h.i. deduction haircut
ALD_KEOGH_SEP_hc : KEOGH/etc. plan contribution deduction haircut
ALD_EarlyWithdraw_hc : Penalty on early withdrawal deduction haircut
ALD_AlimonyPaid_hc : Alimony paid deduction haircut
ALD_AlimonyReceived_hc : Alimony received deduction haircut
ALD_EducatorExpenses_hc: Educator expenses haircut
ALD_HSADeduction_hc: HSA Deduction haircut
ALD_IRAContributions_hc: IRA Contribution haircut
ALD_DomesticProduction_hc: Domestic production haircut
ALD_Tuition_hc: Tuition and fees haircut
Returns
-------
c02900 : total Form 1040 adjustments, which are not included in AGI
"""
# Form 2555 foreign earned income deduction is assumed to be zero
# Form 1040 adjustments that are included in expanded income:
c02900 = ((1. - ALD_StudentLoan_hc) * e03210 +
c03260 +
(1. - ALD_EarlyWithdraw_hc) * e03400 +
(1. - ALD_AlimonyPaid_hc) * e03500 +
(1. - ALD_AlimonyReceived_hc) * e00800 +
(1. - ALD_EducatorExpenses_hc) * e03220 +
(1. - ALD_Tuition_hc) * e03230 +
(1. - ALD_DomesticProduction_hc) * e03240 +
(1. - ALD_HSADeduction_hc) * e03290 +
(1. - ALD_SelfEmp_HealthIns_hc) * e03270 +
(1. - ALD_IRAContributions_hc) * e03150 +
(1. - ALD_KEOGH_SEP_hc) * e03300 +
care_deduction)
return c02900
@iterate_jit(nopython=True)
def ALD_InvInc_ec_base(p22250, p23250, sep,
e00300, e00600, e01100, e01200,
invinc_ec_base):
"""
Computes invinc_ec_base.
"""
# limitation on net short-term and long-term capital losses
cgain = max((-3000. / sep), p22250 + p23250)
# compute exclusion of investment income from AGI
invinc_ec_base = e00300 + e00600 + cgain + e01100 + e01200
return invinc_ec_base
@iterate_jit(nopython=True)
def CapGains(p23250, p22250, sep, ALD_StudentLoan_hc,
ALD_InvInc_ec_rt, invinc_ec_base, ALD_InvInc_ec_base_RyanBrady,
e00200, e00300, e00600, e00650, e00700, e00800,
CG_nodiff, CG_ec, CG_reinvest_ec_rt,
ALD_BusinessLosses_c, MARS,
e00900, e01100, e01200, e01400, e01700, e02000, e02100,
e02300, e00400, e02400, c02900, e03210, e03230, e03240,
c01000, c23650, ymod, ymod1, invinc_agi_ec):
"""
CapGains function: computes capital gains amounts (c01000, c23650), the
AGI exclusion of investment income (invinc_agi_ec), and the ymod/ymod1
income measures used in later calculations.
"""
# net capital gain (long term + short term) before exclusion
c23650 = p23250 + p22250
# limitation on capital losses
c01000 = max((-3000. / sep), c23650)
# compute total investment income
invinc = e00300 + e00600 + c01000 + e01100 + e01200
# compute exclusion of investment income from AGI
invinc_agi_ec = ALD_InvInc_ec_rt * max(0., invinc_ec_base)
# compute exclusion of investment income for Ryan-Brady plan
if ALD_InvInc_ec_base_RyanBrady:
# This RyanBrady code interprets the Blueprint reform as providing
# an investment income AGI exclusion for each of three investment
# income types (e00300, e00650, p23250) separately. The alternative
# interpretation (that is not adopted here) is that the investment
# income AGI exclusion is calculated using a base that is the sum
# of those three investment income types, with the base being zero
# if the sum of the three is negative.
CG_ec_RyanBrady = (c01000 - max(-3000. / sep,
p22250 + ALD_InvInc_ec_rt * p23250))
invinc_agi_ec = ALD_InvInc_ec_rt * (e00300 + e00650) + CG_ec_RyanBrady
# compute ymod1 variable that is included in AGI
ymod1 = (e00200 + e00700 + e00800 + e01400 + e01700 +
invinc - invinc_agi_ec + e02100 + e02300 +
max(e00900 + e02000, -ALD_BusinessLosses_c[MARS - 1]))
if CG_nodiff:
# apply QDIV+CG exclusion if QDIV+LTCG receive no special tax treatment
qdcg_pos = max(0., e00650 + c01000)
qdcg_exclusion = (min(CG_ec, qdcg_pos) +
CG_reinvest_ec_rt * max(0., qdcg_pos - CG_ec))
ymod1 = max(0., ymod1 - qdcg_exclusion)
invinc_agi_ec += qdcg_exclusion
# compute ymod variable that is used in OASDI benefit taxation logic
ymod2 = e00400 + (0.50 * e02400) - c02900
ymod3 = (1. - ALD_StudentLoan_hc) * e03210 + e03230 + e03240
ymod = ymod1 + ymod2 + ymod3
return (c01000, c23650, ymod, ymod1, invinc_agi_ec)
@iterate_jit(nopython=True)
def SSBenefits(MARS, ymod, e02400, SS_thd50, SS_thd85,
SS_percentage1, SS_percentage2, c02500):
"""
Calculates OASDI benefits included in AGI, c02500.
"""
if ymod < SS_thd50[MARS - 1]:
c02500 = 0.
elif ymod < SS_thd85[MARS - 1]:
c02500 = SS_percentage1 * min(ymod - SS_thd50[MARS - 1], e02400)
else:
c02500 = min(SS_percentage2 * (ymod - SS_thd85[MARS - 1]) +
SS_percentage1 *
min(e02400, SS_thd85[MARS - 1] -
SS_thd50[MARS - 1]), SS_percentage2 * e02400)
return c02500
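# Worked example (illustrative parameter values): with SS_thd50=25000,
# SS_thd85=34000, SS_percentage1=0.5, and SS_percentage2=0.85 for a single
# filer, ymod=30000 and e02400=10000 fall in the middle branch, so
# c02500 = 0.5 * min(30000 - 25000, 10000) = 2500.0.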
@iterate_jit(nopython=True)
def UBI(nu18, n1820, n21, UBI_u18, UBI_1820, UBI_21, UBI_ecrt,
ubi, taxable_ubi, nontaxable_ubi):
"""
Calculates total and taxable Universal Basic Income (UBI) amount.
Parameters
----------
nu18: Number of people in the tax unit under 18
n1820: Number of people in the tax unit age 18-20
n21: Number of people in the tax unit age 21+
UBI_u18: UBI benefit for those under 18
UBI_1820: UBI benefit for those between 18 to 20
UBI_21: UBI benefit for those 21 or more
UBI_ecrt: Fraction of UBI benefits that are not included in AGI
Returns
-------
ubi: total UBI received by the tax unit (is included in expanded_income)
taxable_ubi: amount of UBI that is taxable (is added to AGI)
nontaxable_ubi: amount of UBI that is nontaxable
"""
ubi = nu18 * UBI_u18 + n1820 * UBI_1820 + n21 * UBI_21
taxable_ubi = ubi * (1. - UBI_ecrt)
nontaxable_ubi = ubi - taxable_ubi
return ubi, taxable_ubi, nontaxable_ubi
@iterate_jit(nopython=True)
def AGI(ymod1, c02500, c02900, XTOT, MARS, sep, DSI, exact, nu18, taxable_ubi,
II_em, II_em_ps, II_prt, II_no_em_nu18,
c00100, pre_c04600, c04600):
"""
Computes Adjusted Gross Income (AGI), c00100, and
computes the personal exemption amount, c04600.
"""
# calculate AGI assuming no foreign earned income exclusion
c00100 = ymod1 + c02500 - c02900 + taxable_ubi
# calculate personal exemption amount
if II_no_em_nu18: # repeal of personal exemptions for deps. under 18
pre_c04600 = max(0, XTOT - nu18) * II_em
else:
pre_c04600 = XTOT * II_em
if DSI:
pre_c04600 = 0.
# phase-out personal exemption amount
if exact == 1: # exact calculation as on tax forms
line5 = max(0., c00100 - II_em_ps[MARS - 1])
line6 = math.ceil(line5 / (2500. / sep))
line7 = II_prt * line6
c04600 = max(0., pre_c04600 * (1. - line7))
else: # smoothed calculation needed for sensible mtr calculation
dispc_numer = II_prt * (c00100 - II_em_ps[MARS - 1])
dispc_denom = 2500. / sep
dispc = min(1., max(0., dispc_numer / dispc_denom))
c04600 = pre_c04600 * (1. - dispc)
return (c00100, pre_c04600, c04600)
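# Worked example of the smoothed exemption phase-out (exact == 0), using
# illustrative values II_em=4000, II_em_ps=258250, II_prt=0.02, sep=1:
# with c00100=308250, dispc = min(1, 0.02 * 50000 / 2500) = 0.4, so
# c04600 = pre_c04600 * (1 - 0.4), leaving 60 percent of the exemption.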
@iterate_jit(nopython=True)
def ItemDedCap(e17500, e18400, e18500, e19200, e19800, e20100, e20400, g20500,
c00100, ID_AmountCap_rt, ID_AmountCap_Switch, e17500_capped,
e18400_capped, e18500_capped, e19200_capped, e19800_capped,
e20100_capped, e20400_capped, g20500_capped):
"""
Applies a cap to gross itemized deductions.
Notes
-----
Tax Law Parameters:
ID_AmountCap_Switch : Indicator for which itemized deductions are
capped
ID_AmountCap_rt : Cap on itemized deductions; decimal fraction of AGI
Taxpayer Characteristics:
e17500 : Medical expenses
e18400 : State and local taxes
e18500 : Real-estate taxes
e19200 : Interest paid
e19800 : Charity cash contributions
e20100 : Charity noncash contributions
e20400 : Total miscellaneous expenses
g20500 : Gross casualty or theft loss (before disregard)
c00100: Adjusted Gross Income
Returns
-------
e17500_capped: Medical expenses, capped by ItemDedCap
e18400_capped: State and local taxes, capped by ItemDedCap
e18500_capped : Real-estate taxes, capped by ItemDedCap
e19200_capped : Interest paid, capped by ItemDedCap
e19800_capped : Charity cash contributions, capped by ItemDedCap
e20100_capped : Charity noncash contributions, capped by ItemDedCap
e20400_capped : Total miscellaneous expenses, capped by ItemDedCap
g20500_capped : Gross casualty or theft loss (before disregard),
capped by ItemDedCap
"""
# pylint: disable=too-many-branches
cap = max(0., ID_AmountCap_rt * c00100)
gross_ded_amt = 0
if ID_AmountCap_Switch[0]: # medical
gross_ded_amt += e17500
if ID_AmountCap_Switch[1]: # statelocal
gross_ded_amt += e18400
if ID_AmountCap_Switch[2]: # realestate
gross_ded_amt += e18500
if ID_AmountCap_Switch[3]: # casualty
gross_ded_amt += g20500
if ID_AmountCap_Switch[4]: # misc
gross_ded_amt += e20400
if ID_AmountCap_Switch[5]: # interest
gross_ded_amt += e19200
if ID_AmountCap_Switch[6]: # charity
gross_ded_amt += e19800 + e20100
overage = max(0., gross_ded_amt - cap)
e17500_capped = e17500
e18400_capped = e18400
e18500_capped = e18500
g20500_capped = g20500
e20400_capped = e20400
e19200_capped = e19200
e19800_capped = e19800
e20100_capped = e20100
if overage > 0. and c00100 > 0.:
if ID_AmountCap_Switch[0]: # medical
e17500_capped -= (e17500 / gross_ded_amt) * overage
if ID_AmountCap_Switch[1]: # statelocal
e18400_capped -= (e18400 / (gross_ded_amt) * overage)
if ID_AmountCap_Switch[2]: # realestate
e18500_capped -= (e18500 / gross_ded_amt) * overage
if ID_AmountCap_Switch[3]: # casualty
g20500_capped -= (g20500 / gross_ded_amt) * overage
if ID_AmountCap_Switch[4]: # misc
e20400_capped -= (e20400 / gross_ded_amt) * overage
if ID_AmountCap_Switch[5]: # interest
e19200_capped -= (e19200 / gross_ded_amt) * overage
if ID_AmountCap_Switch[6]: # charity
e19800_capped -= (e19800 / gross_ded_amt) * overage
e20100_capped -= (e20100 / gross_ded_amt) * overage
return (e17500_capped, e18400_capped, e18500_capped, g20500_capped,
e20400_capped, e19200_capped, e19800_capped, e20100_capped)
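# Worked example (illustrative values): with ID_AmountCap_rt=0.1 and
# c00100=100000 the cap is 10000; if only the state-and-local switch is on
# and e18400 is 15000, the overage is 5000 and the capped amount becomes
# 15000 - (15000 / 15000) * 5000 = 10000.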
@iterate_jit(nopython=True)
def ItemDed(e17500_capped, e18400_capped, e18500_capped,
g20500_capped, e20400_capped, e19200_capped, e19800_capped,
e20100_capped, MARS, age_head, age_spouse,
c00100, c04470, c17000, c18300, c20500, c19200,
c20800, c21040, c21060, c19700,
ID_ps, ID_Medical_frt, ID_Medical_frt_add4aged, ID_Medical_hc,
ID_Casualty_frt, ID_Casualty_hc, ID_Miscellaneous_frt,
ID_Miscellaneous_hc, ID_Charity_crt_all, ID_Charity_crt_noncash,
ID_prt, ID_crt, ID_c, ID_StateLocalTax_hc, ID_Charity_frt,
ID_Charity_hc, ID_InterestPaid_hc, ID_RealEstate_hc,
ID_Medical_c, ID_StateLocalTax_c, ID_RealEstate_c,
ID_InterestPaid_c, ID_Charity_c, ID_Casualty_c,
ID_Miscellaneous_c, ID_AllTaxes_c, ID_StateLocalTax_crt,
ID_RealEstate_crt, ID_Charity_f):
"""
Calculates itemized deductions, Form 1040, Schedule A.
Notes
-----
Tax Law Parameters:
ID_ps : Itemized deduction phaseout AGI start (Pease)
ID_crt : Itemized deduction maximum phaseout
as a decimal fraction of total itemized deduction (Pease)
ID_prt : Itemized deduction phaseout rate (Pease)
ID_c: Dollar limit on itemized deductions
ID_Medical_frt : Deduction for medical expenses;
floor as a decimal fraction of AGI
ID_Medical_frt_add4aged : Addon for medical expenses deduction for
elderly; addon as a decimal fraction of AGI
ID_Casualty_frt : Deduction for casualty loss;
floor as a decimal fraction of AGI
ID_Miscellaneous_frt : Deduction for miscellaneous expenses;
floor as a decimal fraction of AGI
ID_Charity_crt_all : Deduction for all charitable contributions;
ceiling as a decimal fraction of AGI
ID_Charity_crt_noncash : Deduction for noncash charitable
contributions; ceiling as a decimal fraction of AGI
ID_Charity_frt : Disregard for charitable contributions;
floor as a decimal fraction of AGI
ID_Medical_c : Ceiling on medical expense deduction
ID_StateLocalTax_c : Ceiling on state and local tax deduction
ID_RealEstate_c : Ceiling on real estate tax deduction
ID_AllTaxes_c: Ceiling combined state and local income/sales and
real estate tax deductions
ID_InterestPaid_c : Ceiling on interest paid deduction
ID_Charity_c : Ceiling on charity expense deduction
ID_Charity_f: Floor on charity expense deduction
ID_Casualty_c : Ceiling on casualty expense deduction
ID_Miscellaneous_c : Ceiling on miscellaneous expense deduction
ID_StateLocalTax_crt : Deduction for state and local taxes;
ceiling as a decimal fraction of AGI
ID_RealEstate_crt : Deduction for real estate taxes;
ceiling as a decimal fraction of AGI
Taxpayer Characteristics:
e17500_capped : Medical expenses, capped by ItemDedCap
e18400_capped : State and local taxes, capped by ItemDedCap
e18500_capped : Real-estate taxes, capped by ItemDedCap
e19200_capped : Interest paid, capped by ItemDedCap
e19800_capped : Charity cash contributions, capped by ItemDedCap
e20100_capped : Charity noncash contributions, capped by ItemDedCap
e20400_capped : Total miscellaneous expenses, capped by ItemDedCap
g20500_capped : Gross casualty or theft loss (before disregard),
capped by ItemDedCap
Returns
-------
c04470 : Itemized deduction amount (and other intermediate variables)
"""
posagi = max(c00100, 0.)
# Medical
medical_frt = ID_Medical_frt
if age_head >= 65 or (MARS == 2 and age_spouse >= 65):
medical_frt += ID_Medical_frt_add4aged
c17750 = medical_frt * posagi
c17000 = max(0., e17500_capped - c17750) * (1. - ID_Medical_hc)
c17000 = min(c17000, ID_Medical_c[MARS - 1])
# State and local taxes
c18400 = min((1. - ID_StateLocalTax_hc) * max(e18400_capped, 0.),
ID_StateLocalTax_c[MARS - 1])
c18500 = min((1. - ID_RealEstate_hc) * e18500_capped,
ID_RealEstate_c[MARS - 1])
# following two statements implement a cap on c18400 and c18500 in a way
# that those with negative AGI, c00100, are not capped under current law,
# hence the 0.0001 rather than zero
c18400 = min(c18400, ID_StateLocalTax_crt * max(c00100, 0.0001))
c18500 = min(c18500, ID_RealEstate_crt * max(c00100, 0.0001))
c18300 = min(c18400 + c18500, ID_AllTaxes_c[MARS - 1])
# Interest paid
c19200 = e19200_capped * (1. - ID_InterestPaid_hc)
c19200 = min(c19200, ID_InterestPaid_c[MARS - 1])
# Charity
lim30 = min(ID_Charity_crt_noncash * posagi, e20100_capped)
c19700 = min(ID_Charity_crt_all * posagi, lim30 + e19800_capped)
# charity floor is zero in present law
charity_floor = max(ID_Charity_frt * posagi, ID_Charity_f[MARS - 1])
c19700 = max(0., c19700 - charity_floor) * (1. - ID_Charity_hc)
c19700 = min(c19700, ID_Charity_c[MARS - 1])
# Casualty
c20500 = (max(0., g20500_capped - ID_Casualty_frt * posagi) *
(1. - ID_Casualty_hc))
c20500 = min(c20500, ID_Casualty_c[MARS - 1])
# Miscellaneous
c20400 = e20400_capped
c20750 = ID_Miscellaneous_frt * posagi
c20800 = max(0., c20400 - c20750) * (1. - ID_Miscellaneous_hc)
c20800 = min(c20800, ID_Miscellaneous_c[MARS - 1])
# Gross Itemized Deductions
c21060 = c17000 + c18300 + c19200 + c19700 + c20500 + c20800
# Limitation on total itemized deductions
nonlimited = c17000 + c20500
limitstart = ID_ps[MARS - 1]
if c21060 > nonlimited and c00100 > limitstart:
dedmin = ID_crt * (c21060 - nonlimited)
dedpho = ID_prt * max(0., posagi - limitstart)
c21040 = min(dedmin, dedpho)
c04470 = c21060 - c21040
else:
c21040 = 0.
c04470 = c21060
c04470 = min(c04470, ID_c[MARS - 1])
return (c17000, c18300, c19200, c20500, c20800, c21040, c21060, c04470,
c19700)
@iterate_jit(nopython=True)
def AdditionalMedicareTax(e00200, MARS,
AMEDT_ec, sey, AMEDT_rt,
FICA_mc_trt, FICA_ss_trt,
ptax_amc, payrolltax):
"""
Computes Additional Medicare Tax (Form 8959) included in payroll taxes.
Notes
-----
Tax Law Parameters:
AMEDT_ec : Additional Medicare Tax earnings exclusion
AMEDT_rt : Additional Medicare Tax rate
FICA_ss_trt : FICA Social Security tax rate
FICA_mc_trt : FICA Medicare tax rate
Taxpayer Characteristics:
e00200 : Wages and salaries
sey : Self-employment income
Returns
-------
ptax_amc : Additional Medicare Tax
payrolltax : payroll tax augmented by Additional Medicare Tax
"""
line8 = max(0., sey) * (1. - 0.5 * (FICA_mc_trt + FICA_ss_trt))
line11 = max(0., AMEDT_ec[MARS - 1] - e00200)
ptax_amc = AMEDT_rt * (max(0., e00200 - AMEDT_ec[MARS - 1]) +
max(0., line8 - line11))
payrolltax = payrolltax + ptax_amc
return (ptax_amc, payrolltax)
@iterate_jit(nopython=True)
def StdDed(DSI, earned, STD, age_head, age_spouse, STD_Aged, STD_Dep,
MARS, MIDR, blind_head, blind_spouse, standard, c19700,
STD_allow_charity_ded_nonitemizers):
"""
Calculates standard deduction, including standard deduction for
dependents, the aged, and the blind.
Notes
-----
Tax Law Parameters:
STD : Standard deduction amount, filing status dependent
STD_Dep : Standard deduction for dependents
STD_Aged : Additional standard deduction for blind and aged
Taxpayer Characteristics:
earned : Form 2441 earned income amount
e02400 : Gross Social Security Benefit
DSI : Dependent Status Indicator:
0 - not being claimed as a dependent
1 - claimed as a dependent
MIDR : Married filing separately itemized deductions
requirement indicator:
0 - not necessary to itemize because of filing status
1 - necessary to itemize when filing separately
Returns
-------
standard : the standard deduction amount for filing unit
"""
# calculate deduction for dependents
if DSI == 1:
c15100 = max(350. + earned, STD_Dep)
basic_stded = min(STD[MARS - 1], c15100)
else:
c15100 = 0.
if MIDR == 1:
basic_stded = 0.
else:
basic_stded = STD[MARS - 1]
# calculate extra standard deduction for aged and blind
num_extra_stded = blind_head + blind_spouse
if age_head >= 65:
num_extra_stded += 1
if MARS == 2 and age_spouse >= 65:
num_extra_stded += 1
extra_stded = num_extra_stded * STD_Aged[MARS - 1]
# calculate the total standard deduction
standard = basic_stded + extra_stded
if MARS == 3 and MIDR == 1:
standard = 0.
if STD_allow_charity_ded_nonitemizers:
standard += c19700
return standard
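# Worked example (illustrative values): a 70-year-old single filer with
# STD=6300, STD_Aged=1550, DSI=0, MIDR=0, and no blindness receives
# standard = 6300 + 1 * 1550 = 7850.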
@iterate_jit(nopython=True)
def TaxInc(c00100, standard, c04470, c04600, c04800, MARS,
PT_excl_rt, PT_excl_wagelim_rt, PT_excl_wagelim_thd,
PT_excl_wagelim_prt, e00900, e26270, e00200):
"""
Calculates taxable income, c04800.
"""
pt_excl_pre = max(0., PT_excl_rt * (e00900 + e26270))
wagelim_pre = e00200 * PT_excl_wagelim_rt
taxinc_pre = max(0., c00100 - max(c04470, standard) - c04600)
# calculate business income exclusion
excess = max(taxinc_pre - PT_excl_wagelim_thd[MARS - 1], 0.)
wagelim_rt = min(excess * PT_excl_wagelim_prt[MARS - 1], 1.)
limit = wagelim_rt * max(pt_excl_pre - wagelim_pre, 0.)
pt_excl = pt_excl_pre - limit
c04800 = max(0., taxinc_pre - pt_excl)
return c04800
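# Worked example (illustrative values): with PT_excl_rt=0.2, e00900=100000,
# e26270=0, e00200=0, and taxable income before the exclusion below
# PT_excl_wagelim_thd, the wage limit does not bind (excess=0, limit=0),
# so pt_excl = 0.2 * 100000 = 20000 and c04800 = taxinc_pre - 20000.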
@JIT(nopython=True)
def SchXYZ(taxable_income, MARS, e00900, e26270, e02000, e00200,
PT_rt1, PT_rt2, PT_rt3, PT_rt4, PT_rt5,
PT_rt6, PT_rt7, PT_rt8,
PT_brk1, PT_brk2, PT_brk3, PT_brk4, PT_brk5,
PT_brk6, PT_brk7,
II_rt1, II_rt2, II_rt3, II_rt4, II_rt5,
II_rt6, II_rt7, II_rt8,
II_brk1, II_brk2, II_brk3, II_brk4, II_brk5,
II_brk6, II_brk7, PT_EligibleRate_active,
PT_EligibleRate_passive, PT_wages_active_income,
PT_top_stacking):
"""
Returns Schedule X, Y, Z tax amount for specified taxable_income.
"""
# separate non-negative taxable income into two non-negative components,
# doing this in a way so that the components add up to taxable income
# define pass-through income eligible for PT schedule
pt_passive = PT_EligibleRate_passive * (e02000 - e26270)
pt_active_gross = e00900 + e26270
if (pt_active_gross > 0) and PT_wages_active_income:
pt_active_gross = pt_active_gross + e00200
pt_active = PT_EligibleRate_active * pt_active_gross
pt_active = min(pt_active, e00900 + e26270)
pt_taxinc = max(0., pt_passive + pt_active)
if pt_taxinc >= taxable_income:
pt_taxinc = taxable_income
reg_taxinc = 0.
else:
# pt_taxinc is unchanged
reg_taxinc = taxable_income - pt_taxinc
# determine stacking order
if PT_top_stacking:
reg_tbase = 0.
pt_tbase = reg_taxinc
else:
reg_tbase = pt_taxinc
pt_tbase = 0.
# compute Schedule X,Y,Z tax using the two components of taxable income
if reg_taxinc > 0.:
reg_tax = Taxes(reg_taxinc, MARS, reg_tbase,
II_rt1, II_rt2, II_rt3, II_rt4,
II_rt5, II_rt6, II_rt7, II_rt8, II_brk1, II_brk2,
II_brk3, II_brk4, II_brk5, II_brk6, II_brk7)
else:
reg_tax = 0.
if pt_taxinc > 0.:
pt_tax = Taxes(pt_taxinc, MARS, pt_tbase,
PT_rt1, PT_rt2, PT_rt3, PT_rt4,
PT_rt5, PT_rt6, PT_rt7, PT_rt8, PT_brk1, PT_brk2,
PT_brk3, PT_brk4, PT_brk5, PT_brk6, PT_brk7)
else:
pt_tax = 0.
return reg_tax + pt_tax
@iterate_jit(nopython=True)
def SchXYZTax(c04800, MARS, e00900, e26270, e02000, e00200,
PT_rt1, PT_rt2, PT_rt3, PT_rt4, PT_rt5,
PT_rt6, PT_rt7, PT_rt8,
PT_brk1, PT_brk2, PT_brk3, PT_brk4, PT_brk5,
PT_brk6, PT_brk7,
II_rt1, II_rt2, II_rt3, II_rt4, II_rt5,
II_rt6, II_rt7, II_rt8,
II_brk1, II_brk2, II_brk3, II_brk4, II_brk5,
II_brk6, II_brk7, PT_EligibleRate_active,
PT_EligibleRate_passive, PT_wages_active_income,
PT_top_stacking, c05200):
"""
SchXYZTax calls the SchXYZ function and sets c05200 to the returned amount.
"""
c05200 = SchXYZ(c04800, MARS, e00900, e26270, e02000, e00200,
PT_rt1, PT_rt2, PT_rt3, PT_rt4, PT_rt5,
PT_rt6, PT_rt7, PT_rt8,
PT_brk1, PT_brk2, PT_brk3, PT_brk4, PT_brk5,
PT_brk6, PT_brk7,
II_rt1, II_rt2, II_rt3, II_rt4, II_rt5,
II_rt6, II_rt7, II_rt8,
II_brk1, II_brk2, II_brk3, II_brk4, II_brk5,
II_brk6, II_brk7, PT_EligibleRate_active,
PT_EligibleRate_passive, PT_wages_active_income,
PT_top_stacking)
return c05200
@iterate_jit(nopython=True)
def GainsTax(e00650, c01000, c23650, p23250, e01100, e58990, e00200,
e24515, e24518, MARS, c04800, c05200, e00900, e26270, e02000,
II_rt1, II_rt2, II_rt3, II_rt4, II_rt5, II_rt6, II_rt7, II_rt8,
II_brk1, II_brk2, II_brk3, II_brk4, II_brk5, II_brk6, II_brk7,
PT_rt1, PT_rt2, PT_rt3, PT_rt4, PT_rt5, PT_rt6, PT_rt7, PT_rt8,
PT_brk1, PT_brk2, PT_brk3, PT_brk4, PT_brk5, PT_brk6, PT_brk7,
CG_nodiff, PT_EligibleRate_active, PT_EligibleRate_passive,
PT_wages_active_income, PT_top_stacking,
CG_rt1, CG_rt2, CG_rt3, CG_rt4, CG_brk1, CG_brk2, CG_brk3,
dwks10, dwks13, dwks14, dwks19, c05700, taxbc):
"""
GainsTax function implements (2015) Schedule D Tax Worksheet logic for
the special taxation of long-term capital gains and qualified dividends
if CG_nodiff is false.
"""
# pylint: disable=too-many-statements
if c01000 > 0. or c23650 > 0. or p23250 > 0. or e01100 > 0. or e00650 > 0.:
hasqdivltcg = 1 # has qualified dividends or long-term capital gains
else:
hasqdivltcg = 0 # no qualified dividends or long-term capital gains
if CG_nodiff:
hasqdivltcg = 0 # no special taxation of qual divids and l-t cap gains
if hasqdivltcg == 1:
dwks1 = c04800
dwks2 = e00650
dwks3 = e58990
dwks4 = 0. # always assumed to be zero
dwks5 = max(0., dwks3 - dwks4)
dwks6 = max(0., dwks2 - dwks5)
dwks7 = min(p23250, c23650) # SchD lines 15 and 16, respectively
# dwks8 = min(dwks3, dwks4)
# dwks9 = max(0., dwks7 - dwks8)
# BELOW TWO STATEMENTS ARE UNCLEAR IN LIGHT OF dwks9=... COMMENT
if e01100 > 0.:
c24510 = e01100
else:
c24510 = max(0., dwks7) + e01100
dwks9 = max(0., c24510 - min(0., e58990))
# ABOVE TWO STATEMENTS ARE UNCLEAR IN LIGHT OF dwks9=... COMMENT
dwks10 = dwks6 + dwks9
dwks11 = e24515 + e24518 # SchD lines 18 and 19, respectively
dwks12 = min(dwks9, dwks11)
dwks13 = dwks10 - dwks12
dwks14 = max(0., dwks1 - dwks13)
dwks16 = min(CG_brk1[MARS - 1], dwks1)
dwks17 = min(dwks14, dwks16)
dwks18 = max(0., dwks1 - dwks10)
dwks19 = max(dwks17, dwks18)
dwks20 = dwks16 - dwks17
lowest_rate_tax = CG_rt1 * dwks20
# break in worksheet lines
dwks21 = min(dwks1, dwks13)
dwks22 = dwks20
dwks23 = max(0., dwks21 - dwks22)
dwks25 = min(CG_brk2[MARS - 1], dwks1)
dwks26 = dwks19 + dwks20
dwks27 = max(0., dwks25 - dwks26)
dwks28 = min(dwks23, dwks27)
dwks29 = CG_rt2 * dwks28
dwks30 = dwks22 + dwks28
dwks31 = dwks21 - dwks30
dwks32 = CG_rt3 * dwks31
hi_base = max(0., dwks31 - CG_brk3[MARS - 1])
hi_incremental_rate = CG_rt4 - CG_rt3
highest_rate_incremental_tax = hi_incremental_rate * hi_base
# break in worksheet lines
dwks33 = min(dwks9, e24518)
dwks34 = dwks10 + dwks19
dwks36 = max(0., dwks34 - dwks1)
dwks37 = max(0., dwks33 - dwks36)
dwks38 = 0.25 * dwks37
# break in worksheet lines
dwks39 = dwks19 + dwks20 + dwks28 + dwks31 + dwks37
dwks40 = dwks1 - dwks39
dwks41 = 0.28 * dwks40
dwks42 = SchXYZ(dwks19, MARS, e00900, e26270, e02000, e00200,
PT_rt1, PT_rt2, PT_rt3, PT_rt4, PT_rt5,
PT_rt6, PT_rt7, PT_rt8,
PT_brk1, PT_brk2, PT_brk3, PT_brk4, PT_brk5,
PT_brk6, PT_brk7,
II_rt1, II_rt2, II_rt3, II_rt4, II_rt5,
II_rt6, II_rt7, II_rt8,
II_brk1, II_brk2, II_brk3, II_brk4, II_brk5,
II_brk6, II_brk7, PT_EligibleRate_active,
PT_EligibleRate_passive, PT_wages_active_income,
PT_top_stacking)
dwks43 = (dwks29 + dwks32 + dwks38 + dwks41 + dwks42 +
lowest_rate_tax + highest_rate_incremental_tax)
dwks44 = c05200
dwks45 = min(dwks43, dwks44)
c24580 = dwks45
else: # if hasqdivltcg is zero
c24580 = c05200
dwks10 = max(0., min(p23250, c23650)) + e01100
dwks13 = 0.
dwks14 = 0.
dwks19 = 0.
# final calculations done no matter what the value of hasqdivltcg
c05100 = c24580 # because no foreign earned income deduction
c05700 = 0. # no Form 4972, Lump Sum Distributions
taxbc = c05700 + c05100
return (dwks10, dwks13, dwks14, dwks19, c05700, taxbc)
@iterate_jit(nopython=True)
def AGIsurtax(c00100, MARS, AGI_surtax_trt, AGI_surtax_thd, taxbc, surtax):
"""
Computes surtax on AGI above some threshold.
"""
if AGI_surtax_trt > 0.:
hiAGItax = AGI_surtax_trt * max(c00100 - AGI_surtax_thd[MARS - 1], 0.)
taxbc += hiAGItax
surtax += hiAGItax
return (taxbc, surtax)
@iterate_jit(nopython=True)
def AMT(e07300, dwks13, standard, f6251, c00100, c18300, taxbc,
c04470, c17000, c20800, c21040, e24515, MARS, sep, dwks19,
dwks14, c05700, e62900, e00700, dwks10, age_head, age_spouse,
earned, cmbtp,
AMT_child_em_c_age, AMT_brk1,
AMT_em, AMT_prt, AMT_rt1, AMT_rt2,
AMT_child_em, AMT_em_ps, AMT_em_pe,
AMT_CG_brk1, AMT_CG_brk2, AMT_CG_brk3, AMT_CG_rt1, AMT_CG_rt2,
AMT_CG_rt3, AMT_CG_rt4, c05800, c09600, c62100):
"""
Computes Alternative Minimum Tax (AMT) taxable income and liability, where
c62100 is AMT taxable income,
c09600 is AMT tax liability, and
c05800 is total (regular + AMT) income tax liability before credits.
Note that line-number variable names refer to 2015 Form 6251.
"""
# pylint: disable=too-many-statements,too-many-branches
# Form 6251, Part I
if standard == 0.0:
c62100 = (c00100 - e00700 - c04470 +
max(0., min(c17000, 0.025 * c00100)) +
c18300 + c20800 - c21040)
if standard > 0.0:
c62100 = c00100 - e00700
c62100 += cmbtp # add income not in AGI but considered income for AMT
if MARS == 3:
amtsepadd = max(0.,
min(AMT_em[MARS - 1], AMT_prt * (c62100 - AMT_em_pe)))
else:
amtsepadd = 0.
c62100 = c62100 + amtsepadd # AMT taxable income, which is line28
# Form 6251, Part II top
line29 = max(0., AMT_em[MARS - 1] - AMT_prt *
max(0., c62100 - AMT_em_ps[MARS - 1]))
young_head = age_head != 0 and age_head < AMT_child_em_c_age
no_or_young_spouse = age_spouse < AMT_child_em_c_age
if young_head and no_or_young_spouse:
line29 = min(line29, earned + AMT_child_em)
line30 = max(0., c62100 - line29)
line3163 = (AMT_rt1 * line30 +
AMT_rt2 * max(0., (line30 - (AMT_brk1 / sep))))
if dwks10 > 0. or dwks13 > 0. or dwks14 > 0. or dwks19 > 0. or e24515 > 0.:
# complete Form 6251, Part III (line36 is equal to line30)
line37 = dwks13
line38 = e24515
line39 = min(line37 + line38, dwks10)
line40 = min(line30, line39)
line41 = max(0., line30 - line40)
line42 = (AMT_rt1 * line41 +
AMT_rt2 * max(0., (line41 - (AMT_brk1 / sep))))
line44 = dwks14
line45 = max(0., AMT_CG_brk1[MARS - 1] - line44)
line46 = min(line30, line37)
line47 = min(line45, line46) # line47 is amount taxed at AMT_CG_rt1
cgtax1 = line47 * AMT_CG_rt1
line48 = line46 - line47
line51 = dwks19
line52 = line45 + line51
line53 = max(0., AMT_CG_brk2[MARS - 1] - line52)
line54 = min(line48, line53) # line54 is amount taxed at AMT_CG_rt2
cgtax2 = line54 * AMT_CG_rt2
line56 = line47 + line54 # total amount in lower two brackets
if line41 == line56:
line57 = 0. # line57 is amount taxed at AMT_CG_rt3
linex2 = 0. # linex2 is amount taxed at AMT_CG_rt4
else:
line57 = line46 - line56
linex1 = min(line48,
max(0., AMT_CG_brk3[MARS - 1] - line44 - line45))
linex2 = max(0., line54 - linex1)
cgtax3 = line57 * AMT_CG_rt3
cgtax4 = linex2 * AMT_CG_rt4
if line38 == 0.:
line61 = 0.
else:
line61 = 0.25 * max(0., line30 - line41 - line56 - line57 - linex2)
line62 = line42 + cgtax1 + cgtax2 + cgtax3 + cgtax4 + line61
line64 = min(line3163, line62)
line31 = line64
else: # if not completing Form 6251, Part III
line31 = line3163
# Form 6251, Part II bottom
if f6251 == 1:
line32 = e62900
else:
line32 = e07300
line33 = line31 - line32
c09600 = max(0., line33 - max(0., taxbc - e07300 - c05700))
c05800 = taxbc + c09600
return (c62100, c09600, c05800)
@iterate_jit(nopython=True)
def NetInvIncTax(e00300, e00600, e02000, e26270, c01000,
c00100, NIIT_thd, MARS, NIIT_PT_taxed, NIIT_rt, niit):
"""
Computes Net Investment Income Tax (NIIT) amount assuming that
all annuity income is excluded from net investment income.
"""
modAGI = c00100 # no deducted foreign earned income to add
if not NIIT_PT_taxed:
NII = max(0., e00300 + e00600 + c01000 + e02000 - e26270)
else: # do not subtract e26270 from e02000
NII = max(0., e00300 + e00600 + c01000 + e02000)
niit = NIIT_rt * min(NII, max(0., modAGI - NIIT_thd[MARS - 1]))
return niit
@iterate_jit(nopython=True)
def F2441(MARS, earned_p, earned_s, f2441, CDCC_c, e32800,
exact, c00100, CDCC_ps, CDCC_crt, c05800, e07300, c07180):
"""
Calculates Form 2441 child and dependent care expense credit, c07180.
"""
# credit for at most two cared-for individuals and for actual expenses
max_credit = min(f2441, 2) * CDCC_c
c32800 = max(0., min(e32800, max_credit))
# credit is limited to minimum of individuals' earned income
c32880 = earned_p # earned income of taxpayer
if MARS == 2:
c32890 = earned_s # earned income of spouse when present
else:
c32890 = earned_p
c33000 = max(0., min(c32800, min(c32880, c32890)))
# credit is limited by AGI-related fraction
if exact == 1: # exact calculation as on tax forms
tratio = math.ceil(max(((c00100 - CDCC_ps) / 2000.), 0.))
c33200 = c33000 * 0.01 * max(20., CDCC_crt - min(15., tratio))
else:
c33200 = c33000 * 0.01 * max(20., CDCC_crt -
max(((c00100 - CDCC_ps) / 2000.), 0.))
# credit is limited by tax liability
c07180 = min(max(0., c05800 - e07300), c33200)
return c07180
@JIT(nopython=True)
def EITCamount(basic_frac, phasein_rate, earnings, max_amount,
phaseout_start, agi, phaseout_rate):
"""
Returns EITC amount given specified parameters.
English parameter names are used in this function because the
EITC formula is not available on IRS forms or in IRS instructions;
the extensive IRS EITC look-up table does not reveal the formula.
"""
eitc = min((basic_frac * max_amount +
(1.0 - basic_frac) * phasein_rate * earnings), max_amount)
if earnings > phaseout_start or agi > phaseout_start:
eitcx = max(0., (max_amount - phaseout_rate *
max(0., max(earnings, agi) - phaseout_start)))
eitc = min(eitc, eitcx)
return eitc
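# Worked example (illustrative parameter values, not actual law): with
# basic_frac=0, phasein_rate=0.34, max_amount=3400, phaseout_start=8500,
# phaseout_rate=0.16, and earnings=agi=5000, the credit is
# min(0.34 * 5000, 3400) = 1700.0, and no phase-out applies because
# 5000 < 8500.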
@iterate_jit(nopython=True)
def EITC(MARS, DSI, EIC, c00100, e00300, e00400, e00600, c01000,
e27200, age_head, age_spouse, earned, earned_p, earned_s,
EITC_ps, EITC_MinEligAge, EITC_MaxEligAge, EITC_ps_MarriedJ,
EITC_rt, EITC_c, EITC_prt, EITC_basic_frac,
EITC_InvestIncome_c, EITC_excess_InvestIncome_rt,
EITC_indiv,
c59660):
"""
Computes EITC amount, c59660.
"""
# pylint: disable=too-many-branches
if not EITC_indiv:
# filing-unit and number-of-kids based EITC (rather than indiv EITC)
if MARS == 3 or DSI == 1:
c59660 = 0.
else:
po_start = EITC_ps[EIC]
if MARS == 2:
po_start += EITC_ps_MarriedJ[EIC]
eitc = EITCamount(EITC_basic_frac,
EITC_rt[EIC], earned, EITC_c[EIC],
po_start, c00100, EITC_prt[EIC])
if EIC == 0:
# enforce age eligibility rule for those with no EITC-eligible
# kids assuming that an unknown age_* value implies EITC age
# eligibility
h_age_elig = EITC_MinEligAge <= age_head <= EITC_MaxEligAge
s_age_elig = EITC_MinEligAge <= age_spouse <= EITC_MaxEligAge
if MARS == 2:
if (age_head == 0 or age_spouse == 0 or
h_age_elig or s_age_elig):
c59660 = eitc
else:
c59660 = 0.
else: # if MARS != 2
if (age_head == 0 or h_age_elig):
c59660 = eitc
else:
c59660 = 0.
else: # if EIC != 0
c59660 = eitc
# reduce positive EITC if investment income exceeds ceiling
if c59660 > 0.:
invinc = (e00400 + e00300 + e00600 +
max(0., c01000) + max(0., e27200))
if invinc > EITC_InvestIncome_c:
eitc = (c59660 - EITC_excess_InvestIncome_rt *
(invinc - EITC_InvestIncome_c))
c59660 = max(0., eitc)
else:
# individual EITC rather than a filing-unit EITC
# .. calculate eitc amount for taxpayer
eitc_p = EITCamount(EITC_basic_frac,
EITC_rt[EIC], earned_p, EITC_c[EIC],
EITC_ps[EIC], earned_p, EITC_prt[EIC])
# .. calculate eitc amount for spouse
if MARS == 2:
eitc_s = EITCamount(EITC_basic_frac,
EITC_rt[EIC], earned_s, EITC_c[EIC],
EITC_ps[EIC], earned_s, EITC_prt[EIC])
else:
eitc_s = 0.
# .. combine taxpayer and spouse individual EITC amounts
c59660 = eitc_p + eitc_s
return c59660
@iterate_jit(nopython=True)
def ChildDepTaxCredit(n24, MARS, c00100, exact,
CTC_c, CTC_ps, CTC_prt, prectc, nu05,
CTC_c_under5_bonus, XTOT, num,
DependentCredit_Child_c, DependentCredit_Nonchild_c,
FilerCredit_c, dep_credit):
"""
Computes pre-CTC amount (prectc) and nonrefundable dependent credit.
"""
modAGI = c00100 # no foreign earned income deduction to add to AGI
# calculate and phase-out pre-CTC amount
base_ctc = CTC_c * n24 + CTC_c_under5_bonus * nu05
prectc = base_ctc
if prectc > 0. and modAGI > CTC_ps[MARS - 1]:
excess = modAGI - CTC_ps[MARS - 1]
if exact == 1: # exact calculation as on tax forms
excess = 1000. * math.ceil(excess / 1000.)
prectc = max(0., prectc - CTC_prt * excess)
# calculate and phase-out dependent credit after pre-CTC is phased out
dep_credit = (DependentCredit_Child_c * n24 +
DependentCredit_Nonchild_c * max(0, XTOT - n24 - num) +
FilerCredit_c[MARS - 1])
if dep_credit > 0. and CTC_prt > 0.:
thresh = CTC_ps[MARS - 1] + base_ctc / CTC_prt
if modAGI > thresh:
excess = modAGI - thresh
if exact == 1: # exact calculation as on tax forms
excess = 1000. * math.ceil(excess / 1000.)
dep_credit = max(0., dep_credit - CTC_prt * excess)
return (prectc, dep_credit)
@iterate_jit(nopython=True)
def PersonalTaxCredit(MARS, c00100,
II_credit, II_credit_ps, II_credit_prt,
II_credit_nr, II_credit_nr_ps, II_credit_nr_prt,
personal_refundable_credit,
personal_nonrefundable_credit):
"""
Computes personal_refundable_credit and personal_nonrefundable_credit,
neither of which is part of current-law policy.
"""
# calculate personal refundable credit amount with phase-out
personal_refundable_credit = II_credit[MARS - 1]
if II_credit_prt > 0. and c00100 > II_credit_ps[MARS - 1]:
pout = II_credit_prt * (c00100 - II_credit_ps[MARS - 1])
fully_phasedout = personal_refundable_credit - pout
personal_refundable_credit = max(0., fully_phasedout)
# calculate personal nonrefundable credit amount with phase-out
personal_nonrefundable_credit = II_credit_nr[MARS - 1]
if II_credit_nr_prt > 0. and c00100 > II_credit_nr_ps[MARS - 1]:
pout = II_credit_nr_prt * (c00100 - II_credit_nr_ps[MARS - 1])
fully_phasedout = personal_nonrefundable_credit - pout
personal_nonrefundable_credit = max(0., fully_phasedout)
return (personal_refundable_credit, personal_nonrefundable_credit)
@iterate_jit(nopython=True)
def AmOppCreditParts(exact, e87521, num, c00100, CR_AmOppRefundable_hc,
CR_AmOppNonRefundable_hc, c10960, c87668):
"""
Applies a phaseout to the Form 8863, line 1, American Opportunity Credit
amount, e87521, and then applies the 0.4 refundable rate.
Logic corresponds to Form 8863, Part I.
Notes
-----
Tax Law Parameters that are not parameterized:
90000 : American Opportunity Credit phaseout income base
10000 : American Opportunity Credit phaseout income range length
1/1000 : American Opportunity Credit phaseout rate
0.4 : American Opportunity Credit refundable rate
Parameters
----------
exact : whether or not to do rounding of phaseout fraction
e87521 : total tentative American Opportunity Credit for all students,
Form 8863, line 1
num : number of people filing jointly
c00100 : AGI
CR_AmOppRefundable_hc: haircut for the refundable portion of the
American Opportunity Credit
CR_AmOppNonRefundable_hc: haircut for the nonrefundable portion of the
American Opportunity Credit
Returns
-------
c10960 : Refundable part of American Opportunity Credit
c87668 : Tentative nonrefundable part of American Opportunity Credit
"""
if e87521 > 0.:
c87658 = max(0., 90000. * num - c00100)
c87660 = 10000. * num
if exact == 1: # exact calculation as on tax forms
c87662 = 1000. * min(1., round(c87658 / c87660, 3))
else:
c87662 = 1000. * min(1., c87658 / c87660)
c87664 = c87662 * e87521 / 1000.
c10960 = 0.4 * c87664 * (1. - CR_AmOppRefundable_hc)
c87668 = c87664 - c10960 * (1. - CR_AmOppNonRefundable_hc)
else:
c10960 = 0.
c87668 = 0.
return (c10960, c87668)
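# Worked example (both haircuts assumed zero): with e87521=2500, num=1,
# and c00100=85000, the phaseout fraction is min(1, (90000 - 85000) / 10000)
# = 0.5, so c87664 = 0.5 * 2500 = 1250, c10960 = 0.4 * 1250 = 500
# (refundable), and c87668 = 1250 - 500 = 750 (nonrefundable).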
@iterate_jit(nopython=True)
def SchR(age_head, age_spouse, MARS, c00100,
c05800, e07300, c07180, e02400, c02500, e01500, e01700, CR_SchR_hc,
c07200):
"""
Calculates Schedule R credit for the elderly and the disabled, c07200.
Note that no Schedule R policy parameters are inflation indexed.
Note that all Schedule R policy parameters are hard-coded and therefore
cannot be changed using Policy class parameters.
Note that the CR_SchR_hc policy parameter allows the user to eliminate
or reduce total schedule R credits.
"""
if age_head >= 65 or (MARS == 2 and age_spouse >= 65):
# calculate credit assuming nobody is disabled (so line12 = line10)
if MARS == 2:
if age_head >= 65 and age_spouse >= 65:
schr12 = 7500.
else:
schr12 = 5000.
schr15 = 10000.
elif MARS == 3:
schr12 = 3750.
schr15 = 5000.
elif MARS in (1, 4):
schr12 = 5000.
schr15 = 7500.
else:
schr12 = 0.
schr15 = 0.
# nontaxable portion of OASDI benefits, line 13a
schr13a = max(0., e02400 - c02500)
# nontaxable portion of pension benefits, line 13b
# NOTE: the following approximation (required because of inadequate IRS
# data) will be accurate if all pensions are partially taxable
# or if all pensions are fully taxable. But if a filing unit
# receives at least one partially taxable pension and at least
# one fully taxable pension, then the approximation in the
# following line is not exactly correct.
schr13b = max(0., e01500 - e01700)
schr13c = schr13a + schr13b
schr16 = max(0., c00100 - schr15)
schr17 = 0.5 * schr16
schr18 = schr13c + schr17
schr19 = max(0., schr12 - schr18)
schr20 = 0.15 * schr19
schr21 = max(0., (c05800 - e07300 - c07180))
c07200 = min(schr20, schr21) * (1. - CR_SchR_hc)
else: # if not calculating Schedule R credit
c07200 = 0.
return c07200
@iterate_jit(nopython=True)
def EducationTaxCredit(exact, e87530, MARS, c00100, num, c05800,
e07300, c07180, c07200, c87668,
LLC_Expense_c, ETC_pe_Single, ETC_pe_Married,
CR_Education_hc,
c07230):
"""
Computes Education Tax Credits (Form 8863) nonrefundable amount, c07230.
Logic corresponds to Form 8863, Part II.
Notes
-----
Tax Law Parameters that are not parameterized:
0.2 : Lifetime Learning Credit ratio against expense
Tax Law Parameters that are parameterized:
LLC_Expense_c : Lifetime Learning Credit expense limit
ETC_pe_Married : Education Tax Credit phaseout end for married
ETC_pe_Single : Education Tax Credit phaseout end for single
Taxpayer Characteristics:
exact : whether or not to do rounding of phaseout fraction
e87530 : Lifetime Learning Credit total qualified expenses,
Form 8863, line 10
e07300 : Foreign tax credit - Form 1116
c07180 : Child/dependent care expense credit - Form 2441
c07200 : Schedule R credit
Returns
-------
c07230 : Education Tax Credits (Form 8863) nonrefundable amount
"""
c87560 = 0.2 * min(e87530, LLC_Expense_c)
if MARS == 2:
c87570 = ETC_pe_Married * 1000.
else:
c87570 = ETC_pe_Single * 1000.
c87590 = max(0., c87570 - c00100)
c87600 = 10000. * num
if exact == 1: # exact calculation as on tax forms
c87610 = min(1., round(c87590 / c87600, 3))
else:
c87610 = min(1., c87590 / c87600)
c87620 = c87560 * c87610
xline4 = max(0., c05800 - (e07300 + c07180 + c07200))
xline5 = min(c87620, xline4)
xline9 = max(0., c05800 - (e07300 + c07180 + c07200 + xline5))
xline10 = min(c87668, xline9)
c87680 = xline5 + xline10
c07230 = c87680 * (1. - CR_Education_hc)
return c07230
@iterate_jit(nopython=True)
def CharityCredit(e19800, e20100, c00100, CR_Charity_rt, CR_Charity_f,
CR_Charity_frt, MARS, charity_credit):
"""
Computes nonrefundable charity credit, charity_credit.
This credit is not part of current-law policy.
"""
total_charity = e19800 + e20100
floor = max(CR_Charity_frt * c00100, CR_Charity_f[MARS - 1])
charity_cr_floored = max(total_charity - floor, 0)
charity_credit = CR_Charity_rt * (charity_cr_floored)
return charity_credit
@iterate_jit(nopython=True)
def NonrefundableCredits(c05800, e07240, e07260, e07300, e07400,
e07600, p08000, prectc, dep_credit,
personal_nonrefundable_credit,
CR_RetirementSavings_hc, CR_ForeignTax_hc,
CR_ResidentialEnergy_hc, CR_GeneralBusiness_hc,
CR_MinimumTax_hc, CR_OtherCredits_hc, charity_credit,
c07180, c07200, c07220, c07230, c07240,
c07260, c07300, c07400, c07600, c08000,
DependentCredit_before_CTC):
"""
The NonrefundableCredits function sequentially limits credits to tax liability.
Parameters
----------
CR_RetirementSavings_hc: Retirement savings credit haircut
CR_ForeignTax_hc: Foreign tax credit haircut
CR_ResidentialEnergy_hc: Residential energy credit haircut
CR_GeneralBusiness_hc: General business credit haircut
CR_MinimumTax_hc: Minimum tax credit haircut
CR_OtherCredits_hc: Other credits haircut
"""
# limit tax credits to tax liability in order they are on 2015 1040 form
avail = c05800
# Foreign tax credit - Form 1116
c07300 = min(e07300 * (1. - CR_ForeignTax_hc), avail)
avail = avail - c07300
# Child & dependent care expense credit
c07180 = min(c07180, avail)
avail = avail - c07180
# Education tax credit
c07230 = min(c07230, avail)
avail = avail - c07230
# Retirement savings credit - Form 8880
c07240 = min(e07240 * (1. - CR_RetirementSavings_hc), avail)
avail = avail - c07240
if DependentCredit_before_CTC:
# Dependent credit
dep_credit = min(dep_credit, avail)
avail = avail - dep_credit
# Child tax credit
c07220 = min(prectc, avail)
avail = avail - c07220
# Residential energy credit - Form 5695
c07260 = min(e07260 * (1. - CR_ResidentialEnergy_hc), avail)
avail = avail - c07260
# General business credit - Form 3800
c07400 = min(e07400 * (1. - CR_GeneralBusiness_hc), avail)
avail = avail - c07400
# Prior year minimum tax credit - Form 8801
c07600 = min(e07600 * (1. - CR_MinimumTax_hc), avail)
avail = avail - c07600
# Schedule R credit
c07200 = min(c07200, avail)
avail = avail - c07200
if not DependentCredit_before_CTC:
# Dependent credit
dep_credit = min(dep_credit, avail)
avail = avail - dep_credit
# Other credits
c08000 = min(p08000 * (1. - CR_OtherCredits_hc), avail)
avail = avail - c08000
charity_credit = min(charity_credit, avail)
avail = avail - charity_credit
# Personal nonrefundable credit
personal_nonrefundable_credit = min(personal_nonrefundable_credit, avail)
avail = avail - personal_nonrefundable_credit
return (c07180, c07200, c07220, c07230, c07240, dep_credit,
c07260, c07300, c07400, c07600, c08000, charity_credit,
personal_nonrefundable_credit)
@iterate_jit(nopython=True)
def AdditionalCTC(n24, prectc, earned, c07220, ptax_was,
ACTC_Income_thd, ACTC_rt, ACTC_ChildNum,
c03260, e09800, c59660, e11200, c11070, nu05,
ACTC_rt_bonus_under5family):
"""
Calculates Additional (refundable) Child Tax Credit, c11070.
"""
c82925 = 0.
c82930 = 0.
c82935 = 0.
c82880 = 0.
c82885 = 0.
c82890 = 0.
c82900 = 0.
c82905 = 0.
c82910 = 0.
c82915 = 0.
c82920 = 0.
c82937 = 0.
c82940 = 0.
c11070 = 0.
# Part I of 2005 Form 8812
if n24 > 0:
c82925 = prectc
c82930 = c07220
c82935 = c82925 - c82930
# CTC not applied to tax
c82880 = max(0., earned)
c82885 = max(0., c82880 - ACTC_Income_thd)
        # Accommodate ACTC rate bonus for families with children under 5
if nu05 == 0:
ACTC_rate = ACTC_rt
else:
ACTC_rate = ACTC_rt + ACTC_rt_bonus_under5family
c82890 = ACTC_rate * c82885
# Part II of 2005 Form 8812
if n24 >= ACTC_ChildNum and c82890 < c82935:
c82900 = 0.5 * ptax_was
c82905 = c03260 + e09800
c82910 = c82900 + c82905
c82915 = c59660 + e11200
c82920 = max(0., c82910 - c82915)
c82937 = max(c82890, c82920)
        # Part III of 2005 Form 8812
if n24 < ACTC_ChildNum:
if n24 > 0 and c82890 > 0:
c82940 = min(c82890, c82935)
else: # if n24 >= ACTC_ChildNum
if c82890 >= c82935:
c82940 = c82935
else:
c82940 = min(c82935, c82937)
c11070 = c82940
return c11070
@iterate_jit(nopython=True)
def C1040(c05800, c07180, c07200, c07220, c07230, c07240, c07260, c07300,
c07400, c07600, c08000, e09700, e09800, e09900, niit, othertaxes,
c07100, c09200, dep_credit, charity_credit,
personal_nonrefundable_credit):
"""
Computes total used nonrefundable credits, c07100, othertaxes, and
income tax before refundable credits, c09200.
"""
# total used nonrefundable credits (as computed in NonrefundableCredits)
c07100 = (c07180 + c07200 + c07600 + c07300 + c07400 + c07220 + c08000 +
c07230 + c07240 + c07260 + dep_credit + charity_credit +
personal_nonrefundable_credit)
# tax after credits (2016 Form 1040, line 56)
tax_net_nonrefundable_credits = max(0., c05800 - c07100)
# tax (including othertaxes) before refundable credits
othertaxes = e09700 + e09800 + e09900 + niit
c09200 = othertaxes + tax_net_nonrefundable_credits
return (c07100, othertaxes, c09200)
@iterate_jit(nopython=True)
def CTC_new(CTC_new_c, CTC_new_rt, CTC_new_c_under5_bonus,
CTC_new_ps, CTC_new_prt, CTC_new_for_all,
CTC_new_refund_limited, CTC_new_refund_limit_payroll_rt,
CTC_new_refund_limited_all_payroll, payrolltax,
n24, nu05, c00100, MARS, ptax_oasdi, c09200,
ctc_new):
"""
Computes new refundable child tax credit using specified parameters.
"""
if n24 > 0:
posagi = max(c00100, 0.)
ctc_new = CTC_new_c * n24 + CTC_new_c_under5_bonus * nu05
if not CTC_new_for_all:
ctc_new = min(CTC_new_rt * posagi, ctc_new)
ymax = CTC_new_ps[MARS - 1]
if posagi > ymax:
ctc_new_reduced = max(0.,
ctc_new - CTC_new_prt * (posagi - ymax))
ctc_new = min(ctc_new, ctc_new_reduced)
if ctc_new > 0. and CTC_new_refund_limited:
refund_new = max(0., ctc_new - c09200)
if not CTC_new_refund_limited_all_payroll:
limit_new = CTC_new_refund_limit_payroll_rt * ptax_oasdi
if CTC_new_refund_limited_all_payroll:
limit_new = CTC_new_refund_limit_payroll_rt * payrolltax
limited_new = max(0., refund_new - limit_new)
ctc_new = max(0., ctc_new - limited_new)
else:
ctc_new = 0.
return ctc_new
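# Hedged worked example (added; all parameter values below are made up): with
# n24 = 2, nu05 = 0, CTC_new_c = 1000, CTC_new_for_all = True, posagi = 60000,
# CTC_new_ps[MARS - 1] = 50000 and CTC_new_prt = 0.05:
#   ctc_new = 1000 * 2 = 2000
#   ctc_new_reduced = max(0, 2000 - 0.05 * (60000 - 50000)) = 1500
#   ctc_new = min(2000, 1500) = 1500 (before any payroll-based refund limit)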
@iterate_jit(nopython=True)
def IITAX(c59660, c11070, c10960, personal_refundable_credit, ctc_new,
c09200, payrolltax,
eitc, refund, iitax, combined):
"""
Computes final taxes.
"""
eitc = c59660
refund = eitc + c11070 + c10960 + personal_refundable_credit + ctc_new
iitax = c09200 - refund
combined = iitax + payrolltax
return (eitc, refund, iitax, combined)
@JIT(nopython=True)
def Taxes(income, MARS, tbrk_base,
rate1, rate2, rate3, rate4, rate5, rate6, rate7, rate8,
tbrk1, tbrk2, tbrk3, tbrk4, tbrk5, tbrk6, tbrk7):
"""
Taxes function returns tax amount given the progressive tax rate
schedule specified by the rate* and (upper) tbrk* parameters and
given income, filing status (MARS), and tax bracket base (tbrk_base).
"""
if tbrk_base > 0.:
brk1 = max(tbrk1[MARS - 1] - tbrk_base, 0.)
brk2 = max(tbrk2[MARS - 1] - tbrk_base, 0.)
brk3 = max(tbrk3[MARS - 1] - tbrk_base, 0.)
brk4 = max(tbrk4[MARS - 1] - tbrk_base, 0.)
brk5 = max(tbrk5[MARS - 1] - tbrk_base, 0.)
brk6 = max(tbrk6[MARS - 1] - tbrk_base, 0.)
brk7 = max(tbrk7[MARS - 1] - tbrk_base, 0.)
else:
brk1 = tbrk1[MARS - 1]
brk2 = tbrk2[MARS - 1]
brk3 = tbrk3[MARS - 1]
brk4 = tbrk4[MARS - 1]
brk5 = tbrk5[MARS - 1]
brk6 = tbrk6[MARS - 1]
brk7 = tbrk7[MARS - 1]
return (rate1 * min(income, brk1) +
rate2 * min(brk2 - brk1, max(0., income - brk1)) +
rate3 * min(brk3 - brk2, max(0., income - brk2)) +
rate4 * min(brk4 - brk3, max(0., income - brk3)) +
rate5 * min(brk5 - brk4, max(0., income - brk4)) +
rate6 * min(brk6 - brk5, max(0., income - brk5)) +
rate7 * min(brk7 - brk6, max(0., income - brk6)) +
rate8 * max(0., income - brk7))
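# --- Hedged illustration (added; not part of the original module) ---
# A plain-Python sketch of the progressive-schedule arithmetic that Taxes()
# implements above, with made-up rates and brackets; it avoids the JIT and the
# MARS-indexed parameter arrays purely for readability.
def _progressive_tax_sketch(income, rates, brackets):
    """Apply rates[i] to the slice of income between brackets[i-1] and brackets[i]."""
    tax, lower = 0., 0.
    for rate, upper in zip(rates, brackets + [float('inf')]):
        tax += rate * max(0., min(income, upper) - lower)
        lower = upper
    return tax
# e.g. _progressive_tax_sketch(50000., [0.10, 0.12, 0.22], [9525., 38700.])
# taxes the first 9525 at 10%, the slice up to 38700 at 12%, and the rest at 22%.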
def ComputeBenefit(calc, ID_switch):
"""
Calculates the value of the benefits accrued from itemizing.
"""
# compute income tax liability with no itemized deductions allowed for
# the types of itemized deductions covered under the BenefitSurtax
no_ID_calc = copy.deepcopy(calc)
if ID_switch[0]:
no_ID_calc.policy_param('ID_Medical_hc', 1.)
if ID_switch[1]:
no_ID_calc.policy_param('ID_StateLocalTax_hc', 1.)
if ID_switch[2]:
no_ID_calc.policy_param('ID_RealEstate_hc', 1.)
if ID_switch[3]:
no_ID_calc.policy_param('ID_Casualty_hc', 1.)
if ID_switch[4]:
no_ID_calc.policy_param('ID_Miscellaneous_hc', 1.)
if ID_switch[5]:
no_ID_calc.policy_param('ID_InterestPaid_hc', 1.)
if ID_switch[6]:
no_ID_calc.policy_param('ID_Charity_hc', 1.)
no_ID_calc._calc_one_year() # pylint: disable=protected-access
diff_iitax = no_ID_calc.array('iitax') - calc.array('iitax')
benefit = np.where(diff_iitax > 0., diff_iitax, 0.)
return benefit
def BenefitSurtax(calc):
"""
Computes itemized-deduction-benefit surtax and adds the surtax amount
to income tax, combined tax, and surtax liabilities.
"""
if calc.policy_param('ID_BenefitSurtax_crt') != 1.:
ben = ComputeBenefit(calc,
calc.policy_param('ID_BenefitSurtax_Switch'))
agi = calc.array('c00100')
ben_deduct = calc.policy_param('ID_BenefitSurtax_crt') * agi
ben_exempt_array = calc.policy_param('ID_BenefitSurtax_em')
ben_exempt = ben_exempt_array[calc.array('MARS') - 1]
ben_dedem = ben_deduct + ben_exempt
ben_surtax = (calc.policy_param('ID_BenefitSurtax_trt') *
np.where(ben > ben_dedem, ben - ben_dedem, 0.))
# add ben_surtax to income & combined taxes and to surtax subtotal
calc.incarray('iitax', ben_surtax)
calc.incarray('combined', ben_surtax)
calc.incarray('surtax', ben_surtax)
def BenefitLimitation(calc):
"""
Limits the benefits of select itemized deductions to a fraction of
deductible expenses.
"""
if calc.policy_param('ID_BenefitCap_rt') != 1.:
benefit = ComputeBenefit(calc,
calc.policy_param('ID_BenefitCap_Switch'))
# Calculate total deductible expenses under the cap
deduct_exps = 0.
if calc.policy_param('ID_BenefitCap_Switch')[0]: # medical
deduct_exps += calc.array('c17000')
if calc.policy_param('ID_BenefitCap_Switch')[1]: # statelocal
one_minus_hc = 1. - calc.policy_param('ID_StateLocalTax_hc')
deduct_exps += (one_minus_hc *
np.maximum(calc.array('e18400_capped'), 0.))
if calc.policy_param('ID_BenefitCap_Switch')[2]: # realestate
one_minus_hc = 1. - calc.policy_param('ID_RealEstate_hc')
deduct_exps += one_minus_hc * calc.array('e18500_capped')
if calc.policy_param('ID_BenefitCap_Switch')[3]: # casualty
deduct_exps += calc.array('c20500')
if calc.policy_param('ID_BenefitCap_Switch')[4]: # misc
deduct_exps += calc.array('c20800')
if calc.policy_param('ID_BenefitCap_Switch')[5]: # interest
deduct_exps += calc.array('c19200')
if calc.policy_param('ID_BenefitCap_Switch')[6]: # charity
deduct_exps += calc.array('c19700')
# Calculate cap value for itemized deductions
benefit_limit = deduct_exps * calc.policy_param('ID_BenefitCap_rt')
# Add the difference between the actual benefit and capped benefit
# to income tax and combined tax liabilities.
excess_benefit = np.maximum(benefit - benefit_limit, 0)
calc.incarray('iitax', excess_benefit)
calc.incarray('surtax', excess_benefit)
calc.incarray('combined', excess_benefit)
@iterate_jit(nopython=True)
def FairShareTax(c00100, MARS, ptax_was, setax, ptax_amc,
FST_AGI_trt, FST_AGI_thd_lo, FST_AGI_thd_hi,
fstax, iitax, combined, surtax):
"""
    Computes Fair Share Tax, or "Buffett Rule", types of reforms.
Taxpayer Characteristics
------------------------
c00100 : AGI
MARS : filing (marital) status
ptax_was : payroll tax on wages and salaries
setax : self-employment tax
ptax_amc : Additional Medicare Tax on high earnings
Returns
-------
fstax : Fair Share Tax amount
iitax : individual income tax augmented by fstax
combined : individual income tax plus payroll taxes augmented by fstax
surtax : individual income tax subtotal augmented by fstax
"""
if FST_AGI_trt > 0. and c00100 >= FST_AGI_thd_lo[MARS - 1]:
employee_share = 0.5 * ptax_was + 0.5 * setax + ptax_amc
fstax = max(c00100 * FST_AGI_trt - iitax - employee_share, 0.)
thd_gap = max(FST_AGI_thd_hi[MARS - 1] - FST_AGI_thd_lo[MARS - 1], 0.)
if thd_gap > 0. and c00100 < FST_AGI_thd_hi[MARS - 1]:
fstax *= (c00100 - FST_AGI_thd_lo[MARS - 1]) / thd_gap
iitax += fstax
combined += fstax
surtax += fstax
else:
fstax = 0.
return (fstax, iitax, combined, surtax)
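# Hedged worked example (added; the parameter values are invented): with
# FST_AGI_trt = 0.3, FST_AGI_thd_lo = 1e6, FST_AGI_thd_hi = 2e6, c00100 = 1.5e6,
# iitax = 300000 and employee_share = 50000:
#   fstax = max(0.3 * 1.5e6 - 300000 - 50000, 0) = 100000
#   phase-in fraction = (1.5e6 - 1e6) / (2e6 - 1e6) = 0.5, so fstax = 50000,
#   which is then added to iitax, combined and surtax.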
@iterate_jit(nopython=True)
def LumpSumTax(DSI, num, XTOT,
LST,
lumpsum_tax, combined):
"""
    Computes lump-sum tax and adds it to combined taxes.
"""
if LST == 0.0 or DSI == 1:
lumpsum_tax = 0.
else:
lumpsum_tax = LST * max(num, XTOT)
combined += lumpsum_tax
return (lumpsum_tax, combined)
@iterate_jit(nopython=True)
def ExpandIncome(e00200, pencon_p, pencon_s, e00300, e00400, e00600,
e00700, e00800, e00900, e01100, e01200, e01400, e01500,
e02000, e02100, p22250, p23250,
cmbtp, ptax_was, benefit_value_total, ubi,
expanded_income):
"""
Calculates expanded_income from component income types.
"""
expanded_income = (
e00200 + # wage and salary income net of DC pension contributions
pencon_p + # DC pension contributions for taxpayer
pencon_s + # DC pension contributions for spouse
e00300 + # taxable interest income
e00400 + # non-taxable interest income
e00600 + # dividends
e00700 + # state and local income tax refunds
e00800 + # alimony received
e00900 + # Sch C business net income/loss
e01100 + # capital gain distributions not reported on Sch D
e01200 + # Form 4797 other net gain/loss
e01400 + # taxable IRA distributions
e01500 + # total pension and annuity income
e02000 + # Sch E total rental, ..., partnership, S-corp income/loss
e02100 + # Sch F farm net income/loss
p22250 + # Sch D: net short-term capital gain/loss
p23250 + # Sch D: net long-term capital gain/loss
cmbtp + # other AMT taxable income items from Form 6251
0.5 * ptax_was + # employer share of FICA taxes
benefit_value_total + # consumption value of all benefits received
ubi # total UBI benefit
)
return expanded_income
@iterate_jit(nopython=True)
def AfterTaxIncome(combined, expanded_income, aftertax_income):
"""
Calculates after-tax expanded income.
Parameters
----------
combined: combined tax liability
expanded_income: expanded income
Returns
-------
aftertax_income: expanded_income minus combined
"""
aftertax_income = expanded_income - combined
return aftertax_income
| 38.755123
| 79
| 0.632615
|
4a164a58e6d45421f862feede2c107ecd104cbab
| 3,139
|
py
|
Python
|
src/pybullet_planning/interfaces/robots/dynamics.py
|
logan-dunbar/pybullet_planning
|
3b25fc7a0f350f4b46048be5c42f9cbf3ab2d6fb
|
[
"MIT"
] | null | null | null |
src/pybullet_planning/interfaces/robots/dynamics.py
|
logan-dunbar/pybullet_planning
|
3b25fc7a0f350f4b46048be5c42f9cbf3ab2d6fb
|
[
"MIT"
] | null | null | null |
src/pybullet_planning/interfaces/robots/dynamics.py
|
logan-dunbar/pybullet_planning
|
3b25fc7a0f350f4b46048be5c42f9cbf3ab2d6fb
|
[
"MIT"
] | null | null | null |
from collections import defaultdict, deque, namedtuple
import numpy as np
import pybullet as p
from pybullet_planning.utils import get_client, BASE_LINK, STATIC_MASS
#####################################
# https://docs.google.com/document/d/10sXEhzFRSnvFcl3XxNGhnD4N2SedqwdAvK3dsihxVUA/edit#
DynamicsInfo = namedtuple('DynamicsInfo', ['mass', 'lateral_friction',
'local_inertia_diagonal', 'local_inertial_pos', 'local_inertial_orn',
'restitution', 'rolling_friction', 'spinning_friction',
'contact_damping', 'contact_stiffness', 'body_type', 'collision_margin'])
def get_dynamics_info(body, link=BASE_LINK):
return DynamicsInfo(*p.getDynamicsInfo(body, link, physicsClientId=get_client()))
get_link_info = get_dynamics_info
def get_mass(body, link=BASE_LINK):
    # TODO: get full mass
return get_dynamics_info(body, link).mass
def set_dynamics(body, link=BASE_LINK, **kwargs):
# TODO: iterate over all links
p.changeDynamics(body, link, physicsClientId=get_client(), **kwargs)
def set_mass(body, mass, link=BASE_LINK):
set_dynamics(body, link=link, mass=mass)
def set_static(body):
"""set all the body's links to be static (infinite mass, doesn't move under gravity)
Parameters
----------
body : int
[description]
"""
from pybullet_planning.interfaces.robots.link import get_all_links
for link in get_all_links(body):
set_mass(body, mass=STATIC_MASS, link=link)
def set_all_static():
from pybullet_planning.interfaces.env_manager.simulation import disable_gravity
from pybullet_planning.interfaces.robots.body import get_bodies
# TODO: mass saver
disable_gravity()
for body in get_bodies():
set_static(body)
def get_joint_inertial_pose(body, joint):
dynamics_info = get_dynamics_info(body, joint)
return dynamics_info.local_inertial_pos, dynamics_info.local_inertial_orn
def get_local_link_pose(body, joint):
from pybullet_planning.interfaces.env_manager.pose_transformation import Pose, multiply, invert
from pybullet_planning.interfaces.robots.joint import get_joint_parent_frame
from pybullet_planning.interfaces.robots.link import parent_link_from_joint
parent_joint = parent_link_from_joint(body, joint)
#world_child = get_link_pose(body, joint)
#world_parent = get_link_pose(body, parent_joint)
##return multiply(invert(world_parent), world_child)
#return multiply(world_child, invert(world_parent))
# https://github.com/bulletphysics/bullet3/blob/9c9ac6cba8118544808889664326fd6f06d9eeba/examples/pybullet/gym/pybullet_utils/urdfEditor.py#L169
parent_com = get_joint_parent_frame(body, joint)
tmp_pose = invert(multiply(get_joint_inertial_pose(body, joint), parent_com))
parent_inertia = get_joint_inertial_pose(body, parent_joint)
#return multiply(parent_inertia, tmp_pose) # TODO: why is this wrong...
_, orn = multiply(parent_inertia, tmp_pose)
pos, _ = multiply(parent_inertia, Pose(parent_com[0]))
return (pos, orn)
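# Hedged usage sketch (added; assumes a pybullet client has already been connected
# through pybullet_planning and that `body` is a valid body id):
#   info = get_dynamics_info(body)   # mass, friction, inertia of the base link
#   set_mass(body, 0.5)              # change the base link's mass
#   set_static(body)                 # give every link STATIC_MASS so gravity ignores it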
| 41.853333
| 148
| 0.727302
|
4a164a93a0a735136b2c34105a07f3ef835cbb67
| 793
|
py
|
Python
|
03.Layout/06.Size.py
|
sarincr/Python-App-Development-using-Kivy
|
bc5d40efa29ddf59042a85e0e908657590149d6f
|
[
"MIT"
] | 1
|
2021-11-10T11:31:23.000Z
|
2021-11-10T11:31:23.000Z
|
03.Layout/06.Size.py
|
sarincr/Python-App-Development-using-Kivy
|
bc5d40efa29ddf59042a85e0e908657590149d6f
|
[
"MIT"
] | null | null | null |
03.Layout/06.Size.py
|
sarincr/Python-App-Development-using-Kivy
|
bc5d40efa29ddf59042a85e0e908657590149d6f
|
[
"MIT"
] | null | null | null |
import kivy
from kivy.app import App
from kivy.uix.button import Button
from kivy.uix.gridlayout import GridLayout
class Grid_LayoutApp(App):
def build(self):
layout = GridLayout(cols = 2)
layout.add_widget(Button(text ='Element A 1', size_hint_x = None, width = 100))
layout.add_widget(Button(text ='Element B 1'))
layout.add_widget(Button(text ='Element A 2'))
layout.add_widget(Button(text ='Element B 2'))
layout.add_widget(Button(text ='Element A 3', size_hint_x = None, width = 100))
layout.add_widget(Button(text ='Element B 3'))
layout.add_widget(Button(text ='Element A 4'))
layout.add_widget(Button(text ='Element B 4'))
return layout
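# Note (added): size_hint_x = None tells Kivy not to size that button proportionally
# and to use the explicit width (100 px) instead; buttons that keep the default
# size_hint share the remaining horizontal space in the layout.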
root = Grid_LayoutApp()
root.run()
| 20.868421
| 88
| 0.648172
|
4a164b9b1e26a4b21469d1ae3c063dfa59190d16
| 3,116
|
py
|
Python
|
LC/08-tring-to-integer-atoi.py
|
brunodleite2/python
|
72c21239869747c36cdee8a23dbabfd8bc78e480
|
[
"MIT"
] | null | null | null |
LC/08-tring-to-integer-atoi.py
|
brunodleite2/python
|
72c21239869747c36cdee8a23dbabfd8bc78e480
|
[
"MIT"
] | null | null | null |
LC/08-tring-to-integer-atoi.py
|
brunodleite2/python
|
72c21239869747c36cdee8a23dbabfd8bc78e480
|
[
"MIT"
] | null | null | null |
'FIRST SOLUTION - NO BUILTIN FUNCTIONS USED - FASTER'
class Solution:
MAX = 2**31 - 1
MIN = 2**31 * -1
def __is_sign(self, char: str) -> bool:
if char in ['-', '+']:
return True
return False
def __is_number(self, char: str) -> bool:
if char in ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']:
return True
return False
def __is_whitespace(self, char: str) -> bool:
if char == ' ':
return True
return False
def __convert_to_int(self, str: str) -> int:
if not len(str) or (len(str) == 1 and not self.__is_number(str[0])):
return 0
integer = int(str)
if integer > Solution.MAX:
return Solution.MAX
if integer < Solution.MIN:
return Solution.MIN
return integer
def myAtoi(self, str: str) -> int:
number_has_started = False
response = ""
for char in str:
if not number_has_started:
if self.__is_whitespace(char):
continue
if self.__is_sign(char) or self.__is_number(char):
response += char
number_has_started = True
continue
# Letter!!
return 0
# number has started
if self.__is_number(char):
response += char
continue
# number has finished
return self.__convert_to_int(response)
return self.__convert_to_int(response)
'SECOND SOLUTION - STR BUILTIN FUNCTIONS USED - SLOWER'
class Solution:
MAX = 2**31 - 1
MIN = 2**31 * -1
def __is_sign(self, char: str) -> bool:
if char in ['-', '+']:
return True
return False
def __convert_to_int(self, str: str) -> int:
str_len = len(str)
if not str_len or (str_len == 1 and not str[0].isnumeric()):
return 0
integer = int(str)
if integer > Solution.MAX:
return Solution.MAX
if integer < Solution.MIN:
return Solution.MIN
return integer
def myAtoi(self, str: str) -> int:
number_has_started = False
response = ""
#str = str.lstrip()
for char in str:
if not number_has_started:
if char.isspace():
continue
if self.__is_sign(char) or char.isnumeric():
response += char
number_has_started = True
continue
# Letter!!
return 0
# number has started
if char.isnumeric():
response += char
continue
# number has finished
return self.__convert_to_int(response)
return self.__convert_to_int(response)
solution = Solution()
assert solution.myAtoi('42') == 42
assert solution.myAtoi(' -42') == -42
assert solution.myAtoi('words and 987') == 0
assert solution.myAtoi('-91283472332') == -2147483648
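# Hedged extra checks (added): a few more edge cases the implementation above handles;
# they exercise sign handling, truncation at the first non-digit, and the empty or
# sign-only inputs that the conversion helper treats as 0.
assert solution.myAtoi('+1') == 1
assert solution.myAtoi('3.14159') == 3
assert solution.myAtoi('') == 0
assert solution.myAtoi('   -') == 0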
| 24.928
| 76
| 0.511553
|
4a164d05265ea9904ed1e3d011109aa3e6414ee1
| 747
|
py
|
Python
|
setup.py
|
chenbowen/py-thin-plate-spline
|
e82af945a30b229887807594ea26f43e511f3754
|
[
"MIT"
] | null | null | null |
setup.py
|
chenbowen/py-thin-plate-spline
|
e82af945a30b229887807594ea26f43e511f3754
|
[
"MIT"
] | null | null | null |
setup.py
|
chenbowen/py-thin-plate-spline
|
e82af945a30b229887807594ea26f43e511f3754
|
[
"MIT"
] | null | null | null |
# Copyright 2018 Christoph Heindl.
#
# Licensed under MIT License
# ============================================================
import os
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
with open('requirements.txt') as f:
required = f.read().splitlines()
setup(
name='thinplate',
version=open('thinplate/__init__.py').readlines()[-1].split()[-1].strip('\''),
description='Thin plate splines for numpy and pytorch',
author='Christoph Heindl',
url='https://github.com/cheind/py-thin-plate-spline',
license='MIT',
install_requires=required,
packages=['thinplate'],
include_package_data=True,
keywords='thin plate spline spatial transformer network'
)
| 27.666667
| 82
| 0.641232
|
4a164d786d324bbacd828f751fe19d861e14d010
| 3,296
|
py
|
Python
|
mars/scheduler/service.py
|
sighingnow/mars
|
c7897fbd144d230fff5edabc1494fb3ff44aa0d2
|
[
"Apache-2.0"
] | null | null | null |
mars/scheduler/service.py
|
sighingnow/mars
|
c7897fbd144d230fff5edabc1494fb3ff44aa0d2
|
[
"Apache-2.0"
] | null | null | null |
mars/scheduler/service.py
|
sighingnow/mars
|
c7897fbd144d230fff5edabc1494fb3ff44aa0d2
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 1999-2018 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
from .. import kvstore
from ..config import options
from .session import SessionManagerActor
from .resource import ResourceActor
from .chunkmeta import ChunkMetaActor
from .kvstore import KVStoreActor
from .node_info import NodeInfoActor
from .utils import SchedulerClusterInfoActor
logger = logging.getLogger(__name__)
class SchedulerService(object):
def __init__(self):
self._cluster_info_ref = None
self._session_manager_ref = None
self._assigner_ref = None
self._resource_ref = None
self._chunk_meta_ref = None
self._kv_store_ref = None
self._node_info_ref = None
self._result_receiver_ref = None
def start(self, endpoint, discoverer, pool):
"""
        There are two ways to start a scheduler:
        1) if options.kv_store is specified as an etcd address, the endpoint will be written
           into the kv-storage to indicate that this scheduler is one of the schedulers,
           and etcd is used for service discovery.
        2) if options.kv_store is not an etcd address, there will be only one scheduler
"""
kv_store = kvstore.get(options.kv_store)
kv_store.write('/schedulers/%s' % endpoint, dir=True)
if not isinstance(kv_store, kvstore.LocalKVStore):
# set etcd as service discover
logger.info('Mars Scheduler started with kv store %s.', options.kv_store)
# create KVStoreActor when there is a distributed KV store
self._kv_store_ref = pool.create_actor(KVStoreActor, uid=KVStoreActor.default_uid())
else:
# single scheduler
logger.info('Mars Scheduler started in standalone mode.')
# create ClusterInfoActor
self._cluster_info_ref = pool.create_actor(
SchedulerClusterInfoActor, discoverer, uid=SchedulerClusterInfoActor.default_uid())
# create ChunkMetaActor
self._chunk_meta_ref = pool.create_actor(ChunkMetaActor, uid=ChunkMetaActor.default_uid())
# create SessionManagerActor
self._session_manager_ref = pool.create_actor(
SessionManagerActor, uid=SessionManagerActor.default_uid())
# create ResourceActor
self._resource_ref = pool.create_actor(ResourceActor, uid=ResourceActor.default_uid())
# create NodeInfoActor
self._node_info_ref = pool.create_actor(NodeInfoActor, uid=NodeInfoActor.default_uid())
kv_store.write('/schedulers/%s/meta' % endpoint,
json.dumps(self._resource_ref.get_workers_meta()))
def stop(self, pool):
pool.destroy_actor(self._resource_ref)
| 40.691358
| 98
| 0.707221
|
4a164da6565e0ab422f64e32d093d86c8f112f6a
| 16,553
|
py
|
Python
|
utils/eval_tool.py
|
wen0618/simple-faster-rcnn-pytorch
|
b5c41eeaf9f0641f65bdd6fe0d7f1301bd1cabf3
|
[
"MIT"
] | null | null | null |
utils/eval_tool.py
|
wen0618/simple-faster-rcnn-pytorch
|
b5c41eeaf9f0641f65bdd6fe0d7f1301bd1cabf3
|
[
"MIT"
] | null | null | null |
utils/eval_tool.py
|
wen0618/simple-faster-rcnn-pytorch
|
b5c41eeaf9f0641f65bdd6fe0d7f1301bd1cabf3
|
[
"MIT"
] | null | null | null |
from __future__ import division
from collections import defaultdict
import itertools
import numpy as np
import six
from model.utils.bbox_tools import bbox_iou
# Compute average precision following the PASCAL VOC evaluation code
def eval_detection_voc(
pred_bboxes, pred_labels, pred_scores, gt_bboxes, gt_labels,
gt_difficults=None,
iou_thresh=0.5, use_07_metric=False):
"""Calculate average precisions based on evaluation code of PASCAL VOC.
This function evaluates predicted bounding boxes obtained from a dataset
which has :math:`N` images by using average precision for each class.
The code is based on the evaluation code used in PASCAL VOC Challenge.
    # Predicted boxes, labels and scores, plus ground-truth boxes, labels and scores,
    # for test_num images (the image data comes from the test dataset).
    # All arguments are lists with len(list) = opt.test_num (default = 10000).
    # pred_bboxes: [(A, 4), (B, 4), (C, 4), ...] with test_num entries; the boxes predicted
    #   by the trainer's predict function from the ground-truth inputs.
    # pred_labels: [(A,), (B,), (C,), ...] with test_num entries; pred_scores is shaped the same.
    # A, B, C, ... are determined by NMS, i.e. the number of predicted boxes, so they vary.
    # gt_bboxes: [(a, 4), (b, 4), ...] with test_num entries; a, b, ... are the numbers of
    #   annotated ground-truth boxes per image; gt_labels and gt_difficults are analogous.
    # use_07_metric (bool): whether to use the PASCAL VOC 2007 evaluation metric to
    #   compute average precision.
Args:
pred_bboxes (iterable of numpy.ndarray): An iterable of :math:`N`
sets of bounding boxes.
Its index corresponds to an index for the base dataset.
Each element of :obj:`pred_bboxes` is a set of coordinates
of bounding boxes. This is an array whose shape is :math:`(R, 4)`,
where :math:`R` corresponds
to the number of bounding boxes, which may vary among boxes.
The second axis corresponds to
:math:`y_{min}, x_{min}, y_{max}, x_{max}` of a bounding box.
pred_labels (iterable of numpy.ndarray): An iterable of labels.
Similar to :obj:`pred_bboxes`, its index corresponds to an
index for the base dataset. Its length is :math:`N`.
pred_scores (iterable of numpy.ndarray): An iterable of confidence
scores for predicted bounding boxes. Similar to :obj:`pred_bboxes`,
its index corresponds to an index for the base dataset.
Its length is :math:`N`.
gt_bboxes (iterable of numpy.ndarray): An iterable of ground truth
bounding boxes
whose length is :math:`N`. An element of :obj:`gt_bboxes` is a
bounding box whose shape is :math:`(R, 4)`. Note that the number of
bounding boxes in each image does not need to be same as the number
of corresponding predicted boxes.
gt_labels (iterable of numpy.ndarray): An iterable of ground truth
labels which are organized similarly to :obj:`gt_bboxes`.
gt_difficults (iterable of numpy.ndarray): An iterable of boolean
arrays which is organized similarly to :obj:`gt_bboxes`.
This tells whether the
corresponding ground truth bounding box is difficult or not.
By default, this is :obj:`None`. In that case, this function
considers all bounding boxes to be not difficult.
iou_thresh (float): A prediction is correct if its Intersection over
Union with the ground truth is above this value.
use_07_metric (bool): Whether to use PASCAL VOC 2007 evaluation metric
for calculating average precision. The default value is
:obj:`False`.
Returns:
dict:
The keys, value-types and the description of the values are listed
below.
* **ap** (*numpy.ndarray*): An array of average precisions. \
The :math:`l`-th value corresponds to the average precision \
for class :math:`l`. If class :math:`l` does not exist in \
either :obj:`pred_labels` or :obj:`gt_labels`, the corresponding \
value is set to :obj:`numpy.nan`.
* **map** (*float*): The average of Average Precisions over classes.
"""
    prec, rec = calc_detection_voc_prec_rec(  # compute precision and recall for each label class (defined below)
        pred_bboxes, pred_labels, pred_scores,
        gt_bboxes, gt_labels, gt_difficults,
        iou_thresh=iou_thresh)
    ap = calc_detection_voc_ap(prec, rec, use_07_metric=use_07_metric)  # compute ap and map from prec and rec
return {'ap': ap, 'map': np.nanmean(ap)}
def calc_detection_voc_prec_rec(
pred_bboxes, pred_labels, pred_scores, gt_bboxes, gt_labels,
gt_difficults=None,
iou_thresh=0.5):
"""Calculate precision and recall based on evaluation code of PASCAL VOC.
This function calculates precision and recall of
predicted bounding boxes obtained from a dataset which has :math:`N`
images.
The code is based on the evaluation code used in PASCAL VOC Challenge.
Args:
pred_bboxes (iterable of numpy.ndarray): An iterable of :math:`N`
sets of bounding boxes.
Its index corresponds to an index for the base dataset.
Each element of :obj:`pred_bboxes` is a set of coordinates
of bounding boxes. This is an array whose shape is :math:`(R, 4)`,
where :math:`R` corresponds
to the number of bounding boxes, which may vary among boxes.
The second axis corresponds to
:math:`y_{min}, x_{min}, y_{max}, x_{max}` of a bounding box.
pred_labels (iterable of numpy.ndarray): An iterable of labels.
Similar to :obj:`pred_bboxes`, its index corresponds to an
index for the base dataset. Its length is :math:`N`.
pred_scores (iterable of numpy.ndarray): An iterable of confidence
scores for predicted bounding boxes. Similar to :obj:`pred_bboxes`,
its index corresponds to an index for the base dataset.
Its length is :math:`N`.
gt_bboxes (iterable of numpy.ndarray): An iterable of ground truth
bounding boxes
whose length is :math:`N`. An element of :obj:`gt_bboxes` is a
bounding box whose shape is :math:`(R, 4)`. Note that the number of
bounding boxes in each image does not need to be same as the number
of corresponding predicted boxes.
gt_labels (iterable of numpy.ndarray): An iterable of ground truth
labels which are organized similarly to :obj:`gt_bboxes`.
gt_difficults (iterable of numpy.ndarray): An iterable of boolean
arrays which is organized similarly to :obj:`gt_bboxes`.
This tells whether the
corresponding ground truth bounding box is difficult or not.
By default, this is :obj:`None`. In that case, this function
considers all bounding boxes to be not difficult.
iou_thresh (float): A prediction is correct if its Intersection over
Union with the ground truth is above this value..
Returns:
tuple of two lists:
This function returns two lists: :obj:`prec` and :obj:`rec`.
* :obj:`prec`: A list of arrays. :obj:`prec[l]` is precision \
for class :math:`l`. If class :math:`l` does not exist in \
either :obj:`pred_labels` or :obj:`gt_labels`, :obj:`prec[l]` is \
set to :obj:`None`.
* :obj:`rec`: A list of arrays. :obj:`rec[l]` is recall \
for class :math:`l`. If class :math:`l` that is not marked as \
difficult does not exist in \
:obj:`gt_labels`, :obj:`rec[l]` is \
set to :obj:`None`.
"""
    pred_bboxes = iter(pred_bboxes)  # iter() builds an iterator, e.g. for i in iter([1, 2, 3]): print(i) -> 1 2 3
    pred_labels = iter(pred_labels)
    pred_scores = iter(pred_scores)
    gt_bboxes = iter(gt_bboxes)
    gt_labels = iter(gt_labels)
    if gt_difficults is None:
        gt_difficults = itertools.repeat(None)  # itertools.repeat builds an iterator that yields None on every step
    else:
        gt_difficults = iter(gt_difficults)
    n_pos = defaultdict(int)  # defaultdict returns a default value instead of raising KeyError for missing keys:
    # dict[key] = default(int) = 0, default(list) = [], default(dict) = {}
score = defaultdict(list)
match = defaultdict(list)
for pred_bbox, pred_label, pred_score, gt_bbox, gt_label, gt_difficult in \
six.moves.zip(
pred_bboxes, pred_labels, pred_scores,
gt_bboxes, gt_labels, gt_difficults):
        # six.moves.zip() == zip(): packs the corresponding elements of the iterables
        # into tuples and returns them; *zip() would unpack them again
        if gt_difficult is None:
            gt_difficult = np.zeros(gt_bbox.shape[0], dtype=bool)  # mark every box as not difficult
        # iterate over every label that appears in this image:
        for l in np.unique(np.concatenate((pred_label, gt_label)).astype(int)):
            # the concatenation returns a sorted, duplicate-free 1-D array, e.g. [2, 3, 4, 5, 6];
            # looping over it visits every label number present in gt_label + pred_label.
            # For each such label l:
            pred_mask_l = pred_label == l  # broadcast comparison over every element
            # pred_mask_l = [e.g. T, F, T, T, F, F, F, T, ...]: True where a predicted label equals l
            pred_bbox_l = pred_bbox[pred_mask_l]  # all predicted boxes with label l
            pred_score_l = pred_score[pred_mask_l]  # the corresponding prediction scores
            # sort by score
            order = pred_score_l.argsort()[::-1]  # indices that sort the scores in descending order (argsort returns indices)
            pred_bbox_l = pred_bbox_l[order]  # boxes sorted from highest to lowest score
            pred_score_l = pred_score_l[order]  # scores sorted the same way
            gt_mask_l = gt_label == l  # broadcast: True where a ground-truth label equals l
            gt_bbox_l = gt_bbox[gt_mask_l]  # all ground-truth boxes with label l
            gt_difficult_l = gt_difficult[gt_mask_l]  # the matching difficult flags
            n_pos[l] += np.logical_not(gt_difficult_l).sum()  # count the boxes with difficult == 0
            score[l].extend(pred_score_l)  # score = {l: [pred scores, ...]}; extend is a list method
            if len(pred_bbox_l) == 0:  # no predicted box with label l although the ground truth has some:
                continue  # nothing was predicted; match is untouched and score[l] got an empty extend,
                          # so match and score keep the same shape
            if len(gt_bbox_l) == 0:  # no ground-truth box with label l although we predicted some:
                match[l].extend((0,) * pred_bbox_l.shape[0])  # match[l] gets n_pred_box zeros
                continue  # every prediction of this label is wrong (0), nothing more to do for this image
# VOC evaluation follows integer typed bounding boxes.
            # I am not sure about the purpose of this step; the author's note only says it
            # "follows integer typed bounding boxes". Only ymax and xmax are changed, and this
            # does not actually convert the boxes to integers; pred_bbox and gt_bbox are only
            # used for the IoU computation below, so the question remains open.
pred_bbox_l = pred_bbox_l.copy()
pred_bbox_l[:, 2:] += 1 #ymax,xmax +=1
gt_bbox_l = gt_bbox_l.copy()
gt_bbox_l[:, 2:] += 1 #ymax,xmax +=1
            iou = bbox_iou(pred_bbox_l, gt_bbox_l)  # IoU between every predicted box and every gt box
            gt_index = iou.argmax(axis=1)  # argmax(axis=1) searches along the rows (the gt axis)
            # there are len(pred_bbox_l) indices; the i-th value n means gt_box[n] has the
            # largest IoU with pred_box[i]
            # set -1 if there is no matching ground truth
            gt_index[iou.max(axis=1) < iou_thresh] = -1  # set the index to -1 where the best IoU < iou_thresh
            # i.e. for each pred_bbox, if its largest IoU over all gt_bboxes is below the threshold,
            # the prediction is considered poor and its match label will later be set to 0
del iou
selec = np.zeros(gt_bbox_l.shape[0], dtype=bool)
            for gt_idx in gt_index:  # iterate over the gt_index values
                if gt_idx >= 0:  # the IoU condition is satisfied
                    if gt_difficult_l[gt_idx]:  # the matched gt box is flagged as difficult
                        match[l].append(-1)  # append -1 to match[l]
                    else:  # not a difficult box
                        if not selec[gt_idx]:  # this gt box has not been matched yet (selec[gt_idx] == 0)
                            match[l].append(1)  # append 1 to match[l]
                        else:  # this gt box was already matched to an earlier (higher-scoring) pred box
                            match[l].append(0)  # append 0 to match[l]
                        selec[gt_idx] = True  # mark the gt box as used once
                else:  # best IoU is below the threshold: the prediction is not good enough
                    match[l].append(0)  # append 0 to match[l]
            # note that every pred_box received a label of 0, 1 or -1 above, so
            # len(match[l]) == len(score[l]) == len(pred_bbox_l)
    for iter_ in (  # the six.moves.zip loop above stops as soon as any iterator is exhausted;
            # pred_bboxes etc. were turned into iterators at the top of this function, so we keep
            # calling next() here: if any of them still yields data, their lengths differ, which
            # means the input data is inconsistent
            pred_bboxes, pred_labels, pred_scores,
            gt_bboxes, gt_labels, gt_difficults):
        if next(iter_, None) is not None:  # next(iter_, None) returns None instead of raising when exhausted
            raise ValueError('Length of input iterables need to be same.')
    # note the difference between the PR curve and the ROC curve
    n_fg_class = max(n_pos.keys()) + 1  # there are n_fg_class classes
    prec = [None] * n_fg_class  # list of length n_fg_class filled with None
rec = [None] * n_fg_class
    for l in n_pos.keys():  # iterate over every label
        score_l = np.array(score[l])  # list to np.array
        match_l = np.array(match[l], dtype=np.int8)
        order = score_l.argsort()[::-1]
        match_l = match_l[order]  # sort match by score, from high to low
        tp = np.cumsum(match_l == 1)  # cumulative count of match_l == 1
        fp = np.cumsum(match_l == 0)  # cumulative count of match_l == 0
        # tp e.g. [1 2 3 3 4 5 5 6 7 8 8 9 10 11 ...]
        # fp e.g. [0 0 0 0 0 0 0 1 1 1 1 1 1 2 ...]
        # If an element of fp + tp is 0,
        # the corresponding element of prec[l] is nan.
        prec[l] = tp / (fp + tp)  # compute precision
        # If n_pos[l] is 0, rec[l] is None.
        if n_pos[l] > 0:  # if n_pos[l] == 0, rec[l] stays None
            rec[l] = tp / n_pos[l]  # compute recall
return prec, rec
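# Hedged worked example (added): suppose for some class l the score-sorted match
# flags are [1, 1, 0, 1] and n_pos[l] = 5 non-difficult ground-truth boxes. Then
# tp = [1, 2, 2, 3] and fp = [0, 0, 1, 1], so
# prec[l] = [1.0, 1.0, 2/3, 0.75] and rec[l] = [0.2, 0.4, 0.4, 0.6].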
def calc_detection_voc_ap(prec, rec, use_07_metric=False):
"""Calculate average precisions based on evaluation code of PASCAL VOC.
This function calculates average precisions
from given precisions and recalls.
The code is based on the evaluation code used in PASCAL VOC Challenge.
Args:
prec (list of numpy.array): A list of arrays.
:obj:`prec[l]` indicates precision for class :math:`l`.
If :obj:`prec[l]` is :obj:`None`, this function returns
:obj:`numpy.nan` for class :math:`l`.
rec (list of numpy.array): A list of arrays.
:obj:`rec[l]` indicates recall for class :math:`l`.
If :obj:`rec[l]` is :obj:`None`, this function returns
:obj:`numpy.nan` for class :math:`l`.
use_07_metric (bool): Whether to use PASCAL VOC 2007 evaluation metric
for calculating average precision. The default value is
:obj:`False`.
Returns:
~numpy.ndarray:
This function returns an array of average precisions.
The :math:`l`-th value corresponds to the average precision
for class :math:`l`. If :obj:`prec[l]` or :obj:`rec[l]` is
:obj:`None`, the corresponding value is set to :obj:`numpy.nan`.
"""
n_fg_class = len(prec)
ap = np.empty(n_fg_class)
    for l in six.moves.range(n_fg_class):  # iterate over every label
        if prec[l] is None or rec[l] is None:  # if either is None, set ap to np.nan
            ap[l] = np.nan
            continue
        if use_07_metric:
            # 11 point metric
            ap[l] = 0
            for t in np.arange(0., 1.1, 0.1):  # t = 0, 0.1, 0.2, ..., 1.0
                if np.sum(rec[l] >= t) == 0:  # no recall value for this label reaches t
                    p = 0
                else:
                    p = np.max(np.nan_to_num(prec[l])[rec[l] >= t])  # max precision over points with rec >= t
                    # np.nan_to_num turns nan into 0 so the maximum is well defined
                ap[l] += p / 11
else:
# correct AP calculation
# first append sentinel values at the end
            mpre = np.concatenate(([0], np.nan_to_num(prec[l]), [0]))  # pad a 0 at both ends
            mrec = np.concatenate(([0], rec[l], [1]))  # pad a 0 at the head and a 1 at the tail
            mpre = np.maximum.accumulate(mpre[::-1])[::-1]
            # predictions are sorted by score from high to low and labelled 0/1/-1, so a high
            # score is more likely to be a 1; precision therefore tends to fall while recall
            # accumulates upward. Reversing mpre, taking the running maximum, and reversing back
            # gives the precision envelope (the cumulative maximum seen from the right).
            # to calculate area under PR curve, look for points
            # where X axis (recall) changes value
            i = np.where(mrec[1:] != mrec[:-1])[0]  # compare neighbours to find where recall changes; record the index (x axis)
            # and sum (\Delta recall) * prec
            ap[l] = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])  # (recall increment) * (max precision), i.e. x-step * y_max
return ap
| 48.973373
| 106
| 0.604362
|
4a164e79f594e409266fc65a805a343d7352aa70
| 2,152
|
py
|
Python
|
setup.py
|
samrui/reelog
|
807a1caf0bacdf60366ca14e3dd84435acc3491e
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
samrui/reelog
|
807a1caf0bacdf60366ca14e3dd84435acc3491e
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
samrui/reelog
|
807a1caf0bacdf60366ca14e3dd84435acc3491e
|
[
"Apache-2.0"
] | null | null | null |
# encoding: utf-8
import os
import io
import sys
from setuptools import setup, find_packages, Command
from shutil import rmtree
NAME = 'reelog'
DESCRIPTION = 'python log best practice.'
URL = ''
EMAIL = 'samrui0129@gmail.com'
AUTHOR = 'Sam Rui'
REQUIRES_PYTHON = '>=2.7.0'
VERSION = '1.6.7'
REQUIRED = [
]
EXTRAS = {
}
here = os.path.abspath(os.path.dirname(__file__))
try:
with io.open(os.path.join(here, 'README.rst'), encoding='utf-8') as f:
long_description = '\n' + f.read()
except FileNotFoundError:
long_description = DESCRIPTION
class UploadCommand(Command):
"""Support setup.py upload."""
description = 'Build and publish the package.'
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
try:
print('Removing previous builds…')
rmtree(os.path.join(here, 'dist'))
except OSError:
pass
print('Building Source and Wheel (universal) distribution…')
os.system('{0} setup.py sdist bdist_wheel --universal'.format(sys.executable))
print('Uploading the package to PyPI via Twine…')
os.system('twine upload dist/*')
# print('Pushing git tags…')
# os.system('git tag v{0}'.format(VERSION))
# os.system('git push --tags')
sys.exit()
setup(
name=NAME,
version=VERSION,
description=DESCRIPTION,
long_description=long_description,
long_description_content_type='text/x-rst',
author=AUTHOR,
author_email=EMAIL,
python_requires=REQUIRES_PYTHON,
url=URL,
packages=find_packages(exclude=('tests',)),
install_requires=REQUIRED,
extras_require=EXTRAS,
include_package_data=True,
zip_safe=False,
license='Apache License',
classifiers=[
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: Implementation :: CPython',
],
cmdclass={
'upload': UploadCommand,
}
)
| 23.911111
| 86
| 0.63987
|
4a164ec7c129376f5fb83e072b0dc5a660562daa
| 167
|
py
|
Python
|
general-practice/Exercises solved/w3resource/basic/Exercise52.py
|
lugabrielbueno/Projeto
|
f012c5bb9ce6f6d7c9e8196cc7986127dba3eba0
|
[
"MIT"
] | null | null | null |
general-practice/Exercises solved/w3resource/basic/Exercise52.py
|
lugabrielbueno/Projeto
|
f012c5bb9ce6f6d7c9e8196cc7986127dba3eba0
|
[
"MIT"
] | null | null | null |
general-practice/Exercises solved/w3resource/basic/Exercise52.py
|
lugabrielbueno/Projeto
|
f012c5bb9ce6f6d7c9e8196cc7986127dba3eba0
|
[
"MIT"
] | null | null | null |
#Write a Python program to print to stderr
import sys
def eprint(*args, **kwargs):
print(*args, file=sys.stderr, **kwargs)
eprint("abc", "efg", "xyz", sep="--")
| 20.875
| 43
| 0.646707
|
4a164ecc7631250db45f87cf9aacc31fa97b0fe8
| 8,442
|
py
|
Python
|
lib/python2.7/site-packages/pyscope/falconframe.py
|
leschzinerlab/myami-3.2-freeHand
|
974b8a48245222de0d9cfb0f433533487ecce60d
|
[
"MIT"
] | null | null | null |
lib/python2.7/site-packages/pyscope/falconframe.py
|
leschzinerlab/myami-3.2-freeHand
|
974b8a48245222de0d9cfb0f433533487ecce60d
|
[
"MIT"
] | null | null | null |
lib/python2.7/site-packages/pyscope/falconframe.py
|
leschzinerlab/myami-3.2-freeHand
|
974b8a48245222de0d9cfb0f433533487ecce60d
|
[
"MIT"
] | 1
|
2019-09-05T20:58:37.000Z
|
2019-09-05T20:58:37.000Z
|
#!/usr/bin/env python
import math
import sys
import os
import time
import xml.dom.minidom
import xml.etree.ElementTree as et
import itertools
class FalconFrameConfigXmlMaker(object):
'''
Create Falcon IntermediateConfig.xml for given exposure time
either when frames to be saved and not.
The raw frames are put in limited number of bins.
'''
def __init__(self,simu=False):
'''
i/o time unit is second
'''
self.simulation = simu
self.idcounter = itertools.cycle(range(100))
self.base_frame_time = 0.055771
self.format_version = 1.0
self.no_save_frame_path = 'E:\\not_a_real_path'
self.base_frame_path = 'E:\\frames'
self.configxml_path = 'C:\Titan\Data\Falcon'
# Falcon 2 software can save frames in at most 7 bins
self.output_bin_limit = 7
self.output_bins = self.output_bin_limit + 0
self.equal_distributed_frame = 0
self.resetParams()
def resetParams(self):
self.frame_path = self.no_save_frame_path
self.frames_name = ''
# Readout delay of one means frame 0 (shutter roll-in) is not readout
self.internal_readout_delay = 1
self.frame_readout_delay = 1
self.equally_distributed_frame = 0
def getBaseFrameTime(self):
return self.base_frame_time
def getNumberOfBinLimit(self):
return self.output_bin_limit
def makeFrameDirName(self,use_timestamp):
if use_timestamp:
frames_name = time.strftime('%Y%m%d_%H%M%S', time.localtime())
self.frames_name = frames_name + '%02d' % (self.idcounter.next(),)
else:
self.frames_name = 'dummy'
return self.frames_name
def getFrameDirName(self):
return self.frames_name
def createFramePath(self,base_path):
if os.path.isdir(base_path) or self.simulation:
# real path below existing base_path becomes the frame path
self.frame_path = os.path.join(base_path,self.makeFrameDirName(True))
if not self.simulation:
os.makedirs(self.frame_path)
else:
# dummy path will keep the frames from being written
self.makeFrameDirName(False)
self.frame_path = base_path
def getFramePath(self):
return self.frame_path
def validateExposureTime(self,second):
'''
Exposure time in second is valid only if it allows one frame to be
saved after readout delay.
'''
if second < self.base_frame_time * (self.frame_readout_delay - 0.5):
return False
return True
def setExposureTime(self,second):
'''
Set Exposure Time in second. The time must be validated first.
'''
self.exposure_time = second
def setMaxNumberOfFrameBins(self,nframes):
'''
Set maximal bins to output frames
'''
self.output_bins = min(self.output_bin_limit,nframes)
def setEquallyDistributedStartFrame(self,frame_number):
'''
Set start of equal distribution to output frames
'''
self.equally_distributed_frame = frame_number
def getNumberOfFrameBins(self):
'''
Get number of frame bins saved without running self.distributeFramesInBins()
Exposure time must be set first.
'''
available_nframes = self.getNumberOfAvailableFrames()
usable_nframes = available_nframes -self.frame_readout_delay
return min(self.output_bins,max(usable_nframes,1))
def setFrameReadoutDelay(self,value=1):
self.frame_readout_delay = value
def getFrameReadoutDelay(self):
return self.frame_readout_delay
def getNumberOfAvailableFrames(self):
'''
Available frames include the roll-in frame since exposure time does not include that.
'''
half_frame_time = self.base_frame_time / 2.0
return int((self.exposure_time + half_frame_time) / self.base_frame_time) + self.internal_readout_delay
def distributeFramesInBins(self):
'''
return list of number of frames in each output bin.
'''
eframe = max((self.equally_distributed_frame - self.frame_readout_delay,0))
available_nframes = self.getNumberOfAvailableFrames()
# usable number of frames does not include those not read for output
usable_nframes = available_nframes -self.frame_readout_delay
# initialize with single frames
frames_in_bins = map((lambda x:1),range(eframe))
# equally distribute the rest
usable_equal_frames = usable_nframes - len(frames_in_bins)
equal_bins = self.output_bins - len(frames_in_bins)
frames_in_bins.extend(self._distributeItemsInBins(usable_equal_frames, equal_bins))
return frames_in_bins
def _distributeItemsInBins(self,n_items,bins):
'''
Distribute items in bins as evenly as possible.
Fill spare items from the end
'''
# keep bin as equally occupied as possible
equal_step = n_items / bins
spare_frames = n_items - equal_step * bins
n_items_in_bins = []
if equal_step > 0:
n_items_in_bins = bins * [equal_step,]
# spare items are filled-in from the last bins
for i in range(1,spare_frames+1):
n_items_in_bins[-1*i] += 1
else:
# not enough items to fill all bins
n_items_in_bins = n_items * [1,]
return n_items_in_bins
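    # Hedged worked example (added): _distributeItemsInBins(10, 7) gives
    # equal_step = 1 and spare_frames = 3, so the last three bins get the extras:
    # [1, 1, 1, 1, 2, 2, 2]. With fewer items than bins, e.g. (3, 7), it simply
    # returns [1, 1, 1].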
def setFrameRange(self):
'''
frame number range to output. Frame 0 is the first frame with
shutter roll-in.
'''
nframe_in_bins = self.distributeFramesInBins()
if self.simulation:
print nframe_in_bins
end_frames = []
offset = self.frame_readout_delay - self.internal_readout_delay
if False:
return [0],[0]
else:
end_frames = map((lambda x:offset+sum(nframe_in_bins[:x+1])),range(len(nframe_in_bins)))
start_frames = map((lambda x:end_frames[x]+1),range(len(end_frames)-1))
start_frames.insert(0,self.frame_readout_delay)
self.output_bins = len(start_frames)
return start_frames, end_frames
def writeConfigXml(self,start_frames,end_frames):
rt = et.Element('IntermediateConfig')
fv = et.SubElement(rt,'FormatVersion')
fv.text = '%.1f' % self.format_version
sp = et.SubElement(rt,'StoragePath')
sp.text = self.frame_path
ifb = {}
for i in range(len(start_frames)):
ifb[i] = et.SubElement(rt,'InterFrameBoundary')
start = self.base_frame_time * (start_frames[i]+0.5)
end = self.base_frame_time * (end_frames[i]+0.5)
ifb[i].text = '%.3f - %.3f' % (start, end)
# pretty print
roughstr = et.tostring(rt,'utf-8')
reparsed = xml.dom.minidom.parseString(roughstr)
xmlstr = reparsed.toprettyxml(indent="\t",newl='\n',encoding="utf-8")
if not self.simulation:
f = open(os.path.join(self.configxml_path,'IntermediateConfig.xml'),'w')
f.write(xmlstr)
f.close()
else:
print xmlstr
def makeConfigXML(self):
start_frames,end_frames = self.setFrameRange()
self.writeConfigXml(start_frames,end_frames)
def makeRealConfigFromExposureTime(self,second,equal_distr_frame=0,delay=None):
'''
    Make a useful frame-saving config.xml.
    The minimum exposure is 2 base_frame_time.
'''
self.resetParams()
self.setMaxNumberOfFrameBins(7)
self.setEquallyDistributedStartFrame(equal_distr_frame)
if delay is not None:
self.setFrameReadoutDelay(delay)
status = self.validateExposureTime(second)
if status is False:
return False
self.setExposureTime(second)
self.createFramePath(self.base_frame_path)
self.makeConfigXML()
return True
def makeDummyConfig(self,second):
'''
Non-frame saving can be achieved by saving
the frames to a directory under a non-existing
directory
'''
self.resetParams()
self.setMaxNumberOfFrameBins(1)
self.setFrameReadoutDelay(1)
status = self.validateExposureTime(second)
if status is False:
return status
self.setExposureTime(self.base_frame_time)
# self.no_save_frame_path needs to be on not existing
# since FalconIntermediateImage program is capable of
# making a directory below existing ones
self.createFramePath(os.path.join(self.no_save_frame_path,'dummy'))
self.makeConfigXML()
return True
if __name__ == '__main__':
equal_distr_frame = 0
if len(sys.argv) < 2:
print 'usage: falconframe.py exposure_time_in_second equal_distr_frame delay_number_frames'
print 'default to 0.5 second'
print 'default equal_distr_frame is the first non-single frame'
print ' including which will be equally distributed'
print 'delay_number_frames default is 1'
exposure_second = 0.5
delay = 1
else:
exposure_second = float(sys.argv[1])
if len(sys.argv) >= 3:
equal_distr_frame = int(sys.argv[2])
else:
equal_distr_frame = 0
if len(sys.argv) == 4:
delay = int(sys.argv[3])
else:
delay = 1
app = FalconFrameConfigXmlMaker(True)
#is_success = app.makeDummyConfig(exposure_second)
is_success = app.makeRealConfigFromExposureTime(exposure_second, equal_distr_frame,delay)
print is_success
print app.getFrameDirName()
| 31.617978
| 105
| 0.74461
|
4a164ecc9bf3379193920e2e5114bc71f30596c9
| 8,110
|
py
|
Python
|
DexiNed Files/imageSVHN.py
|
cschwa11/Data502FP
|
5255118eeca0e482474ae1119057b60ac013d61c
|
[
"MIT"
] | null | null | null |
DexiNed Files/imageSVHN.py
|
cschwa11/Data502FP
|
5255118eeca0e482474ae1119057b60ac013d61c
|
[
"MIT"
] | null | null | null |
DexiNed Files/imageSVHN.py
|
cschwa11/Data502FP
|
5255118eeca0e482474ae1119057b60ac013d61c
|
[
"MIT"
] | 2
|
2021-11-23T16:59:08.000Z
|
2021-11-23T17:01:43.000Z
|
import os
import cv2
import numpy as np
import torch
import kornia as kn
def image_normalization(img, img_min=0, img_max=255,
epsilon=1e-12):
"""This is a typical image normalization function
where the minimum and maximum of the image is needed
source: https://en.wikipedia.org/wiki/Normalization_(image_processing)
:param img: an image could be gray scale or color
:param img_min: for default is 0
:param img_max: for default is 255
:return: a normalized image, if max is 255 the dtype is uint8
"""
img = np.float32(img)
    # epsilon guards against a constant image (max == min), avoiding division by zero
img = (img - np.min(img)) * (img_max - img_min) / \
((np.max(img) - np.min(img)) + epsilon) + img_min
return img
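# Hedged worked example (added): for img = [0., 50., 100.] with the default range,
# min = 0 and max = 100, so the output is approximately [0., 127.5, 255.]; epsilon
# only matters when the image is constant (max == min).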
def count_parameters(model=None):
if model is not None:
return sum(p.numel() for p in model.parameters() if p.requires_grad)
else:
print("Error counting model parameters line 32 img_processing.py")
raise NotImplementedError
def save_image_batch_to_disk(tensor, output_dir, file_names, img_shape=None, arg=None, is_inchannel=False):
os.makedirs(output_dir, exist_ok=True)
if not arg.is_testing:
assert len(tensor.shape) == 4, tensor.shape
img_shape = np.array(img_shape)
for tensor_image, file_name in zip(tensor, file_names):
image_vis = kn.utils.tensor_to_image(
torch.sigmoid(tensor_image))#[..., 0]
image_vis = (255.0*(1.0 - image_vis)).astype(np.uint8)
output_file_name = os.path.join(output_dir, file_name)
image_vis =cv2.resize(image_vis, dsize=(img_shape[1], img_shape[0]))
assert cv2.imwrite(output_file_name, image_vis)
else:
if is_inchannel:
tensor, tensor2 = tensor
fuse_name = 'fusedCH'
av_name='avgCH'
is_2tensors=True
edge_maps2 = []
for i in tensor2:
tmp = torch.sigmoid(i).cpu().detach().numpy()
edge_maps2.append(tmp)
tensor2 = np.array(edge_maps2)
else:
fuse_name = 'fused'
av_name = 'avg'
tensor2=None
tmp_img2 = None
output_dir_f = os.path.join(output_dir, fuse_name)
output_dir_a = os.path.join(output_dir, av_name)
os.makedirs(output_dir_f, exist_ok=True)
os.makedirs(output_dir_a, exist_ok=True)
# 255.0 * (1.0 - em_a)
edge_maps = []
for i in tensor:
tmp = torch.sigmoid(i).cpu().detach().numpy()
edge_maps.append(tmp)
tensor = np.array(edge_maps)
# print(f"tensor shape: {tensor.shape}")
image_shape = [x.cpu().detach().numpy() for x in img_shape]
# (H, W) -> (W, H)
image_shape = [[y, x] for x, y in zip(image_shape[1], image_shape[2])]
assert len(image_shape) == len(file_names)
idx = 0
for i_shape, file_name in zip(image_shape, file_names):
tmp = tensor[:, idx, ...]
tmp2 = tensor2[:, idx, ...] if tensor2 is not None else None
# tmp = np.transpose(np.squeeze(tmp), [0, 1, 2])
tmp = np.squeeze(tmp)
tmp2 = np.squeeze(tmp2) if tensor2 is not None else None
            # Iterate over all 7 NN outputs for a particular image
preds = []
for i in range(tmp.shape[0]):
tmp_img = tmp[i]
tmp_img = np.uint8(image_normalization(tmp_img))
tmp_img = cv2.bitwise_not(tmp_img)
# tmp_img[tmp_img < 0.0] = 0.0
# tmp_img = 255.0 * (1.0 - tmp_img)
if tmp2 is not None:
tmp_img2 = tmp2[i]
tmp_img2 = np.uint8(image_normalization(tmp_img2))
tmp_img2 = cv2.bitwise_not(tmp_img2)
# Resize prediction to match input image size
if not tmp_img.shape[1] == i_shape[0] or not tmp_img.shape[0] == i_shape[1]:
tmp_img = cv2.resize(tmp_img, (i_shape[0], i_shape[1]))
tmp_img2 = cv2.resize(tmp_img2, (i_shape[0], i_shape[1])) if tmp2 is not None else None
if tmp2 is not None:
tmp_mask = np.logical_and(tmp_img>128,tmp_img2<128)
tmp_img= np.where(tmp_mask, tmp_img2, tmp_img)
preds.append(tmp_img)
else:
preds.append(tmp_img)
if i == 6:
fuse = tmp_img
fuse = fuse.astype(np.uint8)
if tmp_img2 is not None:
fuse2 = tmp_img2
fuse2 = fuse2.astype(np.uint8)
# fuse = fuse-fuse2
fuse_mask=np.logical_and(fuse>128,fuse2<128)
fuse = np.where(fuse_mask,fuse2, fuse)
# print(fuse.shape, fuse_mask.shape)
# Get the mean prediction of all the 7 outputs
average = np.array(preds, dtype=np.float32)
average = np.uint8(np.mean(average, axis=0))
output_file_name_f = os.path.join('/content/SVHN_Edge/f/test/', file_name)
output_file_name_a = os.path.join('/content/SVHN_Edge/a/test/', file_name)
cv2.imwrite(output_file_name_f, fuse)
cv2.imwrite(output_file_name_a, average)
idx += 1
def restore_rgb(config, I, restore_rgb=False):
"""
:param config: [args.channel_swap, args.mean_pixel_value]
:param I: and image or a set of images
:return: an image or a set of images restored
"""
if len(I) > 3 and not type(I) == np.ndarray:
I = np.array(I)
I = I[:, :, :, 0:3]
n = I.shape[0]
for i in range(n):
x = I[i, ...]
x = np.array(x, dtype=np.float32)
x += config[1]
if restore_rgb:
x = x[:, :, config[0]]
x = image_normalization(x)
I[i, :, :, :] = x
elif len(I.shape) == 3 and I.shape[-1] == 3:
I = np.array(I, dtype=np.float32)
I += config[1]
if restore_rgb:
I = I[:, :, config[0]]
I = image_normalization(I)
else:
print("Sorry the input data size is out of our configuration")
return I
def visualize_result(imgs_list, arg):
"""
data 2 image in one matrix
:param imgs_list: a list of prediction, gt and input data
:param arg:
:return: one image with the whole of imgs_list data
"""
n_imgs = len(imgs_list)
data_list = []
for i in range(n_imgs):
tmp = imgs_list[i]
# print(tmp.shape)
if tmp.shape[0] == 3:
tmp = np.transpose(tmp, [1, 2, 0])
tmp = restore_rgb([
arg.channel_swap,
arg.mean_pixel_values[:3]
], tmp)
tmp = np.uint8(image_normalization(tmp))
else:
tmp = np.squeeze(tmp)
if len(tmp.shape) == 2:
tmp = np.uint8(image_normalization(tmp))
tmp = cv2.bitwise_not(tmp)
tmp = cv2.cvtColor(tmp, cv2.COLOR_GRAY2BGR)
else:
tmp = np.uint8(image_normalization(tmp))
data_list.append(tmp)
# print(i,tmp.shape)
img = data_list[0]
if n_imgs % 2 == 0:
imgs = np.zeros((img.shape[0] * 2 + 10, img.shape[1]
* (n_imgs // 2) + ((n_imgs // 2 - 1) * 5), 3))
else:
imgs = np.zeros((img.shape[0] * 2 + 10, img.shape[1]
* ((1 + n_imgs) // 2) + ((n_imgs // 2) * 5), 3))
n_imgs += 1
k = 0
imgs = np.uint8(imgs)
i_step = img.shape[0] + 10
j_step = img.shape[1] + 5
for i in range(2):
for j in range(n_imgs // 2):
if k < len(data_list):
imgs[i * i_step:i * i_step+img.shape[0],
j * j_step:j * j_step+img.shape[1],
:] = data_list[k]
k += 1
else:
pass
return imgs
| 36.367713
| 107
| 0.535018
|
4a164fabbbf2d98362f99c47750ec364d7c292ba
| 630
|
py
|
Python
|
pfm/pf_command/base.py
|
takahi-i/pfm
|
224ca961ca43f50bd877789e2d8659ae838d517f
|
[
"MIT"
] | 9
|
2018-01-06T05:44:43.000Z
|
2020-06-24T00:15:16.000Z
|
pfm/pf_command/base.py
|
takahi-i/pfm
|
224ca961ca43f50bd877789e2d8659ae838d517f
|
[
"MIT"
] | 27
|
2018-01-06T09:29:48.000Z
|
2020-04-10T16:11:59.000Z
|
pfm/pf_command/base.py
|
takahi-i/pfm
|
224ca961ca43f50bd877789e2d8659ae838d517f
|
[
"MIT"
] | 1
|
2018-01-09T01:33:42.000Z
|
2018-01-09T01:33:42.000Z
|
from abc import abstractmethod
import os
from pfm.util.log import logger
class BaseCommand(object):
def __init__(self, config_path):
self.config_path = config_path
if self.config_path is None:
logger.info('config_path set to None...')
logger.info('Skip creating setting file...')
return
if not os.path.exists(self.config_path):
logger.info('Creating setting file of pfm in ' + self.config_path + '...')
            with open(self.config_path, 'w') as f:
                f.write('{}')
@abstractmethod
def run(self):
pass
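# Illustrative sketch (hypothetical command, not part of pfm itself): concrete commands
# reuse the setting-file bootstrap in BaseCommand.__init__ and only implement run().
class EchoConfigCommand(BaseCommand):
    def __init__(self, config_path):
        super(EchoConfigCommand, self).__init__(config_path)
    def run(self):
        logger.info('pfm setting file is located at ' + str(self.config_path))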
| 24.230769
| 86
| 0.588889
|
4a164fe62f91734e448d14c0eabf53a79897b3a8
| 12,606
|
py
|
Python
|
intersight/model/adapter_ext_eth_interface_all_of.py
|
CiscoDevNet/intersight-python
|
04b721f37c3044646a91c185c7259edfb991557a
|
[
"Apache-2.0"
] | 5
|
2021-12-16T15:13:32.000Z
|
2022-03-29T16:09:54.000Z
|
intersight/model/adapter_ext_eth_interface_all_of.py
|
CiscoDevNet/intersight-python
|
04b721f37c3044646a91c185c7259edfb991557a
|
[
"Apache-2.0"
] | 4
|
2022-01-25T19:05:51.000Z
|
2022-03-29T20:18:37.000Z
|
intersight/model/adapter_ext_eth_interface_all_of.py
|
CiscoDevNet/intersight-python
|
04b721f37c3044646a91c185c7259edfb991557a
|
[
"Apache-2.0"
] | 2
|
2020-07-07T15:01:08.000Z
|
2022-01-31T04:27:35.000Z
|
"""
Cisco Intersight
Cisco Intersight is a management platform delivered as a service with embedded analytics for your Cisco and 3rd party IT infrastructure. This platform offers an intelligent level of management that enables IT organizations to analyze, simplify, and automate their environments in more advanced ways than the prior generations of tools. Cisco Intersight provides an integrated and intuitive management experience for resources in the traditional data center as well as at the edge. With flexible deployment options to address complex security needs, getting started with Intersight is quick and easy. Cisco Intersight has deep integration with Cisco UCS and HyperFlex systems allowing for remote deployment, configuration, and ongoing maintenance. The model-based deployment works for a single system in a remote location or hundreds of systems in a data center and enables rapid, standardized configuration and deployment. It also streamlines maintaining those systems whether you are working with small or very large configurations. The Intersight OpenAPI document defines the complete set of properties that are returned in the HTTP response. From that perspective, a client can expect that no additional properties are returned, unless these properties are explicitly defined in the OpenAPI document. However, when a client uses an older version of the Intersight OpenAPI document, the server may send additional properties because the software is more recent than the client. In that case, the client may receive properties that it does not know about. Some generated SDKs perform a strict validation of the HTTP response body against the OpenAPI document. # noqa: E501
The version of the OpenAPI document: 1.0.9-4950
Contact: intersight@cisco.com
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from intersight.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
def lazy_import():
from intersight.model.adapter_unit_relationship import AdapterUnitRelationship
from intersight.model.asset_device_registration_relationship import AssetDeviceRegistrationRelationship
from intersight.model.inventory_device_info_relationship import InventoryDeviceInfoRelationship
globals()['AdapterUnitRelationship'] = AdapterUnitRelationship
globals()['AssetDeviceRegistrationRelationship'] = AssetDeviceRegistrationRelationship
globals()['InventoryDeviceInfoRelationship'] = InventoryDeviceInfoRelationship
class AdapterExtEthInterfaceAllOf(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
('class_id',): {
'ADAPTER.EXTETHINTERFACE': "adapter.ExtEthInterface",
},
('object_type',): {
'ADAPTER.EXTETHINTERFACE': "adapter.ExtEthInterface",
},
}
validations = {
}
additional_properties_type = None
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
lazy_import()
return {
'class_id': (str,), # noqa: E501
'object_type': (str,), # noqa: E501
'admin_state': (str,), # noqa: E501
'ep_dn': (str,), # noqa: E501
'ext_eth_interface_id': (str,), # noqa: E501
'interface_type': (str,), # noqa: E501
'mac_address': (str,), # noqa: E501
'peer_aggr_port_id': (int,), # noqa: E501
'peer_dn': (str,), # noqa: E501
'peer_port_id': (int,), # noqa: E501
'peer_slot_id': (int,), # noqa: E501
'switch_id': (str,), # noqa: E501
'adapter_unit': (AdapterUnitRelationship,), # noqa: E501
'inventory_device_info': (InventoryDeviceInfoRelationship,), # noqa: E501
'registered_device': (AssetDeviceRegistrationRelationship,), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'class_id': 'ClassId', # noqa: E501
'object_type': 'ObjectType', # noqa: E501
'admin_state': 'AdminState', # noqa: E501
'ep_dn': 'EpDn', # noqa: E501
'ext_eth_interface_id': 'ExtEthInterfaceId', # noqa: E501
'interface_type': 'InterfaceType', # noqa: E501
'mac_address': 'MacAddress', # noqa: E501
'peer_aggr_port_id': 'PeerAggrPortId', # noqa: E501
'peer_dn': 'PeerDn', # noqa: E501
'peer_port_id': 'PeerPortId', # noqa: E501
'peer_slot_id': 'PeerSlotId', # noqa: E501
'switch_id': 'SwitchId', # noqa: E501
'adapter_unit': 'AdapterUnit', # noqa: E501
'inventory_device_info': 'InventoryDeviceInfo', # noqa: E501
'registered_device': 'RegisteredDevice', # noqa: E501
}
_composed_schemas = {}
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs): # noqa: E501
"""AdapterExtEthInterfaceAllOf - a model defined in OpenAPI
Args:
Keyword Args:
class_id (str): The fully-qualified name of the instantiated, concrete type. This property is used as a discriminator to identify the type of the payload when marshaling and unmarshaling data.. defaults to "adapter.ExtEthInterface", must be one of ["adapter.ExtEthInterface", ] # noqa: E501
object_type (str): The fully-qualified name of the instantiated, concrete type. The value should be the same as the 'ClassId' property.. defaults to "adapter.ExtEthInterface", must be one of ["adapter.ExtEthInterface", ] # noqa: E501
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
admin_state (str): Admin configured state of an External Ethernet Interface.. [optional] # noqa: E501
ep_dn (str): Endpoint Config DN of an External Ethernet Interface.. [optional] # noqa: E501
ext_eth_interface_id (str): Unique Identifier for an External Ethernet Interface within the adapter object.. [optional] # noqa: E501
interface_type (str): Type of an External Ethernet Interface.. [optional] # noqa: E501
mac_address (str): MAC address of an External Ethernet Interface.. [optional] # noqa: E501
peer_aggr_port_id (int): Peer Aggregate Port Id attached to an External Ethernet Interface.. [optional] # noqa: E501
peer_dn (str): DN of peer end-point attached to an External Ethernet Interface.. [optional] # noqa: E501
peer_port_id (int): Peer Port Id attached to an External Ethernet Interface.. [optional] # noqa: E501
peer_slot_id (int): Peer Slot Id attached to an External Ethernet Interface.. [optional] # noqa: E501
switch_id (str): SwitchId attached to an External Ethernet Interface.. [optional] # noqa: E501
adapter_unit (AdapterUnitRelationship): [optional] # noqa: E501
inventory_device_info (InventoryDeviceInfoRelationship): [optional] # noqa: E501
registered_device (AssetDeviceRegistrationRelationship): [optional] # noqa: E501
"""
class_id = kwargs.get('class_id', "adapter.ExtEthInterface")
object_type = kwargs.get('object_type', "adapter.ExtEthInterface")
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.class_id = class_id
self.object_type = object_type
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
| 54.571429
| 1,678
| 0.644138
|
4a1650294cc39e69e2c3a591f942fef999e8736c
| 391
|
py
|
Python
|
pyobs/interfaces/ISyncTarget.py
|
pyobs/pyobs-core
|
e3401e63eb31587c2bc535f7346b7e4ef69d64ab
|
[
"MIT"
] | 4
|
2020-02-14T10:50:03.000Z
|
2022-03-25T04:15:06.000Z
|
pyobs/interfaces/ISyncTarget.py
|
pyobs/pyobs-core
|
e3401e63eb31587c2bc535f7346b7e4ef69d64ab
|
[
"MIT"
] | 60
|
2020-09-14T09:10:20.000Z
|
2022-03-25T17:51:42.000Z
|
pyobs/interfaces/ISyncTarget.py
|
pyobs/pyobs-core
|
e3401e63eb31587c2bc535f7346b7e4ef69d64ab
|
[
"MIT"
] | 2
|
2020-10-14T09:34:57.000Z
|
2021-04-27T09:35:57.000Z
|
from .interface import *
class ISyncTarget(Interface):
"""The module can synchronize a target, e.g. via a telescope control software behinde an
:class:`~pyobs.interfaces.ITelescope`."""
__module__ = 'pyobs.interfaces'
def sync_target(self, *args, **kwargs):
"""Synchronize device on current target."""
raise NotImplementedError
__all__ = ['ISyncTarget']
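# Illustrative sketch (hypothetical module, not part of pyobs-core): an implementing class
# only needs to override sync_target with its telescope-specific synchronization logic.
class _ExampleSyncTarget(ISyncTarget):
    def sync_target(self, *args, **kwargs):
        # a real module would forward this request to its telescope control software
        print('Synchronizing on the current target...')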
| 26.066667
| 92
| 0.690537
|
4a1650ec73309cef25c00fa227626ba4a042de5f
| 316
|
py
|
Python
|
tests/utils.py
|
acutexyz/monkeyideas
|
062ffa16f3bdf1fa57338837b50bfddfebdecdac
|
[
"MIT"
] | null | null | null |
tests/utils.py
|
acutexyz/monkeyideas
|
062ffa16f3bdf1fa57338837b50bfddfebdecdac
|
[
"MIT"
] | null | null | null |
tests/utils.py
|
acutexyz/monkeyideas
|
062ffa16f3bdf1fa57338837b50bfddfebdecdac
|
[
"MIT"
] | null | null | null |
from app.models import Idea
def create_idea_by_monkey(session, monkey):
"""Creates and returns new idea authored by given monkey
"""
idea = Idea(
title='-',
body='-',
author_id=monkey.id,
is_public=True
)
session.add(idea)
session.commit()
return idea
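# Illustrative sketch (hypothetical test, assuming pytest fixtures named session and monkey):
# the helper keeps individual test bodies short.
def test_created_idea_is_public(session, monkey):
    idea = create_idea_by_monkey(session, monkey)
    assert idea.is_public is True
    assert idea.author_id == monkey.id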
| 21.066667
| 60
| 0.598101
|
4a165206245bb09480355c46293bd577294b422a
| 3,149
|
py
|
Python
|
src/annalist_root/utils/StdoutContext.py
|
gklyne/annalist
|
82e7ef2d56a400325e7618fa9e590072ee8a71d3
|
[
"MIT"
] | 18
|
2015-02-20T23:09:13.000Z
|
2020-11-13T06:06:43.000Z
|
src/annalist_root/utils/StdoutContext.py
|
gklyne/annalist
|
82e7ef2d56a400325e7618fa9e590072ee8a71d3
|
[
"MIT"
] | 30
|
2015-01-03T09:56:28.000Z
|
2021-06-10T20:58:55.000Z
|
src/annalist_root/utils/StdoutContext.py
|
gklyne/annalist
|
82e7ef2d56a400325e7618fa9e590072ee8a71d3
|
[
"MIT"
] | 5
|
2015-02-02T09:01:23.000Z
|
2018-06-14T20:05:28.000Z
|
#!/usr/bin/python
"""
Context managers for redirecting stdout and stderr to a supplied file or stream
"""
from __future__ import unicode_literals
from __future__ import absolute_import, division, print_function
__author__ = "Graham Klyne (GK@ACM.ORG)"
__copyright__ = "Copyright 2011-2014, University of Oxford"
__license__ = "MIT (http://opensource.org/licenses/MIT)"
import sys
from utils.py3porting import is_string, to_unicode
class SwitchStdout:
"""
    Context handler class that switches standard output to a named file or supplied stream.
See also http://code.activestate.com/recipes/577564-context-manager-for-low-level-redirection-of-stdou/
I didn't use this because I wanted to be able to catch output to a StringIO stream for testing.
"""
def __init__(self, fileorstr):
self.fileorstr = fileorstr
self.opened = False
return
def __enter__(self):
if is_string(self.fileorstr):
self.outstr = open(self.fileorstr, "w")
self.opened = True
else:
self.outstr = self.fileorstr
self.savestdout = sys.stdout
sys.stdout = self.outstr
return
def __exit__(self, exctype, excval, exctraceback):
if self.opened: self.outstr.close()
sys.stdout = self.savestdout
return False
class SwitchStderr(object):
"""
    Context handler class that switches standard error to a named file or supplied stream.
    (Same as SwitchStdout, but switches stderr)
"""
def __init__(self, fileorstr):
self.fileorstr = fileorstr
self.opened = False
return
def __enter__(self):
if is_string(self.fileorstr):
self.outstr = open(self.fileorstr, "w")
self.opened = True
else:
self.outstr = self.fileorstr
self.savestderr = sys.stderr
sys.stderr = self.outstr
return
def __exit__(self, exctype, excval, exctraceback):
if self.opened:
self.outstr.close()
sys.stderr = self.savestderr
return False
class SwitchStdin(object):
"""
    Context handler class that switches standard input to a named file or supplied stream.
"""
def __init__(self, fileorstr):
self.fileorstr = fileorstr
self.opened = False
return
def __enter__(self):
if is_string(self.fileorstr):
            self.instr = open(self.fileorstr, "r")
self.opened = True
else:
self.instr = self.fileorstr
self.savestdin = sys.stdin
sys.stdin = self.instr
return
def __exit__(self, exctype, excval, exctraceback):
if self.opened:
            self.instr.close()
sys.stdin = self.savestdin
return False
if __name__ == "__main__":
    try:
        from StringIO import StringIO  # Python 2
    except ImportError:
        from io import StringIO  # Python 3
    outstr = StringIO()
with SwitchStdout(outstr) as mystdout:
print("Hello, ")
print("world", file=mystdout)
outtxt = outstr.getvalue()
print(repr(outtxt))
assert outtxt == "Hello, \nworld\n"
outstr.close()
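    # Illustrative extension (a sketch, not in the original module): the same pattern
    # captures stderr through SwitchStderr.
    errstr = StringIO()
    with SwitchStderr(errstr):
        print("oops", file=sys.stderr)
    errtxt = errstr.getvalue()
    print(repr(errtxt))
    assert errtxt == "oops\n"
    errstr.close()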
# End.
| 28.369369
| 107
| 0.625913
|
4a16523e660f07f3d1680937e1726082b4b6792a
| 470
|
py
|
Python
|
sync/rest/list-permissions/delete-permission/delete-permission.6.x.py
|
azaddeveloper/api-snippets
|
f88b153cd7186fa70b33733b205886502db0d1f2
|
[
"MIT"
] | 3
|
2020-05-05T10:01:02.000Z
|
2021-02-06T14:23:13.000Z
|
sync/rest/list-permissions/delete-permission/delete-permission.6.x.py
|
azaddeveloper/api-snippets
|
f88b153cd7186fa70b33733b205886502db0d1f2
|
[
"MIT"
] | null | null | null |
sync/rest/list-permissions/delete-permission/delete-permission.6.x.py
|
azaddeveloper/api-snippets
|
f88b153cd7186fa70b33733b205886502db0d1f2
|
[
"MIT"
] | 1
|
2019-10-02T14:36:36.000Z
|
2019-10-02T14:36:36.000Z
|
# Download the Python helper library from twilio.com/docs/python/install
from twilio.rest import Client
# Your Account Sid and Auth Token from twilio.com/console
account_sid = "ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"
auth_token = "your_auth_token"
client = Client(account_sid, auth_token)
did_delete = client.sync \
.services("ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.sync_lists("MyFirstList") \
.sync_list_permissions("bob") \
.delete()
print(did_delete)
| 29.375
| 72
| 0.77234
|
4a16536df294a27d8dbb39b18a23a820ba21eda7
| 1,104
|
py
|
Python
|
registersKtob.py
|
Gyrst/HyperLogLog
|
73b485999ed4dd9657fbcbbc7f0ca5b7d0b17449
|
[
"Apache-2.0"
] | null | null | null |
registersKtob.py
|
Gyrst/HyperLogLog
|
73b485999ed4dd9657fbcbbc7f0ca5b7d0b17449
|
[
"Apache-2.0"
] | null | null | null |
registersKtob.py
|
Gyrst/HyperLogLog
|
73b485999ed4dd9657fbcbbc7f0ca5b7d0b17449
|
[
"Apache-2.0"
] | null | null | null |
from hashSuggested import hash_A
from rhoSuggested import rho
import fileinput
def f(x):
res = ((x*0xbc164501) & 0x7fffffff) >> 21
return res
A = [0x21ae4036,
0x32435171,
0xac3338cf,
0xea97b40c,
0x0e504b22,
0x9ff9a4ef,
0x111d014d,
0x934f3787,
0x6cd079bf,
0x69db5c31,
0xdf3c28ed,
0x40daf2ad,
0x82a5891c,
0x4659c7b0,
0x73dc0ca8,
0xdad3aca2,
0x00c74c7e,
0x9a2521e2,
0xf38eb6aa,
0x64711ab6,
0x5823150a,
0xd13a3a9a,
0x30a5aa04,
0x0fb9a1da,
0xef785119,
0xc9f0b067,
0x1e7dde42,
0xdda4a7b2,
0x1a1c2640,
0x297c0633,
0x744edb48,
0x19adce93 ]
M = [0]*1024
def registers(x: int):
resF = f(x)
resHash = hash_A(A, x)
resRho = rho(resHash)
if(resRho > M[resF]):
M[resF] = resRho
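# Illustrative sketch (an assumption, not part of the original exercise code): once M has
# been filled by repeated calls to registers(), the standard HyperLogLog estimator combines
# the 1024 registers with a bias-corrected harmonic mean.
def estimate_cardinality():
    m = len(M)
    alpha = 0.7213 / (1 + 1.079 / m)  # bias-correction constant, valid for m >= 128
    harmonic_sum = sum(2.0 ** (-register) for register in M)
    return alpha * m * m / harmonic_sum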
| 22.08
| 45
| 0.464674
|
4a1654d228f0f58fe1bf5f492b681c8065bf45eb
| 2,577
|
py
|
Python
|
examples/model_selection/plot_train_error_vs_test_error.py
|
DeuroIO/Deuro-scikit-learn
|
9dd09a19593d1224077fe0d1a754aed936269528
|
[
"MIT"
] | 5
|
2018-07-04T22:13:54.000Z
|
2018-07-04T22:21:29.000Z
|
examples/model_selection/plot_train_error_vs_test_error.py
|
DeuroIO/Deuro-scikit-learn
|
9dd09a19593d1224077fe0d1a754aed936269528
|
[
"MIT"
] | null | null | null |
examples/model_selection/plot_train_error_vs_test_error.py
|
DeuroIO/Deuro-scikit-learn
|
9dd09a19593d1224077fe0d1a754aed936269528
|
[
"MIT"
] | 1
|
2018-02-25T21:40:51.000Z
|
2018-02-25T21:40:51.000Z
|
"""
=========================
Train error vs Test error
=========================
Illustration of how the performance of an estimator on unseen data (test data)
is not the same as the performance on training data. As the regularization
increases the performance on train decreases while the performance on test
is optimal within a range of values of the regularization parameter.
The example with an Elastic-Net regression model and the performance is
measured using the explained variance a.k.a. R^2.
"""
print(__doc__)
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# License: BSD 3 clause
import numpy as np
from sklearn import linear_model
# #############################################################################
# Generate sample data
n_samples_train, n_samples_test, n_features = 75, 150, 500
np.random.seed(0)
coef = np.random.randn(n_features)
coef[50:] = 0.0  # only the first 50 features have an impact on the model
X = np.random.randn(n_samples_train + n_samples_test, n_features)
y = np.dot(X, coef)
# Split train and test data
X_train, X_test = X[:n_samples_train], X[n_samples_train:]
y_train, y_test = y[:n_samples_train], y[n_samples_train:]
# #############################################################################
# Compute train and test errors
alphas = np.logspace(-5, 1, 60)
enet = linear_model.ElasticNet(l1_ratio=0.7)
train_errors = list()
test_errors = list()
for alpha in alphas:
enet.set_params(alpha=alpha)
enet.fit(X_train, y_train)
train_errors.append(enet.score(X_train, y_train))
test_errors.append(enet.score(X_test, y_test))
i_alpha_optim = np.argmax(test_errors)
alpha_optim = alphas[i_alpha_optim]
print("Optimal regularization parameter : %s" % alpha_optim)
# Estimate the coef_ on full data with optimal regularization parameter
enet.set_params(alpha=alpha_optim)
coef_ = enet.fit(X, y).coef_
# #############################################################################
# Plot results functions
import matplotlib.pyplot as plt
plt.subplot(2, 1, 1)
plt.semilogx(alphas, train_errors, label='Train')
plt.semilogx(alphas, test_errors, label='Test')
plt.vlines(alpha_optim, plt.ylim()[0], np.max(test_errors), color='k',
linewidth=3, label='Optimum on test')
plt.legend(loc='lower left')
plt.ylim([0, 1.2])
plt.xlabel('Regularization parameter')
plt.ylabel('Performance')
# Show estimated coef_ vs true coef
plt.subplot(2, 1, 2)
plt.plot(coef, label='True coef')
plt.plot(coef_, label='Estimated coef')
plt.legend()
plt.subplots_adjust(0.09, 0.04, 0.94, 0.94, 0.26, 0.26)
plt.show()
| 33.907895
| 79
| 0.670159
|
4a1654d50500fdf03c56cb4f92b199944fc58b3b
| 15,974
|
py
|
Python
|
deviceupdownstatus.py
|
zabrewer/automation-scripts
|
cd9f4ab8227333412baeba659d5a9fea4343e78f
|
[
"MIT"
] | 1
|
2021-01-09T21:18:52.000Z
|
2021-01-09T21:18:52.000Z
|
deviceupdownstatus.py
|
zabrewer/automation-scripts
|
cd9f4ab8227333412baeba659d5a9fea4343e78f
|
[
"MIT"
] | null | null | null |
deviceupdownstatus.py
|
zabrewer/automation-scripts
|
cd9f4ab8227333412baeba659d5a9fea4343e78f
|
[
"MIT"
] | 1
|
2020-12-02T19:51:43.000Z
|
2020-12-02T19:51:43.000Z
|
# This is a script to print a list of all devices in an organization's inventory and their up/down status.
# The script will not return up/down status for MV security cameras, as this was not supported at time of writing.
#
# To run the script, enter:
# python deviceupdownstatus.py -k <api key> -o <org name> [-a <snmp auth key> -p <snmp priv key>]
#
# Mandatory arguments:
# -k <api key> : Your Meraki Dashboard API key
# -o <org name> : Your Dashboard Organization name
# Optional arguments to use SNMPv3:
# -a <snmp auth key> : SNMPv3 authentication key. Required for SNMPv3
# -p <snmp priv key> : SNMPv3 privacy key. Required for SNMPv3
#
# Example:
# python deviceupdownstatus.py -k 1234 -o "Meraki Inc" -a authpass123 -p privpass123
#
# This script was developed using Python 3.6.4. You will need the Requests and PySNMP modules to run it. You can install
# these modules via pip:
# pip install requests
# pip install pysnmp
#
# More info on these modules:
# http://python-requests.org
# http://pysnmp.sourceforge.net
#
# To make script chaining easier, all lines containing informational messages to the user
# start with the character @
#
# This file was last modified on 2018-03-01
import sys, getopt, requests, json, time
from pysnmp.hlapi import *
from datetime import datetime
#Used for time.sleep(API_EXEC_DELAY). Delay added to avoid hitting dashboard API max request rate
API_EXEC_DELAY = 0.21
#connect and read timeouts for the Requests module
REQUESTS_CONNECT_TIMEOUT = 30
REQUESTS_READ_TIMEOUT = 30
#used by merakirequestthrottler(). DO NOT MODIFY
LAST_MERAKI_REQUEST = datetime.now()
class c_serialstatus:
def __init__(self):
self.serial = ''
self.status = ''
class c_deviceinfo:
def __init__(self):
self.serial = ''
self.model = ''
self.name = ''
self.networkId = ''
self.status = '' #this will be filled via SNMP
class c_snmpinfo:
def __init__(self):
self.host = ''
self.usercommunity = '' # SNMPv3 user / SNMPv2c community. Same in Meraki Dashboard
self.v3enabled = False
self.v2cenabled = False
self.authkey = '' #not returned by API
self.privkey = '' #not returned by API
def printusertext(p_message):
#prints a line of text that is meant for the user to read
#do not process these lines when chaining scripts
print('@ %s' % p_message)
def printhelp():
#prints help text
    printusertext('This is a script to print a list of all devices in an organization\'s inventory and their up/down status.')
printusertext(' The script will not return up/down status for MV security cameras, as this was not supported at time of writing.')
printusertext('')
printusertext('To run the script, enter:')
printusertext(' python deviceupdownstatus.py -k <api key> -o <org name> [-a <snmp auth key> -p <snmp priv key>]')
printusertext('')
    printusertext('Mandatory arguments:')
    printusertext('  -k <api key>        : Your Meraki Dashboard API key')
printusertext(' -o <org name> : Your Dashboard Organization name')
printusertext('Optional arguments to use SNMPv3:')
printusertext(' -a <snmp auth key> : SNMPv3 authentication key. Required for SNMPv3')
printusertext(' -p <snmp priv key> : SNMPv3 privacy key. Required for SNMPv3')
printusertext('')
printusertext('Example:')
printusertext(' python deviceupdownstatus.py -k 1234 -o "Meraki Inc" -a authpass123 -p privpass123')
printusertext('')
printusertext('Use double quotes ("") in Windows to pass arguments containing spaces. Names are case-sensitive.')
def snmppolldevicestatuses (p_shardhost, p_usercommunity, p_authkey = '', p_privkey = ''):
#returns a list of c_serialstatus objects containing serial numbers of devices returned by SNMP and their up/down status
#note that SNMP only returns status for devices assigned to a network, not the whole inventory
returnlist = []
serialslist = []
statuslist = []
flag_snmpv3 = True
if p_authkey == '' or p_privkey == '':
flag_snmpv3 = False
#snmp poll serials' list
if flag_snmpv3:
try:
g = nextCmd(SnmpEngine(),
UsmUserData(p_usercommunity, p_authkey, p_privkey,
authProtocol=usmHMACSHAAuthProtocol,
privProtocol=usmAesCfb128Protocol),
UdpTransportTarget((p_shardhost, 16100)),
ContextData(),
ObjectType(ObjectIdentity('1.3.6.1.4.1.29671.1.1.4.1.8')),
lexicographicMode = False,
lookupMib = False)
except:
printusertext ("ERROR 01: SNMPv3 nextCmd failed")
sys.exit(2)
else: #using SNMPv2c
try:
g = nextCmd(SnmpEngine(),
CommunityData(p_usercommunity),
UdpTransportTarget((p_shardhost, 16100)),
ContextData(),
ObjectType(ObjectIdentity('1.3.6.1.4.1.29671.1.1.4.1.8')),
lexicographicMode = False,
lookupMib = False)
except:
printusertext ("ERROR 02: SNMPv2c nextCmd failed")
sys.exit(2)
flag_continue = True
while flag_continue:
try:
errorIndication, errorStatus, errorIndex, varBinds = next(g)
except StopIteration:
flag_continue = False
except:
printusertext ("ERROR 03: SNMP next failed")
sys.exit(2)
if flag_continue:
for vb in varBinds:
try:
#mash everything to a str and grab the right characters. this works more reliably
crashbuffer = str(vb)[-17:-3]
serialslist.append(crashbuffer)
except:
printusertext ('WARNING: SNMP poll for serials returned no data')
#snmp poll statuses' list
if flag_snmpv3:
try:
g = nextCmd(SnmpEngine(),
UsmUserData(p_usercommunity, p_authkey, p_privkey,
authProtocol=usmHMACSHAAuthProtocol,
privProtocol=usmAesCfb128Protocol),
UdpTransportTarget((p_shardhost, 16100)),
ContextData(),
ObjectType(ObjectIdentity('1.3.6.1.4.1.29671.1.1.4.1.3')),
lexicographicMode = False,
lookupMib = False)
except:
printusertext ("ERROR 04: SNMPv3 nextCmd failed")
sys.exit(2)
else: #using SNMPv2c
try:
g = nextCmd(SnmpEngine(),
CommunityData(p_usercommunity),
UdpTransportTarget((p_shardhost, 16100)),
ContextData(),
ObjectType(ObjectIdentity('1.3.6.1.4.1.29671.1.1.4.1.3')),
lexicographicMode = False,
lookupMib = False)
except:
printusertext ("ERROR 05: SNMPv2c nextCmd failed")
sys.exit(2)
flag_continue = True
while flag_continue:
try:
errorIndication, errorStatus, errorIndex, varBinds = next(g)
except StopIteration:
flag_continue = False
except:
printusertext ("ERROR 06: SNMP next failed")
sys.exit(2)
if flag_continue:
for vb in varBinds:
try:
crashbuffer = vb[len(vb)-1]
statuslist.append(crashbuffer)
except:
printusertext ('WARNING: SNMP poll for statuses returned no data')
lastitem = len(statuslist)
for i in range (0, lastitem):
returnlist.append(c_serialstatus())
returnlist[i].serial = serialslist[i]
if statuslist[i] == 1:
returnlist[i].status = 'up'
else:
returnlist[i].status = 'down'
return (returnlist)
def merakirequestthrottler(p_requestcount=1):
#makes sure there is enough time between API requests to Dashboard not to hit shaper
global LAST_MERAKI_REQUEST
if (datetime.now()-LAST_MERAKI_REQUEST).total_seconds() < (API_EXEC_DELAY*p_requestcount):
time.sleep(API_EXEC_DELAY*p_requestcount)
LAST_MERAKI_REQUEST = datetime.now()
return
def getorgid(p_apikey, p_orgname):
#looks up org id for a specific org name
#on failure returns 'null'
merakirequestthrottler()
try:
r = requests.get('https://api.meraki.com/api/v0/organizations', headers={'X-Cisco-Meraki-API-Key': p_apikey, 'Content-Type': 'application/json'})
except:
printusertext('ERROR 07: Unable to contact Meraki cloud')
sys.exit(2)
if r.status_code != requests.codes.ok:
return 'null'
rjson = r.json()
for record in rjson:
if record['name'] == p_orgname:
return record['id']
return('null')
def getsnmpinfo(p_apikey, p_orgid):
    #Looks up the SNMP settings (hostname and SNMPv2c/SNMPv3 configuration) for a specific org.
    # The returned hostname also serves as the shard host for subsequent API calls.
    #On failure returns an object whose host attribute is set to 'null'
merakirequestthrottler()
try:
r = requests.get('https://api.meraki.com/api/v0/organizations/%s/snmp' % p_orgid, headers={'X-Cisco-Meraki-API-Key': p_apikey, 'Content-Type': 'application/json'}, timeout=(REQUESTS_CONNECT_TIMEOUT, REQUESTS_READ_TIMEOUT))
except:
printusertext('ERROR 08: Unable to contact Meraki cloud')
sys.exit(2)
returnobject = c_snmpinfo()
if r.status_code != requests.codes.ok:
returnobject.host = 'null'
return (returnobject)
rjson = r.json()
returnobject.host = rjson['hostname']
returnobject.v3enabled = rjson['v3Enabled']
returnobject.v2cenabled = rjson['v2cEnabled']
if rjson['v2cEnabled']:
returnobject.usercommunity = rjson['v2CommunityString']
elif rjson['v3Enabled']:
returnobject.usercommunity = rjson['v3User']
return(returnobject)
def getinventory(p_apikey, p_shardhost, p_orgid):
    #returns the device inventory of an organization as a list of c_deviceinfo objects
    #on failure returns a single record with serial 'null'
merakirequestthrottler()
try:
r = requests.get('https://%s/api/v0/organizations/%s/inventory' % (p_shardhost, p_orgid), headers={'X-Cisco-Meraki-API-Key': p_apikey, 'Content-Type': 'application/json'}, timeout=(REQUESTS_CONNECT_TIMEOUT, REQUESTS_READ_TIMEOUT) )
except:
printusertext('ERROR 09: Unable to contact Meraki cloud')
sys.exit(2)
returnvalue = []
if r.status_code != requests.codes.ok:
returnvalue.append(c_deviceinfo())
returnvalue[0].serial = 'null'
return(returnvalue)
rjson = r.json()
rjlen = len(rjson)
for i in range (0, rjlen):
returnvalue.append(c_deviceinfo())
returnvalue[i].serial = rjson[i]['serial']
returnvalue[i].model = rjson[i]['model']
if rjson[i]['networkId'] is None:
returnvalue[i].networkId = ''
else:
returnvalue[i].networkId = rjson[i]['networkId']
return(returnvalue)
def getdevicename(p_apikey, p_shardhost, p_nwid, p_serial):
    #returns the name of a device (or its MAC address if no name is set)
    #on failure returns an empty string
merakirequestthrottler()
try:
r = requests.get('https://%s/api/v0/networks/%s/devices/%s' % (p_shardhost, p_nwid, p_serial), headers={'X-Cisco-Meraki-API-Key': p_apikey, 'Content-Type': 'application/json'}, timeout=(REQUESTS_CONNECT_TIMEOUT, REQUESTS_READ_TIMEOUT) )
except:
printusertext('ERROR 10: Unable to contact Meraki cloud')
sys.exit(2)
if r.status_code != requests.codes.ok:
return('')
rjson = r.json()
if rjson['name'] is None:
return(rjson['mac'])
return(rjson['name'])
def main(argv):
#initialize variables for command line arguments
arg_apikey = ''
arg_orgname = ''
arg_authkey = ''
arg_privkey = ''
#get command line arguments
try:
opts, args = getopt.getopt(argv, 'hk:o:a:p:')
except getopt.GetoptError:
printhelp()
sys.exit(2)
for opt, arg in opts:
if opt == '-h':
printhelp()
sys.exit()
elif opt == '-k':
arg_apikey = arg
elif opt == '-o':
arg_orgname = arg
elif opt == '-a':
arg_authkey = arg
elif opt == '-p':
arg_privkey = arg
    #check if all required parameters have been given
    if arg_apikey == '' or arg_orgname == '':
printhelp()
sys.exit(2)
#resolve orgid
orgid = getorgid(arg_apikey, arg_orgname)
#get SNMP info
#this call sometimes fails. implementing a try-verify-wait-repeat loop
MAX_SHARD_RESOLVE_TRIES = 10
flag_unabletoresolveshard = True
for i in range (0, MAX_SHARD_RESOLVE_TRIES):
snmpinfo = getsnmpinfo(arg_apikey, orgid)
if snmpinfo.host == 'null':
time.sleep(API_EXEC_DELAY*(i+1))
else:
flag_unabletoresolveshard = False
break
if flag_unabletoresolveshard:
        printusertext('ERROR 11: Unable to read data for org "%s"' % arg_orgname)
sys.exit(2)
#get device inventory
inventory = getinventory(arg_apikey, snmpinfo.host, orgid)
if inventory[0].serial == 'null':
printusertext('ERROR 12: Unable to read inventory via API')
sys.exit(2)
#poll device up/down status via SNMP
if arg_authkey != '' and arg_privkey != '':
if snmpinfo.v3enabled:
updownstatus = snmppolldevicestatuses(snmpinfo.host, snmpinfo.usercommunity, arg_authkey, arg_privkey)
else:
printusertext('ERROR 13: Unable to poll via SNMPv3 (not enabled in org)')
sys.exit(2)
else:
if snmpinfo.v2cenabled:
updownstatus = snmppolldevicestatuses(snmpinfo.host, snmpinfo.usercommunity)
else:
printusertext('ERROR 14: Unable to poll via SNMPv2c (not enabled in org)')
sys.exit(2)
for device in inventory:
if device.networkId != '':
if device.model[:2] == 'MV':
device.status = 'n/a' #currently SNMP does not return up/down for MV cameras
flag_devicelocated = False
for state in updownstatus:
if device.serial == state.serial:
device.status = state.status
flag_devicelocated = True
break
device.name = getdevicename(arg_apikey, snmpinfo.host, device.networkId, device.serial)
else:
device.status = "not in use"
print ('DEVICE NAME MODEL SERIAL STATUS')
for device in inventory:
print('%-30s%-15s%-20s%-10s' % (device.name, device.model, device.serial, device.status))
if __name__ == '__main__':
main(sys.argv[1:])
| 38.033333
| 245
| 0.585201
|
4a16552239e1dfdb75d4b0c2c660842d97a3492a
| 16,160
|
py
|
Python
|
plugins/modules/oci_compute_instance_agent_instance_agent_command_facts.py
|
LaudateCorpus1/oci-ansible-collection
|
2b1cd87b4d652a97c1ca752cfc4fdc4bdb37a7e7
|
[
"Apache-2.0"
] | null | null | null |
plugins/modules/oci_compute_instance_agent_instance_agent_command_facts.py
|
LaudateCorpus1/oci-ansible-collection
|
2b1cd87b4d652a97c1ca752cfc4fdc4bdb37a7e7
|
[
"Apache-2.0"
] | null | null | null |
plugins/modules/oci_compute_instance_agent_instance_agent_command_facts.py
|
LaudateCorpus1/oci-ansible-collection
|
2b1cd87b4d652a97c1ca752cfc4fdc4bdb37a7e7
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python
# Copyright (c) 2020, 2022 Oracle and/or its affiliates.
# This software is made available to you under the terms of the GPL 3.0 license or the Apache 2.0 license.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Apache License v2.0
# See LICENSE.TXT for details.
# GENERATED FILE - DO NOT EDIT - MANUAL CHANGES WILL BE OVERWRITTEN
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
"metadata_version": "1.1",
"status": ["preview"],
"supported_by": "community",
}
DOCUMENTATION = """
---
module: oci_compute_instance_agent_instance_agent_command_facts
short_description: Fetches details about one or multiple InstanceAgentCommand resources in Oracle Cloud Infrastructure
description:
- Fetches details about one or multiple InstanceAgentCommand resources in Oracle Cloud Infrastructure
- Lists the Oracle Cloud Agent commands issued in a compartment.
- If I(instance_agent_command_id) is specified, the details of a single InstanceAgentCommand will be returned.
version_added: "2.9.0"
author: Oracle (@oracle)
options:
instance_agent_command_id:
description:
- The L(OCID,https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) of the command.
- Required to get a specific instance_agent_command.
type: str
aliases: ["id"]
compartment_id:
description:
- The L(OCID,https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) of the compartment.
- Required to list multiple instance_agent_commands.
type: str
sort_by:
description:
- The field to sort by. You can provide one sort order (`sortOrder`). Default order for
`TIMECREATED` is descending.
- "**Note:** In general, some \\"List\\" operations (for example, `ListInstances`) let you
optionally filter by availability domain if the scope of the resource type is within a
single availability domain. If you call one of these \\"List\\" operations without specifying
an availability domain, the resources are grouped by availability domain, then sorted."
type: str
choices:
- "TIMECREATED"
- "DISPLAYNAME"
sort_order:
description:
- The sort order to use, either ascending (`ASC`) or descending (`DESC`). The `DISPLAYNAME` sort order
is case sensitive.
type: str
choices:
- "ASC"
- "DESC"
extends_documentation_fragment: [ oracle.oci.oracle, oracle.oci.oracle_display_name_option ]
"""
EXAMPLES = """
- name: Get a specific instance_agent_command
oci_compute_instance_agent_instance_agent_command_facts:
# required
instance_agent_command_id: "ocid1.instanceagentcommand.oc1..xxxxxxEXAMPLExxxxxx"
- name: List instance_agent_commands
oci_compute_instance_agent_instance_agent_command_facts:
# required
compartment_id: "ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx"
# optional
sort_by: TIMECREATED
sort_order: ASC
"""
RETURN = """
instance_agent_commands:
description:
- List of InstanceAgentCommand resources
returned: on success
type: complex
contains:
id:
description:
- The L(OCID,https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) of the command.
- Returned for get operation
returned: on success
type: str
sample: "ocid1.resource.oc1..xxxxxxEXAMPLExxxxxx"
compartment_id:
description:
- The L(OCID,https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) of the compartment containing the command.
returned: on success
type: str
sample: "ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx"
display_name:
description:
- A user-friendly name. Does not have to be unique. Avoid entering confidential information.
returned: on success
type: str
sample: display_name_example
time_created:
description:
- The date and time the command was created, in the format defined by
L(RFC3339,https://tools.ietf.org/html/rfc3339).
returned: on success
type: str
sample: "2013-10-20T19:20:30+01:00"
time_updated:
description:
- The date and time the command was last updated, in the format defined by
L(RFC3339,https://tools.ietf.org/html/rfc3339).
returned: on success
type: str
sample: "2013-10-20T19:20:30+01:00"
is_canceled:
description:
- Whether a request was made to cancel the command. Canceling a command is a best-effort attempt.
returned: on success
type: bool
sample: true
execution_time_out_in_seconds:
description:
- The amount of time that Oracle Cloud Agent is given to run the command on the instance before timing
out. The timer starts when Oracle Cloud Agent starts the command. Zero means no timeout.
- Returned for get operation
returned: on success
type: int
sample: 56
target:
description:
- The target instance that the command runs on.
- Returned for get operation
returned: on success
type: complex
contains:
instance_id:
description:
- The L(OCID,https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) of the target instance.
returned: on success
type: str
sample: "ocid1.instance.oc1..xxxxxxEXAMPLExxxxxx"
content:
description:
- The contents of the command.
- Returned for get operation
returned: on success
type: complex
contains:
source:
description:
- The source of the command.
returned: on success
type: complex
contains:
source_type:
description:
- "The source type for the command. The following values are supported:"
- "- `TEXT` - uses a plain text command that is specified inline with the request.
- `OBJECT_STORAGE_URI` - imports a command from an Object Storage URL.
- `OBJECT_STORAGE_TUPLE` - imports a command from an Object Storage bucket."
- For background information about Object Storage buckets and URLs, see
L(Overview of Object Storage,https://docs.cloud.oracle.com/Content/Object/Concepts/objectstorageoverview.htm).
returned: on success
type: str
sample: TEXT
bucket_name:
description:
- The Object Storage bucket for the command.
returned: on success
type: str
sample: bucket_name_example
namespace_name:
description:
- The Object Storage namespace for the command.
returned: on success
type: str
sample: namespace_name_example
object_name:
description:
- The Object Storage object name for the command.
returned: on success
type: str
sample: object_name_example
source_uri:
description:
- The Object Storage URL or pre-authenticated request (PAR) for the command.
returned: on success
type: str
sample: source_uri_example
text:
description:
- The plain text command.
returned: on success
type: str
sample: text_example
text_sha256:
description:
- SHA-256 checksum value of the text content.
returned: on success
type: str
sample: text_sha256_example
output:
description:
- The output destination for the command.
returned: on success
type: complex
contains:
output_type:
description:
- "The output type for the command. The following values are supported:"
- "- `TEXT` - the command output is returned as plain text.
- `OBJECT_STORAGE_URI` - the command output is saved to an Object Storage URL.
- `OBJECT_STORAGE_TUPLE` - the command output is saved to an Object Storage bucket."
- For background information about Object Storage buckets and URLs, see
L(Overview of Object Storage,https://docs.cloud.oracle.com/Content/Object/Concepts/objectstorageoverview.htm).
returned: on success
type: str
sample: TEXT
bucket_name:
description:
- The Object Storage bucket for the command output.
returned: on success
type: str
sample: bucket_name_example
namespace_name:
description:
- The Object Storage namespace for the command output.
returned: on success
type: str
sample: namespace_name_example
object_name:
description:
- The Object Storage object name for the command output.
returned: on success
type: str
sample: object_name_example
output_uri:
description:
- The Object Storage URL or pre-authenticated request (PAR) for the command output.
returned: on success
type: str
sample: output_uri_example
instance_agent_command_id:
description:
- The L(OCID,https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) of the command.
- Returned for list operation
returned: on success
type: str
sample: "ocid1.instanceagentcommand.oc1..xxxxxxEXAMPLExxxxxx"
sample: [{
"id": "ocid1.resource.oc1..xxxxxxEXAMPLExxxxxx",
"compartment_id": "ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx",
"display_name": "display_name_example",
"time_created": "2013-10-20T19:20:30+01:00",
"time_updated": "2013-10-20T19:20:30+01:00",
"is_canceled": true,
"execution_time_out_in_seconds": 56,
"target": {
"instance_id": "ocid1.instance.oc1..xxxxxxEXAMPLExxxxxx"
},
"content": {
"source": {
"source_type": "TEXT",
"bucket_name": "bucket_name_example",
"namespace_name": "namespace_name_example",
"object_name": "object_name_example",
"source_uri": "source_uri_example",
"text": "text_example",
"text_sha256": "text_sha256_example"
},
"output": {
"output_type": "TEXT",
"bucket_name": "bucket_name_example",
"namespace_name": "namespace_name_example",
"object_name": "object_name_example",
"output_uri": "output_uri_example"
}
},
"instance_agent_command_id": "ocid1.instanceagentcommand.oc1..xxxxxxEXAMPLExxxxxx"
}]
"""
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.oracle.oci.plugins.module_utils import oci_common_utils
from ansible_collections.oracle.oci.plugins.module_utils.oci_resource_utils import (
OCIResourceFactsHelperBase,
get_custom_class,
)
try:
from oci.compute_instance_agent import ComputeInstanceAgentClient
HAS_OCI_PY_SDK = True
except ImportError:
HAS_OCI_PY_SDK = False
class InstanceAgentCommandFactsHelperGen(OCIResourceFactsHelperBase):
"""Supported operations: get, list"""
def get_required_params_for_get(self):
return [
"instance_agent_command_id",
]
def get_required_params_for_list(self):
return [
"compartment_id",
]
def get_resource(self):
return oci_common_utils.call_with_backoff(
self.client.get_instance_agent_command,
instance_agent_command_id=self.module.params.get(
"instance_agent_command_id"
),
)
def list_resources(self):
optional_list_method_params = [
"sort_by",
"sort_order",
"display_name",
]
optional_kwargs = dict(
(param, self.module.params[param])
for param in optional_list_method_params
if self.module.params.get(param) is not None
)
return oci_common_utils.list_all_resources(
self.client.list_instance_agent_commands,
compartment_id=self.module.params.get("compartment_id"),
**optional_kwargs
)
InstanceAgentCommandFactsHelperCustom = get_custom_class(
"InstanceAgentCommandFactsHelperCustom"
)
class ResourceFactsHelper(
InstanceAgentCommandFactsHelperCustom, InstanceAgentCommandFactsHelperGen
):
pass
def main():
module_args = oci_common_utils.get_common_arg_spec()
module_args.update(
dict(
instance_agent_command_id=dict(aliases=["id"], type="str"),
compartment_id=dict(type="str"),
sort_by=dict(type="str", choices=["TIMECREATED", "DISPLAYNAME"]),
sort_order=dict(type="str", choices=["ASC", "DESC"]),
display_name=dict(type="str"),
)
)
module = AnsibleModule(argument_spec=module_args)
if not HAS_OCI_PY_SDK:
module.fail_json(msg="oci python sdk required for this module.")
resource_facts_helper = ResourceFactsHelper(
module=module,
resource_type="instance_agent_command",
service_client_class=ComputeInstanceAgentClient,
namespace="compute_instance_agent",
)
result = []
if resource_facts_helper.is_get():
result = [resource_facts_helper.get()]
elif resource_facts_helper.is_list():
result = resource_facts_helper.list()
else:
resource_facts_helper.fail()
module.exit_json(instance_agent_commands=result)
if __name__ == "__main__":
main()
| 41.119593
| 144
| 0.559158
|
4a16570646470a41122dfc228149cf254c1cc807
| 1,989
|
py
|
Python
|
test_pypheus/test_pypheus_network.py
|
xod442/pypheus
|
11988d7c0efe2772bca80c2d40a71cc8ba707153
|
[
"Apache-2.0"
] | null | null | null |
test_pypheus/test_pypheus_network.py
|
xod442/pypheus
|
11988d7c0efe2772bca80c2d40a71cc8ba707153
|
[
"Apache-2.0"
] | null | null | null |
test_pypheus/test_pypheus_network.py
|
xod442/pypheus
|
11988d7c0efe2772bca80c2d40a71cc8ba707153
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Module for testing the functions in pypheus.network.
"""
import os
import vcr
from unittest import TestCase
from unittest import mock
from nose.plugins.skip import SkipTest
import urllib3
urllib3.disable_warnings()
import json
import requests
from pypheus.network import Network
from pypheus.storage import Storage
from pypheus.logs import Logs
from pypheus.monitoring import Monitoring
host = os.environ['HOST']
username = os.environ['USERNAME']
password = os.environ['PASSWORD']
nets = Network(host,username,password)
SKIPTEST=True
#TODO TAKE OUT HARDCODED DATA LATER
my_vcr = vcr.VCR(
serializer='json',
    cassette_library_dir='./test_pypheus/fixtures/cassettes',
record_mode='new_episodes',
match_on=['uri', 'method'],
)
class Networks(TestCase):
"""
Test Case for pypheus.network get_all_networks function
"""
@vcr.use_cassette(cassette_library_dir='./test_pypheus/fixtures/cassettes')
def test_get_all_networks(self):
"""
Simple test to return networks. URL has an ID parameter that is optional
:return:
"""
test_networks = nets.get_all_networks()
self.assertIs(type(test_networks['networks']), list)
self.assertIs(type(test_networks['networks'][0]), dict)
def test_get_network_types(self):
"""
Simple test to return network-types. URL has an ID parameter that is optional
:return:
"""
test_networks = nets.network_types()
self.assertIs(type(test_networks['networkTypes']), list)
self.assertIs(type(test_networks['networkTypes'][0]), dict)
def test_get_network_routers(self):
"""
Simple test to return network-routers. URL has an ID parameter that is optional
:return:
"""
test_networks = nets.network_routers()
self.assertIs(type(test_networks['networkRouters']), list)
self.assertIs(type(test_networks['networkRouters'][0]), dict)
| 28.414286
| 87
| 0.694822
|
4a16570c89a08877faed85f18323f571160971b3
| 425
|
py
|
Python
|
htf/__init__.py
|
oktak/hoomd-tf
|
357b5c365781612b574d8fb34592ab7247e611e6
|
[
"MIT"
] | null | null | null |
htf/__init__.py
|
oktak/hoomd-tf
|
357b5c365781612b574d8fb34592ab7247e611e6
|
[
"MIT"
] | null | null | null |
htf/__init__.py
|
oktak/hoomd-tf
|
357b5c365781612b574d8fb34592ab7247e611e6
|
[
"MIT"
] | null | null | null |
# Copyright (c) 2018 Andrew White at the University of Rochester
# This file is part of the Hoomd-Tensorflow plugin developed by Andrew White
# need to import md to have library available.
import hoomd.md
# import make reverse for testing purposes
from .tfcompute import tfcompute, _make_reverse_indices
from .graphbuilder import graph_builder
from .tfarraycomm import tf_array_comm
from .utils import *
__version__ = '0.4'
| 35.416667
| 76
| 0.807059
|
4a1658d900dce3c12cbf6de624e68fbbd6823352
| 15,868
|
py
|
Python
|
allennlp/models/semantic_role_labeler.py
|
loopylangur/allennlp
|
0fc695b08a0376317e45ae0a45584aa9eb14beb6
|
[
"Apache-2.0"
] | null | null | null |
allennlp/models/semantic_role_labeler.py
|
loopylangur/allennlp
|
0fc695b08a0376317e45ae0a45584aa9eb14beb6
|
[
"Apache-2.0"
] | null | null | null |
allennlp/models/semantic_role_labeler.py
|
loopylangur/allennlp
|
0fc695b08a0376317e45ae0a45584aa9eb14beb6
|
[
"Apache-2.0"
] | null | null | null |
from typing import Dict, List, TextIO, Optional, Any
import warnings
from overrides import overrides
import torch
from torch.nn.modules import Linear, Dropout
import torch.nn.functional as F
from allennlp.common.checks import check_dimensions_match
from allennlp.data import Vocabulary
from allennlp.models.srl_util import (
convert_bio_tags_to_conll_format,
write_bio_formatted_tags_to_file,
)
from allennlp.modules import Seq2SeqEncoder, TimeDistributed, TextFieldEmbedder
from allennlp.modules.token_embedders import Embedding
from allennlp.models.model import Model
from allennlp.nn import InitializerApplicator, RegularizerApplicator
from allennlp.nn.util import get_text_field_mask, sequence_cross_entropy_with_logits
from allennlp.nn.util import get_lengths_from_binary_sequence_mask, viterbi_decode
from allennlp.training.metrics.srl_eval_scorer import SrlEvalScorer, DEFAULT_SRL_EVAL_PATH
@Model.register("srl")
class SemanticRoleLabeler(Model):
"""
This model performs semantic role labeling using BIO tags using Propbank semantic roles.
Specifically, it is an implementation of [Deep Semantic Role Labeling - What works
and what's next](https://www.aclweb.org/anthology/P17-1044).
This implementation is effectively a series of stacked interleaved LSTMs with highway
connections, applied to embedded sequences of words concatenated with a binary indicator
containing whether or not a word is the verbal predicate to generate predictions for in
the sentence. Additionally, during inference, Viterbi decoding is applied to constrain
the predictions to contain valid BIO sequences.
Specifically, the model expects and outputs IOB2-formatted tags, where the
B- tag is used in the beginning of every chunk (i.e. all chunks start with the B- tag).
# Parameters
vocab : ``Vocabulary``, required
A Vocabulary, required in order to compute sizes for input/output projections.
text_field_embedder : ``TextFieldEmbedder``, required
Used to embed the ``tokens`` ``TextField`` we get as input to the model.
encoder : ``Seq2SeqEncoder``
The encoder (with its own internal stacking) that we will use in between embedding tokens
and predicting output tags.
binary_feature_dim : int, required.
The dimensionality of the embedding of the binary verb predicate features.
initializer : ``InitializerApplicator``, optional (default=``InitializerApplicator()``)
Used to initialize the model parameters.
regularizer : ``RegularizerApplicator``, optional (default=``None``)
If provided, will be used to calculate the regularization penalty during training.
label_smoothing : ``float``, optional (default = 0.0)
Whether or not to use label smoothing on the labels when computing cross entropy loss.
ignore_span_metric : ``bool``, optional (default = False)
        Whether to skip the span-based evaluation metric, which is irrelevant when predicting BIO for Open Information Extraction.
srl_eval_path : ``str``, optional (default=``DEFAULT_SRL_EVAL_PATH``)
The path to the srl-eval.pl script. By default, will use the srl-eval.pl included with allennlp,
which is located at allennlp/tools/srl-eval.pl . If ``None``, srl-eval.pl is not used.
"""
def __init__(
self,
vocab: Vocabulary,
text_field_embedder: TextFieldEmbedder,
encoder: Seq2SeqEncoder,
binary_feature_dim: int,
embedding_dropout: float = 0.0,
initializer: InitializerApplicator = InitializerApplicator(),
regularizer: Optional[RegularizerApplicator] = None,
label_smoothing: float = None,
ignore_span_metric: bool = False,
srl_eval_path: str = DEFAULT_SRL_EVAL_PATH,
) -> None:
super().__init__(vocab, regularizer)
self.text_field_embedder = text_field_embedder
self.num_classes = self.vocab.get_vocab_size("labels")
if srl_eval_path is not None:
# For the span based evaluation, we don't want to consider labels
# for verb, because the verb index is provided to the model.
self.span_metric = SrlEvalScorer(srl_eval_path, ignore_classes=["V"])
else:
self.span_metric = None
self.encoder = encoder
# There are exactly 2 binary features for the verb predicate embedding.
self.binary_feature_embedding = Embedding(2, binary_feature_dim)
self.tag_projection_layer = TimeDistributed(
Linear(self.encoder.get_output_dim(), self.num_classes)
)
self.embedding_dropout = Dropout(p=embedding_dropout)
self._label_smoothing = label_smoothing
self.ignore_span_metric = ignore_span_metric
check_dimensions_match(
text_field_embedder.get_output_dim() + binary_feature_dim,
encoder.get_input_dim(),
"text embedding dim + verb indicator embedding dim",
"encoder input dim",
)
initializer(self)
def forward( # type: ignore
self,
tokens: Dict[str, torch.LongTensor],
verb_indicator: torch.LongTensor,
tags: torch.LongTensor = None,
metadata: List[Dict[str, Any]] = None,
) -> Dict[str, torch.Tensor]:
"""
# Parameters
tokens : Dict[str, torch.LongTensor], required
The output of ``TextField.as_array()``, which should typically be passed directly to a
``TextFieldEmbedder``. This output is a dictionary mapping keys to ``TokenIndexer``
tensors. At its most basic, using a ``SingleIdTokenIndexer`` this is : ``{"tokens":
Tensor(batch_size, num_tokens)}``. This dictionary will have the same keys as were used
for the ``TokenIndexers`` when you created the ``TextField`` representing your
sequence. The dictionary is designed to be passed directly to a ``TextFieldEmbedder``,
which knows how to combine different word representations into a single vector per
token in your input.
verb_indicator: torch.LongTensor, required.
An integer ``SequenceFeatureField`` representation of the position of the verb
in the sentence. This should have shape (batch_size, num_tokens) and importantly, can be
all zeros, in the case that the sentence has no verbal predicate.
tags : torch.LongTensor, optional (default = None)
A torch tensor representing the sequence of integer gold class labels
of shape ``(batch_size, num_tokens)``
metadata : ``List[Dict[str, Any]]``, optional, (default = None)
            metadata containing the original words in the sentence and the verb to compute the
frame for, under 'words' and 'verb' keys, respectively.
# Returns
An output dictionary consisting of:
logits : torch.FloatTensor
A tensor of shape ``(batch_size, num_tokens, tag_vocab_size)`` representing
unnormalised log probabilities of the tag classes.
class_probabilities : torch.FloatTensor
A tensor of shape ``(batch_size, num_tokens, tag_vocab_size)`` representing
a distribution of the tag classes per word.
loss : torch.FloatTensor, optional
A scalar loss to be optimised.
"""
embedded_text_input = self.embedding_dropout(self.text_field_embedder(tokens))
mask = get_text_field_mask(tokens)
embedded_verb_indicator = self.binary_feature_embedding(verb_indicator.long())
# Concatenate the verb feature onto the embedded text. This now
# has shape (batch_size, sequence_length, embedding_dim + binary_feature_dim).
embedded_text_with_verb_indicator = torch.cat(
[embedded_text_input, embedded_verb_indicator], -1
)
batch_size, sequence_length, _ = embedded_text_with_verb_indicator.size()
encoded_text = self.encoder(embedded_text_with_verb_indicator, mask)
logits = self.tag_projection_layer(encoded_text)
reshaped_log_probs = logits.view(-1, self.num_classes)
class_probabilities = F.softmax(reshaped_log_probs, dim=-1).view(
[batch_size, sequence_length, self.num_classes]
)
output_dict = {"logits": logits, "class_probabilities": class_probabilities}
# We need to retain the mask in the output dictionary
# so that we can crop the sequences to remove padding
# when we do viterbi inference in self.decode.
output_dict["mask"] = mask
if tags is not None:
loss = sequence_cross_entropy_with_logits(
logits, tags, mask, label_smoothing=self._label_smoothing
)
if not self.ignore_span_metric and self.span_metric is not None and not self.training:
batch_verb_indices = [
example_metadata["verb_index"] for example_metadata in metadata
]
batch_sentences = [example_metadata["words"] for example_metadata in metadata]
# Get the BIO tags from decode()
# TODO (nfliu): This is kind of a hack, consider splitting out part
# of decode() to a separate function.
batch_bio_predicted_tags = self.decode(output_dict).pop("tags")
batch_conll_predicted_tags = [
convert_bio_tags_to_conll_format(tags) for tags in batch_bio_predicted_tags
]
batch_bio_gold_tags = [
example_metadata["gold_tags"] for example_metadata in metadata
]
batch_conll_gold_tags = [
convert_bio_tags_to_conll_format(tags) for tags in batch_bio_gold_tags
]
self.span_metric(
batch_verb_indices,
batch_sentences,
batch_conll_predicted_tags,
batch_conll_gold_tags,
)
output_dict["loss"] = loss
        if metadata is not None:
            words, verbs = zip(*[(x["words"], x["verb"]) for x in metadata])
            output_dict["words"] = list(words)
            output_dict["verb"] = list(verbs)
return output_dict
@overrides
def decode(self, output_dict: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
"""
Does constrained viterbi decoding on class probabilities output in :func:`forward`. The
constraint simply specifies that the output tags must be a valid BIO sequence. We add a
``"tags"`` key to the dictionary with the result.
"""
all_predictions = output_dict["class_probabilities"]
sequence_lengths = get_lengths_from_binary_sequence_mask(output_dict["mask"]).data.tolist()
if all_predictions.dim() == 3:
predictions_list = [
all_predictions[i].detach().cpu() for i in range(all_predictions.size(0))
]
else:
predictions_list = [all_predictions]
all_tags = []
transition_matrix = self.get_viterbi_pairwise_potentials()
start_transitions = self.get_start_transitions()
for predictions, length in zip(predictions_list, sequence_lengths):
max_likelihood_sequence, _ = viterbi_decode(
predictions[:length], transition_matrix, allowed_start_transitions=start_transitions
)
tags = [
self.vocab.get_token_from_index(x, namespace="labels")
for x in max_likelihood_sequence
]
all_tags.append(tags)
output_dict["tags"] = all_tags
return output_dict
def get_metrics(self, reset: bool = False):
if self.ignore_span_metric:
# Return an empty dictionary if ignoring the
# span metric
return {}
else:
metric_dict = self.span_metric.get_metric(reset=reset)
# This can be a lot of metrics, as there are 3 per class.
# we only really care about the overall metrics, so we filter for them here.
return {x: y for x, y in metric_dict.items() if "overall" in x}
def get_viterbi_pairwise_potentials(self):
"""
Generate a matrix of pairwise transition potentials for the BIO labels.
The only constraint implemented here is that I-XXX labels must be preceded
by either an identical I-XXX tag or a B-XXX tag. In order to achieve this
constraint, pairs of labels which do not satisfy this constraint have a
pairwise potential of -inf.
# Returns
transition_matrix : torch.Tensor
A (num_labels, num_labels) matrix of pairwise potentials.
"""
all_labels = self.vocab.get_index_to_token_vocabulary("labels")
num_labels = len(all_labels)
transition_matrix = torch.zeros([num_labels, num_labels])
for i, previous_label in all_labels.items():
for j, label in all_labels.items():
# I labels can only be preceded by themselves or
# their corresponding B tag.
if i != j and label[0] == "I" and not previous_label == "B" + label[1:]:
transition_matrix[i, j] = float("-inf")
return transition_matrix
def get_start_transitions(self):
"""
In the BIO sequence, we cannot start the sequence with an I-XXX tag.
This transition sequence is passed to viterbi_decode to specify this constraint.
# Returns
start_transitions : torch.Tensor
The pairwise potentials between a START token and
the first token of the sequence.
"""
all_labels = self.vocab.get_index_to_token_vocabulary("labels")
num_labels = len(all_labels)
start_transitions = torch.zeros(num_labels)
for i, label in all_labels.items():
if label[0] == "I":
start_transitions[i] = float("-inf")
return start_transitions
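# --- Illustrative sketch (not part of the original model) --------------------
# A hand-built, minimal demonstration of the BIO constraints encoded by
# get_viterbi_pairwise_potentials() and get_start_transitions() above, using
# the same viterbi_decode helper that decode() relies on. The three-label
# vocabulary and the per-token scores below are made up purely for
# illustration; they are not part of AllenNLP's SRL model.
def _example_bio_constrained_decode():
    labels = {0: "O", 1: "B-ARG0", 2: "I-ARG0"}
    num_labels = len(labels)
    transition_matrix = torch.zeros(num_labels, num_labels)
    start_transitions = torch.zeros(num_labels)
    for i, previous_label in labels.items():
        for j, label in labels.items():
            # Forbid transitions into an I- tag unless it follows its own B-/I- tag.
            if i != j and label[0] == "I" and previous_label != "B" + label[1:]:
                transition_matrix[i, j] = float("-inf")
    for i, label in labels.items():
        # A sequence may never start with an I- tag.
        if label[0] == "I":
            start_transitions[i] = float("-inf")
    # Fake per-token scores for a four-token sentence (rows: tokens, cols: labels).
    scores = torch.tensor(
        [[0.2, 0.5, 0.3], [0.1, 0.1, 0.8], [0.6, 0.2, 0.2], [0.3, 0.3, 0.4]]
    )
    best_path, _ = viterbi_decode(
        scores, transition_matrix, allowed_start_transitions=start_transitions
    )
    # best_path is guaranteed to be a valid BIO sequence, e.g. it can never
    # begin with index 2 ("I-ARG0") or place "I-ARG0" directly after "O".
    return [labels[i] for i in best_path]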
def write_to_conll_eval_file(
prediction_file: TextIO,
gold_file: TextIO,
verb_index: Optional[int],
sentence: List[str],
prediction: List[str],
gold_labels: List[str],
):
"""
.. deprecated:: 0.8.4
The ``write_to_conll_eval_file`` function was deprecated in favor of the
identical ``write_bio_formatted_tags_to_file`` in version 0.8.4.
Prints predicate argument predictions and gold labels for a single verbal
predicate in a sentence to two provided file references.
The CoNLL SRL format is described in
[the shared task data README](https://www.lsi.upc.edu/~srlconll/conll05st-release/README).
This function expects IOB2-formatted tags, where the B- tag is used in the beginning
of every chunk (i.e. all chunks start with the B- tag).
# Parameters
prediction_file : TextIO, required.
A file reference to print predictions to.
gold_file : TextIO, required.
A file reference to print gold labels to.
verb_index : Optional[int], required.
The index of the verbal predicate in the sentence which
the gold labels are the arguments for, or None if the sentence
contains no verbal predicate.
sentence : List[str], required.
The word tokens.
prediction : List[str], required.
The predicted BIO labels.
gold_labels : List[str], required.
The gold BIO labels.
"""
warnings.warn(
"The 'write_to_conll_eval_file' function has been deprecated in favor of "
"the identical 'write_bio_formatted_tags_to_file' function.",
DeprecationWarning,
)
write_bio_formatted_tags_to_file(
prediction_file, gold_file, verb_index, sentence, prediction, gold_labels
)
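# --- Illustrative usage (not part of the original module) --------------------
# A minimal sketch of how the deprecated wrapper above can be called, using
# in-memory buffers instead of real files. The toy sentence, tags and verb
# index are assumptions made purely for demonstration.
def _example_write_conll_eval():
    import io

    sentence = ["The", "cat", "sat", "."]
    gold = ["B-ARG0", "I-ARG0", "B-V", "O"]
    predicted = ["B-ARG0", "I-ARG0", "B-V", "O"]
    prediction_file, gold_file = io.StringIO(), io.StringIO()
    # verb_index=2 points at "sat"; each buffer receives one CoNLL-formatted
    # column block for this predicate (and a DeprecationWarning is emitted).
    write_to_conll_eval_file(prediction_file, gold_file, 2, sentence, predicted, gold)
    return prediction_file.getvalue(), gold_file.getvalue()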
| 45.467049
| 112
| 0.667696
|
4a165ac7d5dda0ab76a427db33686688f165044d
| 2,957
|
py
|
Python
|
setup.py
|
True-Demon/smoke-zephyr
|
bf398416c0ece69ecf4a440d73f1a5c5670897cd
|
[
"BSD-3-Clause"
] | null | null | null |
setup.py
|
True-Demon/smoke-zephyr
|
bf398416c0ece69ecf4a440d73f1a5c5670897cd
|
[
"BSD-3-Clause"
] | null | null | null |
setup.py
|
True-Demon/smoke-zephyr
|
bf398416c0ece69ecf4a440d73f1a5c5670897cd
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# setup.py
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of the nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#
import os
import sys
base_directory = os.path.dirname(__file__)
try:
from setuptools import setup, find_packages
except ImportError:
print('This project needs setuptools in order to build. Install it using your package')
print('manager (usually python-setuptools) or via pip (pip install setuptools).')
sys.exit(1)
try:
with open(os.path.join(base_directory, 'README.rst')) as file_h:
long_description = file_h.read()
except (IOError, OSError):
sys.stderr.write('README.rst is unavailable, can not generate the long description\n')
long_description = None
DESCRIPTION = """\
This project provides a collection of miscellaneous Python utilities.\
"""
setup(
name='smoke-zephyr',
version='2.0.1',
author='Spencer McIntyre',
author_email='zeroSteiner@gmail.com',
maintainer='Spencer McIntyre',
maintainer_email='zeroSteiner@gmail.com',
description=DESCRIPTION,
long_description=long_description,
url='https://github.com/zeroSteiner/smoke-zephyr',
license='BSD',
packages=['smoke_zephyr'],
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: POSIX',
'Programming Language :: Python',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
]
)
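# --- Illustrative note (not part of the original setup script) ---------------
# The script imports find_packages but pins the package list explicitly; an
# equivalent, more automatic declaration could look like the line below (the
# 'tests' exclusion is an assumption, not something this project necessarily
# ships):
#
#     packages=find_packages(exclude=['tests', 'tests.*']),
#
# Building and installing from a source checkout follows the usual pattern:
#
#     pip install .             # install into the current environment
#     python setup.py sdist     # build a source distribution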
| 36.506173
| 88
| 0.747379
|
4a165c494bfcb45d5a67693b0d3f68a9db8e459a
| 6,467
|
py
|
Python
|
Plugins/UnrealEnginePython/Binaries/Win64/Lib/site-packages/tensorflow/python/estimator/gc.py
|
JustinACoder/H22-GR3-UnrealAI
|
361eb9ef1147f8a2991e5f98c4118cd823184adf
|
[
"MIT"
] | 6
|
2022-02-04T18:12:24.000Z
|
2022-03-21T23:57:12.000Z
|
Lib/site-packages/tensorflow/python/estimator/gc.py
|
shfkdroal/Robot-Learning-in-Mixed-Adversarial-and-Collaborative-Settings
|
1fa4cd6a566c8745f455fc3d2273208f21f88ced
|
[
"bzip2-1.0.6"
] | null | null | null |
Lib/site-packages/tensorflow/python/estimator/gc.py
|
shfkdroal/Robot-Learning-in-Mixed-Adversarial-and-Collaborative-Settings
|
1fa4cd6a566c8745f455fc3d2273208f21f88ced
|
[
"bzip2-1.0.6"
] | 1
|
2022-02-08T03:53:23.000Z
|
2022-02-08T03:53:23.000Z
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""System for specifying garbage collection (GC) of path based data.
This framework allows for GC of data specified by path names, for example files
on disk. gc.Path objects each represent a single item stored at a path and may
be a base directory,
/tmp/exports/0/...
/tmp/exports/1/...
...
or a fully qualified file,
/tmp/train-1.ckpt
/tmp/train-2.ckpt
...
A gc filter function takes and returns a list of gc.Path items. Filter
functions are responsible for selecting Path items for preservation or deletion.
Note that functions should always return a sorted list.
For example,
base_dir = "/tmp"
# Create the directories.
for e in xrange(10):
os.mkdir("%s/%d" % (base_dir, e), 0o755)
# Create a simple parser that pulls the export_version from the directory.
path_regex = "^" + re.escape(base_dir) + "/(\\d+)$"
def parser(path):
match = re.match(path_regex, path.path)
if not match:
return None
return path._replace(export_version=int(match.group(1)))
path_list = gc._get_paths("/tmp", parser) # contains all ten Paths
every_fifth = gc._mod_export_version(5)
print(every_fifth(path_list)) # shows ["/tmp/0", "/tmp/5"]
largest_three = gc.largest_export_versions(3)
print(largest_three(all_paths)) # shows ["/tmp/7", "/tmp/8", "/tmp/9"]
both = gc._union(every_fifth, largest_three)
print(both(all_paths)) # shows ["/tmp/0", "/tmp/5",
# "/tmp/7", "/tmp/8", "/tmp/9"]
# Delete everything not in 'both'.
to_delete = gc._negation(both)
for p in to_delete(all_paths):
gfile.DeleteRecursively(p.path) # deletes: "/tmp/1", "/tmp/2",
# "/tmp/3", "/tmp/4", "/tmp/6",
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import heapq
import math
import os
from tensorflow.python.platform import gfile
from tensorflow.python.util import compat
Path = collections.namedtuple('Path', 'path export_version')
def _largest_export_versions(n):
"""Creates a filter that keeps the largest n export versions.
Args:
n: number of versions to keep.
Returns:
A filter function that keeps the n largest paths.
"""
def keep(paths):
heap = []
for idx, path in enumerate(paths):
if path.export_version is not None:
heapq.heappush(heap, (path.export_version, idx))
keepers = [paths[i] for _, i in heapq.nlargest(n, heap)]
return sorted(keepers)
return keep
def _one_of_every_n_export_versions(n):
"""Creates a filter that keeps one of every n export versions.
Args:
n: interval size.
Returns:
A filter function that keeps exactly one path from each interval
[0, n], (n, 2n], (2n, 3n], etc... If more than one path exists in an
interval the largest is kept.
"""
def keep(paths):
"""A filter function that keeps exactly one out of every n paths."""
keeper_map = {} # map from interval to largest path seen in that interval
for p in paths:
if p.export_version is None:
# Skip missing export_versions.
continue
# Find the interval (with a special case to map export_version = 0 to
      # interval 0).
interval = math.floor(
(p.export_version - 1) / n) if p.export_version else 0
existing = keeper_map.get(interval, None)
if (not existing) or (existing.export_version < p.export_version):
keeper_map[interval] = p
return sorted(keeper_map.values())
return keep
def _mod_export_version(n):
"""Creates a filter that keeps every export that is a multiple of n.
Args:
n: step size.
Returns:
A filter function that keeps paths where export_version % n == 0.
"""
def keep(paths):
keepers = []
for p in paths:
if p.export_version % n == 0:
keepers.append(p)
return sorted(keepers)
return keep
def _union(lf, rf):
"""Creates a filter that keeps the union of two filters.
Args:
lf: first filter
rf: second filter
Returns:
A filter function that keeps the n largest paths.
"""
def keep(paths):
l = set(lf(paths))
r = set(rf(paths))
return sorted(list(l|r))
return keep
def _negation(f):
"""Negate a filter.
Args:
f: filter function to invert
Returns:
A filter function that returns the negation of f.
"""
def keep(paths):
l = set(paths)
r = set(f(paths))
return sorted(list(l-r))
return keep
def _get_paths(base_dir, parser):
"""Gets a list of Paths in a given directory.
Args:
base_dir: directory.
parser: a function which gets the raw Path and can augment it with
information such as the export_version, or ignore the path by returning
None. An example parser may extract the export version from a path
such as "/tmp/exports/100" an another may extract from a full file
name such as "/tmp/checkpoint-99.out".
Returns:
A list of Paths contained in the base directory with the parsing function
applied.
By default the following fields are populated,
- Path.path
The parsing function is responsible for populating,
- Path.export_version
"""
raw_paths = gfile.ListDirectory(base_dir)
paths = []
for r in raw_paths:
    # ListDirectory() returns paths with a trailing "/" when base_dir is a GCS URL
r = compat.as_str_any(r)
if r[-1] == '/':
r = r[0:len(r)-1]
p = parser(Path(os.path.join(compat.as_str_any(base_dir), r), None))
if p:
paths.append(p)
return sorted(paths)
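# --- Illustrative sketch (not part of the original module) -------------------
# The module docstring demonstrates the filters against directories on disk;
# the sketch below exercises the same composition purely on in-memory Path
# tuples, which is enough to see how _union and _negation interact. The ten
# example paths are made up for demonstration.
def _example_filter_composition():
  paths = [Path('/tmp/exports/%d' % v, v) for v in range(10)]
  keep = _union(_largest_export_versions(3), _mod_export_version(5))
  # keep(paths) retains export versions 0, 5, 7, 8 and 9; the negated filter
  # returns everything else, i.e. the paths that would be eligible for deletion.
  to_delete = _negation(keep)(paths)
  return [p.path for p in to_delete]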
| 30.504717
| 81
| 0.645431
|
4a165c533148fa693119dd2942c7bec0567bbce3
| 1,771
|
py
|
Python
|
bin.d/ssh-import-id.py
|
hlecuanda/zconf
|
3e07e5104d07b846294b797f082fb09727f005da
|
[
"BSD-2-Clause"
] | 2
|
2019-02-26T08:06:55.000Z
|
2019-09-11T05:14:48.000Z
|
bin.d/ssh-import-id.py
|
hlecuanda/zconf
|
3e07e5104d07b846294b797f082fb09727f005da
|
[
"BSD-2-Clause"
] | 1
|
2018-02-20T22:43:24.000Z
|
2018-02-20T22:43:24.000Z
|
bin.d/ssh-import-id.py
|
hlecuanda/zconf
|
3e07e5104d07b846294b797f082fb09727f005da
|
[
"BSD-2-Clause"
] | null | null | null |
#! /usr/bin/python3
#
# ssh-import-id - Authorize SSH public keys from trusted online identities
#
# Copyright (c) 2013 Casey Marshall <casey.marshall@gmail.com>
# Copyright (c) 2013-16 Dustin Kirkland <dustin.kirkland@gmail.com>
#
# ssh-import-id is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3.
#
# ssh-import-id is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ssh-import-id. If not, see <http://www.gnu.org/licenses/>.
import argparse
import sys
from ssh_import_id import *
def main():
errors = []
try:
os.umask(0o177)
parser.options = parser.parse_args()
keys = []
for userid in parser.options.userids:
user_pieces = userid.split(':')
if len(user_pieces) == 2:
proto, username = user_pieces
elif len(user_pieces) == 1:
proto, username = DEFAULT_PROTO, userid
else:
die("Invalid user ID: [%s]" % (userid))
if parser.options.remove:
k = remove_keys(proto, username)
keys.extend(k)
action = "Removed"
else:
k = import_keys(proto, username, parser.options.useragent)
keys.extend(k)
action = "Authorized"
if len(k) == 0:
errors.append(userid)
logging.info("[%d] SSH keys [%s]" % (len(keys), action))
except (Exception,):
e = sys.exc_info()[1]
die("%s" % (str(e)))
cleanup()
if len(errors) > 0:
die("No matching keys found for [%s]" % ','.join(errors))
os._exit(0)
if __name__ == '__main__':
main()
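# --- Illustrative usage (not part of the original script) --------------------
# Typical invocations, assuming the lp: (Launchpad) and gh: (GitHub) protocol
# prefixes and the --remove flag exposed by the argument parser defined in the
# imported ssh_import_id package; the user names are placeholders:
#
#   ssh-import-id lp:kirkland           # authorize keys from a Launchpad user
#   ssh-import-id gh:octocat            # authorize keys from a GitHub user
#   ssh-import-id --remove gh:octocat   # remove previously imported keys
#
# A bare user ID with no prefix falls back to DEFAULT_PROTO, as handled in
# main() above.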
| 28.564516
| 74
| 0.687747
|
4a165caf113ee0eb9e2cc66382562da62f57888f
| 64,646
|
py
|
Python
|
cms/admin/pageadmin.py
|
leture/django-cms
|
a022f2cd9eb18a85765ef7c00444c9b63e53008e
|
[
"BSD-3-Clause"
] | null | null | null |
cms/admin/pageadmin.py
|
leture/django-cms
|
a022f2cd9eb18a85765ef7c00444c9b63e53008e
|
[
"BSD-3-Clause"
] | null | null | null |
cms/admin/pageadmin.py
|
leture/django-cms
|
a022f2cd9eb18a85765ef7c00444c9b63e53008e
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
import copy
from functools import wraps
import json
import sys
from cms.utils.compat import DJANGO_1_7
import django
from django.contrib.admin.helpers import AdminForm
from django.conf import settings
from django.conf.urls import url
from django.contrib import admin, messages
from django.contrib.admin.models import LogEntry, CHANGE
from django.contrib.admin.options import IncorrectLookupParameters
from django.contrib.admin.util import get_deleted_objects
from django.contrib.contenttypes.models import ContentType
from django.contrib.sites.models import Site, get_current_site
from django.core.exceptions import PermissionDenied, ObjectDoesNotExist, ValidationError
from django.db import router, transaction
from django.db.models import Q
from django.http import HttpResponseRedirect, HttpResponse, Http404, HttpResponseBadRequest, HttpResponseForbidden
from django.shortcuts import render, get_object_or_404
from django.template.defaultfilters import escape
from django.utils.encoding import force_text
from django.utils.six.moves.urllib.parse import unquote
from django.utils.translation import ugettext_lazy as _, get_language
from django.utils.decorators import method_decorator
from django.views.decorators.http import require_POST
from cms.admin.change_list import CMSChangeList
from cms.admin.dialog.views import get_copy_dialog
from cms.admin.forms import (PageForm, AdvancedSettingsForm, PagePermissionForm,
PublicationDatesForm)
from cms.admin.permissionadmin import (PERMISSION_ADMIN_INLINES, PagePermissionInlineAdmin, ViewRestrictionInlineAdmin)
from cms.admin.placeholderadmin import PlaceholderAdminMixin
from cms.admin.views import revert_plugins
from cms.constants import PAGE_TYPES_ID, PUBLISHER_STATE_PENDING
from cms.models import Page, Title, CMSPlugin, PagePermission, GlobalPagePermission, StaticPlaceholder
from cms.models.managers import PagePermissionsPermissionManager
from cms.plugin_pool import plugin_pool
from cms.toolbar_pool import toolbar_pool
from cms.utils import helpers, permissions, get_language_from_request, admin as admin_utils, copy_plugins
from cms.utils.i18n import get_language_list, get_language_tuple, get_language_object, force_language
from cms.utils.admin import jsonify_request
from cms.utils.compat.dj import is_installed
from cms.utils.conf import get_cms_setting
from cms.utils.helpers import find_placeholder_relation, current_site
from cms.utils.permissions import has_global_page_permission, has_generic_permission
from cms.utils.urlutils import add_url_parameters, admin_reverse
require_POST = method_decorator(require_POST)
if is_installed('reversion'):
from reversion.admin import VersionAdmin as ModelAdmin
from reversion import create_revision
else: # pragma: no cover
from django.contrib.admin import ModelAdmin
class ReversionContext(object):
def __enter__(self):
yield
def __exit__(self, exc_type, exc_val, exc_tb):
pass
def __call__(self, func):
"""Allows this revision context to be used as a decorator."""
@wraps(func)
def do_revision_context(*args, **kwargs):
self.__enter__()
exception = False
try:
try:
return func(*args, **kwargs)
except:
exception = True
if not self.__exit__(*sys.exc_info()):
raise
finally:
if not exception:
self.__exit__(None, None, None)
return do_revision_context
def create_revision():
return ReversionContext()
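# --- Illustrative note (not part of the original module) ---------------------
# Whether or not django-reversion is installed, create_revision() is used in
# two shapes further down in this file: as a decorator (change_template,
# publish_page) and as a context manager (copy_language). A minimal sketch of
# both call shapes, with a hypothetical function body:
#
#   @create_revision()
#   def save_something(request):
#       ...  # changes made here are grouped into a single revision (when available)
#
#   with create_revision():
#       ...  # the same grouping for an inline block of changes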
PUBLISH_COMMENT = "Publish"
INITIAL_COMMENT = "Initial version."
class PageAdmin(PlaceholderAdminMixin, ModelAdmin):
form = PageForm
search_fields = ('=id', 'title_set__slug', 'title_set__title', 'reverse_id')
revision_form_template = "admin/cms/page/history/revision_header.html"
recover_form_template = "admin/cms/page/history/recover_header.html"
add_general_fields = ['title', 'slug', 'language', 'template']
change_list_template = "admin/cms/page/tree/base.html"
list_filter = ['in_navigation', 'template', 'changed_by', 'soft_root']
title_frontend_editable_fields = ['title', 'menu_title', 'page_title']
inlines = PERMISSION_ADMIN_INLINES
def get_urls(self):
"""Get the admin urls
"""
info = "%s_%s" % (self.model._meta.app_label, self.model._meta.model_name)
pat = lambda regex, fn: url(regex, self.admin_site.admin_view(fn), name='%s_%s' % (info, fn.__name__))
url_patterns = [
pat(r'^([0-9]+)/advanced-settings/$', self.advanced),
pat(r'^([0-9]+)/dates/$', self.dates),
pat(r'^([0-9]+)/permission-settings/$', self.permissions),
pat(r'^([0-9]+)/delete-translation/$', self.delete_translation),
pat(r'^([0-9]+)/move-page/$', self.move_page),
pat(r'^([0-9]+)/copy-page/$', self.copy_page),
pat(r'^([0-9]+)/copy-language/$', self.copy_language),
pat(r'^([0-9]+)/dialog/copy/$', get_copy_dialog), # copy dialog
pat(r'^([0-9]+)/change-navigation/$', self.change_innavigation),
pat(r'^([0-9]+)/permissions/$', self.get_permissions),
pat(r'^([0-9]+)/undo/$', self.undo),
pat(r'^([0-9]+)/redo/$', self.redo),
pat(r'^([0-9]+)/change_template/$', self.change_template),
pat(r'^([0-9]+)/([a-z\-]+)/descendants/$', self.descendants), # menu html for page descendants
pat(r'^([0-9]+)/([a-z\-]+)/edit-field/$', self.edit_title_fields),
pat(r'^([0-9]+)/([a-z\-]+)/publish/$', self.publish_page),
pat(r'^([0-9]+)/([a-z\-]+)/unpublish/$', self.unpublish),
pat(r'^([0-9]+)/([a-z\-]+)/revert/$', self.revert_page),
pat(r'^([0-9]+)/([a-z\-]+)/preview/$', self.preview_page),
pat(r'^add-page-type/$', self.add_page_type),
pat(r'^published-pages/$', self.get_published_pagelist),
url(r'^resolve/$', self.resolve, name="cms_page_resolve"),
]
if plugin_pool.get_all_plugins():
url_patterns += plugin_pool.get_patterns()
url_patterns += super(PageAdmin, self).get_urls()
return url_patterns
def get_revision_instances(self, request, object):
"""Returns all the instances to be used in the object's revision."""
if isinstance(object, Title):
object = object.page
if isinstance(object, Page) and not object.publisher_is_draft:
object = object.publisher_public
placeholder_relation = find_placeholder_relation(object)
data = [object]
filters = {'placeholder__%s' % placeholder_relation: object}
for plugin in CMSPlugin.objects.filter(**filters):
data.append(plugin)
plugin_instance, admin = plugin.get_plugin_instance()
if plugin_instance:
data.append(plugin_instance)
if isinstance(object, Page):
titles = object.title_set.all()
for title in titles:
title.publisher_public = None
data.append(title)
return data
def save_model(self, request, obj, form, change):
"""
Move the page in the tree if necessary and save every placeholder
Content object.
"""
target = request.GET.get('target', None)
position = request.GET.get('position', None)
if 'recover' in request.path_info:
pk = obj.pk
if obj.parent_id:
try:
parent = Page.objects.get(pk=obj.parent_id)
except Page.DoesNotExist:
parent = None
else:
parent = None
obj.pk = None
obj.path = None
obj.numchild = 0
obj.depth = 0
if parent:
saved_obj = parent.add_child(instance=obj)
else:
saved_obj = obj.add_root(instance=obj)
tmp_pk = saved_obj.pk
saved_obj.pk = pk
Page.objects.get(pk=tmp_pk).delete()
saved_obj.save(no_signals=True)
else:
if 'history' in request.path_info:
old_obj = self.model.objects.get(pk=obj.pk)
obj.depth = old_obj.depth
obj.parent_id = old_obj.parent_id
obj.path = old_obj.path
obj.numchild = old_obj.numchild
new = False
if not obj.pk:
new = True
obj.save()
if 'recover' in request.path_info or 'history' in request.path_info:
revert_plugins(request, obj.version.pk, obj)
if target is not None and position is not None:
try:
target = self.model.objects.get(pk=target)
except self.model.DoesNotExist:
pass
else:
if position == 'last-child' or position == 'first-child':
obj.parent_id = target.pk
else:
obj.parent_id = target.parent_id
obj.save()
obj = obj.move(target, pos=position)
page_type_id = form.cleaned_data.get('page_type')
copy_target_id = request.GET.get('copy_target')
if copy_target_id or page_type_id:
if page_type_id:
copy_target_id = page_type_id
copy_target = self.model.objects.get(pk=copy_target_id)
if not copy_target.has_view_permission(request):
raise PermissionDenied()
obj = self.model.objects.get(pk=obj.pk) #mptt reload
copy_target._copy_attributes(obj, clean=True)
obj.save()
for lang in copy_target.languages.split(','):
copy_target._copy_contents(obj, lang)
if not 'permission' in request.path_info:
language = form.cleaned_data['language']
Title.objects.set_or_create(
request,
obj,
form,
language,
)
# is it home? publish it right away
if new and Page.objects.filter(site_id=obj.site_id).count() == 1:
obj.publish(language)
def get_fieldsets(self, request, obj=None):
form = self.get_form(request, obj, fields=None)
if getattr(form, 'fieldsets', None) is None:
fields = list(form.base_fields) + list(self.get_readonly_fields(request, obj))
return [(None, {'fields': fields})]
else:
return form.fieldsets
def get_inline_classes(self, request, obj=None, **kwargs):
if obj and 'permission' in request.path_info:
return PERMISSION_ADMIN_INLINES
return []
def get_form_class(self, request, obj=None, **kwargs):
if 'advanced' in request.path_info:
return AdvancedSettingsForm
elif 'permission' in request.path_info:
return PagePermissionForm
elif 'dates' in request.path_info:
return PublicationDatesForm
return self.form
def get_form(self, request, obj=None, **kwargs):
"""
Get PageForm for the Page model and modify its fields depending on
the request.
"""
language = get_language_from_request(request, obj)
form_cls = self.get_form_class(request, obj)
form = super(PageAdmin, self).get_form(request, obj, form=form_cls, **kwargs)
        # get_form operates by overriding the initial field values, which may
        # persist across invocations. The code below deep-copies the field
        # definitions to avoid such leaks.
for field in form.base_fields.keys():
form.base_fields[field] = copy.deepcopy(form.base_fields[field])
if 'language' in form.base_fields:
form.base_fields['language'].initial = language
if 'page_type' in form.base_fields:
if 'copy_target' in request.GET or 'add_page_type' in request.GET or obj:
del form.base_fields['page_type']
elif not Title.objects.filter(page__parent__reverse_id=PAGE_TYPES_ID, language=language).exists():
del form.base_fields['page_type']
if 'add_page_type' in request.GET:
del form.base_fields['menu_title']
del form.base_fields['meta_description']
del form.base_fields['page_title']
self.inlines = self.get_inline_classes(request, obj, **kwargs)
if obj:
if 'history' in request.path_info or 'recover' in request.path_info:
version_id = request.path_info.split('/')[-2]
else:
version_id = None
title_obj = obj.get_title_obj(language=language, fallback=False, version_id=version_id, force_reload=True)
if 'site' in form.base_fields and form.base_fields['site'].initial is None:
form.base_fields['site'].initial = obj.site
for name in ('slug', 'title', 'meta_description', 'menu_title', 'page_title', 'redirect'):
if name in form.base_fields:
form.base_fields[name].initial = getattr(title_obj, name)
if 'overwrite_url' in form.base_fields:
if title_obj.has_url_overwrite:
form.base_fields['overwrite_url'].initial = title_obj.path
else:
form.base_fields['overwrite_url'].initial = ''
else:
for name in ('slug', 'title'):
form.base_fields[name].initial = u''
if 'target' in request.GET or 'copy_target' in request.GET:
target = request.GET.get('copy_target') or request.GET.get('target')
if 'position' in request.GET:
position = request.GET['position']
if position == 'last-child' or position == 'first-child':
form.base_fields['parent'].initial = request.GET.get('target', None)
else:
sibling = self.model.objects.get(pk=target)
form.base_fields['parent'].initial = sibling.parent_id
else:
form.base_fields['parent'].initial = request.GET.get('target', None)
form.base_fields['site'].initial = request.session.get('cms_admin_site', None)
return form
def advanced(self, request, object_id):
page = get_object_or_404(self.model, pk=object_id)
if not page.has_advanced_settings_permission(request):
raise PermissionDenied("No permission for editing advanced settings")
return self.change_view(request, object_id, extra_context={'advanced_settings': True, 'title': _("Advanced Settings")})
def dates(self, request, object_id):
return self.change_view(request, object_id, extra_context={'publishing_dates': True, 'title': _("Publishing dates")})
def permissions(self, request, object_id):
page = get_object_or_404(self.model, pk=object_id)
if not page.has_change_permissions_permission(request):
raise PermissionDenied("No permission for editing advanced settings")
return self.change_view(request, object_id, extra_context={'show_permissions': True, 'title': _("Change Permissions")})
def get_inline_instances(self, request, obj=None):
inlines = super(PageAdmin, self).get_inline_instances(request, obj)
if get_cms_setting('PERMISSION') and obj:
filtered_inlines = []
for inline in inlines:
if (isinstance(inline, PagePermissionInlineAdmin)
and not isinstance(inline, ViewRestrictionInlineAdmin)):
if "recover" in request.path or "history" in request.path:
# do not display permissions in recover mode
continue
if not obj.has_change_permissions_permission(request):
continue
filtered_inlines.append(inline)
inlines = filtered_inlines
return inlines
def get_unihandecode_context(self, language):
if language[:2] in get_cms_setting('UNIHANDECODE_DECODERS'):
uhd_lang = language[:2]
else:
uhd_lang = get_cms_setting('UNIHANDECODE_DEFAULT_DECODER')
uhd_host = get_cms_setting('UNIHANDECODE_HOST')
uhd_version = get_cms_setting('UNIHANDECODE_VERSION')
if uhd_lang and uhd_host and uhd_version:
uhd_urls = [
'%sunihandecode-%s.core.min.js' % (uhd_host, uhd_version),
'%sunihandecode-%s.%s.min.js' % (uhd_host, uhd_version, uhd_lang),
]
else:
uhd_urls = []
return {'unihandecode_lang': uhd_lang, 'unihandecode_urls': uhd_urls}
def add_view(self, request, form_url='', extra_context=None):
extra_context = extra_context or {}
language = get_language_from_request(request)
extra_context.update({
'language': language,
})
if not request.GET.get('add_page_type') is None:
extra_context.update({
'add_page_type': True,
'title': _("Add Page Type"),
})
elif 'copy_target' in request.GET:
extra_context.update({
'title': _("Add Page Copy"),
})
else:
extra_context = self.update_language_tab_context(request, context=extra_context)
extra_context.update(self.get_unihandecode_context(language))
return super(PageAdmin, self).add_view(request, form_url, extra_context=extra_context)
def change_view(self, request, object_id, form_url='', extra_context=None):
"""
The 'change' admin view for the Page model.
"""
if extra_context is None:
extra_context = {'basic_info': True}
try:
obj = self.model.objects.get(pk=object_id)
except self.model.DoesNotExist:
# Don't raise Http404 just yet, because we haven't checked
# permissions yet. We don't want an unauthenticated user to be able
# to determine whether a given object exists.
obj = None
else:
#activate(user_lang_set)
context = {
'page': obj,
'CMS_PERMISSION': get_cms_setting('PERMISSION'),
'ADMIN_MEDIA_URL': settings.STATIC_URL,
'can_change': obj.has_change_permission(request),
'can_change_permissions': obj.has_change_permissions_permission(request),
'current_site_id': settings.SITE_ID,
}
context.update(extra_context or {})
extra_context = self.update_language_tab_context(request, obj, context)
tab_language = get_language_from_request(request)
extra_context.update(self.get_unihandecode_context(tab_language))
response = super(PageAdmin, self).change_view(
request, object_id, form_url=form_url, extra_context=extra_context)
if tab_language and response.status_code == 302 and response._headers['location'][1] == request.path_info:
location = response._headers['location']
response._headers['location'] = (location[0], "%s?language=%s" % (location[1], tab_language))
return response
def render_change_form(self, request, context, add=False, change=False, form_url='', obj=None):
# add context variables
filled_languages = []
if obj:
filled_languages = [t[0] for t in obj.title_set.filter(title__isnull=False).values_list('language')]
allowed_languages = [lang[0] for lang in self._get_site_languages(obj)]
context.update({
'filled_languages': [lang for lang in filled_languages if lang in allowed_languages],
})
return super(PageAdmin, self).render_change_form(request, context, add, change, form_url, obj)
def _get_site_languages(self, obj=None):
site_id = None
if obj:
site_id = obj.site_id
else:
site_id = Site.objects.get_current().pk
return get_language_tuple(site_id)
def update_language_tab_context(self, request, obj=None, context=None):
if not context:
context = {}
language = get_language_from_request(request, obj)
languages = self._get_site_languages(obj)
context.update({
'language': language,
'language_tabs': languages,
# Dates are not language dependent, thus we hide the language
# selection bar: the language is forced through the form class
'show_language_tabs': len(list(languages)) > 1 and not context.get('publishing_dates', False),
})
return context
def response_change(self, request, obj):
"""Called always when page gets changed, call save on page, there may be
some new stuff, which should be published after all other objects on page
are collected.
"""
# save the object again, so all the related changes to page model
# can be published if required
obj.save()
return super(PageAdmin, self).response_change(request, obj)
def has_add_permission(self, request):
"""
Return true if the current user has permission to add a new page.
"""
if get_cms_setting('PERMISSION'):
return permissions.has_page_add_permission(request)
return super(PageAdmin, self).has_add_permission(request)
def has_change_permission(self, request, obj=None):
"""
Return true if the current user has permission on the page.
Return the string 'All' if the user has all rights.
"""
if get_cms_setting('PERMISSION'):
if obj:
return obj.has_change_permission(request)
else:
return permissions.has_page_change_permission(request)
return super(PageAdmin, self).has_change_permission(request, obj)
def has_delete_permission(self, request, obj=None):
"""
Returns True if the given request has permission to change the given
Django model instance. If CMS_PERMISSION are in use also takes look to
object permissions.
"""
if get_cms_setting('PERMISSION') and obj is not None:
return obj.has_delete_permission(request)
return super(PageAdmin, self).has_delete_permission(request, obj)
def has_recover_permission(self, request):
"""
Returns True if the use has the right to recover pages
"""
if not is_installed('reversion'):
return False
user = request.user
if user.is_superuser:
return True
try:
if has_global_page_permission(request, can_recover_page=True):
return True
except:
pass
return False
def has_add_plugin_permission(self, request, placeholder, plugin_type):
if not permissions.has_plugin_permission(request.user, plugin_type, "add"):
return False
page = placeholder.page
if page and not page.has_change_permission(request):
return False
if page and not page.publisher_is_draft:
return False
return True
def has_copy_plugin_permission(self, request, source_placeholder, target_placeholder, plugins):
source_page = source_placeholder.page
if source_page and not source_page.has_change_permission(request):
return False
target_page = target_placeholder.page
if target_page and not target_page.has_change_permission(request):
return False
if target_page and not target_page.publisher_is_draft:
return False
for plugin in plugins:
if not permissions.has_plugin_permission(request.user, plugin.plugin_type, "add"):
return False
return True
def has_change_plugin_permission(self, request, plugin):
page = plugin.placeholder.page if plugin.placeholder else None
if page and not page.has_change_permission(request):
return False
if page and not page.publisher_is_draft:
return False
if not permissions.has_plugin_permission(request.user, plugin.plugin_type, "change"):
return False
return True
def has_move_plugin_permission(self, request, plugin, target_placeholder):
if not permissions.has_plugin_permission(request.user, plugin.plugin_type, "change"):
return False
page = plugin.placeholder.page
if page and not page.has_change_permission(request):
return False
if page and not page.publisher_is_draft:
return False
return True
def has_delete_plugin_permission(self, request, plugin):
if not permissions.has_plugin_permission(request.user, plugin.plugin_type, "delete"):
return False
page = plugin.placeholder.page
if page:
if not page.publisher_is_draft:
return False
if not page.has_change_permission(request):
return False
return True
def has_clear_placeholder_permission(self, request, placeholder):
page = placeholder.page if placeholder else None
if page:
if not page.publisher_is_draft:
return False
if not page.has_change_permission(request):
return False
return True
def post_add_plugin(self, request, placeholder, plugin):
if is_installed('reversion') and placeholder.page:
plugin_name = force_text(plugin_pool.get_plugin(plugin.plugin_type).name)
message = _(u"%(plugin_name)s plugin added to %(placeholder)s") % {
'plugin_name': plugin_name, 'placeholder': placeholder}
self.cleanup_history(placeholder.page)
helpers.make_revision_with_plugins(placeholder.page, request.user, message)
def post_copy_plugins(self, request, source_placeholder, target_placeholder, plugins):
page = target_placeholder.page
if page and is_installed('reversion'):
message = _(u"Copied plugins to %(placeholder)s") % {'placeholder': target_placeholder}
self.cleanup_history(page)
helpers.make_revision_with_plugins(page, request.user, message)
def post_edit_plugin(self, request, plugin):
page = plugin.placeholder.page
if page:
# if reversion is installed, save version of the page plugins
if is_installed('reversion') and page:
plugin_name = force_text(plugin_pool.get_plugin(plugin.plugin_type).name)
message = _(
u"%(plugin_name)s plugin edited at position %(position)s in %(placeholder)s") % {
'plugin_name': plugin_name,
'position': plugin.position,
'placeholder': plugin.placeholder.slot
}
self.cleanup_history(page)
helpers.make_revision_with_plugins(page, request.user, message)
def post_move_plugin(self, request, source_placeholder, target_placeholder, plugin):
page = target_placeholder.page
if page and is_installed('reversion'):
self.cleanup_history(page)
helpers.make_revision_with_plugins(page, request.user, _(u"Plugins were moved"))
def post_delete_plugin(self, request, plugin):
plugin_name = force_text(plugin_pool.get_plugin(plugin.plugin_type).name)
page = plugin.placeholder.page
if page:
page.save()
comment = _("%(plugin_name)s plugin at position %(position)s in %(placeholder)s was deleted.") % {
'plugin_name': plugin_name,
'position': plugin.position,
'placeholder': plugin.placeholder,
}
if is_installed('reversion'):
self.cleanup_history(page)
helpers.make_revision_with_plugins(page, request.user, comment)
def post_clear_placeholder(self, request, placeholder):
page = placeholder.page
if page:
page.save()
comment = _('All plugins in the placeholder "%(name)s" were deleted.') % {
'name': force_text(placeholder)
}
if is_installed('reversion'):
self.cleanup_history(page)
helpers.make_revision_with_plugins(page, request.user, comment)
def get_placeholder_template(self, request, placeholder):
page = placeholder.page
if page:
return page.get_template()
def changelist_view(self, request, extra_context=None):
"The 'change list' admin view for this model."
from django.contrib.admin.views.main import ERROR_FLAG
opts = self.model._meta
app_label = opts.app_label
if not self.has_change_permission(request, None):
return HttpResponseForbidden(force_text(_("You do not have permission to change pages.")))
try:
cl = CMSChangeList(request, self.model, self.list_display, self.list_display_links, self.list_filter,
self.date_hierarchy, self.search_fields, self.list_select_related, self.list_per_page,
self.list_max_show_all, self.list_editable, self)
except IncorrectLookupParameters:
# Wacky lookup parameters were given, so redirect to the main
# changelist page, without parameters, and pass an 'invalid=1'
# parameter via the query string. If wacky parameters were given and
# the 'invalid=1' parameter was already in the query string, something
# is screwed up with the database, so display an error page.
if ERROR_FLAG in request.GET.keys():
return render(request, 'admin/invalid_setup.html', {'title': _('Database error')})
return HttpResponseRedirect(request.path_info + '?' + ERROR_FLAG + '=1')
cl.set_items(request)
site_id = request.GET.get('site__exact', None)
if site_id is None:
site_id = current_site(request).pk
site_id = int(site_id)
# languages
languages = get_language_list(site_id)
# parse the cookie that saves which page trees have
# been opened already and extracts the page ID
djangocms_nodes_open = request.COOKIES.get('djangocms_nodes_open', '')
raw_nodes = unquote(djangocms_nodes_open).split(',')
try:
open_menu_trees = [int(c.split('page_', 1)[1]) for c in raw_nodes]
except IndexError:
open_menu_trees = []
# Language may be present in the GET dictionary but empty
language = request.GET.get('language', get_language())
if not language:
language = get_language()
context = {
'title': cl.title,
'is_popup': cl.is_popup,
'cl': cl,
'opts': opts,
'has_add_permission': self.has_add_permission(request),
'root_path': admin_reverse('index'),
'app_label': app_label,
'preview_language': language,
'CMS_MEDIA_URL': get_cms_setting('MEDIA_URL'),
'CMS_PERMISSION': get_cms_setting('PERMISSION'),
'DEBUG': settings.DEBUG,
'site_languages': languages,
'open_menu_trees': open_menu_trees,
}
if is_installed('reversion'):
context['has_recover_permission'] = self.has_recover_permission(request)
context['has_change_permission'] = self.has_change_permission(request)
context.update(extra_context or {})
return render(request, self.change_list_template or [
'admin/%s/%s/change_list.html' % (app_label, opts.object_name.lower()),
'admin/%s/change_list.html' % app_label,
'admin/change_list.html'
], context)
def recoverlist_view(self, request, extra_context=None):
if not self.has_recover_permission(request):
raise PermissionDenied
return super(PageAdmin, self).recoverlist_view(request, extra_context)
def recover_view(self, request, version_id, extra_context=None):
if not self.has_recover_permission(request):
raise PermissionDenied
extra_context = self.update_language_tab_context(request, None, extra_context)
return super(PageAdmin, self).recover_view(request, version_id, extra_context)
def revision_view(self, request, object_id, version_id, extra_context=None):
if not self.has_change_permission(request, Page.objects.get(pk=object_id)):
raise PermissionDenied
extra_context = self.update_language_tab_context(request, None, extra_context)
response = super(PageAdmin, self).revision_view(request, object_id, version_id, extra_context)
return response
def history_view(self, request, object_id, extra_context=None):
if not self.has_change_permission(request, Page.objects.get(pk=object_id)):
raise PermissionDenied
extra_context = self.update_language_tab_context(request, None, extra_context)
return super(PageAdmin, self).history_view(request, object_id, extra_context)
def render_revision_form(self, request, obj, version, context, revert=False, recover=False):
# reset parent to null if parent is not found
if version.field_dict['parent']:
try:
Page.objects.get(pk=version.field_dict['parent'])
except:
if revert and obj.parent_id != int(version.field_dict['parent']):
version.field_dict['parent'] = obj.parent_id
if recover:
obj.parent = None
obj.parent_id = None
version.field_dict['parent'] = None
obj.version = version
return super(PageAdmin, self).render_revision_form(request, obj, version, context, revert, recover)
@require_POST
def undo(self, request, object_id):
if not is_installed('reversion'):
return HttpResponseBadRequest('django reversion not installed')
page = get_object_or_404(self.model, pk=object_id)
if not page.publisher_is_draft:
page = page.publisher_draft
if not page.has_change_permission(request):
return HttpResponseForbidden(force_text(_("You do not have permission to change this page")))
try:
reverted, clean = page.undo()
if not clean:
messages.error(request, _("Page reverted but slug stays the same because of url collisions."))
except IndexError as e:
return HttpResponseBadRequest(e.message)
return HttpResponse("ok")
@require_POST
def redo(self, request, object_id):
if not is_installed('reversion'):
return HttpResponseBadRequest('django reversion not installed')
page = get_object_or_404(self.model, pk=object_id)
if not page.publisher_is_draft:
page = page.publisher_draft
if not page.has_change_permission(request):
return HttpResponseForbidden(force_text(_("You do not have permission to change this page")))
try:
reverted, clean = page.redo()
if not clean:
messages.error(request, _("Page reverted but slug stays the same because of url collisions."))
except IndexError as e:
return HttpResponseBadRequest(e.message)
return HttpResponse("ok")
@require_POST
@create_revision()
def change_template(self, request, object_id):
page = get_object_or_404(self.model, pk=object_id)
if not page.has_change_permission(request):
return HttpResponseForbidden(force_text(_("You do not have permission to change the template")))
to_template = request.POST.get("template", None)
if to_template not in dict(get_cms_setting('TEMPLATES')):
return HttpResponseBadRequest(force_text(_("Template not valid")))
page.template = to_template
page.save()
if is_installed('reversion'):
message = _("Template changed to %s") % dict(get_cms_setting('TEMPLATES'))[to_template]
self.cleanup_history(page)
helpers.make_revision_with_plugins(page, request.user, message)
return HttpResponse(force_text(_("The template was successfully changed")))
@require_POST
@transaction.atomic
def move_page(self, request, page_id, extra_context=None):
"""
Move the page to the requested target, at the given position
"""
target = request.POST.get('target', None)
position = request.POST.get('position', None)
if target is None or position is None:
return HttpResponseRedirect('../../')
try:
page = self.model.objects.get(pk=page_id)
target = self.model.objects.get(pk=target)
except self.model.DoesNotExist:
return jsonify_request(HttpResponseBadRequest("error"))
        # does the user have permission to do this?
if not page.has_move_page_permission(request) or \
not target.has_add_permission(request):
return jsonify_request(
HttpResponseForbidden(force_text(_("Error! You don't have permissions to move this page. Please reload the page"))))
# move page
page.move_page(target, position)
if is_installed('reversion'):
self.cleanup_history(page)
helpers.make_revision_with_plugins(page, request.user, _("Page moved"))
return jsonify_request(HttpResponse(admin_utils.render_admin_menu_item(request, page).content))
def get_permissions(self, request, page_id):
page = get_object_or_404(self.model, id=page_id)
can_change_list = Page.permissions.get_change_id_list(request.user, page.site_id)
global_page_permissions = GlobalPagePermission.objects.filter(sites__in=[page.site_id])
page_permissions = PagePermission.objects.for_page(page)
all_permissions = list(global_page_permissions) + list(page_permissions)
        # can the user change global permissions?
has_global = permissions.has_global_change_permissions_permission(request)
permission_set = []
for permission in all_permissions:
if isinstance(permission, GlobalPagePermission):
if has_global:
permission_set.append([(True, True), permission])
else:
permission_set.append([(True, False), permission])
else:
if can_change_list == PagePermissionsPermissionManager.GRANT_ALL:
can_change = True
else:
can_change = permission.page_id in can_change_list
permission_set.append([(False, can_change), permission])
context = {
'page': page,
'permission_set': permission_set,
}
return render(request, 'admin/cms/page/permissions.html', context)
@require_POST
@transaction.atomic
def copy_language(self, request, page_id):
with create_revision():
source_language = request.POST.get('source_language')
target_language = request.POST.get('target_language')
page = Page.objects.get(pk=page_id)
placeholders = page.get_placeholders()
if not target_language or not target_language in get_language_list():
return HttpResponseBadRequest(force_text(_("Language must be set to a supported language!")))
for placeholder in placeholders:
plugins = list(
placeholder.cmsplugin_set.filter(language=source_language).order_by('path'))
if not self.has_copy_plugin_permission(request, placeholder, placeholder, plugins):
return HttpResponseForbidden(force_text(_('You do not have permission to copy these plugins.')))
copy_plugins.copy_plugins_to(plugins, placeholder, target_language)
if page and is_installed('reversion'):
message = _(u"Copied plugins from %(source_language)s to %(target_language)s") % {
'source_language': source_language, 'target_language': target_language}
self.cleanup_history(page)
helpers.make_revision_with_plugins(page, request.user, message)
return HttpResponse("ok")
@require_POST
@transaction.atomic
def copy_page(self, request, page_id, extra_context=None):
"""
Copy the page and all its plugins and descendants to the requested target, at the given position
"""
context = {}
page = Page.objects.get(pk=page_id)
target = request.POST.get('target', None)
position = request.POST.get('position', None)
site = request.POST.get('site', None)
if target is not None and position is not None and site is not None:
try:
target = self.model.objects.get(pk=target)
                # does the user have permission to copy this page under target?
assert target.has_add_permission(request)
site = Site.objects.get(pk=site)
except (ObjectDoesNotExist, AssertionError):
return HttpResponse("error")
#context.update({'error': _('Page could not been moved.')})
else:
try:
kwargs = {
'copy_permissions': request.REQUEST.get('copy_permissions', False),
}
page.copy_page(target, site, position, **kwargs)
return jsonify_request(HttpResponse("ok"))
except ValidationError:
exc = sys.exc_info()[1]
return jsonify_request(HttpResponseBadRequest(exc.messages))
context.update(extra_context or {})
return HttpResponseRedirect('../../')
@require_POST
@transaction.atomic
@create_revision()
def publish_page(self, request, page_id, language):
try:
page = Page.objects.get(id=page_id, publisher_is_draft=True)
except Page.DoesNotExist:
page = None
# ensure user has permissions to publish this page
all_published = True
if page:
if not page.has_publish_permission(request):
return HttpResponseForbidden(force_text(_("You do not have permission to publish this page")))
published = page.publish(language)
if not published:
all_published = False
statics = request.GET.get('statics', '')
if not statics and not page:
            raise Http404("No page or stack found for publishing.")
if statics:
            static_ids = statics.split(',')
for pk in static_ids:
static_placeholder = StaticPlaceholder.objects.get(pk=pk)
published = static_placeholder.publish(request, language)
if not published:
all_published = False
if page:
if all_published:
if page.get_publisher_state(language) == PUBLISHER_STATE_PENDING:
messages.warning(request, _("Page not published! A parent page is not published yet."))
else:
messages.info(request, _('The content was successfully published.'))
LogEntry.objects.log_action(
user_id=request.user.id,
content_type_id=ContentType.objects.get_for_model(Page).pk,
object_id=page_id,
object_repr=page.get_title(language),
action_flag=CHANGE,
)
else:
if page.get_publisher_state(language) == PUBLISHER_STATE_PENDING:
messages.warning(request, _("Page not published! A parent page is not published yet."))
else:
messages.warning(request, _("There was a problem publishing your content"))
if is_installed('reversion') and page:
self.cleanup_history(page, publish=True)
helpers.make_revision_with_plugins(page, request.user, PUBLISH_COMMENT)
# create a new publish reversion
if 'node' in request.REQUEST:
# if request comes from tree..
return admin_utils.render_admin_menu_item(request, page)
if 'redirect' in request.GET:
return HttpResponseRedirect(request.GET['redirect'])
referrer = request.META.get('HTTP_REFERER', '')
path = admin_reverse("cms_page_changelist")
if request.GET.get('redirect_language'):
path = "%s?language=%s&page_id=%s" % (path, request.GET.get('redirect_language'), request.GET.get('redirect_page_id'))
if admin_reverse('index') not in referrer:
if all_published:
if page:
if page.get_publisher_state(language) == PUBLISHER_STATE_PENDING:
path = page.get_absolute_url(language, fallback=True)
else:
public_page = Page.objects.get(publisher_public=page.pk)
path = '%s?%s' % (public_page.get_absolute_url(language, fallback=True), get_cms_setting('CMS_TOOLBAR_URL__EDIT_OFF'))
else:
path = '%s?%s' % (referrer, get_cms_setting('CMS_TOOLBAR_URL__EDIT_OFF'))
else:
path = '/?%s' % get_cms_setting('CMS_TOOLBAR_URL__EDIT_OFF')
return HttpResponseRedirect(path)
def cleanup_history(self, page, publish=False):
if is_installed('reversion') and page:
# delete revisions that are not publish revisions
from reversion.models import Version
content_type = ContentType.objects.get_for_model(Page)
            # reversion 1.8+ removed the type field; revision filtering must be based on comments
versions_qs = Version.objects.filter(content_type=content_type, object_id_int=page.pk)
history_limit = get_cms_setting("MAX_PAGE_HISTORY_REVERSIONS")
deleted = []
for version in versions_qs.exclude(revision__comment__in=(INITIAL_COMMENT, PUBLISH_COMMENT)).order_by(
'-revision__pk')[history_limit - 1:]:
if not version.revision_id in deleted:
revision = version.revision
revision.delete()
deleted.append(revision.pk)
            # delete all publish revisions beyond MAX_PAGE_PUBLISH_REVERSIONS
publish_limit = get_cms_setting("MAX_PAGE_PUBLISH_REVERSIONS")
if publish_limit and publish:
deleted = []
for version in versions_qs.filter(revision__comment__exact=PUBLISH_COMMENT).order_by(
'-revision__pk')[publish_limit - 1:]:
if not version.revision_id in deleted:
revision = version.revision
revision.delete()
deleted.append(revision.pk)
@require_POST
@transaction.atomic
def unpublish(self, request, page_id, language):
"""
Publish or unpublish a language of a page
"""
site = Site.objects.get_current()
page = get_object_or_404(self.model, pk=page_id)
if not page.has_publish_permission(request):
return HttpResponseForbidden(force_text(_("You do not have permission to unpublish this page")))
if not page.publisher_public_id:
return HttpResponseForbidden(force_text(_("This page was never published")))
try:
page.unpublish(language)
message = _('The %(language)s page "%(page)s" was successfully unpublished') % {
'language': get_language_object(language, site)['name'], 'page': page}
messages.info(request, message)
LogEntry.objects.log_action(
user_id=request.user.id,
content_type_id=ContentType.objects.get_for_model(Page).pk,
object_id=page_id,
object_repr=page.get_title(),
action_flag=CHANGE,
change_message=message,
)
        except (RuntimeError, ValidationError):
            exc = sys.exc_info()[1]
            messages.error(request, exc.message)
path = admin_reverse("cms_page_changelist")
if request.GET.get('redirect_language'):
path = "%s?language=%s&page_id=%s" % (path, request.GET.get('redirect_language'), request.GET.get('redirect_page_id'))
return HttpResponseRedirect(path)
@require_POST
@transaction.atomic
def revert_page(self, request, page_id, language):
page = get_object_or_404(self.model, id=page_id)
        # ensure user has permission to change this page
if not page.has_change_permission(request):
return HttpResponseForbidden(force_text(_("You do not have permission to change this page")))
page.revert(language)
messages.info(request, _('The page "%s" was successfully reverted.') % page)
if 'node' in request.REQUEST:
# if request comes from tree..
return admin_utils.render_admin_menu_item(request, page)
referer = request.META.get('HTTP_REFERER', '')
path = '../../'
if admin_reverse('index') not in referer:
path = '%s?%s' % (referer.split('?')[0], get_cms_setting('CMS_TOOLBAR_URL__EDIT_OFF'))
return HttpResponseRedirect(path)
@create_revision()
def delete_translation(self, request, object_id, extra_context=None):
if 'language' in request.GET:
language = request.GET['language']
else:
language = get_language_from_request(request)
opts = Page._meta
titleopts = Title._meta
app_label = titleopts.app_label
pluginopts = CMSPlugin._meta
try:
obj = self.get_queryset(request).get(pk=unquote(object_id))
except self.model.DoesNotExist:
# Don't raise Http404 just yet, because we haven't checked
# permissions yet. We don't want an unauthenticated user to be able
# to determine whether a given object exists.
obj = None
if not self.has_delete_permission(request, obj):
return HttpResponseForbidden(force_text(_("You do not have permission to change this page")))
if obj is None:
raise Http404(
_('%(name)s object with primary key %(key)r does not exist.') % {
'name': force_text(opts.verbose_name),
'key': escape(object_id)
})
        if len(list(obj.get_languages())) <= 1:
raise Http404(_('There only exists one translation for this page'))
titleobj = get_object_or_404(Title, page__id=object_id, language=language)
saved_plugins = CMSPlugin.objects.filter(placeholder__page__id=object_id, language=language)
using = router.db_for_read(self.model)
kwargs = {
'admin_site': self.admin_site,
'user': request.user,
'using': using
}
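        # get_deleted_objects() grew an extra return value (the model count) after
        # Django 1.7, so the unpacking differs between the two branches below.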
if DJANGO_1_7:
deleted_objects, perms_needed = get_deleted_objects(
[titleobj],
titleopts,
**kwargs
)[:2]
to_delete_plugins, perms_needed_plugins = get_deleted_objects(
saved_plugins,
pluginopts,
**kwargs
)[:2]
else:
deleted_objects, __, perms_needed = get_deleted_objects(
[titleobj],
titleopts,
**kwargs
)[:3]
to_delete_plugins, __, perms_needed_plugins = get_deleted_objects(
saved_plugins,
pluginopts,
**kwargs
)[:3]
deleted_objects.append(to_delete_plugins)
perms_needed = set(list(perms_needed) + list(perms_needed_plugins))
if request.method == 'POST':
if perms_needed:
raise PermissionDenied
            message = _('Title and plugins with language %(language)s were deleted') % {
'language': force_text(get_language_object(language)['name'])
}
self.log_change(request, titleobj, message)
messages.info(request, message)
titleobj.delete()
for p in saved_plugins:
p.delete()
public = obj.publisher_public
if public:
public.save()
if is_installed('reversion'):
self.cleanup_history(obj)
helpers.make_revision_with_plugins(obj, request.user, message)
if not self.has_change_permission(request, None):
return HttpResponseRedirect("../../../../")
return HttpResponseRedirect("../../")
context = {
"title": _("Are you sure?"),
"object_name": force_text(titleopts.verbose_name),
"object": titleobj,
"deleted_objects": deleted_objects,
"perms_lacking": perms_needed,
"opts": opts,
"root_path": admin_reverse('index'),
"app_label": app_label,
}
context.update(extra_context or {})
request.current_app = self.admin_site.name
return render(request, self.delete_confirmation_template or [
"admin/%s/%s/delete_confirmation.html" % (app_label, titleopts.object_name.lower()),
"admin/%s/delete_confirmation.html" % app_label,
"admin/delete_confirmation.html"
], context)
def preview_page(self, request, object_id, language):
"""Redirecting preview function based on draft_id
"""
page = get_object_or_404(self.model, id=object_id)
attrs = "?%s" % get_cms_setting('CMS_TOOLBAR_URL__EDIT_ON')
attrs += "&language=" + language
with force_language(language):
url = page.get_absolute_url(language) + attrs
site = get_current_site(request)
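            # If the page belongs to a different site, build an absolute URL on that
            # site's domain, preserving the request scheme.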
            if site != page.site:
url = "http%s://%s%s" % ('s' if request.is_secure() else '',
page.site.domain, url)
return HttpResponseRedirect(url)
@require_POST
def change_innavigation(self, request, page_id):
"""
Switch the in_navigation of a page
"""
page = get_object_or_404(self.model, pk=page_id)
if page.has_change_permission(request):
page.toggle_in_navigation()
language = request.GET.get('language') or get_language_from_request(request)
return admin_utils.render_admin_menu_item(request, page, language=language)
return HttpResponseForbidden(force_text(_("You do not have permission to change this page's in_navigation status")))
def descendants(self, request, page_id, language):
"""
Get html for descendants of given page
Used for lazy loading pages in cms.changelist.js
        Permission checks are done in admin_utils.get_admin_menu_item_context,
which is called by admin_utils.render_admin_menu_item.
"""
page = get_object_or_404(self.model, pk=page_id)
return admin_utils.render_admin_menu_item(request, page,
template="admin/cms/page/tree/lazy_menu.html", language=language)
def add_page_type(self, request):
site = Site.objects.get_current()
language = request.GET.get('language') or get_language()
target = request.GET.get('copy_target')
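        # Make sure the hidden "Page Types" root page exists, then redirect to the
        # regular add-page form as a first child of it, carrying over the copy target.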
type_root, created = self.model.objects.get_or_create(reverse_id=PAGE_TYPES_ID, publisher_is_draft=True, site=site,
defaults={'in_navigation': False})
type_title, created = Title.objects.get_or_create(page=type_root, language=language, slug=PAGE_TYPES_ID,
defaults={'title': _('Page Types')})
url = add_url_parameters(admin_reverse('cms_page_add'), target=type_root.pk, position='first-child',
add_page_type=1, copy_target=target, language=language)
return HttpResponseRedirect(url)
def resolve(self, request):
if not request.user.is_staff:
return HttpResponse('/', content_type='text/plain')
obj = False
url = False
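        # First try the object that was edited last (its LogEntry pk is stored in the
        # session); if that fails, fall back to an explicit pk/model from the request.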
if request.session.get('cms_log_latest', False):
log = LogEntry.objects.get(pk=request.session['cms_log_latest'])
try:
obj = log.get_edited_object()
except (ObjectDoesNotExist, ValueError):
obj = None
del request.session['cms_log_latest']
if obj and obj.__class__ in toolbar_pool.get_watch_models() and hasattr(obj, 'get_absolute_url'):
            # Check whether the object's URL can be retrieved;
            # if it can't, the object is not taken into account.
try:
force_text(obj.get_absolute_url())
            except Exception:
obj = None
else:
obj = None
if not obj:
pk = request.REQUEST.get('pk')
full_model = request.REQUEST.get('model')
if pk and full_model:
app_label, model = full_model.split('.')
if pk and app_label:
ctype = ContentType.objects.get(app_label=app_label, model=model)
try:
obj = ctype.get_object_for_this_type(pk=pk)
except ctype.model_class().DoesNotExist:
obj = None
try:
force_text(obj.get_absolute_url())
                    except Exception:
obj = None
if obj:
if not request.toolbar or not request.toolbar.edit_mode:
if isinstance(obj, Page):
if obj.get_public_object():
url = obj.get_public_object().get_absolute_url()
else:
url = '%s?%s' % (
obj.get_draft_object().get_absolute_url(),
get_cms_setting('CMS_TOOLBAR_URL__EDIT_ON')
)
else:
url = obj.get_absolute_url()
else:
url = obj.get_absolute_url()
if url:
return HttpResponse(force_text(url), content_type='text/plain')
return HttpResponse('', content_type='text/plain')
def lookup_allowed(self, key, *args, **kwargs):
if key == 'site__exact':
return True
return super(PageAdmin, self).lookup_allowed(key, *args, **kwargs)
def edit_title_fields(self, request, page_id, language):
title = Title.objects.get(page_id=page_id, language=language)
saved_successfully = False
raw_fields = request.GET.get("edit_fields", 'title')
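        # Only fields whitelisted in title_frontend_editable_fields may be edited from
        # the frontend; anything else in the edit_fields parameter is dropped.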
edit_fields = [field for field in raw_fields.split(",") if field in self.title_frontend_editable_fields]
cancel_clicked = request.POST.get("_cancel", False)
opts = Title._meta
if not edit_fields:
# Defaults to title
edit_fields = ('title',)
if not has_generic_permission(title.page.pk, request.user, "change",
title.page.site.pk):
return HttpResponseForbidden(force_text(_("You do not have permission to edit this page")))
class PageTitleForm(django.forms.ModelForm):
"""
Dynamic form showing only the fields to be edited
"""
class Meta:
model = Title
fields = edit_fields
if not cancel_clicked and request.method == 'POST':
form = PageTitleForm(instance=title, data=request.POST)
if form.is_valid():
form.save()
saved_successfully = True
else:
form = PageTitleForm(instance=title)
admin_form = AdminForm(form, fieldsets=[(None, {'fields': edit_fields})], prepopulated_fields={},
model_admin=self)
media = self.media + admin_form.media
context = {
'CMS_MEDIA_URL': get_cms_setting('MEDIA_URL'),
'title': 'Title',
'plugin': title.page,
'plugin_id': title.page.id,
'adminform': admin_form,
'add': False,
'is_popup': True,
'media': media,
'opts': opts,
'change': True,
'save_as': False,
'has_add_permission': False,
'window_close_timeout': 10,
}
if cancel_clicked:
# cancel button was clicked
context.update({
'cancel': True,
})
return render(request, 'admin/cms/page/plugin/confirm_form.html', context)
if not cancel_clicked and request.method == 'POST' and saved_successfully:
return render(request, 'admin/cms/page/plugin/confirm_form.html', context)
return render(request, 'admin/cms/page/plugin/change_form.html', context)
def get_published_pagelist(self, *args, **kwargs):
"""
        This view is used by the PageSmartLinkWidget as the user types, to feed the autocomplete drop-down.
"""
request = args[0]
if request.is_ajax():
            query_term = request.GET.get('q', '').strip('/')
language_code = request.GET.get('language_code', settings.LANGUAGE_CODE)
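            # Match the term against the title, path, menu title and page title of
            # published public pages in the requested language.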
matching_published_pages = self.model.objects.published().public().filter(
Q(title_set__title__icontains=query_term, title_set__language=language_code)
| Q(title_set__path__icontains=query_term, title_set__language=language_code)
| Q(title_set__menu_title__icontains=query_term, title_set__language=language_code)
| Q(title_set__page_title__icontains=query_term, title_set__language=language_code)
).distinct()
results = []
for page in matching_published_pages:
results.append(
{
'path': page.get_path(language=language_code),
'title': page.get_title(language=language_code),
'redirect_url': page.get_absolute_url(language=language_code)
}
)
return HttpResponse(json.dumps(results), content_type='application/json')
else:
return HttpResponseForbidden()
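    # The plugin handling methods below simply wrap the parent implementations in a
    # reversion revision so that every plugin change on a page is versioned.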
def add_plugin(self, *args, **kwargs):
with create_revision():
return super(PageAdmin, self).add_plugin(*args, **kwargs)
def copy_plugins(self, *args, **kwargs):
with create_revision():
return super(PageAdmin, self).copy_plugins(*args, **kwargs)
def edit_plugin(self, *args, **kwargs):
with create_revision():
return super(PageAdmin, self).edit_plugin(*args, **kwargs)
def move_plugin(self, *args, **kwargs):
with create_revision():
return super(PageAdmin, self).move_plugin(*args, **kwargs)
def delete_plugin(self, *args, **kwargs):
with create_revision():
return super(PageAdmin, self).delete_plugin(*args, **kwargs)
def clear_placeholder(self, *args, **kwargs):
with create_revision():
return super(PageAdmin, self).clear_placeholder(*args, **kwargs)
admin.site.register(Page, PageAdmin)
avg_line_length: 44.706777 | max_line_length: 142 | alphanum_fraction: 0.615135